浏览代码

HDFS-11428. Change setErasureCodingPolicy to take a required string EC policy name. Contributed by Andrew Wang.

Rakesh Radhakrishnan 8 年之前
父节点
当前提交
82ef9accaf
共有 49 个文件被更改,包括 152 次插入和 132 次删除
  1. 2 2
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  2. 6 5
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  3. 6 4
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
  4. 2 3
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  5. 2 4
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  6. 1 1
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
  7. 1 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  8. 11 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
  9. 5 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  10. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  11. 1 25
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
  12. 4 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
  13. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java
  14. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
  15. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
  16. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
  17. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java
  18. 21 16
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
  19. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java
  20. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java
  21. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java
  22. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
  23. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
  24. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java
  25. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
  26. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java
  27. 9 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
  28. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java
  29. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java
  30. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
  31. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java
  32. 6 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
  33. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java
  34. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java
  35. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
  36. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
  37. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java
  38. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
  39. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
  40. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
  41. 5 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
  42. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
  43. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java
  44. 4 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
  45. 4 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
  46. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
  47. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
  48. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
  49. 1 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -2611,12 +2611,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   }
   }
 
 
 
 
-  public void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy)
+  public void setErasureCodingPolicy(String src, String ecPolicyName)
       throws IOException {
       throws IOException {
     checkOpen();
     checkOpen();
     try (TraceScope ignored =
     try (TraceScope ignored =
              newPathTraceScope("setErasureCodingPolicy", src)) {
              newPathTraceScope("setErasureCodingPolicy", src)) {
-      namenode.setErasureCodingPolicy(src, ecPolicy);
+      namenode.setErasureCodingPolicy(src, ecPolicyName);
     } catch (RemoteException re) {
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
       throw re.unwrapRemoteException(AccessControlException.class,
           SafeModeException.class,
           SafeModeException.class,

+ 6 - 5
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -2397,17 +2397,18 @@ public class DistributedFileSystem extends FileSystem {
    * Set the source path to the specified erasure coding policy.
    * Set the source path to the specified erasure coding policy.
    *
    *
    * @param path     The directory to set the policy
    * @param path     The directory to set the policy
-   * @param ecPolicy The erasure coding policy. If not specified default will
-   *                 be used.
+   * @param ecPolicyName The erasure coding policy name.
    * @throws IOException
    * @throws IOException
    */
    */
   public void setErasureCodingPolicy(final Path path,
   public void setErasureCodingPolicy(final Path path,
-      final ErasureCodingPolicy ecPolicy) throws IOException {
+      final String ecPolicyName) throws IOException {
     Path absF = fixRelativePart(path);
     Path absF = fixRelativePart(path);
+    Preconditions.checkNotNull(ecPolicyName, "Erasure coding policy cannot be" +
+        " null.");
     new FileSystemLinkResolver<Void>() {
     new FileSystemLinkResolver<Void>() {
       @Override
       @Override
       public Void doCall(final Path p) throws IOException {
       public Void doCall(final Path p) throws IOException {
-        dfs.setErasureCodingPolicy(getPathName(p), ecPolicy);
+        dfs.setErasureCodingPolicy(getPathName(p), ecPolicyName);
         return null;
         return null;
       }
       }
 
 
@@ -2415,7 +2416,7 @@ public class DistributedFileSystem extends FileSystem {
       public Void next(final FileSystem fs, final Path p) throws IOException {
       public Void next(final FileSystem fs, final Path p) throws IOException {
         if (fs instanceof DistributedFileSystem) {
         if (fs instanceof DistributedFileSystem) {
           DistributedFileSystem myDfs = (DistributedFileSystem) fs;
           DistributedFileSystem myDfs = (DistributedFileSystem) fs;
-          myDfs.setErasureCodingPolicy(p, ecPolicy);
+          myDfs.setErasureCodingPolicy(p, ecPolicyName);
           return null;
           return null;
         }
         }
         throw new UnsupportedOperationException(
         throw new UnsupportedOperationException(

+ 6 - 4
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java

@@ -465,13 +465,15 @@ public class HdfsAdmin {
    * Set the source path to the specified erasure coding policy.
    * Set the source path to the specified erasure coding policy.
    *
    *
    * @param path The source path referring to a directory.
    * @param path The source path referring to a directory.
-   * @param ecPolicy The erasure coding policy for the directory.
-   *                 If null, the default will be used.
+   * @param ecPolicyName The erasure coding policy name for the directory.
+   *
    * @throws IOException
    * @throws IOException
+   * @throws HadoopIllegalArgumentException if the specified EC policy is not
+   * enabled on the cluster
    */
    */
   public void setErasureCodingPolicy(final Path path,
   public void setErasureCodingPolicy(final Path path,
-      final ErasureCodingPolicy ecPolicy) throws IOException {
-    dfs.setErasureCodingPolicy(path, ecPolicy);
+      final String ecPolicyName) throws IOException {
+    dfs.setErasureCodingPolicy(path, ecPolicyName);
   }
   }
 
 
   /**
   /**

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -1510,11 +1510,10 @@ public interface ClientProtocol {
   /**
   /**
    * Set an erasure coding policy on a specified path.
    * Set an erasure coding policy on a specified path.
    * @param src The path to set policy on.
    * @param src The path to set policy on.
-   * @param ecPolicy The erasure coding policy. If null, default policy will
-   *                 be used
+   * @param ecPolicyName The erasure coding policy name.
    */
    */
   @AtMostOnce
   @AtMostOnce
-  void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy)
+  void setErasureCodingPolicy(String src, String ecPolicyName)
       throws IOException;
       throws IOException;
 
 
   /**
   /**

+ 2 - 4
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -1459,14 +1459,12 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
   }
 
 
   @Override
   @Override
-  public void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy)
+  public void setErasureCodingPolicy(String src, String ecPolicyName)
       throws IOException {
       throws IOException {
     final SetErasureCodingPolicyRequestProto.Builder builder =
     final SetErasureCodingPolicyRequestProto.Builder builder =
         SetErasureCodingPolicyRequestProto.newBuilder();
         SetErasureCodingPolicyRequestProto.newBuilder();
     builder.setSrc(src);
     builder.setSrc(src);
-    if (ecPolicy != null) {
-      builder.setEcPolicy(PBHelperClient.convertErasureCodingPolicy(ecPolicy));
-    }
+    builder.setEcPolicyName(ecPolicyName);
     SetErasureCodingPolicyRequestProto req = builder.build();
     SetErasureCodingPolicyRequestProto req = builder.build();
     try {
     try {
       rpcProxy.setErasureCodingPolicy(null, req);
       rpcProxy.setErasureCodingPolicy(null, req);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto

@@ -25,7 +25,7 @@ import "hdfs.proto";
 
 
 message SetErasureCodingPolicyRequestProto {
 message SetErasureCodingPolicyRequestProto {
   required string src = 1;
   required string src = 1;
-  optional ErasureCodingPolicyProto ecPolicy = 2;
+  required string ecPolicyName = 2;
 }
 }
 
 
 message SetErasureCodingPolicyResponseProto {
 message SetErasureCodingPolicyResponseProto {

+ 1 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -1442,9 +1442,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, SetErasureCodingPolicyRequestProto req)
       RpcController controller, SetErasureCodingPolicyRequestProto req)
       throws ServiceException {
       throws ServiceException {
     try {
     try {
-      ErasureCodingPolicy ecPolicy = req.hasEcPolicy() ?
-          PBHelperClient.convertErasureCodingPolicy(req.getEcPolicy()) : null;
-      server.setErasureCodingPolicy(req.getSrc(), ecPolicy);
+      server.setErasureCodingPolicy(req.getSrc(), req.getEcPolicyName());
       return SetErasureCodingPolicyResponseProto.newBuilder().build();
       return SetErasureCodingPolicyResponseProto.newBuilder().build();
     } catch (IOException e) {
     } catch (IOException e) {
       throw new ServiceException(e);
       throw new ServiceException(e);

+ 11 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java

@@ -59,14 +59,16 @@ final class FSDirErasureCodingOp {
    *
    *
    * @param fsn The namespace
    * @param fsn The namespace
    * @param srcArg The path of the target directory.
    * @param srcArg The path of the target directory.
-   * @param ecPolicy The erasure coding policy to set on the target directory.
+   * @param ecPolicyName The erasure coding policy name to set on the target
+   *                    directory.
    * @param logRetryCache whether to record RPC ids in editlog for retry
    * @param logRetryCache whether to record RPC ids in editlog for retry
    *          cache rebuilding
    *          cache rebuilding
    * @return {@link HdfsFileStatus}
    * @return {@link HdfsFileStatus}
    * @throws IOException
    * @throws IOException
+   * @throws HadoopIllegalArgumentException if the policy is not enabled
    */
    */
   static HdfsFileStatus setErasureCodingPolicy(final FSNamesystem fsn,
   static HdfsFileStatus setErasureCodingPolicy(final FSNamesystem fsn,
-      final String srcArg, final ErasureCodingPolicy ecPolicy,
+      final String srcArg, final String ecPolicyName,
       final boolean logRetryCache) throws IOException {
       final boolean logRetryCache) throws IOException {
     assert fsn.hasWriteLock();
     assert fsn.hasWriteLock();
 
 
@@ -78,6 +80,13 @@ final class FSDirErasureCodingOp {
     List<XAttr> xAttrs;
     List<XAttr> xAttrs;
     fsd.writeLock();
     fsd.writeLock();
     try {
     try {
+      ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
+          .getPolicyByName(ecPolicyName);
+      if (ecPolicy == null) {
+        throw new HadoopIllegalArgumentException("Policy '" +
+            ecPolicyName + "' does not match any supported erasure coding " +
+            "policies.");
+      }
       iip = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
       iip = fsd.resolvePath(pc, src, DirOp.WRITE_LINK);
       src = iip.getPath();
       src = iip.getPath();
       xAttrs = setErasureCodingPolicyXAttr(fsn, iip, ecPolicy);
       xAttrs = setErasureCodingPolicyXAttr(fsn, iip, ecPolicy);

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -6772,13 +6772,14 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   /**
   /**
    * Set an erasure coding policy on the given path.
    * Set an erasure coding policy on the given path.
    * @param srcArg  The path of the target directory.
    * @param srcArg  The path of the target directory.
-   * @param ecPolicy The erasure coding policy to set on the target directory.
+   * @param ecPolicyName The erasure coding policy to set on the target
+   *                    directory.
    * @throws AccessControlException  if the caller is not the superuser.
    * @throws AccessControlException  if the caller is not the superuser.
    * @throws UnresolvedLinkException if the path can't be resolved.
    * @throws UnresolvedLinkException if the path can't be resolved.
    * @throws SafeModeException       if the Namenode is in safe mode.
    * @throws SafeModeException       if the Namenode is in safe mode.
    */
    */
-  void setErasureCodingPolicy(final String srcArg, final ErasureCodingPolicy
-      ecPolicy, final boolean logRetryCache) throws IOException,
+  void setErasureCodingPolicy(final String srcArg, final String ecPolicyName,
+      final boolean logRetryCache) throws IOException,
       UnresolvedLinkException, SafeModeException, AccessControlException {
       UnresolvedLinkException, SafeModeException, AccessControlException {
     final String operationName = "setErasureCodingPolicy";
     final String operationName = "setErasureCodingPolicy";
     checkSuperuserPrivilege();
     checkSuperuserPrivilege();
@@ -6790,7 +6791,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       checkOperation(OperationCategory.WRITE);
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set erasure coding policy on " + srcArg);
       checkNameNodeSafeMode("Cannot set erasure coding policy on " + srcArg);
       resultingStat = FSDirErasureCodingOp.setErasureCodingPolicy(this,
       resultingStat = FSDirErasureCodingOp.setErasureCodingPolicy(this,
-          srcArg, ecPolicy, logRetryCache);
+          srcArg, ecPolicyName, logRetryCache);
       success = true;
       success = true;
     } catch (AccessControlException ace) {
     } catch (AccessControlException ace) {
       logAuditEvent(success, operationName, srcArg, null,
       logAuditEvent(success, operationName, srcArg, null,

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -2018,7 +2018,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   }
   }
 
 
   @Override // ClientProtocol
   @Override // ClientProtocol
-  public void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy)
+  public void setErasureCodingPolicy(String src, String ecPolicyName)
       throws IOException {
       throws IOException {
     checkNNStartup();
     checkNNStartup();
     final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
     final CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
@@ -2027,7 +2027,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     }
     }
     boolean success = false;
     boolean success = false;
     try {
     try {
-      namesystem.setErasureCodingPolicy(src, ecPolicy, cacheEntry != null);
+      namesystem.setErasureCodingPolicy(src, ecPolicyName, cacheEntry != null);
       success = true;
       success = true;
     } finally {
     } finally {
       RetryCache.setState(cacheEntry, success);
       RetryCache.setState(cacheEntry, success);

+ 1 - 25
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java

@@ -28,7 +28,6 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.ToolRunner;
 
 
 import java.io.IOException;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collection;
 import java.util.LinkedList;
 import java.util.LinkedList;
@@ -227,30 +226,7 @@ public class ECAdmin extends Configured implements Tool {
       final Path p = new Path(path);
       final Path p = new Path(path);
       final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
       final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
       try {
       try {
-        ErasureCodingPolicy ecPolicy = null;
-        ErasureCodingPolicy[] ecPolicies =
-            dfs.getClient().getErasureCodingPolicies();
-        for (ErasureCodingPolicy policy : ecPolicies) {
-          if (ecPolicyName.equals(policy.getName())) {
-            ecPolicy = policy;
-            break;
-          }
-        }
-        if (ecPolicy == null) {
-          StringBuilder sb = new StringBuilder();
-          sb.append("Policy '");
-          sb.append(ecPolicyName);
-          sb.append("' does not match any of the supported policies.");
-          sb.append(" Please select any one of ");
-          List<String> ecPolicyNames = new ArrayList<String>();
-          for (ErasureCodingPolicy policy : ecPolicies) {
-            ecPolicyNames.add(policy.getName());
-          }
-          sb.append(ecPolicyNames);
-          System.err.println(sb.toString());
-          return 3;
-        }
-        dfs.setErasureCodingPolicy(p, ecPolicy);
+        dfs.setErasureCodingPolicy(p, ecPolicyName);
         System.out.println("Set erasure coding policy " + ecPolicyName +
         System.out.println("Set erasure coding policy " + ecPolicyName +
             " on " + path);
             " on " + path);
       } catch (Exception e) {
       } catch (Exception e) {

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -136,6 +136,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
 import org.apache.hadoop.hdfs.server.datanode.TestTransferRbw;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -1898,7 +1899,7 @@ public class DFSTestUtil {
       Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir)
       Path dir, int numBlocks, int numStripesPerBlk, boolean toMkdir)
       throws Exception {
       throws Exception {
     createStripedFile(cluster, file, dir, numBlocks, numStripesPerBlk,
     createStripedFile(cluster, file, dir, numBlocks, numStripesPerBlk,
-        toMkdir, null);
+        toMkdir, ErasureCodingPolicyManager.getSystemDefaultPolicy());
   }
   }
 
 
   /**
   /**
@@ -1922,7 +1923,8 @@ public class DFSTestUtil {
       assert dir != null;
       assert dir != null;
       dfs.mkdirs(dir);
       dfs.mkdirs(dir);
       try {
       try {
-        dfs.getClient().setErasureCodingPolicy(dir.toString(), ecPolicy);
+        dfs.getClient()
+            .setErasureCodingPolicy(dir.toString(), ecPolicy.getName());
       } catch (IOException e) {
       } catch (IOException e) {
         if (!e.getMessage().contains("non-empty directory")) {
         if (!e.getMessage().contains("non-empty directory")) {
           throw e;
           throw e;

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java

@@ -192,7 +192,8 @@ public class ErasureCodeBenchmarkThroughput
     }
     }
     if (!dfs.exists(ecPath)) {
     if (!dfs.exists(ecPath)) {
       dfs.mkdirs(ecPath);
       dfs.mkdirs(ecPath);
-      dfs.getClient().setErasureCodingPolicy(ecPath.toString(), ecPolicy);
+      dfs.getClient()
+          .setErasureCodingPolicy(ecPath.toString(), ecPolicy.getName());
     } else {
     } else {
       Preconditions.checkArgument(
       Preconditions.checkArgument(
           dfs.getClient().
           dfs.getClient().

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java

@@ -108,7 +108,8 @@ public class TestDFSStripedInputStream {
     }
     }
     fs = cluster.getFileSystem();
     fs = cluster.getFileSystem();
     fs.mkdirs(dirPath);
     fs.mkdirs(dirPath);
-    fs.getClient().setErasureCodingPolicy(dirPath.toString(), ecPolicy);
+    fs.getClient()
+        .setErasureCodingPolicy(dirPath.toString(), ecPolicy.getName());
   }
   }
 
 
   @After
   @After

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java

@@ -89,7 +89,8 @@ public class TestDFSStripedOutputStream {
           NativeRSRawErasureCoderFactory.class.getCanonicalName());
           NativeRSRawErasureCoderFactory.class.getCanonicalName());
     }
     }
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-    cluster.getFileSystem().getClient().setErasureCodingPolicy("/", ecPolicy);
+    cluster.getFileSystem().getClient().setErasureCodingPolicy("/", ecPolicy
+        .getName());
     fs = cluster.getFileSystem();
     fs = cluster.getFileSystem();
   }
   }
 
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java

@@ -221,7 +221,7 @@ public class TestDFSStripedOutputStreamWithFailure {
     cluster.waitActive();
     cluster.waitActive();
     dfs = cluster.getFileSystem();
     dfs = cluster.getFileSystem();
     dfs.mkdirs(dir);
     dfs.mkdirs(dir);
-    dfs.setErasureCodingPolicy(dir, ecPolicy);
+    dfs.setErasureCodingPolicy(dir, ecPolicy.getName());
   }
   }
 
 
   private void tearDown() {
   private void tearDown() {

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java

@@ -142,7 +142,8 @@ public class TestDecommissionWithStriped {
     client = getDfsClient(cluster.getNameNode(0), conf);
     client = getDfsClient(cluster.getNameNode(0), conf);
 
 
     dfs.mkdirs(ecDir);
     dfs.mkdirs(ecDir);
-    dfs.setErasureCodingPolicy(ecDir, null);
+    dfs.setErasureCodingPolicy(ecDir,
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
   }
   }
 
 
   @After
   @After

+ 21 - 16
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java

@@ -47,6 +47,8 @@ public class TestErasureCodingPolicies {
   private MiniDFSCluster cluster;
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   private DistributedFileSystem fs;
   private static final int BLOCK_SIZE = 1024;
   private static final int BLOCK_SIZE = 1024;
+  private static final ErasureCodingPolicy EC_POLICY =
+      ErasureCodingPolicyManager.getSystemDefaultPolicy();
   private FSNamesystem namesystem;
   private FSNamesystem namesystem;
 
 
   @Before
   @Before
@@ -80,7 +82,8 @@ public class TestErasureCodingPolicies {
     DFSTestUtil.createFile(fs, replicatedFile, 0, (short) 3, 0L);
     DFSTestUtil.createFile(fs, replicatedFile, 0, (short) 3, 0L);
 
 
     // set ec policy on dir
     // set ec policy on dir
-    fs.setErasureCodingPolicy(dir, null);
+    fs.setErasureCodingPolicy(dir,
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
     // create a file which should be using ec
     // create a file which should be using ec
     final Path ecSubDir = new Path(dir, "ecSubDir");
     final Path ecSubDir = new Path(dir, "ecSubDir");
     final Path ecFile = new Path(ecSubDir, "ecFile");
     final Path ecFile = new Path(ecSubDir, "ecFile");
@@ -132,7 +135,7 @@ public class TestErasureCodingPolicies {
     fs.mkdir(testDir, FsPermission.getDirDefault());
     fs.mkdir(testDir, FsPermission.getDirDefault());
 
 
     /* Normal creation of an erasure coding directory */
     /* Normal creation of an erasure coding directory */
-    fs.getClient().setErasureCodingPolicy(testDir.toString(), null);
+    fs.setErasureCodingPolicy(testDir, EC_POLICY.getName());
 
 
     /* Verify files under the directory are striped */
     /* Verify files under the directory are striped */
     final Path ECFilePath = new Path(testDir, "foo");
     final Path ECFilePath = new Path(testDir, "foo");
@@ -148,7 +151,7 @@ public class TestErasureCodingPolicies {
     fs.mkdir(notEmpty, FsPermission.getDirDefault());
     fs.mkdir(notEmpty, FsPermission.getDirDefault());
     final Path oldFile = new Path(notEmpty, "old");
     final Path oldFile = new Path(notEmpty, "old");
     fs.create(oldFile);
     fs.create(oldFile);
-    fs.getClient().setErasureCodingPolicy(notEmpty.toString(), null);
+    fs.setErasureCodingPolicy(notEmpty, EC_POLICY.getName());
     final Path newFile = new Path(notEmpty, "new");
     final Path newFile = new Path(notEmpty, "new");
     fs.create(newFile);
     fs.create(newFile);
     INode oldInode = namesystem.getFSDirectory().getINode(oldFile.toString());
     INode oldInode = namesystem.getFSDirectory().getINode(oldFile.toString());
@@ -160,10 +163,10 @@ public class TestErasureCodingPolicies {
     final Path dir1 = new Path("/dir1");
     final Path dir1 = new Path("/dir1");
     final Path dir2 = new Path(dir1, "dir2");
     final Path dir2 = new Path(dir1, "dir2");
     fs.mkdir(dir1, FsPermission.getDirDefault());
     fs.mkdir(dir1, FsPermission.getDirDefault());
-    fs.getClient().setErasureCodingPolicy(dir1.toString(), null);
+    fs.setErasureCodingPolicy(dir1, EC_POLICY.getName());
     fs.mkdir(dir2, FsPermission.getDirDefault());
     fs.mkdir(dir2, FsPermission.getDirDefault());
     try {
     try {
-      fs.getClient().setErasureCodingPolicy(dir2.toString(), null);
+      fs.setErasureCodingPolicy(dir2, EC_POLICY.getName());
     } catch (IOException e) {
     } catch (IOException e) {
       fail("Nested erasure coding policies are supported");
       fail("Nested erasure coding policies are supported");
     }
     }
@@ -172,7 +175,7 @@ public class TestErasureCodingPolicies {
     final Path fPath = new Path("/file");
     final Path fPath = new Path("/file");
     fs.create(fPath);
     fs.create(fPath);
     try {
     try {
-      fs.getClient().setErasureCodingPolicy(fPath.toString(), null);
+      fs.setErasureCodingPolicy(fPath, EC_POLICY.getName());
       fail("Erasure coding policy on file");
       fail("Erasure coding policy on file");
     } catch (IOException e) {
     } catch (IOException e) {
       assertExceptionContains("erasure coding policy for a file", e);
       assertExceptionContains("erasure coding policy for a file", e);
@@ -185,8 +188,8 @@ public class TestErasureCodingPolicies {
     final Path dstECDir = new Path("/dstEC");
     final Path dstECDir = new Path("/dstEC");
     fs.mkdir(srcECDir, FsPermission.getDirDefault());
     fs.mkdir(srcECDir, FsPermission.getDirDefault());
     fs.mkdir(dstECDir, FsPermission.getDirDefault());
     fs.mkdir(dstECDir, FsPermission.getDirDefault());
-    fs.getClient().setErasureCodingPolicy(srcECDir.toString(), null);
-    fs.getClient().setErasureCodingPolicy(dstECDir.toString(), null);
+    fs.setErasureCodingPolicy(srcECDir, EC_POLICY.getName());
+    fs.setErasureCodingPolicy(dstECDir, EC_POLICY.getName());
     final Path srcFile = new Path(srcECDir, "foo");
     final Path srcFile = new Path(srcECDir, "foo");
     fs.create(srcFile);
     fs.create(srcFile);
 
 
@@ -220,7 +223,8 @@ public class TestErasureCodingPolicies {
   public void testReplication() throws IOException {
   public void testReplication() throws IOException {
     final Path testDir = new Path("/ec");
     final Path testDir = new Path("/ec");
     fs.mkdir(testDir, FsPermission.getDirDefault());
     fs.mkdir(testDir, FsPermission.getDirDefault());
-    fs.setErasureCodingPolicy(testDir, null);
+    fs.setErasureCodingPolicy(testDir,
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
     final Path fooFile = new Path(testDir, "foo");
     final Path fooFile = new Path(testDir, "foo");
     // create ec file with replication=0
     // create ec file with replication=0
     fs.create(fooFile, FsPermission.getFileDefault(), true,
     fs.create(fooFile, FsPermission.getFileDefault(), true,
@@ -241,8 +245,9 @@ public class TestErasureCodingPolicies {
     // dir EC policy should be null
     // dir EC policy should be null
     assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy());
     assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy());
     // dir EC policy after setting
     // dir EC policy after setting
-    fs.getClient().setErasureCodingPolicy(src, null); //Default one will be used.
-    ErasureCodingPolicy sysDefaultECPolicy = ErasureCodingPolicyManager.getSystemDefaultPolicy();
+    ErasureCodingPolicy sysDefaultECPolicy =
+        ErasureCodingPolicyManager.getSystemDefaultPolicy();
+    fs.getClient().setErasureCodingPolicy(src, sysDefaultECPolicy.getName());
     verifyErasureCodingInfo(src, sysDefaultECPolicy);
     verifyErasureCodingInfo(src, sysDefaultECPolicy);
     fs.create(new Path(ecDir, "child1")).close();
     fs.create(new Path(ecDir, "child1")).close();
     // verify for the files in ec dir
     // verify for the files in ec dir
@@ -263,7 +268,7 @@ public class TestErasureCodingPolicies {
     // dir ECInfo before being set
     // dir ECInfo before being set
     assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy());
     assertNull(fs.getClient().getFileInfo(src).getErasureCodingPolicy());
     // dir ECInfo after set
     // dir ECInfo after set
-    fs.getClient().setErasureCodingPolicy(src, usingECPolicy);
+    fs.getClient().setErasureCodingPolicy(src, usingECPolicy.getName());
     verifyErasureCodingInfo(src, usingECPolicy);
     verifyErasureCodingInfo(src, usingECPolicy);
     fs.create(new Path(ecDir, "child1")).close();
     fs.create(new Path(ecDir, "child1")).close();
     // verify for the files in ec dir
     // verify for the files in ec dir
@@ -291,12 +296,12 @@ public class TestErasureCodingPolicies {
     final Path ecDir = new Path(src);
     final Path ecDir = new Path(src);
     try {
     try {
       fs.mkdir(ecDir, FsPermission.getDirDefault());
       fs.mkdir(ecDir, FsPermission.getDirDefault());
-      fs.getClient().setErasureCodingPolicy(src, ecPolicy);
+      fs.getClient().setErasureCodingPolicy(src, ecPolicy.getName());
       fail("HadoopIllegalArgumentException should be thrown for"
       fail("HadoopIllegalArgumentException should be thrown for"
           + "setting an invalid erasure coding policy");
           + "setting an invalid erasure coding policy");
     } catch (Exception e) {
     } catch (Exception e) {
-      assertExceptionContains("Policy [ RS-4-2-128k ] does not match " +
-          "any of the supported policies",e);
+      assertExceptionContains("Policy 'RS-4-2-128k' does not match " +
+          "any supported erasure coding policies",e);
     }
     }
   }
   }
 
 
@@ -338,7 +343,7 @@ public class TestErasureCodingPolicies {
       for (ErasureCodingPolicy policy : sysPolicies) {
       for (ErasureCodingPolicy policy : sysPolicies) {
         Path dir = new Path("/policy_" + policy.getId());
         Path dir = new Path("/policy_" + policy.getId());
         fs.mkdir(dir, FsPermission.getDefault());
         fs.mkdir(dir, FsPermission.getDefault());
-        fs.setErasureCodingPolicy(dir, policy);
+        fs.setErasureCodingPolicy(dir, policy.getName());
         Path file = new Path(dir, "child");
         Path file = new Path(dir, "child");
         fs.create(file).close();
         fs.create(file).close();
         assertEquals(policy, fs.getErasureCodingPolicy(file));
         assertEquals(policy, fs.getErasureCodingPolicy(file));

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicyWithSnapshot.java

@@ -75,7 +75,7 @@ public class TestErasureCodingPolicyWithSnapshot {
     fs.mkdirs(ecDir);
     fs.mkdirs(ecDir);
     fs.allowSnapshot(ecDirParent);
     fs.allowSnapshot(ecDirParent);
     // set erasure coding policy
     // set erasure coding policy
-    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy);
+    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy.getName());
     DFSTestUtil.createFile(fs, ecFile, len, (short) 1, 0xFEED);
     DFSTestUtil.createFile(fs, ecFile, len, (short) 1, 0xFEED);
     String contents = DFSTestUtil.readFile(fs, ecFile);
     String contents = DFSTestUtil.readFile(fs, ecFile);
     final Path snap1 = fs.createSnapshot(ecDirParent, "snap1");
     final Path snap1 = fs.createSnapshot(ecDirParent, "snap1");
@@ -93,7 +93,7 @@ public class TestErasureCodingPolicyWithSnapshot {
         fs.getErasureCodingPolicy(snap2ECDir));
         fs.getErasureCodingPolicy(snap2ECDir));
 
 
     // Make dir again with system default ec policy
     // Make dir again with system default ec policy
-    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy);
+    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy.getName());
     final Path snap3 = fs.createSnapshot(ecDirParent, "snap3");
     final Path snap3 = fs.createSnapshot(ecDirParent, "snap3");
     final Path snap3ECDir = new Path(snap3, ecDir.getName());
     final Path snap3ECDir = new Path(snap3, ecDir.getName());
     // Check that snap3's ECPolicy has the correct settings
     // Check that snap3's ECPolicy has the correct settings
@@ -134,7 +134,7 @@ public class TestErasureCodingPolicyWithSnapshot {
     fs.mkdirs(ecDir);
     fs.mkdirs(ecDir);
     fs.allowSnapshot(ecDir);
     fs.allowSnapshot(ecDir);
 
 
-    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy);
+    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy.getName());
     final Path snap1 = fs.createSnapshot(ecDir, "snap1");
     final Path snap1 = fs.createSnapshot(ecDir, "snap1");
     assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
     assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
         fs.getErasureCodingPolicy(snap1));
         fs.getErasureCodingPolicy(snap1));
@@ -150,7 +150,7 @@ public class TestErasureCodingPolicyWithSnapshot {
     fs.allowSnapshot(ecDir);
     fs.allowSnapshot(ecDir);
 
 
     // set erasure coding policy
     // set erasure coding policy
-    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy);
+    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy.getName());
     final Path snap1 = fs.createSnapshot(ecDir, "snap1");
     final Path snap1 = fs.createSnapshot(ecDir, "snap1");
     ErasureCodingPolicy ecSnap = fs.getErasureCodingPolicy(snap1);
     ErasureCodingPolicy ecSnap = fs.getErasureCodingPolicy(snap1);
     assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
     assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
@@ -182,7 +182,7 @@ public class TestErasureCodingPolicyWithSnapshot {
     fs.allowSnapshot(ecDir);
     fs.allowSnapshot(ecDir);
 
 
     // set erasure coding policy
     // set erasure coding policy
-    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy);
+    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy.getName());
     DFSTestUtil.createFile(fs, ecFile, len, (short) 1, 0xFEED);
     DFSTestUtil.createFile(fs, ecFile, len, (short) 1, 0xFEED);
     final Path snap1 = fs.createSnapshot(ecDir, "snap1");
     final Path snap1 = fs.createSnapshot(ecDir, "snap1");
 
 

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileChecksum.java

@@ -81,7 +81,8 @@ public class TestFileChecksum {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     Path ecPath = new Path(ecDir);
     Path ecPath = new Path(ecDir);
     cluster.getFileSystem().mkdir(ecPath, FsPermission.getDirDefault());
     cluster.getFileSystem().mkdir(ecPath, FsPermission.getDirDefault());
-    cluster.getFileSystem().getClient().setErasureCodingPolicy(ecDir, null);
+    cluster.getFileSystem().getClient().setErasureCodingPolicy(ecDir,
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
     fs = cluster.getFileSystem();
     fs = cluster.getFileSystem();
     client = fs.getClient();
     client = fs.getClient();
 
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatusWithECPolicy.java

@@ -73,7 +73,7 @@ public class TestFileStatusWithECPolicy {
 
 
     final ErasureCodingPolicy ecPolicy1 = ErasureCodingPolicyManager.getSystemDefaultPolicy();
     final ErasureCodingPolicy ecPolicy1 = ErasureCodingPolicyManager.getSystemDefaultPolicy();
     // set EC policy on dir
     // set EC policy on dir
-    fs.setErasureCodingPolicy(dir, ecPolicy1);
+    fs.setErasureCodingPolicy(dir, ecPolicy1.getName());
     final ErasureCodingPolicy ecPolicy2 = client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy();
     final ErasureCodingPolicy ecPolicy2 = client.getFileInfo(dir.toUri().getPath()).getErasureCodingPolicy();
     assertNotNull(ecPolicy2);
     assertNotNull(ecPolicy2);
     assertTrue(ecPolicy1.equals(ecPolicy2));
     assertTrue(ecPolicy1.equals(ecPolicy2));

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java

@@ -94,7 +94,7 @@ public class TestLeaseRecoveryStriped {
     cluster.waitActive();
     cluster.waitActive();
     dfs = cluster.getFileSystem();
     dfs = cluster.getFileSystem();
     dfs.mkdirs(dir);
     dfs.mkdirs(dir);
-    dfs.setErasureCodingPolicy(dir, ecPolicy);
+    dfs.setErasureCodingPolicy(dir, ecPolicy.getName());
   }
   }
 
 
   @After
   @After

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java

@@ -102,7 +102,8 @@ public class TestReadStripedFileWithDecoding {
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
         false);
         false);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-    cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
+    cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
     fs = cluster.getFileSystem();
     fs = cluster.getFileSystem();
   }
   }
 
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithMissingBlocks.java

@@ -61,7 +61,7 @@ public class TestReadStripedFileWithMissingBlocks {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.getFileSystem().getClient().setErasureCodingPolicy(
     cluster.getFileSystem().getClient().setErasureCodingPolicy(
-        "/", ecPolicy);
+        "/", ecPolicy.getName());
     fs = cluster.getFileSystem();
     fs = cluster.getFileSystem();
   }
   }
 
 

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java

@@ -107,7 +107,8 @@ public class TestReconstructStripedFile {
     cluster.waitActive();
     cluster.waitActive();
 
 
     fs = cluster.getFileSystem();
     fs = cluster.getFileSystem();
-    fs.getClient().setErasureCodingPolicy("/", null);
+    fs.getClient().setErasureCodingPolicy("/",
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
 
 
     List<DataNode> datanodes = cluster.getDataNodes();
     List<DataNode> datanodes = cluster.getDataNodes();
     for (int i = 0; i < dnNum; i++) {
     for (int i = 0; i < dnNum; i++) {

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeModeWithStripedFile.java

@@ -63,7 +63,8 @@ public class TestSafeModeWithStripedFile {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 100);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 100);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-    cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
+    cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
     cluster.waitActive();
     cluster.waitActive();
   }
   }
 
 

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java

@@ -101,7 +101,7 @@ public class TestUnsetAndChangeDirectoryEcPolicy {
     // Test unset a directory which has no EC policy
     // Test unset a directory which has no EC policy
     fs.unsetErasureCodingPolicy(dirPath);
     fs.unsetErasureCodingPolicy(dirPath);
     // Set EC policy on directory
     // Set EC policy on directory
-    fs.setErasureCodingPolicy(dirPath, ecPolicy);
+    fs.setErasureCodingPolicy(dirPath, ecPolicy.getName());
 
 
     DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
     DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
     fs.unsetErasureCodingPolicy(dirPath);
     fs.unsetErasureCodingPolicy(dirPath);
@@ -142,12 +142,12 @@ public class TestUnsetAndChangeDirectoryEcPolicy {
         .getPolicyByPolicyID(HdfsConstants.RS_3_2_POLICY_ID);
         .getPolicyByPolicyID(HdfsConstants.RS_3_2_POLICY_ID);
 
 
     fs.mkdirs(parentDir);
     fs.mkdirs(parentDir);
-    fs.setErasureCodingPolicy(parentDir, ecPolicy);
+    fs.setErasureCodingPolicy(parentDir, ecPolicy.getName());
     fs.mkdirs(childDir);
     fs.mkdirs(childDir);
     // Create RS(6,3) EC policy file
     // Create RS(6,3) EC policy file
     DFSTestUtil.createFile(fs, ec63FilePath, fileLen, (short) 1, 0L);
     DFSTestUtil.createFile(fs, ec63FilePath, fileLen, (short) 1, 0L);
     // Set RS(3,2) EC policy on child directory
     // Set RS(3,2) EC policy on child directory
-    fs.setErasureCodingPolicy(childDir, ec32Policy);
+    fs.setErasureCodingPolicy(childDir, ec32Policy.getName());
     // Create RS(3,2) EC policy file
     // Create RS(3,2) EC policy file
     DFSTestUtil.createFile(fs, ec32FilePath, fileLen, (short) 1, 0L);
     DFSTestUtil.createFile(fs, ec32FilePath, fileLen, (short) 1, 0L);
 
 
@@ -202,7 +202,7 @@ public class TestUnsetAndChangeDirectoryEcPolicy {
     // Test unset root path which has no EC policy
     // Test unset root path which has no EC policy
     fs.unsetErasureCodingPolicy(rootPath);
     fs.unsetErasureCodingPolicy(rootPath);
     // Set EC policy on root path
     // Set EC policy on root path
-    fs.setErasureCodingPolicy(rootPath, ecPolicy);
+    fs.setErasureCodingPolicy(rootPath, ecPolicy.getName());
     DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
     DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
     fs.unsetErasureCodingPolicy(rootPath);
     fs.unsetErasureCodingPolicy(rootPath);
     DFSTestUtil.createFile(fs, replicateFilePath, fileLen, (short) 1, 0L);
     DFSTestUtil.createFile(fs, replicateFilePath, fileLen, (short) 1, 0L);
@@ -240,11 +240,11 @@ public class TestUnsetAndChangeDirectoryEcPolicy {
         .getPolicyByPolicyID(HdfsConstants.RS_3_2_POLICY_ID);
         .getPolicyByPolicyID(HdfsConstants.RS_3_2_POLICY_ID);
 
 
     fs.unsetErasureCodingPolicy(rootPath);
     fs.unsetErasureCodingPolicy(rootPath);
-    fs.setErasureCodingPolicy(rootPath, ecPolicy);
+    fs.setErasureCodingPolicy(rootPath, ecPolicy.getName());
     // Create RS(6,3) EC policy file
     // Create RS(6,3) EC policy file
     DFSTestUtil.createFile(fs, ec63FilePath, fileLen, (short) 1, 0L);
     DFSTestUtil.createFile(fs, ec63FilePath, fileLen, (short) 1, 0L);
     // Change EC policy from RS(6,3) to RS(3,2)
     // Change EC policy from RS(6,3) to RS(3,2)
-    fs.setErasureCodingPolicy(rootPath, ec32Policy);
+    fs.setErasureCodingPolicy(rootPath, ec32Policy.getName());
     DFSTestUtil.createFile(fs, ec32FilePath, fileLen, (short) 1, 0L);
     DFSTestUtil.createFile(fs, ec32FilePath, fileLen, (short) 1, 0L);
 
 
     // start to check
     // start to check
@@ -281,7 +281,7 @@ public class TestUnsetAndChangeDirectoryEcPolicy {
     final Path replicateFilePath2 = new Path(ecDirPath, "rep_file2");
     final Path replicateFilePath2 = new Path(ecDirPath, "rep_file2");
 
 
     fs.mkdirs(ecDirPath);
     fs.mkdirs(ecDirPath);
-    fs.setErasureCodingPolicy(ecDirPath, ecPolicy);
+    fs.setErasureCodingPolicy(ecDirPath, ecPolicy.getName());
     DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
     DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
     fs.unsetErasureCodingPolicy(ecDirPath);
     fs.unsetErasureCodingPolicy(ecDirPath);
     DFSTestUtil.createFile(fs, replicateFilePath, fileLen, (short) 3, 0L);
     DFSTestUtil.createFile(fs, replicateFilePath, fileLen, (short) 3, 0L);
@@ -328,7 +328,7 @@ public class TestUnsetAndChangeDirectoryEcPolicy {
 
 
     // Set EC policy on non-existent directory
     // Set EC policy on non-existent directory
     try {
     try {
-      fs.setErasureCodingPolicy(dirPath, ecPolicy);
+      fs.setErasureCodingPolicy(dirPath, ecPolicy.getName());
       fail("FileNotFoundException should be thrown for a non-existent"
       fail("FileNotFoundException should be thrown for a non-existent"
           + " file path");
           + " file path");
     } catch (FileNotFoundException e) {
     } catch (FileNotFoundException e) {
@@ -347,7 +347,7 @@ public class TestUnsetAndChangeDirectoryEcPolicy {
 
 
     // Set EC policy on file
     // Set EC policy on file
     try {
     try {
-      fs.setErasureCodingPolicy(ecFilePath, ecPolicy);
+      fs.setErasureCodingPolicy(ecFilePath, ecPolicy.getName());
       fail("IOException should be thrown for setting EC policy on file");
       fail("IOException should be thrown for setting EC policy on file");
     } catch (IOException e) {
     } catch (IOException e) {
       assertExceptionContains("Attempt to set an erasure coding policy " +
       assertExceptionContains("Attempt to set an erasure coding policy " +

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteReadStripedFile.java

@@ -80,7 +80,8 @@ public class TestWriteReadStripedFile {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     fs = cluster.getFileSystem();
     fs = cluster.getFileSystem();
     fs.mkdirs(new Path("/ec"));
     fs.mkdirs(new Path("/ec"));
-    cluster.getFileSystem().getClient().setErasureCodingPolicy("/ec", null);
+    cluster.getFileSystem().getClient().setErasureCodingPolicy("/ec",
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
   }
   }
 
 
   @After
   @After

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteStripedFileWithFailure.java

@@ -59,7 +59,8 @@ public class TestWriteStripedFileWithFailure {
   public void setup() throws IOException {
   public void setup() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
-    cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
+    cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
     fs = cluster.getFileSystem();
     fs = cluster.getFileSystem();
   }
   }
 
 

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

@@ -1940,7 +1940,8 @@ public class TestBalancer {
       cluster.waitActive();
       cluster.waitActive();
       client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
       client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
           ClientProtocol.class).getProxy();
           ClientProtocol.class).getProxy();
-      client.setErasureCodingPolicy("/", null);
+      client.setErasureCodingPolicy("/",
+          ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
 
 
       long totalCapacity = sum(capacities);
       long totalCapacity = sum(capacities);
 
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFSStriped.java

@@ -83,8 +83,8 @@ public class TestBlockTokenWithDFSStriped extends TestBlockTokenWithDFS {
         .nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100))
         .nameNodeHttpPort(ServerSocketUtil.getPort(19870, 100))
         .numDataNodes(numDNs)
         .numDataNodes(numDNs)
         .build();
         .build();
-    cluster.getFileSystem().getClient()
-        .setErasureCodingPolicy("/", null);
+    cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
     try {
     try {
       cluster.waitActive();
       cluster.waitActive();
       doTestRead(conf, cluster, true);
       doTestRead(conf, cluster, true);

+ 6 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java

@@ -150,7 +150,8 @@ public class TestReconstructStripedBlocksWithRackAwareness {
         .numDataNodes(hosts.length).build();
         .numDataNodes(hosts.length).build();
     cluster.waitActive();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     fs = cluster.getFileSystem();
-    fs.setErasureCodingPolicy(new Path("/"), null);
+    fs.setErasureCodingPolicy(new Path("/"),
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
     FSNamesystem fsn = cluster.getNamesystem();
     FSNamesystem fsn = cluster.getNamesystem();
     BlockManager bm = fsn.getBlockManager();
     BlockManager bm = fsn.getBlockManager();
 
 
@@ -220,7 +221,8 @@ public class TestReconstructStripedBlocksWithRackAwareness {
         .numDataNodes(hosts.length).build();
         .numDataNodes(hosts.length).build();
     cluster.waitActive();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     fs = cluster.getFileSystem();
-    fs.setErasureCodingPolicy(new Path("/"), null);
+    fs.setErasureCodingPolicy(new Path("/"),
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
 
 
     MiniDFSCluster.DataNodeProperties lastHost = stopDataNode(
     MiniDFSCluster.DataNodeProperties lastHost = stopDataNode(
         hosts[hosts.length - 1]);
         hosts[hosts.length - 1]);
@@ -273,7 +275,8 @@ public class TestReconstructStripedBlocksWithRackAwareness {
         .numDataNodes(hostNames.length).build();
         .numDataNodes(hostNames.length).build();
     cluster.waitActive();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     fs = cluster.getFileSystem();
-    fs.setErasureCodingPolicy(new Path("/"), null);
+    fs.setErasureCodingPolicy(new Path("/"),
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
 
 
     final BlockManager bm = cluster.getNamesystem().getBlockManager();
     final BlockManager bm = cluster.getNamesystem().getBlockManager();
     final DatanodeManager dm = bm.getDatanodeManager();
     final DatanodeManager dm = bm.getDatanodeManager();

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSequentialBlockGroupId.java

@@ -88,8 +88,8 @@ public class TestSequentialBlockGroupId {
     blockGrpIdGenerator = cluster.getNamesystem().getBlockManager()
     blockGrpIdGenerator = cluster.getNamesystem().getBlockManager()
         .getBlockIdManager().getBlockGroupIdGenerator();
         .getBlockIdManager().getBlockGroupIdGenerator();
     fs.mkdirs(ecDir);
     fs.mkdirs(ecDir);
-    cluster.getFileSystem().getClient()
-        .setErasureCodingPolicy("/ecDir", null);
+    cluster.getFileSystem().getClient().setErasureCodingPolicy("/ecDir",
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
   }
   }
 
 
   @After
   @After

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeErasureCodingMetrics.java

@@ -75,7 +75,8 @@ public class TestDataNodeErasureCodingMetrics {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.waitActive();
     cluster.waitActive();
-    cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
+    cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
     fs = cluster.getFileSystem();
     fs = cluster.getFileSystem();
   }
   }
 
 

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java

@@ -537,7 +537,8 @@ public class TestMover {
       client.setStoragePolicy(barDir,
       client.setStoragePolicy(barDir,
           HdfsConstants.HOT_STORAGE_POLICY_NAME);
           HdfsConstants.HOT_STORAGE_POLICY_NAME);
       // set an EC policy on "/bar" directory
       // set an EC policy on "/bar" directory
-      client.setErasureCodingPolicy(barDir, null);
+      client.setErasureCodingPolicy(barDir,
+          ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
 
 
       // write file to barDir
       // write file to barDir
       final String fooFile = "/bar/foo";
       final String fooFile = "/bar/foo";

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java

@@ -81,7 +81,8 @@ public class TestAddOverReplicatedStripedBlocks {
     cluster.waitActive();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     fs = cluster.getFileSystem();
     fs.mkdirs(dirPath);
     fs.mkdirs(dirPath);
-    fs.getClient().setErasureCodingPolicy(dirPath.toString(), null);
+    fs.getClient().setErasureCodingPolicy(dirPath.toString(),
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
   }
   }
 
 
   @After
   @After

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlockInFBR.java

@@ -87,7 +87,8 @@ public class TestAddStripedBlockInFBR {
     final Path repDir = new Path("/rep");
     final Path repDir = new Path("/rep");
     dfs.mkdirs(ecDir);
     dfs.mkdirs(ecDir);
     dfs.mkdirs(repDir);
     dfs.mkdirs(repDir);
-    dfs.getClient().setErasureCodingPolicy(ecDir.toString(), null);
+    dfs.getClient().setErasureCodingPolicy(ecDir.toString(),
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
 
 
     // create several non-EC files and one EC file
     // create several non-EC files and one EC file
     final Path[] repFiles = new Path[groupSize];
     final Path[] repFiles = new Path[groupSize];

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java

@@ -86,7 +86,8 @@ public class TestAddStripedBlocks {
         .numDataNodes(groupSize).build();
         .numDataNodes(groupSize).build();
     cluster.waitActive();
     cluster.waitActive();
     dfs = cluster.getFileSystem();
     dfs = cluster.getFileSystem();
-    dfs.getClient().setErasureCodingPolicy("/", null);
+    dfs.getClient().setErasureCodingPolicy("/", ErasureCodingPolicyManager
+        .getSystemDefaultPolicy().getName());
   }
   }
 
 
   @After
   @After

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java

@@ -479,7 +479,7 @@ public class TestFSEditLogLoader {
       //set the storage policy of the directory
       //set the storage policy of the directory
       fs.mkdir(new Path(testDir), new FsPermission("755"));
       fs.mkdir(new Path(testDir), new FsPermission("755"));
       fs.getClient().getNamenode().setErasureCodingPolicy(
       fs.getClient().getNamenode().setErasureCodingPolicy(
-          testDir, testECPolicy);
+          testDir, testECPolicy.getName());
 
 
       // Create a file with striped block
       // Create a file with striped block
       Path p = new Path(testFilePath);
       Path p = new Path(testFilePath);
@@ -552,7 +552,7 @@ public class TestFSEditLogLoader {
       //set the storage policy of the directory
       //set the storage policy of the directory
       fs.mkdir(new Path(testDir), new FsPermission("755"));
       fs.mkdir(new Path(testDir), new FsPermission("755"));
       fs.getClient().getNamenode().setErasureCodingPolicy(
       fs.getClient().getNamenode().setErasureCodingPolicy(
-          testDir, testECPolicy);
+          testDir, testECPolicy.getName());
 
 
       //create a file with striped blocks
       //create a file with striped blocks
       Path p = new Path(testFilePath);
       Path p = new Path(testFilePath);

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java

@@ -156,7 +156,7 @@ public class TestFSImage {
   private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration conf,
   private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration conf,
                                                boolean isUC) throws IOException{
                                                boolean isUC) throws IOException{
     // Construct an INode with StripedBlock for saving and loading
     // Construct an INode with StripedBlock for saving and loading
-    fsn.setErasureCodingPolicy("/", testECPolicy, false);
+    fsn.setErasureCodingPolicy("/", testECPolicy.getName(), false);
     long id = 123456789;
     long id = 123456789;
     byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes();
     byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes();
     PermissionStatus permissionStatus = new PermissionStatus("testuser_a",
     PermissionStatus permissionStatus = new PermissionStatus("testuser_a",
@@ -472,8 +472,8 @@ public class TestFSImage {
       // Create directories and files
       // Create directories and files
       fs.mkdirs(parentDir);
       fs.mkdirs(parentDir);
       fs.mkdirs(childDir);
       fs.mkdirs(childDir);
-      fs.setErasureCodingPolicy(parentDir, testECPolicy);
-      fs.setErasureCodingPolicy(childDir, ec32Policy);
+      fs.setErasureCodingPolicy(parentDir, testECPolicy.getName());
+      fs.setErasureCodingPolicy(childDir, ec32Policy.getName());
       Path file_10_4 = new Path(parentDir, "striped_file_10_4");
       Path file_10_4 = new Path(parentDir, "striped_file_10_4");
       Path file_3_2 = new Path(childDir, "striped_file_3_2");
       Path file_3_2 = new Path(childDir, "striped_file_3_2");
 
 

+ 5 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -690,7 +690,7 @@ public class TestFsck {
     util.createFiles(fs, topDir);
     util.createFiles(fs, topDir);
     // set topDir to EC when it has replicated files
     // set topDir to EC when it has replicated files
     cluster.getFileSystem().getClient().setErasureCodingPolicy(
     cluster.getFileSystem().getClient().setErasureCodingPolicy(
-        topDir, ecPolicy);
+        topDir, ecPolicy.getName());
 
 
     // create a new file under topDir
     // create a new file under topDir
     DFSTestUtil.createFile(fs, new Path(topDir, "ecFile"), 1024, (short) 1, 0L);
     DFSTestUtil.createFile(fs, new Path(topDir, "ecFile"), 1024, (short) 1, 0L);
@@ -2307,7 +2307,8 @@ public class TestFsck {
     // create file
     // create file
     Path ecDirPath = new Path("/striped");
     Path ecDirPath = new Path("/striped");
     fs.mkdir(ecDirPath, FsPermission.getDirDefault());
     fs.mkdir(ecDirPath, FsPermission.getDirDefault());
-    fs.getClient().setErasureCodingPolicy(ecDirPath.toString(), null);
+    fs.getClient().setErasureCodingPolicy(ecDirPath.toString(),
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
     Path file = new Path(ecDirPath, "corrupted");
     Path file = new Path(ecDirPath, "corrupted");
     final int length = cellSize * dataBlocks;
     final int length = cellSize * dataBlocks;
     final byte[] bytes = StripedFileTestUtil.generateBytes(length);
     final byte[] bytes = StripedFileTestUtil.generateBytes(length);
@@ -2372,7 +2373,8 @@ public class TestFsck {
     // create file
     // create file
     Path ecDirPath = new Path("/striped");
     Path ecDirPath = new Path("/striped");
     fs.mkdir(ecDirPath, FsPermission.getDirDefault());
     fs.mkdir(ecDirPath, FsPermission.getDirDefault());
-    fs.getClient().setErasureCodingPolicy(ecDirPath.toString(), null);
+    fs.getClient().setErasureCodingPolicy(ecDirPath.toString(),
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
     Path file = new Path(ecDirPath, "missing");
     Path file = new Path(ecDirPath, "missing");
     final int length = cellSize * dataBlocks;
     final int length = cellSize * dataBlocks;
     final byte[] bytes = StripedFileTestUtil.generateBytes(length);
     final byte[] bytes = StripedFileTestUtil.generateBytes(length);

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java

@@ -738,7 +738,8 @@ public class TestNameNodeMXBean {
       // create file
       // create file
       Path ecDirPath = new Path("/striped");
       Path ecDirPath = new Path("/striped");
       fs.mkdir(ecDirPath, FsPermission.getDirDefault());
       fs.mkdir(ecDirPath, FsPermission.getDirDefault());
-      fs.getClient().setErasureCodingPolicy(ecDirPath.toString(), null);
+      fs.getClient().setErasureCodingPolicy(ecDirPath.toString(),
+          ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
       Path file = new Path(ecDirPath, "corrupted");
       Path file = new Path(ecDirPath, "corrupted");
       final int length = cellSize * dataBlocks;
       final int length = cellSize * dataBlocks;
       final byte[] bytes = StripedFileTestUtil.generateBytes(length);
       final byte[] bytes = StripedFileTestUtil.generateBytes(length);

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java

@@ -71,7 +71,8 @@ public class TestQuotaWithStripedBlocks {
     dfs = cluster.getFileSystem();
     dfs = cluster.getFileSystem();
 
 
     dfs.mkdirs(ecDir);
     dfs.mkdirs(ecDir);
-    dfs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy);
+    dfs.getClient()
+        .setErasureCodingPolicy(ecDir.toString(), ecPolicy.getName());
     dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA);
     dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA);
     dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA);
     dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA);
     dfs.setStoragePolicy(ecDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
     dfs.setStoragePolicy(ecDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java

@@ -201,7 +201,8 @@ public class TestReconstructStripedBlocks {
       cluster.waitActive();
       cluster.waitActive();
       DistributedFileSystem fs = cluster.getFileSystem();
       DistributedFileSystem fs = cluster.getFileSystem();
       BlockManager bm = cluster.getNamesystem().getBlockManager();
       BlockManager bm = cluster.getNamesystem().getBlockManager();
-      fs.getClient().setErasureCodingPolicy("/", null);
+      fs.getClient().setErasureCodingPolicy("/",
+          ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
       int fileLen = dataBlocks * blockSize;
       int fileLen = dataBlocks * blockSize;
       Path p = new Path("/test2RecoveryTasksForSameBlockGroup");
       Path p = new Path("/test2RecoveryTasksForSameBlockGroup");
       final byte[] data = new byte[fileLen];
       final byte[] data = new byte[fileLen];
@@ -266,7 +267,8 @@ public class TestReconstructStripedBlocks {
 
 
     try {
     try {
       fs.mkdirs(dirPath);
       fs.mkdirs(dirPath);
-      fs.setErasureCodingPolicy(dirPath, null);
+      fs.setErasureCodingPolicy(dirPath,
+          ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
       DFSTestUtil.createFile(fs, filePath,
       DFSTestUtil.createFile(fs, filePath,
           cellSize * dataBlocks * 2, (short) 1, 0L);
           cellSize * dataBlocks * 2, (short) 1, 0L);
 
 

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java

@@ -307,7 +307,8 @@ public class TestStripedINodeFile {
       dfs.mkdirs(ecDir);
       dfs.mkdirs(ecDir);
 
 
       // set erasure coding policy
       // set erasure coding policy
-      dfs.setErasureCodingPolicy(ecDir, null);
+      dfs.setErasureCodingPolicy(ecDir,
+          ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
       DFSTestUtil.createFile(dfs, ecFile, len, (short) 1, 0xFEED);
       DFSTestUtil.createFile(dfs, ecFile, len, (short) 1, 0xFEED);
       DFSTestUtil.createFile(dfs, contiguousFile, len, (short) 1, 0xFEED);
       DFSTestUtil.createFile(dfs, contiguousFile, len, (short) 1, 0xFEED);
       final FSDirectory fsd = fsn.getFSDirectory();
       final FSDirectory fsd = fsn.getFSDirectory();
@@ -408,7 +409,8 @@ public class TestStripedINodeFile {
       client.mkdirs(fooDir, new FsPermission((short) 777), true);
       client.mkdirs(fooDir, new FsPermission((short) 777), true);
       client.setStoragePolicy(fooDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
       client.setStoragePolicy(fooDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
       // set an EC policy on "/foo" directory
       // set an EC policy on "/foo" directory
-      client.setErasureCodingPolicy(fooDir, null);
+      client.setErasureCodingPolicy(fooDir,
+          ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
 
 
       // write file to fooDir
       // write file to fooDir
       final String barFile = "/foo/bar";
       final String barFile = "/foo/bar";

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java

@@ -236,7 +236,8 @@ public class TestOfflineImageViewer {
       ErasureCodingPolicy ecPolicy =
       ErasureCodingPolicy ecPolicy =
           ErasureCodingPolicyManager.getPolicyByPolicyID(
           ErasureCodingPolicyManager.getPolicyByPolicyID(
               HdfsConstants.XOR_2_1_POLICY_ID);
               HdfsConstants.XOR_2_1_POLICY_ID);
-      hdfs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy);
+      hdfs.getClient().setErasureCodingPolicy(ecDir.toString(),
+          ecPolicy.getName());
       writtenFiles.put(ecDir.toString(), hdfs.getFileStatus(ecDir));
       writtenFiles.put(ecDir.toString(), hdfs.getFileStatus(ecDir));
 
 
       // Create an empty Erasure Coded file
       // Create an empty Erasure Coded file

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java

@@ -63,7 +63,8 @@ public class TestOfflineImageViewerWithStripedBlocks {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
     cluster.waitActive();
     cluster.waitActive();
-    cluster.getFileSystem().getClient().setErasureCodingPolicy("/", null);
+    cluster.getFileSystem().getClient().setErasureCodingPolicy("/",
+        ErasureCodingPolicyManager.getSystemDefaultPolicy().getName());
     fs = cluster.getFileSystem();
     fs = cluster.getFileSystem();
     Path eczone = new Path("/eczone");
     Path eczone = new Path("/eczone");
     fs.mkdirs(eczone);
     fs.mkdirs(eczone);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml

@@ -359,7 +359,7 @@
       <comparators>
       <comparators>
         <comparator>
         <comparator>
           <type>SubstringComparator</type>
           <type>SubstringComparator</type>
-          <expected-output>Policy 'invalidpolicy' does not match any of the supported policies. Please select any one of [</expected-output>
+          <expected-output>Policy 'invalidpolicy' does not match any supported erasure coding policies.</expected-output>
         </comparator>
         </comparator>
       </comparators>
       </comparators>
     </test>
     </test>

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java

@@ -989,7 +989,7 @@ public class TestDFSIO implements Tool {
         ((DistributedFileSystem) fs).getAllErasureCodingPolicies();
         ((DistributedFileSystem) fs).getAllErasureCodingPolicies();
     for (ErasureCodingPolicy ec : list) {
     for (ErasureCodingPolicy ec : list) {
       if (erasureCodePolicyName.equals(ec.getName())) {
       if (erasureCodePolicyName.equals(ec.getName())) {
-        ((DistributedFileSystem) fs).setErasureCodingPolicy(path, ec);
+        ((DistributedFileSystem) fs).setErasureCodingPolicy(path, ec.getName());
         LOG.info("enable erasureCodePolicy = " + erasureCodePolicyName  +
         LOG.info("enable erasureCodePolicy = " + erasureCodePolicyName  +
             " on " + path.toString());
             " on " + path.toString());
         break;
         break;