
HDFS-11794. Add ec sub command -listCodec to show currently supported ec codecs. Contributed by SammiChen.

Rakesh Radhakrishnan, 8 years ago
Parent commit: 1b5451bf05
16 changed files with 264 additions and 4 deletions
  1. + 17 - 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java
  2. + 7 - 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  3. + 13 - 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  4. + 11 - 2
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  5. + 23 - 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  6. + 8 - 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
  7. + 2 - 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
  8. + 15 - 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
  9. + 23 - 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  10. + 14 - 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
  11. + 14 - 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  12. + 7 - 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  13. + 58 - 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
  14. + 6 - 1
      hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
  15. + 17 - 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java
  16. + 29 - 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml

+ 17 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecRegistry.java

@@ -55,9 +55,14 @@ public final class CodecRegistry {
 
   private Map<String, String[]> coderNameMap;
 
+  // Protobuf 2.5.0 doesn't support the map<String, String[]> type well, so
+  // use the compact value instead.
+  private HashMap<String, String> coderNameCompactMap;
+
   private CodecRegistry() {
     coderMap = new HashMap<>();
     coderNameMap = new HashMap<>();
+    coderNameCompactMap = new HashMap<>();
     final ServiceLoader<RawErasureCoderFactory> coderFactories =
         ServiceLoader.load(RawErasureCoderFactory.class);
     updateCoders(coderFactories);
@@ -113,6 +118,9 @@ public final class CodecRegistry {
       coderNameMap.put(codecName, coders.stream().
           map(RawErasureCoderFactory::getCoderName).
           collect(Collectors.toList()).toArray(new String[0]));
+      coderNameCompactMap.put(codecName, coders.stream().
+          map(RawErasureCoderFactory::getCoderName)
+          .collect(Collectors.joining(", ")));
     }
   }
 
@@ -173,4 +181,13 @@ public final class CodecRegistry {
     throw new IllegalArgumentException("No implementation for coder "
         + coderName + " of codec " + codecName);
   }
+
+  /**
+   * Get all codec names and their corresponding coder list.
+   * @return a map of all codec names, and their corresponding coder list
+   * separated by ','.
+   */
+  public HashMap<String, String> getCodec2CoderCompactMap() {
+    return coderNameCompactMap;
+  }
 }
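
The compact map duplicates `coderNameMap` with each codec's coder array flattened into a single comma-separated string, in fallback order, so the value can cross the Protobuf 2.5 wire format (see `CodecProto` further down). A minimal sketch of reading the registry locally; the entry shown in the comment is illustrative, since the map only reflects whatever `RawErasureCoderFactory` implementations the `ServiceLoader` discovers:

import java.util.Map;

import org.apache.hadoop.io.erasurecode.CodecRegistry;

public class ShowCompactCodecMap {
  public static void main(String[] args) {
    // Singleton registry, populated from the ServiceLoader at construction.
    Map<String, String> codecs =
        CodecRegistry.getInstance().getCodec2CoderCompactMap();
    // Illustrative entry: "rs" -> "rs_native, rs_java" (fallback order)
    codecs.forEach((codec, coders) ->
        System.out.println(codec + " -> " + coders));
  }
}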

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -2763,6 +2763,13 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
+  public HashMap<String, String> getErasureCodingCodecs() throws IOException {
+    checkOpen();
+    try (TraceScope ignored = tracer.newScope("getErasureCodingCodecs")) {
+      return namenode.getErasureCodingCodecs();
+    }
+  }
+
   public AddingECPolicyResponse[] addErasureCodingPolicies(
       ErasureCodingPolicy[] policies) throws IOException {
     checkOpen();

+ 13 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -26,6 +26,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
@@ -2535,6 +2536,18 @@ public class DistributedFileSystem extends FileSystem {
     return Arrays.asList(dfs.getErasureCodingPolicies());
   }
 
+  /**
+   * Retrieve all the erasure coding codecs and coders supported by this file
+   * system.
+   *
+   * @return all erasure coding codecs and coders supported by this file system.
+   * @throws IOException
+   */
+  public HashMap<String, String> getAllErasureCodingCodecs()
+      throws IOException {
+    return dfs.getErasureCodingCodecs();
+  }
+
   /**
    * Add Erasure coding policies to HDFS.
    *
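
A hedged usage sketch of the new `DistributedFileSystem` API; it assumes `fs.defaultFS` in the loaded configuration points at an HDFS cluster:

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class PrintErasureCodingCodecs {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);  // assumes an HDFS default FS
    if (fs instanceof DistributedFileSystem) {
      Map<String, String> codecs =
          ((DistributedFileSystem) fs).getAllErasureCodingCodecs();
      codecs.forEach((codec, coders) ->
          System.out.println(codec + " [" + coders + "]"));
    }
  }
}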

+ 11 - 2
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
 import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -1536,7 +1537,7 @@ public interface ClientProtocol {
       ErasureCodingPolicy[] policies) throws IOException;
 
   /**
-   * Get the erasure coding policies loaded in Namenode
+   * Get the erasure coding policies loaded in Namenode.
    *
    * @throws IOException
    */
@@ -1544,7 +1545,15 @@ public interface ClientProtocol {
   ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException;
 
   /**
-   * Get the information about the EC policy for the path
+   * Get the erasure coding codecs loaded in Namenode.
+   *
+   * @throws IOException
+   */
+  @Idempotent
+  HashMap<String, String> getErasureCodingCodecs() throws IOException;
+
+  /**
+   * Get the information about the EC policy for the path.
    *
    * @param src path to get the info for
    * @throws IOException

+ 23 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -21,6 +21,7 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.List;
 
 import com.google.common.collect.Lists;
@@ -176,8 +177,11 @@ import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodin
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CodecProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
@@ -246,6 +250,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
       VOID_GET_EC_POLICIES_REQUEST = GetErasureCodingPoliciesRequestProto
       .newBuilder().build();
 
+  private final static GetErasureCodingCodecsRequestProto
+      VOID_GET_EC_CODEC_REQUEST = GetErasureCodingCodecsRequestProto
+      .newBuilder().build();
+
   public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
     rpcProxy = proxy;
   }
@@ -1668,6 +1676,21 @@ public class ClientNamenodeProtocolTranslatorPB implements
     }
   }
 
+  @Override
+  public HashMap<String, String> getErasureCodingCodecs() throws IOException {
+    try {
+      GetErasureCodingCodecsResponseProto response = rpcProxy
+          .getErasureCodingCodecs(null, VOID_GET_EC_CODEC_REQUEST);
+      HashMap<String, String> ecCodecs = new HashMap<String, String>();
+      for (CodecProto codec : response.getCodecList()) {
+        ecCodecs.put(codec.getCodec(), codec.getCoders());
+      }
+      return ecCodecs;
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
   @Override
   public ErasureCodingPolicy getErasureCodingPolicy(String src)
       throws IOException {

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java

@@ -172,6 +172,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CodecProto;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -2701,6 +2702,13 @@ public class PBHelperClient {
     return builder.build();
   }
 
+  public static CodecProto convertErasureCodingCodec(String codec,
+      String coders) {
+    CodecProto.Builder builder = CodecProto.newBuilder()
+        .setCodec(codec).setCoders(coders);
+    return builder.build();
+  }
+
   public static AddingECPolicyResponseProto convertAddingECPolicyResponse(
       AddingECPolicyResponse response) {
     AddingECPolicyResponseProto.Builder builder =

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto

@@ -912,6 +912,8 @@ service ClientNamenodeProtocol {
       returns(AddErasureCodingPoliciesResponseProto);
   rpc getErasureCodingPolicy(GetErasureCodingPolicyRequestProto)
       returns(GetErasureCodingPolicyResponseProto);
+  rpc getErasureCodingCodecs(GetErasureCodingCodecsRequestProto)
+      returns(GetErasureCodingCodecsResponseProto);
   rpc getQuotaUsage(GetQuotaUsageRequestProto)
       returns(GetQuotaUsageResponseProto);
 }

+ 15 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto

@@ -38,6 +38,13 @@ message GetErasureCodingPoliciesResponseProto {
   repeated ErasureCodingPolicyProto ecPolicies = 1;
 }
 
+message GetErasureCodingCodecsRequestProto { // void request
+}
+
+message GetErasureCodingCodecsResponseProto {
+  repeated CodecProto codec = 1;
+}
+
 message GetErasureCodingPolicyRequestProto {
   required string src = 1; // path to get the policy info
 }
@@ -73,3 +80,11 @@ message BlockECReconstructionInfoProto {
   required bytes liveBlockIndices = 6;
   required ErasureCodingPolicyProto ecPolicy = 7;
 }
+
+/**
+ * Codec and its corresponding coders
+ */
+message CodecProto {
+  required string codec  = 1;
+  required string coders = 2;
+}
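
Because Protobuf 2.5 has no map field type, the response is a repeated `CodecProto`, one entry per codec, with the coder list flattened into a single string. A hypothetical populated entry, mirroring what the server-side translator below produces; the codec and coder names are examples only:

import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.CodecProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto;

public class BuildCodecsResponse {
  static GetErasureCodingCodecsResponseProto example() {
    CodecProto entry = CodecProto.newBuilder()
        .setCodec("rs")                    // codec name (illustrative)
        .setCoders("rs_native, rs_java")   // comma-separated, fallback order
        .build();
    return GetErasureCodingCodecsResponseProto.newBuilder()
        .addCodec(entry)
        .build();
  }
}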

+ 23 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -21,7 +21,9 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -223,6 +225,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodin
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
@@ -1618,6 +1622,25 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
   }
 
+  @Override
+  public GetErasureCodingCodecsResponseProto getErasureCodingCodecs(
+      RpcController controller, GetErasureCodingCodecsRequestProto request)
+      throws ServiceException {
+    try {
+      HashMap<String, String> codecs = server.getErasureCodingCodecs();
+      GetErasureCodingCodecsResponseProto.Builder resBuilder =
+          GetErasureCodingCodecsResponseProto.newBuilder();
+      for (Map.Entry<String, String> codec : codecs.entrySet()) {
+        resBuilder.addCodec(
+            PBHelperClient.convertErasureCodingCodec(
+                codec.getKey(), codec.getValue()));
+      }
+      return resBuilder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
   @Override
   public AddErasureCodingPoliciesResponseProto addErasureCodingPolicies(
       RpcController controller, AddErasureCodingPoliciesRequestProto request)

+ 14 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java

@@ -25,6 +25,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.List;
 import java.util.stream.Collectors;
 
@@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.io.erasurecode.CodecRegistry;
 import org.apache.hadoop.security.AccessControlException;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_ERASURECODING_POLICY;
@@ -311,6 +313,18 @@ final class FSDirErasureCodingOp {
     return fsn.getErasureCodingPolicyManager().getEnabledPolicies();
   }
 
+  /**
+   * Get available erasure coding codecs and coders.
+   *
+   * @param fsn namespace
+   * @return a {@link java.util.HashMap} of codec names to their coders
+   */
+  static HashMap<String, String> getErasureCodingCodecs(final FSNamesystem fsn)
+      throws IOException {
+    assert fsn.hasReadLock();
+    return CodecRegistry.getInstance().getCodec2CoderCompactMap();
+  }
+
   private static ErasureCodingPolicy getErasureCodingPolicyForPath(
       FSDirectory fsd, INodesInPath iip) throws IOException {
     Preconditions.checkNotNull(iip, "INodes cannot be null");

+ 14 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -6930,6 +6930,20 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
   }
 
+  /**
+   * Get available erasure coding codecs and corresponding coders.
+   */
+  HashMap<String, String> getErasureCodingCodecs() throws IOException {
+    checkOperation(OperationCategory.READ);
+    readLock();
+    try {
+      checkOperation(OperationCategory.READ);
+      return FSDirErasureCodingOp.getErasureCodingCodecs(this);
+    } finally {
+      readUnlock("getErasureCodingCodecs");
+    }
+  }
+
   void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
                 boolean logRetryCache)
       throws IOException {

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -37,6 +37,7 @@ import java.net.InetSocketAddress;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -2240,6 +2241,12 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     return namesystem.getErasureCodingPolicies();
   }
 
+  @Override // ClientProtocol
+  public HashMap<String, String> getErasureCodingCodecs() throws IOException {
+    checkNNStartup();
+    return namesystem.getErasureCodingCodecs();
+  }
+
   @Override // ClientProtocol
   public ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException {
     checkNNStartup();

+ 58 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java

@@ -33,8 +33,10 @@ import org.apache.hadoop.util.ToolRunner;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
 
 /**
  * CLI for the erasure code encoding operations.
@@ -361,11 +363,66 @@ public class ECAdmin extends Configured implements Tool {
     }
   }
 
+  /** Command to list the set of supported erasure coding codecs and coders. */
+  private static class ListECCodecsCommand
+      implements AdminHelper.Command {
+    @Override
+    public String getName() {
+      return "-listCodecs";
+    }
+
+    @Override
+    public String getShortUsage() {
+      return "[" + getName() + "]\n";
+    }
+
+    @Override
+    public String getLongUsage() {
+      return getShortUsage() + "\n" +
+          "Get the list of supported erasure coding codecs and coders.\n" +
+          "A coder is an implementation of a codec. A codec can have " +
+          "different implementations, thus different coders.\n" +
+          "The coders for a codec are listed in a fall back order.\n";
+    }
+
+    @Override
+    public int run(Configuration conf, List<String> args) throws IOException {
+      if (args.size() > 0) {
+        System.err.println(getName() + ": Too many arguments");
+        return 1;
+      }
+
+      final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+      try {
+        HashMap<String, String> codecs =
+            dfs.getAllErasureCodingCodecs();
+        if (codecs.isEmpty()) {
+          System.out.println("No erasure coding codecs are supported on the " +
+              "cluster.");
+        } else {
+          System.out.println("Erasure Coding Codecs: Codec [Coder List]");
+          for (Map.Entry<String, String> codec : codecs.entrySet()) {
+            if (codec != null) {
+              System.out.println("\t" + codec.getKey().toUpperCase() + " ["
+                  + codec.getValue().toUpperCase() + "]");
+            }
+          }
+        }
+      } catch (IOException e) {
+        System.err.println(AdminHelper.prettifyException(e));
+        return 2;
+      }
+      return 0;
+    }
+  }
+
+
   private static final AdminHelper.Command[] COMMANDS = {
       new ListECPoliciesCommand(),
       new AddECPoliciesCommand(),
       new GetECPolicyCommand(),
       new SetECPolicyCommand(),
-      new UnsetECPolicyCommand()
+      new UnsetECPolicyCommand(),
+      new ListECCodecsCommand()
   };
 }
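
With `ListECCodecsCommand` registered in `COMMANDS`, the subcommand is reachable as `hdfs ec -listCodecs`. A sketch of driving it programmatically through `ToolRunner`; the no-argument `ECAdmin` constructor is assumed here:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.ECAdmin;
import org.apache.hadoop.util.ToolRunner;

public class RunListCodecs {
  public static void main(String[] args) throws Exception {
    // Equivalent to running: hdfs ec -listCodecs
    int exitCode = ToolRunner.run(new Configuration(), new ECAdmin(),
        new String[] {"-listCodecs"});
    System.exit(exitCode);
  }
}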

+ 6 - 1
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md

@@ -154,6 +154,7 @@ Deployment
          [-getPolicy -path <path>]
          [-unsetPolicy -path <path>]
          [-listPolicies]
+         [-listCodecs]
          [-usage [cmd ...]]
          [-help [cmd ...]]
 
@@ -181,4 +182,8 @@ Below are the details about each command.
 
  *  `[-addPolicies -policyFile <file>]`
 
-     Add a list of erasure coding policies. Please refer to etc/hadoop/user_ec_policies.xml.template for the example policy file.
+     Add a list of erasure coding policies. Please refer to etc/hadoop/user_ec_policies.xml.template for the example policy file.
+
+ *  `[-listCodecs]`
+
+     Get the list of supported erasure coding codecs and coders in the system. A coder is an implementation of a codec. A codec can have different implementations, thus different coders. The coders for a codec are listed in a fallback order.
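
Judging from the `run` method in `ECAdmin` above, the command's output takes roughly this shape; the codec and coder entries below are illustrative, as the actual list depends on the coder factories loaded on the cluster:

    Erasure Coding Codecs: Codec [Coder List]
        RS [RS_NATIVE, RS_JAVA]
        XOR [XOR_NATIVE, XOR_JAVA]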

+ 17 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingPolicies.java

@@ -48,6 +48,7 @@ import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.List;
 
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
@@ -622,4 +623,20 @@ public class TestErasureCodingPolicies {
     assertNull(fs.getErasureCodingPolicy(filePath));
     fs.delete(dirPath, true);
   }
+
+  @Test
+  public void testGetAllErasureCodingCodecs() throws Exception {
+    HashMap<String, String> allECCodecs = fs
+        .getAllErasureCodingCodecs();
+    assertTrue("At least 3 system codecs should be enabled",
+        allECCodecs.size() >= 3);
+    System.out.println("Erasure Coding Codecs: Codec [Coder List]");
+    for (String codec : allECCodecs.keySet()) {
+      String coders = allECCodecs.get(codec);
+      if (codec != null && coders != null) {
+        System.out.println("\t" + codec.toUpperCase() + "["
+            + coders.toUpperCase() + "]");
+      }
+    }
+  }
 }

+ 29 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml

@@ -541,5 +541,34 @@
       </comparators>
     </test>
 
+    <test>
+      <description>listCodecs : illegal parameters - too many parameters</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -listCodecs /ecdir</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>-listCodecs: Too many arguments</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>listCodecs : successful list codecs</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -listCodecs</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Erasure Coding Codecs: Codec [Coder List]</expected-output>
+        </comparator>
+      </comparators>
+    </test>
   </tests>
 </configuration>