
HDFS-12547. Extend TestQuotaWithStripedBlocks with a random EC policy. Contributed by Takanobu Asanuma.

(cherry picked from commit a297fb08866305860dc17813c3db5701e9515101)
Andrew Wang 7 years ago
parent
commit
a26da9dcf2
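
Why the quota formula changes in this patch: the old hard-coded DISK_QUOTA = BLOCK_SIZE * 10 only matches the default RS-6-3 policy (6 data + 3 parity = 9 internal blocks, plus one spare block). Rewriting it as blockSize * (groupSize + 1) keeps the same value for the default policy while scaling correctly for whatever policy a subclass supplies. A minimal sketch of that arithmetic (not part of the patch; the class and variable names are illustrative):

// Checks that the generalized quota formula reproduces the old constant
// for the default RS-6-3 policy.
public class QuotaArithmeticSketch {
  public static void main(String[] args) {
    final long blockSize = 1024 * 1024;          // 1 MiB, as in setUp()
    final int dataBlocks = 6, parityBlocks = 3;  // RS-6-3, the default policy
    final int groupSize = dataBlocks + parityBlocks;    // 9

    final long oldQuota = blockSize * 10;                // old DISK_QUOTA
    final long newQuota = blockSize * (groupSize + 1);   // new diskQuota

    System.out.println(oldQuota == newQuota);  // true: both are 10 MiB
  }
}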

+ 26 - 14
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocks.java

@@ -44,27 +44,39 @@ import java.io.IOException;
  * Make sure we correctly update the quota usage with the striped blocks.
  */
 public class TestQuotaWithStripedBlocks {
-  private static final int BLOCK_SIZE = 1024 * 1024;
-  private static final long DISK_QUOTA = BLOCK_SIZE * 10;
-  private final ErasureCodingPolicy ecPolicy =
-      StripedFileTestUtil.getDefaultECPolicy();
-  private final int dataBlocks = ecPolicy.getNumDataUnits();
-  private final int parityBlocks = ecPolicy.getNumParityUnits();
-  private final int groupSize = dataBlocks + parityBlocks;
-  private final int cellSize = ecPolicy.getCellSize();
-  private static final Path ecDir = new Path("/ec");
+  private int blockSize;
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlocks;
+  private int parityBlocks;
+  private int groupSize;
+  private int cellSize;
+  private Path ecDir;
+  private long diskQuota;
 
   private MiniDFSCluster cluster;
   private FSDirectory dir;
   private DistributedFileSystem dfs;
 
+  public ErasureCodingPolicy getEcPolicy() {
+    return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
   @Before
   public void setUp() throws IOException {
+    blockSize = 1024 * 1024;
+    ecPolicy = getEcPolicy();
+    dataBlocks = ecPolicy.getNumDataUnits();
+    parityBlocks = ecPolicy.getNumParityUnits();
+    groupSize = dataBlocks + parityBlocks;
+    cellSize = ecPolicy.getCellSize();
+    ecDir = new Path("/ec");
+    diskQuota = blockSize * (groupSize + 1);
+
     final Configuration conf = new Configuration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
     cluster.waitActive();
 
@@ -75,8 +87,8 @@ public class TestQuotaWithStripedBlocks {
     dfs.mkdirs(ecDir);
     dfs.getClient()
         .setErasureCodingPolicy(ecDir.toString(), ecPolicy.getName());
-    dfs.setQuota(ecDir, Long.MAX_VALUE - 1, DISK_QUOTA);
-    dfs.setQuotaByStorageType(ecDir, StorageType.DISK, DISK_QUOTA);
+    dfs.setQuota(ecDir, Long.MAX_VALUE - 1, diskQuota);
+    dfs.setQuotaByStorageType(ecDir, StorageType.DISK, diskQuota);
     dfs.setStoragePolicy(ecDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
   }
 
@@ -112,8 +124,8 @@ public class TestQuotaWithStripedBlocks {
       final long diskUsed = dirNode.getDirectoryWithQuotaFeature()
           .getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
       // When we add a new block we update the quota using the full block size.
-      Assert.assertEquals(BLOCK_SIZE * groupSize, spaceUsed);
-      Assert.assertEquals(BLOCK_SIZE * groupSize, diskUsed);
+      Assert.assertEquals(blockSize * groupSize, spaceUsed);
+      Assert.assertEquals(blockSize * groupSize, diskUsed);
 
       dfs.getClient().getNamenode().complete(file.toString(),
           dfs.getClient().getClientName(), previous, fileNode.getId());
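
The assertion above charges the full block size for every internal block of the in-flight block group, so the expected consumption is blockSize * groupSize regardless of how much data has actually been written. A sketch of the same arithmetic for the largest built-in policy, RS-10-4, showing that one group still fits under the generalized quota (the class name is illustrative):

public class ConsumptionSketch {
  public static void main(String[] args) {
    final long blockSize = 1024 * 1024;
    final int groupSize = 10 + 4;                        // RS-10-4
    final long spaceUsed = blockSize * groupSize;        // 14 MiB charged
    final long diskQuota = blockSize * (groupSize + 1);  // 15 MiB allowed
    System.out.println(spaceUsed <= diskQuota);  // true: one group fits
  }
}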

+ 50 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaWithStripedBlocksWithRandomECPolicy.java

@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This test extends TestQuotaWithStripedBlocks to use a random
+ * (non-default) EC policy.
+ */
+public class TestQuotaWithStripedBlocksWithRandomECPolicy extends
+    TestQuotaWithStripedBlocks {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      TestQuotaWithStripedBlocksWithRandomECPolicy.class);
+
+  private ErasureCodingPolicy ecPolicy;
+
+  public TestQuotaWithStripedBlocksWithRandomECPolicy() {
+    // To debug this test with a specific EC policy, use the
+    // SystemErasureCodingPolicies class:
+    // e.g. ecPolicy = SystemErasureCodingPolicies.getByID(RS_3_2_POLICY_ID);
+    ecPolicy = StripedFileTestUtil.getRandomNonDefaultECPolicy();
+    LOG.info("run {} with {}.",
+        TestQuotaWithStripedBlocksWithRandomECPolicy.class
+            .getSuperclass().getSimpleName(), ecPolicy.getName());
+  }
+
+  @Override
+  public ErasureCodingPolicy getEcPolicy() {
+    return ecPolicy;
+  }
+}
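
As the constructor comment notes, the random policy can be pinned to a fixed one while debugging. A hedged sketch of what that could look like, assuming the SystemErasureCodingPolicies API from org.apache.hadoop.hdfs.protocol (the surrounding class name is hypothetical; getByID and RS_3_2_POLICY_ID are real members of that class):

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;

// Hypothetical debugging variant: pin RS-3-2 instead of a random policy.
public class PinnedPolicyExample {
  public static ErasureCodingPolicy pinnedPolicy() {
    return SystemErasureCodingPolicies.getByID(
        SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
  }
}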