浏览代码

HDDS-300. Create a config for volume choosing policy. Contributed by Bharat Viswanadham.

Nanda kumar 6 年之前
父节点
当前提交
db465afb5c

+ 4 - 0
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java

@@ -54,4 +54,8 @@ public final class HddsConfigKeys {
   public static final int HDDS_CONTAINER_ACTION_MAX_LIMIT_DEFAULT =
       20;
 
+  // Configuration key for specifying the volume choosing policy.
+  public static final String HDDS_DATANODE_VOLUME_CHOOSING_POLICY =
+      "hdds.datanode.volume.choosing.policy";
+
 }

+ 11 - 0
hadoop-hdds/common/src/main/resources/ozone-default.xml

@@ -74,6 +74,17 @@
       tagged explicitly.
     </description>
   </property>
+  <property>
+    <name>hdds.datanode.volume.choosing.policy</name>
+    <value/>
+    <tag>OZONE, CONTAINER, STORAGE, MANAGEMENT</tag>
+    <description>
+      The class name of the policy for choosing volumes in the list of
+      directories. Defaults to
+      org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy.
+      This volume choosing policy selects volumes in a round-robin order.
+    </description>
+  </property>
   <property>
     <name>dfs.container.ratis.enabled</name>
     <value>false</value>

+ 10 - 4
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java

@@ -30,8 +30,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerType;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .CreateContainerRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .GetSmallFileRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -66,6 +64,7 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.KeyManager;
 import org.apache.hadoop.ozone.container.keyvalue.statemachine
     .background.BlockDeletingService;
 import org.apache.hadoop.util.AutoCloseableLock;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -104,6 +103,8 @@ import static org.apache.hadoop.ozone
     .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
 import static org.apache.hadoop.ozone
     .OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_DATANODE_VOLUME_CHOOSING_POLICY;
 
 /**
  * Handler for KeyValue Container type.
@@ -140,8 +141,9 @@ public class KeyValueHandler extends Handler {
         new BlockDeletingService(containerSet, svcInterval, serviceTimeout,
             TimeUnit.MILLISECONDS, config);
     blockDeletingService.start();
-    // TODO: Add supoort for different volumeChoosingPolicies.
-    volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
+    volumeChoosingPolicy = ReflectionUtils.newInstance(conf.getClass(
+        HDDS_DATANODE_VOLUME_CHOOSING_POLICY, RoundRobinVolumeChoosingPolicy
+            .class, VolumeChoosingPolicy.class), conf);
     maxContainerSizeGB = config.getInt(ScmConfigKeys
             .OZONE_SCM_CONTAINER_SIZE_GB, ScmConfigKeys
         .OZONE_SCM_CONTAINER_SIZE_DEFAULT);
@@ -151,6 +153,10 @@ public class KeyValueHandler extends Handler {
     openContainerBlockMap = new OpenContainerBlockMap();
   }
 
+  /**
+   * Returns the {@link VolumeChoosingPolicy} instance that was created from
+   * the hdds.datanode.volume.choosing.policy configuration. Exposed only so
+   * unit tests can verify which policy class was loaded.
+   */
+  @VisibleForTesting
+  public VolumeChoosingPolicy getVolumeChoosingPolicyForTesting() {
+    return volumeChoosingPolicy;
+  }
   /**
    * Returns OpenContainerBlockMap instance
    * @return OpenContainerBlockMap

+ 45 - 1
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java

@@ -19,25 +19,36 @@
 package org.apache.hadoop.ozone.container.keyvalue;
 
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
-import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TestRule;
 import org.junit.rules.Timeout;
 
 import org.mockito.Mockito;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys
+    .HDDS_DATANODE_VOLUME_CHOOSING_POLICY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.junit.Assert.assertEquals;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.doCallRealMethod;
 import static org.mockito.Mockito.times;
 
 
+import java.io.File;
 import java.util.UUID;
 
 /**
@@ -193,6 +204,39 @@ public class TestKeyValueHandler {
         any(ContainerCommandRequestProto.class), any());
   }
 
+  @Test
+  public void testVolumeSetInKeyValueHandler() throws Exception {
+    File path = GenericTestUtils.getRandomizedTestDir();
+    try {
+      Configuration conf = new OzoneConfiguration();
+      conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath());
+      ContainerSet cset = new ContainerSet();
+      int[] interval = new int[1];
+      interval[0] = 2;
+      ContainerMetrics metrics = new ContainerMetrics(interval);
+      VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
+      KeyValueHandler keyValueHandler = new KeyValueHandler(conf, cset,
+          volumeSet, metrics);
+      // With no policy configured, the default round-robin policy is used.
+      // JUnit convention: expected value first, actual value second.
+      assertEquals("org.apache.hadoop.ozone.container.common" +
+          ".volume.RoundRobinVolumeChoosingPolicy",
+          keyValueHandler.getVolumeChoosingPolicyForTesting()
+              .getClass().getName());
+
+      //Set a class which is not of sub class of VolumeChoosingPolicy
+      conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY,
+          "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher");
+      try {
+        new KeyValueHandler(conf, cset, volumeSet, metrics);
+        // AssertionError is not a RuntimeException, so it escapes the catch
+        // below and fails the test if the constructor did not throw.
+        throw new AssertionError(
+            "KeyValueHandler construction should have failed for a class " +
+            "that does not implement VolumeChoosingPolicy");
+      } catch (RuntimeException ex) {
+        GenericTestUtils.assertExceptionContains("class org.apache.hadoop" +
+            ".ozone.container.common.impl.HddsDispatcher not org.apache" +
+            ".hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy",
+            ex);
+      }
+    } finally {
+      FileUtil.fullyDelete(path);
+    }
+  }
+
   private ContainerCommandRequestProto getDummyCommandRequestProto(
       ContainerProtos.Type cmdType) {
     ContainerCommandRequestProto request =