
HDFS-9902. Support different values of dfs.datanode.du.reserved per storage type. (Contributed by Brahma Reddy Battula)

Arpit Agarwal 9 years ago
parent
commit
ce94203566

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java

@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.CloseableReferenceCount;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.codehaus.jackson.annotate.JsonProperty;
 import org.codehaus.jackson.map.ObjectMapper;
@@ -117,9 +118,10 @@ public class FsVolumeImpl implements FsVolumeSpi {
       Configuration conf, StorageType storageType) throws IOException {
     this.dataset = dataset;
     this.storageID = storageID;
-    this.reserved = conf.getLong(
+    this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
+        + "." + StringUtils.toLowerCase(storageType.toString()), conf.getLong(
         DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
-        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT);
+        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));
     this.reservedForReplicas = new AtomicLong(0L);
     this.currentDir = currentDir;
     File parent = currentDir.getParentFile();
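
The constructor change above resolves a storage-type-specific key first (e.g. dfs.datanode.du.reserved.ram_disk) and falls back to the generic dfs.datanode.du.reserved key, then to the default of 0. A minimal standalone sketch of the same lookup pattern follows; the ReservedLookup class, reservedFor method, and the configuration values are illustrative, not part of the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.util.StringUtils;

public class ReservedLookup {
  static long reservedFor(Configuration conf, StorageType type) {
    // Builds e.g. "dfs.datanode.du.reserved.ram_disk".
    String typedKey = DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
        + "." + StringUtils.toLowerCase(type.toString());
    // The typed key wins if set; otherwise use the generic key, then the default.
    return conf.getLong(typedKey, conf.getLong(
        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setLong("dfs.datanode.du.reserved", 100L);         // hypothetical values
    conf.setLong("dfs.datanode.du.reserved.ram_disk", 1L);
    System.out.println(reservedFor(conf, StorageType.RAM_DISK)); // prints 1
    System.out.println(reservedFor(conf, StorageType.DISK));     // prints 100
  }
}

Note the nested conf.getLong: the generic key's value is computed as the default argument for the typed key, which keeps the whole precedence chain in a single expression.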

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -321,6 +321,11 @@
   <name>dfs.datanode.du.reserved</name>
   <value>0</value>
   <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+      A storage-type-specific reservation is also supported. The property name can be suffixed
+      with a storage type ([ssd]/[disk]/[archive]/[ram_disk]) for clusters with heterogeneous storage.
+      For example, reserved space for RAM_DISK storage can be configured using the property
+      'dfs.datanode.du.reserved.ram_disk'. If a storage-type-specific reservation is not configured
+      then dfs.datanode.du.reserved will be used.
   </description>
 </property>
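
As an illustration, an operator could reserve a different amount on RAM_DISK volumes in hdfs-site.xml; the 1 GB value below is hypothetical:

<property>
  <name>dfs.datanode.du.reserved.ram_disk</name>
  <value>1073741824</value>
  <description>Reserve 1 GB on each RAM_DISK volume. Other storage types
    fall back to dfs.datanode.du.reserved.</description>
</property>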
 

+ 35 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.StringUtils;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -36,7 +37,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
-
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
@@ -143,4 +144,37 @@ public class TestFsVolumeList {
     volumeList.addVolume(ref);
     assertNull(ref.getVolume());
   }
+
+  @Test
+  public void testDfsReservedForDifferentStorageTypes() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY, 100L);
+
+    File volDir = new File(baseDir, "volume-0");
+    volDir.mkdirs();
+    // When a storage-type-specific reservation is not configured, fall back
+    // to dfs.datanode.du.reserved.
+    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir, conf,
+        StorageType.RAM_DISK);
+    assertEquals("", 100L, volume.getReserved());
+    // when storage type reserved is configured.
+    conf.setLong(
+        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
+            + StringUtils.toLowerCase(StorageType.RAM_DISK.toString()), 1L);
+    conf.setLong(
+        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
+            + StringUtils.toLowerCase(StorageType.SSD.toString()), 2L);
+    FsVolumeImpl volume1 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.RAM_DISK);
+    assertEquals(1L, volume1.getReserved());
+    FsVolumeImpl volume2 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.SSD);
+    assertEquals(2L, volume2.getReserved());
+    // Storage types without a specific reservation fall back to the default.
+    FsVolumeImpl volume3 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.DISK);
+    assertEquals(100L, volume3.getReserved());
+    FsVolumeImpl volume4 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.DEFAULT);
+    assertEquals(100L, volume4.getReserved());
+  }
 }