HDFS-9902. Support different values of dfs.datanode.du.reserved per storage type. Contributed By Brahma Reddy Battula.

Brahma Reddy Battula, 8 years ago
parent commit 764fd3ed49

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -82,6 +82,12 @@ Release 2.7.4 - UNRELEASED
     HDFS-11717. Add unit test for HDFS-11709 StandbyCheckpointer should handle
     non-existing legacyOivImageDir gracefully. (Erik Krogen via shv)
 
+    HDFS-9902. Support different values of dfs.datanode.du.reserved per storage
+    type. (Brahma Reddy Battula)
+
+    HDFS-11785. Backport HDFS-9902 to branch-2.7: Support different values of
+    dfs.datanode.du.reserved per storage type. (Brahma Reddy Battula)
+
   OPTIMIZATIONS
 
     HDFS-10896. Move lock logging logic from FSNamesystem into FSNamesystemLock.

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java

@@ -63,6 +63,7 @@ import org.apache.hadoop.util.CloseableReferenceCount;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.util.Time;
@@ -113,9 +114,10 @@ public class FsVolumeImpl implements FsVolumeSpi {
       Configuration conf, StorageType storageType) throws IOException {
     this.dataset = dataset;
     this.storageID = storageID;
-    this.reserved = conf.getLong(
+    this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
+        + "." + StringUtils.toLowerCase(storageType.toString()), conf.getLong(
         DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
-        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT);
+        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));
     this.reservedForRbw = new AtomicLong(0L);
     this.currentDir = currentDir; 
     File parent = currentDir.getParentFile();
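The change reads the storage-type-specific key first and passes the generic key's value as the inner default, so the specific key wins whenever it is set. A minimal, self-contained sketch of that lookup order (a plain map stands in for Hadoop's Configuration; class and method names here are illustrative, not from the patch):

```java
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

public class ReservedSpaceLookup {
  static final String BASE_KEY = "dfs.datanode.du.reserved";
  static final long DEFAULT_RESERVED = 0L; // DFS_DATANODE_DU_RESERVED_DEFAULT

  // Stands in for Configuration#getLong(key, defaultValue).
  static long getLong(Map<String, Long> conf, String key, long defaultValue) {
    Long value = conf.get(key);
    return value != null ? value : defaultValue;
  }

  // Mirrors the patched constructor: specific key first, generic key as the
  // inner default, hard-coded default last.
  static long reservedFor(Map<String, Long> conf, String storageType) {
    String typedKey = BASE_KEY + "." + storageType.toLowerCase(Locale.ENGLISH);
    return getLong(conf, typedKey, getLong(conf, BASE_KEY, DEFAULT_RESERVED));
  }

  public static void main(String[] args) {
    Map<String, Long> conf = new HashMap<>();
    conf.put(BASE_KEY, 100L);
    conf.put(BASE_KEY + ".ram_disk", 1L);
    System.out.println(reservedFor(conf, "RAM_DISK")); // 1   (specific key)
    System.out.println(reservedFor(conf, "SSD"));      // 100 (generic fallback)
  }
}
```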

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -276,6 +276,11 @@
   <name>dfs.datanode.du.reserved</name>
   <value>0</value>
   <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+      Storage-type-specific reservation is also supported. The property can be suffixed with
+      a corresponding storage type ([ssd]/[disk]/[archive]/[ram_disk]) for clusters with
+      heterogeneous storage. For example, reserved space for RAM_DISK storage can be configured
+      using the property 'dfs.datanode.du.reserved.ram_disk'. If a storage-type-specific
+      reservation is not configured, then dfs.datanode.du.reserved will be used.
   </description>
 </property>
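For a concrete picture of how the generic and per-type keys combine, here is a hedged example using the real org.apache.hadoop.conf.Configuration API (the same property names would appear as <property> entries in hdfs-site.xml; the byte values are made up for illustration):

```java
import org.apache.hadoop.conf.Configuration;

public class ReservedSpaceConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Generic reservation: any storage type without its own key falls back here.
    conf.setLong("dfs.datanode.du.reserved", 10L * 1024 * 1024 * 1024);    // 10 GiB
    // Per-type override for RAM_DISK volumes; note the lower-cased suffix.
    conf.setLong("dfs.datanode.du.reserved.ram_disk", 512L * 1024 * 1024); // 512 MiB
    // A RAM_DISK volume resolves 512 MiB; a DISK volume resolves 10 GiB.
    System.out.println(conf.getLong("dfs.datanode.du.reserved.ram_disk", 0L));
  }
}
```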
 

+ 34 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
+import org.apache.hadoop.util.StringUtils;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -159,4 +160,37 @@ public class TestFsVolumeList {
     assertEquals(expectedNonDfsUsage, spyVolume.getNonDfsUsed());
   }
 
+  @Test
+  public void testDfsReservedForDifferentStorageTypes() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY, 100L);
+
+    File volDir = new File(baseDir, "volume-0");
+    volDir.mkdirs();
+    // When a storage-type-specific reservation is not configured, the volume
+    // should fall back to dfs.datanode.du.reserved.
+    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir, conf,
+        StorageType.RAM_DISK);
+    assertEquals("", 100L, volume.getReserved());
+    // When a storage-type-specific reservation is configured, it takes
+    // precedence over the generic value.
+    conf.setLong(
+        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
+            + StringUtils.toLowerCase(StorageType.RAM_DISK.toString()), 1L);
+    conf.setLong(
+        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
+            + StringUtils.toLowerCase(StorageType.SSD.toString()), 2L);
+    FsVolumeImpl volume1 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.RAM_DISK);
+    assertEquals("", 1L, volume1.getReserved());
+    FsVolumeImpl volume2 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.SSD);
+    assertEquals("", 2L, volume2.getReserved());
+    FsVolumeImpl volume3 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.DISK);
+    assertEquals("", 100L, volume3.getReserved());
+    FsVolumeImpl volume4 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.DEFAULT);
+    assertEquals("", 100L, volume4.getReserved());
+  }
+
 }