
HDFS-15028. Keep the capacity of volume and reduce a system call. Contributed by Yang Yun.

Signed-off-by: Masatake Iwasaki <iwasakims@apache.org>
Masatake Iwasaki, 5 years ago
Commit 11cd5b6e39

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -153,6 +153,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.datanode.non.local.lazy.persist";
   public static final boolean DFS_DATANODE_NON_LOCAL_LAZY_PERSIST_DEFAULT =
       false;
+  public static final String DFS_DATANODE_FIXED_VOLUME_SIZE_KEY =
+      "dfs.datanode.fixed.volume.size";
+  public static final boolean DFS_DATANODE_FIXED_VOLUME_SIZE_DEFAULT = false;
 
   // This setting is for testing/internal use only.
   public static final String  DFS_DATANODE_DUPLICATE_REPLICA_DELETION = "dfs.datanode.duplicate.replica.deletion";
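
Since the flag is read when each FsVolumeImpl is constructed (next file), it has to be in place before the DataNode builds its volumes. A minimal sketch of setting it programmatically, as the new test below does; HdfsConfiguration and the key constants come from the patch, while the wrapper class is invented for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class FixedVolumeSizeExample {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Opt in to the cached-capacity behavior; the default (false)
    // keeps the old per-heartbeat lookup.
    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_FIXED_VOLUME_SIZE_KEY, true);
    System.out.println(conf.getBoolean(
        DFSConfigKeys.DFS_DATANODE_FIXED_VOLUME_SIZE_KEY,
        DFSConfigKeys.DFS_DATANODE_FIXED_VOLUME_SIZE_DEFAULT));
  }
}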

+ 14 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java

@@ -120,6 +120,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   private final File currentDir;    // <StorageDirectory>/current
   private final DF usage;
   private final ReservedSpaceCalculator reserved;
+  private long cachedCapacity;
   private CloseableReferenceCount reference = new CloseableReferenceCount();
 
   // Disk space reserved for blocks (RBW or Re-replicating) open for write.
@@ -166,9 +167,16 @@ public class FsVolumeImpl implements FsVolumeSpi {
     if (this.usage != null) {
       reserved = new ReservedSpaceCalculator.Builder(conf)
           .setUsage(this.usage).setStorageType(storageType).build();
+      boolean fixedSizeVolume = conf.getBoolean(
+          DFSConfigKeys.DFS_DATANODE_FIXED_VOLUME_SIZE_KEY,
+          DFSConfigKeys.DFS_DATANODE_FIXED_VOLUME_SIZE_DEFAULT);
+      if (fixedSizeVolume) {
+        cachedCapacity = this.usage.getCapacity();
+      }
     } else {
       reserved = null;
       LOG.warn("Setting reserved to null as usage is null");
+      cachedCapacity = -1;
     }
     if (currentDir != null) {
       File parent = currentDir.getParentFile();
@@ -402,7 +410,12 @@ public class FsVolumeImpl implements FsVolumeSpi {
   @VisibleForTesting
   public long getCapacity() {
     if (configuredCapacity < 0L) {
-      long remaining = usage.getCapacity() - getReserved();
+      long remaining;
+      if (cachedCapacity > 0L) {
+        remaining = cachedCapacity - getReserved();
+      } else {
+        remaining = usage.getCapacity() - getReserved();
+      }
       return Math.max(remaining, 0L);
     }
     return configuredCapacity;
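
Together the two hunks form a cache-once pattern: with the flag on, the DF probe is paid a single time in the constructor, and every later getCapacity() call merely subtracts the reservation from the cached figure. A standalone sketch of the same pattern, not Hadoop code; java.io.File.getTotalSpace stands in for DF, which the hdfs-default.xml description below names as the underlying call:

import java.io.File;

public class CachedCapacitySketch {
  private final File root;
  private final long reserved;
  private long cachedCapacity = -1L;  // -1 means "not cached"

  public CachedCapacitySketch(File root, boolean fixedSize, long reserved) {
    this.root = root;
    this.reserved = reserved;
    if (fixedSize) {
      // Pay the system call once, at construction time.
      cachedCapacity = root.getTotalSpace();
    }
  }

  public long getCapacity() {
    long raw = (cachedCapacity > 0L)
        ? cachedCapacity          // reuse the cached value
        : root.getTotalSpace();   // query the filesystem on every call
    // Never report negative capacity if the reservation exceeds the total.
    return Math.max(raw - reserved, 0L);
  }

  public static void main(String[] args) {
    CachedCapacitySketch v =
        new CachedCapacitySketch(new File("/"), true, 1024L * 1024L);
    System.out.println(v.getCapacity());
  }
}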

+ 10 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -4405,6 +4405,16 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.datanode.fixed.volume.size</name>
+  <value>false</value>
+  <description>
+    If false, the capacity of the volume is obtained by calling
+    File's getTotalSpace on every heartbeat.
+    If true, the capacity is cached on the first call and reused afterwards.
+  </description>
+</property>
+
 <property>
   <name>dfs.ha.fencing.methods</name>
   <value></value>
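
For a cluster rollout, the equivalent override would normally go into hdfs-site.xml on each DataNode; this is a sketch only, since file locations and restart procedure depend on the deployment, and the flag only takes effect when volumes are constructed at DataNode startup:

<!-- hdfs-site.xml on the DataNode; requires a DataNode restart. -->
<property>
  <name>dfs.datanode.fixed.volume.size</name>
  <value>true</value>
</property>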

+ 51 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java

@@ -61,6 +61,8 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 public class TestFsVolumeList {
@@ -209,7 +211,7 @@ public class TestFsVolumeList {
     /*
      * Lets have the example.
      * Capacity - 1000
-     * Reserved - 100
+     * Reserved - 100G
      * DfsUsed  - 200
      * Actual Non-DfsUsed - 300 -->(expected)
      * ReservedForReplicas - 50
@@ -403,4 +405,51 @@ public class TestFsVolumeList {
           threadPool1, threadPool2);
     }
   }
-}
+
+  @Test
+  public void testGetCachedVolumeCapacity() throws IOException {
+    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_FIXED_VOLUME_SIZE_KEY,
+        DFSConfigKeys.DFS_DATANODE_FIXED_VOLUME_SIZE_DEFAULT);
+
+    long capacity = 4000L;
+    DF usage = mock(DF.class);
+    when(usage.getCapacity()).thenReturn(capacity);
+
+    FsVolumeImpl volumeChanged = new FsVolumeImplBuilder()
+        .setConf(conf)
+        .setDataset(dataset)
+        .setStorageID("storage-id")
+        .setStorageDirectory(new StorageDirectory(StorageLocation.parse(
+            "[RAM_DISK]volume-changed")))
+        .setUsage(usage)
+        .build();
+
+    int callTimes = 5;
+    for (int i = 0; i < callTimes; i++) {
+      assertEquals(capacity, volumeChanged.getCapacity());
+    }
+
+    verify(usage, times(callTimes)).getCapacity();
+
+    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_FIXED_VOLUME_SIZE_KEY, true);
+    FsVolumeImpl volumeFixed = new FsVolumeImplBuilder()
+        .setConf(conf)
+        .setDataset(dataset)
+        .setStorageID("storage-id")
+        .setStorageDirectory(new StorageDirectory(StorageLocation.parse(
+            "[RAM_DISK]volume-fixed")))
+        .setUsage(usage)
+        .build();
+
+    for (int i = 0; i < callTimes; i++) {
+      assertEquals(capacity, volumeFixed.getCapacity());
+    }
+
+    // For the fixed-size volume the cached capacity is reused, so
+    // DF.getCapacity() is invoked only once more, at construction time.
+    verify(usage, times(callTimes + 1)).getCapacity();
+
+    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_FIXED_VOLUME_SIZE_KEY,
+        DFSConfigKeys.DFS_DATANODE_FIXED_VOLUME_SIZE_DEFAULT);
+  }
+}
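
The test's assertions lean entirely on Mockito's invocation counting. A stripped-down, HDFS-independent illustration of that mechanism; it assumes only mockito-core on the classpath, and the Probe interface is invented for the example:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class InvocationCountSketch {
  public interface Probe { long read(); }

  public static void main(String[] args) {
    Probe probe = mock(Probe.class);
    when(probe.read()).thenReturn(4000L);
    probe.read();
    probe.read();
    // Throws if read() was invoked any number of times other than 2;
    // the test above uses the same mechanism to detect the saved call.
    verify(probe, times(2)).read();
  }
}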