Browse code

HDFS-15155. writeIoRate of DataNodeVolumeMetrics is never used. Contributed by Haibin Huang.

Ayush Saxena 5 years ago
parent
commit
fb1d7635ae

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DataNodeVolumeMetrics.java

@@ -151,15 +151,15 @@ public class DataNodeVolumeMetrics {
 
   // Based on writeIoRate
   public long getWriteIoSampleCount() {
-    return syncIoRate.lastStat().numSamples();
+    return writeIoRate.lastStat().numSamples();
   }
 
   public double getWriteIoMean() {
-    return syncIoRate.lastStat().mean();
+    return writeIoRate.lastStat().mean();
   }
 
   public double getWriteIoStdDev() {
-    return syncIoRate.lastStat().stddev();
+    return writeIoRate.lastStat().stddev();
   }
 
   public long getTotalFileIoErrors() {

+ 55 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java

@@ -19,6 +19,8 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -177,4 +179,57 @@ public class TestDataNodeVolumeMetrics {
     LOG.info("fileIoErrorMean : " + metrics.getFileIoErrorMean());
     LOG.info("fileIoErrorStdDev : " + metrics.getFileIoErrorStdDev());
   }
+
+  @Test
+  public void testWriteIoVolumeMetrics() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(
+        DFSConfigKeys.DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
+        100);
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf)
+            .numDataNodes(NUM_DATANODES)
+            .storageTypes(
+                new StorageType[]{StorageType.RAM_DISK, StorageType.DISK})
+            .storagesPerDatanode(2).build();
+
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      final Path fileName = new Path("/test.dat");
+      final long fileLen = Integer.MAX_VALUE + 1L;
+      long lastWriteIoSampleCount;
+
+      DFSTestUtil.createFile(fs, fileName, false, BLOCK_SIZE, fileLen,
+          fs.getDefaultBlockSize(fileName), REPL, 1L, true);
+
+      List<DataNode> datanodes = cluster.getDataNodes();
+      DataNode datanode = datanodes.get(0);
+
+      final ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+      final FsVolumeSpi volume = datanode.getFSDataset().getVolume(block);
+      DataNodeVolumeMetrics metrics = volume.getMetrics();
+
+      assertEquals(0, metrics.getSyncIoSampleCount());
+      assertNotEquals(0, metrics.getWriteIoSampleCount());
+      assertTrue(metrics.getFlushIoSampleCount() > metrics
+          .getSyncIoSampleCount());
+      assertTrue(metrics.getWriteIoSampleCount() > metrics
+          .getFlushIoSampleCount());
+
+      lastWriteIoSampleCount = metrics.getWriteIoSampleCount();
+
+      try (FSDataOutputStream out = fs.append(fileName)) {
+        out.writeBytes("hello world");
+        out.hflush();
+      }
+
+      assertEquals(0, metrics.getSyncIoSampleCount());
+      assertTrue(metrics.getWriteIoSampleCount() > lastWriteIoSampleCount);
+
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }