Преглед изворног кода

HDFS-17725. DataNodeVolumeMetrics and BalancerMetrics class add MetricTag. (#7382) Contributed by Zhaobo Huang.

Signed-off-by: Shilun Fan <slfan1989@apache.org>
Zhaobo Huang пре 3 месеца
родитељ
комит
1a81c3b564

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerMetrics.java

@@ -43,6 +43,11 @@ final class BalancerMetrics {
   @Metric("Number of over utilized nodes")
   private MutableGaugeInt numOfOverUtilizedNodes;
 
+  @Metric(value = {"BlockPoolID", "Current BlockPoolID"}, type = Metric.Type.TAG)
+  public String getBlockPoolID() {
+    return balancer.getNnc().getBlockpoolID();
+  }
+
   private BalancerMetrics(Balancer b) {
     this.balancer = b;
   }

+ 9 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/DataNodeVolumeMetrics.java

@@ -31,6 +31,8 @@ import org.apache.hadoop.metrics2.lib.MutableQuantiles;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 /**
  * This class is for maintaining Datanode Volume IO related statistics and
@@ -43,6 +45,13 @@ import java.util.concurrent.ThreadLocalRandom;
 public class DataNodeVolumeMetrics {
   private final MetricsRegistry registry = new MetricsRegistry("FsVolume");
 
+  @Metric(value = {"VolumeName", "Current VolumeName"}, type = Metric.Type.TAG)
+  public String getVolumeName() {
+    Pattern pattern = Pattern.compile("(?:DataNodeVolume-|UndefinedDataNodeVolume)(.*)");
+    Matcher matcher = pattern.matcher(name);
+    return matcher.find() ? matcher.group(1) : name;
+  }
+
   @Metric("number of metadata operations")
   private MutableCounterLong totalMetadataOperations;
   @Metric("metadata operation rate")

+ 13 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerService.java

@@ -242,6 +242,19 @@ public class TestBalancerService {
         }
       }, 100, 10000);
 
+      GenericTestUtils.waitFor(() -> {
+        final String balancerMetricsName =
+            "Balancer-" + cluster.getNameNode(0).getNamesystem().getBlockPoolId();
+        String blockPoolId = cluster.getNameNode(0).getNamesystem().getBlockPoolId();
+        MetricsRecordBuilder metrics = MetricsAsserts.getMetrics(balancerMetricsName);
+        try {
+          MetricsAsserts.assertTag("BlockPoolID", blockPoolId, metrics);
+          return true;
+        } catch (Exception e) {
+          return false;
+        }
+      }, 100, 10000);
+
       TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client, cluster,
           BalancerParameters.DEFAULT);
 

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java

@@ -44,6 +44,8 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.test.MetricsAsserts;
+
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
@@ -187,6 +189,8 @@ public class TestDataNodeVolumeMetrics {
         + metrics.getFileIoErrorSampleCount());
     LOG.info("fileIoErrorMean : " + metrics.getFileIoErrorMean());
     LOG.info("fileIoErrorStdDev : " + metrics.getFileIoErrorStdDev());
+
+    MetricsAsserts.assertTag("VolumeName", metrics.getVolumeName(), rb);
   }
 
   @Test