
HDFS-4860. Add additional attributes to JMX beans. Contributed by Trevor Lorimer

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1500139 13f79535-47bb-0310-9956-ffa450edef68
Konstantin Boudnik 12 years ago
parent
commit
ed70fb1608

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java

@@ -101,8 +101,10 @@ public class JvmMetrics implements MetricsSource {
     Runtime runtime = Runtime.getRuntime();
     rb.addGauge(MemNonHeapUsedM, memNonHeap.getUsed() / M)
       .addGauge(MemNonHeapCommittedM, memNonHeap.getCommitted() / M)
+      .addGauge(MemNonHeapMaxM, memNonHeap.getMax() / M)
       .addGauge(MemHeapUsedM, memHeap.getUsed() / M)
       .addGauge(MemHeapCommittedM, memHeap.getCommitted() / M)
+      .addGauge(MemHeapMaxM, memHeap.getMax() / M)
       .addGauge(MemMaxM, runtime.maxMemory() / M);
   }
 

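Both new gauges read from the JVM's MemoryMXBean, the same source JvmMetrics already uses for the used/committed values. A minimal standalone sketch of where the numbers come from (the class name is illustrative, not part of the patch):

    import java.lang.management.ManagementFactory;
    import java.lang.management.MemoryUsage;

    public class JvmMemSketch {
      private static final float M = 1024 * 1024;

      public static void main(String[] args) {
        MemoryUsage heap =
            ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
        MemoryUsage nonHeap =
            ManagementFactory.getMemoryMXBean().getNonHeapMemoryUsage();
        // MemoryUsage.getMax() returns -1 when no limit is configured, so the
        // new MemNonHeapMaxM gauge can legitimately report a negative value.
        System.out.printf("MemHeapMaxM = %.2f%n", heap.getMax() / M);
        System.out.printf("MemNonHeapMaxM = %.2f%n", nonHeap.getMax() / M);
      }
    }
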
+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetricsInfo.java

@@ -32,8 +32,10 @@ public enum JvmMetricsInfo implements MetricsInfo {
   // metrics
   MemNonHeapUsedM("Non-heap memory used in MB"),
   MemNonHeapCommittedM("Non-heap memory committed in MB"),
+  MemNonHeapMaxM("Non-heap memory max in MB"),
   MemHeapUsedM("Heap memory used in MB"),
   MemHeapCommittedM("Heap memory committed in MB"),
+  MemHeapMaxM("Heap memory max in MB"),
   MemMaxM("Max memory size in MB"),
   GcCount("Total GC count"),
   GcTimeMillis("Total GC time in milliseconds"),

+ 109 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -171,6 +171,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
+import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
@@ -210,6 +211,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
 import org.mortbay.util.ajax.JSON;
@@ -4992,6 +4994,28 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     return getBlockManager().getDatanodeManager().getNumDeadDataNodes();
   }
   
+  @Override // FSNamesystemMBean
+  public int getNumDecomLiveDataNodes() {
+    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+    getBlockManager().getDatanodeManager().fetchDatanodes(live, null, false);
+    int liveDecommissioned = 0;
+    for (DatanodeDescriptor node : live) {
+      liveDecommissioned += node.isDecommissioned() ? 1 : 0;
+    }
+    return liveDecommissioned;
+  }
+
+  @Override // FSNamesystemMBean
+  public int getNumDecomDeadDataNodes() {
+    final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+    getBlockManager().getDatanodeManager().fetchDatanodes(null, dead, false);
+    int deadDecommissioned = 0;
+    for (DatanodeDescriptor node : dead) {
+      deadDecommissioned += node.isDecommissioned() ? 1 : 0;
+    }
+    return deadDecommissioned;
+  }
+
   @Override // FSNamesystemMBean
   @Metric({"StaleDataNodes", 
     "Number of datanodes marked stale due to delayed heartbeat"})
@@ -5804,6 +5828,91 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     return JSON.toString(statusMap);
   }
 
+  @Override // NameNodeMXBean
+  public String getNodeUsage() {
+    float median = 0;
+    float max = 0;
+    float min = 0;
+    float dev = 0;
+
+    final Map<String, Map<String,Object>> info =
+        new HashMap<String, Map<String,Object>>();
+    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+    blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
+
+    if (live.size() > 0) {
+      float totalDfsUsed = 0;
+      float[] usages = new float[live.size()];
+      int i = 0;
+      for (DatanodeDescriptor dn : live) {
+        usages[i++] = dn.getDfsUsedPercent();
+        totalDfsUsed += dn.getDfsUsedPercent();
+      }
+      totalDfsUsed /= live.size();
+      Arrays.sort(usages);
+      median = usages[usages.length / 2];
+      max = usages[usages.length - 1];
+      min = usages[0];
+
+      for (i = 0; i < usages.length; i++) {
+        dev += (usages[i] - totalDfsUsed) * (usages[i] - totalDfsUsed);
+      }
+      dev = (float) Math.sqrt(dev / usages.length);
+    }
+
+    final Map<String, Object> innerInfo = new HashMap<String, Object>();
+    innerInfo.put("min", StringUtils.format("%.2f%%", min));
+    innerInfo.put("median", StringUtils.format("%.2f%%", median));
+    innerInfo.put("max", StringUtils.format("%.2f%%", max));
+    innerInfo.put("stdDev", StringUtils.format("%.2f%%", dev));
+    info.put("nodeUsage", innerInfo);
+
+    return JSON.toString(info);
+  }
+
+  @Override  // NameNodeMXBean
+  public String getNameJournalStatus() {
+    List<Map<String, String>> jasList = new ArrayList<Map<String, String>>();
+    FSEditLog log = getFSImage().getEditLog();
+    if (log != null) {
+      boolean openForWrite = log.isOpenForWrite();
+      for (JournalAndStream jas : log.getJournals()) {
+        final Map<String, String> jasMap = new HashMap<String, String>();
+        String manager = jas.getManager().toString();
+
+        jasMap.put("required", String.valueOf(jas.isRequired()));
+        jasMap.put("disabled", String.valueOf(jas.isDisabled()));
+        jasMap.put("manager", manager);
+
+        if (jas.isDisabled()) {
+          jasMap.put("stream", "Failed");
+        } else if (openForWrite) {
+          EditLogOutputStream elos = jas.getCurrentStream();
+          if (elos != null) {
+            jasMap.put("stream", elos.generateHtmlReport());
+          } else {
+            jasMap.put("stream", "not currently writing");
+          }
+        } else {
+          jasMap.put("stream", "open for read");
+        }
+        jasList.add(jasMap);
+      }
+    }
+    return JSON.toString(jasList);
+  }
+
+  @Override  // NameNodeMXBean
+  public String getNNStarted() {
+    return getStartTime().toString();
+  }
+
+  @Override  // NameNodeMXBean
+  public String getCompileInfo() {
+    return VersionInfo.getDate() + " by " + VersionInfo.getUser() +
+        " from " + VersionInfo.getBranch();
+  }
+
   /** @return the block manager. */
   public BlockManager getBlockManager() {
     return blockManager;

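Since getNodeUsage() above stores its four formatted values in a map keyed "nodeUsage", the NodeUsage attribute serializes to JSON of this shape (the percentages are placeholders, and key order may vary because a HashMap is used):

    {"nodeUsage":{"min":"12.34%","median":"45.67%","max":"89.01%","stdDev":"2.34%"}}

One subtlety worth noting: after the division by live.size(), totalDfsUsed holds the mean DFS-used percentage, which is what the standard-deviation loop then subtracts.
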
+ 28 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java

@@ -174,4 +174,32 @@ public interface NameNodeMXBean {
    * @return the name dir status information, as a JSON string.
    */
   public String getNameDirStatuses();
+
+  /**
+   * Get the max, median, min and standard deviation of DataNode usage.
+   *
+   * @return the DataNode usage information, as a JSON string.
+   */
+  public String getNodeUsage();
+
+  /**
+   * Get status information about the journals of the NN.
+   *
+   * @return the name journal status information, as a JSON string.
+   */
+  public String getNameJournalStatus();
+
+  /**
+   * Get the NameNode start time.
+   *
+   * @return the NameNode start time, as a date string.
+   */
+  public String getNNStarted();
+
+  /**
+   * Get the compilation information: the build date, user and branch.
+   *
+   * @return the compilation information, as a string.
+   */
+  public String getCompileInfo();
 }
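
The four new attributes surface on the existing Hadoop:service=NameNode,name=NameNodeInfo MBean (the same ObjectName the test below queries), and therefore also appear in the NameNode's /jmx servlet output. A hedged sketch of reading them over remote JMX; the host and port are placeholders, and remote JMX must be enabled on the NameNode JVM:

    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public class NameNodeJmxProbe {
      public static void main(String[] args) throws Exception {
        // namenode-host:8004 is an assumed endpoint, not a Hadoop default.
        JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi:///jndi/rmi://namenode-host:8004/jmxrmi");
        try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
          MBeanServerConnection mbs = jmxc.getMBeanServerConnection();
          ObjectName nn =
              new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
          System.out.println("NodeUsage = " + mbs.getAttribute(nn, "NodeUsage"));
          System.out.println("NNStarted = " + mbs.getAttribute(nn, "NNStarted"));
        }
      }
    }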

+ 12 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java

@@ -118,4 +118,16 @@ public interface FSNamesystemMBean {
    * @return number of stale data nodes
    */
   public int getNumStaleDataNodes();
+
+  /**
+   * Number of decommissioned live data nodes
+   * @return number of decommissioned live data nodes
+   */
+  public int getNumDecomLiveDataNodes();
+
+  /**
+   * Number of decommissioned dead data nodes
+   * @return number of decommissioned dead data nodes
+   */
+  public int getNumDecomDeadDataNodes();
 }
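
These two counters are exposed through the FSNamesystemState bean rather than NameNodeInfo, since FSNamesystem registers this interface under that name in registerMBean(). Reusing the mbs connection from the sketch above, reading one would look like:

    ObjectName fsState =
        new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
    int decomLive = (Integer) mbs.getAttribute(fsState, "NumDecomLiveDataNodes");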

+ 14 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java

@@ -112,6 +112,20 @@ public class TestNameNodeMXBean {
       String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
           "DeadNodes"));
       assertEquals(fsn.getDeadNodes(), deadnodeinfo);
+      // get attribute NodeUsage
+      String nodeUsage = (String) (mbs.getAttribute(mxbeanName,
+          "NodeUsage"));
+      assertEquals("Bad value for NodeUsage", fsn.getNodeUsage(), nodeUsage);
+      // get attribute NameJournalStatus
+      String nameJournalStatus = (String) (mbs.getAttribute(mxbeanName,
+          "NameJournalStatus"));
+      assertEquals("Bad value for NameJournalStatus", fsn.getNameJournalStatus(), nameJournalStatus);
+      // get attribute "NNStarted"
+      String nnStarted = (String) mbs.getAttribute(mxbeanName, "NNStarted");
+      assertEquals("Bad value for NNStarted", fsn.getNNStarted(), nnStarted);
+      // get attribute "CompileInfo"
+      String compileInfo = (String) mbs.getAttribute(mxbeanName, "CompileInfo");
+      assertEquals("Bad value for CompileInfo", fsn.getCompileInfo(), compileInfo);
       // get attribute NameDirStatuses
       String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
           "NameDirStatuses"));