HADOOP-1627. Various small improvements to 'dfsadmin -report' output.
(rangadi)


git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@676671 13f79535-47bb-0310-9956-ffa450edef68

Raghu Angadi · parent commit 67777c873e

CHANGES.txt | +3 -0

@@ -54,6 +54,9 @@ Trunk (unreleased changes)
 
   IMPROVEMENTS
 
+    HADOOP-1627. Various small improvements to 'dfsadmin -report' output.
+    (rangadi)
+
     HADOOP-3577. Tools to inject blocks into name node and simulated
     data nodes for testing. (Sanjay Radia via hairong)
 

src/hdfs/org/apache/hadoop/hdfs/DistributedFileSystem.java | +1 -1

@@ -243,7 +243,7 @@ public class DistributedFileSystem extends FileSystem {
     return "DFS[" + dfs + "]";
   }
 
-  DFSClient getClient() {
+  public DFSClient getClient() {
     return dfs;
   }        
   

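A note on the visibility change above: widening getClient() from package-private to public is what lets DFSAdmin (see the last diff below) ask DFSClient directly for per-state datanode reports. A minimal sketch of the call pattern this enables, assuming a Configuration that points at a running HDFS cluster (class name illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;

    public class LiveDeadReport {                      // illustrative class name
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();      // assumes fs.default.name points at HDFS
        FileSystem fs = FileSystem.get(conf);
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Only callable from outside the package now that getClient() is public:
          DatanodeInfo[] live = dfs.getClient().datanodeReport(DatanodeReportType.LIVE);
          DatanodeInfo[] dead = dfs.getClient().datanodeReport(DatanodeReportType.DEAD);
          System.out.println(live.length + " live, " + dead.length + " dead");
        }
      }
    }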
src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java | +5 -4

@@ -150,17 +150,18 @@ public class DatanodeInfo extends DatanodeID implements Node {
         !NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append("Rack: "+location+"\n");
     }
+    buffer.append("Decommission Status : ");
     if (isDecommissioned()) {
-      buffer.append("State          : Decommissioned\n");
+      buffer.append("Decommissioned\n");
     } else if (isDecommissionInProgress()) {
-      buffer.append("State          : Decommission in progress\n");
+      buffer.append("Decommission in progress\n");
     } else {
-      buffer.append("State          : In Service\n");
+      buffer.append("Normal\n");
     }
     buffer.append("Total raw bytes: "+c+" ("+FsShell.byteDesc(c)+")"+"\n");
     buffer.append("Remaining raw bytes: " +r+ "("+FsShell.byteDesc(r)+")"+"\n");
     buffer.append("Used raw bytes: "+u+" ("+FsShell.byteDesc(u)+")"+"\n");
-    buffer.append("% used: "+FsShell.limitDecimalTo2(((1.0*u)/c)*100)+"%"+"\n");
+    buffer.append("% used: "+FsShell.limitDecimalTo2(100.0*u/(c+1e-10))+"%\n");
     buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
     return buffer.toString();
   }
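The "% used" tweak above is a divide-by-zero guard as much as a formatting change: a datanode that reports zero capacity (for instance, one whose counters were just reset, as in the DatanodeDescriptor change below) would previously make the report print NaN. A small self-contained sketch of the difference, with illustrative values:

    public class PercentUsed {                       // illustrative class name
      public static void main(String[] args) {
        long u = 0, c = 0;                           // used/capacity of a node reporting nothing
        double before = ((1.0 * u) / c) * 100;       // 0.0/0.0 -> NaN in Java
        double after  = 100.0 * u / (c + 1e-10);     // epsilon keeps the divisor nonzero -> 0.0
        System.out.println(before + " vs " + after); // prints "NaN vs 0.0"
      }
    }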

src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java | +1 -0

@@ -200,6 +200,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   void resetBlocks() {
     this.capacity = 0;
     this.remaining = 0;
+    this.dfsUsed = 0;
     this.xceiverCount = 0;
     this.blockList = null;
   }
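The dfsUsed line above closes a small accounting gap: resetBlocks() zeroed capacity and remaining but left dfsUsed at its old value, so a datanode whose state was being reset could presumably keep contributing stale used-byte counts to the report until it next sent a fresh report.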

src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java | +17 -10

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
@@ -25,6 +26,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.fs.FileSystem;
@@ -166,7 +168,6 @@ public class DFSAdmin extends FsShell {
       long raw = ds.getCapacity();
       long rawUsed = ds.getDfsUsed();
       long remaining = ds.getRemaining();
-      long used = dfs.getUsed();
       boolean mode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
       UpgradeStatusReport status = 
                       dfs.distributedUpgradeProgress(UpgradeAction.GET_STATUS);
@@ -187,19 +188,25 @@ public class DFSAdmin extends FsShell {
                          + limitDecimalTo2(((1.0 * rawUsed) / raw) * 100)
                          + "%");
       System.out.println();
-      System.out.println("Total effective bytes: " + used
-                         + " (" + byteDesc(used) + ")");
-      System.out.println("Effective replication multiplier: "
-                         + (1.0 * rawUsed / used));
 
       System.out.println("-------------------------------------------------");
-      DatanodeInfo[] info = dfs.getDataNodeStats();
-      System.out.println("Datanodes available: " + info.length);
-      System.out.println();
-      for (int i = 0; i < info.length; i++) {
-        System.out.println(info[i].getDatanodeReport());
+      
+      DatanodeInfo[] live = dfs.getClient().datanodeReport(
+                                                   DatanodeReportType.LIVE);
+      DatanodeInfo[] dead = dfs.getClient().datanodeReport(
+                                                   DatanodeReportType.DEAD);
+      System.out.println("Datanodes available: " + live.length +
+                         " (" + (live.length + dead.length) + " total, " + 
+                         dead.length + " dead)\n");
+      
+      for (DatanodeInfo dn : live) {
+        System.out.println(dn.getDatanodeReport());
         System.out.println();
       }
+      for (DatanodeInfo dn : dead) {
+        System.out.println(dn.getDatanodeReport());
+        System.out.println();
+      }      
     }
   }
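Taken together, the DFSAdmin changes drop the "effective bytes" lines and list dead datanodes after the live ones instead of hiding them. The datanode section of 'bin/hadoop dfsadmin -report' would then look roughly like this (all values illustrative, and only fields touched by this patch are shown; note the missing space after the remaining-bytes figure faithfully mirrors the format string in DatanodeInfo above):

    Datanodes available: 2 (3 total, 1 dead)

    Decommission Status : Normal
    Total raw bytes: 107374182400 (100 GB)
    Remaining raw bytes: 53687091200(50 GB)
    Used raw bytes: 53687091200 (50 GB)
    % used: 50%
    Last contact: Thu Jul 24 12:00:00 PDT 2008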