
HADOOP-1463. HDFS report correct usage statistics for disk space
used by HDFS. (Hairong Kuang via dhruba)



git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@561573 13f79535-47bb-0310-9956-ffa450edef68

Dhruba Borthakur · 18 years ago · commit 0c2f526692

+ 3 - 0
CHANGES.txt

@@ -6,6 +6,9 @@ Trunk (unreleased changes)
   1. HADOOP-1636.  Allow configuration of the number of jobs kept in memory
      by the JobTracker. (Michael Bieniosek via omalley)
 
+  2. HADOOP-1463.  HDFS report correct usage statistics for disk space
+     used by HDFS.  (Hairong Kuang via dhruba)
+
 
 Branch 0.14 (unreleased changes)
 

+ 2 - 2
conf/hadoop-default.xml

@@ -258,7 +258,7 @@ creations/deletions), or "all".</description>
 <property>
   <name>dfs.datanode.du.reserved</name>
   <value>0</value>
-  <description>Reserved space in bytes. Always leave this much space free for non dfs use
+  <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
   </description>
 </property>
 
@@ -328,7 +328,7 @@ creations/deletions), or "all".</description>
 
 <property>
   <name>dfs.df.interval</name>
-  <value>3000</value>
+  <value>60000</value>
   <description>Disk usage statistics refresh interval in msec.</description>
 </property>
 

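A minimal sketch (not part of this patch) of how the two settings above are consumed: FSVolume picks up dfs.datanode.du.reserved in its constructor (see FSDataset.java below), and dfs.df.interval controls how often the df-based statistics refresh, per its description. The class name and default values in the sketch are illustrative only.

import org.apache.hadoop.conf.Configuration;

public class DiskSettingsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // bytes per volume to keep free for non-dfs use
    long reserved = conf.getLong("dfs.datanode.du.reserved", 0);
    // how often df-backed statistics are refreshed, in msec (configured default is now 60000)
    long dfInterval = conf.getLong("dfs.df.interval", 60000);
    System.out.println("reserved=" + reserved + ", dfInterval=" + dfInterval);
  }
}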
+ 7 - 2
src/java/org/apache/hadoop/dfs/DFSAdmin.java

@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.dfs.DistributedFileSystem.DiskStatus;
 import org.apache.hadoop.dfs.FSConstants.UpgradeAction;
 
 /**
@@ -44,8 +45,10 @@ public class DFSAdmin extends FsShell {
   public void report() throws IOException {
     if (fs instanceof DistributedFileSystem) {
       DistributedFileSystem dfs = (DistributedFileSystem) fs;
-      long raw = dfs.getRawCapacity();
-      long rawUsed = dfs.getRawUsed();
+      DiskStatus ds = dfs.getDiskStatus();
+      long raw = ds.getCapacity();
+      long rawUsed = ds.getDfsUsed();
+      long remaining = ds.getRemaining();
       long used = dfs.getUsed();
       boolean mode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
       UpgradeStatusReport status = 
@@ -59,6 +62,8 @@ public class DFSAdmin extends FsShell {
       }
       System.out.println("Total raw bytes: " + raw
                          + " (" + byteDesc(raw) + ")");
+      System.out.println("Remaining raw bytes: " + remaining
+          + " (" + byteDesc(remaining) + ")");
       System.out.println("Used raw bytes: " + rawUsed
                          + " (" + byteDesc(rawUsed) + ")");
       System.out.println("% used: "

+ 5 - 0
src/java/org/apache/hadoop/dfs/DFSClient.java

@@ -24,6 +24,7 @@ import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.ipc.*;
 import org.apache.hadoop.conf.*;
+import org.apache.hadoop.dfs.DistributedFileSystem.DiskStatus;
 import org.apache.hadoop.util.*;
 
 import org.apache.commons.logging.*;
@@ -430,6 +431,10 @@ class DFSClient implements FSConstants {
     return namenode.getFileInfo(src.toString());
   }
 
+  public DiskStatus getDiskStatus() throws IOException {
+    long rawNums[] = namenode.getStats();
+    return new DiskStatus(rawNums[0], rawNums[1], rawNums[2]);
+  }
   /**
    */
   public long totalRawCapacity() throws IOException {

+ 4 - 4
src/java/org/apache/hadoop/dfs/DataNode.java

@@ -130,7 +130,6 @@ public class DataNode implements FSConstants, Runnable {
   private static Thread dataNodeThread = null;
   String machineName;
   int defaultBytesPerChecksum = 512;
-
   private static class DataNodeMetrics implements Updater {
     private final MetricsRecord metricsRecord;
     private int bytesWritten = 0;
@@ -490,9 +489,10 @@ public class DataNode implements FSConstants, Runnable {
           // -- Total capacity
           // -- Bytes remaining
           //
-          DatanodeCommand cmd = namenode.sendHeartbeat(dnRegistration, 
-                                                       data.getCapacity(), 
-                                                       data.getRemaining(), 
+          DatanodeCommand cmd = namenode.sendHeartbeat(dnRegistration,
+                                                       data.getCapacity(),
+                                                       data.getDfsUsed(),
+                                                       data.getRemaining(),
                                                        xmitsInProgress,
                                                        xceiverCount.getValue());
           //LOG.info("Just sent heartbeat, with name " + localName);

+ 14 - 8
src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java

@@ -59,7 +59,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * @param nodeID id of the data node
    */
   public DatanodeDescriptor(DatanodeID nodeID) {
-    this(nodeID, 0L, 0L, 0);
+    this(nodeID, 0L, 0L, 0L, 0);
   }
 
   /** DatanodeDescriptor constructor
@@ -81,22 +81,24 @@ public class DatanodeDescriptor extends DatanodeInfo {
   public DatanodeDescriptor(DatanodeID nodeID, 
                             String networkLocation,
                             String hostName) {
-    this(nodeID, networkLocation, hostName, 0L, 0L, 0);
+    this(nodeID, networkLocation, hostName, 0L, 0L, 0L, 0);
   }
   
   /** DatanodeDescriptor constructor
    * 
    * @param nodeID id of the data node
    * @param capacity capacity of the data node
+   * @param dfsUsed space used by the data node
    * @param remaining remaing capacity of the data node
    * @param xceiverCount # of data transfers at the data node
    */
   public DatanodeDescriptor(DatanodeID nodeID, 
-                            long capacity, 
+                            long capacity,
+                            long dfsUsed,
                             long remaining,
                             int xceiverCount) {
     super(nodeID);
-    updateHeartbeat(capacity, remaining, xceiverCount);
+    updateHeartbeat(capacity, dfsUsed, remaining, xceiverCount);
     initWorkLists();
   }
 
@@ -104,18 +106,20 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * 
    * @param nodeID id of the data node
    * @param networkLocation location of the data node in network
-   * @param capacity capacity of the data node
+   * @param capacity capacity of the data node, including space used by non-dfs
+   * @param dfsUsed the used space by dfs datanode
    * @param remaining remaing capacity of the data node
    * @param xceiverCount # of data transfers at the data node
    */
   public DatanodeDescriptor(DatanodeID nodeID,
                             String networkLocation,
                             String hostName,
-                            long capacity, 
+                            long capacity,
+                            long dfsUsed,
                             long remaining,
                             int xceiverCount) {
     super(nodeID, networkLocation, hostName);
-    updateHeartbeat(capacity, remaining, xceiverCount);
+    updateHeartbeat(capacity, dfsUsed, remaining, xceiverCount);
     initWorkLists();
   }
 
@@ -151,8 +155,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
   
   /**
    */
-  void updateHeartbeat(long capacity, long remaining, int xceiverCount) {
+  void updateHeartbeat(long capacity, long dfsUsed, long remaining,
+      int xceiverCount) {
     this.capacity = capacity;
+    this.dfsUsed = dfsUsed;
     this.remaining = remaining;
     this.lastUpdate = System.currentTimeMillis();
     this.xceiverCount = xceiverCount;

+ 12 - 2
src/java/org/apache/hadoop/dfs/DatanodeInfo.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.net.NodeBase;
  */
 public class DatanodeInfo extends DatanodeID implements Node {
   protected long capacity;
+  protected long dfsUsed;
   protected long remaining;
   protected long lastUpdate;
   protected int xceiverCount;
@@ -63,6 +64,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   DatanodeInfo(DatanodeInfo from) {
     super(from);
     this.capacity = from.getCapacity();
+    this.dfsUsed = from.getDfsUsed();
     this.remaining = from.getRemaining();
     this.lastUpdate = from.getLastUpdate();
     this.xceiverCount = from.getXceiverCount();
@@ -74,6 +76,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   DatanodeInfo(DatanodeID nodeID) {
     super(nodeID);
     this.capacity = 0L;
+    this.dfsUsed = 0L;
     this.remaining = 0L;
     this.lastUpdate = 0L;
     this.xceiverCount = 0;
@@ -88,6 +91,9 @@ public class DatanodeInfo extends DatanodeID implements Node {
   
   /** The raw capacity. */
   public long getCapacity() { return capacity; }
+  
+  /** The used space by the data node. */
+  public long getDfsUsed() { return dfsUsed; }
 
   /** The raw free space. */
   public long getRemaining() { return remaining; }
@@ -144,7 +150,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     StringBuffer buffer = new StringBuffer();
     long c = getCapacity();
     long r = getRemaining();
-    long u = c - r;
+    long u = getDfsUsed();
     buffer.append("Name: "+name+"\n");
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append("Rack: "+location+"\n");
@@ -157,6 +163,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
       buffer.append("State          : In Service\n");
     }
     buffer.append("Total raw bytes: "+c+" ("+FsShell.byteDesc(c)+")"+"\n");
+    buffer.append("Remaining raw bytes: " +r+ "("+FsShell.byteDesc(r)+")"+"\n");
     buffer.append("Used raw bytes: "+u+" ("+FsShell.byteDesc(u)+")"+"\n");
     buffer.append("% used: "+FsShell.limitDecimal(((1.0*u)/c)*100, 2)+"%"+"\n");
     buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
@@ -168,7 +175,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     StringBuffer buffer = new StringBuffer();
     long c = getCapacity();
     long r = getRemaining();
-    long u = c - r;
+    long u = getDfsUsed();
     buffer.append(name);
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append(" "+location);
@@ -183,6 +190,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     buffer.append(" " + c + "(" + FsShell.byteDesc(c)+")");
     buffer.append(" " + u + "(" + FsShell.byteDesc(u)+")");
     buffer.append(" " + FsShell.limitDecimal(((1.0*u)/c)*100, 2)+"%");
+    buffer.append(" " + r + "(" + FsShell.byteDesc(r)+")");
     buffer.append(" " + new Date(lastUpdate));
     return buffer.toString();
   }
@@ -281,6 +289,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   public void write(DataOutput out) throws IOException {
     super.write(out);
     out.writeLong(capacity);
+    out.writeLong(dfsUsed);
     out.writeLong(remaining);
     out.writeLong(lastUpdate);
     out.writeInt(xceiverCount);
@@ -298,6 +307,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   public void readFields(DataInput in) throws IOException {
     super.readFields(in);
     this.capacity = in.readLong();
+    this.dfsUsed = in.readLong();
     this.remaining = in.readLong();
     this.lastUpdate = in.readLong();
     this.xceiverCount = in.readInt();

+ 4 - 3
src/java/org/apache/hadoop/dfs/DatanodeProtocol.java

@@ -31,9 +31,9 @@ import org.apache.hadoop.ipc.VersionedProtocol;
  **********************************************************************/
 interface DatanodeProtocol extends VersionedProtocol {
   /*
-   * 8: blockCrcUpgradeGetBlockLocations() added;
+   * 9: heartbeat sends also the data node used space;
    */
-  public static final long versionID = 8L;
+  public static final long versionID = 9L;
   
   // error code
   final static int NOTIFY = 0;
@@ -72,7 +72,8 @@ interface DatanodeProtocol extends VersionedProtocol {
    * or to copy them to other DataNodes, etc.
    */
   public DatanodeCommand sendHeartbeat(DatanodeRegistration registration,
-                                       long capacity, long remaining,
+                                       long capacity,
+                                       long dfsUsed, long remaining,
                                        int xmitsInProgress,
                                        int xceiverCount) throws IOException;
 

+ 29 - 0
src/java/org/apache/hadoop/dfs/DistributedFileSystem.java

@@ -210,6 +210,35 @@ public class DistributedFileSystem extends FileSystem {
   DFSClient getClient() {
     return dfs;
   }        
+  
+  public static class DiskStatus {
+    private long capacity;
+    private long dfsUsed;
+    private long remaining;
+    public DiskStatus(long capacity, long dfsUsed, long remaining) {
+      this.capacity = capacity;
+      this.dfsUsed = dfsUsed;
+      this.remaining = remaining;
+    }
+    
+    public long getCapacity() {
+      return capacity;
+    }
+    public long getDfsUsed() {
+      return dfsUsed;
+    }
+    public long getRemaining() {
+      return remaining;
+    }
+  }
+  
+
+  /** Return the disk usage of the filesystem, including total capacity,
+   * used space, and remaining space */
+  public DiskStatus getDiskStatus() throws IOException {
+    return dfs.getDiskStatus();
+  }
+  
   /** Return the total raw capacity of the filesystem, disregarding
    * replication .*/
   public long getRawCapacity() throws IOException{

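A short usage sketch (not part of this patch) of the new DiskStatus accessor; it mirrors what DFSAdmin.report() does above. The class name and the assumption that fs.default.name points at a DFS cluster are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.dfs.DistributedFileSystem;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;

public class DiskStatusSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);              // assumes a DFS cluster is configured
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      DistributedFileSystem.DiskStatus ds = dfs.getDiskStatus();
      System.out.println("capacity  : " + FsShell.byteDesc(ds.getCapacity()));
      System.out.println("dfs used  : " + FsShell.byteDesc(ds.getDfsUsed()));
      System.out.println("remaining : " + FsShell.byteDesc(ds.getRemaining()));
    }
  }
}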
+ 42 - 7
src/java/org/apache/hadoop/dfs/FSDataset.java

@@ -265,8 +265,10 @@ class FSDataset implements FSConstants {
     private FSDir dataDir;
     private File tmpDir;
     private DF usage;
+    private DU dfsUsage;
     private long reserved;
     private double usableDiskPct = USABLE_DISK_PCT_DEFAULT;
+
     
     FSVolume(File currentDir, Configuration conf) throws IOException {
       this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
@@ -286,17 +288,29 @@ class FSDataset implements FSConstants {
         }
       }
       this.usage = new DF(parent, conf);
+      this.dfsUsage = new DU(parent, conf);
     }
-      
+
+    void decDfsUsed(long value) {
+      dfsUsage.decDfsUsed(value);
+    }
+    
+    long getDfsUsed() throws IOException {
+      return dfsUsage.getUsed();
+    }
+    
     long getCapacity() throws IOException {
       return usage.getCapacity();
     }
       
     long getAvailable() throws IOException {
-      long capacity = usage.getCapacity();
-      long freespace = Math.round(usage.getAvailableSkipRefresh() -
-                                  capacity * (1 - usableDiskPct) - reserved); 
-      return (freespace > 0) ? freespace : 0;
+      long remaining = getCapacity()-getDfsUsed()-reserved;
+      long available = usage.getAvailable();
+      if (remaining>available) {
+        remaining = available;
+      }
+      remaining = (long)(remaining * usableDiskPct); 
+      return (remaining > 0) ? remaining : 0;
     }
       
     String getMount() throws IOException {
@@ -324,7 +338,10 @@ class FSDataset implements FSConstants {
     }
       
     File addBlock(Block b, File f) throws IOException {
-      return dataDir.addBlock(b, f);
+      File blockFile = dataDir.addBlock(b, f);
+      File metaFile = getMetaFile( blockFile );
+      dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
+      return blockFile;
     }
       
     void checkDirs() throws DiskErrorException {
@@ -373,6 +390,14 @@ class FSDataset implements FSConstants {
       }
     }
       
+    long getDfsUsed() throws IOException {
+      long dfsUsed = 0L;
+      for (int idx = 0; idx < volumes.length; idx++) {
+        dfsUsed += volumes[idx].getDfsUsed();
+      }
+      return dfsUsed;
+    }
+
     synchronized long getCapacity() throws IOException {
       long capacity = 0L;
       for (int idx = 0; idx < volumes.length; idx++) {
@@ -459,6 +484,13 @@ class FSDataset implements FSConstants {
     volumes.getBlockMap(blockMap);
   }
 
+  /**
+   * Return the total space used by dfs datanode
+   */
+  public long getDfsUsed() throws IOException {
+    return volumes.getDfsUsed();
+  }
+  
   /**
    * Return total capacity, used and unused
    */
@@ -628,9 +660,10 @@ class FSDataset implements FSConstants {
     boolean error = false;
     for (int i = 0; i < invalidBlks.length; i++) {
       File f = null;
+      FSVolume v;
       synchronized (this) {
         f = getFile(invalidBlks[i]);
-        FSVolume v = volumeMap.get(invalidBlks[i]);
+        v = volumeMap.get(invalidBlks[i]);
         if (f == null) {
           DataNode.LOG.warn("Unexpected error trying to delete block "
                             + invalidBlks[i] + 
@@ -660,12 +693,14 @@ class FSDataset implements FSConstants {
         volumeMap.remove(invalidBlks[i]);
       }
       File metaFile = getMetaFile( f );
+      long blockSize = f.length()+metaFile.length();
       if ( !f.delete() || ( !metaFile.delete() && metaFile.exists() ) ) {
         DataNode.LOG.warn("Unexpected error trying to delete block "
                           + invalidBlks[i] + " at file " + f);
         error = true;
         continue;
       }
+      v.decDfsUsed(blockSize);
       DataNode.LOG.info("Deleting block " + invalidBlks[i] + " file " + f);
       if (f.exists()) {
         //

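The heart of the FSVolume.getAvailable() change above is a new free-space rule: dfs-remaining is capped both by capacity - dfsUsed - reserved and by what df actually reports as available, then scaled by the usable-disk percentage. A standalone sketch of that arithmetic (made-up numbers, not the FSDataset code itself):

public class AvailableSpaceSketch {
  static long available(long capacity, long dfsUsed, long reserved,
                        long dfAvailable, double usableDiskPct) {
    long remaining = capacity - dfsUsed - reserved;
    if (remaining > dfAvailable) {
      remaining = dfAvailable;         // never promise more than df says is free
    }
    remaining = (long) (remaining * usableDiskPct);
    return (remaining > 0) ? remaining : 0;
  }

  public static void main(String[] args) {
    long gb = 1024L * 1024 * 1024;
    // 100 GB volume, 30 GB used by dfs, 1 GB reserved, df reports 50 GB free, 98% usable
    System.out.println(available(100 * gb, 30 * gb, 1 * gb, 50 * gb, 0.98));
  }
}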
+ 16 - 6
src/java/org/apache/hadoop/dfs/FSNamesystem.java

@@ -113,7 +113,7 @@ class FSNamesystem implements FSConstants {
   //
   // Stats on overall usage
   //
-  long totalCapacity = 0, totalRemaining = 0;
+  long totalCapacity = 0L, totalUsed=0L, totalRemaining = 0L;
 
   // total number of connections per live datanode
   int totalLoad = 0;
@@ -1687,7 +1687,7 @@ class FSNamesystem implements FSConstants {
         if( !heartbeats.contains(nodeS)) {
           heartbeats.add(nodeS);
           //update its timestamp
-          nodeS.updateHeartbeat(0L, 0L, 0);
+          nodeS.updateHeartbeat(0L, 0L, 0L, 0);
           nodeS.isAlive = true;
         }
       }
@@ -1771,7 +1771,8 @@ class FSNamesystem implements FSConstants {
    * @throws IOException
    */
   public boolean gotHeartbeat(DatanodeID nodeID,
-                              long capacity, 
+                              long capacity,
+                              long dfsUsed,
                               long remaining,
                               int xceiverCount,
                               int xmitsInProgress,
@@ -1800,7 +1801,7 @@ class FSNamesystem implements FSConstants {
           return true;
         } else {
           updateStats(nodeinfo, false);
-          nodeinfo.updateHeartbeat(capacity, remaining, xceiverCount);
+          nodeinfo.updateHeartbeat(capacity, dfsUsed, remaining, xceiverCount);
           updateStats(nodeinfo, true);
           //
           // Extract pending replication work or block invalidation
@@ -1825,10 +1826,12 @@ class FSNamesystem implements FSConstants {
     assert(Thread.holdsLock(heartbeats));
     if (isAdded) {
       totalCapacity += node.getCapacity();
+      totalUsed += node.getDfsUsed();
       totalRemaining += node.getRemaining();
       totalLoad += node.getXceiverCount();
     } else {
       totalCapacity -= node.getCapacity();
+      totalUsed -= node.getDfsUsed();
       totalRemaining -= node.getRemaining();
       totalLoad -= node.getXceiverCount();
     }
@@ -2505,15 +2508,22 @@ class FSNamesystem implements FSConstants {
   }
 
   /**
-   * Total raw bytes.
+   * Total raw bytes including non-dfs used space.
    */
   public long totalCapacity() {
-
     synchronized (heartbeats) {
       return totalCapacity;
     }
   }
 
+  /**
+   * Total used space by data nodes
+   */
+  public long totalDfsUsed() {
+    synchronized(heartbeats){
+      return totalUsed;
+    }
+  }
   /**
    * Total non-used raw bytes.
    */

+ 7 - 6
src/java/org/apache/hadoop/dfs/NameNode.java

@@ -491,10 +491,10 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   /**
    */
   public long[] getStats() throws IOException {
-    long results[] = new long[2];
-    long totalCapacity = namesystem.totalCapacity();
-    results[0] = totalCapacity;
-    results[1] = totalCapacity - namesystem.totalRemaining();
+    long results[] = new long[3];
+    results[0] = namesystem.totalCapacity();
+    results[1] = namesystem.totalDfsUsed();
+    results[2] = namesystem.totalRemaining();
     return results;
   }
 
@@ -596,7 +596,8 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    * This will be either a transfer or a delete operation.
    */
   public DatanodeCommand sendHeartbeat(DatanodeRegistration nodeReg,
-                                       long capacity, 
+                                       long capacity,
+                                       long dfsUsed,
                                        long remaining,
                                        int xmitsInProgress,
                                        int xceiverCount) throws IOException {
@@ -606,7 +607,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
     deleteList[0] = null; 
 
     verifyRequest(nodeReg);
-    if (namesystem.gotHeartbeat(nodeReg, capacity, dfsUsed, remaining, 
                                 xceiverCount, 
                                 xmitsInProgress,
                                 xferResults,

+ 52 - 0
src/java/org/apache/hadoop/fs/Command.java

@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.BufferedReader;
+
+/** A base class for running a unix command like du or df*/
+abstract public class Command {
+  /** Run a command */
+  protected void run() throws IOException { 
+    Process process;
+    process = Runtime.getRuntime().exec(getExecString());
+
+    try {
+      if (process.waitFor() != 0) {
+        throw new IOException
+          (new BufferedReader(new InputStreamReader(process.getErrorStream()))
+           .readLine());
+      }
+      parseExecResult(new BufferedReader(
+          new InputStreamReader(process.getInputStream())));
+    } catch (InterruptedException e) {
+      throw new IOException(e.toString());
+    } finally {
+      process.destroy();
+    }
+  }
+
+  /** return an array comtaining the command name & its parameters */ 
+  protected abstract String[] getExecString();
+  
+  /** Parse the execution result */
+  protected abstract void parseExecResult(BufferedReader lines)
+  throws IOException;
+  }

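Command is a small template-method base class: run() forks the process, getExecString() supplies the argv, and parseExecResult() consumes stdout. A hypothetical subclass (illustration only, not part of the patch) that wraps "uname -s" the same way DF wraps df and DU wraps du:

package org.apache.hadoop.fs;

import java.io.BufferedReader;
import java.io.IOException;

public class Uname extends Command {
  private String kernelName;

  public Uname() throws IOException {
    run();                                   // inherited from Command
  }

  public String getKernelName() {
    return kernelName;
  }

  protected String[] getExecString() {
    return new String[] {"uname", "-s"};
  }

  protected void parseExecResult(BufferedReader lines) throws IOException {
    String line = lines.readLine();
    if (line == null) {
      throw new IOException("Expecting a line not the end of stream");
    }
    this.kernelName = line.trim();
  }

  public static void main(String[] args) throws Exception {
    System.out.println(new Uname().getKernelName());
  }
}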
+ 4 - 35
src/java/org/apache/hadoop/fs/DF.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 
 /** Filesystem disk space usage statistics.  Uses the unix 'df' program.
  * Tested on Linux, FreeBSD, Cygwin. */
-public class DF {
+public class DF extends Command {
   public static final long DF_INTERVAL_DEFAULT = 3 * 1000; // default DF refresh interval 
   
   private String  dirPath;
@@ -56,22 +56,7 @@ public class DF {
   private void doDF() throws IOException { 
     if (lastDF + dfInterval > System.currentTimeMillis())
       return;
-    Process process;
-    process = Runtime.getRuntime().exec(getExecString());
-
-    try {
-      if (process.waitFor() != 0) {
-        throw new IOException
-          (new BufferedReader(new InputStreamReader(process.getErrorStream()))
-           .readLine());
-      }
-      parseExecResult(
-                      new BufferedReader(new InputStreamReader(process.getInputStream())));
-    } catch (InterruptedException e) {
-      throw new IOException(e.toString());
-    } finally {
-      process.destroy();
-    }
+    super.run();
   }
 
   /// ACCESSORS
@@ -110,22 +95,6 @@ public class DF {
     return mount;
   }
   
-  public long getCapacitySkipRefresh() { 
-    return capacity; 
-  }
-  
-  public long getUsedSkipRefresh() { 
-    return used;
-  }
-  
-  public long getAvailableSkipRefresh() { 
-    return available;
-  }
-  
-  public int getPercentUsedSkipRefresh() {
-    return percentUsed;
-  }
-  
   public String toString() {
     return
       "df -k " + mount +"\n" +
@@ -137,11 +106,11 @@ public class DF {
       mount;
   }
 
-  private String[] getExecString() {
+  protected String[] getExecString() {
     return new String[] {"df","-k", dirPath};
   }
   
-  private void parseExecResult(BufferedReader lines) throws IOException {
+  protected void parseExecResult(BufferedReader lines) throws IOException {
     lines.readLine();                         // skip headings
   
     String line = lines.readLine();

+ 96 - 0
src/java/org/apache/hadoop/fs/DU.java

@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.BufferedReader;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.FSConstants;
+
+/** Filesystem disk space usage statistics.  Uses the unix 'du' program*/
+public class DU extends Command {
+  private String  dirPath;
+  private long    duInterval; // DU refresh interval in msec
+  private long    lastDU;   // last time doDU() was performed
+
+  private long used;
+  
+  public DU(File path, long interval) throws IOException {
+    this.dirPath = path.getCanonicalPath();
+    this.duInterval = interval;
+    run();
+  }
+  
+  public DU(File path, Configuration conf) throws IOException {
+    this(path, conf.getLong("dfs.blockreport.intervalMsec",
+        FSConstants.BLOCKREPORT_INTERVAL));
+  }
+  
+  synchronized public void decDfsUsed(long value) {
+    used -= value;
+  }
+
+  synchronized public void incDfsUsed(long value) {
+    used += value;
+  }
+  
+  synchronized public long getUsed() throws IOException { 
+    if (lastDU + duInterval > System.currentTimeMillis()) {
+      run();
+    }
+    return used;
+  }
+
+  public String getDirPath() {
+    return dirPath;
+  }
+  
+  
+  public String toString() {
+    return
+      "du -s " + dirPath +"\n" +
+      used + "\t" + dirPath;
+  }
+
+  protected String[] getExecString() {
+    return new String[] {"du","-s", dirPath};
+  }
+  
+  protected void parseExecResult(BufferedReader lines) throws IOException {
+    String line = lines.readLine();
+    if (line == null) {
+      throw new IOException( "Expecting a line not the end of stream" );
+    }
+    String[] tokens = line.split("\t");
+    if(tokens.length == 0) {
+      throw new IOException("Illegal du output");
+    }
+    this.used = Long.parseLong(tokens[0])*1024;
+    this.lastDU = System.currentTimeMillis();
+  }
+
+  public static void main(String[] args) throws Exception {
+    String path = ".";
+    if (args.length > 0)
+      path = args[0];
+
+    System.out.println(new DU(new File(path), new Configuration()).toString());
+  }
+}

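A usage sketch for DU (illustration only; the path and the 10-minute interval are made-up values). It mirrors how FSVolume uses it above: the cached du figure is adjusted incrementally with incDfsUsed()/decDfsUsed() as blocks come and go, and getUsed() returns the current estimate.

import java.io.File;

import org.apache.hadoop.fs.DU;

public class DuSketch {
  public static void main(String[] args) throws Exception {
    DU dfsUsage = new DU(new File("/tmp"), 10 * 60 * 1000L); // du -s /tmp, cached
    dfsUsage.incDfsUsed(64L * 1024 * 1024);   // account for a block just written
    dfsUsage.decDfsUsed(64L * 1024 * 1024);   // ... and for its deletion
    System.out.println("used bytes: " + dfsUsage.getUsed());
  }
}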
+ 6 - 4
src/java/org/apache/hadoop/fs/FsShell.java

@@ -19,10 +19,10 @@ package org.apache.hadoop.fs;
 
 import java.io.*;
 import java.util.*;
+import java.text.DecimalFormat;
 import java.text.SimpleDateFormat;
 
 import org.apache.hadoop.conf.*;
-import org.apache.hadoop.dfs.ChecksumDistributedFileSystem;
 import org.apache.hadoop.ipc.*;
 import org.apache.hadoop.util.ToolBase;
 
@@ -38,6 +38,8 @@ public class FsShell extends ToolBase {
   {
     modifFmt.setTimeZone(TimeZone.getTimeZone("UTC"));
   }
+  private static final DecimalFormat decimalFormat = 
+    new DecimalFormat("#*0.0#*");
 
   /**
    */
@@ -798,11 +800,11 @@ public class FsShell extends ToolBase {
     String ending = "";
     if (len < 1024 * 1024) {
       val = (1.0 * len) / 1024;
-      ending = " k";
+      ending = " KB";
     } else if (len < 1024 * 1024 * 1024) {
       val = (1.0 * len) / (1024 * 1024);
       ending = " MB";
-    } else if (len < 128L * 1024 * 1024 * 1024) {
+    } else if (len < 1024L * 1024 * 1024 * 1024) {
       val = (1.0 * len) / (1024 * 1024 * 1024);
       ending = " GB";
     } else if (len < 1024L * 1024 * 1024 * 1024 * 1024) {
@@ -816,7 +818,7 @@ public class FsShell extends ToolBase {
   }
 
   public static String limitDecimal(double d, int placesAfterDecimal) {
-    String strVal = Double.toString(d);
+    String strVal = decimalFormat.format(d);
     int decpt = strVal.indexOf(".");
     if (decpt >= 0) {
       strVal = strVal.substring(0, Math.min(strVal.length(), decpt + 1 + placesAfterDecimal));

+ 14 - 14
src/test/org/apache/hadoop/dfs/TestReplicationPolicy.java

@@ -65,8 +65,8 @@ public class TestReplicationPolicy extends TestCase {
     }
     for(int i=0; i<NUM_OF_DATANODES; i++) {
       dataNodes[i].updateHeartbeat(
-                                   2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-                                   2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);
+          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);
     }
   }
   
@@ -80,8 +80,8 @@ public class TestReplicationPolicy extends TestCase {
    */
   public void testChooseTarget1() throws Exception {
     dataNodes[0].updateHeartbeat(
-                                 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-                                 FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 4); // overloaded
+        2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 
+        FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 4); // overloaded
 
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(
@@ -115,8 +115,8 @@ public class TestReplicationPolicy extends TestCase {
     assertFalse(cluster.isOnSameRack(targets[0], targets[3]));
 
     dataNodes[0].updateHeartbeat(
-                                 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-                                 FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0); 
+        2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0); 
   }
 
   /**
@@ -186,8 +186,8 @@ public class TestReplicationPolicy extends TestCase {
   public void testChooseTarget3() throws Exception {
     // make data node 0 to be not qualified to choose
     dataNodes[0].updateHeartbeat(
-                                 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-                                 (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0); // no space
+        2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0); // no space
         
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(
@@ -224,8 +224,8 @@ public class TestReplicationPolicy extends TestCase {
     assertFalse(cluster.isOnSameRack(targets[1], targets[3]));
 
     dataNodes[0].updateHeartbeat(
-                                 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-                                 FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0); 
+        2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0); 
   }
   
   /**
@@ -240,8 +240,8 @@ public class TestReplicationPolicy extends TestCase {
     // make data node 0 & 1 to be not qualified to choose: not enough disk space
     for(int i=0; i<2; i++) {
       dataNodes[i].updateHeartbeat(
-                                   2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-                                   (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0);
+          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0);
     }
       
     DatanodeDescriptor[] targets;
@@ -272,8 +272,8 @@ public class TestReplicationPolicy extends TestCase {
     
     for(int i=0; i<2; i++) {
       dataNodes[i].updateHeartbeat(
-                                   2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-                                   FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);
+          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);
     }
   }
   /**

+ 14 - 10
src/webapps/dfs/dfshealth.jsp

@@ -83,8 +83,7 @@
         return;
     
     long c = d.getCapacity();
-    long r = d.getRemaining();
-    long u = c - r;
+    long u = d.getDfsUsed();
     
     String percentUsed;
     if (c > 0) 
@@ -105,7 +104,9 @@
 	      "<td class=\"size\">" +
               FsShell.limitDecimal(c*1.0/diskBytes, 2) +
 	      "<td class=\"pcused\">" + percentUsed +
-              "<td class=\"blocks\">" + d.numBlocks() + "\n");
+	      "<td class=\"size\">" +
+              FsShell.limitDecimal(d.getRemaining()*1.0/diskBytes, 2) +
+          "<td class=\"blocks\">" + d.numBlocks() + "\n");
   }
 
   public void generateDFSHealthReport(JspWriter out,
@@ -147,11 +148,12 @@
     out.print( "<div id=\"dfstable\"> <table>\n" +
 	       rowTxt() + colTxt() + "Capacity" + colTxt() + ":" + colTxt() +
 	       FsShell.byteDesc( fsn.totalCapacity() ) +
-	       rowTxt() + colTxt() + "Remaining" + colTxt() + ":" + colTxt() +
+	       rowTxt() + colTxt() + "DFS Remaining" + colTxt() + ":" + colTxt() +
 	       FsShell.byteDesc( fsn.totalRemaining() ) +
-	       rowTxt() + colTxt() + "Used" + colTxt() + ":" + colTxt() +
-	       FsShell.limitDecimal((fsn.totalCapacity() -
-				      fsn.totalRemaining())*100.0/
+	       rowTxt() + colTxt() + "DFS Used" + colTxt() + ":" + colTxt() +
+	       FsShell.byteDesc( fsn.totalDfsUsed() ) +
+	       rowTxt() + colTxt() + "DFS Used%" + colTxt() + ":" + colTxt() +
+	       FsShell.limitDecimal((fsn.totalDfsUsed())*100.0/
 				     (fsn.totalCapacity() + 1e-10), 2) + " %" +
 	       rowTxt() + colTxt() +
                "<a href=\"#LiveNodes\">Live Nodes</a> " +
@@ -181,13 +183,15 @@
             }
 
 	    out.print( "<tr class=\"headerRow\"> <th " +
-                       NodeHeaderStr("name") + "> Node <th " +
+                       ("name") + "> Node <th " +
                        NodeHeaderStr("lastcontact") + "> Last Contact <th " +
                        NodeHeaderStr("adminstate") + "> Admin State <th " +
                        NodeHeaderStr("size") + "> Size (" + diskByteStr +
                        ") <th " + NodeHeaderStr("pcused") +
-                       "> Used (%) <th " + NodeHeaderStr("blocks") +
-                       "> Blocks\n" );
+                       "> Used (%) <th " + 
+                       NodeHeaderStr("remaining") + "> Remaining (" + 
+                       diskByteStr + ") <th " +
+                       NodeHeaderStr("blocks") + "> Blocks\n" );
             
 	    for ( int i=0; i < live.size(); i++ ) {
 		generateNodeData( out, live.get(i), port_suffix, true );