
HADOOP-5958. Use JDK 1.6 File APIs in DF.java wherever possible. Contributed by Aaron Kimball.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@890576 13f79535-47bb-0310-9956-ffa450edef68
Thomas White, 15 years ago · commit ee6468b8a1
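
The patch drops the parsed capacity/used/available fields in favor of the space-query methods that java.io.File gained in JDK 1.6. A standalone sketch of those APIs (not taken from the patch; the class name and the default path are illustrative):

    import java.io.File;

    public class SpaceProbe {
      public static void main(String[] args) {
        // Any directory on a mounted volume works; "." is just an example.
        File dir = new File(args.length > 0 ? args[0] : ".");
        long capacity  = dir.getTotalSpace();   // size of the partition, in bytes
        long available = dir.getUsableSpace();  // bytes this JVM can actually use
        long used      = capacity - dir.getFreeSpace();
        System.out.println(capacity + " / " + used + " / " + available);
      }
    }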

CHANGES.txt  (+3, -0)

@@ -56,6 +56,9 @@ Trunk (unreleased changes)
     HADOOP-6422. Make RPC backend plugable, protocol-by-protocol, to
     ease evolution towards Avro.  (cutting)
 
+    HADOOP-5958. Use JDK 1.6 File APIs in DF.java wherever possible.
+    (Aaron Kimball via tomwhite)
+
   OPTIMIZATIONS
 
   BUG FIXES

src/java/org/apache/hadoop/fs/DF.java  (+46, -41)

@@ -28,17 +28,17 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.Shell;
 
-/** Filesystem disk space usage statistics.  Uses the unix 'df' program.
- * Tested on Linux, FreeBSD, Cygwin. */
+/** Filesystem disk space usage statistics.
+ * Uses the unix 'df' program to get mount points, and java.io.File for
+ * space utilization. Tested on Linux, FreeBSD, Cygwin. */
 public class DF extends Shell {
-  public static final long DF_INTERVAL_DEFAULT = 3 * 1000; // default DF refresh interval 
-  
-  private String dirPath;
+
+  /** Default DF refresh interval. */
+  public static final long DF_INTERVAL_DEFAULT = 3 * 1000;
+
+  private final String dirPath;
+  private final File dirFile;
   private String filesystem;
-  private long capacity;
-  private long used;
-  private long available;
-  private int percentUsed;
   private String mount;
 
   enum OSType {
@@ -79,6 +79,7 @@ public class DF extends Shell {
   public DF(File path, long dfInterval) throws IOException {
     super(dfInterval);
     this.dirPath = path.getCanonicalPath();
+    this.dirFile = new File(this.dirPath);
   }
 
   protected OSType getOSType() {
@@ -87,35 +88,40 @@ public class DF extends Shell {
   
   /// ACCESSORS
 
+  /** @return the canonical path to the volume we're checking. */
   public String getDirPath() {
     return dirPath;
   }
-  
-  public String getFilesystem() throws IOException { 
-    run(); 
-    return filesystem; 
+
+  /** @return a string indicating which filesystem volume we're checking. */
+  public String getFilesystem() throws IOException {
+    run();
+    return filesystem;
   }
-  
-  public long getCapacity() throws IOException { 
-    run(); 
-    return capacity; 
+
+  /** @return the capacity of the measured filesystem in bytes. */
+  public long getCapacity() {
+    return dirFile.getTotalSpace();
   }
-  
-  public long getUsed() throws IOException { 
-    run(); 
-    return used;
+
+  /** @return the total used space on the filesystem in bytes. */
+  public long getUsed() {
+    return dirFile.getTotalSpace() - dirFile.getFreeSpace();
   }
-  
-  public long getAvailable() throws IOException { 
-    run(); 
-    return available;
+
+  /** @return the usable space remaining on the filesystem in bytes. */
+  public long getAvailable() {
+    return dirFile.getUsableSpace();
   }
-  
-  public int getPercentUsed() throws IOException {
-    run();
-    return percentUsed;
+
+  /** @return the amount of the volume full, as a percent. */
+  public int getPercentUsed() {
+    double cap = (double) getCapacity();
+    double used = (cap - (double) getAvailable());
+    return (int) (used * 100.0 / cap);
   }
 
+  /** @return the filesystem mount point for the indicated volume */
   public String getMount() throws IOException {
     run();
     return mount;
@@ -125,10 +131,10 @@ public class DF extends Shell {
     return
       "df -k " + mount +"\n" +
       filesystem + "\t" +
-      capacity / 1024 + "\t" +
-      used / 1024 + "\t" +
-      available / 1024 + "\t" +
-      percentUsed + "%\t" +
+      getCapacity() / 1024 + "\t" +
+      getUsed() / 1024 + "\t" +
+      getAvailable() / 1024 + "\t" +
+      getPercentUsed() + "%\t" +
       mount;
   }
 
@@ -161,13 +167,12 @@ public class DF extends Shell {
 
     switch(getOSType()) {
       case OS_TYPE_AIX:
-        this.capacity = Long.parseLong(tokens.nextToken()) * 1024;
-        this.available = Long.parseLong(tokens.nextToken()) * 1024;
-        this.percentUsed = Integer.parseInt(tokens.nextToken());
+        Long.parseLong(tokens.nextToken()); // capacity
+        Long.parseLong(tokens.nextToken()); // available
+        Integer.parseInt(tokens.nextToken()); // pct used
         tokens.nextToken();
         tokens.nextToken();
         this.mount = tokens.nextToken();
-        this.used = this.capacity - this.available;
         break;
 
       case OS_TYPE_WIN:
@@ -175,10 +180,10 @@ public class DF extends Shell {
       case OS_TYPE_MAC:
       case OS_TYPE_UNIX:
       default:
-        this.capacity = Long.parseLong(tokens.nextToken()) * 1024;
-        this.used = Long.parseLong(tokens.nextToken()) * 1024;
-        this.available = Long.parseLong(tokens.nextToken()) * 1024;
-        this.percentUsed = Integer.parseInt(tokens.nextToken());
+        Long.parseLong(tokens.nextToken()); // capacity
+        Long.parseLong(tokens.nextToken()); // used
+        Long.parseLong(tokens.nextToken()); // available
+        Integer.parseInt(tokens.nextToken()); // pct used
         this.mount = tokens.nextToken();
         break;
    }
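
With this change only getFilesystem() and getMount() still shell out to `df` (and can therefore throw IOException); the space accessors read java.io.File directly. A usage sketch, assuming a readable local directory such as /tmp:

    import java.io.File;
    import java.io.IOException;
    import org.apache.hadoop.fs.DF;

    public class DFExample {
      public static void main(String[] args) throws IOException {
        DF df = new DF(new File("/tmp"), DF.DF_INTERVAL_DEFAULT);
        // Served by java.io.File: no shell call, no checked exception.
        System.out.println("capacity:  " + df.getCapacity());
        System.out.println("used:      " + df.getUsed());
        System.out.println("available: " + df.getAvailable());
        System.out.println("pct used:  " + df.getPercentUsed() + "%");
        // Still parsed from the 'df' output, hence the IOException.
        System.out.println("mount:     " + df.getMount());
      }
    }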

src/test/core/org/apache/hadoop/fs/TestDFVariations.java  (+0, -4)

@@ -51,10 +51,6 @@ public class TestDFVariations extends TestCase {
   public void testOSParsing() throws Exception {
     for (DF.OSType ost : EnumSet.allOf(DF.OSType.class)) {
       XXDF df = new XXDF(ost.getId());
-      assertEquals(ost.getId() + " total", 453115160 * 1024L, df.getCapacity());
-      assertEquals(ost.getId() + " used", 53037920 * 1024L, df.getUsed());
-      assertEquals(ost.getId() + " avail", 400077240 * 1024L, df.getAvailable());
-      assertEquals(ost.getId() + " pcnt used", 11, df.getPercentUsed());
       assertEquals(ost.getId() + " mount", "/foo/bar", df.getMount());
     }
   }
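
The hard-coded assertions disappear because the stubbed `df` output no longer drives the space accessors; those now report the real volume the test happens to run on, so only the mount point stays deterministic. A hypothetical follow-up check (not part of this patch) could still exercise them against java.io.File:

    // Hypothetical JUnit 3 test method; the name and temp-dir choice are illustrative.
    public void testSpaceAccessorsTrackFile() throws Exception {
      File dir = new File(System.getProperty("java.io.tmpdir"));
      DF df = new DF(dir, DF.DF_INTERVAL_DEFAULT);
      assertEquals(dir.getTotalSpace(), df.getCapacity());
      assertTrue(df.getAvailable() <= df.getCapacity());
      assertTrue(df.getPercentUsed() >= 0 && df.getPercentUsed() <= 100);
    }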