Browse Source

HADOOP-296. Permit specification of the amount of reserved space on a DFS datanode. Contributed by Johan Oskarson.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@417300 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 19 years ago
parent
commit
6613b1ec0d
3 changed files with 25 additions and 2 deletions
  1. 4 0
      CHANGES.txt
  2. 15 0
      conf/hadoop-default.xml
  3. 6 2
      src/java/org/apache/hadoop/dfs/FSDataset.java

+ 4 - 0
CHANGES.txt

@@ -66,6 +66,10 @@ Trunk (unreleased changes)
     "dfs", "fsck", "job", and "distcp" commands currently support
     "dfs", "fsck", "job", and "distcp" commands currently support
     this, with more to be added.  (Hairong Kuang via cutting)
     this, with more to be added.  (Hairong Kuang via cutting)
 
 
+16. HADOOP-296.  Permit specification of the amount of reserved space
+    on a DFS datanode.  One may specify both the percentage free and
+    the number of bytes.  (Johan Oskarson via cutting)
+
 
 
 Release 0.3.2 - 2006-06-09
 Release 0.3.2 - 2006-06-09
 
 

+ 15 - 0
conf/hadoop-default.xml

@@ -102,6 +102,20 @@ creations/deletions), or "all".</description>
   </description>
   </description>
 </property>
 </property>
 
 
+<property>
+  <name>dfs.datanode.du.reserved</name>
+  <value>0</value>
+  <description>Reserved space in bytes. Always leave this much space free for non-DFS use.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.du.pct</name>
+  <value>0.98f</value>
+  <description>When calculating remaining space, only use this percentage of the real available space.
+  </description>
+</property>
+
 <property>
 <property>
   <name>dfs.name.dir</name>
   <name>dfs.name.dir</name>
   <value>/tmp/hadoop/dfs/name</value>
   <value>/tmp/hadoop/dfs/name</value>
@@ -161,6 +175,7 @@ creations/deletions), or "all".</description>
   </description>
   </description>
 </property>
 </property>
 
 
+
 <!-- map/reduce properties -->
 <!-- map/reduce properties -->
 
 
 <property>
 <property>

+ 6 - 2
src/java/org/apache/hadoop/dfs/FSDataset.java

@@ -30,8 +30,9 @@ import org.apache.hadoop.conf.*;
  * @author Mike Cafarella
  * @author Mike Cafarella
  ***************************************************/
  ***************************************************/
 class FSDataset implements FSConstants {
 class FSDataset implements FSConstants {
-    static final double USABLE_DISK_PCT = 0.98;
 
 
+		static final double USABLE_DISK_PCT_DEFAULT = 0.98f; 
+	
   /**
   /**
      * A node type that can be built into a tree reflecting the
      * A node type that can be built into a tree reflecting the
      * hierarchy of blocks on the local disk.
      * hierarchy of blocks on the local disk.
@@ -202,6 +203,7 @@ class FSDataset implements FSConstants {
     DF diskUsage;
     DF diskUsage;
     File data = null, tmp = null;
     File data = null, tmp = null;
     long reserved = 0;
     long reserved = 0;
+    double usableDiskPct = USABLE_DISK_PCT_DEFAULT;
     FSDir dirTree;
     FSDir dirTree;
     TreeSet ongoingCreates = new TreeSet();
     TreeSet ongoingCreates = new TreeSet();
 
 
@@ -209,6 +211,8 @@ class FSDataset implements FSConstants {
      * An FSDataset has a directory where it loads its data files.
      * An FSDataset has a directory where it loads its data files.
      */
      */
     public FSDataset(File dir, Configuration conf) throws IOException {
     public FSDataset(File dir, Configuration conf) throws IOException {
+    		this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
+    		this.usableDiskPct = conf.getFloat("dfs.datanode.du.pct", (float) USABLE_DISK_PCT_DEFAULT);
         diskUsage = new DF( dir.getCanonicalPath(), conf); 
         diskUsage = new DF( dir.getCanonicalPath(), conf); 
         this.data = new File(dir, "data");
         this.data = new File(dir, "data");
         if (! data.exists()) {
         if (! data.exists()) {
@@ -233,7 +237,7 @@ class FSDataset implements FSConstants {
      * Return how many bytes can still be stored in the FSDataset
      * Return how many bytes can still be stored in the FSDataset
      */
      */
     public long getRemaining() throws IOException {
     public long getRemaining() throws IOException {
-        return ((long) Math.round(USABLE_DISK_PCT * diskUsage.getAvailable())) - reserved;
+        return ((long) Math.round(usableDiskPct * diskUsage.getAvailable())) - reserved;
     }
     }
 
 
     /**
     /**