
HADOOP-2447. HDFS can be configured to limit the total number of
objects (inodes and blocks) in the file system. (dhruba)



git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@610478 13f79535-47bb-0310-9956-ffa450edef68

Dhruba Borthakur 17 years ago
parent
commit
c88bdc504e

+ 3 - 0
CHANGES.txt

@@ -64,6 +64,9 @@ Trunk (unreleased changes)
     default.  Enable with dfs.permissions=true.
     (Tsz Wo (Nicholas) & taton via cutting)
 
+    HADOOP-2447. HDFS can be configured to limit the total number of 
+    objects (inodes and blocks) in the file system. (dhruba)
+
   IMPROVEMENTS
 
     HADOOP-2045.  Change committer list on website to a table, so that

+ 9 - 0
conf/hadoop-default.xml

@@ -434,6 +434,15 @@ creations/deletions), or "all".</description>
   excluded.</description>
 </property> 
 
+<property>
+  <name>dfs.max.objects</name>
+  <value>0</value>
+  <description>The maximum number of files, directories and blocks
+  dfs supports. A value of zero indicates no limit to the number
+  of objects that dfs supports.
+  </description>
+</property>
+
 <property>
   <name>fs.s3.block.size</name>
   <value>67108864</value>
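For reference, dfs.max.objects can also be set programmatically through the
Configuration API. A minimal sketch, assuming only that the Hadoop classes are
on the classpath (the class name MaxObjectsExample is illustrative; the
setLong() call mirrors the one used by TestFileLimit below):

  import org.apache.hadoop.conf.Configuration;

  public class MaxObjectsExample {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // 0 (the default) means no limit; a positive value caps the total
      // number of inodes plus blocks the namenode will accept.
      conf.setLong("dfs.max.objects", 1000000L);
      System.out.println("dfs.max.objects = "
                         + conf.getLong("dfs.max.objects", 0));
    }
  }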

+ 32 - 6
src/java/org/apache/hadoop/dfs/FSDirectory.java

@@ -45,6 +45,8 @@ class FSDirectory implements FSConstants {
   boolean ready = false;
   // Metrics record
   private MetricsRecord directoryMetrics = null;
+
+  volatile private long totalInodes = 1;   // number of inodes, for rootdir
     
   /** Access an existing dfs name directory. */
   public FSDirectory(FSNamesystem ns, Configuration conf) throws IOException {
@@ -141,6 +143,9 @@ class FSDirectory implements FSConstants {
       } catch (FileNotFoundException e) {
         newNode = null;
       }
+      if (newNode != null) {
+        totalInodes++;
+      }
     }
     if (newNode == null) {
       NameNode.stateChangeLog.info("DIR* FSDirectory.addFile: "
@@ -183,6 +188,9 @@ class FSDirectory implements FSConstants {
       } catch (FileNotFoundException e) {
         return null;
       }
+      if (newNode != null) {
+        totalInodes++;
+      }
       return newNode;
     }
   }
@@ -461,6 +469,7 @@ class FSDirectory implements FSConstants {
           ArrayList<Block> v = new ArrayList<Block>();
           int filesRemoved = targetNode.collectSubtreeBlocks(v);
           incrDeletedFileCount(filesRemoved);
+          totalInodes -= filesRemoved;
           for (Block b : v) {
             namesystem.blocksMap.removeINode(b);
           }
@@ -597,7 +606,7 @@ class FSDirectory implements FSConstants {
    * Create directory entries for every item
    */
   boolean mkdirs(String src, PermissionStatus permissions,
-      boolean inheritPermission, long now) {
+      boolean inheritPermission, long now) throws IOException {
     src = normalizePath(src);
 
     // Use this to collect all the dirs we need to construct
@@ -619,8 +628,15 @@ class FSDirectory implements FSConstants {
     for (int i = numElts - 1; i >= 0; i--) {
       String cur = v.get(i);
       try {
-        INode inserted = unprotectedMkdir(cur, permissions,
-            inheritPermission || i != 0, now);
+        INode inserted = null;
+        synchronized (rootDir) {
+          inserted = rootDir.addNode(cur, 
+                             new INodeDirectory(permissions, now),
+                             inheritPermission || i != 0);
+          if (inserted != null) {
+            totalInodes++;
+          }
+        }
         if (inserted != null) {
           NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
                                         +"created directory "+cur);
@@ -643,11 +659,15 @@ class FSDirectory implements FSConstants {
 
   /**
    */
-  INodeDirectory unprotectedMkdir(String src, PermissionStatus permissions,
+  INode unprotectedMkdir(String src, PermissionStatus permissions,
       boolean inheritPermission, long timestamp) throws FileNotFoundException {
     synchronized (rootDir) {
-      return rootDir.addNode(src, new INodeDirectory(permissions, timestamp),
-          inheritPermission);
+      INode newNode = rootDir.addNode(src, new INodeDirectory(permissions, 
+                                      timestamp), inheritPermission);
+      if (newNode != null) {
+        totalInodes++;
+      }
+      return newNode;
     }
   }
 
@@ -674,4 +694,10 @@ class FSDirectory implements FSConstants {
       }
     }
   }
+
+  long totalInodes() {
+    synchronized (rootDir) {
+      return totalInodes;
+    }
+  }
 }
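The accounting above follows one idiom: totalInodes is volatile, every update
happens inside a synchronized (rootDir) block alongside the tree mutation it
accounts for, and reads go through a synchronized accessor, so the count can
never be observed out of sync with the tree. A stripped-down sketch of that
idiom (class and method names here are illustrative, not part of the patch):

  class NamespaceCounter {
    private final Object lock = new Object();
    private volatile long totalInodes = 1;   // 1 for the root directory

    void onInodeAdded() {
      synchronized (lock) {
        totalInodes++;
      }
    }

    void onSubtreeRemoved(int inodesRemoved) {
      synchronized (lock) {
        totalInodes -= inodesRemoved;
      }
    }

    long totalInodes() {
      synchronized (lock) {
        return totalInodes;
      }
    }
  }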

+ 38 - 0
src/java/org/apache/hadoop/dfs/FSNamesystem.java

@@ -213,6 +213,8 @@ class FSNamesystem implements FSConstants {
   private static final SimpleDateFormat DATE_FORM =
     new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
 
+  private long maxFsObjects = 0;          // maximum number of fs objects
+
   /**
    * FSNamesystem constructor.
    */
@@ -347,6 +349,7 @@ class FSNamesystem implements FSConstants {
                                                    "dfs.namenode.decommission.interval",
                                                    "dfs.namenode.decommission.interval",
                                                    5 * 60 * 1000);    
                                                    5 * 60 * 1000);    
     this.defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
     this.defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    this.maxFsObjects = conf.getLong("dfs.max.objects", 0);
   }
 
   /** Return the FSNamesystem object
@@ -1000,6 +1003,7 @@ class FSNamesystem implements FSConstants {
       // Now we can add the name to the filesystem. This file has no
       // blocks associated with it.
       //
+      checkFsObjectLimit();
       INode newNode = dir.addFile(src, permissions,
           replication, blockSize, holder, clientMachine, clientNode);
       if (newNode == null) {
@@ -1043,6 +1047,9 @@ class FSNamesystem implements FSConstants {
         throw new SafeModeException("Cannot add block to " + src, safeMode);
       }
 
+      // have we exceeded the configured limit of fs objects.
+      checkFsObjectLimit();
+
       INodeFileUnderConstruction pendingFile  = checkLease(src, clientName);
 
       //
@@ -1496,6 +1503,11 @@ class FSNamesystem implements FSConstants {
     }
     checkAncestorAccess(src, FsAction.WRITE);
 
+    // validate that we have enough inodes. This is, at best, a 
+    // heuristic because the mkdirs() operation might need to 
+    // create multiple inodes.
+    checkFsObjectLimit();
+
     if (!dir.mkdirs(src, permissions, false, now())) {
       throw new IOException("Invalid directory name: " + src);
     }
@@ -3700,6 +3712,13 @@ class FSNamesystem implements FSConstants {
     safeMode.setBlockTotal(blocksMap.size());
   }
 
+  /**
+   * Get the total number of blocks in the system. 
+   */
+  long getBlockTotal() {
+    return blocksMap.size();
+  }
+
   /**
    * Enter safe mode manually.
    * @throws IOException
@@ -3880,4 +3899,23 @@ class FSNamesystem implements FSConstants {
     }
     return pc;
   }
+
+  /*
+   * Check to see if we have exceeded the limit on the number
+   * of inodes.
+   */
+  void checkFsObjectLimit() throws IOException {
+    if (maxFsObjects != 0 &&
+        maxFsObjects <= dir.totalInodes() + getBlockTotal()) {
+      throw new IOException("Exceeded the configured number of objects " +
+                             maxFsObjects + " in the filesystem.");
+    }
+  }
+
+  /**
+   * Get the total number of objects in the system. 
+   */
+  long getMaxObjects() {
+    return maxFsObjects;
+  }
 }
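Note that the check is inclusive: a create is refused once
totalInodes + blockTotal has already reached maxFsObjects, so a limit of N
admits at most N existing objects. The predicate restated in isolation
(method name illustrative):

  // With maxFsObjects = 5 and a namespace holding the root inode plus
  // two one-block files (3 inodes + 2 blocks), 5 <= 5 holds and the
  // next create is refused, which is exactly what TestFileLimit verifies.
  static boolean wouldExceedLimit(long maxFsObjects,
                                  long totalInodes,
                                  long blockTotal) {
    return maxFsObjects != 0 && maxFsObjects <= totalInodes + blockTotal;
  }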

+ 23 - 0
src/java/org/apache/hadoop/dfs/JspHelper.java

@@ -34,6 +34,7 @@ import javax.servlet.jsp.JspWriter;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.dfs.FSConstants.UpgradeAction;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.net.NetUtils;
 
 public class JspHelper {
@@ -173,6 +174,28 @@ public class JspHelper {
     return "Safe mode is ON. <em>" + fsn.getSafeModeTip() + "</em><br>";
     return "Safe mode is ON. <em>" + fsn.getSafeModeTip() + "</em><br>";
   }
   }
 
 
+  public String getInodeLimitText() {
+    long inodes = fsn.dir.totalInodes();
+    long blocks = fsn.getBlockTotal();
+    long maxobjects = fsn.getMaxObjects();
+    long totalMemory = Runtime.getRuntime().totalMemory();   
+    long maxMemory = Runtime.getRuntime().maxMemory();   
+
+    long used = (totalMemory * 100)/maxMemory;
+ 
+    String str = inodes + " files and directories, " +
+                 blocks + " blocks = " +
+                 (inodes + blocks) + " total";
+    if (maxobjects != 0) {
+      long pct = ((inodes + blocks) * 100)/maxobjects;
+      str += " / " + maxobjects + " (" + pct + "%)";
+    }
+    str += ".  Heap Size is " + FsShell.byteDesc(totalMemory) + " / " + 
+           FsShell.byteDesc(maxMemory) + 
+           " (" + used + "%) <br>";
+    return str;
+  }
+
   public String getUpgradeStatusText() {
     String statusText = "";
     try {

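Two reading notes on getInodeLimitText(): the heap percentage compares the
committed heap (totalMemory) against the maximum heap (maxMemory), not bytes
actually in use, and the "/ max (pct%)" suffix on the object count only
appears when a limit is configured. With 5 files and directories, 4 blocks
and dfs.max.objects = 100, the rendered line would read something like the
following (heap figures illustrative):

  5 files and directories, 4 blocks = 9 total / 100 (9%).  Heap Size is 120 MB / 890 MB (13%)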
+ 182 - 0
src/test/org/apache/hadoop/dfs/TestFileLimit.java

@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.dfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.net.*;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.dfs.FSConstants.DatanodeReportType;
+
+
+/**
+ * This class tests that a file system adheres to the limit of
+ * maximum number of files that is configured.
+ */
+public class TestFileLimit extends TestCase {
+  static final long seed = 0xDEADBEEFL;
+  static final int blockSize = 8192;
+  static final int numBlocks = 2;
+  static final int fileSize = numBlocks * blockSize;
+  boolean simulatedStorage = false;
+
+  // createFile() below writes zero-length files; the test tracks the
+  // namespace objects (inodes and blocks) that each operation adds or
+  // removes against the configured limit.
+
+  private static String TEST_ROOT_DIR =
+    new Path(System.getProperty("test.build.data","/tmp"))
+    .toString().replace(' ', '+');
+  
+  //
+  // creates a zero file.
+  //
+  private void createFile(FileSystem fileSys, Path name)
+    throws IOException {
+    FSDataOutputStream stm = fileSys.create(name, true,
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)1, (long)blockSize);
+    stm.close();
+  }
+
+  private void waitForLimit(FSNamesystem namesys, long num)
+  {
+    // wait until the total number of objects (inodes + blocks) reaches num
+    while (true) {
+      long total = namesys.getBlockTotal() + namesys.dir.totalInodes();
+      System.out.println("Comparing current nodes " + total +
+                         " to become " + num);
+      if (total == num) {
+        break;
+      }
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {
+      }
+    }
+  }
+
+  /**
+   * Test that the file system enforces the configured maximum
+   * number of objects (inodes and blocks).
+   */
+  public void testFileLimit() throws IOException {
+    Configuration conf = new Configuration();
+    int maxObjects = 5;
+    conf.setLong("dfs.max.objects", maxObjects);
+    conf.setLong("dfs.blockreport.intervalMsec", 1000L);
+    conf.setInt("dfs.heartbeat.interval", 1);
+    int currentNodes = 0;
+    
+    if (simulatedStorage) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    FSNamesystem namesys = FSNamesystem.fsNamesystemObject;
+    NameNode namenode = cluster.getNameNode();
+    try {
+
+      //
+      // check that / exists
+      //
+      Path path = new Path("/");
+      assertTrue("/ should be a directory", 
+                 fs.getFileStatus(path).isDir() == true);
+      currentNodes = 1;          // root inode
+
+      // verify that we can create the specified number of files. We leave
+      // one for the "/". Each file takes an inode and a block.
+      //
+      for (int i = 0; i < maxObjects/2; i++) {
+        Path file = new Path("/filestatus" + i);
+        createFile(fs, file);
+        System.out.println("Created file " + file);
+        currentNodes += 2;      // two more objects for this creation.
+      }
+
+      // verify that creating another file fails
+      boolean hitException = false;
+      try {
+        Path file = new Path("/filestatus");
+        createFile(fs, file);
+        System.out.println("Created file " + file);
+      } catch (IOException e) {
+        hitException = true;
+      }
+      assertTrue("Was able to exceed file limit", hitException);
+
+      // delete one file
+      Path file0 = new Path("/filestatus0");
+      fs.delete(file0);
+      System.out.println("Deleted file " + file0);
+      currentNodes -= 2;
+
+      // wait for number of blocks to decrease
+      waitForLimit(namesys, currentNodes);
+
+      // now, we should be able to create a new file
+      createFile(fs, file0);
+      System.out.println("Created file " + file0 + " again.");
+      currentNodes += 2;
+
+      // delete the file again
+      file0 = new Path("/filestatus0");
+      fs.delete(file0);
+      System.out.println("Deleted file " + file0 + " again.");
+      currentNodes -= 2;
+
+      // wait for number of blocks to decrease
+      waitForLimit(namesys, currentNodes);
+
+      // create two directories in place of the file that we deleted
+      Path dir = new Path("/dir0/dir1");
+      fs.mkdirs(dir);
+      System.out.println("Created directories " + dir);
+      currentNodes += 2;
+      waitForLimit(namesys, currentNodes);
+
+      // verify that creating another directory fails
+      hitException = false;
+      try {
+        fs.mkdirs(new Path("dir.fail"));
+        System.out.println("Created directory should not have succeeded.");
+      } catch (IOException e) {
+        hitException = true;
+      }
+      assertTrue("Was able to exceed dir limit", hitException);
+
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  public void testFileLimitSimulated() throws IOException {
+    simulatedStorage = true;
+    testFileLimit();
+    simulatedStorage = false;
+  }
+}
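The bookkeeping in testFileLimit() works out as follows: the limit is 5, the
root directory accounts for 1 inode, and each empty file the test creates
registers one inode plus one block (hence currentNodes += 2), so
maxObjects/2 = 2 files bring the total to exactly 5. From that point
checkFsObjectLimit() refuses any further create or mkdirs until a delete
lowers the count again, which is what the waitForLimit() polling confirms
before each retry.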

+ 1 - 0
src/webapps/dfs/dfshealth.jsp

@@ -239,6 +239,7 @@
 <hr>
 <h3>Cluster Summary</h3>
 <b> <%= jspHelper.getSafeModeText()%> </b>
+<b> <%= jspHelper.getInodeLimitText()%> </b>
 
 <% 
     generateDFSHealthReport(out, request); 