
HDFS-46. Change default namespace quota of root directory from Integer.MAX_VALUE to Long.MAX_VALUE. Contributed by Uma Maheswara Rao G

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1173990 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 13 years ago
parent
commit
d773bf0fb5
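
Before this change the root directory was created with a namespace quota of Integer.MAX_VALUE, capping the entire namespace at roughly 2.1 billion inodes; with Long.MAX_VALUE the default is effectively unlimited. As a quick sanity check (not part of this commit), a client can read the effective namespace quota of "/" through ContentSummary.getQuota(). The sketch below assumes a running HDFS reachable via the default configuration and, after this change, is expected to print Long.MAX_VALUE.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class RootNsQuotaCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();     // picks up core-site.xml / hdfs-site.xml
    FileSystem fs = FileSystem.get(conf);             // default file system, assumed to be HDFS
    // Namespace quota of the root directory as reported to clients.
    long nsQuota = fs.getContentSummary(new Path("/")).getQuota();
    System.out.println("Root namespace quota: " + nsQuota);
    // Expected after this change: 9223372036854775807 (Long.MAX_VALUE); before it: 2147483647.
  }
}

The same value also appears in the QUOTA column of hadoop fs -count -q /.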

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -62,6 +62,9 @@ Trunk (unreleased changes)
     IOExceptions of stream closures can mask root exceptions.  (Uma Maheswara
     Rao G via szetszwo)
 
+    HDFS-46.   Change default namespace quota of root directory from
+    Integer.MAX_VALUE to Long.MAX_VALUE.  (Uma Maheswara Rao G via szetszwo)
+
 Release 0.23.0 - Unreleased
 
   INCOMPATIBLE CHANGES

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -120,7 +120,7 @@ public class FSDirectory implements Closeable {
     this.cond = dirLock.writeLock().newCondition();
     rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
         ns.createFsOwnerPermissions(new FsPermission((short)0755)),
-        Integer.MAX_VALUE, UNKNOWN_DISK_SPACE);
+        Long.MAX_VALUE, UNKNOWN_DISK_SPACE);
     this.fsImage = fsImage;
     int configuredLimit = conf.getInt(
         DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
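
The third constructor argument above is the namespace quota applied to the root directory, i.e. an upper bound on the number of files and directories in the whole namespace. The snippet below is an illustration only (not the actual FSDirectory or quota-checking code) of why the int-sized default was a real limit: a namespace of a few billion inodes already exceeds Integer.MAX_VALUE, while Long.MAX_VALUE is unreachable in practice.

public class NsQuotaIllustration {
  /** Illustration only: a namespace quota is exceeded once the inode count passes it. */
  static boolean exceedsNsQuota(long inodeCount, long nsQuota) {
    return inodeCount > nsQuota;
  }

  public static void main(String[] args) {
    long inodes = 3000000000L;  // 3 billion files and directories
    System.out.println(exceedsNsQuota(inodes, Integer.MAX_VALUE)); // true  -> old default would reject further creates
    System.out.println(exceedsNsQuota(inodes, Long.MAX_VALUE));    // false -> new default does not
  }
}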

+ 15 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java

@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.OutputStream;
 import java.security.PrivilegedExceptionAction;
 
@@ -24,17 +28,15 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
-
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 /** A class for testing quota-related commands */
 public class TestQuota {
@@ -841,6 +843,14 @@ public class TestQuota {
     DFSAdmin admin = new DFSAdmin(conf);
 
     try {
+      
+      // Test for the default namespace quota
+      long nsQuota = FSImageTestUtil.getNSQuota(cluster.getNameNode()
+          .getNamesystem());
+      assertTrue(
+          "Default namespace quota expected to be Long.MAX_VALUE but was: "
+              + nsQuota, nsQuota == Long.MAX_VALUE);
+      
       Path dir = new Path("/test");
       boolean exceededQuota = false;
       ContentSummary c;

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java

@@ -412,4 +412,11 @@ public abstract class FSImageTestUtil {
   public static FSImage getFSImage(NameNode node) {
     return node.getFSImage();
   }
+
+  /**
+   * Get the namespace quota of the root directory.
+   */
+  public static long getNSQuota(FSNamesystem ns) {
+    return ns.dir.rootDir.getNsQuota();
+  }
 }
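
A minimal sketch (not part of this commit) of how the new helper might be used from another namenode test; MiniDFSCluster and HdfsConfiguration are assumed to be available, and getNamesystem() is used exactly as in the TestQuota change above.

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.junit.Test;

public class TestDefaultRootNsQuota {
  @Test
  public void testDefaultRootNsQuota() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      // The root directory should report the new default namespace quota.
      long nsQuota = FSImageTestUtil.getNSQuota(cluster.getNameNode().getNamesystem());
      assertEquals(Long.MAX_VALUE, nsQuota);
    } finally {
      cluster.shutdown();
    }
  }
}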