@@ -21,11 +21,13 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import java.io.OutputStream;
 import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -921,4 +923,42 @@ public class TestQuota {
       cluster.shutdown();
     }
   }
+
+  /**
+   * File count on root should return the total number of files in the
+   * filesystem, even when one folder contains more files than
+   * "dfs.content-summary.limit".
+   */
+  @Test
+  public void testHugeFileCount() throws IOException {
+    MiniDFSCluster cluster = null;
+    Configuration conf = new Configuration();
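+    // "dfs.content-summary.limit" caps the items counted per locking
+    // period; the five files in /Folder1 exceed a limit of 4.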
+    conf.setInt("dfs.content-summary.limit", 4);
+    try {
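+      // No datanodes needed: the files created below are empty, so only
+      // the namespace is exercised.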
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      DistributedFileSystem dfs = cluster.getFileSystem();
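+      // Create one more file in /Folder1 than the configured limit.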
+      for (int i = 1; i <= 5; i++) {
+        FSDataOutputStream out =
+            dfs.create(new Path("/Folder1/file" + i), (short) 1);
+        out.close();
+      }
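+      // A sixth file in a second folder verifies that counting resumes
+      // correctly after the limit is hit in /Folder1.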
+      FSDataOutputStream out = dfs.create(new Path("/Folder2/file6"), (short) 1);
+      out.close();
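+      // The summary on root must still report all six files.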
+      ContentSummary contentSummary = dfs.getContentSummary(new Path("/"));
+      assertEquals(6, contentSummary.getFileCount());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+        cluster = null;
+      }
+    }
+  }
 }