HDFS-9234. WebHdfs: getContentSummary() should give quota for storage types. Contributed by Surendra Singh Lilhore.

Xiaoyu Yao, 9 years ago
parent commit 41d3f8899d

+ 18 - 8
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java

@@ -20,11 +20,13 @@ package org.apache.hadoop.hdfs.web;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.ContentSummary.Builder;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
@@ -316,14 +318,22 @@ class JsonUtilClient {
     final long quota = ((Number) m.get("quota")).longValue();
     final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
     final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
-
-    return new ContentSummary.Builder()
-        .length(length)
-        .fileCount(fileCount)
-        .directoryCount(directoryCount)
-        .quota(quota)
-        .spaceConsumed(spaceConsumed)
-        .spaceQuota(spaceQuota).build();
+    final Map<?, ?> typem = (Map<?, ?>) m.get("typeQuota");
+
+    Builder contentSummaryBuilder = new ContentSummary.Builder().length(length)
+        .fileCount(fileCount).directoryCount(directoryCount).quota(quota)
+        .spaceConsumed(spaceConsumed).spaceQuota(spaceQuota);
+    if (typem != null) {
+      for (StorageType t : StorageType.getTypesSupportingQuota()) {
+        Map<?, ?> type = (Map<?, ?>) typem.get(t.toString());
+        if (type != null) {
+          contentSummaryBuilder = contentSummaryBuilder.typeQuota(t,
+              ((Number) type.get("quota")).longValue()).typeConsumed(t,
+              ((Number) type.get("consumed")).longValue());
+        }
+      }
+    }
+    return contentSummaryBuilder.build();
   }
 
   /** Convert a Json map to a MD5MD5CRC32FileChecksum. */

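Note: once JsonUtilClient parses the optional "typeQuota" object as above, callers read the per-storage-type values through the existing ContentSummary accessors. A minimal usage sketch (not part of this patch; `webHdfs` is an assumed FileSystem handle opened with the webhdfs:// scheme):

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;

// Fetch the summary over WebHDFS; JsonUtilClient builds it from the JSON.
ContentSummary summary = webHdfs.getContentSummary(new Path("/QuotaDir"));
for (StorageType t : StorageType.getTypesSupportingQuota()) {
  // Types absent from the JSON keep the builder defaults, i.e. unset.
  System.out.println(t + ": consumed=" + summary.getTypeConsumed(t)
      + " quota=" + summary.getTypeQuota(t));
}
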
+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -198,6 +198,9 @@ Trunk (Unreleased)
     HADOOP-11684. S3a to use thread pool that blocks clients.
     (Thomas Demoor and Aaron Fabbri via lei)
 
+    HDFS-9234. WebHdfs: getContentSummary() should give quota for storage types.
+    (Surendra Singh Lilhore via xyao)
+
   OPTIMIZATIONS
 
   BUG FIXES

+ 15 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java

@@ -253,6 +253,21 @@ public class JsonUtil {
     m.put("quota", contentsummary.getQuota());
     m.put("quota", contentsummary.getQuota());
     m.put("spaceConsumed", contentsummary.getSpaceConsumed());
     m.put("spaceConsumed", contentsummary.getSpaceConsumed());
     m.put("spaceQuota", contentsummary.getSpaceQuota());
     m.put("spaceQuota", contentsummary.getSpaceQuota());
+    final Map<String, Map<String, Long>> typeQuota =
+        new TreeMap<String, Map<String, Long>>();
+    for (StorageType t : StorageType.getTypesSupportingQuota()) {
+      long tQuota = contentsummary.getTypeQuota(t);
+      if (tQuota != HdfsConstants.QUOTA_RESET) {
+        Map<String, Long> type = typeQuota.get(t.toString());
+        if (type == null) {
+          type = new TreeMap<String, Long>();
+          typeQuota.put(t.toString(), type);
+        }
+        type.put("quota", contentsummary.getTypeQuota(t));
+        type.put("consumed", contentsummary.getTypeConsumed(t));
+      }
+    }
+    m.put("typeQuota", typeQuota);
     return toJsonString(ContentSummary.class, m);
   }
 

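Note: the hunk above skips any storage type whose quota equals HdfsConstants.QUOTA_RESET (no quota set), so only types with an explicit quota appear under "typeQuota". As an illustrative sketch (not from this patch), a summary built like this would serialize with only an "SSD" entry:

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.StorageType;

// Hypothetical summary: only SSD carries a quota, so only "SSD" would
// appear in the "typeQuota" object emitted by the server-side code above.
ContentSummary summary = new ContentSummary.Builder()
    .length(24930).fileCount(1).directoryCount(2)
    .quota(-1).spaceConsumed(24930).spaceQuota(-1)
    .typeQuota(StorageType.SSD, 10000)
    .typeConsumed(StorageType.SSD, 500)
    .build();
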
+ 83 - 1
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md

@@ -535,7 +535,25 @@ Other File System Operations
             "length"        : 24930,
             "length"        : 24930,
             "quota"         : -1,
             "quota"         : -1,
             "spaceConsumed" : 24930,
             "spaceConsumed" : 24930,
-            "spaceQuota"    : -1
+            "spaceQuota"    : -1,
+            "typeQuota":
+            {
+              "ARCHIVE":
+              {
+                "consumed": 500,
+                "quota": 10000
+              },
+              "DISK":
+              {
+                "consumed": 500,
+                "quota": 10000
+              },
+              "SSD":
+              {
+                "consumed": 500,
+                "quota": 10000
+              }
+            }
           }
         }
 
@@ -1260,6 +1278,70 @@ See also: [`MKDIRS`](#Make_a_Directory), [`RENAME`](#Rename_a_FileDirectory), [`
           "description": "The disk space quota.",
           "description": "The disk space quota.",
           "type"       : "integer",
           "type"       : "integer",
           "required"   : true
           "required"   : true
+        },
+        "typeQuota":
+        {
+          "type"      : "object",
+          "properties":
+          {
+            "ARCHIVE":
+            {
+              "type"      : "object",
+              "properties":
+              {
+                "consumed":
+                {
+                  "description": "The storage type space consumed.",
+                  "type"       : "integer",
+                  "required"   : true
+                },
+                "quota":
+                {
+                  "description": "The storage type quota.",
+                  "type"       : "integer",
+                  "required"   : true
+                }
+              }
+            },
+            "DISK":
+            {
+              "type"      : "object",
+              "properties":
+              {
+                "consumed":
+                {
+                  "description": "The storage type space consumed.",
+                  "type"       : "integer",
+                  "required"   : true
+                },
+                "quota":
+                {
+                  "description": "The storage type quota.",
+                  "type"       : "integer",
+                  "required"   : true
+                }
+              }
+            },
+            "SSD":
+            {
+              "type"      : "object",
+              "properties":
+              {
+                "consumed":
+                {
+                  "description": "The storage type space consumed.",
+                  "type"       : "integer",
+                  "required"   : true
+                },
+                "quota":
+                {
+                  "description": "The storage type quota.",
+                  "type"       : "integer",
+                  "required"   : true
+                }
+              }
+            }
+          }
         }
       }
     }

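Note: "typeQuota" only appears in the response when a storage-type quota has actually been set on the directory. An illustrative sketch (not part of the patch; assumes an open DistributedFileSystem handle `dfs`) of setting one so GETCONTENTSUMMARY returns an SSD entry like the example above:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;

Path dir = new Path("/QuotaDir");
dfs.mkdirs(dir);
// GETCONTENTSUMMARY on /QuotaDir should now report typeQuota.SSD.quota = 10000.
dfs.setQuotaByStorageType(dir, StorageType.SSD, 10000);
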
+ 24 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

@@ -39,10 +39,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -642,6 +644,28 @@ public class TestWebHDFS {
     }
   }
 
+  @Test
+  public void testContentSummary() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final Path path = new Path("/QuotaDir");
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+          conf, WebHdfsConstants.WEBHDFS_SCHEME);
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      dfs.mkdirs(path);
+      dfs.setQuotaByStorageType(path, StorageType.DISK, 100000);
+      ContentSummary contentSummary = webHdfs.getContentSummary(path);
+      Assert.assertTrue((contentSummary.getTypeQuota(
+          StorageType.DISK) == 100000));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   @Test
   public void testWebHdfsPread() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
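Note: a natural companion assertion (a sketch, not in this patch) checks the round-trip for types without a quota. JsonUtil omits them from "typeQuota" and JsonUtilClient leaves the builder defaults in place, so they should come back as unset:

// Only DISK had a quota set above; the other quota-capable types should
// round-trip through the WebHDFS JSON as unset (-1, HdfsConstants.QUOTA_RESET).
Assert.assertEquals(-1, contentSummary.getTypeQuota(StorageType.SSD));
Assert.assertEquals(-1, contentSummary.getTypeQuota(StorageType.ARCHIVE));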