
HDFS-8631. WebHDFS : Support setQuota. Contributed by Chao Sun.

Surendra Singh Lilhore 5 years ago
parent
commit
29bd6f3fc3
14 changed files with 423 additions and 10 deletions
  1. + 43 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  2. + 2 - 0   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
  3. + 2 - 0   hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
  4. + 2 - 0   hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  5. + 43 - 0  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
  6. + 44 - 0  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NameSpaceQuotaParam.java
  7. + 3 - 0   hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
  8. + 45 - 0  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageSpaceQuotaParam.java
  9. + 37 - 0  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageTypeParam.java
  10. + 8 - 2  hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java
  11. + 43 - 8 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
  12. + 64 - 0 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
  13. + 58 - 0 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
  14. + 29 - 0 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java

+ 43 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -1788,6 +1788,33 @@ public abstract class FileSystem extends Configured
     return getContentSummary(f);
   }
 
+  /**
+   * Set quota for the given {@link Path}.
+   *
+   * @param src the target path to set quota for
+   * @param namespaceQuota the namespace quota (i.e., # of files/directories)
+   *                       to set
+   * @param storagespaceQuota the storage space quota to set
+   * @throws IOException IO failure
+   */
+  public void setQuota(Path src, final long namespaceQuota,
+      final long storagespaceQuota) throws IOException {
+    methodNotSupported();
+  }
+
+  /**
+   * Set per storage type quota for the given {@link Path}.
+   *
+   * @param src the target path to set storage type quota for
+   * @param type the storage type to set
+   * @param quota the quota to set for the given storage type
+   * @throws IOException IO failure
+   */
+  public void setQuotaByStorageType(Path src, final StorageType type,
+      final long quota) throws IOException {
+    methodNotSupported();
+  }
+
   /**
    * The default filter accepts all paths.
    */
@@ -4455,6 +4482,22 @@ public abstract class FileSystem extends Configured
     return result;
   }
 
+  /**
+   * Helper method that throws an {@link UnsupportedOperationException} for the
+   * current {@link FileSystem} method being called.
+   */
+  private void methodNotSupported() {
+    // The order of the stacktrace elements is (from top to bottom):
+    //   - java.lang.Thread.getStackTrace
+    //   - org.apache.hadoop.fs.FileSystem.methodNotSupported
+    //   - <the FileSystem method>
+    // therefore, to find out the current method name, we use the element at
+    // index 2.
+    String name = Thread.currentThread().getStackTrace()[2].getMethodName();
+    throw new UnsupportedOperationException(getClass().getCanonicalName() +
+        " does not support method " + name);
+  }
+
   /**
    * Create instance of the standard {@link FSDataInputStreamBuilder} for the
    * given filesystem and path.

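The methodNotSupported() helper derives the name of the FileSystem method that invoked it by walking the current thread's stack trace. A minimal standalone sketch of the same technique (the class and method names below are illustrative, not part of the patch):

    public class StackNameDemo {
      // Stack layout, top to bottom: [0] Thread.getStackTrace,
      // [1] notSupported, [2] the public method that called it.
      private static void notSupported() {
        String name = Thread.currentThread().getStackTrace()[2].getMethodName();
        throw new UnsupportedOperationException("does not support method " + name);
      }

      public static void setQuota() {
        notSupported();
      }

      public static void main(String[] args) {
        try {
          setQuota();
        } catch (UnsupportedOperationException e) {
          System.out.println(e.getMessage()); // does not support method setQuota
        }
      }
    }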
+ 2 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java

@@ -135,6 +135,8 @@ public class TestFilterFileSystem {
     public Path fixRelativePart(Path p);
     public ContentSummary getContentSummary(Path f);
     public QuotaUsage getQuotaUsage(Path f);
+    void setQuota(Path f, long namespaceQuota, long storagespaceQuota);
+    void setQuotaByStorageType(Path f, StorageType type, long quota);
     StorageStatistics getStorageStatistics();
   }
 

+ 2 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java

@@ -118,6 +118,8 @@ public class TestHarFileSystem {
     public void processDeleteOnExit();
     public ContentSummary getContentSummary(Path f);
     public QuotaUsage getQuotaUsage(Path f);
+    void setQuota(Path f, long namespaceQuota, long storagespaceQuota);
+    void setQuotaByStorageType(Path f, StorageType type, long quota);
     public FsStatus getStatus();
     public FileStatus[] listStatus(Path f, PathFilter filter);
     public FileStatus[] listStatusBatch(Path f, byte[] token);

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -1000,6 +1000,7 @@ public class DistributedFileSystem extends FileSystem
    * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String,
    * long, long, StorageType)
    */
+  @Override
   public void setQuota(Path src, final long namespaceQuota,
       final long storagespaceQuota) throws IOException {
     statistics.incrementWriteOps(1);
@@ -1029,6 +1030,7 @@ public class DistributedFileSystem extends FileSystem
    * @param quota value of the specific storage type quota to be modified.
    * Maybe {@link HdfsConstants#QUOTA_RESET} to clear quota by storage type.
    */
+  @Override
   public void setQuotaByStorageType(Path src, final StorageType type,
       final long quota)
       throws IOException {

+ 43 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -75,6 +75,7 @@ import org.apache.hadoop.fs.GlobalStorageStatistics;
 import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageStatistics;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsCreateModes;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
@@ -1873,6 +1874,48 @@ public class WebHdfsFileSystem extends FileSystem
     }.run();
   }
 
+  @Override
+  public void setQuota(Path p, final long namespaceQuota,
+      final long storagespaceQuota) throws IOException {
+    // sanity check
+    if ((namespaceQuota <= 0 &&
+        namespaceQuota != HdfsConstants.QUOTA_RESET) ||
+        (storagespaceQuota < 0 &&
+            storagespaceQuota != HdfsConstants.QUOTA_RESET)) {
+      throw new IllegalArgumentException("Invalid values for quota : " +
+          namespaceQuota + " and " + storagespaceQuota);
+    }
+
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.SET_QUOTA_USAGE);
+
+    final HttpOpParam.Op op = PutOpParam.Op.SETQUOTA;
+    new FsPathRunner(op, p, new NameSpaceQuotaParam(namespaceQuota),
+        new StorageSpaceQuotaParam(storagespaceQuota)).run();
+  }
+
+  @Override
+  public void setQuotaByStorageType(Path path, StorageType type, long quota)
+      throws IOException {
+    if (quota <= 0 && quota != HdfsConstants.QUOTA_RESET) {
+      throw new IllegalArgumentException("Invalid values for quota :" + quota);
+    }
+    if (type == null) {
+      throw new IllegalArgumentException("Invalid storage type (null)");
+    }
+    if (!type.supportTypeQuota()) {
+      throw new IllegalArgumentException(
+          "Quota for storage type '" + type.toString() + "' is not supported");
+    }
+
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.SET_QUOTA_BYTSTORAGEYPE);
+
+    final HttpOpParam.Op op = PutOpParam.Op.SETQUOTABYSTORAGETYPE;
+    new FsPathRunner(op, path, new StorageTypeParam(type.name()),
+        new StorageSpaceQuotaParam(quota)).run();
+  }
+
   @Override
   public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
   ) throws IOException {

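With these overrides in place, and setQuota/setQuotaByStorageType now declared on the FileSystem base class, a WebHDFS client can set quotas the same way a DistributedFileSystem client does. A hedged usage sketch (the namenode host, port, path, and quota values are placeholders, not part of the patch):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.StorageType;

    public class WebHdfsQuotaExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // 9870 is the default NameNode HTTP port in Hadoop 3.x.
        FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:9870"), conf);
        Path dir = new Path("/user/example");
        // Allow at most 1000 files/directories and 10 GB of storage under dir.
        fs.setQuota(dir, 1000L, 10L * 1024 * 1024 * 1024);
        // Additionally cap SSD usage under dir at 1 GB.
        fs.setQuotaByStorageType(dir, StorageType.SSD, 1024L * 1024 * 1024);
        fs.close();
      }
    }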
+ 44 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NameSpaceQuotaParam.java

@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+
+/** The name space quota parameter for directory. */
+public class NameSpaceQuotaParam extends LongParam {
+  /** Parameter name. */
+  public static final String NAME = "namespacequota";
+  /** Default parameter value ({@link Long#MAX_VALUE}). */
+  public static final String DEFAULT = "9223372036854775807";
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  public NameSpaceQuotaParam(final Long value) {
+    super(DOMAIN, value, HdfsConstants.QUOTA_RESET,
+        HdfsConstants.QUOTA_DONT_SET);
+  }
+
+  public NameSpaceQuotaParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java

@@ -57,6 +57,9 @@ public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
     RENAMESNAPSHOT(false, HttpURLConnection.HTTP_OK),
     SETSTORAGEPOLICY(false, HttpURLConnection.HTTP_OK),
 
+    SETQUOTA(false, HttpURLConnection.HTTP_OK),
+    SETQUOTABYSTORAGETYPE(false, HttpURLConnection.HTTP_OK),
+
     NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 
     final boolean doOutputAndRedirect;

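Like the other PUT operations, the two new ops are selected by name through the op= query parameter. A small sketch of how the enum is resolved, assuming the existing PutOpParam(String) constructor (the demo class itself is illustrative):

    import org.apache.hadoop.hdfs.web.resources.PutOpParam;

    public class OpParamDemo {
      public static void main(String[] args) {
        // "op=SETQUOTA" in the request URI is parsed into the enum constant.
        PutOpParam op = new PutOpParam("SETQUOTA");
        System.out.println(op.getValue());  // SETQUOTA
      }
    }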
+ 45 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageSpaceQuotaParam.java

@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+
+/** The storage space quota parameter for directory. */
+public class StorageSpaceQuotaParam extends LongParam {
+  /** Parameter name. */
+  public static final String NAME = "storagespacequota";
+  /** Default parameter value ({@link Long#MAX_VALUE}). */
+  public static final String DEFAULT = "9223372036854775807";
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  public StorageSpaceQuotaParam(final Long value) {
+    super(DOMAIN, value, HdfsConstants.QUOTA_RESET,
+        HdfsConstants.QUOTA_DONT_SET);
+  }
+
+  public StorageSpaceQuotaParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
+

+ 37 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageTypeParam.java

@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** storage type parameter. */
+public class StorageTypeParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "storagetype";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  public StorageTypeParam(final String str) {
+    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}

+ 8 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java

@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.web.resources.GroupParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
 import org.apache.hadoop.hdfs.web.resources.LengthParam;
 import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
+import org.apache.hadoop.hdfs.web.resources.NameSpaceQuotaParam;
 import org.apache.hadoop.hdfs.web.resources.NewLengthParam;
 import org.apache.hadoop.hdfs.web.resources.NoRedirectParam;
 import org.apache.hadoop.hdfs.web.resources.OffsetParam;
@@ -73,6 +74,8 @@ import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.SnapshotNameParam;
 import org.apache.hadoop.hdfs.web.resources.StartAfterParam;
 import org.apache.hadoop.hdfs.web.resources.StoragePolicyParam;
+import org.apache.hadoop.hdfs.web.resources.StorageSpaceQuotaParam;
+import org.apache.hadoop.hdfs.web.resources.StorageTypeParam;
 import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.TokenKindParam;
 import org.apache.hadoop.hdfs.web.resources.TokenServiceParam;
@@ -209,7 +212,10 @@ public class RouterWebHdfsMethods extends NamenodeWebHdfsMethods {
       final CreateFlagParam createFlagParam,
       final NoRedirectParam noredirectParam,
       final StoragePolicyParam policyName,
-      final ECPolicyParam ecpolicy
+      final ECPolicyParam ecpolicy,
+      final NameSpaceQuotaParam namespaceQuota,
+      final StorageSpaceQuotaParam storagespaceQuota,
+      final StorageTypeParam storageType
   ) throws IOException, URISyntaxException {
 
     switch(op.getValue()) {
@@ -261,7 +267,7 @@ public class RouterWebHdfsMethods extends NamenodeWebHdfsMethods {
           accessTime, renameOptions, createParent, delegationTokenArgument,
           aclPermission, xattrName, xattrValue, xattrSetFlag, snapshotName,
           oldSnapshotName, exclDatanodes, createFlagParam, noredirectParam,
-          policyName, ecpolicy);
+          policyName, ecpolicy, namespaceQuota, storagespaceQuota, storageType);
     }
     default:
       throw new UnsupportedOperationException(op + " is not supported");

+ 43 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

@@ -56,6 +56,8 @@ import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.Response.Status;
 
 import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -509,14 +511,24 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(StoragePolicyParam.NAME) @DefaultValue(StoragePolicyParam
           .DEFAULT) final StoragePolicyParam policyName,
       @QueryParam(ECPolicyParam.NAME) @DefaultValue(ECPolicyParam
-              .DEFAULT) final ECPolicyParam ecpolicy
-      ) throws IOException, InterruptedException {
+              .DEFAULT) final ECPolicyParam ecpolicy,
+      @QueryParam(NameSpaceQuotaParam.NAME)
+      @DefaultValue(NameSpaceQuotaParam.DEFAULT)
+      final NameSpaceQuotaParam namespaceQuota,
+      @QueryParam(StorageSpaceQuotaParam.NAME)
+      @DefaultValue(StorageSpaceQuotaParam.DEFAULT)
+      final StorageSpaceQuotaParam storagespaceQuota,
+      @QueryParam(StorageTypeParam.NAME)
+      @DefaultValue(StorageTypeParam.DEFAULT)
+      final StorageTypeParam storageType
+  ) throws IOException, InterruptedException {
     return put(ugi, delegation, username, doAsUser, ROOT, op, destination,
         owner, group, permission, unmaskedPermission, overwrite, bufferSize,
         replication, blockSize, modificationTime, accessTime, renameOptions,
         createParent, delegationTokenArgument, aclPermission, xattrName,
         xattrValue, xattrSetFlag, snapshotName, oldSnapshotName,
-        excludeDatanodes, createFlagParam, noredirect, policyName, ecpolicy);
+        excludeDatanodes, createFlagParam, noredirect, policyName, ecpolicy,
+        namespaceQuota, storagespaceQuota, storageType);
   }
 
   /** Validate all required params. */
@@ -598,15 +610,23 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(StoragePolicyParam.NAME) @DefaultValue(StoragePolicyParam
           .DEFAULT) final StoragePolicyParam policyName,
       @QueryParam(ECPolicyParam.NAME) @DefaultValue(ECPolicyParam.DEFAULT)
-      final ECPolicyParam ecpolicy
+          final ECPolicyParam ecpolicy,
+      @QueryParam(NameSpaceQuotaParam.NAME)
+      @DefaultValue(NameSpaceQuotaParam.DEFAULT)
+          final NameSpaceQuotaParam namespaceQuota,
+      @QueryParam(StorageSpaceQuotaParam.NAME)
+      @DefaultValue(StorageSpaceQuotaParam.DEFAULT)
+          final StorageSpaceQuotaParam storagespaceQuota,
+      @QueryParam(StorageTypeParam.NAME) @DefaultValue(StorageTypeParam.DEFAULT)
+          final StorageTypeParam storageType
       ) throws IOException, InterruptedException {
-
     init(ugi, delegation, username, doAsUser, path, op, destination, owner,
         group, permission, unmaskedPermission, overwrite, bufferSize,
         replication, blockSize, modificationTime, accessTime, renameOptions,
         delegationTokenArgument, aclPermission, xattrName, xattrValue,
         xattrSetFlag, snapshotName, oldSnapshotName, excludeDatanodes,
-        createFlagParam, noredirect, policyName);
+        createFlagParam, noredirect, policyName, ecpolicy,
+        namespaceQuota, storagespaceQuota, storageType);
 
     return doAs(ugi, new PrivilegedExceptionAction<Response>() {
       @Override
@@ -618,7 +638,8 @@ public class NamenodeWebHdfsMethods {
               renameOptions, createParent, delegationTokenArgument,
               aclPermission, xattrName, xattrValue, xattrSetFlag,
               snapshotName, oldSnapshotName, excludeDatanodes,
-              createFlagParam, noredirect, policyName, ecpolicy);
+              createFlagParam, noredirect, policyName, ecpolicy,
+              namespaceQuota, storagespaceQuota, storageType);
       }
     });
   }
@@ -654,7 +675,10 @@ public class NamenodeWebHdfsMethods {
       final CreateFlagParam createFlagParam,
       final NoRedirectParam noredirectParam,
       final StoragePolicyParam policyName,
-      final ECPolicyParam ecpolicy
+      final ECPolicyParam ecpolicy,
+      final NameSpaceQuotaParam namespaceQuota,
+      final StorageSpaceQuotaParam storagespaceQuota,
+      final StorageTypeParam storageType
       ) throws IOException, URISyntaxException {
     final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
     final ClientProtocol cp = getRpcClientProtocol();
@@ -831,6 +855,17 @@ public class NamenodeWebHdfsMethods {
       validateOpParams(op, ecpolicy);
       cp.setErasureCodingPolicy(fullpath, ecpolicy.getValue());
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
+    case SETQUOTA:
+      validateOpParams(op, namespaceQuota, storagespaceQuota);
+      cp.setQuota(fullpath, namespaceQuota.getValue(),
+          storagespaceQuota.getValue(), null);
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
+    case SETQUOTABYSTORAGETYPE:
+      validateOpParams(op, storagespaceQuota, storageType);
+      cp.setQuota(fullpath, HdfsConstants.QUOTA_DONT_SET,
+          storagespaceQuota.getValue(),
+          StorageType.parseStorageType(storageType.getValue()));
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }

+ 64 - 0
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md

@@ -832,6 +832,34 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getConten
 
 See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getQuotaUsage
 
+### Set Quota
+
+* Submit a HTTP PUT request.
+
+        curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETQUOTA
+                                      &namespacequota=<QUOTA>[&storagespacequota=<QUOTA>]"
+
+    The client receives a response with zero content length:
+
+        HTTP/1.1 200 OK
+        Content-Length: 0
+
+See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setQuota
+
+### Set Quota By Storage Type
+
+* Submit a HTTP PUT request.
+
+        curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETQUOTABYSTORAGETYPE
+                                      &storagetype=<STORAGETYPE>&storagespacequota=<QUOTA>"
+
+    The client receives a response with zero content length:
+
+        HTTP/1.1 200 OK
+        Content-Length: 0
+
+See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setQuotaByStorageType
+
 ### Get File Checksum
 
 * Submit a HTTP GET request.
@@ -3179,6 +3207,42 @@ See also: [Authentication](#Authentication)
 
 See also: [Create and Write to a File](#Create_and_Write_to_a_File)
 
+### Namespace Quota
+
+| Name | `namespacequota` |
+|:---- |:---- |
+| Description | Limit on the namespace usage, i.e., number of files/directories, under a directory. |
+| Type | String |
+| Default Value | Long.MAX_VALUE |
+| Valid Values | \> 0. |
+| Syntax | Any integer. |
+
+See also: [`SETQUOTA`](#Set_Quota)
+
+### Storage Space Quota
+
+| Name | `storagespacequota` |
+|:---- |:---- |
+| Description | Limit on storage space usage (in bytes, including replication) under a directory. |
+| Type | String |
+| Default Value | Long.MAX_VALUE |
+| Valid Values | \> 0. |
+| Syntax | Any integer. |
+
+See also: [`SETQUOTA`](#Set_Quota), [`SETQUOTABYSTORAGETYPE`](#Set_Quota_By_Storage_Type)
+
+### Storage Type
+
+| Name | `storagetype` |
+|:---- |:---- |
+| Description | Storage type of the specific storage type quota to be modified. |
+| Type | String |
+| Default Value | \<empty\> |
+| Valid Values | Any valid storage type. |
+| Syntax | Any string. |
+
+See also: [`SETQUOTABYSTORAGETYPE`](#Set_Quota_By_Storage_Type)
+
 ### Storage Policy
 
 | Name | `storagepolicy` |

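As a concrete illustration of the two documented operations (host, port, path, and quota values are placeholders), capping /user/example at 1000 namespace objects and 10 GB of storage space, then limiting its SSD usage to 1 GB:

    curl -i -X PUT "http://namenode:9870/webhdfs/v1/user/example?op=SETQUOTA&namespacequota=1000&storagespacequota=10737418240"
    curl -i -X PUT "http://namenode:9870/webhdfs/v1/user/example?op=SETQUOTABYSTORAGETYPE&storagetype=SSD&storagespacequota=1073741824"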
+ 58 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

@@ -55,6 +55,7 @@ import java.util.Random;
 import com.google.common.collect.ImmutableList;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -1129,9 +1130,66 @@ public class TestWebHDFS {
         cluster.shutdown();
       }
     }
+  }
+
+  @Test
+  public void testSetQuota() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final Path path = new Path("/TestDir");
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+          conf, WebHdfsConstants.WEBHDFS_SCHEME);
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+
+      final long nsQuota = 100;
+      final long spaceQuota = 1024;
+
+      webHdfs.mkdirs(path);
+
+      webHdfs.setQuota(path, nsQuota, spaceQuota);
+      QuotaUsage quotaUsage = dfs.getQuotaUsage(path);
+      assertEquals(nsQuota, quotaUsage.getQuota());
+      assertEquals(spaceQuota, quotaUsage.getSpaceQuota());
+
+      webHdfs.setQuota(path,
+          HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
+      quotaUsage = dfs.getQuotaUsage(path);
+      assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getQuota());
+      assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getSpaceQuota());
+
+      webHdfs.setQuotaByStorageType(path, StorageType.DISK, spaceQuota);
+      webHdfs.setQuotaByStorageType(path, StorageType.ARCHIVE, spaceQuota);
+      webHdfs.setQuotaByStorageType(path, StorageType.SSD, spaceQuota);
+      quotaUsage = dfs.getQuotaUsage(path);
+      assertEquals(spaceQuota, quotaUsage.getTypeQuota(StorageType.DISK));
+      assertEquals(spaceQuota, quotaUsage.getTypeQuota(StorageType.ARCHIVE));
+      assertEquals(spaceQuota, quotaUsage.getTypeQuota(StorageType.SSD));
+
+      // Test invalid parameters
+
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuota(path, -100, 100));
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuota(path, 100, -100));
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuotaByStorageType(path, StorageType.SSD, -100));
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuotaByStorageType(path, null, 100));
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuotaByStorageType(path, StorageType.SSD, -100));
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuotaByStorageType(path, StorageType.RAM_DISK, 100));
 
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
   }
 
+
   @Test
   public void testWebHdfsPread() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();

+ 29 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.web.resources;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 
@@ -25,6 +26,7 @@ import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 
+import org.apache.hadoop.fs.StorageType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -505,6 +507,33 @@ public class TestParam {
     Assert.assertEquals("COLD", p.getValue());
   }
 
+  @Test
+  public void testNamespaceQuotaParam() {
+    NameSpaceQuotaParam p =
+        new NameSpaceQuotaParam(NameSpaceQuotaParam.DEFAULT);
+    assertEquals(Long.valueOf(NameSpaceQuotaParam.DEFAULT), p.getValue());
+    p = new NameSpaceQuotaParam(100L);
+    assertEquals(100L, p.getValue().longValue());
+  }
+
+  @Test
+  public void testStorageSpaceQuotaParam() {
+    StorageSpaceQuotaParam sp = new StorageSpaceQuotaParam(
+        StorageSpaceQuotaParam.DEFAULT);
+    assertEquals(Long.valueOf(StorageSpaceQuotaParam.DEFAULT),
+        sp.getValue());
+    sp = new StorageSpaceQuotaParam(100L);
+    assertEquals(100L, sp.getValue().longValue());
+  }
+
+  @Test
+  public void testStorageTypeParam() {
+    StorageTypeParam p = new StorageTypeParam(StorageTypeParam.DEFAULT);
+    assertNull(p.getValue());
+    p = new StorageTypeParam(StorageType.DISK.name());
+    assertEquals(StorageType.DISK.name(), p.getValue());
+  }
+
   @Test
   public void testECPolicyParam() {
     ECPolicyParam p = new ECPolicyParam(ECPolicyParam.DEFAULT);