HDDS-738. Removing REST protocol support from OzoneClient.
Contributed by Elek, Marton.

Anu Engineer 5 years ago
parent
commit
dc72782008
100 changed files with 308 additions and 15177 deletions (per-file columns below: additions, deletions)
  1. 0 3
      hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
  2. 1 14
      hadoop-hdds/common/src/main/resources/ozone-default.xml
  3. 2 0
      hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
  4. 15 26
      hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java
  5. 11 116
      hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java
  6. 0 91
      hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java
  7. 0 36
      hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java
  8. 0 45
      hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneExceptionMapper.java
  9. 0 1130
      hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java
  10. 0 40
      hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java
  11. 0 22
      hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/exceptions/package-info.java
  12. 0 22
      hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java
  13. 0 23
      hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java
  14. 0 267
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneException.java
  15. 0 74
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java
  16. 0 22
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java
  17. 0 22
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java
  18. 0 248
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/BucketInfo.java
  19. 0 236
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfo.java
  20. 0 118
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfoDetails.java
  21. 0 89
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyLocation.java
  22. 0 215
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeInfo.java
  23. 0 61
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeOwner.java
  24. 0 24
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/package-info.java
  25. 0 23
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java
  26. 0 134
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java
  27. 0 117
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyArgs.java
  28. 0 142
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/ListArgs.java
  29. 0 172
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/UserArgs.java
  30. 0 143
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeArgs.java
  31. 0 22
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/package-info.java
  32. 0 214
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/OzoneQuota.java
  33. 0 23
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/package-info.java
  34. 0 335
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
  35. 0 311
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
  36. 0 80
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfoDetails.java
  37. 0 82
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyLocation.java
  38. 0 154
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
  39. 0 209
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
  40. 0 152
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java
  41. 0 308
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java
  42. 0 60
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeOwner.java
  43. 0 23
      hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/package-info.java
  44. 0 82
      hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java
  45. 0 115
      hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java
  46. 0 101
      hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestUtils.java
  47. 0 73
      hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestVolumeStructs.java
  48. 0 4
      hadoop-ozone/datanode/pom.xml
  49. 0 4
      hadoop-ozone/dist/dev-support/bin/dist-layout-stitching
  50. 25 18
      hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot
  51. 9 9
      hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot
  52. 0 4
      hadoop-ozone/integration-test/pom.xml
  53. 0 9
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
  54. 0 16
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
  55. 12 14
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
  56. 95 0
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java
  57. 0 413
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneHelper.java
  58. 22 50
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
  59. 21 21
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
  60. 1 2
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java
  61. 11 11
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
  62. 0 208
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java
  63. 38 55
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java
  64. 24 69
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java
  65. 4 14
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java
  66. 0 1449
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java
  67. 12 16
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
  68. 5 23
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java
  69. 0 1266
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
  70. 0 221
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
  71. 0 187
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
  72. 0 118
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
  73. 0 349
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
  74. 0 108
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java
  75. 0 734
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
  76. 0 126
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java
  77. 0 304
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java
  78. 0 381
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
  79. 0 155
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java
  80. 0 22
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/package-info.java
  81. 0 22
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/package-info.java
  82. 0 126
      hadoop-ozone/objectstore-service/pom.xml
  83. 0 160
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
  84. 0 22
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/package-info.java
  85. 0 222
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/OzoneRestUtils.java
  86. 0 22
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/package-info.java
  87. 0 59
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/ObjectStoreApplication.java
  88. 0 89
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/OzoneHddsDatanodeService.java
  89. 0 225
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java
  90. 0 22
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/exceptions/package-info.java
  91. 0 190
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java
  92. 0 294
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
  93. 0 302
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java
  94. 0 235
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java
  95. 0 61
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/ServiceFilter.java
  96. 0 80
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/StorageHandlerBuilder.java
  97. 0 75
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/UserHandlerBuilder.java
  98. 0 274
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeHandler.java
  99. 0 276
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java
  100. 0 22
      hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/package-info.java

+ 0 - 3
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java

@@ -125,9 +125,6 @@ public final class OzoneConfigKeys {
    * */
   public static final String OZONE_ADMINISTRATORS_WILDCARD = "*";
 
-  public static final String OZONE_CLIENT_PROTOCOL =
-      "ozone.client.protocol";
-
   public static final String OZONE_CLIENT_STREAM_BUFFER_FLUSH_SIZE =
       "ozone.client.stream.buffer.flush.size";
 

+ 1 - 14
hadoop-hdds/common/src/main/resources/ozone-default.xml

@@ -461,19 +461,6 @@
       there is no wait.
     </description>
   </property>
-  <property>
-    <name>ozone.client.protocol</name>
-    <value>org.apache.hadoop.ozone.client.rpc.RpcClient</value>
-    <tag>OZONE, CLIENT, MANAGEMENT</tag>
-    <description>Protocol class to be used by the client to connect to ozone
-      cluster.
-      The build-in implementation includes:
-      org.apache.hadoop.ozone.client.rpc.RpcClient for RPC
-      org.apache.hadoop.ozone.client.rest.RestClient for REST
-      The default is the RpClient. Please do not change this unless you have a
-      very good understanding of what you are doing.
-    </description>
-  </property>
   <property>
     <name>ozone.client.socket.timeout</name>
     <value>5000ms</value>
@@ -1261,7 +1248,7 @@
 
   <property>
     <name>hdds.datanode.plugins</name>
-    <value>org.apache.hadoop.ozone.web.OzoneHddsDatanodeService</value>
+    <value></value>
     <description>
       Comma-separated list of HDDS datanode plug-ins to be activated when
       HDDS service starts as part of datanode.

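With ozone.client.protocol removed from ozone-default.xml, the client implementation is no longer selectable through configuration: OzoneClientFactory always builds the RPC-backed client. A minimal usage sketch after this change (the volume name is illustrative, and OzoneClient is assumed to be Closeable as in the rest of the client API):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

public class RpcOnlyClientExample {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Setting ozone.client.protocol here would have no effect any more;
    // the factory no longer reads that key.
    try (OzoneClient client = OzoneClientFactory.getClient(conf)) {
      client.getObjectStore().createVolume("vol1"); // illustrative name
    }
  }
}
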
+ 2 - 0
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.client;
 
+import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
@@ -211,6 +212,7 @@ public class OzoneBucket extends WithMetadata {
    *
    * @return acls
    */
+  @JsonIgnore
   public List<OzoneAcl> getAcls() throws IOException {
     return proxy.getAcl(ozoneObj);
   }

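The @JsonIgnore added above matters because getAcls() now performs a remote call and can throw IOException; without the annotation, Jackson would invoke the getter whenever an OzoneBucket is serialized. A self-contained sketch of the behavior, using a stand-in Bucket class rather than the real OzoneBucket:

import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.databind.ObjectMapper;

public class JsonIgnoreDemo {
  // Stand-in for OzoneBucket: one cheap getter, one getter that does I/O.
  static class Bucket {
    public String getName() { return "b1"; }

    // Without @JsonIgnore, Jackson would call this during serialization
    // and surface the IOException as a serialization failure.
    @JsonIgnore
    public java.util.List<String> getAcls() throws java.io.IOException {
      throw new java.io.IOException("remote call");
    }
  }

  public static void main(String[] args) throws Exception {
    // Prints {"name":"b1"}; getAcls() is never invoked.
    System.out.println(new ObjectMapper().writeValueAsString(new Bucket()));
  }
}
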
+ 15 - 26
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientException.java

@@ -17,38 +17,27 @@
  */
 package org.apache.hadoop.ozone.client;
 
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-
 /**
  * This exception is thrown by the Ozone Clients.
  */
-public class OzoneClientException extends OzoneException {
-  /**
-   * Constructor that allows the shortMessage.
-   *
-   * @param shortMessage Short Message
-   */
-  public OzoneClientException(String shortMessage) {
-    super(0, shortMessage, shortMessage);
+public class OzoneClientException extends Exception {
+  public OzoneClientException() {
+  }
+
+  public OzoneClientException(String s) {
+    super(s);
+  }
+
+  public OzoneClientException(String s, Throwable throwable) {
+    super(s, throwable);
   }
 
-  /**
-   * Constructor that allows a shortMessage and an exception.
-   *
-   * @param shortMessage short message
-   * @param ex exception
-   */
-  public OzoneClientException(String shortMessage, Exception ex) {
-    super(0, shortMessage, shortMessage, ex);
+  public OzoneClientException(Throwable throwable) {
+    super(throwable);
   }
 
-  /**
-   * Constructor that allows the shortMessage and a longer message.
-   *
-   * @param shortMessage Short Message
-   * @param message long error message
-   */
-  public OzoneClientException(String shortMessage, String message) {
-    super(0, shortMessage, message);
+  public OzoneClientException(String s, Throwable throwable, boolean b,
+      boolean b1) {
+    super(s, throwable, b, b1);
   }
 }

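OzoneClientException now mirrors the standard java.lang.Exception constructor set (no-arg, message, cause, message plus cause, and the message/cause/suppression/writable-stack-trace variant), so it chains causes like any other checked exception. A hedged usage sketch:

import java.io.FileNotFoundException;

import org.apache.hadoop.ozone.client.OzoneClientException;

public class ExceptionChainingExample {
  public static void main(String[] args) {
    try {
      // Wrap a lower-level failure the way client code typically would.
      throw new OzoneClientException("volume lookup failed",
          new FileNotFoundException("vol1")); // illustrative cause
    } catch (OzoneClientException e) {
      System.err.println(e.getMessage() + ", cause: " + e.getCause());
    }
  }
}
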
+ 11 - 116
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java

@@ -18,37 +18,22 @@
 
 package org.apache.hadoop.ozone.client;
 
-import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.lang.reflect.Proxy;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rest.RestClient;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
 
+import com.google.common.base.Preconditions;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Proxy;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_PROTOCOL;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY;
-
 /**
- * Factory class to create different types of OzoneClients.
- * Based on <code>ozone.client.protocol</code>, it decides which
- * protocol to use for the communication.
- * Default value is
- * <code>org.apache.hadoop.ozone.client.rpc.RpcClient</code>.<br>
- * OzoneClientFactory constructs a proxy using
- * {@link OzoneClientInvocationHandler}
- * and creates OzoneClient instance with it.
- * {@link OzoneClientInvocationHandler} dispatches the call to
- * underlying {@link ClientProtocol} implementation.
+ * Factory class to create OzoneClients.
  */
 public final class OzoneClientFactory {
 
@@ -87,9 +72,7 @@ public final class OzoneClientFactory {
   public static OzoneClient getClient(Configuration config)
       throws IOException {
     Preconditions.checkNotNull(config);
-    Class<? extends ClientProtocol> clazz = (Class<? extends ClientProtocol>)
-        config.getClass(OZONE_CLIENT_PROTOCOL, RpcClient.class);
-    return getClient(getClientProtocol(clazz, config), config);
+    return getClient(getClientProtocol(config), config);
   }
 
   /**
@@ -166,85 +149,7 @@ public final class OzoneClientFactory {
   public static OzoneClient getRpcClient(Configuration config)
       throws IOException {
     Preconditions.checkNotNull(config);
-    return getClient(getClientProtocol(RpcClient.class, config),
-        config);
-  }
-
-  /**
-   * Returns an OzoneClient which will use REST protocol.
-   *
-   * @param omHost
-   *        hostname of OzoneManager to connect.
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getRestClient(String omHost)
-      throws IOException {
-    Configuration config = new OzoneConfiguration();
-    int port = OmUtils.getOmRestPort(config);
-    return getRestClient(omHost, port, config);
-  }
-
-  /**
-   * Returns an OzoneClient which will use REST protocol.
-   *
-   * @param omHost
-   *        hostname of OzoneManager to connect.
-   *
-   * @param omHttpPort
-   *        HTTP port of OzoneManager.
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getRestClient(String omHost, Integer omHttpPort)
-      throws IOException {
-    return getRestClient(omHost, omHttpPort, new OzoneConfiguration());
-  }
-
-  /**
-   * Returns an OzoneClient which will use REST protocol.
-   *
-   * @param omHost
-   *        hostname of OzoneManager to connect.
-   *
-   * @param omHttpPort
-   *        HTTP port of OzoneManager.
-   *
-   * @param config
-   *        Configuration to be used for OzoneClient creation
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getRestClient(String omHost, Integer omHttpPort,
-                                          Configuration config)
-      throws IOException {
-    Preconditions.checkNotNull(omHost);
-    Preconditions.checkNotNull(omHttpPort);
-    Preconditions.checkNotNull(config);
-    config.set(OZONE_OM_HTTP_ADDRESS_KEY, omHost + ":" + omHttpPort);
-    return getRestClient(config);
-  }
-
-  /**
-   * Returns an OzoneClient which will use REST protocol.
-   *
-   * @param config
-   *        Configuration to be used for OzoneClient creation
-   *
-   * @return OzoneClient
-   *
-   * @throws IOException
-   */
-  public static OzoneClient getRestClient(Configuration config)
-      throws IOException {
-    Preconditions.checkNotNull(config);
-    return getClient(getClientProtocol(RestClient.class, config),
+    return getClient(getClientProtocol(config),
         config);
   }
 
@@ -270,8 +175,6 @@ public final class OzoneClientFactory {
   /**
    * Returns an instance of Protocol class.
    *
-   * @param protocolClass
-   *        Class object of the ClientProtocol.
    *
    * @param config
    *        Configuration used to initialize ClientProtocol.
@@ -280,23 +183,15 @@ public final class OzoneClientFactory {
    *
    * @throws IOException
    */
-  private static ClientProtocol getClientProtocol(
-      Class<? extends ClientProtocol> protocolClass, Configuration config)
+  private static ClientProtocol getClientProtocol(Configuration config)
       throws IOException {
     try {
-      LOG.debug("Using {} as client protocol.",
-          protocolClass.getCanonicalName());
-      Constructor<? extends ClientProtocol> ctor =
-          protocolClass.getConstructor(Configuration.class);
-      return ctor.newInstance(config);
+      return new RpcClient(config);
     } catch (Exception e) {
-      final String message = "Couldn't create protocol " + protocolClass;
+      final String message = "Couldn't create RpcClient protocol";
       LOG.error(message + " exception: ", e);
       if (e.getCause() instanceof IOException) {
         throw (IOException) e.getCause();
-      } else if (e instanceof InvocationTargetException) {
-        throw new IOException(message,
-            ((InvocationTargetException) e).getTargetException());
       } else {
         throw new IOException(message, e);
       }

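After this change getClientProtocol() constructs RpcClient directly instead of loading a protocol class reflectively, so getClient() and getRpcClient() converge on the same code path. A minimal sketch showing that both factory entry points now yield RPC-backed clients:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

public class FactoryConvergenceExample {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Both proxies are backed by RpcClient; there is no REST alternative.
    try (OzoneClient a = OzoneClientFactory.getClient(conf);
         OzoneClient b = OzoneClientFactory.getRpcClient(conf)) {
      System.out.println("both clients created over RPC");
    }
  }
}
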
+ 0 - 91
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientUtils.java

@@ -17,106 +17,15 @@
  */
 package org.apache.hadoop.ozone.client;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.hadoop.hdds.client.OzoneQuota;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
-import org.apache.hadoop.ozone.client.rest.response.KeyInfo;
-import org.apache.hadoop.ozone.client.rest.response.KeyInfoDetails;
-import org.apache.hadoop.ozone.client.rest.response.KeyLocation;
-import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
-import org.apache.hadoop.ozone.client.rest.response.VolumeOwner;
 
 /** A utility class for OzoneClient. */
 public final class OzoneClientUtils {
 
   private OzoneClientUtils() {}
-  /**
-   * Returns a BucketInfo object constructed using fields of the input
-   * OzoneBucket object.
-   *
-   * @param bucket OzoneBucket instance from which BucketInfo object needs to
-   *               be created.
-   * @return BucketInfo instance
-   */
-  public static BucketInfo asBucketInfo(OzoneBucket bucket) throws IOException {
-    BucketInfo bucketInfo =
-        new BucketInfo(bucket.getVolumeName(), bucket.getName());
-    bucketInfo
-        .setCreatedOn(HddsClientUtils.formatDateTime(bucket.getCreationTime()));
-    bucketInfo.setStorageType(bucket.getStorageType());
-    bucketInfo.setVersioning(
-        OzoneConsts.Versioning.getVersioning(bucket.getVersioning()));
-    bucketInfo.setEncryptionKeyName(
-        bucket.getEncryptionKeyName()==null? "N/A" :
-            bucket.getEncryptionKeyName());
-    return bucketInfo;
-  }
-
-  /**
-   * Returns a VolumeInfo object constructed using fields of the input
-   * OzoneVolume object.
-   *
-   * @param volume OzoneVolume instance from which VolumeInfo object needs to
-   *               be created.
-   * @return VolumeInfo instance
-   */
-  public static VolumeInfo asVolumeInfo(OzoneVolume volume) {
-    VolumeInfo volumeInfo = new VolumeInfo(volume.getName(),
-        HddsClientUtils.formatDateTime(volume.getCreationTime()),
-        volume.getOwner());
-    volumeInfo.setQuota(OzoneQuota.getOzoneQuota(volume.getQuota()));
-    volumeInfo.setOwner(new VolumeOwner(volume.getOwner()));
-    return volumeInfo;
-  }
-
-  /**
-   * Returns a KeyInfo object constructed using fields of the input
-   * OzoneKey object.
-   *
-   * @param key OzoneKey instance from which KeyInfo object needs to
-   *            be created.
-   * @return KeyInfo instance
-   */
-  public static KeyInfo asKeyInfo(OzoneKey key) {
-    KeyInfo keyInfo = new KeyInfo();
-    keyInfo.setKeyName(key.getName());
-    keyInfo.setCreatedOn(HddsClientUtils.formatDateTime(key.getCreationTime()));
-    keyInfo.setModifiedOn(
-        HddsClientUtils.formatDateTime(key.getModificationTime()));
-    keyInfo.setSize(key.getDataSize());
-    return keyInfo;
-  }
-
-  /**
-   * Returns a KeyInfoDetails object constructed using fields of the input
-   * OzoneKeyDetails object.
-   *
-   * @param key OzoneKeyDetails instance from which KeyInfo object needs to
-   *            be created.
-   * @return KeyInfoDetails instance
-   */
-  public static KeyInfoDetails asKeyInfoDetails(OzoneKeyDetails key) {
-    KeyInfoDetails keyInfo = new KeyInfoDetails();
-    keyInfo.setKeyName(key.getName());
-    keyInfo.setCreatedOn(HddsClientUtils.formatDateTime(key.getCreationTime()));
-    keyInfo.setModifiedOn(
-        HddsClientUtils.formatDateTime(key.getModificationTime()));
-    keyInfo.setSize(key.getDataSize());
-    List<KeyLocation> keyLocations = new ArrayList<>();
-    key.getOzoneKeyLocations().forEach((a) -> keyLocations.add(new KeyLocation(
-        a.getContainerID(), a.getLocalID(), a.getLength(), a.getOffset())));
-    keyInfo.setKeyLocation(keyLocations);
-    keyInfo.setFileEncryptionInfo(key.getFileEncryptionInfo());
-    return keyInfo;
-  }
 
   public static RetryPolicy createRetryPolicy(int maxRetryCount,
       long retryInterval) {

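With the REST response converters (asBucketInfo, asVolumeInfo, asKeyInfo, asKeyInfoDetails) deleted, OzoneClientUtils keeps only the retry helper. A brief sketch of the surviving createRetryPolicy(int, long), assuming the interval is in milliseconds (the diff does not state the unit):

import org.apache.hadoop.io.retry.RetryPolicy;

import org.apache.hadoop.ozone.client.OzoneClientUtils;

public class RetryPolicyExample {
  public static void main(String[] args) {
    // Retry up to 10 times, waiting 1000 ms between attempts (assumed unit).
    RetryPolicy policy = OzoneClientUtils.createRetryPolicy(10, 1000L);
    System.out.println(policy);
  }
}
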
+ 0 - 36
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/DefaultRestServerSelector.java

@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest;
-
-import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
-
-import java.util.List;
-import java.util.Random;
-
-/**
- * Default selector randomly picks one of the REST Server from the list.
- */
-public class DefaultRestServerSelector implements RestServerSelector {
-
-  @Override
-  public ServiceInfo getRestServer(List<ServiceInfo> restServices) {
-    return restServices.get(
-        new Random().nextInt(restServices.size()));
-  }
-}

+ 0 - 45
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneExceptionMapper.java

@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest;
-
-
-import javax.ws.rs.core.Response;
-import javax.ws.rs.ext.ExceptionMapper;
-
-import org.slf4j.MDC;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- *  Class the represents various errors returned by the
- *  Object Layer.
- */
-public class OzoneExceptionMapper implements ExceptionMapper<OzoneException> {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OzoneExceptionMapper.class);
-
-  @Override
-  public Response toResponse(OzoneException exception) {
-    LOG.debug("Returning exception. ex: {}", exception.toJsonString());
-    MDC.clear();
-    return Response.status((int)exception.getHttpCode())
-      .entity(exception.toJsonString()).build();
-  }
-
-}

+ 0 - 1130
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java

@@ -1,1130 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest;
-
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.*;
-import org.apache.hadoop.hdds.client.OzoneQuota;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
-import org.apache.hadoop.ozone.client.rest.response.KeyInfoDetails;
-import org.apache.hadoop.ozone.client.rest.response.VolumeInfo;
-import org.apache.hadoop.ozone.client.rpc.OzoneKMSUtil;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
-import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
-import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
-import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort;
-import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.ozone.security.acl.OzoneObj;
-import org.apache.hadoop.ozone.web.response.ListBuckets;
-import org.apache.hadoop.ozone.web.response.ListKeys;
-import org.apache.hadoop.ozone.web.response.ListVolumes;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.Time;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpHeaders;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
-import org.apache.http.client.methods.HttpUriRequest;
-import org.apache.http.client.utils.URIBuilder;
-import org.apache.http.entity.InputStreamEntity;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClients;
-import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
-import org.apache.http.util.EntityUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.io.PipedInputStream;
-import java.io.PipedOutputStream;
-import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.text.ParseException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.FutureTask;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import static java.net.HttpURLConnection.HTTP_CREATED;
-import static java.net.HttpURLConnection.HTTP_OK;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT;
-
-/**
- * Ozone Client REST protocol implementation. It uses REST protocol to
- * connect to Ozone Handler that executes client calls.
- */
-public class RestClient implements ClientProtocol {
-
-  private static final String PATH_SEPARATOR = "/";
-  private static final Logger LOG = LoggerFactory.getLogger(RestClient.class);
-
-  private final Configuration conf;
-  private final URI ozoneRestUri;
-  private final CloseableHttpClient httpClient;
-  private final UserGroupInformation ugi;
-  // private final OzoneAcl.OzoneACLRights userRights;
-
-   /**
-    * Creates RestClient instance with the given configuration.
-    * @param conf Configuration
-    * @throws IOException
-    */
-  public RestClient(Configuration conf)
-      throws IOException {
-    try {
-      Preconditions.checkNotNull(conf);
-      this.conf = conf;
-      this.ugi = UserGroupInformation.getCurrentUser();
-      long socketTimeout = conf.getTimeDuration(
-          OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,
-          OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,
-          TimeUnit.MILLISECONDS);
-      long connectionTimeout = conf.getTimeDuration(
-          OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,
-          OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,
-          TimeUnit.MILLISECONDS);
-      int maxConnection = conf.getInt(
-          OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_MAX,
-          OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_DEFAULT);
-
-      int maxConnectionPerRoute = conf.getInt(
-          OzoneConfigKeys.OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX,
-          OzoneConfigKeys
-              .OZONE_REST_CLIENT_HTTP_CONNECTION_PER_ROUTE_MAX_DEFAULT
-      );
-
-      /*
-      To make RestClient Thread safe, creating the HttpClient with
-      ThreadSafeClientConnManager.
-      */
-      PoolingHttpClientConnectionManager connManager =
-          new PoolingHttpClientConnectionManager();
-      connManager.setMaxTotal(maxConnection);
-      connManager.setDefaultMaxPerRoute(maxConnectionPerRoute);
-
-      this.httpClient = HttpClients.custom()
-          .setConnectionManager(connManager)
-          .setDefaultRequestConfig(
-              RequestConfig.custom()
-              .setSocketTimeout(Math.toIntExact(socketTimeout))
-                  .setConnectTimeout(Math.toIntExact(connectionTimeout))
-                  .build())
-          .build();
-
-//      this.userRights = conf.getEnum(OMConfigKeys.OZONE_OM_USER_RIGHTS,
-//          OMConfigKeys.OZONE_OM_USER_RIGHTS_DEFAULT);
-
-      // TODO: Add new configuration parameter to configure RestServerSelector.
-      RestServerSelector defaultSelector = new DefaultRestServerSelector();
-      InetSocketAddress restServer = getOzoneRestServerAddress(defaultSelector);
-      URIBuilder uriBuilder = new URIBuilder()
-          .setScheme("http")
-          .setHost(restServer.getHostName())
-          .setPort(restServer.getPort());
-      this.ozoneRestUri = uriBuilder.build();
-
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  private InetSocketAddress getOzoneRestServerAddress(
-      RestServerSelector selector) throws IOException {
-    String httpAddress = conf.get(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY);
-
-    if (httpAddress == null) {
-      throw new IllegalArgumentException(
-          OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY + " must be defined. See" +
-              " https://wiki.apache.org/hadoop/Ozone#Configuration for" +
-              " details on configuring Ozone.");
-    }
-
-    HttpGet httpGet = new HttpGet("http://" + httpAddress +
-        OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT);
-    HttpEntity entity = executeHttpRequest(httpGet);
-    try {
-      String serviceListJson = EntityUtils.toString(entity);
-
-      ObjectMapper objectMapper = new ObjectMapper();
-      TypeReference<List<ServiceInfo>> serviceInfoReference =
-          new TypeReference<List<ServiceInfo>>() {
-          };
-      List<ServiceInfo> services = objectMapper.readValue(
-          serviceListJson, serviceInfoReference);
-
-      List<ServiceInfo> dataNodeInfos = services.stream().filter(
-          a -> a.getNodeType().equals(HddsProtos.NodeType.DATANODE))
-          .collect(Collectors.toList());
-
-      ServiceInfo restServer = selector.getRestServer(dataNodeInfos);
-
-      return NetUtils.createSocketAddr(
-          NetUtils.normalizeHostName(restServer.getHostname()) + ":"
-              + restServer.getPort(ServicePort.Type.HTTP));
-    } finally {
-      EntityUtils.consume(entity);
-    }
-  }
-
-  @Override
-  public void createVolume(String volumeName) throws IOException {
-    createVolume(volumeName, VolumeArgs.newBuilder().build());
-  }
-
-  @Override
-  public void createVolume(String volumeName, VolumeArgs volArgs)
-      throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName);
-      Preconditions.checkNotNull(volArgs);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      String owner = volArgs.getOwner() == null ?
-          ugi.getUserName() : volArgs.getOwner();
-      //TODO: support for ACLs has to be done in OzoneHandler (rest server)
-      /**
-      List<OzoneAcl> listOfAcls = new ArrayList<>();
-      //User ACL
-      listOfAcls.add(new OzoneAcl(OzoneAcl.OzoneACLType.USER,
-          owner, userRights));
-      //ACLs from VolumeArgs
-      if(volArgs.getAcls() != null) {
-        listOfAcls.addAll(volArgs.getAcls());
-      }
-       */
-      builder.setPath(PATH_SEPARATOR + volumeName);
-
-      String quota = volArgs.getQuota();
-      if(quota != null) {
-        builder.setParameter(Header.OZONE_QUOTA_QUERY_TAG, quota);
-      }
-
-      HttpPost httpPost = new HttpPost(builder.build());
-      addOzoneHeaders(httpPost);
-      //use admin from VolumeArgs, if it's present
-      if(volArgs.getAdmin() != null) {
-        httpPost.removeHeaders(HttpHeaders.AUTHORIZATION);
-        httpPost.addHeader(HttpHeaders.AUTHORIZATION,
-            Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
-                volArgs.getAdmin());
-      }
-      httpPost.addHeader(Header.OZONE_USER, owner);
-      LOG.info("Creating Volume: {}, with {} as owner and quota set to {}.",
-          volumeName, owner, quota == null ? "default" : quota);
-      EntityUtils.consume(executeHttpRequest(httpPost));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-
-  @Override
-  public void setVolumeOwner(String volumeName, String owner)
-      throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName);
-      Preconditions.checkNotNull(owner);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName);
-      HttpPut httpPut = new HttpPut(builder.build());
-      addOzoneHeaders(httpPut);
-      httpPut.addHeader(Header.OZONE_USER, owner);
-      EntityUtils.consume(executeHttpRequest(httpPut));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void setVolumeQuota(String volumeName, OzoneQuota quota)
-      throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName);
-      Preconditions.checkNotNull(quota);
-      String quotaString = quota.toString();
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName);
-      builder.setParameter(Header.OZONE_QUOTA_QUERY_TAG, quotaString);
-      HttpPut httpPut = new HttpPut(builder.build());
-      addOzoneHeaders(httpPut);
-      EntityUtils.consume(executeHttpRequest(httpPut));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public OzoneVolume getVolumeDetails(String volumeName)
-      throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName);
-      builder.setParameter(Header.OZONE_INFO_QUERY_TAG,
-          Header.OZONE_INFO_QUERY_VOLUME);
-      HttpGet httpGet = new HttpGet(builder.build());
-      addOzoneHeaders(httpGet);
-      HttpEntity response = executeHttpRequest(httpGet);
-      VolumeInfo volInfo =
-          VolumeInfo.parse(EntityUtils.toString(response));
-      //TODO: OzoneHandler in datanode has to be modified to send ACLs
-      OzoneVolume volume = new OzoneVolume(conf,
-          this,
-          volInfo.getVolumeName(),
-          volInfo.getCreatedBy(),
-          volInfo.getOwner().getName(),
-          volInfo.getQuota().sizeInBytes(),
-          HddsClientUtils.formatDateTime(volInfo.getCreatedOn()),
-          null);
-      EntityUtils.consume(response);
-      return volume;
-    } catch (URISyntaxException | ParseException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public boolean checkVolumeAccess(String volumeName, OzoneAcl acl)
-      throws IOException {
-    throw new UnsupportedOperationException("Not yet implemented.");
-  }
-
-  @Override
-  public void deleteVolume(String volumeName) throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName);
-      HttpDelete httpDelete = new HttpDelete(builder.build());
-      addOzoneHeaders(httpDelete);
-      EntityUtils.consume(executeHttpRequest(httpDelete));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public List<OzoneVolume> listVolumes(String volumePrefix, String prevKey,
-                                       int maxListResult)
-      throws IOException {
-    return listVolumes(null, volumePrefix, prevKey, maxListResult);
-  }
-
-  @Override
-  public List<OzoneVolume> listVolumes(String user, String volumePrefix,
-                                       String prevKey, int maxListResult)
-      throws IOException {
-    try {
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR);
-      builder.addParameter(Header.OZONE_INFO_QUERY_TAG,
-          Header.OZONE_LIST_QUERY_SERVICE);
-      builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS,
-          String.valueOf(maxListResult));
-      addQueryParamter(Header.OZONE_LIST_QUERY_PREFIX, volumePrefix, builder);
-      addQueryParamter(Header.OZONE_LIST_QUERY_PREVKEY, prevKey, builder);
-      HttpGet httpGet = new HttpGet(builder.build());
-      if (!Strings.isNullOrEmpty(user)) {
-        httpGet.addHeader(Header.OZONE_USER, user);
-      }
-      addOzoneHeaders(httpGet);
-      HttpEntity response = executeHttpRequest(httpGet);
-      ListVolumes volumeList =
-          ListVolumes.parse(EntityUtils.toString(response));
-      EntityUtils.consume(response);
-      return volumeList.getVolumes().stream().map(volInfo -> {
-        long creationTime = 0;
-        try {
-          creationTime = HddsClientUtils.formatDateTime(volInfo.getCreatedOn());
-        } catch (ParseException e) {
-          LOG.warn("Parse exception in getting creation time for volume", e);
-        }
-        return new OzoneVolume(conf, this, volInfo.getVolumeName(),
-            volInfo.getCreatedBy(), volInfo.getOwner().getName(),
-            volInfo.getQuota().sizeInBytes(), creationTime, null);
-      }).collect(Collectors.toList());
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void createBucket(String volumeName, String bucketName)
-      throws IOException {
-    createBucket(volumeName, bucketName, BucketArgs.newBuilder().build());
-  }
-
-  @Override
-  public void createBucket(
-      String volumeName, String bucketName, BucketArgs bucketArgs)
-      throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName, bucketName);
-      Preconditions.checkNotNull(bucketArgs);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      OzoneConsts.Versioning versioning = OzoneConsts.Versioning.DISABLED;
-      if(bucketArgs.getVersioning() != null &&
-          bucketArgs.getVersioning()) {
-        versioning = OzoneConsts.Versioning.ENABLED;
-      }
-      StorageType storageType = bucketArgs.getStorageType() == null ?
-          StorageType.DEFAULT : bucketArgs.getStorageType();
-
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName);
-      HttpPost httpPost = new HttpPost(builder.build());
-      addOzoneHeaders(httpPost);
-
-      //ACLs from BucketArgs
-      if(bucketArgs.getAcls() != null) {
-        for (OzoneAcl acl : bucketArgs.getAcls()) {
-          httpPost.addHeader(
-              Header.OZONE_ACLS, Header.OZONE_ACL_ADD + " " + acl.toString());
-        }
-      }
-      httpPost.addHeader(Header.OZONE_STORAGE_TYPE, storageType.toString());
-      httpPost.addHeader(Header.OZONE_BUCKET_VERSIONING,
-          versioning.toString());
-      LOG.info("Creating Bucket: {}/{}, with Versioning {} and Storage Type" +
-              " set to {}", volumeName, bucketName, versioning,
-          storageType);
-
-      EntityUtils.consume(executeHttpRequest(httpPost));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void setBucketVersioning(
-      String volumeName, String bucketName, Boolean versioning)
-      throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName, bucketName);
-      Preconditions.checkNotNull(versioning);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName);
-      HttpPut httpPut = new HttpPut(builder.build());
-      addOzoneHeaders(httpPut);
-
-      httpPut.addHeader(Header.OZONE_BUCKET_VERSIONING,
-          getBucketVersioning(versioning).toString());
-      EntityUtils.consume(executeHttpRequest(httpPut));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void setBucketStorageType(
-      String volumeName, String bucketName, StorageType storageType)
-      throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName, bucketName);
-      Preconditions.checkNotNull(storageType);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName);
-      HttpPut httpPut = new HttpPut(builder.build());
-      addOzoneHeaders(httpPut);
-
-      httpPut.addHeader(Header.OZONE_STORAGE_TYPE, storageType.toString());
-      EntityUtils.consume(executeHttpRequest(httpPut));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void deleteBucket(String volumeName, String bucketName)
-      throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName, bucketName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName);
-      HttpDelete httpDelete = new HttpDelete(builder.build());
-      addOzoneHeaders(httpDelete);
-      EntityUtils.consume(executeHttpRequest(httpDelete));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void checkBucketAccess(String volumeName, String bucketName)
-      throws IOException {
-    throw new UnsupportedOperationException("Not yet implemented.");
-  }
-
-  @Override
-  public OzoneBucket getBucketDetails(String volumeName, String bucketName)
-      throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName, bucketName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName);
-      builder.setParameter(Header.OZONE_INFO_QUERY_TAG,
-          Header.OZONE_INFO_QUERY_BUCKET);
-      HttpGet httpGet = new HttpGet(builder.build());
-      addOzoneHeaders(httpGet);
-      HttpEntity response = executeHttpRequest(httpGet);
-      BucketInfo bucketInfo =
-          BucketInfo.parse(EntityUtils.toString(response));
-      OzoneBucket bucket = new OzoneBucket(conf,
-          this,
-          bucketInfo.getVolumeName(),
-          bucketInfo.getBucketName(),
-          bucketInfo.getStorageType(),
-          getBucketVersioningFlag(bucketInfo.getVersioning()),
-          HddsClientUtils.formatDateTime(bucketInfo.getCreatedOn()),
-          new HashMap<>());
-      EntityUtils.consume(response);
-      return bucket;
-    } catch (URISyntaxException | ParseException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public List<OzoneBucket> listBuckets(String volumeName, String bucketPrefix,
-                                       String prevBucket, int maxListResult)
-      throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName);
-      builder.addParameter(Header.OZONE_INFO_QUERY_TAG,
-          Header.OZONE_INFO_QUERY_BUCKET);
-      builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS,
-          String.valueOf(maxListResult));
-      addQueryParamter(Header.OZONE_LIST_QUERY_PREFIX, bucketPrefix, builder);
-      addQueryParamter(Header.OZONE_LIST_QUERY_PREVKEY, prevBucket, builder);
-      HttpGet httpGet = new HttpGet(builder.build());
-      addOzoneHeaders(httpGet);
-      HttpEntity response = executeHttpRequest(httpGet);
-      ListBuckets bucketList =
-          ListBuckets.parse(EntityUtils.toString(response));
-      EntityUtils.consume(response);
-      return bucketList.getBuckets().stream().map(bucketInfo -> {
-        long creationTime = 0;
-        try {
-          creationTime =
-              HddsClientUtils.formatDateTime(bucketInfo.getCreatedOn());
-        } catch (ParseException e) {
-          LOG.warn("Parse exception in getting creation time for volume", e);
-        }
-        return new OzoneBucket(conf, this, volumeName,
-            bucketInfo.getBucketName(), bucketInfo.getStorageType(),
-            getBucketVersioningFlag(bucketInfo.getVersioning()), creationTime,
-            new HashMap<>(), bucketInfo.getEncryptionKeyName());
-      }).collect(Collectors.toList());
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  /**
-   * Writes a key in an existing bucket.
-   *
-   * @param volumeName Name of the Volume
-   * @param bucketName Name of the Bucket
-   * @param keyName Name of the Key
-   * @param size Size of the data
-   * @param type
-   * @param factor @return {@link OzoneOutputStream}
-   */
-  @Override
-  public OzoneOutputStream createKey(
-      String volumeName, String bucketName, String keyName, long size,
-      ReplicationType type, ReplicationFactor factor,
-      Map<String, String> metadata)
-      throws IOException {
-    // TODO: Once ReplicationType and ReplicationFactor are supported in
-    // OzoneHandler (in Datanode), set them in header.
-    try {
-      HddsClientUtils.verifyResourceName(volumeName, bucketName);
-      HddsClientUtils.checkNotNull(keyName, type, factor);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName +
-          PATH_SEPARATOR + keyName);
-      HttpPut putRequest = new HttpPut(builder.build());
-      addOzoneHeaders(putRequest);
-      PipedInputStream in = new PipedInputStream();
-      OutputStream out = new PipedOutputStream(in);
-      putRequest.setEntity(new InputStreamEntity(in, size));
-      FutureTask<HttpEntity> futureTask =
-          new FutureTask<>(() -> executeHttpRequest(putRequest));
-      new Thread(futureTask).start();
-      OzoneOutputStream outputStream = new OzoneOutputStream(
-          new OutputStream() {
-            @Override
-            public void write(int b) throws IOException {
-              out.write(b);
-            }
-
-            @Override
-            public void close() throws IOException {
-              try {
-                out.close();
-                EntityUtils.consume(futureTask.get());
-              } catch (ExecutionException | InterruptedException e) {
-                throw new IOException(e);
-              }
-            }
-          });
-
-      return outputStream;
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  /**
-   * Get a valid Delegation Token. Not supported for RestClient.
-   *
-   * @param renewer the designated renewer for the token
-   * @return Token<OzoneDelegationTokenSelector>
-   * @throws IOException
-   */
-  @Override
-  public Token<OzoneTokenIdentifier> getDelegationToken(Text renewer)
-      throws IOException {
-    throw new IOException("Method not supported");
-  }
-
-  /**
-   * Renew an existing delegation token. Not supported for RestClient.
-   *
-   * @param token delegation token obtained earlier
-   * @return the new expiration time
-   * @throws IOException
-   */
-  @Override
-  public long renewDelegationToken(Token<OzoneTokenIdentifier> token)
-      throws IOException {
-    throw new IOException("Method not supported");
-  }
-
-  /**
-   * Cancel an existing delegation token. Not supported for RestClient.
-   *
-   * @param token delegation token
-   * @throws IOException
-   */
-  @Override
-  public void cancelDelegationToken(Token<OzoneTokenIdentifier> token)
-      throws IOException {
-    throw new IOException("Method not supported");
-  }
-
-  @Override
-  public S3SecretValue getS3Secret(String kerberosID) throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  @Override
-  public OMFailoverProxyProvider getOMProxyProvider() {
-    return null;
-  }
-
-  @Override
-  public KeyProvider getKeyProvider() throws IOException {
-    // TODO: fix me to support kms instances for difference OMs
-    return OzoneKMSUtil.getKeyProvider(conf, getKeyProviderUri());
-  }
-
-  @Override
-  public URI getKeyProviderUri() throws IOException {
-    return OzoneKMSUtil.getKeyProviderUri(ugi, null, null, conf);
-  }
-
-  @Override
-  public OzoneInputStream getKey(
-      String volumeName, String bucketName, String keyName)
-      throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName, bucketName);
-      Preconditions.checkNotNull(keyName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName +
-          PATH_SEPARATOR + keyName);
-      HttpGet getRequest = new HttpGet(builder.build());
-      addOzoneHeaders(getRequest);
-      HttpEntity entity = executeHttpRequest(getRequest);
-      PipedInputStream in = new PipedInputStream();
-      OutputStream out = new PipedOutputStream(in);
-      FutureTask<Void> futureTask =
-          new FutureTask<>(() -> {
-            entity.writeTo(out);
-            out.close();
-            return null;
-          });
-      new Thread(futureTask).start();
-      OzoneInputStream inputStream = new OzoneInputStream(
-          new InputStream() {
-
-            @Override
-            public int read() throws IOException {
-              return in.read();
-            }
-
-            @Override
-            public void close() throws IOException {
-              in.close();
-              EntityUtils.consume(entity);
-            }
-          });
-
-      return inputStream;
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void deleteKey(String volumeName, String bucketName, String keyName)
-      throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName, bucketName);
-      Preconditions.checkNotNull(keyName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName + PATH_SEPARATOR + keyName);
-      HttpDelete httpDelete = new HttpDelete(builder.build());
-      addOzoneHeaders(httpDelete);
-      EntityUtils.consume(executeHttpRequest(httpDelete));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void renameKey(String volumeName, String bucketName,
-      String fromKeyName, String toKeyName) throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName, bucketName);
-      HddsClientUtils.checkNotNull(fromKeyName, toKeyName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName + PATH_SEPARATOR + bucketName
-          + PATH_SEPARATOR + fromKeyName);
-      builder.addParameter(Header.OZONE_RENAME_TO_KEY_PARAM_NAME, toKeyName);
-      HttpPost httpPost = new HttpPost(builder.build());
-      addOzoneHeaders(httpPost);
-      EntityUtils.consume(executeHttpRequest(httpPost));
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public List<OzoneKey> listKeys(String volumeName, String bucketName,
-                                 String keyPrefix, String prevKey,
-                                 int maxListResult)
-      throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder
-          .setPath(PATH_SEPARATOR + volumeName + PATH_SEPARATOR + bucketName);
-      builder.addParameter(Header.OZONE_INFO_QUERY_TAG,
-          Header.OZONE_INFO_QUERY_KEY);
-      builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS,
-          String.valueOf(maxListResult));
-      addQueryParamter(Header.OZONE_LIST_QUERY_PREFIX, keyPrefix, builder);
-      addQueryParamter(Header.OZONE_LIST_QUERY_PREVKEY, prevKey, builder);
-      HttpGet httpGet = new HttpGet(builder.build());
-      addOzoneHeaders(httpGet);
-      HttpEntity response = executeHttpRequest(httpGet);
-      ListKeys keyList = ListKeys.parse(EntityUtils.toString(response));
-      EntityUtils.consume(response);
-      return keyList.getKeyList().stream().map(keyInfo -> {
-        long creationTime = 0, modificationTime = 0;
-        try {
-          creationTime = HddsClientUtils.formatDateTime(keyInfo.getCreatedOn());
-          modificationTime =
-              HddsClientUtils.formatDateTime(keyInfo.getModifiedOn());
-        } catch (ParseException e) {
-          LOG.warn("Parse exception in getting creation time for volume", e);
-        }
-        return new OzoneKey(volumeName, bucketName, keyInfo.getKeyName(),
-            keyInfo.getSize(), creationTime, modificationTime,
-            ReplicationType.valueOf(keyInfo.getType().toString()));
-      }).collect(Collectors.toList());
-    } catch (URISyntaxException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public OzoneKeyDetails getKeyDetails(
-      String volumeName, String bucketName, String keyName)
-      throws IOException {
-    try {
-      HddsClientUtils.verifyResourceName(volumeName, bucketName);
-      Preconditions.checkNotNull(keyName);
-      URIBuilder builder = new URIBuilder(ozoneRestUri);
-      builder.setPath(PATH_SEPARATOR + volumeName +
-          PATH_SEPARATOR + bucketName + PATH_SEPARATOR + keyName);
-      builder.setParameter(Header.OZONE_INFO_QUERY_TAG,
-          Header.OZONE_INFO_QUERY_KEY_DETAIL);
-      HttpGet httpGet = new HttpGet(builder.build());
-      addOzoneHeaders(httpGet);
-      HttpEntity response = executeHttpRequest(httpGet);
-      KeyInfoDetails keyInfo =
-          KeyInfoDetails.parse(EntityUtils.toString(response));
-
-      List<OzoneKeyLocation> ozoneKeyLocations = new ArrayList<>();
-      keyInfo.getKeyLocations().forEach((a) -> ozoneKeyLocations.add(
-          new OzoneKeyLocation(a.getContainerID(), a.getLocalID(),
-              a.getLength(), a.getOffset())));
-      OzoneKeyDetails key = new OzoneKeyDetails(volumeName,
-          bucketName,
-          keyInfo.getKeyName(),
-          keyInfo.getSize(),
-          HddsClientUtils.formatDateTime(keyInfo.getCreatedOn()),
-          HddsClientUtils.formatDateTime(keyInfo.getModifiedOn()),
-          ozoneKeyLocations, ReplicationType.valueOf(
-              keyInfo.getType().toString()),
-          new HashMap<>(), keyInfo.getFileEncryptionInfo());
-      EntityUtils.consume(response);
-      return key;
-    } catch (URISyntaxException | ParseException e) {
-      throw new IOException(e);
-    }
-  }
-
-  @Override
-  public void createS3Bucket(String userName, String s3BucketName)
-      throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  @Override
-  public void deleteS3Bucket(String s3BucketName)
-      throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  @Override
-  public String getOzoneBucketMapping(String s3BucketName) throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  @Override
-  public String getOzoneVolumeName(String s3BucketName) throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  @Override
-  public String getOzoneBucketName(String s3BucketName) throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  @Override
-  public List<OzoneBucket> listS3Buckets(String userName, String bucketPrefix,
-                                  String prevBucket, int maxListResult)
-      throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  /**
-   * Adds Ozone headers to http request.
-   *
-   * @param httpRequest Http Request
-   */
-  private void addOzoneHeaders(HttpUriRequest httpRequest) {
-    httpRequest.addHeader(HttpHeaders.AUTHORIZATION,
-        Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
-            ugi.getUserName());
-    httpRequest.addHeader(HttpHeaders.DATE,
-        HddsClientUtils.formatDateTime(Time.monotonicNow()));
-    httpRequest.addHeader(Header.OZONE_VERSION_HEADER,
-        Header.OZONE_V1_VERSION_HEADER);
-  }
-
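For reference, every request carried three headers: a simple-auth Authorization header ("OZONE <user>"), a Date header, and the protocol version header. A standalone sketch with the removed Header constants inlined as literals (host, port, and user name are placeholders), using Apache HttpClient 4.x:

import org.apache.http.HttpHeaders;
import org.apache.http.client.methods.HttpGet;

public class OzoneHeadersSketch {
  public static void main(String[] args) {
    HttpGet request = new HttpGet("http://datanode.example:9880/vol1");
    request.addHeader(HttpHeaders.AUTHORIZATION, "OZONE user1");        // simple auth scheme
    request.addHeader(HttpHeaders.DATE, "Mon, 26 Nov 2018 10:00:00 GMT");
    request.addHeader("x-ozone-version", "v1");                         // Header.OZONE_VERSION_HEADER
    // Fully qualified to avoid confusion with the removed Ozone Header class.
    for (org.apache.http.Header h : request.getAllHeaders()) {
      System.out.println(h.getName() + ": " + h.getValue());
    }
  }
}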
-  /**
-   * Sends the http request to the server and returns the response HttpEntity.
-   * It is the responsibility of the caller to consume and close the response
-   * HttpEntity by calling {@code EntityUtils.consume}.
-   *
-   * @param httpUriRequest http request
-   * @throws IOException
-   */
-  private HttpEntity executeHttpRequest(HttpUriRequest httpUriRequest)
-      throws IOException {
-    HttpResponse response = httpClient.execute(httpUriRequest);
-    int errorCode = response.getStatusLine().getStatusCode();
-    HttpEntity entity = response.getEntity();
-    if ((errorCode == HTTP_OK) || (errorCode == HTTP_CREATED)) {
-      return entity;
-    }
-    if (entity != null) {
-      throw new IOException(
-          OzoneException.parse(EntityUtils.toString(entity)));
-    } else {
-      throw new IOException("Unexpected null in http payload," +
-          " while processing request");
-    }
-  }
-
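The contract documented above is easy to get wrong, so here is a hedged caller-side sketch (placeholder URL, Apache HttpClient 4.x): the returned HttpEntity must be consumed on every path, otherwise the pooled connection is never released.

import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;

public class ConsumeEntitySketch {
  public static void main(String[] args) throws Exception {
    try (CloseableHttpClient client = HttpClients.createDefault();
         CloseableHttpResponse response =
             client.execute(new HttpGet("http://example.com/"))) {
      HttpEntity entity = response.getEntity();
      try {
        System.out.println(EntityUtils.toString(entity));
      } finally {
        EntityUtils.consume(entity);   // always release the connection
      }
    }
  }
}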
-  /**
-   * Converts OzoneConsts.Versioning to boolean.
-   *
-   * @param version versioning enum value
-   * @return corresponding boolean value
-   */
-  private Boolean getBucketVersioningFlag(
-      OzoneConsts.Versioning version) {
-    if (version != null) {
-      switch (version) {
-      case ENABLED:
-        return true;
-      case NOT_DEFINED:
-      case DISABLED:
-      default:
-        return false;
-      }
-    }
-    return false;
-  }
-
-  /**
-   * Converts the bucket versioning flag into OzoneConsts.Versioning.
-   *
-   * @param flag versioning flag
-   * @return corresponding OzoneConsts.Versioning
-   */
-  private OzoneConsts.Versioning getBucketVersioning(Boolean flag) {
-    if (flag != null) {
-      if (flag) {
-        return OzoneConsts.Versioning.ENABLED;
-      } else {
-        return OzoneConsts.Versioning.DISABLED;
-      }
-    }
-    return OzoneConsts.Versioning.NOT_DEFINED;
-  }
-
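The two converters above define a deliberately lossy mapping: NOT_DEFINED and DISABLED both collapse to false, and a null flag maps to NOT_DEFINED. A standalone sketch of the same mapping (the enum is redeclared locally, since OzoneConsts is not assumed on the classpath):

public class VersioningMappingSketch {
  enum Versioning { NOT_DEFINED, ENABLED, DISABLED }

  static Versioning fromFlag(Boolean flag) {
    if (flag == null) {
      return Versioning.NOT_DEFINED;
    }
    return flag ? Versioning.ENABLED : Versioning.DISABLED;
  }

  static boolean toFlag(Versioning v) {
    return v == Versioning.ENABLED;  // NOT_DEFINED and DISABLED both map to false
  }

  public static void main(String[] args) {
    System.out.println(fromFlag(null));                  // NOT_DEFINED
    System.out.println(fromFlag(true));                  // ENABLED
    System.out.println(toFlag(Versioning.NOT_DEFINED));  // false
  }
}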
-  @Override
-  public void close() throws IOException {
-    httpClient.close();
-  }
-
-  private void addQueryParamter(String param, String value,
-      URIBuilder builder) {
-    if (!Strings.isNullOrEmpty(value)) {
-      builder.addParameter(param, value);
-    }
-  }
-
-  @Override
-  public OmMultipartInfo initiateMultipartUpload(String volumeName,
-                                                 String bucketName,
-                                                 String keyName,
-                                                 ReplicationType type,
-                                                 ReplicationFactor factor)
-      throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  @Override
-  public OzoneOutputStream createMultipartKey(String volumeName,
-                                              String bucketName,
-                                              String keyName,
-                                              long size,
-                                              int partNumber,
-                                              String uploadID)
-      throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  @Override
-  public OmMultipartUploadCompleteInfo completeMultipartUpload(
-      String volumeName, String bucketName, String keyName, String uploadID,
-      Map<Integer, String> partsMap) throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  @Override
-  public void abortMultipartUpload(String volumeName,
-       String bucketName, String keyName, String uploadID) throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  @Override
-  public OzoneMultipartUploadPartListParts listParts(String volumeName,
-      String bucketName, String keyName, String uploadID, int partNumberMarker,
-      int maxParts)  throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  /**
-   * Get CanonicalServiceName for ozone delegation token.
-   * @return Canonical Service Name of ozone delegation token.
-   */
-  public String getCanonicalServiceName() {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  @Override
-  public OzoneFileStatus getOzoneFileStatus(String volumeName,
-      String bucketName, String keyName) throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  @Override
-  public void createDirectory(String volumeName, String bucketName,
-      String keyName) {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  @Override
-  public OzoneInputStream readFile(String volumeName, String bucketName,
-      String keyName) {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  @Override
-  public OzoneOutputStream createFile(String volumeName, String bucketName,
-      String keyName, long size, ReplicationType type, ReplicationFactor factor,
-      boolean overWrite, boolean recursive) {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  @Override
-  public List<OzoneFileStatus> listStatus(String volumeName, String bucketName,
-      String keyName, boolean recursive, String startKey, long numEntries)
-      throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not " +
-        "support this operation.");
-  }
-
-  /**
-   * Adds an ACL for an Ozone object. Returns true if the ACL is added
-   * successfully, false otherwise.
-   *
-   * @param obj Ozone object for which the ACL should be added.
-   * @param acl Ozone ACL to be added.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public boolean addAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not" +
-        " support this operation.");
-  }
-
-  /**
-   * Removes an ACL from an Ozone object. Returns true if the ACL is removed
-   * successfully, false otherwise.
-   *
-   * @param obj Ozone object.
-   * @param acl Ozone ACL to be removed.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public boolean removeAcl(OzoneObj obj, OzoneAcl acl) throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not" +
-        " support this operation.");
-  }
-
-  /**
-   * Sets the ACLs for the given Ozone object. This operation resets the ACLs
-   * of the object to the list of ACLs provided in the argument.
-   *
-   * @param obj Ozone object.
-   * @param acls List of ACLs.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public boolean setAcl(OzoneObj obj, List<OzoneAcl> acls) throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not" +
-        " support this operation.");
-  }
-
-  /**
-   * Returns the list of ACLs for the given Ozone object.
-   *
-   * @param obj Ozone object.
-   * @throws IOException if there is an error.
-   */
-  @Override
-  public List<OzoneAcl> getAcl(OzoneObj obj) throws IOException {
-    throw new UnsupportedOperationException("Ozone REST protocol does not" +
-        " support this operation.");
-  }
-}

+ 0 - 40
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestServerSelector.java

@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest;
-
-import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
-
-import java.util.List;
-
-/**
- * The implementor of this interface should select the REST server which will
- * be used by the client to connect to the Ozone cluster, given a list of
- * REST servers/DataNodes (DataNodes are the ones which host the REST service).
- */
-public interface RestServerSelector {
-
-  /**
-   * Returns the REST Service which will be used by the client for connection.
-   *
-   * @param restServices list of available REST servers
-   * @return ServiceInfo
-   */
-  ServiceInfo getRestServer(List<ServiceInfo> restServices);
-
-}
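For context, this commit also removes DefaultRestServerSelector, the one shipped implementor. A minimal sketch of what an implementor looked like in spirit, picking a random REST-serving DataNode to spread client load (ServiceInfo is stubbed locally so the sketch is self-contained):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

public class RandomRestServerSelector {
  // Local stand-in for org.apache.hadoop.ozone.om.helpers.ServiceInfo.
  static class ServiceInfo {
    final String host;
    ServiceInfo(String host) { this.host = host; }
  }

  ServiceInfo getRestServer(List<ServiceInfo> restServices) {
    // A random choice spreads clients across the DataNodes hosting REST.
    return restServices.get(
        ThreadLocalRandom.current().nextInt(restServices.size()));
  }

  public static void main(String[] args) {
    RandomRestServerSelector selector = new RandomRestServerSelector();
    List<ServiceInfo> nodes = Arrays.asList(
        new ServiceInfo("dn1.example"), new ServiceInfo("dn2.example"));
    System.out.println(selector.getRestServer(nodes).host);
  }
}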

+ 0 - 22
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/exceptions/package-info.java

@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.client.rest.exceptions;
-
-/**
- * This package contains ozone rest client libraries.
- */

+ 0 - 22
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java

@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-@InterfaceAudience.Private
-package org.apache.hadoop.ozone.client.rest.headers;
-
-import org.apache.hadoop.classification.InterfaceAudience;

+ 0 - 23
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java

@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest;
-
-/**
- * This package contains Ozone rest client library classes.
- */

+ 0 - 267
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/OzoneException.java

@@ -1,267 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest;
-
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-
-/**
- * Class that represents various errors returned by the
- * Ozone layer.
- */
-@InterfaceAudience.Private
-public class OzoneException extends Exception {
-
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(OzoneException.class);
-  private static final ObjectMapper MAPPER;
-
-  static {
-    MAPPER = new ObjectMapper();
-    MAPPER.setVisibility(
-        MAPPER.getSerializationConfig().getDefaultVisibilityChecker()
-            .withCreatorVisibility(JsonAutoDetect.Visibility.NONE)
-            .withFieldVisibility(JsonAutoDetect.Visibility.NONE)
-            .withGetterVisibility(JsonAutoDetect.Visibility.NONE)
-            .withIsGetterVisibility(JsonAutoDetect.Visibility.NONE)
-            .withSetterVisibility(JsonAutoDetect.Visibility.NONE));
-  }
-
-  @JsonProperty("httpCode")
-  private long httpCode;
-  @JsonProperty("shortMessage")
-  private String shortMessage;
-  @JsonProperty("resource")
-  private String resource;
-  @JsonProperty("message")
-  private String message;
-  @JsonProperty("requestID")
-  private String requestId;
-  @JsonProperty("hostName")
-  private String hostID;
-
-  /**
-   * Constructs a new exception with {@code null} as its detail message. The
-   * cause is not initialized, and may subsequently be initialized by a call
-   * to {@link #initCause}.
-   *
-   * This constructor is needed by the JSON serializer.
-   */
-  public OzoneException() {
-  }
-
-
-  /**
-   * Constructor that allows a shortMessage and exception.
-   *
-   * @param httpCode Error Code
-   * @param shortMessage Short Message
-   * @param ex Exception
-   */
-  public OzoneException(long httpCode, String shortMessage, Exception ex) {
-    super(ex);
-    this.message = ex.getMessage();
-    this.shortMessage = shortMessage;
-    this.httpCode = httpCode;
-  }
-
-
-  /**
-   * Constructor that allows a shortMessage.
-   *
-   * @param httpCode Error Code
-   * @param shortMessage Short Message
-   */
-  public OzoneException(long httpCode, String shortMessage) {
-    this.shortMessage = shortMessage;
-    this.httpCode = httpCode;
-  }
-
-  /**
-   * Constructor that allows a shortMessage and long message.
-   *
-   * @param httpCode Error Code
-   * @param shortMessage Short Message
-   * @param message long error message
-   */
-  public OzoneException(long httpCode, String shortMessage, String message) {
-    this.shortMessage = shortMessage;
-    this.message = message;
-    this.httpCode = httpCode;
-  }
-
-  /**
-   * Constructor that allows a shortMessage, a long message and an exception.
-   *
-   * @param httpCode Error code
-   * @param shortMessage Short message
-   * @param message Long error message
-   * @param ex Exception
-   */
-  public OzoneException(long httpCode, String shortMessage,
-      String message, Exception ex) {
-    super(ex);
-    this.shortMessage = shortMessage;
-    this.message = message;
-    this.httpCode = httpCode;
-  }
-
-  /**
-   * Returns the resource that was involved in the error.
-   *
-   * @return String
-   */
-  public String getResource() {
-    return resource;
-  }
-
-  /**
-   * Sets Resource.
-   *
-   * @param resourceName - Name of the Resource
-   */
-  public void setResource(String resourceName) {
-    this.resource = resourceName;
-  }
-
-  /**
-   * Gets a detailed message for the error.
-   *
-   * @return String
-   */
-  public String getMessage() {
-    return message;
-  }
-
-  /**
-   * Sets the error message.
-   *
-   * @param longMessage - Long message
-   */
-  public void setMessage(String longMessage) {
-    this.message = longMessage;
-  }
-
-  /**
-   * Returns request Id.
-   *
-   * @return String
-   */
-  public String getRequestId() {
-    return requestId;
-  }
-
-  /**
-   * Sets request ID.
-   *
-   * @param ozoneRequestId Request ID generated by the Server
-   */
-  public void setRequestId(String ozoneRequestId) {
-    this.requestId = ozoneRequestId;
-  }
-
-  /**
-   * Returns short error string.
-   *
-   * @return String
-   */
-  public String getShortMessage() {
-    return shortMessage;
-  }
-
-  /**
-   * Sets short error string.
-   *
-   * @param shortError Short Error Code
-   */
-  public void setShortMessage(String shortError) {
-    this.shortMessage = shortError;
-  }
-
-  /**
-   * Returns hostID.
-   *
-   * @return String
-   */
-  public String getHostID() {
-    return hostID;
-  }
-
-  /**
-   * Sets host ID.
-   *
-   * @param hostName host Name
-   */
-  public void setHostID(String hostName) {
-    this.hostID = hostName;
-  }
-
-  /**
-   * Returns http error code.
-   *
-   * @return long
-   */
-  public long getHttpCode() {
-    return httpCode;
-  }
-
-  /**
-   * Sets http status.
-   *
-   * @param httpStatus http error code.
-   */
-  public void setHttpCode(long httpStatus) {
-    this.httpCode = httpStatus;
-  }
-
-  /**
-   * Returns a Json String.
-   *
-   * @return JSON representation of the Error
-   */
-  public String toJsonString() {
-    try {
-      return MAPPER.writeValueAsString(this);
-    } catch (IOException ex) {
-      // TODO : Log this error on server side.
-    }
-    // TODO : Replace this with a JSON Object -- That represents this error.
-    return "500 Internal Server Error";
-  }
-
-  /**
-   * Parses an Exception record.
-   *
-   * @param jsonString - Exception in Json format.
-   *
-   * @return OzoneException Object
-   *
-   * @throws IOException
-   */
-  public static OzoneException parse(String jsonString) throws IOException {
-    return READER.readValue(jsonString);
-  }
-}
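The interesting part of the class above is the Jackson setup: a writer whose auto-detection is fully disabled, so only the @JsonProperty fields reach the wire, paired with a plain readerFor for parsing. A self-contained round-trip sketch of that pattern with a local stand-in class (requires jackson-databind):

import java.io.IOException;

import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;

public class JsonErrorSketch {
  public static class RestError extends Exception {
    @JsonProperty("httpCode") private long httpCode;
    @JsonProperty("shortMessage") private String shortMessage;

    public RestError() { }                 // needed by the deserializer
    public RestError(long httpCode, String shortMessage) {
      this.httpCode = httpCode;
      this.shortMessage = shortMessage;
    }
    public long getHttpCode() { return httpCode; }
    public String getShortMessage() { return shortMessage; }
  }

  private static final ObjectReader READER =
      new ObjectMapper().readerFor(RestError.class);
  private static final ObjectMapper WRITER = new ObjectMapper();

  static {
    // Serialize only the annotated fields, not Throwable's getters.
    WRITER.setVisibility(
        WRITER.getSerializationConfig().getDefaultVisibilityChecker()
            .withCreatorVisibility(JsonAutoDetect.Visibility.NONE)
            .withFieldVisibility(JsonAutoDetect.Visibility.NONE)
            .withGetterVisibility(JsonAutoDetect.Visibility.NONE)
            .withIsGetterVisibility(JsonAutoDetect.Visibility.NONE)
            .withSetterVisibility(JsonAutoDetect.Visibility.NONE));
  }

  public static void main(String[] args) throws IOException {
    String json = WRITER.writeValueAsString(new RestError(404, "Not Found"));
    System.out.println(json);              // e.g. {"httpCode":404,"shortMessage":"Not Found"}
    RestError back = READER.readValue(json);
    System.out.println(back.getHttpCode() + " " + back.getShortMessage());
  }
}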

+ 0 - 74
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/Header.java

@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest.headers;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * OZONE specific HTTP headers.
- */
-@InterfaceAudience.Private
-public final class Header {
-  public static final String OZONE_QUOTA_BYTES = "BYTES";
-  public static final String OZONE_QUOTA_MB = "MB";
-  public static final String OZONE_QUOTA_GB = "GB";
-  public static final String OZONE_QUOTA_TB = "TB";
-  public static final String OZONE_QUOTA_REMOVE = "remove";
-  public static final String OZONE_QUOTA_UNDEFINED = "undefined";
-  public static final String OZONE_EMPTY_STRING = "";
-  public static final String OZONE_DEFAULT_LIST_SIZE = "1000";
-
-  public static final String OZONE_USER = "x-ozone-user";
-  public static final String OZONE_SIMPLE_AUTHENTICATION_SCHEME = "OZONE";
-  public static final String OZONE_VERSION_HEADER = "x-ozone-version";
-  public static final String OZONE_V1_VERSION_HEADER = "v1";
-
-  public static final String OZONE_LIST_QUERY_SERVICE = "service";
-
-  public static final String OZONE_INFO_QUERY_VOLUME = "volume";
-  public static final String OZONE_INFO_QUERY_BUCKET = "bucket";
-  public static final String OZONE_INFO_QUERY_KEY = "key";
-  public static final String OZONE_INFO_QUERY_KEY_DETAIL = "key-detail";
-
-  public static final String OZONE_REQUEST_ID = "x-ozone-request-id";
-  public static final String OZONE_SERVER_NAME = "x-ozone-server-name";
-
-  public static final String OZONE_STORAGE_TYPE = "x-ozone-storage-type";
-
-  public static final String OZONE_BUCKET_VERSIONING =
-      "x-ozone-bucket-versioning";
-
-  public static final String OZONE_ACLS = "x-ozone-acls";
-  public static final String OZONE_ACL_ADD = "ADD";
-  public static final String OZONE_ACL_REMOVE = "REMOVE";
-
-  public static final String OZONE_INFO_QUERY_TAG = "info";
-  public static final String OZONE_QUOTA_QUERY_TAG = "quota";
-  public static final String CONTENT_MD5 = "Content-MD5";
-  public static final String OZONE_LIST_QUERY_PREFIX = "prefix";
-  public static final String OZONE_LIST_QUERY_MAXKEYS = "max-keys";
-  public static final String OZONE_LIST_QUERY_PREVKEY = "prev-key";
-  public static final String OZONE_LIST_QUERY_ROOTSCAN = "root-scan";
-
-  public static final String OZONE_RENAME_TO_KEY_PARAM_NAME = "toKey";
-
-  private Header() {
-    // Never constructed.
-  }
-}
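Illustrative only: how the removed RestClient combined these constants into a list-keys URL. The values are inlined as literals since the Header class itself is gone; host and port are placeholders. Requires Apache HttpClient 4.x for URIBuilder:

import org.apache.http.client.utils.URIBuilder;

public class ListKeysUrlSketch {
  public static void main(String[] args) throws Exception {
    URIBuilder builder = new URIBuilder("http://datanode.example:9880");
    builder.setPath("/vol1/bucket1");
    builder.addParameter("info", "key");       // OZONE_INFO_QUERY_TAG = OZONE_INFO_QUERY_KEY
    builder.addParameter("max-keys", "1000");  // OZONE_LIST_QUERY_MAXKEYS
    builder.addParameter("prefix", "logs");    // OZONE_LIST_QUERY_PREFIX
    System.out.println(builder.build());       // the assembled request URI
  }
}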

+ 0 - 22
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/headers/package-info.java

@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.client.rest.headers;
-
-/**
- * Ozone HTTP Header utility.
- */

+ 0 - 22
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/package-info.java

@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.client.rest;
-
-/**
- * Ozone REST interface.
- */

+ 0 - 248
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/BucketInfo.java

@@ -1,248 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.client.rest.response;
-
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConsts;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.google.common.base.Preconditions;
-
-/**
- * BucketInfo class is used for parsing the json response
- * when a BucketInfo call is made.
- */
-public class BucketInfo implements Comparable<BucketInfo> {
-
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(BucketInfo.class);
-
-  private String volumeName;
-  private String bucketName;
-  private String createdOn;
-  private List<OzoneAcl> acls;
-  private OzoneConsts.Versioning versioning;
-  private StorageType storageType;
-  private String bekName;
-
-  /**
-   * Constructor for BucketInfo.
-   *
-   * @param volumeName
-   * @param bucketName
-   */
-  public BucketInfo(String volumeName, String bucketName) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-  }
-
-
-  /**
-   * Default constructor for BucketInfo.
-   */
-  public BucketInfo() {
-    acls = new LinkedList<>();
-  }
-
-  /**
-   * Parse a JSON string into BucketInfo Object.
-   *
-   * @param jsonString Json String
-   * @return BucketInfo
-   * @throws IOException
-   */
-  public static BucketInfo parse(String jsonString) throws IOException {
-    return READER.readValue(jsonString);
-  }
-
-  /**
-   * Returns a List of ACLs set on the Bucket.
-   *
-   * @return List of Acl
-   */
-  public List<OzoneAcl> getAcls() {
-    return acls;
-  }
-
-  /**
-   * Sets ACls.
-   *
-   * @param acls Acl list
-   */
-  public void setAcls(List<OzoneAcl> acls) {
-    this.acls = acls;
-  }
-
-  /**
-   * Returns Storage Type info.
-   *
-   * @return Storage Type of the bucket
-   */
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-  /**
-   * Sets the Storage Type.
-   *
-   * @param storageType Storage Type
-   */
-  public void setStorageType(StorageType storageType) {
-    this.storageType = storageType;
-  }
-
-  /**
-   * Returns versioning.
-   *
-   * @return versioning Enum
-   */
-  public OzoneConsts.Versioning getVersioning() {
-    return versioning;
-  }
-
-  /**
-   * Sets Versioning.
-   *
-   * @param versioning
-   */
-  public void setVersioning(OzoneConsts.Versioning versioning) {
-    this.versioning = versioning;
-  }
-
-
-  /**
-   * Gets bucket Name.
-   *
-   * @return String
-   */
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  /**
-   * Sets bucket Name.
-   *
-   * @param bucketName Name of the bucket
-   */
-  public void setBucketName(String bucketName) {
-    this.bucketName = bucketName;
-  }
-
-  /**
-   * Sets creation time of the bucket.
-   *
-   * @param creationTime Date String
-   */
-  public void setCreatedOn(String creationTime) {
-    this.createdOn = creationTime;
-  }
-
-  /**
-   * Returns creation time.
-   *
-   * @return creation time of bucket.
-   */
-  public String getCreatedOn() {
-    return createdOn;
-  }
-
-  /**
-   * Returns Volume Name.
-   *
-   * @return String volume name
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Sets the Volume Name of bucket.
-   *
-   * @param volumeName volumeName
-   */
-  public void setVolumeName(String volumeName) {
-    this.volumeName = volumeName;
-  }
-
-  /**
-   * Return bucket encryption key name.
-   * @return bucket encryption key name
-   */
-  public String getEncryptionKeyName() {
-    return bekName;
-  }
-
-  /**
-   * Sets the bucket encryption key name.
-   * @param name bucket encryption key name
-   */
-  public void setEncryptionKeyName(String name) {
-    this.bekName = name;
-  }
-
-  /**
-   * Compares this object with the specified object for order.  Returns a
-   * negative integer, zero, or a positive integer as this object is less
-   * than, equal to, or greater than the specified object.
-   *
-   * Please note: BucketInfo compare functions are used only within the
-   * context of a volume, hence the volume name is purposefully ignored in
-   * the compareTo, equals and hashCode functions of this class.
-   */
-  @Override
-  public int compareTo(BucketInfo o) {
-    Preconditions.checkState(o.getVolumeName().equals(this.getVolumeName()));
-    return this.bucketName.compareTo(o.getBucketName());
-  }
-
-  /**
-   * Checks if two bucketInfo's are equal.
-   * @param o Object BucketInfo
-   * @return  True or False
-   */
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (!(o instanceof BucketInfo)) {
-      return false;
-    }
-
-    BucketInfo that = (BucketInfo) o;
-    Preconditions.checkState(that.getVolumeName().equals(this.getVolumeName()));
-    return bucketName.equals(that.bucketName);
-
-  }
-
-  /**
-   * Hash Code for this object.
-   * @return int
-   */
-  @Override
-  public int hashCode() {
-    return bucketName.hashCode();
-  }
-
-}

+ 0 - 236
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfo.java

@@ -1,236 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest.response;
-
-import java.io.IOException;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.client.ReplicationType;
-
-/**
- * KeyInfo class is used for parsing the json response
- * when a KeyInfo call is made.
- */
-public class KeyInfo implements Comparable<KeyInfo> {
-
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(KeyInfo.class);
-
-  private long version;
-  private String md5hash;
-  private String createdOn;
-  private String modifiedOn;
-  private long size;
-  private String keyName;
-  private ReplicationType type;
-
-  /**
-   * Return replication type of the key.
-   *
-   * @return replication type
-   */
-  public ReplicationType getType() {
-    return type;
-  }
-
-  /**
-   * Set replication type of the key.
-   *
-   * @param replicationType
-   */
-  public void setType(ReplicationType replicationType) {
-    this.type = replicationType;
-  }
-
-  /**
-   * When this key was created.
-   *
-   * @return Date String
-   */
-  public String getCreatedOn() {
-    return createdOn;
-  }
-
-  /**
-   * When this key was modified.
-   *
-   * @return Date String
-   */
-  public String getModifiedOn() {
-    return modifiedOn;
-  }
-
-  /**
-   * When this key was created.
-   *
-   * @param createdOn Date String
-   */
-  public void setCreatedOn(String createdOn) {
-    this.createdOn = createdOn;
-  }
-
-  /**
-   * When this key was modified.
-   *
-   * @param modifiedOn Date String
-   */
-  public void setModifiedOn(String modifiedOn) {
-    this.modifiedOn = modifiedOn;
-  }
-
-  /**
-   * Gets the Key name of this object.
-   *
-   * @return String
-   */
-  public String getKeyName() {
-    return keyName;
-  }
-
-  /**
-   * Sets the Key name of this object.
-   *
-   * @param keyName String
-   */
-  public void setKeyName(String keyName) {
-    this.keyName = keyName;
-  }
-
-  /**
-   * Returns the MD5 Hash for the data of this key.
-   *
-   * @return String MD5
-   */
-  public String getMd5hash() {
-    return md5hash;
-  }
-
-  /**
-   * Sets the MD5 value of this key.
-   *
-   * @param md5hash Md5 of this file
-   */
-  public void setMd5hash(String md5hash) {
-    this.md5hash = md5hash;
-  }
-
-  /**
-   * Number of bytes stored in the data part of this key.
-   *
-   * @return long size of the data file
-   */
-  public long getSize() {
-    return size;
-  }
-
-  /**
-   * Sets the size of the data part of this key.
-   *
-   * @param size Size in long
-   */
-  public void setSize(long size) {
-    this.size = size;
-  }
-
-  /**
-   * Version of this key.
-   *
-   * @return returns the version of this key.
-   */
-  public long getVersion() {
-    return version;
-  }
-
-  /**
-   * Sets the version of this key.
-   *
-   * @param version - Version String
-   */
-  public void setVersion(long version) {
-    this.version = version;
-  }
-
-  /**
-   * Compares this object with the specified object for order.  Returns a
-   * negative integer, zero, or a positive integer as this object is less
-   * than, equal to, or greater than the specified object.
-   *
-   * @param o the object to be compared.
-   * @return a negative integer, zero, or a positive integer as this object
-   * is less than, equal to, or greater than the specified object.
-   * @throws NullPointerException if the specified object is null
-   * @throws ClassCastException   if the specified object's type prevents it
-   *                              from being compared to this object.
-   */
-  @Override
-  public int compareTo(KeyInfo o) {
-    if (this.keyName.compareTo(o.getKeyName()) != 0) {
-      return this.keyName.compareTo(o.getKeyName());
-    }
-
-    if (this.getVersion() == o.getVersion()) {
-      return 0;
-    }
-    if (this.getVersion() < o.getVersion()) {
-      return -1;
-    }
-    return 1;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    KeyInfo keyInfo = (KeyInfo) o;
-
-    return new EqualsBuilder()
-        .append(version, keyInfo.version)
-        .append(keyName, keyInfo.keyName)
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(17, 37)
-        .append(version)
-        .append(keyName)
-        .toHashCode();
-  }
-
-  /**
-   * Parse a string to return KeyInfo Object.
-   *
-   * @param jsonString Json String
-   * @return keyInfo
-   * @throws IOException
-   */
-  public static KeyInfo parse(String jsonString) throws IOException {
-    return READER.readValue(jsonString);
-  }
-
-}
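The compareTo above orders keys by name first and by version second. A standalone sketch of the same ordering with a local stand-in class (KeyInfo itself is removed by this commit):

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class KeyOrderingSketch {
  static final class Key {
    final String name;
    final long version;
    Key(String name, long version) { this.name = name; this.version = version; }
    @Override public String toString() { return name + "#" + version; }
  }

  public static void main(String[] args) {
    List<Key> keys = Arrays.asList(
        new Key("b", 2), new Key("a", 5), new Key("b", 1));
    // Equivalent ordering: name ascending, then version ascending.
    keys.sort(Comparator.comparing((Key k) -> k.name)
        .thenComparingLong(k -> k.version));
    System.out.println(keys);                  // [a#5, b#1, b#2]
  }
}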

+ 0 - 118
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyInfoDetails.java

@@ -1,118 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest.response;
-
-import java.io.IOException;
-import java.util.List;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.fs.FileEncryptionInfo;
-
-/**
- * KeyInfoDetails class is used for parsing the json response
- * when a KeyInfoDetails call is made.
- */
-public class KeyInfoDetails extends KeyInfo {
-
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(KeyInfoDetails.class);
-
-  /**
-   * A list of KeyLocation entries, each giving the containerID and
-   * localID of one replica location of the key.
-   */
-  private List<KeyLocation> keyLocations;
-
-  private FileEncryptionInfo feInfo;
-
-  /**
-   * Constructor needed for json serialization.
-   */
-  public KeyInfoDetails() {
-  }
-
-  /**
-   * Set details of key location.
-   *
-   * @param locations - details of key location
-   */
-  public void setKeyLocation(List<KeyLocation> locations) {
-    this.keyLocations = locations;
-  }
-
-  /**
-   * Returns details of key location.
-   *
-   * @return volumeName
-   */
-  public List<KeyLocation> getKeyLocations() {
-    return keyLocations;
-  }
-
-  public void setFileEncryptionInfo(FileEncryptionInfo info) {
-    this.feInfo = info;
-  }
-
-  public FileEncryptionInfo getFileEncryptionInfo() {
-    return feInfo;
-  }
-
-  /**
-   * Parse a string to return KeyInfoDetails Object.
-   *
-   * @param jsonString Json String
-   * @return KeyInfoDetails
-   * @throws IOException
-   */
-  public static KeyInfoDetails parse(String jsonString) throws IOException {
-    return READER.readValue(jsonString);
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    KeyInfoDetails that = (KeyInfoDetails) o;
-
-    return new EqualsBuilder()
-        .append(getVersion(), that.getVersion())
-        .append(getKeyName(), that.getKeyName())
-        .append(keyLocations, that.keyLocations)
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(21, 33)
-        .append(getVersion())
-        .append(getKeyName())
-        .append(keyLocations)
-        .toHashCode();
-  }
-}
-

+ 0 - 89
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/KeyLocation.java

@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest.response;
-
-/**
- * KeyLocation class is used used for parsing json response
- * when KeyInfoDetails Call is made.
- */
-public class KeyLocation {
-  /**
-   * Which container this key is stored in.
-   */
-  private long containerID;
-  /**
-   * Which block inside the container this key is stored in.
-   */
-  private long localID;
-  /**
-   * Data length of this key replica.
-   */
-  private long length;
-  /**
-   * Offset of this key.
-   */
-  private long offset;
-
-  /**
-   * Empty constructor for Json serialization.
-   */
-  public KeyLocation() {
-
-  }
-
-  /**
-   * Constructs KeyLocation.
-   */
-  public KeyLocation(long containerID, long localID,
-                          long length, long offset) {
-    this.containerID = containerID;
-    this.localID = localID;
-    this.length = length;
-    this.offset = offset;
-  }
-
-  /**
-   * Returns the containerID of this Key.
-   */
-  public long getContainerID() {
-    return containerID;
-  }
-
-  /**
-   * Returns the localID of this Key.
-   */
-  public long getLocalID() {
-    return localID;
-  }
-
-  /**
-   * Returns the length of this Key.
-   */
-  public long getLength() {
-    return length;
-  }
-
-  /**
-   * Returns the offset of this Key.
-   */
-  public long getOffset() {
-    return offset;
-  }
-
-}

+ 0 - 215
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeInfo.java

@@ -1,215 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest.response;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.client.OzoneQuota;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-
-/**
- * VolumeInfo class is used for parsing the json response
- * when a VolumeInfo call is made.
- */
-@InterfaceAudience.Private
-public class VolumeInfo implements Comparable<VolumeInfo> {
-
-
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(VolumeInfo.class);
-
-  private VolumeOwner owner;
-  private OzoneQuota quota;
-  private String volumeName;
-  private String createdOn;
-  private String createdBy;
-
-
-  /**
-   * Constructor for VolumeInfo.
-   *
-   * @param volumeName - Name of the Volume
-   * @param createdOn - Date String
-   * @param createdBy - Person who created it
-   */
-  public VolumeInfo(String volumeName, String createdOn,
-                    String createdBy) {
-    this.volumeName = volumeName;
-    this.createdOn = createdOn;
-    this.createdBy = createdBy;
-  }
-
-  /**
-   * Constructor for VolumeInfo.
-   */
-  public VolumeInfo() {
-  }
-
-  /**
-   * Gets the volume name.
-   *
-   * @return Volume Name
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Sets the volume name.
-   *
-   * @param volumeName Volume Name
-   */
-  public void setVolumeName(String volumeName) {
-    this.volumeName = volumeName;
-  }
-
-
-  /**
-   * Returns the name of the person who created this volume.
-   *
-   * @return Name of Admin who created this
-   */
-  public String getCreatedBy() {
-    return createdBy;
-  }
-
-  /**
-   * Sets the user name of the person who created this volume.
-   *
-   * @param createdBy UserName
-   */
-  public void setCreatedBy(String createdBy) {
-    this.createdBy = createdBy;
-  }
-
-  /**
-   * Gets the date on which this volume was created.
-   *
-   * @return Date String
-   */
-  public String getCreatedOn() {
-    return createdOn;
-  }
-
-  /**
-   * Sets the date string.
-   *
-   * @param createdOn Date String
-   */
-  public void setCreatedOn(String createdOn) {
-    this.createdOn = createdOn;
-  }
-
-  /**
-   * Returns the owner info.
-   *
-   * @return OwnerInfo
-   */
-  public VolumeOwner getOwner() {
-    return owner;
-  }
-
-  /**
-   * Sets the owner.
-   *
-   * @param owner OwnerInfo
-   */
-  public void setOwner(VolumeOwner owner) {
-    this.owner = owner;
-  }
-
-  /**
-   * Returns the quota information on a volume.
-   *
-   * @return Quota
-   */
-  public OzoneQuota getQuota() {
-    return quota;
-  }
-
-  /**
-   * Sets the quota info.
-   *
-   * @param quota Quota Info
-   */
-  public void setQuota(OzoneQuota quota) {
-    this.quota = quota;
-  }
-
-  /**
-   * Comparable Interface.
-   * @param o VolumeInfo Object.
-   * @return Result of comparison
-   */
-  @Override
-  public int compareTo(VolumeInfo o) {
-    return this.volumeName.compareTo(o.getVolumeName());
-  }
-
-  /**
-   * Returns VolumeInfo class from json string.
-   *
-   * @param data Json String
-   *
-   * @return VolumeInfo
-   *
-   * @throws IOException
-   */
-  public static VolumeInfo parse(String data) throws IOException {
-    return READER.readValue(data);
-  }
-
-  /**
-   * Indicates whether some other object is "equal to" this one.
-   *
-   * @param obj the reference object with which to compare.
-   *
-   * @return {@code true} if this object is the same as the obj
-   * argument; {@code false} otherwise.
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) {
-      return false;
-    }
-    if (getClass() != obj.getClass()) {
-      return false;
-    }
-    VolumeInfo otherInfo = (VolumeInfo) obj;
-    return otherInfo.getVolumeName().equals(this.getVolumeName());
-  }
-
-  /**
-   * Returns a hash code value for the object. This method is
-   * supported for the benefit of hash tables such as those provided by
-   * HashMap.
-   * @return a hash code value for this object.
-   *
-   * @see Object#equals(Object)
-   * @see System#identityHashCode
-   */
-  @Override
-  public int hashCode() {
-    return getVolumeName().hashCode();
-  }
-
-}

+ 0 - 61
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/VolumeOwner.java

@@ -1,61 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest.response;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-
-/**
- * Volume Owner represents the owner of a volume.
- *
- * This is a class instead of a string since we might need to extend this class
- * to support other forms of authentication.
- */
-@InterfaceAudience.Private
-public class VolumeOwner {
-  @JsonInclude(JsonInclude.Include.NON_NULL)
-  private String name;
-
-  /**
-   * Constructor for VolumeOwner.
-   *
-   * @param name name of the User
-   */
-  public VolumeOwner(String name) {
-    this.name = name;
-  }
-
-  /**
-   * Constructs Volume Owner.
-   */
-  public VolumeOwner() {
-    name = null;
-  }
-
-  /**
-   * Returns the user name.
-   *
-   * @return Name
-   */
-  public String getName() {
-    return name;
-  }
-
-}

+ 0 - 24
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/rest/response/package-info.java

@@ -1,24 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hadoop.ozone.client.rest.response;
-
-/**
- * This package contains classes for the Ozone REST client library.
- */

+ 0 - 23
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java

@@ -24,13 +24,11 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
 import com.fasterxml.jackson.databind.ObjectWriter;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.ozone.client.rest.response.BucketInfo;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
     .ServicePort;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -165,27 +163,6 @@ public final class ServiceInfo {
         serviceInfo.getServicePortsList());
   }
 
-  /**
-   * Returns a JSON string of this object.
-   *
-   * @return String - json string
-   * @throws IOException
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * Parse a JSON string into ServiceInfo Object.
-   *
-   * @param jsonString Json String
-   * @return BucketInfo
-   * @throws IOException
-   */
-  public static BucketInfo parse(String jsonString) throws IOException {
-    return READER.readValue(jsonString);
-  }
-
   /**
    * Creates a new builder to build {@link ServiceInfo}.
    * @return {@link ServiceInfo.Builder}
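
Reviewer note: this hunk only drops ServiceInfo's JSON round-trip helpers; the removed parse() even declared a BucketInfo return type while its javadoc promised a ServiceInfo, a copy-paste oddity this cleanup retires. For reference, the cached ObjectReader/ObjectWriter round-trip pattern those helpers used looks like this (the DTO below is illustrative):

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.ObjectReader;
    import com.fasterxml.jackson.databind.ObjectWriter;

    public class JsonRoundTrip {
      public static class Service {
        public String name;  // public fields keep the demo short
        public int port;
      }

      private static final ObjectMapper MAPPER = new ObjectMapper();
      private static final ObjectReader READER = MAPPER.readerFor(Service.class);
      private static final ObjectWriter WRITER = MAPPER.writer();

      public static void main(String[] args) throws Exception {
        Service in = new Service();
        in.name = "om";
        in.port = 9862;
        String json = WRITER.writeValueAsString(in);    // {"name":"om","port":9862}
        Service out = READER.readValue(json);           // typed read-back
        System.out.println(out.name + ":" + out.port);  // om:9862
      }
    }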

+ 0 - 134
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketArgs.java

@@ -1,134 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.handlers;
-
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.OzoneConsts;
-
-/**
- * BucketArgs packages all bucket related arguments to
- * file system calls.
- */
-public class BucketArgs extends VolumeArgs {
-  private final String bucketName;
-  private OzoneConsts.Versioning versioning;
-  private StorageType storageType;
-
-  /**
-   * Constructor for BucketArgs.
-   *
-   * @param volumeName - volumeName
-   * @param bucketName - bucket Name
-   * @param userArgs - userArgs
-   */
-  public BucketArgs(String volumeName, String bucketName, UserArgs userArgs) {
-    super(volumeName, userArgs);
-    this.bucketName = bucketName;
-    this.versioning = OzoneConsts.Versioning.NOT_DEFINED;
-    this.storageType = null;
-  }
-
-
-  /**
-   * Constructor for BucketArgs.
-   *
-   * @param bucketName - bucket Name
-   * @param volumeArgs - volume Args
-   */
-  public BucketArgs(String bucketName, VolumeArgs volumeArgs) {
-    super(volumeArgs);
-    this.bucketName = bucketName;
-    this.versioning = OzoneConsts.Versioning.NOT_DEFINED;
-    this.storageType = null;
-  }
-
-  /**
-   * Constructor for BucketArgs.
-   *
-   * @param args - Bucket Args
-   */
-  public BucketArgs(BucketArgs args) {
-    this(args.getBucketName(), args);
-  }
-
-  /**
-   * Returns the Bucket Name.
-   *
-   * @return Bucket Name
-   */
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  /**
-   * Returns Versioning Info.
-   *
-   * @return versioning
-   */
-  public OzoneConsts.Versioning getVersioning() {
-    return versioning;
-  }
-
-
-  /**
-   * Sets Versioning Info.
-   *
-   * @param versioning - Enum value
-   */
-  public void setVersioning(OzoneConsts.Versioning versioning) {
-    this.versioning = versioning;
-  }
-
-  /**
-   * Returns the current storage type.
-   *
-   * @return Storage Type
-   */
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-  /**
-   * Sets the storage type.
-   *
-   * @param storageType - Storage Type
-   */
-  public void setStorageType(StorageType storageType) {
-    this.storageType = storageType;
-  }
-
-  /**
-   * Returns Volume/bucketName.
-   *
-   * @return String
-   */
-  @Override
-  public String getResourceName() {
-    return getVolumeName() + "/" + getBucketName();
-  }
-
-  /**
-   * Returns User/Volume name which is the parent of this
-   * bucket.
-   *
-   * @return String
-   */
-  public String getParentName() {
-    return getUserName() + "/" + getVolumeName();
-  }
-}

+ 0 - 117
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyArgs.java

@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.handlers;
-
-/**
- * Class that packages all key Arguments.
- */
-public class KeyArgs extends BucketArgs {
-  private String key;
-  private String hash;
-  private long size;
-
-  /**
-   * Constructor for Key Args.
-   *
-   * @param volumeName - Volume Name
-   * @param bucketName - Bucket Name
-   * @param objectName - Key
-   */
-  public KeyArgs(String volumeName, String bucketName,
-                 String objectName, UserArgs args) {
-    super(volumeName, bucketName, args);
-    this.key = objectName;
-  }
-
-  /**
-   * Constructor for Key Args.
-   *
-   * @param objectName - Key
-   * @param args - Bucket Args
-   */
-  public KeyArgs(String objectName, BucketArgs args) {
-    super(args);
-    this.key = objectName;
-  }
-
-  /**
-   * Get Key Name.
-   *
-   * @return String
-   */
-  public String getKeyName() {
-    return this.key;
-  }
-
-  /**
-   * Computed File hash.
-   *
-   * @return String
-   */
-  public String getHash() {
-    return hash;
-  }
-
-  /**
-   * Sets the hash String.
-   *
-   * @param hash String
-   */
-  public void setHash(String hash) {
-    this.hash = hash;
-  }
-
-  /**
-   * Returns the file size.
-   *
-   * @return long - file size
-   */
-  public long getSize() {
-    return size;
-  }
-
-  /**
-   * Set Size.
-   *
-   * @param size Size of the file
-   */
-  public void setSize(long size) {
-    this.size = size;
-  }
-
-  /**
-   * Returns the name of the resource.
-   *
-   * @return String
-   */
-  @Override
-  public String getResourceName() {
-    return super.getResourceName() + "/" + getKeyName();
-  }
-
-  /**
-   * Parent name of this resource.
-   *
-   * @return String.
-   */
-  @Override
-  public String getParentName() {
-    return super.getResourceName();
-  }
-}
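
Reviewer note: BucketArgs and KeyArgs built REST resource paths by chaining getResourceName() up the args hierarchy, yielding volume, volume/bucket, and volume/bucket/key. A condensed sketch of that chaining, with illustrative class names:

    class VolumePath {
      private final String volume;
      VolumePath(String volume) { this.volume = volume; }
      String resourceName() { return volume; }                               // "vol1"
    }

    class BucketPath extends VolumePath {
      private final String bucket;
      BucketPath(String volume, String bucket) {
        super(volume);
        this.bucket = bucket;
      }
      @Override
      String resourceName() { return super.resourceName() + "/" + bucket; } // "vol1/bucket1"
    }

    class KeyPath extends BucketPath {
      private final String key;
      KeyPath(String volume, String bucket, String key) {
        super(volume, bucket);
        this.key = key;
      }
      @Override
      String resourceName() { return super.resourceName() + "/" + key; }    // "vol1/bucket1/key1"
    }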

+ 0 - 142
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/ListArgs.java

@@ -1,142 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.handlers;
-
-/**
- * Supports listing keys with pagination.
- */
-public class ListArgs<T extends UserArgs> {
-  private String prevKey;
-  private String prefix;
-  private int maxKeys;
-  private boolean rootScan;
-  private T args;
-
-  /**
-   * Constructor for ListArgs.
-   *
-   * @param args      - BucketArgs
-   * @param prefix    Prefix to start Query from
-   * @param maxKeys   Max result set
-   * @param prevKey - Page token
-   */
-  public ListArgs(T args, String prefix, int maxKeys,
-                  String prevKey) {
-    setArgs(args);
-    setPrefix(prefix);
-    setMaxKeys(maxKeys);
-    setPrevKey(prevKey);
-  }
-
-  /**
-   * Copy Constructor for ListArgs.
-   *
-   * @param args - List Args
-   */
-  public ListArgs(T args, ListArgs listArgs) {
-    this(args, listArgs.getPrefix(), listArgs.getMaxKeys(),
-        listArgs.getPrevKey());
-  }
-
-  /**
-   * Returns page token.
-   *
-   * @return String
-   */
-  public String getPrevKey() {
-    return prevKey;
-  }
-
-  /**
-   * Sets page token.
-   *
-   * @param prevKey - Page token
-   */
-  public void setPrevKey(String prevKey) {
-    this.prevKey = prevKey;
-  }
-
-  /**
-   * Gets max keys.
-   *
-   * @return int
-   */
-  public int getMaxKeys() {
-    return maxKeys;
-  }
-
-  /**
-   * Sets max keys.
-   *
-   * @param maxKeys - Maximum keys to return
-   */
-  public void setMaxKeys(int maxKeys) {
-    this.maxKeys = maxKeys;
-  }
-
-  /**
-   * Gets prefix.
-   *
-   * @return String
-   */
-  public String getPrefix() {
-    return prefix;
-  }
-
-  /**
-   * Sets prefix.
-   *
-   * @param prefix - The prefix that we are looking for
-   */
-  public void setPrefix(String prefix) {
-    this.prefix = prefix;
-  }
-
-  /**
-   * Gets args.
-   * @return  T
-   */
-  public T getArgs() {
-    return args;
-  }
-
-  /**
-   * Sets  args.
-   * @param args T
-   */
-  public void setArgs(T args) {
-    this.args = args;
-  }
-
-  /**
-   * Checks if we are doing a rootScan.
-   * @return - RootScan.
-   */
-  public boolean isRootScan() {
-    return rootScan;
-  }
-
-  /**
-   * Sets the RootScan property.
-   * @param rootScan - Boolean.
-   */
-  public void setRootScan(boolean rootScan) {
-    this.rootScan = rootScan;
-  }
-
-}
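
Reviewer note: ListArgs carried the classic page-token triple (prefix, maxKeys, prevKey). Anyone porting a caller to the RPC client will reproduce roughly this consuming loop; the interface and names below are assumptions for illustration, not an existing API:

    import java.util.List;

    interface KeyPager {
      // Returns up to maxKeys keys starting with prefix, beginning after prevKey.
      List<String> list(String prefix, int maxKeys, String prevKey);
    }

    class PagingDemo {
      static void listAll(KeyPager pager, String prefix, int pageSize) {
        String prevKey = null;                    // null token: start from the beginning
        while (true) {
          List<String> page = pager.list(prefix, pageSize, prevKey);
          page.forEach(System.out::println);
          if (page.size() < pageSize) {
            break;                                // short page: nothing left to fetch
          }
          prevKey = page.get(page.size() - 1);    // last key of this page is the next token
        }
      }
    }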

+ 0 - 172
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/UserArgs.java

@@ -1,172 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.handlers;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Request;
-import javax.ws.rs.core.UriInfo;
-import java.util.Arrays;
-
-/**
- * UserArgs is used to package caller info
- * and pass it down to file system.
- */
-@InterfaceAudience.Private
-public class UserArgs {
-  private String userName;
-  private final String requestID;
-  private final String hostName;
-  private final UriInfo uri;
-  private final Request request;
-  private final HttpHeaders headers;
-  private String[] groups;
-
-
-  /**
-   * Constructs  user args.
-   *
-   * @param userName - User name
-   * @param requestID - Request ID
-   * @param hostName - Host Name
-   * @param req - Request
-   * @param info - Uri Info
-   * @param httpHeaders - http headers
-   */
-  public UserArgs(String userName, String requestID, String hostName,
-                  Request req, UriInfo info, HttpHeaders httpHeaders) {
-    this.hostName = hostName;
-    this.userName = userName;
-    this.requestID = requestID;
-    this.uri = info;
-    this.request = req;
-    this.headers = httpHeaders;
-  }
-
-  /**
-   * Constructs  user args when we don't know the user name yet.
-   *
-   * @param requestID - Request ID
-   * @param hostName - Host Name
-   * @param req - Request
-   * @param info - UriInfo
-   * @param httpHeaders - http headers
-   */
-  public UserArgs(String requestID, String hostName, Request req, UriInfo info,
-                  HttpHeaders httpHeaders) {
-    this.hostName = hostName;
-    this.requestID = requestID;
-    this.uri = info;
-    this.request = req;
-    this.headers = httpHeaders;
-  }
-
-  /**
-   * Returns hostname.
-   *
-   * @return String
-   */
-  public String getHostName() {
-    return hostName;
-  }
-
-  /**
-   * Returns RequestID.
-   *
-   * @return Long
-   */
-  public String getRequestID() {
-    return requestID;
-  }
-
-  /**
-   * Returns User Name.
-   *
-   * @return String
-   */
-  public String getUserName() {
-    return userName;
-  }
-
-  /**
-   * Sets the user name.
-   *
-   * @param userName Name of the user
-   */
-  public void setUserName(String userName) {
-    this.userName = userName;
-  }
-
-  /**
-   * Returns list of groups.
-   *
-   * @return String[]
-   */
-  public String[] getGroups() {
-    return groups != null ?
-        Arrays.copyOf(groups, groups.length) : null;
-  }
-
-  /**
-   * Sets the group list.
-   *
-   * @param groups list of groups
-   */
-  public void setGroups(String[] groups) {
-    if (groups != null) {
-      this.groups = Arrays.copyOf(groups, groups.length);
-    }
-  }
-
-  /**
-   * Returns the resource Name.
-   *
-   * @return String Resource.
-   */
-  public String getResourceName() {
-    return getUserName();
-  }
-
-  /**
-   * Returns Http Headers for this call.
-   *
-   * @return httpHeaders
-   */
-  public HttpHeaders getHeaders() {
-    return headers;
-  }
-
-  /**
-   * Returns Request Object.
-   *
-   * @return Request
-   */
-  public Request getRequest() {
-    return request;
-  }
-
-  /**
-   * Returns UriInfo.
-   *
-   * @return UriInfo
-   */
-  public UriInfo getUri() {
-    return uri;
-  }
-}
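
Reviewer note: the one non-obvious detail in the removed UserArgs is that the groups array is defensively copied in both the setter and the getter, so neither side can mutate shared state. The idiom in isolation:

    import java.util.Arrays;

    class GroupsHolder {
      private String[] groups;

      void setGroups(String[] groups) {
        if (groups != null) {
          this.groups = Arrays.copyOf(groups, groups.length);  // detach from the caller's array
        }
      }

      String[] getGroups() {
        return groups != null
            ? Arrays.copyOf(groups, groups.length)             // hand out a copy, not the field
            : null;
      }
    }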

+ 0 - 143
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeArgs.java

@@ -1,143 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.handlers;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Request;
-import javax.ws.rs.core.UriInfo;
-
-/**
- * VolumeArgs is used to package all volume
- * related arguments in the call to underlying
- * file system.
- */
-@InterfaceAudience.Private
-public class VolumeArgs extends UserArgs {
-  private String adminName;
-  private final String volumeName;
-  private OzoneQuota quota;
-
-  /**
-   * Returns Quota Information.
-   *
-   * @return Quota
-   */
-  public OzoneQuota getQuota() {
-    return quota;
-  }
-
-  /**
-   * Returns volume name.
-   *
-   * @return String
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Constructs  volume Args.
-   *
-   * @param userName - User name
-   * @param volumeName - volume Name
-   * @param requestID - Request ID
-   * @param hostName - Host Name
-   * @param request  - Http Request
-   * @param info - URI info
-   * @param headers - http headers
-   * @param groups - list of groups allowed to access the volume
-   */
-  @SuppressWarnings("parameternumber")
-  public VolumeArgs(String userName, String volumeName, String requestID,
-                    String hostName, Request request, UriInfo info,
-                    HttpHeaders headers, String[] groups) {
-    super(userName, requestID, hostName, request, info, headers);
-    super.setGroups(groups);
-    this.volumeName = volumeName;
-  }
-
-  /**
-   * Constructs  volume Args.
-   *
-   * @param volumeName - volume Name
-   * @param userArgs - userArgs
-   */
-  public VolumeArgs(String volumeName, UserArgs userArgs) {
-    this(userArgs.getUserName(), volumeName, userArgs.getRequestID(),
-         userArgs.getHostName(), userArgs.getRequest(), userArgs.getUri(),
-         userArgs.getHeaders(), userArgs.getGroups());
-  }
-
-  /**
-   * Creates VolumeArgs from another VolumeArgs.
-   */
-  public VolumeArgs(VolumeArgs volArgs) {
-    this(volArgs.getVolumeName(), volArgs);
-  }
-
-  /**
-   * Sets Quota information.
-   *
-   * @param quota - Quota String
-   * @throws IllegalArgumentException
-   */
-  public void setQuota(String quota) throws IllegalArgumentException {
-    this.quota = OzoneQuota.parseQuota(quota);
-  }
-
-  /**
-   * Sets quota information.
-   *
-   * @param quota - OzoneQuota
-   */
-  public void setQuota(OzoneQuota quota) {
-    this.quota = quota;
-  }
-
-  /**
-   * Gets admin Name.
-   *
-   * @return - Admin Name
-   */
-  public String getAdminName() {
-    return adminName;
-  }
-
-  /**
-   * Sets Admin Name.
-   *
-   * @param adminName - Admin Name
-   */
-  public void setAdminName(String adminName) {
-    this.adminName = adminName;
-  }
-
-  /**
-   * Returns UserName/VolumeName.
-   *
-   * @return String
-   */
-  @Override
-  public String getResourceName() {
-    return super.getResourceName() + "/" + getVolumeName();
-  }
-}

+ 0 - 22
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/handlers/package-info.java

@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.web.handlers;
-
-/**
- * REST handler value classes.
- */

+ 0 - 214
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/OzoneQuota.java

@@ -1,214 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.request;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import com.fasterxml.jackson.annotation.JsonIgnore;
-
-/**
- * Represents an OzoneQuota object that can be applied to
- * a storage volume.
- */
-@InterfaceAudience.Private
-public class OzoneQuota {
-
-  private Units unit;
-  private int size;
-
-  /** Quota Units.*/
-  public enum Units {UNDEFINED, BYTES, MB, GB, TB}
-
-  /**
-   * Returns size.
-   *
-   * @return int
-   */
-  public int getSize() {
-    return size;
-  }
-
-  /**
-   * Returns Units.
-   *
-   * @return Unit in MB, GB or TB
-   */
-  public Units getUnit() {
-    return unit;
-  }
-
-  /**
-   * Constructs a default Quota object.
-   */
-  public OzoneQuota() {
-    this.size = 0;
-    this.unit = Units.UNDEFINED;
-  }
-
-  /**
-   * Constructor for Ozone Quota.
-   *
-   * @param size - Integer Size
-   * @param unit MB, GB  or TB
-   */
-  public OzoneQuota(int size, Units unit) {
-    this.size = size;
-    this.unit = unit;
-  }
-
-  /**
-   * Formats a quota as a string.
-   *
-   * @param quota the quota to format
-   * @return string representation of quota
-   */
-  public static String formatQuota(OzoneQuota quota) {
-    return String.valueOf(quota.size) + quota.unit;
-  }
-
-  /**
-   * Parses a user provided string and returns the
-   * Quota Object.
-   *
-   * @param quotaString Quota String
-   *
-   * @return OzoneQuota object
-   *
-   * @throws IllegalArgumentException
-   */
-  public static OzoneQuota parseQuota(String quotaString)
-      throws IllegalArgumentException {
-
-    if ((quotaString == null) || (quotaString.isEmpty())) {
-      throw new IllegalArgumentException(
-          "Quota string cannot be null or empty.");
-    }
-
-    if (isRemove(quotaString)) {
-      throw new IllegalArgumentException("Remove is invalid in this context.");
-    }
-
-    String uppercase = quotaString.toUpperCase().replaceAll("\\s+", "");
-    String size = "";
-    int nSize;
-    Units currUnit = Units.MB;
-    Boolean found = false;
-    if (uppercase.endsWith(Header.OZONE_QUOTA_MB)) {
-      size = uppercase
-          .substring(0, uppercase.length() - Header.OZONE_QUOTA_MB.length());
-      currUnit = Units.MB;
-      found = true;
-    }
-
-    if (uppercase.endsWith(Header.OZONE_QUOTA_GB)) {
-      size = uppercase
-          .substring(0, uppercase.length() - Header.OZONE_QUOTA_GB.length());
-      currUnit = Units.GB;
-      found = true;
-    }
-
-    if (uppercase.endsWith(Header.OZONE_QUOTA_TB)) {
-      size = uppercase
-          .substring(0, uppercase.length() - Header.OZONE_QUOTA_TB.length());
-      currUnit = Units.TB;
-      found = true;
-    }
-
-    if (uppercase.endsWith(Header.OZONE_QUOTA_BYTES)) {
-      size = uppercase
-          .substring(0, uppercase.length() - Header.OZONE_QUOTA_BYTES.length());
-      currUnit = Units.BYTES;
-      found = true;
-    }
-
-    if (!found) {
-      throw new IllegalArgumentException(
-          "Quota unit not recognized. Supported values are BYTES, MB, GB and " +
-              "TB.");
-    }
-
-    nSize = Integer.parseInt(size);
-    if (nSize < 0) {
-      throw new IllegalArgumentException("Quota cannot be negative.");
-    }
-
-    return new OzoneQuota(nSize, currUnit);
-  }
-
-
-  /**
-   * Checks if the quota string is just a remove operation.
-   *
-   * @param quotaString User provided quota String
-   *
-   * @return True if it is Remove, false otherwise
-   */
-  public static boolean isRemove(String quotaString) {
-
-    return (quotaString != null) &&
-        (quotaString.compareToIgnoreCase(Header.OZONE_QUOTA_REMOVE) == 0);
-  }
-
-  /**
-   * Returns size in Bytes or -1 if there is no Quota.
-   */
-  @JsonIgnore
-  public long sizeInBytes() {
-    switch (this.unit) {
-    case BYTES:
-      return this.getSize();
-    case MB:
-      return this.getSize() * OzoneConsts.MB;
-    case GB:
-      return this.getSize() * OzoneConsts.GB;
-    case TB:
-      return this.getSize() * OzoneConsts.TB;
-    case UNDEFINED:
-    default:
-      return -1;
-    }
-  }
-
-  /**
-   * Returns OzoneQuota corresponding to size in bytes.
-   *
-   * @param sizeInBytes size in bytes to be converted
-   *
-   * @return OzoneQuota object
-   */
-  public static OzoneQuota getOzoneQuota(long sizeInBytes) {
-    long size;
-    Units unit;
-    if (sizeInBytes % OzoneConsts.TB == 0) {
-      size = sizeInBytes / OzoneConsts.TB;
-      unit = Units.TB;
-    } else if (sizeInBytes % OzoneConsts.GB == 0) {
-      size = sizeInBytes / OzoneConsts.GB;
-      unit = Units.GB;
-    } else if (sizeInBytes % OzoneConsts.MB == 0) {
-      size = sizeInBytes / OzoneConsts.MB;
-      unit = Units.MB;
-    } else {
-      size = sizeInBytes;
-      unit = Units.BYTES;
-    }
-    return new OzoneQuota((int)size, unit);
-  }
-}
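
Reviewer note: OzoneQuota's suffix parsing ("512MB", "10GB", ...) is the piece most likely to be wanted elsewhere. A hedged, self-contained re-sketch of the same rules — negative sizes rejected, unrecognized suffixes refused — with unit constants simplified to powers of two; this is not the patch's code:

    class QuotaDemo {
      enum Unit {
        TB(1L << 40), GB(1L << 30), MB(1L << 20), BYTES(1L);

        final long bytes;
        Unit(long bytes) { this.bytes = bytes; }
      }

      static long parseToBytes(String quota) {
        String s = quota.toUpperCase().replaceAll("\\s+", "");
        // No unit name is a suffix of another, so scan order does not matter.
        for (Unit unit : Unit.values()) {
          if (s.endsWith(unit.name())) {
            long size = Long.parseLong(s.substring(0, s.length() - unit.name().length()));
            if (size < 0) {
              throw new IllegalArgumentException("Quota cannot be negative.");
            }
            return size * unit.bytes;
          }
        }
        throw new IllegalArgumentException("Quota unit not recognized in: " + quota);
      }

      public static void main(String[] args) {
        System.out.println(parseToBytes("10GB"));   // 10737418240
        System.out.println(parseToBytes("512 MB")); // 536870912
      }
    }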

+ 0 - 23
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/request/package-info.java

@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/**
- * Request value classes for the Ozone REST interface.
- */
-package org.apache.hadoop.ozone.web.request;

+ 0 - 335
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java

@@ -1,335 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.response;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonFilter;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.ser.FilterProvider;
-import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
-import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-import com.google.common.base.Preconditions;
-
-/**
- * BucketInfo class, this is used as response class to send
- * Json info about a bucket back to a client.
- */
-public class BucketInfo implements Comparable<BucketInfo> {
-  static final String BUCKET_INFO = "BUCKET_INFO_FILTER";
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(BucketInfo.class);
-  private static final ObjectWriter WRITER;
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    String[] ignorableFieldNames = {"bytesUsed", "keyCount"};
-
-    FilterProvider filters = new SimpleFilterProvider().addFilter(BUCKET_INFO,
-        SimpleBeanPropertyFilter.serializeAllExcept(ignorableFieldNames));
-    mapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY);
-    mapper.addMixIn(Object.class, MixIn.class);
-
-    mapper.setFilterProvider(filters);
-    WRITER = mapper.writerWithDefaultPrettyPrinter();
-  }
-
-  private String volumeName;
-  private String bucketName;
-  private String createdOn;
-  private List<OzoneAcl> acls;
-  private OzoneConsts.Versioning versioning;
-  private StorageType storageType;
-  private long bytesUsed;
-  private long keyCount;
-  private String encryptionKeyName;
-
-  /**
-   * Constructor for BucketInfo.
-   *
-   * @param volumeName
-   * @param bucketName
-   */
-  public BucketInfo(String volumeName, String bucketName) {
-    this.volumeName = volumeName;
-    this.bucketName = bucketName;
-  }
-
-
-  /**
-   * Default constructor for BucketInfo.
-   */
-  public BucketInfo() {
-    acls = new ArrayList<>();
-  }
-
-  /**
-   * Parse a JSON string into BucketInfo Object.
-   *
-   * @param jsonString - Json String
-   *
-   * @return - BucketInfo
-   *
-   * @throws IOException
-   */
-  public static BucketInfo parse(String jsonString) throws IOException {
-    return READER.readValue(jsonString);
-  }
-
-  /**
-   * Returns a List of ACL on the Bucket.
-   *
-   * @return List of Acls
-   */
-  public List<OzoneAcl> getAcls() {
-    return acls;
-  }
-
-  /**
-   * Sets ACLs.
-   *
-   * @param acls - Acls list
-   */
-  public void setAcls(List<OzoneAcl> acls) {
-    this.acls = acls;
-  }
-
-  /**
-   * Returns Storage Type info.
-   *
-   * @return Storage Type of the bucket
-   */
-  public StorageType getStorageType() {
-    return storageType;
-  }
-
-  /**
-   * Sets the Storage Type.
-   *
-   * @param storageType - Storage Type
-   */
-  public void setStorageType(StorageType storageType) {
-    this.storageType = storageType;
-  }
-
-  /**
-   * Returns versioning.
-   *
-   * @return versioning Enum
-   */
-  public OzoneConsts.Versioning getVersioning() {
-    return versioning;
-  }
-
-  /**
-   * Sets Versioning.
-   *
-   * @param versioning
-   */
-  public void setVersioning(OzoneConsts.Versioning versioning) {
-    this.versioning = versioning;
-  }
-
-
-  /**
-   * Gets bucket Name.
-   *
-   * @return String
-   */
-  public String getBucketName() {
-    return bucketName;
-  }
-
-  /**
-   * Sets bucket Name.
-   *
-   * @param bucketName - Name of the bucket
-   */
-  public void setBucketName(String bucketName) {
-    this.bucketName = bucketName;
-  }
-
-  /**
-   * Sets creation time of the bucket.
-   *
-   * @param creationTime - Date String
-   */
-  public void setCreatedOn(String creationTime) {
-    this.createdOn = creationTime;
-  }
-
-  /**
-   * Returns creation time.
-   *
-   * @return creation time of bucket.
-   */
-  public String getCreatedOn() {
-    return createdOn;
-  }
-
-
-  public void setEncryptionKeyName(String encryptionKeyName) {
-    this.encryptionKeyName = encryptionKeyName;
-  }
-
-  public String getEncryptionKeyName() {
-    return encryptionKeyName;
-  }
-
-  /**
-   * Returns a JSON string of this object.
-   * After stripping out bytesUsed and keyCount
-   *
-   * @return String
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * Returns the Object as a Json String.
-   *
-   * Both toJsonString and toDBString exist because toJsonString
-   * supports an external facing contract with REST clients, while
-   * the server internally wants to add more fields to this class.
-   * The distinction helps in serializing all fields vs. only the
-   * fields that are part of the REST protocol.
-   */
-  public String toDBString() throws IOException {
-    return JsonUtils.toJsonString(this);
-  }
-
-  /**
-   * Returns Volume Name.
-   *
-   * @return String volume name
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Sets the Volume Name of the bucket.
-   *
-   * @param volumeName - volumeName
-   */
-  public void setVolumeName(String volumeName) {
-    this.volumeName = volumeName;
-  }
-
-  /**
-   * Compares this object with the specified object for order.  Returns a
-   * negative integer, zero, or a positive integer as this object is less
-   * than, equal to, or greater than the specified object.
-   *
-   * Please note : BucketInfo compare functions are used only within the
-   * context of a volume, hence volume name is purposefully ignored in
-   * compareTo, equal and hashcode functions of this class.
-   */
-  @Override
-  public int compareTo(BucketInfo o) {
-    Preconditions.checkState(o.getVolumeName().equals(this.getVolumeName()));
-    return this.bucketName.compareTo(o.getBucketName());
-  }
-
-  /**
-   * Checks if two bucketInfo's are equal.
-   * @param o Object BucketInfo
-   * @return  True or False
-   */
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-    if (!(o instanceof BucketInfo)) {
-      return false;
-    }
-
-    BucketInfo that = (BucketInfo) o;
-    Preconditions.checkState(that.getVolumeName().equals(this.getVolumeName()));
-    return bucketName.equals(that.bucketName);
-
-  }
-
-  /**
-   * Hash Code for this object.
-   * @return int
-   */
-  @Override
-  public int hashCode() {
-    return bucketName.hashCode();
-  }
-
-  /**
-   * Get the number of bytes used by this bucket.
-   *
-   * @return long
-   */
-  public long getBytesUsed() {
-    return bytesUsed;
-  }
-
-  /**
-   * Set bytes Used.
-   *
-   * @param bytesUsed - bytesUsed
-   */
-  public void setBytesUsed(long bytesUsed) {
-    this.bytesUsed = bytesUsed;
-  }
-
-  /**
-   * Get Key Count  inside this bucket.
-   *
-   * @return - KeyCount
-   */
-  public long getKeyCount() {
-    return keyCount;
-  }
-
-  /**
-   * Set Key Count inside this bucket.
-   *
-   * @param keyCount - Sets the Key Count
-   */
-  public void setKeyCount(long keyCount) {
-    this.keyCount = keyCount;
-  }
-
-  /**
-   * This class allows us to create custom filters
-   * for the Json serialization.
-   */
-  @JsonFilter(BUCKET_INFO)
-  static class MixIn {
-
-  }
-
-}
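
Reviewer note: BucketInfo, and the KeyInfo/ListBuckets/ListKeys classes below, all hinge on one Jackson idiom: a @JsonFilter mix-in combined with SimpleBeanPropertyFilter.serializeAllExcept(...), so that toJsonString() hides server-internal fields which toDBString() keeps. Distilled to its essentials (field and filter names here are illustrative):

    import com.fasterxml.jackson.annotation.JsonAutoDetect;
    import com.fasterxml.jackson.annotation.JsonFilter;
    import com.fasterxml.jackson.annotation.PropertyAccessor;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
    import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;

    public class FilteredJsonDemo {
      static class Bucket {
        private String bucketName = "b1";
        private long bytesUsed = 42;  // server-internal; hidden from the REST view
      }

      @JsonFilter("REST_FILTER")
      static class MixIn { }

      public static void main(String[] args) throws Exception {
        ObjectMapper restMapper = new ObjectMapper();
        restMapper.setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY);
        restMapper.addMixIn(Object.class, MixIn.class);  // attach the filter to every type
        restMapper.setFilterProvider(new SimpleFilterProvider().addFilter(
            "REST_FILTER", SimpleBeanPropertyFilter.serializeAllExcept("bytesUsed")));

        System.out.println(restMapper.writeValueAsString(new Bucket()));  // {"bucketName":"b1"}
      }
    }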

+ 0 - 311
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java

@@ -1,311 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.response;
-
-import java.io.IOException;
-
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonFilter;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.ser.FilterProvider;
-import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
-import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-
-/**
- * Represents an Ozone key Object.
- */
-public class KeyInfo implements Comparable<KeyInfo> {
-  static final String OBJECT_INFO = "OBJECT_INFO_FILTER";
-
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(KeyInfo.class);
-  private static final ObjectWriter WRITER;
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    String[] ignorableFieldNames = {"dataFileName"};
-
-    FilterProvider filters = new SimpleFilterProvider()
-        .addFilter(OBJECT_INFO, SimpleBeanPropertyFilter
-            .serializeAllExcept(ignorableFieldNames));
-    mapper.setVisibility(PropertyAccessor.FIELD,
-        JsonAutoDetect.Visibility.ANY);
-    mapper.addMixIn(Object.class, MixIn.class);
-
-    mapper.setFilterProvider(filters);
-    WRITER = mapper.writerWithDefaultPrettyPrinter();
-  }
-
-  /**
-   * This class allows us to create custom filters
-   * for the Json serialization.
-   */
-  @JsonFilter(OBJECT_INFO)
-  class MixIn {
-
-  }
-  private long version;
-  private String md5hash;
-  private String createdOn;
-  private String modifiedOn;
-  private long size;
-  private String keyName;
-
-  private String dataFileName;
-
-  private ReplicationType type;
-
-  /**
-   * Return replication type of the key.
-   *
-   * @return replication type
-   */
-  public ReplicationType getType() {
-    return type;
-  }
-
-  /**
-   * Set replication type of the key.
-   *
-   * @param replicationType
-   */
-  public void setType(ReplicationType replicationType) {
-    this.type = replicationType;
-  }
-
-  /**
-   * When this key was created.
-   *
-   * @return Date String
-   */
-  public String getCreatedOn() {
-    return createdOn;
-  }
-
-  /**
-   * When this key was modified.
-   *
-   * @return Date String
-   */
-  public String getModifiedOn() {
-    return modifiedOn;
-  }
-
-  /**
-   * When this key was created.
-   *
-   * @param createdOn - Date String
-   */
-  public void setCreatedOn(String createdOn) {
-    this.createdOn = createdOn;
-  }
-
-  /**
-   * When this key was modified.
-   *
-   * @param modifiedOn - Date String
-   */
-  public void setModifiedOn(String modifiedOn) {
-    this.modifiedOn = modifiedOn;
-  }
-
-  /**
-   * Full path to where the actual data for this key is stored.
-   *
-   * @return String
-   */
-  public String getDataFileName() {
-    return dataFileName;
-  }
-
-  /**
-   * Sets up where the file path is stored.
-   *
-   * @param dataFileName - Data File Name
-   */
-  public void setDataFileName(String dataFileName) {
-    this.dataFileName = dataFileName;
-  }
-
-  /**
-   * Gets the key name of this object.
-   *
-   * @return String
-   */
-  public String getKeyName() {
-    return keyName;
-  }
-
-  /**
-   * Sets the Key name of this object.
-   *
-   * @param keyName - String
-   */
-  public void setKeyName(String keyName) {
-    this.keyName = keyName;
-  }
-
-  /**
-   * Returns the MD5 Hash for the data of this key.
-   *
-   * @return String MD5
-   */
-  public String getMd5hash() {
-    return md5hash;
-  }
-
-  /**
-   * Sets the MD5 of this file.
-   *
-   * @param md5hash - Md5 of this file
-   */
-  public void setMd5hash(String md5hash) {
-    this.md5hash = md5hash;
-  }
-
-  /**
-   * Number of bytes stored in the data part of this key.
-   *
-   * @return long size of the data file
-   */
-  public long getSize() {
-    return size;
-  }
-
-  /**
-   * Sets the size of the Data part of this key.
-   *
-   * @param size - Size in long
-   */
-  public void setSize(long size) {
-    this.size = size;
-  }
-
-  /**
-   * Version of this key.
-   *
-   * @return - returns the version of this key.
-   */
-  public long getVersion() {
-    return version;
-  }
-
-  /**
-   * Sets the version of this key.
-   *
-   * @param version - Version number
-   */
-  public void setVersion(long version) {
-    this.version = version;
-  }
-
-  /**
-   * Compares this object with the specified object for order.  Returns a
-   * negative integer, zero, or a positive integer as this object is less
-   * than, equal to, or greater than the specified object.
-   *
-   * @param o the object to be compared.
-   *
-   * @return a negative integer, zero, or a positive integer as this object
-   * is less than, equal to, or greater than the specified object.
-   *
-   * @throws NullPointerException if the specified object is null
-   * @throws ClassCastException if the specified object's type prevents it
-   * from being compared to this object.
-   */
-  @Override
-  public int compareTo(KeyInfo o) {
-    if (this.keyName.compareTo(o.getKeyName()) != 0) {
-      return this.keyName.compareTo(o.getKeyName());
-    }
-
-    if (this.getVersion() == o.getVersion()) {
-      return 0;
-    }
-    if (this.getVersion() < o.getVersion()) {
-      return -1;
-    }
-    return 1;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    KeyInfo keyInfo = (KeyInfo) o;
-
-    return new EqualsBuilder()
-        .append(version, keyInfo.version)
-        .append(keyName, keyInfo.keyName)
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(17, 37)
-        .append(version)
-        .append(keyName)
-        .toHashCode();
-  }
-
-  /**
-   * Parse a string to return KeyInfo object.
-   *
-   * @param jsonString - Json String
-   *
-   * @return - KeyInfo
-   *
-   * @throws IOException
-   */
-  public static KeyInfo parse(String jsonString) throws IOException {
-    return READER.readValue(jsonString);
-  }
-
-
-  /**
-   * Returns a JSON string of this object.
-   * After stripping out bytesUsed and keyCount
-   *
-   * @return String
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * Returns the Object as a Json String.
-   */
-  public String toDBString() throws IOException {
-    return JsonUtils.toJsonString(this);
-  }
-}
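
Reviewer note: KeyInfo hand-rolls a two-level ordering (key name first, version as tie-breaker) that must stay consistent with its EqualsBuilder/HashCodeBuilder pair. For comparison, the same ordering expressed with java.util.Comparator (the record-style class is illustrative):

    import java.util.Comparator;

    final class KeyOrdering {
      static final class KeyRecord {
        final String keyName;
        final long version;

        KeyRecord(String keyName, long version) {
          this.keyName = keyName;
          this.version = version;
        }
      }

      // Name first; version only breaks ties between versions of the same key.
      static final Comparator<KeyRecord> BY_NAME_THEN_VERSION =
          Comparator.comparing((KeyRecord k) -> k.keyName)
                    .thenComparingLong(k -> k.version);
    }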

+ 0 - 80
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfoDetails.java

@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.response;
-
-import org.apache.commons.lang3.builder.EqualsBuilder;
-import org.apache.commons.lang3.builder.HashCodeBuilder;
-
-import java.util.List;
-
-/**
- * Represents an Ozone key object with detailed location information.
- */
-public class KeyInfoDetails extends KeyInfo {
-  /**
-   * a list of Map which maps localID to ContainerID
-   * to specify replica locations.
-   */
-  private List<KeyLocation> keyLocations;
-
-  /**
-   * Set details of key location.
-   *
-   * @param keyLocations - details of key location
-   */
-  public void setKeyLocations(List<KeyLocation> keyLocations) {
-    this.keyLocations = keyLocations;
-  }
-
-  /**
-   * Returns details of key location.
-   *
-   * @return keyLocations
-   */
-  public List<KeyLocation> getKeyLocations() {
-    return keyLocations;
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) {
-      return true;
-    }
-
-    if (o == null || getClass() != o.getClass()) {
-      return false;
-    }
-
-    KeyInfoDetails that = (KeyInfoDetails) o;
-
-    return new EqualsBuilder()
-        .append(getVersion(), that.getVersion())
-        .append(getKeyName(), that.getKeyName())
-        .append(keyLocations, that.getKeyLocations())
-        .isEquals();
-  }
-
-  @Override
-  public int hashCode() {
-    return new HashCodeBuilder(17, 37)
-        .append(getVersion())
-        .append(getKeyName())
-        .append(keyLocations)
-        .toHashCode();
-  }
-}
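
Reviewer note: KeyInfoDetails widens the parent equality contract to include the location list, again via the commons-lang3 builders. The idiom reduced to one self-contained class (names illustrative):

    import java.util.List;

    import org.apache.commons.lang3.builder.EqualsBuilder;
    import org.apache.commons.lang3.builder.HashCodeBuilder;

    final class LocatedKey {
      private final String keyName;
      private final long version;
      private final List<Long> containerIds;  // stands in for the removed KeyLocation list

      LocatedKey(String keyName, long version, List<Long> containerIds) {
        this.keyName = keyName;
        this.version = version;
        this.containerIds = containerIds;
      }

      @Override
      public boolean equals(Object o) {
        if (this == o) {
          return true;
        }
        if (!(o instanceof LocatedKey)) {
          return false;
        }
        LocatedKey that = (LocatedKey) o;
        return new EqualsBuilder()
            .append(keyName, that.keyName)
            .append(version, that.version)
            .append(containerIds, that.containerIds)
            .isEquals();
      }

      @Override
      public int hashCode() {
        return new HashCodeBuilder(17, 37)
            .append(keyName)
            .append(version)
            .append(containerIds)
            .toHashCode();
      }
    }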

+ 0 - 82
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/KeyLocation.java

@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.response;
-
-/**
- * KeyLocation class is used for parsing the JSON response
- * when a KeyInfoDetails call is made.
- */
-public class KeyLocation {
-  /**
-   * Which container this key is stored in.
-   */
-  private final long containerID;
-  /**
-   * Which block inside the container this key is stored in.
-   */
-  private final long localID;
-  /**
-   * Data length of this key replica.
-   */
-  private final long length;
-  /**
-   * Offset of this key.
-   */
-  private final long offset;
-
-  /**
-   * Constructs KeyLocation.
-   */
-  public KeyLocation(long containerID, long localID,
-                     long length, long offset) {
-    this.containerID = containerID;
-    this.localID = localID;
-    this.length = length;
-    this.offset = offset;
-  }
-
-  /**
-   * Returns the containerID of this Key.
-   */
-  public long getContainerID() {
-    return containerID;
-  }
-
-  /**
-   * Returns the localID of this Key.
-   */
-  public long getLocalID() {
-    return localID;
-  }
-
-  /**
-   * Returns the length of this Key.
-   */
-  public long getLength() {
-    return length;
-  }
-
-  /**
-   * Returns the offset of this Key.
-   */
-  public long getOffset() {
-    return offset;
-  }
-
-}

+ 0 - 154
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java

@@ -1,154 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.response;
-
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonFilter;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.ser.FilterProvider;
-import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
-import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-
-/**
- * ListBuckets is the response for the ListBuckets query.
- */
-public class ListBuckets {
-  static final String BUCKET_LIST = "BUCKET_LIST_FILTER";
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(ListBuckets.class);
-  private static final ObjectWriter WRITER;
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    String[] ignorableFieldNames = {"dataFileName"};
-
-    FilterProvider filters = new SimpleFilterProvider()
-        .addFilter(BUCKET_LIST, SimpleBeanPropertyFilter
-            .serializeAllExcept(ignorableFieldNames));
-    mapper.setVisibility(PropertyAccessor.FIELD,
-        JsonAutoDetect.Visibility.ANY);
-    mapper.addMixIn(Object.class, MixIn.class);
-
-    mapper.setFilterProvider(filters);
-    WRITER = mapper.writerWithDefaultPrettyPrinter();
-  }
-
-  private List<BucketInfo> buckets;
-
-  /**
-   * Constructor for ListBuckets.
-   * @param buckets - List of buckets owned by this user
-   */
-  public ListBuckets(List<BucketInfo> buckets) {
-    this.buckets = buckets;
-
-  }
-
-  /**
-   * Constructor for ListBuckets.
-  */
-  public ListBuckets() {
-    this.buckets = new LinkedList<BucketInfo>();
-  }
-
-  /**
-   * Parses a String to return ListBuckets object.
-   *
-   * @param data - Json String
-   *
-   * @return - ListBuckets
-   *
-   * @throws IOException
-   */
-  public static ListBuckets parse(String data) throws IOException {
-    return READER.readValue(data);
-  }
-
-  /**
-   * Returns a list of Buckets.
-   *
-   * @return Bucket list
-   */
-  public List<BucketInfo> getBuckets() {
-    return buckets;
-  }
-
-  /**
-   * Sets the list of buckets owned by this user.
-   *
-   * @param buckets - List of Buckets
-   */
-  public void setBuckets(List<BucketInfo> buckets) {
-    this.buckets = buckets;
-  }
-
-
-  /**
-   * Returns a JSON string of this object.
-   * After stripping out bytesUsed and keyCount
-   *
-   * @return String
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * Returns the Object as a Json String.
-   */
-  public String toDBString() throws IOException {
-    return JsonUtils.toJsonString(this);
-  }
-
-  /**
-   * Sorts the buckets based on bucketName.
-   * This is useful when we return the list of buckets.
-   */
-  public void sort() {
-    Collections.sort(buckets);
-  }
-
-  /**
-   * Add a new bucket to the list of buckets.
-   * @param bucketInfo - bucket Info
-   */
-  public void addBucket(BucketInfo bucketInfo){
-    this.buckets.add(bucketInfo);
-  }
-
-  /**
-   * This class allows us to create custom filters
-   * for the Json serialization.
-   */
-  @JsonFilter(BUCKET_LIST)
-  class MixIn {
-
-  }
-
-}

+ 0 - 209
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java

@@ -1,209 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.response;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.ListArgs;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonFilter;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.ser.FilterProvider;
-import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
-import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-import com.google.common.base.Preconditions;
-
-/**
- * This class represents the list of keys (objects) in a bucket.
- */
-public class ListKeys {
-  static final String OBJECT_LIST = "OBJECT_LIST_FILTER";
-
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(ListKeys.class);
-  private static final ObjectWriter WRITER;
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    String[] ignorableFieldNames = {"dataFileName"};
-
-    FilterProvider filters = new SimpleFilterProvider()
-        .addFilter(OBJECT_LIST, SimpleBeanPropertyFilter
-            .serializeAllExcept(ignorableFieldNames));
-    mapper.setVisibility(PropertyAccessor.FIELD,
-        JsonAutoDetect.Visibility.ANY);
-    mapper.addMixIn(Object.class, MixIn.class);
-
-    mapper.setFilterProvider(filters);
-    WRITER = mapper.writerWithDefaultPrettyPrinter();
-  }
-
-  private String name;
-  private String prefix;
-  private long maxKeys;
-  private boolean truncated;
-  private List<KeyInfo> keyList;
-
-  /**
-   * Default constructor needed for json serialization.
-   */
-  public ListKeys() {
-    this.keyList = new LinkedList<>();
-  }
-
-  /**
-   * Constructor for ListKeys.
-   *
-   * @param args      ListArgs
-   * @param truncated is truncated
-   */
-  public ListKeys(ListArgs args, boolean truncated) {
-    Preconditions.checkState(args.getArgs() instanceof  BucketArgs);
-    this.name = ((BucketArgs) args.getArgs()).getBucketName();
-    this.prefix = args.getPrefix();
-    this.maxKeys = args.getMaxKeys();
-    this.truncated = truncated;
-  }
-
-  /**
-   * Converts a Json string to POJO.
-   * @param jsonString - json string.
-   * @return ListKeys
-   * @throws IOException - Json conversion error.
-   */
-  public static ListKeys parse(String jsonString) throws IOException {
-    return READER.readValue(jsonString);
-  }
-
-  /**
-   * Returns a list of Objects.
-   *
-   * @return List of KeyInfo Objects.
-   */
-  public List<KeyInfo> getKeyList() {
-    return keyList;
-  }
-
-  /**
-   * Sets the list of Objects.
-   *
-   * @param objectList - List of Keys
-   */
-  public void setKeyList(List<KeyInfo> objectList) {
-    this.keyList = objectList;
-  }
-
-  /**
-   * Gets the Max Key Count.
-   *
-   * @return long
-   */
-  public long getMaxKeys() {
-    return maxKeys;
-  }
-
-  /**
-   * Gets bucket Name.
-   *
-   * @return String
-   */
-  public String getName() {
-    return name;
-  }
-
-  /**
-   * Gets Prefix.
-   *
-   * @return String
-   */
-  public String getPrefix() {
-    return prefix;
-  }
-
-  /**
-   * Gets truncated Status.
-   *
-   * @return Boolean
-   */
-  public boolean isTruncated() {
-    return truncated;
-  }
-
-  /**
-   * Sets the value of truncated.
-   *
-   * @param value - Boolean
-   */
-  public void setTruncated(boolean value) {
-    this.truncated = value;
-  }
-
-  /**
-   * Returns a JSON string of this object, after stripping out the
-   * dataFileName field.
-   *
-   * @return String
-   * @throws  IOException - On json Errors.
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * Returns the Object as a Json String.
-   *
-   * @return String
-   * @throws IOException - on json errors.
-   */
-  public String toDBString() throws IOException {
-    return JsonUtils.toJsonString(this);
-  }
-
-  /**
-   * Sorts the keys based on name and version. This is useful when we return the
-   * list of keys.
-   */
-  public void sort() {
-    Collections.sort(keyList);
-  }
-
-  /**
-   * Add a new key to the list of keys.
-   * @param keyInfo - key Info
-   */
-  public void addKey(KeyInfo keyInfo){
-    this.keyList.add(keyInfo);
-  }
-
-  /**
-   * This class allows us to create custom filters for the Json serialization.
-   */
-  @JsonFilter(OBJECT_LIST)
-  class MixIn {
-
-  }
-}
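
For reference, the Jackson mix-in filter pattern shared by ListKeys and the other deleted response classes boils down to a few lines. The following is a minimal, self-contained sketch assuming only jackson-databind on the classpath; Payload and DEMO_FILTER are illustrative names, not part of the Ozone code:

import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonFilter;
import com.fasterxml.jackson.annotation.PropertyAccessor;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ser.FilterProvider;
import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;

public final class FilterDemo {

  // Plays the role of ListKeys; "secret" stands in for the ignored
  // dataFileName field.
  static class Payload {
    private String name = "key1";
    private String secret = "not-for-clients";
  }

  // Mix-in class carrying the filter annotation, as in the deleted code.
  @JsonFilter("DEMO_FILTER")
  static class MixIn {
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    FilterProvider filters = new SimpleFilterProvider()
        .addFilter("DEMO_FILTER",
            SimpleBeanPropertyFilter.serializeAllExcept("secret"));
    // Same setup as the deleted static initializer: field visibility,
    // mix-in registration, then the filter provider.
    mapper.setVisibility(PropertyAccessor.FIELD,
        JsonAutoDetect.Visibility.ANY);
    mapper.addMixIn(Object.class, MixIn.class);
    mapper.setFilterProvider(filters);
    // Prints the "name" field only; "secret" is excluded by the filter.
    System.out.println(mapper.writerWithDefaultPrettyPrinter()
        .writeValueAsString(new Payload()));
  }
}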

+ 0 - 152
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/ListVolumes.java

@@ -1,152 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.response;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonFilter;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.ser.FilterProvider;
-import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
-import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-
-/**
- * This class represents the list of volumes that is returned, in JSON
- * format, to users when they call ListVolumes.
- */
-@InterfaceAudience.Private
-public class ListVolumes {
-  private List<VolumeInfo> volumes;
-
-  static final String VOLUME_LIST = "VOLUME_LIST_FILTER";
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(ListVolumes.class);
-  private static final ObjectWriter WRITER;
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    String[] ignorableFieldNames = {"bytesUsed", "bucketCount"};
-
-    FilterProvider filters = new SimpleFilterProvider()
-        .addFilter(VOLUME_LIST, SimpleBeanPropertyFilter
-            .serializeAllExcept(ignorableFieldNames));
-    mapper.setVisibility(PropertyAccessor.FIELD,
-        JsonAutoDetect.Visibility.ANY);
-    mapper.addMixIn(Object.class, MixIn.class);
-
-    mapper.setFilterProvider(filters);
-    WRITER = mapper.writerWithDefaultPrettyPrinter();
-  }
-
-  /**
-   * Used for json filtering.
-   */
-  @JsonFilter(VOLUME_LIST)
-  class MixIn {
-  }
-
-  /**
-   * Constructs ListVolume objects.
-   */
-  public ListVolumes() {
-    this.volumes = new LinkedList<VolumeInfo>();
-  }
-
-  /**
-   * Gets the list of volumes.
-   *
-   * @return List of VolumeInfo Objects
-   */
-  public List<VolumeInfo> getVolumes() {
-    return volumes;
-  }
-
-
-  /**
-   * Sets volume info.
-   *
-   * @param volumes - List of Volumes
-   */
-  public void setVolumes(List<VolumeInfo> volumes) {
-    this.volumes = volumes;
-  }
-
-  /**
-   * Returns a JSON string of this object, after stripping out
-   * bytesUsed and bucketCount.
-   *
-   * @return String
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * When we serialize a volumeInfo to our database we use all
-   * fields. However, toJsonString strips out bytesUsed and
-   * bucketCount from the volume info.
-   *
-   * @return Json String
-   *
-   * @throws IOException
-   */
-  public String toDBString() throws IOException {
-    return JsonUtils.toJsonString(this);
-  }
-
-  /**
-   * Parses a String to return ListVolumes object.
-   *
-   * @param data - Json String
-   *
-   * @return - ListVolumes
-   *
-   * @throws IOException
-   */
-  public static ListVolumes parse(String data) throws IOException {
-    return READER.readValue(data);
-  }
-
-  /**
-   * Adds a new volume info to the List.
-   *
-   * @param info - VolumeInfo
-   */
-  public void addVolume(VolumeInfo info) {
-    this.volumes.add(info);
-  }
-
-  /**
-   * Sorts the volumes by volume name.
-   * This is useful when we return the list of volume names.
-   */
-  public void sort() {
-    Collections.sort(volumes);
-  }
-}

+ 0 - 308
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeInfo.java

@@ -1,308 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.response;
-
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-
-import com.fasterxml.jackson.annotation.JsonAutoDetect;
-import com.fasterxml.jackson.annotation.JsonFilter;
-import com.fasterxml.jackson.annotation.PropertyAccessor;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectReader;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.databind.ser.FilterProvider;
-import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
-import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;
-
-/**
- * VolumeInfo is the Java class that represents the JSON returned
- * when a VolumeInfo call is made.
- */
-@InterfaceAudience.Private
-public class VolumeInfo implements Comparable<VolumeInfo> {
-
-  static final String VOLUME_INFO = "VOLUME_INFO_FILTER";
-  private static final ObjectReader READER =
-      new ObjectMapper().readerFor(VolumeInfo.class);
-  private static final ObjectWriter WRITER;
-
-  static {
-    ObjectMapper mapper = new ObjectMapper();
-    String[] ignorableFieldNames = {"bytesUsed", "bucketCount"};
-
-    FilterProvider filters = new SimpleFilterProvider()
-        .addFilter(VOLUME_INFO, SimpleBeanPropertyFilter
-            .serializeAllExcept(ignorableFieldNames));
-    mapper.setVisibility(PropertyAccessor.FIELD,
-        JsonAutoDetect.Visibility.ANY);
-    mapper.addMixIn(Object.class, MixIn.class);
-
-    mapper.setFilterProvider(filters);
-    WRITER = mapper.writerWithDefaultPrettyPrinter();
-  }
-
-  /**
-   * Custom Json Filter Class.
-   */
-  @JsonFilter(VOLUME_INFO)
-  class MixIn {
-  }
-  private VolumeOwner owner;
-  private OzoneQuota quota;
-  private String volumeName;
-  private String createdOn;
-  private String createdBy;
-
-  private long bytesUsed;
-  private long bucketCount;
-
-
-  /**
-   * Constructor for VolumeInfo.
-   *
-   * @param volumeName - Name of the Volume
-   * @param createdOn - Date String
-   * @param createdBy - Person who created it
-   */
-  public VolumeInfo(String volumeName, String createdOn, String createdBy) {
-    this.createdOn = createdOn;
-    this.volumeName = volumeName;
-    this.createdBy = createdBy;
-  }
-
-  /**
-   * Constructor for VolumeInfo.
-   */
-  public VolumeInfo() {
-  }
-
-  /**
-   * Returns the name of the person who created this volume.
-   *
-   * @return Name of Admin who created this
-   */
-  public String getCreatedBy() {
-    return createdBy;
-  }
-
-  /**
-   * Sets the user name of the person who created this volume.
-   *
-   * @param createdBy - UserName
-   */
-  public void setCreatedBy(String createdBy) {
-    this.createdBy = createdBy;
-  }
-
-  /**
-   * Gets the date on which this volume was created.
-   *
-   * @return - Date String
-   */
-  public String getCreatedOn() {
-    return createdOn;
-  }
-
-  /**
-   * Sets the date string.
-   *
-   * @param createdOn - Date String
-   */
-  public void setCreatedOn(String createdOn) {
-    this.createdOn = createdOn;
-  }
-
-  /**
-   * Returns the owner info.
-   *
-   * @return - OwnerInfo
-   */
-  public VolumeOwner getOwner() {
-    return owner;
-  }
-
-  /**
-   * Sets the owner.
-   *
-   * @param owner - OwnerInfo
-   */
-  public void setOwner(VolumeOwner owner) {
-    this.owner = owner;
-  }
-
-  /**
-   * Returns the quota information on a volume.
-   *
-   * @return Quota
-   */
-  public OzoneQuota getQuota() {
-    return quota;
-  }
-
-  /**
-   * Sets the quota info.
-   *
-   * @param quota - Quota Info
-   */
-  public void setQuota(OzoneQuota quota) {
-    this.quota = quota;
-  }
-
-  /**
-   * Gets the volume name.
-   *
-   * @return - Volume Name
-   */
-  public String getVolumeName() {
-    return volumeName;
-  }
-
-  /**
-   * Sets the volume name.
-   *
-   * @param volumeName - Volume Name
-   */
-  public void setVolumeName(String volumeName) {
-    this.volumeName = volumeName;
-  }
-
-  /**
-   * Returns a JSON string of this object, after stripping out
-   * bytesUsed and bucketCount.
-   *
-   * @return String - json string
-   * @throws IOException
-   */
-  public String toJsonString() throws IOException {
-    return WRITER.writeValueAsString(this);
-  }
-
-  /**
-   * When we serialize a volumeInfo to our database we use all
-   * fields. However, toJsonString strips out bytesUsed and
-   * bucketCount from the volume info.
-   *
-   * @return Json String
-   *
-   * @throws IOException
-   */
-  public String toDBString() throws IOException {
-    return JsonUtils.toJsonString(this);
-  }
-
-
-  /**
-   * Comparable Interface.
-   * @param o VolumeInfo Object.
-   * @return Result of comparison
-   */
-  @Override
-  public int compareTo(VolumeInfo o) {
-    return this.volumeName.compareTo(o.getVolumeName());
-  }
-
-  /**
-   * Gets the number of bytesUsed by this volume.
-   *
-   * @return long - Bytes used
-   */
-  public long getBytesUsed() {
-    return bytesUsed;
-  }
-
-  /**
-   * Sets number of bytesUsed by this volume.
-   *
-   * @param bytesUsed - Number of bytesUsed
-   */
-  public void setBytesUsed(long bytesUsed) {
-    this.bytesUsed = bytesUsed;
-  }
-
-  /**
-   * Returns VolumeInfo class from json string.
-   *
-   * @param data - Json String
-   *
-   * @return VolumeInfo
-   *
-   * @throws IOException
-   */
-  public static VolumeInfo parse(String data) throws IOException {
-    return READER.readValue(data);
-  }
-
-  /**
-   * Indicates whether some other object is "equal to" this one.
-   *
-   * @param obj the reference object with which to compare.
-   *
-   * @return {@code true} if this object is the same as the obj
-   * argument; {@code false} otherwise.
-   */
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) {
-      return false;
-    }
-    if (getClass() != obj.getClass()) {
-      return false;
-    }
-    VolumeInfo otherInfo = (VolumeInfo) obj;
-    return otherInfo.getVolumeName().equals(this.getVolumeName());
-  }
-
-  /**
-   * Returns a hash code value for the object. This method is
-   * supported for the benefit of hash tables such as those provided by
-   * HashMap.
-   * @return a hash code value for this object.
-   *
-   * @see Object#equals(Object)
-   * @see System#identityHashCode
-   */
-  @Override
-  public int hashCode() {
-    return getVolumeName().hashCode();
-  }
-
-  /**
-   * Total number of buckets under this volume.
-   *
-   * @return - bucketCount
-   */
-  public long getBucketCount() {
-    return bucketCount;
-  }
-
-  /**
-   * Sets the buckets count.
-   *
-   * @param bucketCount - Bucket Count
-   */
-  public void setBucketCount(long bucketCount) {
-    this.bucketCount = bucketCount;
-  }
-}
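
Note that equality, hashing, and ordering in the deleted VolumeInfo all key off the volume name alone. A minimal stand-alone sketch of that contract (NamedVolume is an illustrative stand-in, not an Ozone class):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

final class NamedVolume implements Comparable<NamedVolume> {
  private final String volumeName;

  NamedVolume(String volumeName) {
    this.volumeName = volumeName;
  }

  @Override
  public int compareTo(NamedVolume o) {
    // Natural ordering by name, as VolumeInfo.compareTo does.
    return volumeName.compareTo(o.volumeName);
  }

  @Override
  public boolean equals(Object obj) {
    if (obj == null || getClass() != obj.getClass()) {
      return false;
    }
    return volumeName.equals(((NamedVolume) obj).volumeName);
  }

  @Override
  public int hashCode() {
    // Must agree with equals: both use only the name.
    return volumeName.hashCode();
  }

  public static void main(String[] args) {
    List<NamedVolume> volumes = new ArrayList<>();
    volumes.add(new NamedVolume("vol-b"));
    volumes.add(new NamedVolume("vol-a"));
    Collections.sort(volumes); // orders vol-a before vol-b
    System.out.println(volumes.get(0).equals(new NamedVolume("vol-a"))); // true
  }
}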

+ 0 - 60
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/VolumeOwner.java

@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.response;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * Volume Owner represents the owner of a volume.
- *
- * This is a class instead of a string since we might need to extend this class
- * to support other forms of authentication.
- */
-@InterfaceAudience.Private
-public class VolumeOwner {
-  @JsonInclude(JsonInclude.Include.NON_NULL)
-  private String name;
-
-  /**
-   * Constructor for VolumeOwner.
-   *
-   * @param name - name of the User
-   */
-  public VolumeOwner(String name) {
-    this.name = name;
-  }
-
-  /**
-   * Constructs Volume Owner.
-   */
-  public VolumeOwner() {
-    name = null;
-  }
-
-  /**
-   * Returns the user name.
-   *
-   * @return Name
-   */
-  public String getName() {
-    return name;
-  }
-
-}

+ 0 - 23
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/response/package-info.java

@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/**
- * JSON response classes for the Ozone REST interface.
- */
-package org.apache.hadoop.ozone.web.response;

+ 0 - 82
hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestBucketInfo.java

@@ -1,82 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web;
-
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.web.response.BucketInfo;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-/**
- * Test Ozone Bucket Info operation.
- */
-public class TestBucketInfo {
-  @Test
-  public void testBucketInfoJson() throws IOException {
-    BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
-    String bucketInfoString = bucketInfo.toJsonString();
-    BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
-    assertEquals(bucketInfo, newBucketInfo);
-  }
-
-  @Test
-  public void testBucketInfoDBString() throws IOException {
-    BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
-    String bucketInfoString = bucketInfo.toDBString();
-    BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
-    assertEquals(bucketInfo, newBucketInfo);
-  }
-
-  @Test
-  public void testBucketInfoAddAcls() throws IOException {
-    BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
-    String bucketInfoString = bucketInfo.toDBString();
-    BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
-    assertEquals(bucketInfo, newBucketInfo);
-    List<OzoneAcl> aclList = new ArrayList<>();
-
-    aclList.add(OzoneAcl.parseAcl("user:bilbo:r"));
-    aclList.add(OzoneAcl.parseAcl("user:samwise:rw"));
-    newBucketInfo.setAcls(aclList);
-
-    assertNotNull(newBucketInfo.getAcls());
-    assertEquals(2, newBucketInfo.getAcls().size());
-  }
-
-  @Test
-  public void testBucketInfoVersionAndType() throws IOException {
-    BucketInfo bucketInfo = new BucketInfo("volumeName", "bucketName");
-    bucketInfo.setVersioning(OzoneConsts.Versioning.ENABLED);
-    bucketInfo.setStorageType(StorageType.DISK);
-
-    String bucketInfoString = bucketInfo.toDBString();
-
-    BucketInfo newBucketInfo = BucketInfo.parse(bucketInfoString);
-    assertEquals(bucketInfo, newBucketInfo);
-  }
-
-}

+ 0 - 115
hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestQuota.java

@@ -1,115 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.web;
-
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.Set;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Test Ozone Volume Quota.
- */
-public class TestQuota {
-  @Test
-  public void testParseQuota() {
-    HashMap<String, Boolean> testMatrix;
-    testMatrix = new HashMap<String, Boolean>();
-
-    testMatrix.put("10TB", Boolean.TRUE);
-    testMatrix.put("1 TB", Boolean.TRUE);
-    testMatrix.put("0MB", Boolean.TRUE);
-    testMatrix.put("0 TB", Boolean.TRUE);
-    testMatrix.put("    1000MB   ", Boolean.TRUE);
-
-    testMatrix.put("    1000MBMB   ", Boolean.FALSE);
-    testMatrix.put("    1000MB00   ", Boolean.FALSE);
-    testMatrix.put("1000ZMB", Boolean.FALSE);
-    testMatrix.put("MB1000", Boolean.FALSE);
-    testMatrix.put("9999", Boolean.FALSE);
-    testMatrix.put("1", Boolean.FALSE);
-    testMatrix.put("remove", Boolean.FALSE);
-    testMatrix.put("1UNDEFINED", Boolean.FALSE);
-    testMatrix.put(null, Boolean.FALSE);
-    testMatrix.put("", Boolean.FALSE);
-    testMatrix.put("-1000MB", Boolean.FALSE);
-    testMatrix.put("1024 bytes", Boolean.TRUE);
-    testMatrix.put("1bytes", Boolean.TRUE);
-    testMatrix.put("0bytes", Boolean.TRUE);
-    testMatrix.put("10000 BYTES", Boolean.TRUE);
-    testMatrix.put("BYTESbytes", Boolean.FALSE);
-    testMatrix.put("bytes", Boolean.FALSE);
-
-    Set<String> keys = testMatrix.keySet();
-    for (String key : keys) {
-      if (testMatrix.get(key)) {
-        OzoneQuota.parseQuota(key);
-      } else {
-        try {
-          OzoneQuota.parseQuota(key);
-          // should never get here since parseQuota will throw
-          fail("An exception was expected for \"" + key
-              + "\" but did not happen.");
-        } catch (IllegalArgumentException e) {
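-          // expected: an invalid quota string must be rejected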
-
-        }
-      }
-    }
-  }
-
-  @Test
-  public void testVerifyQuota() {
-    OzoneQuota qt = OzoneQuota.parseQuota("10TB");
-    assertEquals(10, qt.getSize());
-    assertEquals(OzoneQuota.Units.TB, qt.getUnit());
-    assertEquals(10L * (1024L * 1024L * 1024L * 1024L), qt.sizeInBytes());
-
-    qt = OzoneQuota.parseQuota("10MB");
-    assertEquals(10, qt.getSize());
-    assertEquals(OzoneQuota.Units.MB, qt.getUnit());
-    assertEquals(10L * (1024L * 1024L), qt.sizeInBytes());
-
-    qt = OzoneQuota.parseQuota("10GB");
-    assertEquals(10, qt.getSize());
-    assertEquals(OzoneQuota.Units.GB, qt.getUnit());
-    assertEquals(10L * (1024L * 1024L * 1024L), qt.sizeInBytes());
-
-    qt = OzoneQuota.parseQuota("10BYTES");
-    assertEquals(10, qt.getSize());
-    assertEquals(OzoneQuota.Units.BYTES, qt.getUnit());
-    assertEquals(10L, qt.sizeInBytes());
-
-    OzoneQuota emptyQuota = new OzoneQuota();
-    assertEquals(-1L, emptyQuota.sizeInBytes());
-    assertEquals(0, emptyQuota.getSize());
-    assertEquals(OzoneQuota.Units.UNDEFINED, emptyQuota.getUnit());
-  }
-
-  @Test
-  public void testVerifyRemove() {
-    assertTrue(OzoneQuota.isRemove("remove"));
-    assertFalse(OzoneQuota.isRemove("not remove"));
-    assertFalse(OzoneQuota.isRemove(null));
-  }
-}
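
The expectations in testVerifyQuota above are plain binary-unit arithmetic: a quota of 10TB is 10 * 1024^4 = 10995116277760 bytes, which is also the value the updated ozone-shell smoketest below now asserts. A minimal sketch of that conversion (QuotaMath is an illustrative stand-in, not the OzoneQuota implementation):

public final class QuotaMath {

  enum Units { BYTES, MB, GB, TB }

  static long sizeInBytes(long size, Units unit) {
    switch (unit) {
      case BYTES: return size;
      case MB:    return size * 1024L * 1024L;
      case GB:    return size * 1024L * 1024L * 1024L;
      case TB:    return size * 1024L * 1024L * 1024L * 1024L;
      default:    throw new IllegalArgumentException("Unknown unit: " + unit);
    }
  }

  public static void main(String[] args) {
    // Matches the expectations in testVerifyQuota:
    System.out.println(sizeInBytes(10, Units.TB)); // 10995116277760
    System.out.println(sizeInBytes(10, Units.MB)); // 10485760
  }
}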

+ 0 - 101
hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestUtils.java

@@ -1,101 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web;
-
-import org.junit.Test;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Set;
-
-import static org.apache.hadoop.ozone.web.utils.OzoneUtils.getRequestID;
-import static org.apache.hadoop.ozone.web.utils.OzoneUtils.verifyResourceName;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Test Ozone Utility operations like verifying resource name.
- */
-public class TestUtils {
-
-  /**
-   * Tests if the bucket name handling is correct.
-   */
-  @Test
-  public  void testValidBucketNames() {
-    HashMap<String, Boolean> testMatrix;
-    // Init the Table with Strings and Expected Return values
-    testMatrix = new HashMap<String, Boolean>();
-
-    testMatrix.put("bucket-.ozone.self", Boolean.FALSE);
-    testMatrix.put("bucket.-ozone.self", Boolean.FALSE);
-    testMatrix.put(".bucket.ozone.self", Boolean.FALSE);
-    testMatrix.put("bucket.ozone.self.", Boolean.FALSE);
-    testMatrix.put("bucket..ozone.self", Boolean.FALSE);
-    testMatrix.put("192.1.1.1", Boolean.FALSE);
-    testMatrix.put("ab", Boolean.FALSE);
-    testMatrix.put("bucket.ozone.self.this.is.a.really.long.name.that."
-        + "is.more.than.sixty.three.characters.long.for.sure", Boolean.FALSE);
-    testMatrix.put(null, Boolean.FALSE);
-    testMatrix.put("bucket@$", Boolean.FALSE);
-    testMatrix.put("BUCKET", Boolean.FALSE);
-    testMatrix.put("bucket .ozone.self", Boolean.FALSE);
-    testMatrix.put("       bucket.ozone.self", Boolean.FALSE);
-    testMatrix.put("bucket.ozone.self-", Boolean.FALSE);
-    testMatrix.put("-bucket.ozone.self", Boolean.FALSE);
-
-    testMatrix.put("bucket", Boolean.TRUE);
-    testMatrix.put("bucket.ozone.self", Boolean.TRUE);
-    testMatrix.put("bucket.ozone.self", Boolean.TRUE);
-    testMatrix.put("bucket-name.ozone.self", Boolean.TRUE);
-    testMatrix.put("bucket.1.ozone.self", Boolean.TRUE);
-
-    Set<String> keys = testMatrix.keySet();
-    for (String key : keys) {
-      if (testMatrix.get(key)) {
-
-        // For valid names there should be no exceptions at all
-        verifyResourceName(key);
-      } else {
-        try {
-          verifyResourceName(key);
-          // should never get here since verifyResourceName will throw
-          fail("An exception was expected for \"" + key
-              + "\" but did not happen.");
-        } catch (IllegalArgumentException e) {
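-          // expected: an invalid resource name must be rejected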
-
-        }
-      }
-    }
-  }
-
-  /**
-   *  Calls getRequestID many times and asserts that we get
-   *  different values; ideally this should be run under parallel
-   *  threads. Since the function under test has no external
-   *  dependencies, it is assumed that this test is good enough.
-   */
-  @Test
-  public void testRequestIDisRandom() {
-    HashSet<String> set = new HashSet<>();
-    for (int i = 0; i < 1000; i++) {
-      assertTrue(set.add(getRequestID()));
-    }
-  }
-}
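
The deleted matrix above encodes DNS-style naming rules: 3-63 characters, lowercase letters, digits, hyphens and dots, no label starting or ending with a hyphen, no empty labels, and no plain IPv4 addresses. A hedged, approximate re-implementation for illustration (verifyResourceName in OzoneUtils is the real check; this sketch may differ in edge cases):

import java.util.regex.Pattern;

public final class ResourceNameCheck {

  // One dot-separated label: starts and ends with [a-z0-9],
  // hyphens allowed in between.
  private static final Pattern LABEL =
      Pattern.compile("^[a-z0-9]([a-z0-9-]*[a-z0-9])?$");
  private static final Pattern IPV4 =
      Pattern.compile("^\\d+\\.\\d+\\.\\d+\\.\\d+$");

  static boolean isValid(String name) {
    if (name == null || name.length() < 3 || name.length() > 63) {
      return false; // "ab" and over-long names fail
    }
    if (IPV4.matcher(name).matches()) {
      return false; // "192.1.1.1" fails
    }
    for (String label : name.split("\\.", -1)) {
      if (!LABEL.matcher(label).matches()) {
        return false; // empty labels ("..") and "-x" / "x-" fail
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(isValid("bucket.ozone.self"));  // true
    System.out.println(isValid("bucket..ozone.self")); // false
    System.out.println(isValid("BUCKET"));             // false
  }
}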

+ 0 - 73
hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/web/TestVolumeStructs.java

@@ -1,73 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-package org.apache.hadoop.ozone.web;
-
-import org.apache.hadoop.ozone.web.response.ListVolumes;
-import org.apache.hadoop.ozone.web.response.VolumeInfo;
-import org.apache.hadoop.ozone.web.response.VolumeOwner;
-import org.junit.Test;
-
-import java.io.IOException;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test Ozone Volume info structure.
- */
-public class TestVolumeStructs {
-
-  @Test
-  public void testVolumeInfoParse() throws IOException {
-    VolumeInfo volInfo =
-        new VolumeInfo("testvol", "Thu, Apr 9, 2015 10:23:45 GMT", "gandalf");
-    VolumeOwner owner = new VolumeOwner("bilbo");
-    volInfo.setOwner(owner);
-    String jString = volInfo.toJsonString();
-    VolumeInfo newVolInfo = VolumeInfo.parse(jString);
-
-    assertEquals(volInfo.toJsonString(), newVolInfo.toJsonString());
-  }
-
-  @Test
-  public void testVolumeInfoValue() throws IOException {
-    String createdOn = "Thu, Apr 9, 2015 10:23:45 GMT";
-    String createdBy = "gandalf";
-    VolumeInfo volInfo = new VolumeInfo("testvol", createdOn, createdBy);
-    assertEquals(volInfo.getCreatedBy(), createdBy);
-    assertEquals(volInfo.getCreatedOn(), createdOn);
-  }
-
-
-  @Test
-  public void testVolumeListParse() throws IOException {
-    ListVolumes list = new ListVolumes();
-    for (int x = 0; x < 100; x++) {
-      VolumeInfo volInfo = new VolumeInfo("testvol" + Integer.toString(x),
-          "Thu, Apr 9, 2015 10:23:45 GMT", "gandalf");
-      list.addVolume(volInfo);
-    }
-    list.sort();
-    String listString = list.toJsonString();
-    ListVolumes newList = ListVolumes.parse(listString);
-    assertEquals(list.toJsonString(), newList.toJsonString());
-  }
-}

+ 0 - 4
hadoop-ozone/datanode/pom.xml

@@ -47,10 +47,6 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-container-service</artifactId>
     </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-objectstore-service</artifactId>
-    </dependency>
     <dependency>
       <groupId>com.sun.xml.bind</groupId>
       <artifactId>jaxb-impl</artifactId>

+ 0 - 4
hadoop-ozone/dist/dev-support/bin/dist-layout-stitching

@@ -112,10 +112,6 @@ run cp "${ROOT}/hadoop-ozone/common/src/main/bin/stop-ozone.sh" "sbin/"
 # fault injection tests
 run cp  -r "${ROOT}/hadoop-ozone/fault-injection-test/network-tests/src/test/blockade" tests
 
-#shaded datanode service
-run mkdir -p "./share/hadoop/ozoneplugin"
-run cp "${ROOT}/hadoop-ozone/objectstore-service/target/hadoop-ozone-objectstore-service-${HDDS_VERSION}-plugin.jar" "./share/hadoop/ozoneplugin/hadoop-ozone-datanode-plugin-${HDDS_VERSION}.jar"
-
 # Optional documentation, could be missing
 cp -r "${ROOT}/hadoop-hdds/docs/target/classes/docs" ./
 

+ 25 - 18
hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell.robot

@@ -19,27 +19,34 @@ Library             OperatingSystem
 Resource            ../commonlib.robot
 Test Setup          Run Keyword if    '${SECURITY_ENABLED}' == 'true'    Kinit test user     testuser     testuser.keytab
 Test Timeout        2 minute
+Suite Setup         Generate prefix
 
 *** Variables ***
+${prefix}    generated
+
+*** Keywords ***
+Generate prefix
+   ${random} =         Generate Random String  5  [NUMBERS]
+   Set Suite Variable  ${prefix}  ${random}
 
 *** Test Cases ***
 RpcClient with port
-   Test ozone shell       o3://            om:9862     rpcwoport
+   Test ozone shell       o3://            om:9862     ${prefix}-rpcwoport
 
 RpcClient volume acls
-   Test Volume Acls       o3://            om:9862     rpcwoport2
+   Test Volume Acls       o3://            om:9862     ${prefix}-rpcwoport2
 
 RpcClient bucket acls
-    Test Bucket Acls      o3://            om:9862     rpcwoport2
+    Test Bucket Acls      o3://            om:9862     ${prefix}-rpcwoport2
 
 RpcClient key acls
-    Test Key Acls         o3://            om:9862     rpcwoport2
+    Test Key Acls         o3://            om:9862     ${prefix}-rpcwoport2
 
 RpcClient without host
-    Test ozone shell      o3://            ${EMPTY}    rpcwport
+    Test ozone shell      o3://            ${EMPTY}    ${prefix}-rpcwport
 
 RpcClient without scheme
-    Test ozone shell      ${EMPTY}         ${EMPTY}    rpcwoscheme
+    Test ozone shell      ${EMPTY}         ${EMPTY}    ${prefix}-rpcwoscheme
 
 
 *** Keywords ***
@@ -48,20 +55,20 @@ Test ozone shell
     ${result} =     Execute             ozone sh volume create ${protocol}${server}/${volume} --quota 100TB
                     Should not contain  ${result}       Failed
                     Should contain      ${result}       Creating Volume: ${volume}
-    ${result} =     Execute             ozone sh volume list ${protocol}${server}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.volumeName=="${volume}")'
-                    Should contain      ${result}       createdOn
-    ${result} =     Execute             ozone sh volume list | grep -Ev 'Removed|DEBUG|ERROR|INFO|TRACE|WARN' | jq -r '.[] | select(.volumeName=="${volume}")'
-                    Should contain      ${result}       createdOn
+    ${result} =     Execute             ozone sh volume list ${protocol}${server}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="${volume}")'
+                    Should contain      ${result}       creationTime
+    ${result} =     Execute             ozone sh volume list | grep -Ev 'Removed|DEBUG|ERROR|INFO|TRACE|WARN' | jq -r '. | select(.name=="${volume}")'
+                    Should contain      ${result}       creationTime
 # TODO: Disable updating the owner, acls should be used to give access to other user.        
                     Execute             ozone sh volume update ${protocol}${server}/${volume} --quota 10TB
 #    ${result} =     Execute             ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .owner | .name'
 #                    Should Be Equal     ${result}       bill
-    ${result} =     Execute             ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.volumeName=="${volume}") | .quota | .size'
-                    Should Be Equal     ${result}       10
+    ${result} =     Execute             ozone sh volume info ${protocol}${server}/${volume} | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="${volume}") | .quota'
+                    Should Be Equal     ${result}       10995116277760
                     Execute             ozone sh bucket create ${protocol}${server}/${volume}/bb1
-    ${result} =     Execute             ozone sh bucket info ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.bucketName=="bb1") | .storageType'
+    ${result} =     Execute             ozone sh bucket info ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="bb1") | .storageType'
                     Should Be Equal     ${result}       DISK
-    ${result} =     Execute             ozone sh bucket list ${protocol}${server}/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.bucketName=="bb1") | .volumeName'
+    ${result} =     Execute             ozone sh bucket list ${protocol}${server}/${volume}/ | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="bb1") | .volumeName'
                     Should Be Equal     ${result}       ${volume}
                     Run Keyword         Test key handling       ${protocol}       ${server}       ${volume}
                     Execute             ozone sh bucket delete ${protocol}${server}/${volume}/bb1
@@ -106,12 +113,12 @@ Test key handling
                     Execute             rm -f NOTICE.txt.1
                     Execute             ozone sh key get ${protocol}${server}/${volume}/bb1/key1 NOTICE.txt.1
                     Execute             ls -l NOTICE.txt.1
-    ${result} =     Execute             ozone sh key info ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.keyName=="key1")'
-                    Should contain      ${result}       createdOn
-    ${result} =     Execute             ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[] | select(.keyName=="key1") | .keyName'
+    ${result} =     Execute             ozone sh key info ${protocol}${server}/${volume}/bb1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="key1")'
+                    Should contain      ${result}       creationTime
+    ${result} =     Execute             ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. | select(.name=="key1") | .name'
                     Should Be Equal     ${result}       key1
                     Execute             ozone sh key rename ${protocol}${server}/${volume}/bb1 key1 key2
-    ${result} =     Execute             ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.[].keyName'
+    ${result} =     Execute             ozone sh key list ${protocol}${server}/${volume}/bb1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '.name'
                     Should Be Equal     ${result}       key2
                     Execute             ozone sh key delete ${protocol}${server}/${volume}/bb1/key2
 

+ 9 - 9
hadoop-ozone/dist/src/main/smoketest/ozonefs/ozonefs.robot

@@ -34,14 +34,14 @@ Check volume from ozonefs
 
 Run ozoneFS tests
                         Execute               ozone fs -mkdir -p o3fs://bucket1.fstest/testdir/deep
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
                         Should contain    ${result}         testdir/deep
                         Execute               ozone fs -copyFromLocal NOTICE.txt o3fs://bucket1.fstest/testdir/deep/
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
                         Should contain    ${result}         NOTICE.txt
 
                         Execute               ozone fs -put NOTICE.txt o3fs://bucket1.fstest/testdir/deep/PUTFILE.txt
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
                         Should contain    ${result}         PUTFILE.txt
 
     ${result} =         Execute               ozone fs -ls o3fs://bucket1.fstest/testdir/deep/
@@ -49,13 +49,13 @@ Run ozoneFS tests
                         Should contain    ${result}         PUTFILE.txt
 
                         Execute               ozone fs -mv o3fs://bucket1.fstest/testdir/deep/NOTICE.txt o3fs://bucket1.fstest/testdir/deep/MOVED.TXT
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
                         Should contain    ${result}         MOVED.TXT
                         Should not contain  ${result}       NOTICE.txt
 
                         Execute               ozone fs -mkdir -p o3fs://bucket1.fstest/testdir/deep/subdir1
                         Execute               ozone fs -cp o3fs://bucket1.fstest/testdir/deep/MOVED.TXT o3fs://bucket1.fstest/testdir/deep/subdir1/NOTICE.txt
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
                         Should contain    ${result}         subdir1/NOTICE.txt
 
     ${result} =         Execute               ozone fs -ls o3fs://bucket1.fstest/testdir/deep/subdir1/
@@ -65,19 +65,19 @@ Run ozoneFS tests
                         Should not contain  ${result}       Failed
 
                         Execute               ozone fs -rm o3fs://bucket1.fstest/testdir/deep/subdir1/NOTICE.txt
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
                         Should not contain  ${result}       NOTICE.txt
 
     ${result} =         Execute               ozone fs -rmdir o3fs://bucket1.fstest/testdir/deep/subdir1/
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
                         Should not contain  ${result}       subdir1
 
                         Execute               ozone fs -touch o3fs://bucket1.fstest/testdir/TOUCHFILE.txt
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
                         Should contain  ${result}       TOUCHFILE.txt
 
                         Execute               ozone fs -rm -r o3fs://bucket1.fstest/testdir/
-    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName'
+    ${result} =         Execute               ozone sh key list o3://om/fstest/bucket1 | grep -v WARN | jq -r '.name'
                         Should not contain  ${result}       testdir
 
                         Execute               rm -Rf localdir1

+ 0 - 4
hadoop-ozone/integration-test/pom.xml

@@ -48,10 +48,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>test</scope>
     </dependency>
 
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-objectstore-service</artifactId>
-    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-s3gateway</artifactId>

+ 0 - 9
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java

@@ -137,15 +137,6 @@ public interface MiniOzoneCluster {
    */
   OzoneClient getRpcClient() throws IOException;
 
-  /**
-   * Returns an REST based {@link OzoneClient} to access the
-   * {@link MiniOzoneCluster}.
-   *
-   * @return {@link OzoneClient}
-   * @throws IOException
-   */
-  OzoneClient getRestClient() throws IOException;
-
   /**
    * Returns StorageContainerLocationClient to communicate with
    * {@link StorageContainerManager} associated with the MiniOzoneCluster.

+ 0 - 16
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java

@@ -36,7 +36,6 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.common.Storage.StorageState;
 import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
@@ -67,7 +66,6 @@ import java.util.concurrent.TimeoutException;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
     .HEALTHY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys
     .DFS_CONTAINER_IPC_PORT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys
@@ -212,18 +210,6 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
     return OzoneClientFactory.getRpcClient(conf);
   }
 
-  /**
-   * Creates an {@link OzoneClient} connected to this cluster's REST
-   * service. Callers take ownership of the client and must close it when done.
-   *
-   * @return OzoneRestClient connected to this cluster's REST service
-   * @throws OzoneException if Ozone encounters an error creating the client
-   */
-  @Override
-  public OzoneClient getRestClient() throws IOException {
-    return OzoneClientFactory.getRestClient(conf);
-  }
-
   /**
    * Returns an RPC proxy connected to this cluster's StorageContainerManager
    * for accessing container location information.  Callers take ownership of
@@ -619,8 +605,6 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
     private void configureHddsDatanodes() {
       conf.set(ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY, "0.0.0.0:0");
       conf.set(HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
-      conf.set(HDDS_DATANODE_PLUGINS_KEY,
-          "org.apache.hadoop.ozone.web.OzoneHddsDatanodeService");
       conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT,
           randomContainerPort);
       conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,

+ 12 - 14
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java

@@ -18,15 +18,23 @@
 
 package org.apache.hadoop.ozone;
 
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.ratis.RatisHelper;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.hdds.ratis.RatisHelper;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.ratis.RatisHelper.newRaftClient;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 import org.apache.ratis.client.RaftClient;
 import org.apache.ratis.protocol.RaftPeer;
 import org.apache.ratis.rpc.RpcType;
@@ -35,16 +43,6 @@ import org.apache.ratis.util.TimeDuration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.Closeable;
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.apache.hadoop.hdds.ratis.RatisHelper.newRaftClient;
-
 /**
  * Helpers for Ratis tests.
  */
@@ -80,7 +78,7 @@ public interface RatisTestHelper {
     }
 
     public ClientProtocol newOzoneClient()
-        throws OzoneException, URISyntaxException, IOException {
+        throws IOException {
       return new RpcClient(conf);
     }
 

+ 95 - 0
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java

@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.HashMap;
+import java.util.Scanner;
+
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.ozone.client.BucketArgs;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.VolumeArgs;
+
+import org.apache.commons.lang3.RandomStringUtils;
+
+/**
+ * Utility to help to generate test data.
+ */
+public final class TestDataUtil {
+
+  private TestDataUtil() {
+  }
+
+  public static OzoneBucket createVolumeAndBucket(MiniOzoneCluster cluster,
+      String volumeName, String bucketName) throws IOException {
+    String userName = "user" + RandomStringUtils.randomNumeric(5);
+    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
+
+    OzoneClient client = cluster.getClient();
+
+    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
+        .setAdmin(adminName)
+        .setOwner(userName)
+        .build();
+
+    ObjectStore objectStore = client.getObjectStore();
+
+    objectStore.createVolume(volumeName, volumeArgs);
+
+    OzoneVolume volume = objectStore.getVolume(volumeName);
+
+    BucketArgs omBucketArgs = BucketArgs.newBuilder()
+        .setStorageType(StorageType.DISK)
+        .build();
+
+    volume.createBucket(bucketName, omBucketArgs);
+    return volume.getBucket(bucketName);
+
+  }
+
+  public static void createKey(OzoneBucket bucket, String keyName,
+      String content) throws IOException {
+    try (OutputStream stream = bucket
+        .createKey(keyName, content.length(), ReplicationType.STAND_ALONE,
+            ReplicationFactor.ONE, new HashMap<>())) {
+      stream.write(content.getBytes());
+    }
+  }
+
+  public static String getKey(OzoneBucket bucket, String keyName)
+      throws IOException {
+    try (InputStream stream = bucket.readKey(keyName)) {
+      return new Scanner(stream).useDelimiter("\\A").next();
+    }
+  }
+
+  public static OzoneBucket createVolumeAndBucket(MiniOzoneCluster cluster)
+      throws IOException {
+    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
+    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
+    return createVolumeAndBucket(cluster, volumeName, bucketName);
+  }
+}
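
A hedged usage sketch for the new helper, assuming the MiniOzoneCluster builder API (newBuilder/setNumDatanodes/build); the cluster setup shown here is illustrative, not part of this patch:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;

public class TestDataUtilExample {
  public static void main(String[] args) throws Exception {
    // Spin up a single-datanode test cluster.
    MiniOzoneCluster cluster = MiniOzoneCluster
        .newBuilder(new OzoneConfiguration())
        .setNumDatanodes(1)
        .build();
    cluster.waitForClusterToBeReady();
    try {
      // Random volume/bucket names, then a key round-trip over RPC.
      OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
      TestDataUtil.createKey(bucket, "key1", "sample-content");
      System.out.println(TestDataUtil.getKey(bucket, "key1")); // sample-content
    } finally {
      cluster.shutdown();
    }
  }
}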

+ 0 - 413
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneHelper.java

@@ -1,413 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClients;
-
-import javax.ws.rs.core.HttpHeaders;
-import java.io.IOException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.Locale;
-
-import static java.net.HttpURLConnection.HTTP_CREATED;
-import static java.net.HttpURLConnection.HTTP_OK;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Helper functions to test Ozone.
- */
-public class TestOzoneHelper {
-
-  public CloseableHttpClient createHttpClient() {
-    return HttpClients.createDefault();
-  }
-  /**
-   * Creates Volumes on Ozone Store.
-   *
-   * @throws IOException
-   */
-  public void testCreateVolumes(int port) throws IOException {
-    SimpleDateFormat format =
-        new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
-    CloseableHttpClient client = createHttpClient();
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    try {
-      HttpPost httppost = new HttpPost(
-          String.format("http://localhost:%d/%s", port, volumeName));
-
-      httppost.addHeader(Header.OZONE_VERSION_HEADER,
-          Header.OZONE_V1_VERSION_HEADER);
-      httppost.addHeader(HttpHeaders.DATE,
-          format.format(new Date(Time.monotonicNow())));
-      httppost.addHeader(HttpHeaders.AUTHORIZATION,
-          Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
-              OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-      httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-
-      HttpResponse response = client.execute(httppost);
-      assertEquals(response.toString(), HTTP_CREATED,
-          response.getStatusLine().getStatusCode());
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Create Volumes with Quota.
-   *
-   * @throws IOException
-   */
-  public void testCreateVolumesWithQuota(int port) throws IOException {
-    SimpleDateFormat format =
-        new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
-    CloseableHttpClient client = createHttpClient();
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    try {
-      HttpPost httppost = new HttpPost(
-          String.format("http://localhost:%d/%s?quota=10TB", port, volumeName));
-
-      httppost.addHeader(Header.OZONE_VERSION_HEADER,
-          Header.OZONE_V1_VERSION_HEADER);
-      httppost.addHeader(HttpHeaders.DATE,
-          format.format(new Date(Time.monotonicNow())));
-      httppost.addHeader(HttpHeaders.AUTHORIZATION,
-          Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
-              OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-      httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-
-      HttpResponse response = client.execute(httppost);
-      assertEquals(response.toString(), HTTP_CREATED,
-          response.getStatusLine().getStatusCode());
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Create Volumes with Invalid Quota.
-   *
-   * @throws IOException
-   */
-  public void testCreateVolumesWithInvalidQuota(int port) throws IOException {
-    SimpleDateFormat format =
-        new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
-    CloseableHttpClient client = createHttpClient();
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    try {
-      HttpPost httppost = new HttpPost(
-          String.format("http://localhost:%d/%s?quota=NaN", port, volumeName));
-
-      httppost.addHeader(Header.OZONE_VERSION_HEADER,
-          Header.OZONE_V1_VERSION_HEADER);
-      httppost.addHeader(HttpHeaders.DATE,
-          format.format(new Date(Time.monotonicNow())));
-      httppost.addHeader(HttpHeaders.AUTHORIZATION,
-          Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
-              OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-      httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-
-      HttpResponse response = client.execute(httppost);
-      assertEquals(response.toString(), ErrorTable.MALFORMED_QUOTA
-              .getHttpCode(),
-          response.getStatusLine().getStatusCode());
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * To create a volume a user name must be specified using OZONE_USER header.
-   * This test verifies that we get an error in case we call without a OZONE
-   * user name.
-   *
-   * @throws IOException
-   */
-  public void testCreateVolumesWithInvalidUser(int port) throws IOException {
-    SimpleDateFormat format =
-        new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
-    CloseableHttpClient client = createHttpClient();
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    try {
-      HttpPost httppost = new HttpPost(
-          String.format("http://localhost:%d/%s?quota=1TB", port, volumeName));
-
-      httppost.addHeader(Header.OZONE_VERSION_HEADER,
-          Header.OZONE_V1_VERSION_HEADER);
-      httppost.addHeader(HttpHeaders.DATE,
-          format.format(new Date(Time.monotonicNow())));
-      httppost.addHeader(HttpHeaders.AUTHORIZATION,
-          Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
-              OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-
-      HttpResponse response = client.execute(httppost);
-
-      assertEquals(response.toString(), ErrorTable.USER_NOT_FOUND.getHttpCode(),
-          response.getStatusLine().getStatusCode());
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Only Admins can create volumes in Ozone. This test uses simple userauth as
-   * backend and hdfs and root are admin users in the simple backend.
-   * <p>
-   * This test tries to create a volume as user bilbo.
-   *
-   * @throws IOException
-   */
-  public void testCreateVolumesWithOutAdminRights(int port) throws IOException {
-    SimpleDateFormat format =
-        new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
-    CloseableHttpClient client = createHttpClient();
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    try {
-      HttpPost httppost = new HttpPost(
-          String.format("http://localhost:%d/%s?quota=NaN", port, volumeName));
-
-      httppost.addHeader(Header.OZONE_VERSION_HEADER,
-          Header.OZONE_V1_VERSION_HEADER);
-      httppost.addHeader(HttpHeaders.DATE,
-          format.format(new Date(Time.monotonicNow())));
-      httppost.addHeader(HttpHeaders.AUTHORIZATION,
-          Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
-              "bilbo"); // This is not a root user in Simple Auth
-      httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-
-      HttpResponse response = client.execute(httppost);
-      assertEquals(response.toString(), ErrorTable.ACCESS_DENIED.getHttpCode(),
-          response.getStatusLine().getStatusCode());
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Create a bunch of volumes in a loop.
-   *
-   * @throws IOException
-   */
-  public void testCreateVolumesInLoop(int port) throws IOException {
-    SimpleDateFormat format =
-        new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
-
-    for (int x = 0; x < 1000; x++) {
-      CloseableHttpClient client = createHttpClient();
-      String volumeName = OzoneUtils.getRequestID().toLowerCase();
-      String userName = OzoneUtils.getRequestID().toLowerCase();
-
-      HttpPost httppost = new HttpPost(
-          String.format("http://localhost:%d/%s?quota=10TB", port, volumeName));
-
-      httppost.addHeader(Header.OZONE_VERSION_HEADER,
-          Header.OZONE_V1_VERSION_HEADER);
-      httppost.addHeader(HttpHeaders.DATE,
-          format.format(new Date(Time.monotonicNow())));
-      httppost.addHeader(HttpHeaders.AUTHORIZATION,
-          Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
-              OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-      httppost.addHeader(Header.OZONE_USER, userName);
-
-      HttpResponse response = client.execute(httppost);
-      assertEquals(response.toString(), HTTP_CREATED,
-          response.getStatusLine().getStatusCode());
-      client.close();
-    }
-  }
-  /**
-   * Get volumes owned by the user.
-   *
-   * @throws IOException
-   */
-  public void testGetVolumesByUser(int port) throws IOException {
-    SimpleDateFormat format =
-        new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
-    // We need to create a volume for this test to succeed.
-    testCreateVolumes(port);
-    CloseableHttpClient client = createHttpClient();
-    try {
-      HttpGet httpget =
-          new HttpGet(String.format("http://localhost:%d/", port));
-
-      httpget.addHeader(Header.OZONE_VERSION_HEADER,
-          Header.OZONE_V1_VERSION_HEADER);
-
-      httpget.addHeader(HttpHeaders.DATE,
-          format.format(new Date(Time.monotonicNow())));
-
-      httpget.addHeader(HttpHeaders.AUTHORIZATION,
-          Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
-              OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-
-      httpget.addHeader(Header.OZONE_USER,
-          OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-
-      HttpResponse response = client.execute(httpget);
-      assertEquals(response.toString(), HTTP_OK,
-          response.getStatusLine().getStatusCode());
-
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Admins can read volumes belonging to other users.
-   *
-   * @throws IOException
-   */
-  public void testGetVolumesOfAnotherUser(int port) throws IOException {
-    SimpleDateFormat format =
-        new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
-
-    CloseableHttpClient client = createHttpClient();
-    try {
-      HttpGet httpget =
-          new HttpGet(String.format("http://localhost:%d/", port));
-
-      httpget.addHeader(Header.OZONE_VERSION_HEADER,
-          Header.OZONE_V1_VERSION_HEADER);
-      httpget.addHeader(HttpHeaders.DATE,
-          format.format(new Date(Time.monotonicNow())));
-
-      httpget.addHeader(HttpHeaders.AUTHORIZATION,
-          Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
-              OzoneConsts.OZONE_SIMPLE_ROOT_USER);
-
-      // User Root is getting volumes belonging to user HDFS
-      httpget.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-
-      HttpResponse response = client.execute(httpget);
-      assertEquals(response.toString(), HTTP_OK,
-          response.getStatusLine().getStatusCode());
-
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * if you try to read volumes belonging to another user,
-   * then server always ignores it.
-   *
-   * @throws IOException
-   */
-  public void testGetVolumesOfAnotherUserShouldFail(int port)
-      throws IOException {
-    SimpleDateFormat format =
-        new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
-
-    CloseableHttpClient client = createHttpClient();
-    String userName = OzoneUtils.getRequestID().toLowerCase();
-    try {
-      HttpGet httpget =
-          new HttpGet(String.format("http://localhost:%d/", port));
-
-      httpget.addHeader(Header.OZONE_VERSION_HEADER,
-          Header.OZONE_V1_VERSION_HEADER);
-      httpget.addHeader(HttpHeaders.DATE,
-          format.format(new Date(Time.monotonicNow())));
-
-      httpget.addHeader(HttpHeaders.AUTHORIZATION,
-          Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
-              userName);
-
-      // userName is NOT a root user, hence he should NOT be able to read the
-      // volumes of user HDFS
-      httpget.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-
-      HttpResponse response = client.execute(httpget);
-      // We will get an Error called userNotFound when using Simple Auth Scheme
-      assertEquals(response.toString(), ErrorTable.USER_NOT_FOUND.getHttpCode(),
-          response.getStatusLine().getStatusCode());
-
-    } finally {
-      client.close();
-    }
-  }
-
-  public void testListKeyOnEmptyBucket(int port) throws IOException {
-    SimpleDateFormat format =
-        new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
-    CloseableHttpClient client = createHttpClient();
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    String bucketName = OzoneUtils.getRequestID().toLowerCase() + "bucket";
-    try {
-
-      HttpPost httppost = new HttpPost(
-          String.format("http://localhost:%d/%s", port, volumeName));
-      httppost.addHeader(Header.OZONE_VERSION_HEADER,
-          Header.OZONE_V1_VERSION_HEADER);
-      httppost.addHeader(HttpHeaders.DATE,
-          format.format(new Date(Time.monotonicNow())));
-      httppost.addHeader(HttpHeaders.AUTHORIZATION,
-          Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " "
-              + OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-      httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-      HttpResponse response = client.execute(httppost);
-      assertEquals(response.toString(), HTTP_CREATED,
-          response.getStatusLine().getStatusCode());
-      client.close();
-
-      client = createHttpClient();
-      httppost = new HttpPost(String
-          .format("http://localhost:%d/%s/%s", port, volumeName, bucketName));
-      httppost.addHeader(Header.OZONE_VERSION_HEADER,
-          Header.OZONE_V1_VERSION_HEADER);
-      httppost.addHeader(HttpHeaders.DATE,
-          format.format(new Date(Time.monotonicNow())));
-      httppost.addHeader(HttpHeaders.AUTHORIZATION,
-          Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " "
-              + OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-      httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-      response = client.execute(httppost);
-      assertEquals(response.toString(), HTTP_CREATED,
-          response.getStatusLine().getStatusCode());
-      client.close();
-
-      client = createHttpClient();
-      HttpGet httpget = new HttpGet(String
-          .format("http://localhost:%d/%s/%s", port, volumeName, bucketName));
-      httpget.addHeader(Header.OZONE_VERSION_HEADER,
-          Header.OZONE_V1_VERSION_HEADER);
-      httpget.addHeader(HttpHeaders.DATE,
-          format.format(new Date(Time.monotonicNow())));
-      httpget.addHeader(HttpHeaders.AUTHORIZATION,
-          Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " "
-              + OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-      httpget.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-      response = client.execute(httpget);
-      assertEquals(response.toString() + " " + response.getStatusLine()
-              .getReasonPhrase(), HTTP_OK,
-          response.getStatusLine().getStatusCode());
-
-    } finally {
-      client.close();
-    }
-  }
-
-}
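
Note: the deleted class above drove the REST endpoint with hand-built HttpPost/HttpGet requests plus Ozone version, date, and authorization headers. With the REST protocol gone, the same volume-creation check is a pair of calls on the RPC client. A minimal sketch, assuming only a reachable cluster; the configuration defaults and the volume name are illustrative:

    import java.io.IOException;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.client.OzoneClient;
    import org.apache.hadoop.ozone.client.OzoneClientFactory;

    public final class CreateVolumeSketch {
      public static void main(String[] args) throws IOException {
        // The RPC client replaces the HTTP POST and the OZONE_VERSION/DATE/AUTHORIZATION headers.
        try (OzoneClient client = OzoneClientFactory.getRpcClient(new OzoneConfiguration())) {
          ObjectStore store = client.getObjectStore();
          store.createVolume("vol-" + System.currentTimeMillis());
        }
      }
    }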

+ 22 - 50
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java

@@ -16,37 +16,30 @@
  */
 package org.apache.hadoop.ozone;
 
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
-import com.google.common.primitives.Longs;
-import org.apache.commons.lang3.RandomStringUtils;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+import com.google.common.primitives.Longs;
+import org.apache.commons.lang3.RandomStringUtils;
 
 /**
  * A helper class used by {@link TestStorageContainerManager} to generate
@@ -56,53 +49,32 @@ public class TestStorageContainerManagerHelper {
 
   private final MiniOzoneCluster cluster;
   private final Configuration conf;
-  private final StorageHandler storageHandler;
 
   public TestStorageContainerManagerHelper(MiniOzoneCluster cluster,
       Configuration conf) throws IOException {
     this.cluster = cluster;
     this.conf = conf;
-    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
   }
 
   public Map<String, OmKeyInfo> createKeys(int numOfKeys, int keySize)
       throws Exception {
     Map<String, OmKeyInfo> keyLocationMap = Maps.newHashMap();
-    String volume = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucket = "bucket" + RandomStringUtils.randomNumeric(5);
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    UserArgs userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volume, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucket, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    // Write 20 keys in bucket.
+
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    // Write numOfKeys keys into the bucket.
     Set<String> keyNames = Sets.newHashSet();
-    KeyArgs keyArgs;
     for (int i = 0; i < numOfKeys; i++) {
       String keyName = RandomStringUtils.randomAlphabetic(5) + i;
       keyNames.add(keyName);
-      keyArgs = new KeyArgs(keyName, bucketArgs);
-      keyArgs.setSize(keySize);
-      // Just for testing list keys call, so no need to write real data.
-      OutputStream stream = storageHandler.newKeyWriter(keyArgs);
-      stream.write(DFSUtil.string2Bytes(
-          RandomStringUtils.randomAlphabetic(5)));
-      stream.close();
+
+      TestDataUtil
+          .createKey(bucket, keyName, RandomStringUtils.randomAlphabetic(5));
     }
 
     for (String key : keyNames) {
       OmKeyArgs arg = new OmKeyArgs.Builder()
-          .setVolumeName(volume)
-          .setBucketName(bucket)
+          .setVolumeName(bucket.getVolumeName())
+          .setBucketName(bucket.getName())
           .setKeyName(key)
           .setRefreshPipeline(true)
           .build();
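
Note: the TestDataUtil helpers used above are introduced elsewhere in this commit, and their bodies are not part of this hunk. A plausible sketch of the two calls made here, matching the call-site signatures but otherwise an assumption about the implementation:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.HashMap;

    import org.apache.hadoop.hdds.client.ReplicationFactor;
    import org.apache.hadoop.hdds.client.ReplicationType;
    import org.apache.hadoop.ozone.MiniOzoneCluster;
    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.OzoneVolume;
    import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

    import org.apache.commons.lang3.RandomStringUtils;

    public final class TestDataUtilSketch {

      // Create a random volume/bucket pair through the cluster's RPC client.
      public static OzoneBucket createVolumeAndBucket(MiniOzoneCluster cluster)
          throws IOException {
        String volumeName = "vol-" + RandomStringUtils.randomNumeric(5);
        String bucketName = "bucket-" + RandomStringUtils.randomNumeric(5);
        ObjectStore store = cluster.getClient().getObjectStore();
        store.createVolume(volumeName);
        OzoneVolume volume = store.getVolume(volumeName);
        volume.createBucket(bucketName);
        return volume.getBucket(bucketName);
      }

      // Write the content as a single key, sized to the content length.
      public static void createKey(OzoneBucket bucket, String keyName, String content)
          throws IOException {
        byte[] bytes = content.getBytes(StandardCharsets.UTF_8);
        try (OzoneOutputStream out = bucket.createKey(keyName, bytes.length,
            ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<>())) {
          out.write(bytes);
        }
      }
    }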

+ 21 - 21
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java

@@ -58,6 +58,7 @@ import org.apache.hadoop.ozone.client.BucketArgs;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
@@ -67,7 +68,6 @@ import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.common.OzoneChecksumException;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -240,7 +240,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testSetVolumeQuota()
-      throws IOException, OzoneException {
+      throws IOException {
     String volumeName = UUID.randomUUID().toString();
     store.createVolume(volumeName);
     store.getVolume(volumeName).setQuota(
@@ -264,7 +264,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testCreateVolumeWithMetadata()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     String volumeName = UUID.randomUUID().toString();
     VolumeArgs volumeArgs = VolumeArgs.newBuilder()
         .addMetadata("key1", "val1")
@@ -278,7 +278,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testCreateBucketWithMetadata()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     long currentTime = Time.now();
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
@@ -297,7 +297,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testCreateBucket()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     long currentTime = Time.now();
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
@@ -312,7 +312,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testCreateS3Bucket()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     long currentTime = Time.now();
     String userName = UserGroupInformation.getCurrentUser().getUserName();
     String bucketName = UUID.randomUUID().toString();
@@ -345,7 +345,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testListS3Buckets()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     String userName = "ozone100";
     String bucketName1 = UUID.randomUUID().toString();
     String bucketName2 = UUID.randomUUID().toString();
@@ -363,7 +363,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testListS3BucketsFail()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     String userName = "randomUser";
     Iterator<? extends OzoneBucket> iterator = store.listS3Buckets(userName,
         null);
@@ -402,7 +402,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testCreateS3BucketMapping()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     long currentTime = Time.now();
     String userName = "ozone";
     String bucketName = UUID.randomUUID().toString();
@@ -421,7 +421,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testCreateBucketWithVersioning()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     store.createVolume(volumeName);
@@ -436,7 +436,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testCreateBucketWithStorageType()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     store.createVolume(volumeName);
@@ -451,7 +451,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testCreateBucketWithAcls()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     OzoneAcl userAcl = new OzoneAcl(USER, "test",
@@ -470,7 +470,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testCreateBucketWithAllArgument()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     OzoneAcl userAcl = new OzoneAcl(USER, "test",
@@ -507,7 +507,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testAddBucketAcl()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     store.createVolume(volumeName);
@@ -526,7 +526,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testRemoveBucketAcl()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     OzoneAcl userAcl = new OzoneAcl(USER, "test",
@@ -581,7 +581,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testSetBucketVersioning()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     store.createVolume(volumeName);
@@ -618,7 +618,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testSetBucketStorageType()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     store.createVolume(volumeName);
@@ -678,7 +678,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testPutKey()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     long currentTime = Time.now();
@@ -746,7 +746,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testPutKeyRatisOneNode()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     long currentTime = Time.now();
@@ -781,7 +781,7 @@ public abstract class TestOzoneRpcClientAbstract {
 
   @Test
   public void testPutKeyRatisThreeNodes()
-      throws IOException, OzoneException {
+      throws IOException, OzoneClientException {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
     long currentTime = Time.now();
@@ -976,7 +976,7 @@ public abstract class TestOzoneRpcClientAbstract {
   }
 
   @Test
-  public void testGetKeyDetails() throws IOException, OzoneException {
+  public void testGetKeyDetails() throws IOException, OzoneClientException {
     String volumeName = UUID.randomUUID().toString();
     String bucketName = UUID.randomUUID().toString();
 

+ 1 - 2
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java

@@ -48,7 +48,6 @@ import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.ozShell.TestOzoneShell;
 import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
@@ -97,7 +96,7 @@ public class TestBlockDeletion {
     GenericTestUtils.setLogLevel(SCMBlockDeletingService.LOG, Level.DEBUG);
 
     String path =
-        GenericTestUtils.getTempPath(TestOzoneShell.class.getSimpleName());
+        GenericTestUtils.getTempPath(TestBlockDeletion.class.getSimpleName());
     File baseDir = new File(path);
     baseDir.mkdirs();
 

+ 11 - 11
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java

@@ -17,8 +17,15 @@
  */
 package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -26,32 +33,25 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
 import org.apache.hadoop.test.GenericTestUtils;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
 import org.junit.Assert;
 import org.junit.Test;
 
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.concurrent.TimeoutException;
-
 /**
 * Tests the behaviour of the datanode when it receives a close container command.
  */
 public class TestCloseContainerHandler {
 
   @Test
-  public void test() throws IOException, TimeoutException, InterruptedException,
-      OzoneException {
+  public void test()
+      throws IOException, TimeoutException, InterruptedException {
 
     //setup a cluster (1G free space is enough for a unit test)
     OzoneConfiguration conf = new OzoneConfiguration();

+ 0 - 208
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestMultipleContainerReadWrite.java

@@ -1,208 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test key write/read where a key can span multiple containers.
- */
-public class TestMultipleContainerReadWrite {
-  private static MiniOzoneCluster cluster = null;
-  private static StorageHandler storageHandler;
-  private static UserArgs userArgs;
-  private static OzoneConfiguration conf;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  /**
-   * Create a MiniDFSCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 1,
-        StorageUnit.MB);
-    conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 5);
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
-    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
-  }
-
-  /**
-   * Shutdown MiniDFSCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testWriteRead() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString = RandomStringUtils.randomAscii(3 * (int)OzoneConsts.MB);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(3 * (int)OzoneConsts.MB);
-    keyArgs.setUserName(userName);
-
-    try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
-      outputStream.write(dataString.getBytes());
-    }
-
-    byte[] data = new byte[dataString.length()];
-    try (InputStream inputStream = storageHandler.newKeyReader(keyArgs)) {
-      inputStream.read(data, 0, data.length);
-    }
-    assertEquals(dataString, new String(data));
-    // checking whether container meta data has the chunk file persisted.
-    MetricsRecordBuilder containerMetrics = getMetrics(
-        "StorageContainerMetrics");
-    assertCounter("numWriteChunk", 3L, containerMetrics);
-    assertCounter("numReadChunk", 3L, containerMetrics);
-  }
-
-  // Disable this test, because this tests assumes writing beyond a specific
-  // size is not allowed. Which is not true for now. Keeping this test in case
-  // we add this restrict in the future.
-  @Ignore
-  @Test
-  public void testErrorWrite() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString1 = RandomStringUtils.randomAscii(100);
-    String dataString2 = RandomStringUtils.randomAscii(500);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(500);
-
-    try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
-      // first write will write succeed
-      outputStream.write(dataString1.getBytes());
-      // second write
-      exception.expect(IOException.class);
-      exception.expectMessage(
-          "Can not write 500 bytes with only 400 byte space");
-      outputStream.write(dataString2.getBytes());
-    }
-  }
-
-  @Test
-  public void testPartialRead() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString = RandomStringUtils.randomAscii(500);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(500);
-    keyArgs.setUserName(userName);
-
-    try (OutputStream outputStream = storageHandler.newKeyWriter(keyArgs)) {
-      outputStream.write(dataString.getBytes());
-    }
-
-    byte[] data = new byte[600];
-    try (InputStream inputStream = storageHandler.newKeyReader(keyArgs)) {
-      int readLen = inputStream.read(data, 0, 340);
-      assertEquals(340, readLen);
-      assertEquals(dataString.substring(0, 340),
-          new String(data).substring(0, 340));
-
-      readLen = inputStream.read(data, 340, 260);
-      assertEquals(160, readLen);
-      assertEquals(dataString, new String(data).substring(0, 500));
-
-      readLen = inputStream.read(data, 500, 1);
-      assertEquals(-1, readLen);
-    }
-  }
-}
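
Note: the deleted test's core scenario, a key spanning multiple containers under a 1 MB block size, is still expressible through the RPC client. A sketch of the write/read round trip, assuming a bucket created as in the migrated tests; the key name is illustrative:

    import java.nio.charset.StandardCharsets;
    import java.util.HashMap;

    import org.apache.hadoop.hdds.client.ReplicationFactor;
    import org.apache.hadoop.hdds.client.ReplicationType;
    import org.apache.hadoop.ozone.OzoneConsts;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.io.OzoneInputStream;
    import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

    import org.apache.commons.io.IOUtils;
    import org.apache.commons.lang3.RandomStringUtils;
    import static org.junit.Assert.assertEquals;

    final class MultiBlockRoundTrip {
      // Writes 3 MB against a 1 MB block size so the key spans several blocks,
      // then reads it back and compares.
      static void writeAndReadBack(OzoneBucket bucket) throws Exception {
        String data = RandomStringUtils.randomAscii(3 * (int) OzoneConsts.MB);
        byte[] bytes = data.getBytes(StandardCharsets.UTF_8);
        try (OzoneOutputStream out = bucket.createKey("multiblockkey", bytes.length,
            ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<>())) {
          out.write(bytes);
        }
        byte[] readBack = new byte[bytes.length];
        try (OzoneInputStream in = bucket.readKey("multiblockkey")) {
          IOUtils.readFully(in, readBack);
        }
        assertEquals(data, new String(readBack, StandardCharsets.UTF_8));
      }
    }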

+ 38 - 55
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java

@@ -19,26 +19,23 @@ package org.apache.hadoop.ozone.om;
 import java.util.UUID;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneTestUtils;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
 import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
 import org.apache.hadoop.ozone.security.acl.IOzoneObj;
 import org.apache.hadoop.ozone.security.acl.RequestContext;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import org.apache.commons.lang3.RandomStringUtils;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
 import org.junit.AfterClass;
 import static org.junit.Assert.assertTrue;
@@ -52,9 +49,8 @@ import org.junit.rules.ExpectedException;
  */
 public class TestOmAcls {
 
+  private static boolean aclAllow = true;
   private static MiniOzoneCluster cluster = null;
-  private static StorageHandler storageHandler;
-  private static UserArgs userArgs;
   private static OMMetrics omMetrics;
   private static OzoneConfiguration conf;
   private static String clusterId;
@@ -80,15 +76,13 @@ public class TestOmAcls {
     conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
     conf.setClass(OZONE_ACL_AUTHORIZER_CLASS, OzoneAccessAuthorizerTest.class,
         IAccessAuthorizer.class);
+    conf.setStrings(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD);
     cluster = MiniOzoneCluster.newBuilder(conf)
         .setClusterId(clusterId)
         .setScmId(scmId)
         .setOmId(omId)
         .build();
     cluster.waitForClusterToBeReady();
-    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
-    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
     omMetrics = cluster.getOzoneManager().getMetrics();
     logCapturer =
         GenericTestUtils.LogCapturer.captureLogs(OzoneManager.getLogger());
@@ -104,65 +98,54 @@ public class TestOmAcls {
     }
   }
 
-
   /**
    * Tests that bucket creation fails when the ACL authorizer denies the request.
    */
+
   @Test
-  public void testOMAclsPermissionDenied() throws Exception {
-    String user0 = "testListVolumes-user-0";
-    String adminUser = "testListVolumes-admin";
-    final VolumeArgs createVolumeArgs;
-    int i = 100;
-    String user0VolName = "Vol-" + user0 + "-" + i;
-    createVolumeArgs = new VolumeArgs(user0VolName, userArgs);
-    createVolumeArgs.setUserName(user0);
-    createVolumeArgs.setAdminName(adminUser);
-    createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
-    logCapturer.clearOutput();
-    OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
-        () -> storageHandler.createVolume(createVolumeArgs));
-    assertTrue(logCapturer.getOutput().contains("Only admin users are " +
-        "authorized to create Ozone"));
+  public void testBucketCreationPermissionDenied() throws Exception {
 
-    BucketArgs bucketArgs = new BucketArgs("bucket1", createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
+    TestOmAcls.aclAllow = true;
+
+    String volumeName = RandomStringUtils.randomAlphabetic(5).toLowerCase();
+    String bucketName = RandomStringUtils.randomAlphabetic(5).toLowerCase();
+    cluster.getClient().getObjectStore().createVolume(volumeName);
+    OzoneVolume volume =
+        cluster.getClient().getObjectStore().getVolume(volumeName);
+
+    TestOmAcls.aclAllow = false;
     OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
-        () -> storageHandler.createBucket(bucketArgs));
-    assertTrue(logCapturer.getOutput().contains("Only admin users are" +
-        " authorized to create Ozone"));
+        () -> volume.createBucket(bucketName));
+
+    assertTrue(logCapturer.getOutput()
+        .contains("doesn't have CREATE permission to access volume"));
   }
 
   @Test
   public void testFailureInKeyOp() throws Exception {
     final VolumeArgs createVolumeArgs;
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    createVolumeArgs = new VolumeArgs(userName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    createVolumeArgs.setQuota(new OzoneQuota(100, OzoneQuota.Units.GB));
-    BucketArgs bucketArgs = new BucketArgs("bucket1", createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
+
+    TestOmAcls.aclAllow = true;
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
     logCapturer.clearOutput();
 
-    // write a key without specifying size at all
-    String keyName = "testKey";
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
+    TestOmAcls.aclAllow = false;
+
     OzoneTestUtils.expectOmException(ResultCodes.PERMISSION_DENIED,
-        () -> storageHandler.newKeyWriter(keyArgs));
+        () -> TestDataUtil.createKey(bucket, "testKey", "testcontent"));
     assertTrue(logCapturer.getOutput().contains("doesn't have WRITE " +
         "permission to access key"));
   }
-}
 
-/**
- * Test implementation to negative case.
- */
-class OzoneAccessAuthorizerTest implements IAccessAuthorizer {
+  /**
+   * Test implementation for the negative case.
+   */
+  static class OzoneAccessAuthorizerTest implements IAccessAuthorizer {
 
-  @Override
-  public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context) {
-    return false;
+    @Override
+    public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context) {
+      return TestOmAcls.aclAllow;
+    }
   }
+
 }
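
Note: the authorizer moves inside the test class and answers from a mutable static flag, so one long-lived MiniOzoneCluster (started once with this authorizer configured) can cover both the allow and the deny paths; each test flips the flag around the call it expects to fail. Reduced to its moving parts (a sketch, not the shipped class):

    // Flag-driven test authorizer; a real authorizer would inspect ozoneObject and context.
    static class FlagAuthorizer implements IAccessAuthorizer {
      static volatile boolean allow = true;

      @Override
      public boolean checkAccess(IOzoneObj ozoneObject, RequestContext context) {
        return allow;
      }
    }

Marking the flag volatile (which the diff above does not do) would also make the flips immediately visible to the OM's request-handler threads.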

+ 24 - 69
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java

@@ -16,50 +16,39 @@
  */
 package org.apache.hadoop.ozone.om;
 
-import org.apache.commons.lang3.RandomStringUtils;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.StorageType;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+
+import org.apache.commons.lang3.RandomStringUtils;
 import org.junit.AfterClass;
+import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
-import org.junit.Assert;
 import org.junit.rules.ExpectedException;
 
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
 /**
  * This class tests the versioning of blocks from OM side.
  */
 public class TestOmBlockVersioning {
   private static MiniOzoneCluster cluster = null;
-  private static UserArgs userArgs;
   private static OzoneConfiguration conf;
   private static OzoneManager ozoneManager;
-  private static StorageHandler storageHandler;
 
   @Rule
   public ExpectedException exception = ExpectedException.none();
@@ -76,9 +65,6 @@ public class TestOmBlockVersioning {
     conf = new OzoneConfiguration();
     cluster = MiniOzoneCluster.newBuilder(conf).build();
     cluster.waitForClusterToBeReady();
-    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
-    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
     ozoneManager = cluster.getOzoneManager();
   }
 
@@ -101,14 +87,7 @@ public class TestOmBlockVersioning {
     String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
     String keyName = "key" + RandomStringUtils.randomNumeric(5);
 
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
+    TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName);
 
     OmKeyArgs keyArgs = new OmKeyArgs.Builder()
         .setVolumeName(volumeName)
@@ -200,14 +179,8 @@ public class TestOmBlockVersioning {
     String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
     String keyName = "key" + RandomStringUtils.randomNumeric(5);
 
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
+    OzoneBucket bucket =
+        TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName);
 
     OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
         .setVolumeName(volumeName)
@@ -218,48 +191,30 @@ public class TestOmBlockVersioning {
         .build();
 
     String dataString = RandomStringUtils.randomAlphabetic(100);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setUserName(userName);
-    // this write will create 1st version with one block
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    byte[] data = new byte[dataString.length()];
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
+
+    TestDataUtil.createKey(bucket, keyName, dataString);
+    assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));
     OmKeyInfo keyInfo = ozoneManager.lookupKey(omKeyArgs);
-    assertEquals(dataString, DFSUtil.bytes2String(data));
     assertEquals(0, keyInfo.getLatestVersionLocations().getVersion());
     assertEquals(1,
         keyInfo.getLatestVersionLocations().getLocationList().size());
 
     // this write will create 2nd version, 2nd version will contain block from
     // version 1, and add a new block
-    dataString = RandomStringUtils.randomAlphabetic(10);
-    data = new byte[dataString.length()];
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
+    TestDataUtil.createKey(bucket, keyName, dataString);
+
+
     keyInfo = ozoneManager.lookupKey(omKeyArgs);
-    assertEquals(dataString, DFSUtil.bytes2String(data));
+    assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));
     assertEquals(1, keyInfo.getLatestVersionLocations().getVersion());
     assertEquals(2,
         keyInfo.getLatestVersionLocations().getLocationList().size());
 
     dataString = RandomStringUtils.randomAlphabetic(200);
-    data = new byte[dataString.length()];
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
+    TestDataUtil.createKey(bucket, keyName, dataString);
+
     keyInfo = ozoneManager.lookupKey(omKeyArgs);
-    assertEquals(dataString, DFSUtil.bytes2String(data));
+    assertEquals(dataString, TestDataUtil.getKey(bucket, keyName));
     assertEquals(2, keyInfo.getLatestVersionLocations().getVersion());
     assertEquals(3,
         keyInfo.getLatestVersionLocations().getLocationList().size());
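
Note: TestDataUtil.getKey, used for each read-back above, is assumed to drain the whole key into a String. A plausible sketch against the OzoneBucket API, with commons-io IOUtils assumed on the classpath:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    import org.apache.commons.io.IOUtils;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.io.OzoneInputStream;

    public final class GetKeySketch {
      public static String getKey(OzoneBucket bucket, String keyName) throws IOException {
        // Drain the key through the RPC client's input stream.
        try (OzoneInputStream in = bucket.readKey(keyName)) {
          return new String(IOUtils.toByteArray(in), StandardCharsets.UTF_8);
        }
      }
    }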

+ 4 - 14
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmInit.java

@@ -16,14 +16,14 @@
  */
 package org.apache.hadoop.ozone.om;
 
+import java.io.IOException;
+import java.util.UUID;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
+
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -31,18 +31,11 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 
-import java.io.IOException;
-import java.util.UUID;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
-
 /**
  * Test Ozone Manager Init.
  */
 public class TestOmInit {
   private static MiniOzoneCluster cluster = null;
-  private static StorageHandler storageHandler;
-  private static UserArgs userArgs;
   private static OMMetrics omMetrics;
   private static OzoneConfiguration conf;
   private static String clusterId;
@@ -72,9 +65,6 @@ public class TestOmInit {
         .setOmId(omId)
         .build();
     cluster.waitForClusterToBeReady();
-    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
-    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
     omMetrics = cluster.getOzoneManager().getMetrics();
   }
 

+ 0 - 1449
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManager.java

@@ -1,1449 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.ozone.om;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.InetSocketAddress;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.text.ParseException;
-import java.util.List;
-import java.util.Random;
-import java.util.Set;
-import java.util.UUID;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import org.apache.commons.codec.binary.StringUtils;
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.OzoneTestUtils;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
-import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DBUpdatesRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeList;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType;
-import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
-import org.apache.hadoop.ozone.util.OzoneVersionInfo;
-import org.apache.hadoop.ozone.web.handlers.BucketArgs;
-import org.apache.hadoop.ozone.web.handlers.KeyArgs;
-import org.apache.hadoop.ozone.web.handlers.ListArgs;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.handlers.VolumeArgs;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
-import org.apache.hadoop.ozone.web.response.BucketInfo;
-import org.apache.hadoop.ozone.web.response.KeyInfo;
-import org.apache.hadoop.ozone.web.response.ListBuckets;
-import org.apache.hadoop.ozone.web.response.ListKeys;
-import org.apache.hadoop.ozone.web.response.ListVolumes;
-import org.apache.hadoop.ozone.web.response.VolumeInfo;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.utils.db.DBUpdatesWrapper;
-import org.apache.hadoop.utils.db.RDBStore;
-import org.apache.hadoop.utils.db.Table;
-import org.apache.hadoop.utils.db.Table.KeyValue;
-import org.apache.hadoop.utils.db.TableIterator;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
-import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL;
-
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.rocksdb.RocksDB;
-
-/**
- * Test Ozone Manager operations in a distributed handler scenario.
- */
-public class TestOzoneManager {
-  private MiniOzoneCluster cluster = null;
-  private StorageHandler storageHandler;
-  private UserArgs userArgs;
-  private OMMetrics omMetrics;
-  private OzoneConfiguration conf;
-  private String clusterId;
-  private String scmId;
-  private String omId;
-
-  @Rule
-  public Timeout timeout = new Timeout(60000);
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * ACLs are enabled and the open-key expiry threshold is set to two seconds.
-   *
-   * @throws IOException
-   */
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-    clusterId = UUID.randomUUID().toString();
-    scmId = UUID.randomUUID().toString();
-    omId = UUID.randomUUID().toString();
-    conf.setBoolean(OZONE_ACL_ENABLED, true);
-    conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
-    conf.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD);
-    cluster =  MiniOzoneCluster.newBuilder(conf)
-        .setClusterId(clusterId)
-        .setScmId(scmId)
-        .setOmId(omId)
-        .build();
-    cluster.waitForClusterToBeReady();
-    storageHandler = new ObjectStoreHandler(conf).getStorageHandler();
-    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    omMetrics = cluster.getOzoneManager().getMetrics();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  // Create a volume and verify its attributes after creation
-  @Test
-  public void testCreateVolume() throws IOException, OzoneException {
-    long volumeCreateFailCount = omMetrics.getNumVolumeCreateFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(userName));
-    Assert.assertEquals(volumeCreateFailCount,
-        omMetrics.getNumVolumeCreateFails());
-  }
-
-  // Create a volume and modify the volume owner and then test its attributes
-  @Test
-  public void testChangeVolumeOwner() throws IOException, OzoneException {
-    long volumeCreateFailCount = omMetrics.getNumVolumeCreateFails();
-    long volumeInfoFailCount = omMetrics.getNumVolumeInfoFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    String newUserName = "user" + RandomStringUtils.randomNumeric(5);
-    createVolumeArgs.setUserName(newUserName);
-    storageHandler.setVolumeOwner(createVolumeArgs);
-
-    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-
-    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
-    Assert.assertFalse(retVolumeInfo.getOwner().getName().equals(userName));
-    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(newUserName));
-    Assert.assertEquals(volumeCreateFailCount,
-        omMetrics.getNumVolumeCreateFails());
-    Assert.assertEquals(volumeInfoFailCount,
-        omMetrics.getNumVolumeInfoFails());
-  }
-
-  // Create a volume, modify the volume quota, and then test its attributes
-  @Test
-  public void testChangeVolumeQuota() throws IOException, OzoneException {
-    long numVolumeCreateFail = omMetrics.getNumVolumeCreateFails();
-    long numVolumeInfoFail = omMetrics.getNumVolumeInfoFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    Random rand = new Random();
-
-    // Create a new volume with a quota
-    OzoneQuota createQuota =
-        new OzoneQuota(rand.nextInt(100), OzoneQuota.Units.GB);
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    createVolumeArgs.setQuota(createQuota);
-    storageHandler.createVolume(createVolumeArgs);
-
-    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertEquals(createQuota.sizeInBytes(),
-        retVolumeInfo.getQuota().sizeInBytes());
-
-    // Set a new quota and test it
-    OzoneQuota setQuota =
-        new OzoneQuota(rand.nextInt(100), OzoneQuota.Units.GB);
-    createVolumeArgs.setQuota(setQuota);
-    storageHandler.setVolumeQuota(createVolumeArgs, false);
-    getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertEquals(setQuota.sizeInBytes(),
-        retVolumeInfo.getQuota().sizeInBytes());
-
-    // Remove the quota and test it again
-    storageHandler.setVolumeQuota(createVolumeArgs, true);
-    getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertEquals(OzoneConsts.MAX_QUOTA_IN_BYTES,
-        retVolumeInfo.getQuota().sizeInBytes());
-    Assert.assertEquals(numVolumeCreateFail,
-        omMetrics.getNumVolumeCreateFails());
-    Assert.assertEquals(numVolumeInfoFail,
-        omMetrics.getNumVolumeInfoFails());
-  }
-
-  // Create a volume, delete it, and then verify the deletion
-  @Test
-  public void testDeleteVolume() throws IOException, OzoneException {
-    long volumeCreateFailCount = omMetrics.getNumVolumeCreateFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String volumeName1 = volumeName + "_A";
-    String volumeName2 = volumeName + "_AA";
-    VolumeArgs volumeArgs = null;
-    VolumeInfo volumeInfo = null;
-
-    // Create 2 empty volumes with same prefix.
-    volumeArgs = new VolumeArgs(volumeName1, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-
-    volumeArgs = new VolumeArgs(volumeName2, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-
-    volumeArgs  = new VolumeArgs(volumeName1, userArgs);
-    volumeInfo = storageHandler.getVolumeInfo(volumeArgs);
-    Assert.assertTrue(volumeInfo.getVolumeName().equals(volumeName1));
-    Assert.assertTrue(volumeInfo.getOwner().getName().equals(userName));
-    Assert.assertEquals(volumeCreateFailCount,
-        omMetrics.getNumVolumeCreateFails());
-
-    // Volume with the _A suffix should be deletable as it is empty.
-    storageHandler.deleteVolume(volumeArgs);
-
-    // Make sure volume with _AA suffix still exists.
-    volumeArgs = new VolumeArgs(volumeName2, userArgs);
-    volumeInfo = storageHandler.getVolumeInfo(volumeArgs);
-    Assert.assertTrue(volumeInfo.getVolumeName().equals(volumeName2));
-
-    // Make sure volume with _A suffix is successfully deleted.
-    try {
-      volumeArgs = new VolumeArgs(volumeName1, userArgs);
-      storageHandler.getVolumeInfo(volumeArgs);
-      Assert.fail("Volume is not deleted");
-    } catch (OMException ex) {
-      Assert.assertEquals(ResultCodes.VOLUME_NOT_FOUND, ex.getResult());
-    }
-    // Delete the _AA volume, too.
-    storageHandler.deleteVolume(new VolumeArgs(volumeName2, userArgs));
-
-    // Make sure there is no volume information left for this user.
-    OMMetadataManager metadataManager =
-        cluster.getOzoneManager().getMetadataManager();
-
-    String userKey = metadataManager.getUserKey(userName);
-    VolumeList volumes = metadataManager.getUserTable().get(userKey);
-
-    // That was the last volume of the user, so there shouldn't be any record.
-    Assert.assertNull(volumes);
-
-  }
-
-  // Create a volume with a bucket inside it, then verify that deleting
-  // the non-empty volume fails.
-  @Test
-  public void testFailedDeleteVolume() throws IOException, OzoneException {
-    long numVolumeCreateFails = omMetrics.getNumVolumeCreateFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    VolumeArgs getVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    VolumeInfo retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(userName));
-    Assert.assertEquals(numVolumeCreateFails,
-        omMetrics.getNumVolumeCreateFails());
-
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    try {
-      storageHandler.deleteVolume(createVolumeArgs);
-      Assert.fail("Expecting deletion should fail "
-          + "because volume is not empty");
-    } catch (OMException ex) {
-      Assert.assertEquals(ResultCodes.VOLUME_NOT_EMPTY, ex.getResult());
-    }
-    retVolumeInfo = storageHandler.getVolumeInfo(getVolumeArgs);
-    Assert.assertTrue(retVolumeInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(retVolumeInfo.getOwner().getName().equals(userName));
-  }
-
-  // Create a volume and test volume access for different identity types
-  @Test
-  public void testAccessVolume() throws IOException, OzoneException {
-    String userName = UserGroupInformation.getCurrentUser().getUserName();
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String[] groupName =
-        {"group" + RandomStringUtils.randomNumeric(5)};
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    createVolumeArgs.setGroups(groupName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    OzoneAcl userAcl = new OzoneAcl(ACLIdentityType.USER, userName,
-        ACLType.READ, ACCESS);
-    Assert.assertTrue(storageHandler.checkVolumeAccess(volumeName, userAcl));
-    OzoneAcl group = new OzoneAcl(ACLIdentityType.GROUP, groupName[0],
-        ACLType.READ, ACCESS);
-    Assert.assertTrue(storageHandler.checkVolumeAccess(volumeName, group));
-
-    // Create a different user and access should fail
-    String falseUserName = "user" + RandomStringUtils.randomNumeric(5);
-    OzoneAcl falseUserAcl =
-        new OzoneAcl(ACLIdentityType.USER, falseUserName,
-            ACLType.ALL, ACCESS);
-    Assert.assertFalse(storageHandler
-        .checkVolumeAccess(volumeName, falseUserAcl));
-    // Checking access with a user name but GROUP identity type should fail
-    OzoneAcl falseGroupAcl = new OzoneAcl(ACLIdentityType.GROUP, userName,
-        ACLType.ALL, ACCESS);
-    Assert.assertFalse(storageHandler
-        .checkVolumeAccess(volumeName, falseGroupAcl));
-
-    // Access for acl type world should also fail
-    OzoneAcl worldAcl =
-        new OzoneAcl(ACLIdentityType.WORLD, "", ACLType.READ, ACCESS);
-    Assert.assertFalse(storageHandler.checkVolumeAccess(volumeName, worldAcl));
-
-    Assert.assertEquals(0, omMetrics.getNumVolumeCheckAccessFails());
-    Assert.assertEquals(0, omMetrics.getNumVolumeCreateFails());
-  }
-
-  @Test
-  public void testCreateBucket() throws IOException, OzoneException {
-    long numVolumeCreateFail = omMetrics.getNumVolumeCreateFails();
-    long numBucketCreateFail = omMetrics.getNumBucketCreateFails();
-    long numBucketInfoFail = omMetrics.getNumBucketInfoFails();
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    Assert.assertEquals(numVolumeCreateFail,
-        omMetrics.getNumVolumeCreateFails());
-    Assert.assertEquals(numBucketCreateFail,
-        omMetrics.getNumBucketCreateFails());
-    Assert.assertEquals(numBucketInfoFail,
-        omMetrics.getNumBucketInfoFails());
-  }
-
-  @Test
-  public void testDeleteBucket() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    storageHandler.deleteBucket(bucketArgs);
-
-    OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND,
-        () -> storageHandler.getBucketInfo(getBucketArgs));
-  }
-
-  @Test
-  public void testDeleteNonExistingBucket() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    BucketArgs newBucketArgs = new BucketArgs(
-        volumeName, bucketName + "_invalid", userArgs);
-    OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND,
-        () -> storageHandler.deleteBucket(newBucketArgs));
-  }
-
-  @Test
-  public void testDeleteNonEmptyBucket() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    VolumeArgs volumeArgs = new VolumeArgs(volumeName, userArgs);
-    volumeArgs.setUserName(userName);
-    volumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(volumeArgs);
-    BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    storageHandler.createBucket(bucketArgs);
-    BucketArgs getBucketArgs = new BucketArgs(volumeName, bucketName,
-        userArgs);
-    BucketInfo bucketInfo = storageHandler.getBucketInfo(getBucketArgs);
-    Assert.assertTrue(bucketInfo.getVolumeName().equals(volumeName));
-    Assert.assertTrue(bucketInfo.getBucketName().equals(bucketName));
-    String dataString = RandomStringUtils.randomAscii(100);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(100);
-    keyArgs.setUserName(userName);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_EMPTY,
-        () -> storageHandler.deleteBucket(bucketArgs));
-  }
-
-  /**
-   * Basic test of both putKey and getKey from OM, as one cannot be tested
-   * without the other.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testGetKeyWriterReader() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyAllocates = omMetrics.getNumKeyAllocates();
-    long numKeyLookups = omMetrics.getNumKeyLookups();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString = RandomStringUtils.randomAscii(100);
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(100);
-    keyArgs.setUserName(userName);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    Assert.assertEquals(1 + numKeyAllocates, omMetrics.getNumKeyAllocates());
-
-    byte[] data = new byte[dataString.length()];
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
-    Assert.assertEquals(dataString, DFSUtil.bytes2String(data));
-    Assert.assertEquals(1 + numKeyLookups, omMetrics.getNumKeyLookups());
-  }
-
-  /**
-   * Test writing the same key twice. The second write currently succeeds,
-   * as key overwrite is allowed on the OM side (see the note on HDFS-11922
-   * in the test body).
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testKeyOverwrite() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyAllocateFails = omMetrics.getNumKeyAllocateFails();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs.setSize(100);
-    keyArgs.setUserName(userName);
-    String dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-
-    // We allow the key overwrite to succeed. Note: until HDFS-11922 is
-    // fixed, this causes a data block leak on the datanode side. The
-    // overwrite only replaces the key on the OM, so the orphaned blocks
-    // still need to be garbage collected from the datanodes.
-    KeyArgs keyArgs2 = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    keyArgs2.setUserName(userName);
-    storageHandler.newKeyWriter(keyArgs2);
-    Assert
-        .assertEquals(numKeyAllocateFails, omMetrics.getNumKeyAllocateFails());
-  }
-
-  /**
-   * Test getting a non-existing key.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testGetNonExistKey() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyLookupFails = omMetrics.getNumKeyLookupFails();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(volumeName, bucketName, keyName, userArgs);
-    // Try to get the key; it should fail as it hasn't been created.
-    OzoneTestUtils.expectOmException(KEY_NOT_FOUND,
-        () -> storageHandler.newKeyReader(keyArgs));
-    Assert.assertEquals(1 + numKeyLookupFails,
-        omMetrics.getNumKeyLookupFails());
-  }
-
-  /**
-   * Test deleting keys through OM.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testDeleteKey() throws Exception {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyDeletes = omMetrics.getNumKeyDeletes();
-    long numKeyDeleteFails = omMetrics.getNumKeyDeletesFails();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(100);
-    String dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-
-    storageHandler.deleteKey(keyArgs);
-    Assert.assertEquals(1 + numKeyDeletes, omMetrics.getNumKeyDeletes());
-
-    // Make sure the deleted key has been moved to the deleted table.
-    OMMetadataManager manager = cluster.getOzoneManager().
-        getMetadataManager();
-    try (TableIterator<String, ? extends KeyValue<String, OmKeyInfo>>  iter =
-            manager.getDeletedTable().iterator()) {
-      iter.seekToFirst();
-      Table.KeyValue kv = iter.next();
-      Assert.assertNotNull(kv);
-    }
-
-    // Delete the key again to test deleting a non-existing key.
-    OzoneTestUtils.expectOmException(KEY_NOT_FOUND,
-        () -> storageHandler.deleteKey(keyArgs));
-    Assert.assertEquals(1 + numKeyDeleteFails,
-        omMetrics.getNumKeyDeletesFails());
-  }
-
-  /**
-   * Test renaming keys through OM.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testRenameKey() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    long numKeyRenames = omMetrics.getNumKeyRenames();
-    long numKeyRenameFails = omMetrics.getNumKeyRenameFails();
-    int testRenameFails = 0;
-    int testRenames = 0;
-    OMException omException = null;
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    storageHandler.createBucket(bucketArgs);
-
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(100);
-    String toKeyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    // Renaming a non-existent key should fail
-    try {
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (OMException e) {
-      testRenameFails++;
-      omException = e;
-    }
-    Assert.assertEquals(KEY_NOT_FOUND, omException.getResult());
-
-    // Write the contents of the key to be renamed
-    String dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-
-    // Rename the key
-    toKeyName = "key" + RandomStringUtils.randomNumeric(5);
-    testRenames++;
-    storageHandler.renameKey(keyArgs, toKeyName);
-    Assert.assertEquals(numKeyRenames + testRenames,
-        omMetrics.getNumKeyRenames());
-    Assert.assertEquals(numKeyRenameFails + testRenameFails,
-        omMetrics.getNumKeyRenameFails());
-
-    // Try to get the old key; it should fail as it has been renamed
-    try {
-      storageHandler.newKeyReader(keyArgs);
-    } catch (OMException e) {
-      omException = e;
-    }
-    Assert.assertEquals(KEY_NOT_FOUND, omException.getResult());
-
-    // Verify the contents of the renamed key
-    keyArgs = new KeyArgs(toKeyName, bucketArgs);
-    InputStream in = storageHandler.newKeyReader(keyArgs);
-    byte[] b = new byte[dataString.getBytes().length];
-    in.read(b);
-    Assert.assertEquals(new String(b), dataString);
-
-    // Recreate the original key. Renaming to an existing key should fail.
-    keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(100);
-    dataString = RandomStringUtils.randomAscii(100);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-      stream.close();
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (OMException e) {
-      testRenameFails++;
-      omException = e;
-    }
-    Assert.assertEquals(ResultCodes.KEY_ALREADY_EXISTS,
-        omException.getResult());
-
-    // Rename to empty string should fail
-    toKeyName = "";
-    try {
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (OMException e) {
-      testRenameFails++;
-      omException = e;
-    }
-    Assert.assertEquals(ResultCodes.INVALID_KEY_NAME, omException.getResult());
-
-    // Rename from empty string should fail
-    keyArgs = new KeyArgs("", bucketArgs);
-    toKeyName = "key" + RandomStringUtils.randomNumeric(5);
-    try {
-      testRenames++;
-      storageHandler.renameKey(keyArgs, toKeyName);
-    } catch (OMException e) {
-      testRenameFails++;
-      omException = e;
-    }
-    Assert.assertEquals(ResultCodes.INVALID_KEY_NAME, omException.getResult());
-
-    Assert.assertEquals(numKeyRenames + testRenames,
-        omMetrics.getNumKeyRenames());
-    Assert.assertEquals(numKeyRenameFails + testRenameFails,
-        omMetrics.getNumKeyRenameFails());
-  }
-
-  @Test
-  public void testListBuckets() throws IOException, OzoneException {
-    ListBuckets result = null;
-    ListArgs listBucketArgs = null;
-
-    // Create volume - volA.
-    final String volAname = "volA";
-    VolumeArgs volAArgs = new VolumeArgs(volAname, userArgs);
-    volAArgs.setUserName("userA");
-    volAArgs.setAdminName("adminA");
-    storageHandler.createVolume(volAArgs);
-
-    // Create 20 buckets in volA for tests.
-    for (int i = 0; i < 10; i++) {
-      // Create "/volA/aBucket_0" to "/volA/aBucket_9" buckets in volA volume.
-      BucketArgs aBuckets = new BucketArgs(volAname,
-          "aBucket_" + i, userArgs);
-      if (i % 3 == 0) {
-        aBuckets.setStorageType(StorageType.ARCHIVE);
-      } else {
-        aBuckets.setStorageType(StorageType.DISK);
-      }
-      storageHandler.createBucket(aBuckets);
-
-      // Create "/volA/bBucket_0" to "/volA/bBucket_9" buckets in volA volume.
-      BucketArgs bBuckets = new BucketArgs(volAname,
-          "bBucket_" + i, userArgs);
-      if (i % 3 == 0) {
-        bBuckets.setStorageType(StorageType.RAM_DISK);
-      } else {
-        bBuckets.setStorageType(StorageType.SSD);
-      }
-      storageHandler.createBucket(bBuckets);
-    }
-
-    VolumeArgs volArgs = new VolumeArgs(volAname, userArgs);
-
-    // List all buckets in volA.
-    listBucketArgs = new ListArgs(volArgs, null, 100, null);
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(20, result.getBuckets().size());
-    List<BucketInfo> archiveBuckets = result.getBuckets().stream()
-        .filter(item -> item.getStorageType() == StorageType.ARCHIVE)
-        .collect(Collectors.toList());
-    Assert.assertEquals(4, archiveBuckets.size());
-
-    // List buckets with prefix "aBucket".
-    listBucketArgs = new ListArgs(volArgs, "aBucket", 100, null);
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(10, result.getBuckets().size());
-    Assert.assertTrue(result.getBuckets().stream()
-        .allMatch(entry -> entry.getBucketName().startsWith("aBucket")));
-
-    // List a certain number of buckets.
-    listBucketArgs = new ListArgs(volArgs, null, 3, null);
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(3, result.getBuckets().size());
-    Assert.assertEquals("aBucket_0",
-        result.getBuckets().get(0).getBucketName());
-    Assert.assertEquals("aBucket_1",
-        result.getBuckets().get(1).getBucketName());
-    Assert.assertEquals("aBucket_2",
-        result.getBuckets().get(2).getBucketName());
-
-    // List a certain number of buckets from the startKey.
-    listBucketArgs = new ListArgs(volArgs, null, 2, "bBucket_3");
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(2, result.getBuckets().size());
-    Assert.assertEquals("bBucket_4",
-        result.getBuckets().get(0).getBucketName());
-    Assert.assertEquals("bBucket_5",
-        result.getBuckets().get(1).getBucketName());
-
-    // Provide an invalid bucket name as start key.
-    listBucketArgs = new ListArgs(volArgs, null, 100, "unknown_bucket_name");
-    ListBuckets buckets = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(0, buckets.getBuckets().size());
-
-    // Use all arguments.
-    listBucketArgs = new ListArgs(volArgs, "b", 5, "bBucket_7");
-    result = storageHandler.listBuckets(listBucketArgs);
-    Assert.assertEquals(2, result.getBuckets().size());
-    Assert.assertEquals("bBucket_8",
-        result.getBuckets().get(0).getBucketName());
-    Assert.assertEquals("bBucket_9",
-        result.getBuckets().get(1).getBucketName());
-
-    // Provide an invalid maxKeys argument.
-    try {
-      listBucketArgs = new ListArgs(volArgs, null, -1, null);
-      storageHandler.listBuckets(listBucketArgs);
-      Assert.fail("Expecting an error when the given"
-          + " maxKeys argument is invalid.");
-    } catch (Exception e) {
-      Assert.assertTrue(e.getMessage()
-          .contains(String.format("the value must be in range (0, %d]",
-              OzoneConsts.MAX_LISTBUCKETS_SIZE)));
-    }
-
-    // Provide an invalid volume name.
-    VolumeArgs invalidVolArgs = new VolumeArgs("invalid_name", userArgs);
-    try {
-      listBucketArgs = new ListArgs(invalidVolArgs, null, 100, null);
-      storageHandler.listBuckets(listBucketArgs);
-      Assert.fail("Expecting an error when the given volume name is invalid.");
-    } catch (OMException e) {
-      Assert.assertEquals(VOLUME_NOT_FOUND, e.getResult());
-    }
-  }
-
-  /**
-   * Test list keys.
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testListKeys() throws Exception {
-    ListKeys result = null;
-    ListArgs listKeyArgs = null;
-
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    // Write 20 keys in bucket.
-    int numKeys = 20;
-    String keyName = "Key";
-    KeyArgs keyArgs = null;
-    for (int i = 0; i < numKeys; i++) {
-      if (i % 2 == 0) {
-        // Create /volume/bucket/aKey[0,2,4,...,18] in bucket.
-        keyArgs = new KeyArgs("a" + keyName + i, bucketArgs);
-      } else {
-        // Create /volume/bucket/bKey[1,3,5,...,19] in bucket.
-        keyArgs = new KeyArgs("b" + keyName + i, bucketArgs);
-      }
-      keyArgs.setSize(4096);
-
-      // Just for testing the list keys call; no need to write real data.
-      OutputStream stream = storageHandler.newKeyWriter(keyArgs);
-      stream.close();
-    }
-
-    // List all keys in bucket.
-    bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
-    listKeyArgs = new ListArgs(bucketArgs, null, 100, null);
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(numKeys, result.getKeyList().size());
-
-    // List keys with prefix "aKey".
-    listKeyArgs = new ListArgs(bucketArgs, "aKey", 100, null);
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(numKeys / 2, result.getKeyList().size());
-    Assert.assertTrue(result.getKeyList().stream()
-        .allMatch(entry -> entry.getKeyName().startsWith("aKey")));
-
-    // List a certain number of keys.
-    listKeyArgs = new ListArgs(bucketArgs, null, 3, null);
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(3, result.getKeyList().size());
-    Assert.assertEquals("aKey0",
-        result.getKeyList().get(0).getKeyName());
-    Assert.assertEquals("aKey10",
-        result.getKeyList().get(1).getKeyName());
-    Assert.assertEquals("aKey12",
-        result.getKeyList().get(2).getKeyName());
-
-    // List a certain number of keys from the startKey.
-    listKeyArgs = new ListArgs(bucketArgs, null, 2, "bKey1");
-    result = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(2, result.getKeyList().size());
-    Assert.assertEquals("bKey11",
-        result.getKeyList().get(0).getKeyName());
-    Assert.assertEquals("bKey13",
-        result.getKeyList().get(1).getKeyName());
-
-    // Provide an invalid key name as start key.
-    listKeyArgs = new ListArgs(bucketArgs, null, 100, "invalid_start_key");
-    ListKeys keys = storageHandler.listKeys(listKeyArgs);
-    Assert.assertEquals(0, keys.getKeyList().size());
-
-    // Provide an invalid maxKeys argument.
-    try {
-      listKeyArgs = new ListArgs(bucketArgs, null, -1, null);
-      storageHandler.listKeys(listKeyArgs);
-      Assert.fail("Expecting an error when the given"
-          + " maxKeys argument is invalid.");
-    } catch (Exception e) {
-      GenericTestUtils.assertExceptionContains(
-          String.format("the value must be in range (0, %d]",
-              OzoneConsts.MAX_LISTKEYS_SIZE), e);
-    }
-
-    OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND, () -> {
-      // Provide an invalid bucket name.
-      BucketArgs bucket = new BucketArgs("invalid_bucket", createVolumeArgs);
-      ListArgs ks = new ListArgs(bucket, null, numKeys, null);
-      storageHandler.listKeys(ks);
-    });
-  }
-
-  @Test
-  public void testListVolumes() throws IOException, OzoneException {
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    String user0 = ugi.getUserName();
-    String user1 = "testListVolumes-user-1";
-    String adminUser = "testListVolumes-admin";
-    ListArgs listVolumeArgs;
-    ListVolumes volumes;
-
-    // Create 10 volumes by user0 and user1
-    String[] user0vols = new String[10];
-    String[] user1vols = new String[10];
-    for (int i = 0; i < 10; i++) {
-      VolumeArgs createVolumeArgs;
-      String user0VolName = "Vol-" + user0 + "-" + i;
-      user0vols[i] = user0VolName;
-      createVolumeArgs = new VolumeArgs(user0VolName, userArgs);
-      createVolumeArgs.setUserName(user0);
-      createVolumeArgs.setAdminName(adminUser);
-      createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
-      storageHandler.createVolume(createVolumeArgs);
-
-      String user1VolName = "Vol-" + user1 + "-" + i;
-      user1vols[i] = user1VolName;
-      createVolumeArgs = new VolumeArgs(user1VolName, userArgs);
-      createVolumeArgs.setUserName(user1);
-      createVolumeArgs.setAdminName(adminUser);
-      createVolumeArgs.setQuota(new OzoneQuota(i, OzoneQuota.Units.GB));
-      storageHandler.createVolume(createVolumeArgs);
-    }
-
-    // Test listing all volumes - support for this operation has been removed
-    // for the time being. TODO: bring it back if needed.
-    UserArgs userArgs0 = new UserArgs(user0, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    //listVolumeArgs = new ListArgs(userArgs0,"Vol-testListVolumes", 100, null);
-    // listVolumeArgs.setRootScan(true);
-    // volumes = storageHandler.listVolumes(listVolumeArgs);
-    // Assert.assertEquals(20, volumes.getVolumes().size());
-
-    // Test listing all volumes belonging to a user
-    listVolumeArgs = new ListArgs(userArgs0, null, 100, null);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(10, volumes.getVolumes().size());
-
-    // Test prefix
-    listVolumeArgs = new ListArgs(userArgs0,
-        "Vol-" + user0 + "-3", 100, null);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(1, volumes.getVolumes().size());
-    Assert.assertEquals(user0vols[3],
-        volumes.getVolumes().get(0).getVolumeName());
-    Assert.assertEquals(user0,
-        volumes.getVolumes().get(0).getOwner().getName());
-
-    // Test list volumes by user
-    UserArgs userArgs1 = new UserArgs(user1, OzoneUtils.getRequestID(),
-        null, null, null, null);
-    listVolumeArgs = new ListArgs(userArgs1, null, 100, null);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(0, volumes.getVolumes().size());
-
-    // Make sure all available fields are returned
-    final String user0vol4 = "Vol-" + user0 + "-4";
-    final String user0vol5 = "Vol-" + user0 + "-5";
-    listVolumeArgs = new ListArgs(userArgs0, null, 1, user0vol4);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(1, volumes.getVolumes().size());
-    Assert.assertEquals(user0,
-        volumes.getVolumes().get(0).getOwner().getName());
-    Assert.assertEquals(user0vol5,
-        volumes.getVolumes().get(0).getVolumeName());
-    Assert.assertEquals(5,
-        volumes.getVolumes().get(0).getQuota().getSize());
-    Assert.assertEquals(OzoneQuota.Units.GB,
-        volumes.getVolumes().get(0).getQuota().getUnit());
-
-    // A user that doesn't own any volumes
-    UserArgs userArgsX = new UserArgs("unknownUser", OzoneUtils.getRequestID(),
-        null, null, null, null);
-    listVolumeArgs = new ListArgs(userArgsX, null, 100, null);
-    listVolumeArgs.setRootScan(false);
-    volumes = storageHandler.listVolumes(listVolumeArgs);
-    Assert.assertEquals(0, volumes.getVolumes().size());
-  }
-
-  /**
-   * Test get key information.
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  @Test
-  public void testGetKeyInfo() throws IOException,
-      OzoneException, ParseException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    long currentTime = Time.now();
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String keyName = "testKey";
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    keyArgs.setSize(4096);
-
-    OutputStream stream = storageHandler.newKeyWriter(keyArgs);
-    stream.close();
-
-    KeyInfo keyInfo = storageHandler.getKeyInfo(keyArgs);
-    // Compare times in seconds, since reparsing the date string into
-    // milliseconds loses precision.
-    Assert.assertTrue(
-        (HddsClientUtils.formatDateTime(keyInfo.getCreatedOn()) / 1000) >= (
-            currentTime / 1000));
-    Assert.assertTrue(
-        (HddsClientUtils.formatDateTime(keyInfo.getModifiedOn()) / 1000) >= (
-            currentTime / 1000));
-    Assert.assertEquals(keyName, keyInfo.getKeyName());
-    // Without data written, the size should be 0.
-    Assert.assertEquals(0, keyInfo.getSize());
-  }
-
-  /**
-   * Test that the write can proceed without having to set the right size.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testWriteSize() throws IOException, OzoneException {
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    String dataString = RandomStringUtils.randomAscii(100);
-    // write a key without specifying size at all
-    String keyName = "testKey";
-    KeyArgs keyArgs = new KeyArgs(keyName, bucketArgs);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs)) {
-      stream.write(dataString.getBytes());
-    }
-    byte[] data = new byte[dataString.length()];
-    try (InputStream in = storageHandler.newKeyReader(keyArgs)) {
-      in.read(data);
-    }
-    Assert.assertEquals(dataString, DFSUtil.bytes2String(data));
-
-    // Write a key with a declared size, but write more data than declared.
-    String keyName1 = "testKey1";
-    KeyArgs keyArgs1 = new KeyArgs(keyName1, bucketArgs);
-    keyArgs1.setSize(30);
-    try (OutputStream stream = storageHandler.newKeyWriter(keyArgs1)) {
-      stream.write(dataString.getBytes());
-    }
-    byte[] data1 = new byte[dataString.length()];
-    try (InputStream in = storageHandler.newKeyReader(keyArgs1)) {
-      in.read(data1);
-    }
-    Assert.assertEquals(dataString, DFSUtil.bytes2String(data1));
-  }
-
-  /**
-   * Tests the RPC call for getting scmId and clusterId from SCM.
-   * @throws IOException
-   */
-  @Test
-  public void testGetScmInfo() throws IOException {
-    ScmInfo info = cluster.getOzoneManager().getScmInfo();
-    Assert.assertEquals(clusterId, info.getClusterId());
-    Assert.assertEquals(scmId, info.getScmId());
-  }
-
-  @Test
-  @Ignore("Disabling this test until Open Key is fixed.")
-  public void testExpiredOpenKey() throws Exception {
-//    BackgroundService openKeyCleanUpService = ((BlockManagerImpl)cluster
-//        .getOzoneManager().getBlockManager()).getOpenKeyCleanupService();
-
-    String userName = "user" + RandomStringUtils.randomNumeric(5);
-    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-
-    VolumeArgs createVolumeArgs = new VolumeArgs(volumeName, userArgs);
-    createVolumeArgs.setUserName(userName);
-    createVolumeArgs.setAdminName(adminName);
-    storageHandler.createVolume(createVolumeArgs);
-
-    BucketArgs bucketArgs = new BucketArgs(bucketName, createVolumeArgs);
-    bucketArgs.setStorageType(StorageType.DISK);
-    storageHandler.createBucket(bucketArgs);
-
-    // open some keys.
-
-    KeyArgs keyArgs1 = new KeyArgs("testKey1", bucketArgs);
-    KeyArgs keyArgs2 = new KeyArgs("testKey2", bucketArgs);
-    KeyArgs keyArgs3 = new KeyArgs("testKey3", bucketArgs);
-    KeyArgs keyArgs4 = new KeyArgs("testKey4", bucketArgs);
-    List<BlockGroup> openKeys;
-    storageHandler.newKeyWriter(keyArgs1);
-    storageHandler.newKeyWriter(keyArgs2);
-    storageHandler.newKeyWriter(keyArgs3);
-    storageHandler.newKeyWriter(keyArgs4);
-
-    Set<String> expected = Stream.of(
-        "testKey1", "testKey2", "testKey3", "testKey4")
-        .collect(Collectors.toSet());
-
-    // Now all k1-k4 should be in open state, so ExpiredOpenKeys should not
-    // contain these values.
-    openKeys = cluster.getOzoneManager()
-        .getMetadataManager().getExpiredOpenKeys();
-
-    for (BlockGroup bg : openKeys) {
-      String[] subs = bg.getGroupID().split("/");
-      String keyName = subs[subs.length - 1];
-      Assert.assertFalse(expected.contains(keyName));
-    }
-
-    Thread.sleep(2000);
-    // Now all k1-k4 should be in ExpiredOpenKeys
-    openKeys = cluster.getOzoneManager()
-        .getMetadataManager().getExpiredOpenKeys();
-    for (BlockGroup bg : openKeys) {
-      String[] subs = bg.getGroupID().split("/");
-      String keyName = subs[subs.length - 1];
-      if (expected.contains(keyName)) {
-        expected.remove(keyName);
-      }
-    }
-    Assert.assertEquals(0, expected.size());
-
-    KeyArgs keyArgs5 = new KeyArgs("testKey5", bucketArgs);
-    storageHandler.newKeyWriter(keyArgs5);
-
-    //openKeyCleanUpService.triggerBackgroundTaskForTesting();
-    Thread.sleep(2000);
-    // Now all k1-k4 should have been removed by the clean-up task; only k5
-    // should be present in ExpiredOpenKeys.
-    openKeys =
-        cluster.getOzoneManager().getMetadataManager().getExpiredOpenKeys();
-    System.out.println(openKeys);
-    boolean key5found = false;
-    Set<String> removed = Stream.of(
-        "testKey1", "testKey2", "testKey3", "testKey4")
-        .collect(Collectors.toSet());
-    for (BlockGroup bg : openKeys) {
-      String[] subs = bg.getGroupID().split("/");
-      String keyName = subs[subs.length - 1];
-      Assert.assertFalse(removed.contains(keyName));
-      if (keyName.equals("testKey5")) {
-        key5found = true;
-      }
-    }
-    Assert.assertTrue(key5found);
-  }
-
-  /**
-   * Tests the OM Initialization.
-   * @throws IOException
-   */
-  @Test
-  public void testOmInitialization() throws IOException {
-    // Read the version file info from OM version file
-    OMStorage omStorage = cluster.getOzoneManager().getOmStorage();
-    SCMStorageConfig scmStorageConfig = new SCMStorageConfig(conf);
-    // Assert that the cluster ID and SCM ID are properly set in the SCM
-    // version file.
-    Assert.assertEquals(clusterId, scmStorageConfig.getClusterID());
-    Assert.assertEquals(scmId, scmStorageConfig.getScmId());
-    // Assert that the OM ID is properly set in the OM version file.
-    Assert.assertEquals(omId, omStorage.getOmId());
-    // Assert that the SCM info is correct in the OM version file.
-    Assert.assertEquals(clusterId, omStorage.getClusterID());
-    Assert.assertEquals(scmId, omStorage.getScmId());
-  }
-
-  /**
-   * Tests the OM Initialization Failure.
-   * @throws IOException
-   */
-  @Test
-  public void testOmInitializationFailure() throws Exception {
-    OzoneConfiguration config = new OzoneConfiguration();
-    final String path =
-        GenericTestUtils.getTempPath(UUID.randomUUID().toString());
-    Path metaDirPath = Paths.get(path, "om-meta");
-    config.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
-    config.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
-    config.set(OZONE_OM_ADDRESS_KEY, "127.0.0.1:0");
-    config.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0");
-    config.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
-        conf.get(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY));
-
-    OzoneTestUtils.expectOmException(ResultCodes.OM_NOT_INITIALIZED, () -> {
-      OzoneManager.createOm(config);
-    });
-
-    OzoneTestUtils
-        .expectOmException(ResultCodes.SCM_VERSION_MISMATCH_ERROR, () -> {
-          OMStorage omStore = new OMStorage(config);
-          omStore.setClusterId("testClusterId");
-          omStore.setScmId("testScmId");
-          // writes the version file properties
-          omStore.initialize();
-          OzoneManager.createOm(config);
-        });
-  }
-
-  @Test
-  public void testGetServiceList() throws IOException {
-    long numGetServiceListCalls = omMetrics.getNumGetServiceLists();
-    List<ServiceInfo> services = cluster.getOzoneManager().getServiceList();
-
-    Assert.assertEquals(numGetServiceListCalls + 1,
-        omMetrics.getNumGetServiceLists());
-
-    ServiceInfo omInfo = services.stream().filter(
-        a -> a.getNodeType().equals(HddsProtos.NodeType.OM))
-        .collect(Collectors.toList()).get(0);
-    InetSocketAddress omAddress = new InetSocketAddress(omInfo.getHostname(),
-        omInfo.getPort(ServicePort.Type.RPC));
-    Assert.assertEquals(NetUtils.createSocketAddr(
-        conf.get(OZONE_OM_ADDRESS_KEY)), omAddress);
-
-    ServiceInfo scmInfo = services.stream().filter(
-        a -> a.getNodeType().equals(HddsProtos.NodeType.SCM))
-        .collect(Collectors.toList()).get(0);
-    InetSocketAddress scmAddress = new InetSocketAddress(scmInfo.getHostname(),
-        scmInfo.getPort(ServicePort.Type.RPC));
-    Assert.assertEquals(NetUtils.createSocketAddr(
-        conf.get(OZONE_SCM_CLIENT_ADDRESS_KEY)), scmAddress);
-  }
-
-  @Test
-  public void testVersion() {
-    String expectedVersion = OzoneVersionInfo.OZONE_VERSION_INFO.getVersion();
-    String actualVersion = cluster.getOzoneManager().getSoftwareVersion();
-    Assert.assertEquals(expectedVersion, actualVersion);
-  }
-
-  /**
-   * Test that the OM RocksDB keyMayExist API works for keys that are present.
-   * The test lives in this module because it needs access to codec-dependent
-   * objects such as OmKeyInfo.
-   * @throws Exception if OM or RocksDB operations fail.
-   */
-  @Test
-  public void testDBKeyMayExist() throws Exception {
-    RDBStore rdbStore = (RDBStore) cluster.getOzoneManager()
-        .getMetadataManager().getStore();
-    RocksDB db = rdbStore.getDb();
-
-    OmKeyInfo keyInfo = getNewOmKeyInfo();
-    OmKeyInfoCodec omKeyInfoCodec = new OmKeyInfoCodec();
-
-    db.put(StringUtils.getBytesUtf16("OMKey1"),
-        omKeyInfoCodec.toPersistedFormat(keyInfo));
-
-    StringBuilder sb = new StringBuilder();
-    Assert.assertTrue(db.keyMayExist(StringUtils.getBytesUtf16("OMKey1"),
-        sb));
-    Assert.assertTrue(sb.length() > 0);
-  }
-
-  @Test
-  public void testGetOMDBUpdates() throws IOException {
-    DBUpdatesRequest dbUpdatesRequest =
-        DBUpdatesRequest.newBuilder().setSequenceNumber(0).build();
-
-    DBUpdatesWrapper dbUpdates =
-        cluster.getOzoneManager().getDBUpdates(dbUpdatesRequest);
-    Assert.assertTrue(dbUpdates.getData().isEmpty());
-
-    //Write data to OM.
-    OmKeyInfo keyInfo = getNewOmKeyInfo();
-    Assert.assertNotNull(keyInfo);
-    dbUpdates =
-        cluster.getOzoneManager().getDBUpdates(dbUpdatesRequest);
-    Assert.assertFalse(dbUpdates.getData().isEmpty());
-  }
-
-  private OmKeyInfo getNewOmKeyInfo() throws IOException {
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder()
-        .setVolume("vol1")
-        .setAdminName("bilbo")
-        .setOwnerName("bilbo")
-        .build();
-    cluster.getOzoneManager().createVolume(volumeArgs);
-
-    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
-        .setVolumeName("vol1")
-        .setBucketName("bucket1")
-        .build();
-    cluster.getOzoneManager().createBucket(bucketInfo);
-
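-    // Opening the key allocates blocks and returns an OpenKeySession; the
-    // key would only become visible after a later commitKey call.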
-    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
-        .setBucketName("bucket1")
-        .setFactor(HddsProtos.ReplicationFactor.ONE)
-        .setType(HddsProtos.ReplicationType.STAND_ALONE)
-        .setAcls(OzoneAclUtil.getAclList(ugi.getUserName(), ugi.getGroups(),
-            ALL, ALL))
-        .setVolumeName("vol1")
-        .setKeyName(UUID.randomUUID().toString())
-        .setDataSize(16 * 1024 * 1024 * 10)
-        .build();
-    OpenKeySession keySession = cluster.getOzoneManager().getKeyManager()
-        .openKey(keyArgs);
-    return keySession.getKeyInfo();
-  }
-}

+ 12 - 16
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java

@@ -18,7 +18,10 @@
 
 package org.apache.hadoop.ozone.om;
 
-import org.apache.commons.lang3.RandomStringUtils;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.UUID;
+
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -29,25 +32,20 @@ import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.UUID;
 
+import org.apache.commons.lang3.RandomStringUtils;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
+import org.junit.After;
+import org.junit.Assert;
 import static org.junit.Assert.fail;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
 
 /**
  * Test some client operations after cluster starts. And perform restart and
@@ -55,7 +53,6 @@ import static org.junit.Assert.fail;
  */
 public class TestOzoneManagerRestart {
   private MiniOzoneCluster cluster = null;
-  private UserArgs userArgs;
   private OzoneConfiguration conf;
   private String clusterId;
   private String scmId;
@@ -86,8 +83,7 @@ public class TestOzoneManagerRestart {
         .setOmId(omId)
         .build();
     cluster.waitForClusterToBeReady();
-    userArgs = new UserArgs(null, OzoneUtils.getRequestID(),
-        null, null, null, null);
+
   }
 
   /**

+ 5 - 23
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneDatanodeShell.java

@@ -17,13 +17,10 @@
  */
 package org.apache.hadoop.ozone.ozShell;
 
-import com.google.common.base.Strings;
-
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.PrintStream;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.List;
 
 import org.apache.hadoop.fs.FileUtil;
@@ -31,19 +28,19 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.client.rest.RestClient;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
 import org.apache.hadoop.test.GenericTestUtils;
+
+import com.google.common.base.Strings;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
+import static org.junit.Assert.fail;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import picocli.CommandLine;
@@ -53,13 +50,9 @@ import picocli.CommandLine.ParameterException;
 import picocli.CommandLine.ParseResult;
 import picocli.CommandLine.RunLast;
 
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
-import static org.junit.Assert.fail;
-
 /**
 * This test class is for testing the Ozone datanode shell commands.
  */
-@RunWith(value = Parameterized.class)
 public class TestOzoneDatanodeShell {
 
   private static final Logger LOG =
@@ -81,17 +74,6 @@ public class TestOzoneDatanodeShell {
   private static final PrintStream OLD_OUT = System.out;
   private static final PrintStream OLD_ERR = System.err;
 
-  @Parameterized.Parameters
-  public static Collection<Object[]> clientProtocol() {
-    Object[][] params = new Object[][]{
-        {RpcClient.class},
-        {RestClient.class}};
-    return Arrays.asList(params);
-  }
-
-  @Parameterized.Parameter
-  @SuppressWarnings("visibilitymodifier")
-  public Class clientProtocol;
   /**
    * Create a MiniOzoneCluster for testing, using the distributed Ozone
    * handler type.
@@ -103,7 +85,7 @@ public class TestOzoneDatanodeShell {
     conf = new OzoneConfiguration();
 
     String path = GenericTestUtils.getTempPath(
-        TestOzoneShell.class.getSimpleName());
+        TestOzoneDatanodeShell.class.getSimpleName());
     baseDir = new File(path);
     baseDir.mkdirs();
 

+ 0 - 1266
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java

@@ -1,1266 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.ozShell;
-
-import java.io.ByteArrayOutputStream;
-import java.io.EOFException;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Objects;
-import java.util.Random;
-import java.util.UUID;
-
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdds.cli.MissingSubcommandException;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.tracing.StringCodec;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServicePort;
-import org.apache.hadoop.ozone.web.ozShell.OzoneShell;
-import org.apache.hadoop.ozone.web.ozShell.Shell;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
-import org.apache.hadoop.ozone.web.response.BucketInfo;
-import org.apache.hadoop.ozone.web.response.KeyInfo;
-import org.apache.hadoop.ozone.web.response.VolumeInfo;
-import org.apache.hadoop.ozone.web.utils.JsonUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import com.google.common.base.Strings;
-import org.apache.commons.lang3.RandomStringUtils;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.slf4j.event.Level.TRACE;
-
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import picocli.CommandLine;
-import picocli.CommandLine.ExecutionException;
-import picocli.CommandLine.IExceptionHandler2;
-import picocli.CommandLine.ParameterException;
-import picocli.CommandLine.ParseResult;
-import picocli.CommandLine.RunLast;
-
-/**
- * This test class is for testing the Ozone shell commands.
- */
-public class TestOzoneShell {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(TestOzoneShell.class);
-
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  private static String url;
-  private static File baseDir;
-  private static OzoneConfiguration conf = null;
-  private static MiniOzoneCluster cluster = null;
-  private static ClientProtocol client = null;
-  private static Shell shell = null;
-
-  private final ByteArrayOutputStream out = new ByteArrayOutputStream();
-  private final ByteArrayOutputStream err = new ByteArrayOutputStream();
-  private static final PrintStream OLD_OUT = System.out;
-  private static final PrintStream OLD_ERR = System.err;
-
-  /**
-   * Create a MiniOzoneCluster for testing, using the distributed Ozone
-   * handler type.
-   *
-   * @throws Exception
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-
-    String path = GenericTestUtils.getTempPath(
-        TestOzoneShell.class.getSimpleName());
-    baseDir = new File(path);
-    baseDir.mkdirs();
-
-    shell = new OzoneShell();
-
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(3)
-        .build();
-    conf.setInt(OZONE_REPLICATION, ReplicationFactor.THREE.getValue());
-    conf.setQuietMode(false);
-    client = new RpcClient(conf);
-    cluster.waitForClusterToBeReady();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-
-    if (baseDir != null) {
-      FileUtil.fullyDelete(baseDir, true);
-    }
-  }
-
-  @Before
-  public void setup() {
-    System.setOut(new PrintStream(out));
-    System.setErr(new PrintStream(err));
-    url = "o3://" + getOmAddress();
-  }
-
-  @After
-  public void reset() {
-    // reset stream after each unit test
-    out.reset();
-    err.reset();
-
-    // restore system streams
-    System.setOut(OLD_OUT);
-    System.setErr(OLD_ERR);
-  }
-
-  @Test
-  public void testCreateVolume() throws Exception {
-    LOG.info("Running testCreateVolume");
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    testCreateVolume(volumeName, "");
-    volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    testCreateVolume("/////" + volumeName, "");
-    testCreateVolume("/////", "Volume name is required");
-    testCreateVolume("/////vol/123",
-        "Invalid volume name. Delimiters (/) not allowed in volume name");
-  }
-
-  private void testCreateVolume(String volumeName, String errorMsg)
-      throws Exception {
-    err.reset();
-    String userName = "bilbo";
-    String[] args = new String[] {"volume", "create", url + "/" + volumeName,
-        "--user", userName, "--root"};
-
-    if (Strings.isNullOrEmpty(errorMsg)) {
-      execute(shell, args);
-
-    } else {
-      executeWithError(shell, args, errorMsg);
-      return;
-    }
-
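-    // The shell tolerates leading slashes in the volume name, so strip
-    // them before looking the volume up through the client API.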
-    String truncatedVolumeName =
-        volumeName.substring(volumeName.lastIndexOf('/') + 1);
-    OzoneVolume volumeInfo = client.getVolumeDetails(truncatedVolumeName);
-    assertEquals(truncatedVolumeName, volumeInfo.getName());
-    assertEquals(userName, volumeInfo.getOwner());
-  }
-
-  private void execute(Shell ozoneShell, String[] args) {
-    LOG.info("Executing shell command with args {}", Arrays.asList(args));
-    CommandLine cmd = ozoneShell.getCmd();
-
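-    // Rethrow parse and execution failures instead of letting picocli
-    // print usage and swallow them, so tests can assert on the exception.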
-    IExceptionHandler2<List<Object>> exceptionHandler =
-        new IExceptionHandler2<List<Object>>() {
-          @Override
-          public List<Object> handleParseException(ParameterException ex,
-              String[] args) {
-            throw ex;
-          }
-
-          @Override
-          public List<Object> handleExecutionException(ExecutionException ex,
-              ParseResult parseResult) {
-            throw ex;
-          }
-        };
-    cmd.parseWithHandlers(new RunLast(),
-        exceptionHandler, args);
-  }
-
-  /**
-   * Test to create volume without specifying --user or -u.
-   * @throws Exception
-   */
-  @Test
-  public void testCreateVolumeWithoutUser() throws Exception {
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String[] args = new String[] {"volume", "create", url + "/" + volumeName,
-        "--root"};
-
-    execute(shell, args);
-
-    String truncatedVolumeName =
-        volumeName.substring(volumeName.lastIndexOf('/') + 1);
-    OzoneVolume volumeInfo = client.getVolumeDetails(truncatedVolumeName);
-    assertEquals(truncatedVolumeName, volumeInfo.getName());
-    assertEquals(UserGroupInformation.getCurrentUser().getUserName(),
-        volumeInfo.getOwner());
-  }
-
-  @Test
-  public void testDeleteVolume() throws Exception {
-    LOG.info("Running testDeleteVolume");
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-        .setOwner("bilbo")
-        .setQuota("100TB")
-        .build();
-    client.createVolume(volumeName, volumeArgs);
-    OzoneVolume volume = client.getVolumeDetails(volumeName);
-    assertNotNull(volume);
-
-    String[] args = new String[] {"volume", "delete", url + "/" + volumeName};
-    execute(shell, args);
-    String output = out.toString();
-    assertTrue(output.contains("Volume " + volumeName + " is deleted"));
-
-    // verify if volume has been deleted
-    try {
-      client.getVolumeDetails(volumeName);
-      fail("Get volume call should have thrown.");
-    } catch (OMException e) {
-      Assert.assertEquals(VOLUME_NOT_FOUND, e.getResult());
-    }
-
-
-    volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    volumeArgs = VolumeArgs.newBuilder()
-        .setOwner("bilbo")
-        .setQuota("100TB")
-        .build();
-    client.createVolume(volumeName, volumeArgs);
-    volume = client.getVolumeDetails(volumeName);
-    assertNotNull(volume);
-
-    //volumeName prefixed with /
-    String volumeNameWithSlashPrefix = "/" + volumeName;
-    args = new String[] {"volume", "delete",
-        url + "/" + volumeNameWithSlashPrefix};
-    execute(shell, args);
-    output = out.toString();
-    assertTrue(output.contains("Volume " + volumeName + " is deleted"));
-
-    // verify if volume has been deleted
-    try {
-      client.getVolumeDetails(volumeName);
-      fail("Get volume call should have thrown.");
-    } catch (OMException e) {
-      Assert.assertEquals(VOLUME_NOT_FOUND, e.getResult());
-    }
-  }
-
-  @Test
-  public void testInfoVolume() throws Exception {
-    LOG.info("Running testInfoVolume");
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-        .setOwner("bilbo")
-        .setQuota("100TB")
-        .build();
-    client.createVolume(volumeName, volumeArgs);
-
-    //volumeName supplied as-is
-    String[] args = new String[] {"volume", "info", url + "/" + volumeName};
-    execute(shell, args);
-
-    String output = out.toString();
-    assertTrue(output.contains(volumeName));
-    assertTrue(output.contains("createdOn")
-        && output.contains(OzoneConsts.OZONE_TIME_ZONE));
-
-    //volumeName prefixed with /
-    String volumeNameWithSlashPrefix = "/" + volumeName;
-    args = new String[] {"volume", "info",
-        url + "/" + volumeNameWithSlashPrefix};
-    execute(shell, args);
-
-    output = out.toString();
-    assertTrue(output.contains(volumeName));
-    assertTrue(output.contains("createdOn")
-        && output.contains(OzoneConsts.OZONE_TIME_ZONE));
-
-    // test infoVolume with invalid volume name
-    args = new String[] {"volume", "info",
-        url + "/" + volumeName + "/invalid-name"};
-    executeWithError(shell, args, "Invalid volume name. " +
-        "Delimiters (/) not allowed in volume name");
-
-    // get info for non-exist volume
-    args = new String[] {"volume", "info", url + "/invalid-volume"};
-    executeWithError(shell, args, VOLUME_NOT_FOUND);
-  }
-
-  @Test
-  public void testShellIncompleteCommand() throws Exception {
-    LOG.info("Running testShellIncompleteCommand");
-    String expectedError = "Incomplete command";
-    String[] args = new String[] {}; //executing 'ozone sh'
-
-    executeWithError(shell, args, expectedError);
-
-    args = new String[] {"volume"}; //executing 'ozone sh volume'
-    executeWithError(shell, args, MissingSubcommandException.class,
-        expectedError);
-
-    args = new String[] {"bucket"}; //executing 'ozone sh bucket'
-    executeWithError(shell, args, MissingSubcommandException.class,
-        expectedError);
-
-    args = new String[] {"key"}; //executing 'ozone sh key'
-    executeWithError(shell, args, MissingSubcommandException.class,
-        expectedError);
-  }
-
-  @Test
-  public void testUpdateVolume() throws Exception {
-    LOG.info("Running testUpdateVolume");
-    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
-    String userName = "bilbo";
-    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-        .setOwner("bilbo")
-        .setQuota("100TB")
-        .build();
-    client.createVolume(volumeName, volumeArgs);
-    OzoneVolume vol = client.getVolumeDetails(volumeName);
-    assertEquals(userName, vol.getOwner());
-    assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(), vol.getQuota());
-
-    String[] args = new String[] {"volume", "update", url + "/" + volumeName,
-        "--quota", "500MB"};
-    execute(shell, args);
-    vol = client.getVolumeDetails(volumeName);
-    assertEquals(userName, vol.getOwner());
-    assertEquals(OzoneQuota.parseQuota("500MB").sizeInBytes(), vol.getQuota());
-
-    String newUser = "new-user";
-    args = new String[] {"volume", "update", url + "/" + volumeName,
-        "--user", newUser};
-    execute(shell, args);
-    vol = client.getVolumeDetails(volumeName);
-    assertEquals(newUser, vol.getOwner());
-
-    //volume with / prefix
-    String volumeWithPrefix = "/" + volumeName;
-    String newUser2 = "new-user2";
-    args = new String[] {"volume", "update", url + "/" + volumeWithPrefix,
-        "--user", newUser2};
-    execute(shell, args);
-    vol = client.getVolumeDetails(volumeName);
-    assertEquals(newUser2, vol.getOwner());
-
-    // test error conditions
-    args = new String[] {"volume", "update", url + "/invalid-volume",
-        "--user", newUser};
-    executeWithError(shell, args, ResultCodes.VOLUME_NOT_FOUND);
-
-    err.reset();
-    args = new String[] {"volume", "update", url + "/invalid-volume",
-        "--quota", "500MB"};
-    executeWithError(shell, args, ResultCodes.VOLUME_NOT_FOUND);
-  }
-
-  /**
-   * Execute command and assert that it fails with the expected OM result
-   * code.
-   */
-  private void executeWithError(Shell ozoneShell, String[] args,
-      OMException.ResultCodes code) {
-
-    try {
-      execute(ozoneShell, args);
-      fail("Exception is expected from command execution " + Arrays
-          .asList(args));
-    } catch (Exception ex) {
-      Assert.assertEquals(OMException.class, ex.getCause().getClass());
-      Assert.assertEquals(code, ((OMException) ex.getCause()).getResult());
-    }
-
-  }
-
-  /**
-   * Execute command and assert that the failure message contains the
-   * expected error; an empty expected error means the command must succeed.
-   */
-  private void executeWithError(Shell ozoneShell, String[] args,
-      String expectedError) {
-    if (Strings.isNullOrEmpty(expectedError)) {
-      execute(ozoneShell, args);
-    } else {
-      try {
-        execute(ozoneShell, args);
-        fail("Exception is expected from command execution " + Arrays
-            .asList(args));
-      } catch (Exception ex) {
-        if (!Strings.isNullOrEmpty(expectedError)) {
-          Throwable exceptionToCheck = ex;
-          if (exceptionToCheck.getCause() != null) {
-            exceptionToCheck = exceptionToCheck.getCause();
-          }
-          Assert.assertTrue(
-              String.format(
-                  "Error of shell code doesn't contain the " +
-                      "exception [%s] in [%s]",
-                  expectedError, exceptionToCheck.getMessage()),
-              exceptionToCheck.getMessage().contains(expectedError));
-        }
-      }
-    }
-  }
-
-  /**
-   * Execute command and assert that it fails with the expected exception
-   * class and that the failure message contains the expected error.
-   */
-  private void executeWithError(Shell ozoneShell, String[] args,
-                                Class expectedException, String expectedError) {
-    if (Strings.isNullOrEmpty(expectedError)) {
-      execute(ozoneShell, args);
-    } else {
-      try {
-        execute(ozoneShell, args);
-        fail("Exception is expected from command execution " + Arrays
-            .asList(args));
-      } catch (Exception ex) {
-        if (!Strings.isNullOrEmpty(expectedError)) {
-          Throwable exceptionToCheck = ex;
-          if (exceptionToCheck.getCause() != null) {
-            exceptionToCheck = exceptionToCheck.getCause();
-          }
-          Assert.assertTrue(
-              String.format(
-                  "Error of shell code doesn't contain the " +
-                      "expectedException [%s] in [%s]",
-                  expectedError, exceptionToCheck.getMessage()),
-              exceptionToCheck.getMessage().contains(expectedError));
-          assertTrue(ex.getClass().getCanonicalName()
-              .equals(expectedException.getCanonicalName()));
-        }
-      }
-    }
-  }
-
-  /**
-   * Execute command and assert that the failure cause is of the expected
-   * class.
-   */
-  private void executeWithError(Shell ozoneShell, String[] args,
-      Class expectedCause) {
-    if (Objects.isNull(expectedCause)) {
-      execute(ozoneShell, args);
-    } else {
-      try {
-        execute(ozoneShell, args);
-        fail("Exception is expected from command execution " + Arrays
-            .asList(args));
-      } catch (Exception ex) {
-        LOG.error("Exception: ", ex);
-        assertTrue(ex.getCause().getClass().getCanonicalName()
-            .equals(expectedCause.getCanonicalName()));
-      }
-    }
-  }
-
-  @Test
-  public void testListVolume() throws Exception {
-    LOG.info("Running testListVolume");
-    String protocol = "rpcclient";
-    String commandOutput, commandError;
-    List<VolumeInfo> volumes;
-    final int volCount = 20;
-    final String user1 = "test-user-a-" + protocol;
-    final String user2 = "test-user-b-" + protocol;
-
-    // Create 20 volumes, 10 for user1 and another 10 for user2.
-    for (int x = 0; x < volCount; x++) {
-      String volumeName;
-      String userName;
-
-      if (x % 2 == 0) {
-        // even x: create volumes test-vol-<protocol>0, 2, ..., 18 for user1
-        userName = user1;
-        volumeName = "test-vol-" + protocol + x;
-      } else {
-        // odd x: create volumes test-vol-<protocol>1, 3, ..., 19 for user2
-        userName = user2;
-        volumeName = "test-vol-" + protocol + x;
-      }
-      VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-          .setOwner(userName)
-          .setQuota("100TB")
-          .build();
-      client.createVolume(volumeName, volumeArgs);
-      OzoneVolume vol = client.getVolumeDetails(volumeName);
-      assertNotNull(vol);
-    }
-
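-    // "volume list" expects the service root URL; a trailing path
-    // component is rejected as an invalid URI.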
-    String[] args = new String[] {"volume", "list", url + "/abcde", "--user",
-        user1, "--length", "100"};
-    executeWithError(shell, args, "Invalid URI");
-
-    err.reset();
-    // test -length option
-    args = new String[] {"volume", "list", url + "/", "--user",
-        user1, "--length", "100"};
-    execute(shell, args);
-    commandOutput = out.toString();
-    volumes = (List<VolumeInfo>) JsonUtils
-        .toJsonList(commandOutput, VolumeInfo.class);
-
-    assertEquals(10, volumes.size());
-    for (VolumeInfo volume : volumes) {
-      assertEquals(volume.getOwner().getName(), user1);
-      assertTrue(volume.getCreatedOn().contains(OzoneConsts.OZONE_TIME_ZONE));
-    }
-
-    out.reset();
-    args = new String[] {"volume", "list", url + "/", "--user",
-        user1, "--length", "2"};
-    execute(shell, args);
-    commandOutput = out.toString();
-    volumes = (List<VolumeInfo>) JsonUtils
-        .toJsonList(commandOutput, VolumeInfo.class);
-
-    assertEquals(2, volumes.size());
-
-    // test --prefix option
-    out.reset();
-    args =
-        new String[] {"volume", "list", url + "/", "--user", user1, "--length",
-            "100", "--prefix", "test-vol-" + protocol + "1"};
-    execute(shell, args);
-    commandOutput = out.toString();
-    volumes = (List<VolumeInfo>) JsonUtils
-        .toJsonList(commandOutput, VolumeInfo.class);
-
-    assertEquals(5, volumes.size());
-    // returned volume names should be test-vol-<protocol>10, 12, ..., 18
-    for (int i = 0; i < volumes.size(); i++) {
-      assertEquals(volumes.get(i).getVolumeName(),
-          "test-vol-" + protocol + ((i + 5) * 2));
-      assertEquals(volumes.get(i).getOwner().getName(), user1);
-    }
-
-    // test -start option
-    out.reset();
-    args =
-        new String[] {"volume", "list", url + "/", "--user", user2, "--length",
-            "100", "--start", "test-vol-" + protocol + "15"};
-    execute(shell, args);
-    commandOutput = out.toString();
-    volumes = (List<VolumeInfo>) JsonUtils
-        .toJsonList(commandOutput, VolumeInfo.class);
-
-    assertEquals(2, volumes.size());
-
-    assertEquals(volumes.get(0).getVolumeName(), "test-vol-" + protocol + "17");
-    assertEquals(volumes.get(1).getVolumeName(), "test-vol-" + protocol + "19");
-    assertEquals(volumes.get(0).getOwner().getName(), user2);
-    assertEquals(volumes.get(1).getOwner().getName(), user2);
-
-    // test error conditions
-    err.reset();
-    args = new String[] {"volume", "list", url + "/", "--user",
-        user2, "--length", "-1"};
-    executeWithError(shell, args, "the length should be a positive number");
-
-    err.reset();
-    args = new String[] {"volume", "list", url + "/", "--user",
-        user2, "--length", "invalid-length"};
-    executeWithError(shell, args, "Invalid value for option " +
-        "'--length': 'invalid-length' is not an int");
-  }
-
-  @Test
-  public void testCreateBucket() throws Exception {
-    LOG.info("Running testCreateBucket");
-    OzoneVolume vol = createVolume();
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    String[] args = new String[] {"bucket", "create",
-        url + "/" + vol.getName() + "/" + bucketName};
-
-    execute(shell, args);
-    OzoneBucket bucketInfo = vol.getBucket(bucketName);
-    assertEquals(vol.getName(),
-        bucketInfo.getVolumeName());
-    assertEquals(bucketName, bucketInfo.getName());
-
-    // test create a bucket in a non-exist volume
-    args = new String[] {"bucket", "create",
-        url + "/invalid-volume/" + bucketName};
-    executeWithError(shell, args, VOLUME_NOT_FOUND);
-
-    // test createBucket with invalid bucket name
-    args = new String[] {"bucket", "create",
-        url + "/" + vol.getName() + "/" + bucketName + "/invalid-name"};
-    executeWithError(shell, args,
-        "Invalid bucket name. Delimiters (/) not allowed in bucket name");
-  }
-
-  @Test
-  public void testDeleteBucket() throws Exception {
-    LOG.info("Running testDeleteBucket");
-    OzoneVolume vol = createVolume();
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    vol.createBucket(bucketName);
-    OzoneBucket bucketInfo = vol.getBucket(bucketName);
-    assertNotNull(bucketInfo);
-
-    String[] args = new String[] {"bucket", "delete",
-        url + "/" + vol.getName() + "/" + bucketName};
-    execute(shell, args);
-
-    // verify if bucket has been deleted in volume
-    try {
-      vol.getBucket(bucketName);
-      fail("Get bucket should have thrown.");
-    } catch (OMException e) {
-      Assert.assertEquals(BUCKET_NOT_FOUND, e.getResult());
-    }
-
-    // test delete bucket in a non-exist volume
-    args = new String[] {"bucket", "delete",
-        url + "/invalid-volume" + "/" + bucketName};
-    executeWithError(shell, args, VOLUME_NOT_FOUND);
-
-    err.reset();
-    // test delete non-exist bucket
-    args = new String[] {"bucket", "delete",
-        url + "/" + vol.getName() + "/invalid-bucket"};
-    executeWithError(shell, args, BUCKET_NOT_FOUND);
-  }
-
-  @Test
-  public void testInfoBucket() throws Exception {
-    LOG.info("Running testInfoBucket");
-    OzoneVolume vol = createVolume();
-    String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
-    vol.createBucket(bucketName);
-
-    String[] args = new String[] {"bucket", "info",
-        url + "/" + vol.getName() + "/" + bucketName};
-    execute(shell, args);
-
-    String output = out.toString();
-    assertTrue(output.contains(bucketName));
-    assertTrue(output.contains("createdOn")
-        && output.contains(OzoneConsts.OZONE_TIME_ZONE));
-
-    // test infoBucket with invalid bucket name
-    args = new String[] {"bucket", "info",
-        url + "/" + vol.getName() + "/" + bucketName + "/invalid-name"};
-    executeWithError(shell, args,
-        "Invalid bucket name. Delimiters (/) not allowed in bucket name");
-
-    // test get info from a non-exist bucket
-    args = new String[] {"bucket", "info",
-        url + "/" + vol.getName() + "/invalid-bucket" + bucketName};
-    executeWithError(shell, args,
-        ResultCodes.BUCKET_NOT_FOUND);
-  }
-
-  @Test
-  public void testListBucket() throws Exception {
-    LOG.info("Running testListBucket");
-    List<BucketInfo> buckets;
-    String commandOutput;
-    int bucketCount = 11;
-    OzoneVolume vol = createVolume();
-
-    List<String> bucketNames = new ArrayList<>();
-    // create bucket from test-bucket0 to test-bucket10
-    for (int i = 0; i < bucketCount; i++) {
-      String name = "test-bucket" + i;
-      bucketNames.add(name);
-      vol.createBucket(name);
-      OzoneBucket bucket = vol.getBucket(name);
-      assertNotNull(bucket);
-    }
-
-    // test listBucket with invalid volume name
-    String[] args = new String[] {"bucket", "list",
-        url + "/" + vol.getName() + "/invalid-name"};
-    executeWithError(shell, args, "Invalid volume name. " +
-        "Delimiters (/) not allowed in volume name");
-
-    // test -length option
-    args = new String[] {"bucket", "list",
-        url + "/" + vol.getName(), "--length", "100"};
-    execute(shell, args);
-    commandOutput = out.toString();
-    buckets = (List<BucketInfo>) JsonUtils.toJsonList(commandOutput,
-        BucketInfo.class);
-
-    assertEquals(11, buckets.size());
-    // sort bucket names since the returned buckets aren't in creation order
-    Collections.sort(bucketNames);
-    // returned bucket names should be [test-bucket0, test-bucket1,
-    // test-bucket10, test-bucket2, ..., test-bucket9]
-    for (int i = 0; i < buckets.size(); i++) {
-      assertEquals(buckets.get(i).getBucketName(), bucketNames.get(i));
-      assertEquals(buckets.get(i).getVolumeName(), vol.getName());
-      assertTrue(buckets.get(i).getCreatedOn()
-          .contains(OzoneConsts.OZONE_TIME_ZONE));
-    }
-
-    out.reset();
-    args = new String[] {"bucket", "list", url + "/" + vol.getName(),
-        "--length", "3"};
-    execute(shell, args);
-    commandOutput = out.toString();
-    buckets = (List<BucketInfo>) JsonUtils.toJsonList(commandOutput,
-        BucketInfo.class);
-
-    assertEquals(3, buckets.size());
-    // returned bucket names should be [test-bucket0,
-    // test-bucket1, test-bucket10]
-    assertEquals(buckets.get(0).getBucketName(), "test-bucket0");
-    assertEquals(buckets.get(1).getBucketName(), "test-bucket1");
-    assertEquals(buckets.get(2).getBucketName(), "test-bucket10");
-
-    // test --prefix option
-    out.reset();
-    args = new String[] {"bucket", "list", url + "/" + vol.getName(),
-        "--length", "100", "--prefix", "test-bucket1"};
-    execute(shell, args);
-    commandOutput = out.toString();
-    buckets = (List<BucketInfo>) JsonUtils.toJsonList(commandOutput,
-        BucketInfo.class);
-
-    assertEquals(2, buckets.size());
-    // returned bucket names should be [test-bucket1, test-bucket10]
-    assertEquals(buckets.get(0).getBucketName(), "test-bucket1");
-    assertEquals(buckets.get(1).getBucketName(), "test-bucket10");
-
-    // test -start option
-    out.reset();
-    args = new String[] {"bucket", "list", url + "/" + vol.getName(),
-        "--length", "100", "--start", "test-bucket7"};
-    execute(shell, args);
-    commandOutput = out.toString();
-    buckets = (List<BucketInfo>) JsonUtils.toJsonList(commandOutput,
-        BucketInfo.class);
-
-    assertEquals(2, buckets.size());
-    assertEquals(buckets.get(0).getBucketName(), "test-bucket8");
-    assertEquals(buckets.get(1).getBucketName(), "test-bucket9");
-
-    // test error conditions
-    err.reset();
-    args = new String[] {"bucket", "list", url + "/" + vol.getName(),
-        "--length", "-1"};
-    executeWithError(shell, args, "the length should be a positive number");
-  }
-
-  @Test
-  public void testPutKey() throws Exception {
-    LOG.info("Running testPutKey");
-    OzoneBucket bucket = createBucket();
-    String volumeName = bucket.getVolumeName();
-    String bucketName = bucket.getName();
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-
-    String[] args = new String[] {"key", "put",
-        url + "/" + volumeName + "/" + bucketName + "/" + keyName,
-        createTmpFile()};
-    execute(shell, args);
-
-    OzoneKey keyInfo = bucket.getKey(keyName);
-    assertEquals(keyName, keyInfo.getName());
-
-    // test put key in a non-exist bucket
-    args = new String[] {"key", "put",
-        url + "/" + volumeName + "/invalid-bucket/" + keyName,
-        createTmpFile()};
-    executeWithError(shell, args, BUCKET_NOT_FOUND);
-  }
-
-  @Test
-  public void testGetKey() throws Exception {
-    GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer
-        .captureLogs(StringCodec.LOG);
-    GenericTestUtils.setLogLevel(StringCodec.LOG, TRACE);
-    LOG.info("Running testGetKey");
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    OzoneBucket bucket = createBucket();
-    String volumeName = bucket.getVolumeName();
-    String bucketName = bucket.getName();
-
-    String dataStr = "test-data";
-    OzoneOutputStream keyOutputStream =
-        bucket.createKey(keyName, dataStr.length());
-    keyOutputStream.write(dataStr.getBytes());
-    keyOutputStream.close();
-    assertFalse("put key without malformed tracing",
-        logs.getOutput().contains("MalformedTracerStateString"));
-    logs.clearOutput();
-
-    String tmpPath = baseDir.getAbsolutePath() + "/testfile-"
-        + UUID.randomUUID().toString();
-    String[] args = new String[] {"key", "get",
-        url + "/" + volumeName + "/" + bucketName + "/" + keyName,
-        tmpPath};
-    execute(shell, args);
-    assertFalse("get key without malformed tracing",
-        logs.getOutput().contains("MalformedTracerStateString"));
-    logs.clearOutput();
-
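-    // Read the downloaded file back and verify it matches the uploaded data.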
-    byte[] dataBytes = new byte[dataStr.length()];
-    try (FileInputStream randFile = new FileInputStream(new File(tmpPath))) {
-      randFile.read(dataBytes);
-    }
-    assertEquals(dataStr, DFSUtil.bytes2String(dataBytes));
-
-    tmpPath = baseDir.getAbsolutePath() + File.separatorChar + keyName;
-    args = new String[] {"key", "get",
-        url + "/" + volumeName + "/" + bucketName + "/" + keyName,
-        baseDir.getAbsolutePath()};
-    execute(shell, args);
-
-    dataBytes = new byte[dataStr.length()];
-    try (FileInputStream randFile = new FileInputStream(new File(tmpPath))) {
-      randFile.read(dataBytes);
-    }
-    assertEquals(dataStr, DFSUtil.bytes2String(dataBytes));
-  }
-
-  @Test
-  public void testDeleteKey() throws Exception {
-    LOG.info("Running testDeleteKey");
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    OzoneBucket bucket = createBucket();
-    String volumeName = bucket.getVolumeName();
-    String bucketName = bucket.getName();
-    String dataStr = "test-data";
-    OzoneOutputStream keyOutputStream =
-        bucket.createKey(keyName, dataStr.length());
-    keyOutputStream.write(dataStr.getBytes());
-    keyOutputStream.close();
-
-    OzoneKey keyInfo = bucket.getKey(keyName);
-    assertEquals(keyName, keyInfo.getName());
-
-    String[] args = new String[] {"key", "delete",
-        url + "/" + volumeName + "/" + bucketName + "/" + keyName};
-    execute(shell, args);
-
-    // verify if key has been deleted in the bucket
-    assertKeyNotExists(bucket, keyName);
-
-    // test delete key in a non-exist bucket
-    args = new String[] {"key", "delete",
-        url + "/" + volumeName + "/invalid-bucket/" + keyName};
-    executeWithError(shell, args, BUCKET_NOT_FOUND);
-
-    err.reset();
-    // test delete a non-exist key in bucket
-    args = new String[] {"key", "delete",
-        url + "/" + volumeName + "/" + bucketName + "/invalid-key"};
-    executeWithError(shell, args, KEY_NOT_FOUND);
-  }
-
-  @Test
-  public void testRenameKey() throws Exception {
-    LOG.info("Running testRenameKey");
-    OzoneBucket bucket = createBucket();
-    OzoneKey oldKey = createTestKey(bucket);
-
-    String oldName = oldKey.getName();
-    String newName = oldName + ".new";
-    String[] args = new String[]{
-        "key", "rename",
-        String.format("%s/%s/%s",
-            url, oldKey.getVolumeName(), oldKey.getBucketName()),
-        oldName,
-        newName
-    };
-    execute(shell, args);
-
-    OzoneKey newKey = bucket.getKey(newName);
-    assertEquals(oldKey.getCreationTime(), newKey.getCreationTime());
-    assertEquals(oldKey.getDataSize(), newKey.getDataSize());
-    assertKeyNotExists(bucket, oldName);
-  }
-
-  @Test
-  public void testInfoKeyDetails() throws Exception {
-    LOG.info("Running testInfoKey");
-    String keyName = "key" + RandomStringUtils.randomNumeric(5);
-    OzoneBucket bucket = createBucket();
-    String volumeName = bucket.getVolumeName();
-    String bucketName = bucket.getName();
-    String dataStr = "test-data";
-    OzoneOutputStream keyOutputStream =
-        bucket.createKey(keyName, dataStr.length());
-    keyOutputStream.write(dataStr.getBytes());
-    keyOutputStream.close();
-
-    String[] args = new String[] {"key", "info",
-        url + "/" + volumeName + "/" + bucketName + "/" + keyName};
-
-    // verify the response output
-    execute(shell, args);
-    String output = out.toString();
-
-    assertTrue(output.contains(keyName));
-    assertTrue(
-        output.contains("createdOn") && output.contains("modifiedOn") && output
-            .contains(OzoneConsts.OZONE_TIME_ZONE));
-    assertTrue(
-        output.contains("containerID") && output.contains("localID") && output
-            .contains("length") && output.contains("offset"));
-    // reset stream
-    out.reset();
-    err.reset();
-
-    // get the info of a non-exist key
-    args = new String[] {"key", "info",
-        url + "/" + volumeName + "/" + bucketName + "/invalid-key"};
-
-    // verify the response output
-    // get the non-exist key info should be failed
-    executeWithError(shell, args, KEY_NOT_FOUND);
-  }
-
-  @Test
-  public void testInfoDirKey() throws Exception {
-    LOG.info("Running testInfoKey for Dir Key");
-    String dirKeyName = "test/";
-    String keyNameOnly = "test";
-    OzoneBucket bucket = createBucket();
-    String volumeName = bucket.getVolumeName();
-    String bucketName = bucket.getName();
-    String dataStr = "test-data";
-    OzoneOutputStream keyOutputStream =
-        bucket.createKey(dirKeyName, dataStr.length());
-    keyOutputStream.write(dataStr.getBytes());
-    keyOutputStream.close();
-    String[] args = new String[] {"key", "info",
-        url + "/" + volumeName + "/" + bucketName + "/" + dirKeyName};
-    // verify the response output
-    execute(shell, args);
-    String output = out.toString();
-    assertTrue(output.contains(dirKeyName));
-    assertTrue(output.contains("createdOn") &&
-                output.contains("modifiedOn") &&
-                output.contains(OzoneConsts.OZONE_TIME_ZONE));
-    args = new String[] {"key", "info",
-        url + "/" + volumeName + "/" + bucketName + "/" + keyNameOnly};
-    executeWithError(shell, args, KEY_NOT_FOUND);
-    out.reset();
-    err.reset();
-  }
-
-  @Test
-  public void testListKey() throws Exception {
-    LOG.info("Running testListKey");
-    String commandOutput;
-    List<KeyInfo> keys;
-    int keyCount = 11;
-    OzoneBucket bucket = createBucket();
-    String volumeName = bucket.getVolumeName();
-    String bucketName = bucket.getName();
-
-    String keyName;
-    List<String> keyNames = new ArrayList<>();
-    for (int i = 0; i < keyCount; i++) {
-      keyName = "test-key" + i;
-      keyNames.add(keyName);
-      String dataStr = "test-data";
-      OzoneOutputStream keyOutputStream =
-          bucket.createKey(keyName, dataStr.length());
-      keyOutputStream.write(dataStr.getBytes());
-      keyOutputStream.close();
-    }
-
-    // test listKey with invalid bucket name
-    String[] args = new String[] {"key", "list",
-        url + "/" + volumeName + "/" + bucketName + "/invalid-name"};
-    executeWithError(shell, args, "Invalid bucket name. " +
-        "Delimiters (/) not allowed in bucket name");
-
-    // test -length option
-    args = new String[] {"key", "list",
-        url + "/" + volumeName + "/" + bucketName, "--length", "100"};
-    execute(shell, args);
-    commandOutput = out.toString();
-    keys = (List<KeyInfo>) JsonUtils.toJsonList(commandOutput,
-        KeyInfo.class);
-
-    assertEquals(11, keys.size());
-    // sort key names since the returned keys aren't in creation order
-    Collections.sort(keyNames);
-    // returned key names should be [test-key0, test-key1,
-    // test-key10, test-key2, ..., test-key9]
-    for (int i = 0; i < keys.size(); i++) {
-      assertEquals(keys.get(i).getKeyName(), keyNames.get(i));
-      // verify the creation/modification time of key
-      assertTrue(keys.get(i).getCreatedOn()
-          .contains(OzoneConsts.OZONE_TIME_ZONE));
-      assertTrue(keys.get(i).getModifiedOn()
-          .contains(OzoneConsts.OZONE_TIME_ZONE));
-    }
-
-    out.reset();
-    String msgText = "Listing first 3 entries of the result. " +
-        "Use --length (-l) to override max returned keys.";
-    args =
-        new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName,
-            "--length", "3"};
-    execute(shell, args);
-    commandOutput = out.toString();
-    assertTrue("Expecting output to start with " + msgText,
-        commandOutput.contains(msgText));
-    commandOutput = commandOutput.replace(msgText, "");
-    keys = (List<KeyInfo>) JsonUtils.toJsonList(commandOutput,
-        KeyInfo.class);
-
-    assertEquals(3, keys.size());
-    // returned key names should be [test-key0, test-key1, test-key10]
-    assertEquals(keys.get(0).getKeyName(), "test-key0");
-    assertEquals(keys.get(1).getKeyName(), "test-key1");
-    assertEquals(keys.get(2).getKeyName(), "test-key10");
-
-    // test --prefix option
-    out.reset();
-    args =
-        new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName,
-            "--length", "100", "--prefix", "test-key1"};
-    execute(shell, args);
-    commandOutput = out.toString();
-    keys = (List<KeyInfo>) JsonUtils.toJsonList(commandOutput,
-        KeyInfo.class);
-
-    assertEquals(2, keys.size());
-    // returned key names should be [test-key1, test-key10]
-    assertEquals(keys.get(0).getKeyName(), "test-key1");
-    assertEquals(keys.get(1).getKeyName(), "test-key10");
-
-    // test -start option
-    out.reset();
-    args =
-        new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName,
-            "--length", "100", "--start", "test-key7"};
-    execute(shell, args);
-    commandOutput = out.toString();
-    keys = (List<KeyInfo>) JsonUtils.toJsonList(commandOutput,
-        KeyInfo.class);
-
-    assertEquals(2, keys.size());
-    assertEquals(keys.get(0).getKeyName(), "test-key8");
-    assertEquals(keys.get(1).getKeyName(), "test-key9");
-
-    // test error conditions
-    err.reset();
-    args =
-        new String[] {"key", "list", url + "/" + volumeName + "/" + bucketName,
-            "--length", "-1"};
-    executeWithError(shell, args, "the length should be a positive number");
-  }
-
-  private OzoneVolume createVolume() throws OzoneException, IOException {
-    String volumeName = RandomStringUtils.randomNumeric(5) + "volume";
-    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-        .setOwner("bilbo")
-        .setQuota("100TB")
-        .build();
-    try {
-      client.createVolume(volumeName, volumeArgs);
-    } catch (Exception ex) {
-      Assert.assertEquals("PartialGroupNameException",
-          ex.getCause().getClass().getSimpleName());
-    }
-    OzoneVolume volume = client.getVolumeDetails(volumeName);
-
-    return volume;
-  }
-
-  private OzoneBucket createBucket() throws OzoneException, IOException {
-    OzoneVolume vol = createVolume();
-    String bucketName = RandomStringUtils.randomNumeric(5) + "bucket";
-    vol.createBucket(bucketName);
-    OzoneBucket bucketInfo = vol.getBucket(bucketName);
-
-    return bucketInfo;
-  }
-
-  private OzoneKey createTestKey(OzoneBucket bucket) throws IOException {
-    String key = "key" + RandomStringUtils.randomNumeric(5);
-    String value = "value";
-
-    OzoneOutputStream keyOutputStream =
-        bucket.createKey(key, value.length());
-    keyOutputStream.write(value.getBytes());
-    keyOutputStream.close();
-
-    return bucket.getKey(key);
-  }
-
-  @Test
-  public void testTokenCommands() throws Exception {
-    String omAdd = "--set=" + OZONE_OM_ADDRESS_KEY + "=" + getOmAddress();
-    List<String[]> shellCommands = new ArrayList<>(4);
-    // Case 1: Execution will fail when security is disabled.
-    shellCommands.add(new String[]{omAdd, "token", "get"});
-    shellCommands.add(new String[]{omAdd, "token", "renew"});
-    shellCommands.add(new String[]{omAdd, "token", "cancel"});
-    shellCommands.add(new String[]{omAdd, "token", "print"});
-    shellCommands.forEach(cmd -> execute(cmd, "Error:Token operations " +
-        "work only"));
-
-    String security = "-D=" + OZONE_SECURITY_ENABLED_KEY + "=true";
-
-    // Case 2: Execution of get token will fail when security is enabled but
-    // OzoneManager is not setup correctly.
-    execute(new String[]{omAdd, security,
-        "token", "get"}, "Error: Get delegation token operation failed.");
-
-    // Clear all commands.
-    shellCommands.clear();
-
-    // Case 3: Execution of renew/cancel/print token will fail as token file
-    // doesn't exist.
-    shellCommands.add(new String[]{omAdd, security, "token", "renew"});
-    shellCommands.add(new String[]{omAdd, security, "token", "cancel"});
-    shellCommands.add(new String[]{omAdd, security, "token", "print"});
-    shellCommands.forEach(cmd -> execute(cmd, "token " +
-        "operation failed as token file:"));
-
-    // Create corrupt token file.
-    File testPath = GenericTestUtils.getTestDir();
-    Files.createDirectories(testPath.toPath());
-    Path tokenFile = Paths.get(testPath.toString(), "token.txt");
-    String question = RandomStringUtils.random(100);
-    Files.write(tokenFile, question.getBytes());
-
-    // Clear all commands.
-    shellCommands.clear();
-    String file = "-t=" + tokenFile.toString();
-
-    // Case 4: Execution of renew/cancel/print token will fail if token file
-    // is corrupt.
-    shellCommands.add(new String[]{omAdd, security, "token", "renew", file});
-    shellCommands.add(new String[]{omAdd, security, "token",
-        "cancel", file});
-    shellCommands.add(new String[]{omAdd, security, "token", "print", file});
-    shellCommands.forEach(cmd -> executeWithError(shell, cmd,
-        EOFException.class));
-  }
-
-  private void execute(String[] cmd, String msg) {
-    // verify the response output
-    execute(shell, cmd);
-    String output = err.toString();
-    assertTrue(output.contains(msg));
-    // reset stream
-    out.reset();
-    err.reset();
-  }
-
-  /**
-   * Create a temporary file used for putting key.
-   * @return the created file's path string
-   * @throws Exception
-   */
-  private String createTmpFile() throws Exception {
-    // write a new file that used for putting key
-    File tmpFile = new File(baseDir,
-        "/testfile-" + UUID.randomUUID().toString());
-    FileOutputStream randFile = new FileOutputStream(tmpFile);
-    Random r = new Random();
-    for (int x = 0; x < 10; x++) {
-      char c = (char) (r.nextInt(26) + 'a');
-      randFile.write(c);
-    }
-    randFile.close();
-
-    return tmpFile.getAbsolutePath();
-  }
-
-  private String getOmAddress() {
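-    // Resolve the OM RPC endpoint from the live service list, so the shell
-    // URL follows whatever port the MiniOzoneCluster actually bound.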
-    List<ServiceInfo> services;
-    try {
-      services = cluster.getOzoneManager().getServiceList();
-    } catch (IOException e) {
-      fail("Could not get service list from OM");
-      return null;
-    }
-
-    return services.stream()
-        .filter(a -> HddsProtos.NodeType.OM.equals(a.getNodeType()))
-        .findFirst()
-        .map(s -> s.getServiceAddress(ServicePort.Type.RPC))
-        .orElseThrow(IllegalStateException::new);
-  }
-
-  private static void assertKeyNotExists(OzoneBucket bucket, String keyName)
-      throws IOException {
-    try {
-      bucket.getKey(keyName);
-      fail(String.format("Key %s should not exist, but it does", keyName));
-    } catch (OMException e) {
-      Assert.assertEquals(KEY_NOT_FOUND, e.getResult());
-    }
-  }
-
-}

+ 0 - 221
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java

@@ -1,221 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web;
-
-import static com.google.common.base.Charsets.UTF_8;
-import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
-import static org.apache.hadoop.ozone.OzoneConsts.CHUNK_SIZE;
-import static org.junit.Assert.*;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.client.OzoneQuota;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.junit.rules.Timeout;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.HashMap;
-
-/**
- * End-to-end testing of Ozone REST operations.
- */
-public class TestOzoneRestWithMiniCluster {
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static ClientProtocol client;
-  private static ReplicationFactor replicationFactor = ReplicationFactor.ONE;
-  private static ReplicationType replicationType = ReplicationType.RATIS;
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    client = new RpcClient(conf);
-  }
-
-  @AfterClass
-  public static void shutdown() throws InterruptedException, IOException {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-    client.close();
-  }
-
-  @Test
-  public void testCreateAndGetVolume() throws Exception {
-    createAndGetVolume();
-  }
-
-  @Test
-  public void testCreateAndGetBucket() throws Exception {
-    OzoneVolume volume = createAndGetVolume();
-    createAndGetBucket(volume);
-  }
-
-  @Test
-  public void testPutAndGetKey() throws Exception {
-    String keyName = nextId("key");
-    String keyData = nextId("data");
-    OzoneVolume volume = createAndGetVolume();
-    OzoneBucket bucket = createAndGetBucket(volume);
-    putKey(bucket, keyName, keyData);
-  }
-
-  private void putKey(OzoneBucket bucket, String keyName, String keyData)
-      throws IOException {
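-    // Write the key, then read it back and verify the round-tripped bytes.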
-    try (
-        OzoneOutputStream ozoneOutputStream = bucket
-            .createKey(keyName, 0, replicationType, replicationFactor,
-                new HashMap<>());
-        InputStream inputStream = IOUtils.toInputStream(keyData, UTF_8)) {
-      IOUtils.copy(inputStream, ozoneOutputStream);
-    }
-    try (
-        InputStream inputStream = IOUtils.toInputStream(keyData, UTF_8);
-        OzoneInputStream ozoneInputStream = bucket.readKey(keyName)) {
-      assertTrue(IOUtils.contentEquals(ozoneInputStream, inputStream));
-    }
-  }
-
-  @Test
-  public void testPutAndGetEmptyKey() throws Exception {
-    String keyName = nextId("key");
-    String keyData = "";
-    OzoneVolume volume = createAndGetVolume();
-    OzoneBucket bucket = createAndGetBucket(volume);
-    putKey(bucket, keyName, keyData);
-  }
-
-  @Test
-  public void testPutAndGetMultiChunkKey() throws Exception {
-    String keyName = nextId("key");
-    int keyDataLen = 3 * CHUNK_SIZE;
-    String keyData = buildKeyData(keyDataLen);
-    OzoneVolume volume = createAndGetVolume();
-    OzoneBucket bucket = createAndGetBucket(volume);
-    putKey(bucket, keyName, keyData);
-  }
-
-  @Test
-  public void testPutAndGetMultiChunkKeyLastChunkPartial() throws Exception {
-    String keyName = nextId("key");
-    int keyDataLen = (int)(2.5 * CHUNK_SIZE);
-    String keyData = buildKeyData(keyDataLen);
-    OzoneVolume volume = createAndGetVolume();
-    OzoneBucket bucket = createAndGetBucket(volume);
-    putKey(bucket, keyName, keyData);
-  }
-
-  @Test
-  public void testReplaceKey() throws Exception {
-    String keyName = nextId("key");
-    int keyDataLen = (int)(2.5 * CHUNK_SIZE);
-    String keyData = buildKeyData(keyDataLen);
-    OzoneVolume volume = createAndGetVolume();
-    OzoneBucket bucket = createAndGetBucket(volume);
-    putKey(bucket, keyName, keyData);
-
-    // Replace key with data consisting of fewer chunks.
-    keyDataLen = (int)(1.5 * CHUNK_SIZE);
-    keyData = buildKeyData(keyDataLen);
-    putKey(bucket, keyName, keyData);
-
-    // Replace key with data consisting of more chunks.
-    keyDataLen = (int)(3.5 * CHUNK_SIZE);
-    keyData = buildKeyData(keyDataLen);
-    putKey(bucket, keyName, keyData);
-  }
-
-  private OzoneVolume createAndGetVolume() throws IOException {
-    String volumeName = nextId("volume");
-    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-        .setOwner("bilbo")
-        .setQuota("100TB")
-        .setAdmin("hdfs")
-        .build();
-    client.createVolume(volumeName, volumeArgs);
-    OzoneVolume volume = client.getVolumeDetails(volumeName);
-    assertNotNull(volume);
-    assertEquals(volumeName, volume.getName());
-    assertEquals("bilbo", volume.getOwner());
-    assertNotNull(volume.getQuota());
-    assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(),
-        volume.getQuota());
-    return volume;
-  }
-
-  private OzoneBucket createAndGetBucket(OzoneVolume vol) throws IOException {
-    String bucketName = nextId("bucket");
-    vol.createBucket(bucketName);
-    OzoneBucket bucket = vol.getBucket(bucketName);
-    assertNotNull(bucket);
-    assertEquals(bucketName, bucket.getName());
-    return bucket;
-  }
-
-  /**
-   * Creates sample key data of the specified length.  The data is a string of
-   * printable ASCII characters.  This makes it easy to debug through visual
-   * inspection of the chunk files if a test fails.
-   *
-   * @param keyDataLen desired length of key data
-   * @return string of printable ASCII characters of the specified length
-   */
-  private static String buildKeyData(int keyDataLen) {
-    return new String(dataset(keyDataLen, 33, 93), UTF_8);
-  }
-
-  /**
-   * Generates identifiers unique enough for use in tests, so that individual
-   * tests don't collide on each other's data in the shared mini-cluster.
-   *
-   * @param idPrefix prefix to put in front of ID
-   * @return unique ID generated by appending a suffix to the given prefix
-   */
-  private static String nextId(String idPrefix) {
-    return (idPrefix + RandomStringUtils.random(5, true, true)).toLowerCase();
-  }
-}
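
The buildKeyData helper above relies on ContractTestUtils.dataset(len, base, modulo), which fills a buffer with byte values cycling upward from base. A minimal sketch of the equivalent generation, assuming dataset(len, 33, 93) emits (byte) (33 + (i % 93)) and therefore the printable ASCII range '!' through '}' (an assumption about the utility, not a copy of it):

    import java.nio.charset.StandardCharsets;

    // Sketch: equivalent of buildKeyData, assuming dataset(len, 33, 93)
    // cycles bytes as (byte) (33 + (i % 93)), i.e. printable ASCII '!'..'}'.
    static String buildKeyDataSketch(int keyDataLen) {
      byte[] data = new byte[keyDataLen];
      for (int i = 0; i < keyDataLen; i++) {
        data[i] = (byte) (33 + (i % 93));
      }
      return new String(data, StandardCharsets.UTF_8);
    }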

+ 0 - 187
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java

@@ -1,187 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.TestOzoneHelper;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Test;
-import org.junit.Assert;
-
-import org.junit.rules.Timeout;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * Test Ozone volumes in the distributed storage handler scenario.
- */
-public class TestOzoneVolumes extends TestOzoneHelper {
-  private static final org.slf4j.Logger LOG =
-      LoggerFactory.getLogger(TestOzoneVolumes.class);
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  private static MiniOzoneCluster cluster = null;
-  private static int port = 0;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    port = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails()
-        .getPort(DatanodeDetails.Port.Name.REST).getValue();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  /**
-   * Creates Volumes on Ozone Store.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testCreateVolumes() throws IOException {
-    super.testCreateVolumes(port);
-    Assert.assertEquals(0, cluster.getOzoneManager()
-        .getMetrics().getNumVolumeCreateFails());
-  }
-
-  /**
-   * Create Volumes with Quota.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testCreateVolumesWithQuota() throws IOException {
-    super.testCreateVolumesWithQuota(port);
-    Assert.assertEquals(0, cluster.getOzoneManager()
-        .getMetrics().getNumVolumeCreateFails());
-  }
-
-  /**
-   * Create Volumes with Invalid Quota.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testCreateVolumesWithInvalidQuota() throws IOException {
-    super.testCreateVolumesWithInvalidQuota(port);
-    Assert.assertEquals(0, cluster.getOzoneManager()
-        .getMetrics().getNumVolumeCreateFails());
-  }
-
-  /**
-   * To create a volume, a user name must be specified in the OZONE_USER
-   * header. This test verifies that the call fails when no OZONE user name
-   * is supplied.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testCreateVolumesWithInvalidUser() throws IOException {
-    super.testCreateVolumesWithInvalidUser(port);
-    Assert.assertEquals(0, cluster.getOzoneManager()
-        .getMetrics().getNumVolumeCreateFails());
-  }
-
-  /**
-   * Only admins can create volumes in Ozone. This test uses the simple
-   * userauth backend, in which hdfs and root are the admin users.
-   * <p>
-   * This test tries to create a volume as the non-admin user bilbo.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testCreateVolumesWithOutAdminRights() throws IOException {
-    super.testCreateVolumesWithOutAdminRights(port);
-    Assert.assertEquals(0, cluster.getOzoneManager()
-        .getMetrics().getNumVolumeCreateFails());
-  }
-
-  /**
-   * Create a bunch of volumes in a loop.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testCreateVolumesInLoop() throws IOException {
-    super.testCreateVolumesInLoop(port);
-    Assert.assertEquals(0, cluster.getOzoneManager()
-        .getMetrics().getNumVolumeCreateFails());
-  }
-
-  /**
-   * Get volumes owned by the user.
-   *
-   * @throws IOException
-   */
-  @Ignore("Test is ignored for time being, to be enabled after security.")
-  public void testGetVolumesByUser() throws IOException {
-    testGetVolumesByUser(port);
-  }
-
-  /**
-   * Admins can read volumes belonging to other users.
-   *
-   * @throws IOException
-   */
-  @Ignore("Test is ignored for time being, to be enabled after security.")
-  public void testGetVolumesOfAnotherUser() throws IOException {
-    super.testGetVolumesOfAnotherUser(port);
-  }
-
-  /**
-   * Reading volumes that belong to another user should fail when the
-   * caller is not an admin.
-   *
-   * @throws IOException
-   */
-  @Ignore("Test is ignored for time being, to be enabled after security.")
-  public void testGetVolumesOfAnotherUserShouldFail() throws IOException {
-    super.testGetVolumesOfAnotherUserShouldFail(port);
-  }
-}

+ 0 - 118
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java

@@ -1,118 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.util.Time;
-import org.apache.http.HttpResponse;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClients;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import javax.ws.rs.core.HttpHeaders;
-import java.io.IOException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.Locale;
-
-import static java.net.HttpURLConnection.HTTP_CREATED;
-import static org.apache.hadoop.ozone.web.utils.OzoneUtils.getRequestID;
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test Ozone Access through REST protocol.
- */
-public class TestOzoneWebAccess {
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  private static MiniOzoneCluster cluster;
-  private static int port;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   *
-   * Ozone is made active by setting OZONE_ENABLED = true
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    port = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails().getPort(
-            DatanodeDetails.Port.Name.REST).getValue();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  /**
-   * Send a valid Ozone request.
-   *
-   * @throws IOException
-   */
-  @Test
-  public void testOzoneRequest() throws IOException {
-    SimpleDateFormat format =
-        new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
-    CloseableHttpClient client = HttpClients.createDefault();
-    String volumeName = getRequestID().toLowerCase(Locale.US);
-    try {
-      HttpPost httppost = new HttpPost(
-          String.format("http://localhost:%d/%s", port, volumeName));
-
-      httppost.addHeader(Header.OZONE_VERSION_HEADER,
-          Header.OZONE_V1_VERSION_HEADER);
-      httppost.addHeader(HttpHeaders.DATE,
-          format.format(new Date(Time.now())));
-      httppost.addHeader(HttpHeaders.AUTHORIZATION,
-          Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " " +
-              OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-      httppost.addHeader(Header.OZONE_USER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-
-      HttpResponse response = client.execute(httppost);
-      assertEquals(response.toString(), HTTP_CREATED,
-          response.getStatusLine().getStatusCode());
-    } finally {
-      client.close();
-    }
-  }
-}
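
The Header constants used in the request above come from the org.apache.hadoop.ozone.client.rest.headers.Header class, which this commit removes as well. As a hedged reconstruction (the exact strings are recalled from the removed class, so treat them as assumptions), the POST carried roughly these headers:

    // Assumed values of the removed Header constants (illustrative only):
    //   OZONE_VERSION_HEADER               = "x-ozone-version"
    //   OZONE_V1_VERSION_HEADER            = "v1"
    //   OZONE_USER                         = "x-ozone-user"
    //   OZONE_SIMPLE_AUTHENTICATION_SCHEME = "OZONE"
    // With OzoneConsts.OZONE_SIMPLE_HDFS_USER assumed to be "hdfs",
    // the request resolves to approximately:
    //   POST http://localhost:<port>/<volume>
    //   x-ozone-version: v1
    //   Date: <RFC 1123 date>
    //   Authorization: OZONE hdfs
    //   x-ozone-user: hdfs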

+ 0 - 349
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java

@@ -1,349 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.client;
-
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.client.BucketArgs;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.RestClient;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.ozone.web.request.OzoneQuota;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.util.Time;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.text.ParseException;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.TimeoutException;
-import java.util.stream.Collectors;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assume.assumeFalse;
-
-/**
- * Test Ozone Bucket Lifecycle.
- */
-@RunWith(value = Parameterized.class)
-public class TestBuckets {
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  private static MiniOzoneCluster cluster = null;
-  private static ClientProtocol client = null;
-  private static OzoneConfiguration conf;
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> clientProtocol() {
-    Object[][] params = new Object[][] {
-        {RpcClient.class},
-        {RestClient.class}};
-    return Arrays.asList(params);
-  }
-
-  @SuppressWarnings("visibilitymodifier")
-  @Parameterized.Parameter
-  public static Class clientProtocol;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init()
-      throws IOException, URISyntaxException, OzoneException, TimeoutException,
-      InterruptedException {
-    conf = new OzoneConfiguration();
-
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(3)
-        .build();
-    cluster.waitForClusterToBeReady();
-  }
-
-  @Before
-  public void setup() throws Exception {
-    if (clientProtocol.equals(RestClient.class)) {
-      client = new RestClient(conf);
-    } else {
-      client = new RpcClient(conf);
-    }
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testCreateBucket() throws Exception {
-    runTestCreateBucket(client);
-  }
-
-  static void runTestCreateBucket(ClientProtocol protocol)
-      throws IOException {
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-        .setOwner("bilbo")
-        .setQuota("100TB")
-        .setAdmin("hdfs")
-        .build();
-    protocol.createVolume(volumeName, volumeArgs);
-    OzoneVolume vol = protocol.getVolumeDetails(volumeName);
-    String[] acls = {"user:frodo:rw", "user:samwise:rw"};
-
-    // create 10 buckets under same volume
-    for (int x = 0; x < 10; x++) {
-      long currentTime = Time.now();
-      String bucketName = OzoneUtils.getRequestID().toLowerCase();
-
-      List<OzoneAcl> aclList =
-          Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
-              .collect(Collectors.toList());
-      BucketArgs bucketArgs = BucketArgs.newBuilder()
-          .setAcls(aclList)
-          .build();
-      vol.createBucket(bucketName, bucketArgs);
-      OzoneBucket bucket = vol.getBucket(bucketName);
-      assertEquals(bucket.getName(), bucketName);
-
-      // verify the bucket creation time
-      assertTrue((bucket.getCreationTime() / 1000) >= (currentTime / 1000));
-    }
-    protocol.close();
-
-    assertEquals(vol.getName(), volumeName);
-    assertEquals(vol.getAdmin(), "hdfs");
-    assertEquals(vol.getOwner(), "bilbo");
-    assertEquals(vol.getQuota(), OzoneQuota.parseQuota("100TB").sizeInBytes());
-
-    // Test creating a bucket with an invalid bucket name. The
-    // ExpectedException rule is not used because this method is static.
-    try {
-      String invalidBucketName = "#" + OzoneUtils.getRequestID().toLowerCase();
-      vol.createBucket(invalidBucketName);
-      fail("Except the bucket creation to be failed because the"
-          + " bucket name starts with an invalid char #");
-    } catch (Exception e) {
-      assertTrue(e.getMessage()
-          .contains("Bucket or Volume name has an unsupported character : #"));
-    }
-  }
-
-  @Test
-  public void testAddBucketAcls() throws Exception {
-    assumeFalse("Rest Client does not support ACL",
-        clientProtocol.equals(RestClient.class));
-    runTestAddBucketAcls(client);
-  }
-
-  static void runTestAddBucketAcls(ClientProtocol protocol)
-      throws OzoneException, IOException, ParseException {
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-        .setOwner("bilbo")
-        .setQuota("100TB")
-        .setAdmin("hdfs")
-        .build();
-    protocol.createVolume(volumeName, volumeArgs);
-    OzoneVolume vol = protocol.getVolumeDetails(volumeName);
-    String[] acls = {"user:frodo:rw", "user:samwise:rw"};
-    String bucketName = OzoneUtils.getRequestID().toLowerCase();
-    vol.createBucket(bucketName);
-    OzoneBucket bucket = vol.getBucket(bucketName);
-
-    List<OzoneAcl> aclList =
-        Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
-            .collect(Collectors.toList());
-    int numAcls = bucket.getAcls().size();
-    for (OzoneAcl ozoneAcl : aclList) {
-      Assert.assertTrue(bucket.addAcls(ozoneAcl));
-    }
-    OzoneBucket updatedBucket = vol.getBucket(bucketName);
-    assertEquals(updatedBucket.getAcls().size(), 2 + numAcls);
-    // verify that the creation time is still present after the update
-    assertTrue(
-        (updatedBucket.getCreationTime() / 1000) >= 0);
-    protocol.close();
-  }
-
-  @Test
-  public void testRemoveBucketAcls() throws Exception {
-    assumeFalse("Rest Client does not support ACL",
-        clientProtocol.equals(RestClient.class));
-    runTestRemoveBucketAcls(client);
-  }
-
-  static void runTestRemoveBucketAcls(ClientProtocol protocol)
-      throws OzoneException, IOException, ParseException {
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-        .setOwner("bilbo")
-        .setQuota("100TB")
-        .setAdmin("hdfs")
-        .build();
-    protocol.createVolume(volumeName, volumeArgs);
-    OzoneVolume vol = protocol.getVolumeDetails(volumeName);
-    String[] acls = {"user:frodo:rw", "user:samwise:rw"};
-    String bucketName = OzoneUtils.getRequestID().toLowerCase();
-    List<OzoneAcl> aclList =
-        Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
-            .collect(Collectors.toList());
-    vol.createBucket(bucketName);
-    OzoneBucket bucket = vol.getBucket(bucketName);
-    int numAcls = bucket.getAcls().size();
-    for (OzoneAcl ozoneAcl : aclList) {
-      Assert.assertTrue(bucket.addAcls(ozoneAcl));
-    }
-    assertEquals(bucket.getAcls().size(), 2 + numAcls);
-    for (OzoneAcl ozoneAcl : aclList) {
-      Assert.assertTrue(bucket.removeAcls(ozoneAcl));
-    }
-    OzoneBucket updatedBucket = vol.getBucket(bucketName);
-
-    // We removed all acls
-    assertEquals(updatedBucket.getAcls().size(), numAcls);
-    // verify that the creation time is still present after the update
-    assertTrue(
-        (updatedBucket.getCreationTime() / 1000) >= 0);
-    protocol.close();
-  }
-
-  @Test
-  public void testDeleteBucket() throws OzoneException, IOException {
-    runTestDeleteBucket(client);
-  }
-
-  static void runTestDeleteBucket(ClientProtocol protocol)
-      throws OzoneException, IOException {
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-        .setOwner("bilbo")
-        .setQuota("100TB")
-        .setAdmin("hdfs")
-        .build();
-    protocol.createVolume(volumeName, volumeArgs);
-    OzoneVolume vol = protocol.getVolumeDetails(volumeName);
-    String[] acls = {"user:frodo:rw", "user:samwise:rw"};
-    String bucketName = OzoneUtils.getRequestID().toLowerCase();
-    List<OzoneAcl> aclList =
-        Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
-            .collect(Collectors.toList());
-    BucketArgs bucketArgs = BucketArgs.newBuilder()
-        .setAcls(aclList)
-        .build();
-    vol.createBucket(bucketName, bucketArgs);
-    vol.deleteBucket(bucketName);
-    try {
-      vol.getBucket(bucketName);
-      fail("Fetching a deleted bucket should have thrown an exception.");
-    } catch (Exception ex) {
-      // expected: the bucket no longer exists
-      assertNotNull(ex);
-    }
-    protocol.close();
-  }
-
-  @Test
-  public void testListBucket() throws Exception {
-    runTestListBucket(client);
-  }
-
-  static void runTestListBucket(ClientProtocol protocol)
-      throws OzoneException, IOException, ParseException {
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-        .setOwner("bilbo")
-        .setQuota("100TB")
-        .setAdmin("hdfs")
-        .build();
-    protocol.createVolume(volumeName, volumeArgs);
-    OzoneVolume vol = protocol.getVolumeDetails(volumeName);
-    String[] acls = {"user:frodo:rw", "user:samwise:rw"};
-    List<OzoneAcl> aclList =
-        Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
-            .collect(Collectors.toList());
-
-    long currentTime = Time.now();
-    for (int x = 0; x < 10; x++) {
-      String bucketName = "listbucket-test-" + x;
-      BucketArgs bucketArgs = BucketArgs.newBuilder()
-          .setAcls(aclList)
-          .build();
-      vol.createBucket(bucketName, bucketArgs);
-    }
-    Iterator<? extends OzoneBucket> bucketIterator = vol.listBuckets(null);
-    int count = 0;
-
-    while (bucketIterator.hasNext()) {
-      assertTrue((bucketIterator.next().getCreationTime()
-          / 1000) >= (currentTime / 1000));
-      count++;
-    }
-    assertEquals(count, 10);
-
-    bucketIterator = vol.listBuckets(null, "listbucket-test-4");
-    assertEquals(getSize(bucketIterator), 5);
-
-    bucketIterator = vol.listBuckets(null, "listbucket-test-3");
-    assertEquals(getSize(bucketIterator), 6);
-
-    protocol.close();
-  }
-
-  private static int getSize(Iterator<? extends OzoneBucket> bucketIterator) {
-    int count = 0;
-    while (bucketIterator.hasNext()) {
-      count++;
-      bucketIterator.next();
-    }
-    return count;
-  }
-}

+ 0 - 108
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBucketsRatis.java

@@ -1,108 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.client;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.RatisTestHelper;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.RestClient;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collection;
-
-/** The same as {@link TestBuckets} except that this test is Ratis enabled. */
-@Ignore("Disabling Ratis tests for pipeline work.")
-@RunWith(value = Parameterized.class)
-public class TestBucketsRatis {
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  private static RatisTestHelper.RatisTestSuite suite;
-  private static ClientProtocol client;
-  private static OzoneConfiguration conf;
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> clientProtocol() {
-    Object[][] params = new Object[][] {
-        {RpcClient.class},
-        {RestClient.class}};
-    return Arrays.asList(params);
-  }
-
-  @Parameterized.Parameter
-  @SuppressWarnings("visibilitymodifier")
-  public static Class clientProtocol;
-
-  @BeforeClass
-  public static void init() throws Exception {
-    suite = new RatisTestHelper.RatisTestSuite();
-    conf = suite.getConf();
-  }
-
-  @Before
-  public void setup() throws Exception {
-    if (clientProtocol.equals(RestClient.class)) {
-      client = new RestClient(conf);
-    } else {
-      client = new RpcClient(conf);
-    }
-  }
-
-  @AfterClass
-  public static void shutdown() {
-    if (suite != null) {
-      suite.close();
-    }
-  }
-
-  @Test
-  public void testCreateBucket() throws Exception {
-    TestBuckets.runTestCreateBucket(client);
-  }
-
-  @Test
-  public void testAddBucketAcls() throws Exception {
-    TestBuckets.runTestAddBucketAcls(client);
-  }
-
-  @Test
-  public void testRemoveBucketAcls() throws Exception {
-    TestBuckets.runTestRemoveBucketAcls(client);
-  }
-
-  @Test
-  public void testDeleteBucket() throws OzoneException, IOException {
-    TestBuckets.runTestDeleteBucket(client);
-  }
-
-  @Test
-  public void testListBucket() throws Exception {
-    TestBuckets.runTestListBucket(client);
-  }
-}

+ 0 - 734
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java

@@ -1,734 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.client;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.collections.IteratorUtils;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.io.IOUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.hadoop.hdds.client.ReplicationFactor;
-import org.apache.hadoop.hdds.client.ReplicationType;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneAcl;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.OzoneTestUtils;
-import org.apache.hadoop.ozone.client.BucketArgs;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneKey;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.OzoneInputStream;
-import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
-
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.InputStream;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.hdds
-    .HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_STALENODE_INTERVAL;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-/**
- * Test Ozone Key Lifecycle.
- */
-public class TestKeys {
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-
-  private static OzoneConfiguration conf;
-  private static MiniOzoneCluster ozoneCluster = null;
-  private static String path;
-  private static ClientProtocol client = null;
-  private static long currentTime;
-  private static ReplicationFactor replicationFactor = ReplicationFactor.ONE;
-  private static ReplicationType replicationType = ReplicationType.STAND_ALONE;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   *
-   * @throws IOException
-   */
-  @Before
-  public void init() throws Exception {
-    conf = new OzoneConfiguration();
-
-    // Set short block deleting service interval to speed up deletions.
-    conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
-        1000, TimeUnit.MILLISECONDS);
-    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
-    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS);
-
-    path = GenericTestUtils.getTempPath(TestKeys.class.getSimpleName());
-    Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
-
-    ozoneCluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(1)
-        .setHbInterval(1000)
-        .setHbProcessorInterval(1000)
-        .build();
-    ozoneCluster.waitForClusterToBeReady();
-    client = new RpcClient(conf);
-    currentTime = Time.now();
-  }
-
-  /**
-   * Shutdown the MiniOzoneCluster.
-   */
-  @After
-  public void shutdown() {
-    if (ozoneCluster != null) {
-      ozoneCluster.shutdown();
-    }
-  }
-
-  /**
-   * Creates a file with random data.
-   *
-   * @return File.
-   */
-  static File createRandomDataFile(String dir, String fileName, long size)
-      throws IOException {
-    File tmpDir = new File(dir);
-    FileUtils.forceMkdir(tmpDir);
-    File tmpFile = new File(tmpDir, fileName);
-    try (FileOutputStream randFile = new FileOutputStream(tmpFile)) {
-      Random r = new Random();
-      for (int x = 0; x < size; x++) {
-        char c = (char) (r.nextInt(26) + 'a');
-        randFile.write(c);
-      }
-
-    } catch (IOException e) {
-      fail(e.getMessage());
-    }
-    return tmpFile;
-  }
-
-  /**
-   * Generates a multi-part key whose parts are separated by the given
-   * delimiter. Each part is a random alphanumeric string of length 0 - 4
-   * (length 5 when there is only one part), and the number of parts is
-   * between 1 and 5.
-   *
-   * @param delimiter delimiter used to separate the parts of the key
-   * @return key composed of multiple parts separated by the delimiter
-   */
-  static String getMultiPartKey(String delimiter) {
-    int numParts = RandomUtils.nextInt(0, 5) + 1;
-    String[] nameParts = new String[numParts];
-    for (int i = 0; i < numParts; i++) {
-      int stringLength = numParts == 1 ? 5 : RandomUtils.nextInt(0, 5);
-      nameParts[i] = RandomStringUtils.randomAlphanumeric(stringLength);
-    }
-    return StringUtils.join(delimiter, nameParts);
-  }
-
-  static class PutHelper {
-    private final ClientProtocol client;
-    private final String dir;
-    private final String keyName;
-
-    private OzoneVolume vol;
-    private OzoneBucket bucket;
-    private File file;
-
-    PutHelper(ClientProtocol client, String dir) {
-      this(client, dir, OzoneUtils.getRequestID().toLowerCase());
-    }
-
-    PutHelper(ClientProtocol client, String dir, String key) {
-      this.client = client;
-      this.dir = dir;
-      this.keyName = key;
-    }
-
-    public OzoneVolume getVol() {
-      return vol;
-    }
-
-    public OzoneBucket getBucket() {
-      return bucket;
-    }
-
-    public File getFile() {
-      return file;
-    }
-
-    /**
-     * This function is reused in all other tests.
-     *
-     * @return the name of the new key that was created.
-     * @throws Exception if the volume, bucket, or key cannot be created
-     */
-    private String putKey() throws Exception {
-      String volumeName = OzoneUtils.getRequestID().toLowerCase();
-
-      VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-          .setOwner("bilbo")
-          .setQuota("100TB")
-          .setAdmin("hdfs")
-          .build();
-      client.createVolume(volumeName, volumeArgs);
-      vol = client.getVolumeDetails(volumeName);
-      String[] acls = {"user:frodo:rw", "user:samwise:rw"};
-
-      String bucketName = OzoneUtils.getRequestID().toLowerCase();
-      List<OzoneAcl> aclList =
-          Arrays.stream(acls).map(acl -> OzoneAcl.parseAcl(acl))
-              .collect(Collectors.toList());
-      BucketArgs bucketArgs = BucketArgs.newBuilder()
-          .setAcls(aclList)
-          .build();
-      vol.createBucket(bucketName, bucketArgs);
-      bucket = vol.getBucket(bucketName);
-
-      String fileName = OzoneUtils.getRequestID().toLowerCase();
-
-      file = createRandomDataFile(dir, fileName, 1024);
-
-      try (
-          OzoneOutputStream ozoneOutputStream = bucket
-              .createKey(keyName, 0, replicationType, replicationFactor,
-                  new HashMap<>());
-          InputStream fileInputStream = new FileInputStream(file)) {
-        IOUtils.copy(fileInputStream, ozoneOutputStream);
-      }
-      return keyName;
-    }
-  }
-
-  @Test
-  public void testPutKey() throws Exception {
-    // Test non-delimited keys
-    runTestPutKey(new PutHelper(client, path));
-    // Test key delimited by a random delimiter
-    String delimiter = RandomStringUtils.randomAscii(1);
-    runTestPutKey(new PutHelper(client, path,
-        getMultiPartKey(delimiter)));
-  }
-
-  @SuppressWarnings("emptyblock")
-  static void runTestPutKey(PutHelper helper) throws Exception {
-    final ClientProtocol helperClient = helper.client;
-    helper.putKey();
-    assertNotNull(helper.getBucket());
-    assertNotNull(helper.getFile());
-    List<OzoneKey> keyList = helperClient
-        .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
-            null, 10);
-    Assert.assertEquals(1, keyList.size());
-
-    // create a second key directly through the client protocol, then re-list
-    String newkeyName = OzoneUtils.getRequestID().toLowerCase();
-    OzoneOutputStream ozoneOutputStream = helperClient
-        .createKey(helper.getVol().getName(), helper.getBucket().getName(),
-            newkeyName, 0, replicationType, replicationFactor, new HashMap<>());
-    ozoneOutputStream.close();
-    keyList = helperClient
-        .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
-            null, 10);
-    Assert.assertEquals(2, keyList.size());
-
-    // test new put key with invalid volume/bucket name
-    OzoneTestUtils.expectOmException(ResultCodes.VOLUME_NOT_FOUND, () -> {
-      try (OzoneOutputStream oos = helperClient
-          .createKey("invalid-volume", helper.getBucket().getName(), newkeyName,
-              0, replicationType, replicationFactor, new HashMap<>())) {
-      }
-    });
-
-    OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND, () -> {
-      try (OzoneOutputStream oos = helperClient
-          .createKey(helper.getVol().getName(), "invalid-bucket", newkeyName, 0,
-              replicationType, replicationFactor, new HashMap<>())) {
-      }
-    });
-  }
-
-  private static void restartDatanode(MiniOzoneCluster cluster, int datanodeIdx)
-      throws Exception {
-    cluster.restartHddsDatanode(datanodeIdx, true);
-  }
-
-  @Test
-  public void testPutAndGetKeyWithDnRestart() throws Exception {
-    runTestPutAndGetKeyWithDnRestart(
-        new PutHelper(client, path), ozoneCluster);
-    String delimiter = RandomStringUtils.randomAscii(1);
-    runTestPutAndGetKeyWithDnRestart(
-        new PutHelper(client, path,
-            getMultiPartKey(delimiter)), ozoneCluster);
-  }
-
-  static void runTestPutAndGetKeyWithDnRestart(
-      PutHelper helper, MiniOzoneCluster cluster) throws Exception {
-    String keyName = helper.putKey();
-    assertNotNull(helper.getBucket());
-    assertNotNull(helper.getFile());
-
-    // restart the datanode
-    restartDatanode(cluster, 0);
-    // verify getKey after the datanode restart
-    String newFileName = helper.dir + "/"
-        + OzoneUtils.getRequestID().toLowerCase();
-    Path newPath = Paths.get(newFileName);
-    try (
-        FileOutputStream newOutputStream = new FileOutputStream(
-            newPath.toString());
-        OzoneInputStream ozoneInputStream = helper.client
-            .getKey(helper.getVol().getName(), helper.getBucket().getName(),
-                keyName)) {
-      IOUtils.copy(ozoneInputStream, newOutputStream);
-    }
-
-    try (
-        FileInputStream original = new FileInputStream(helper.getFile());
-        FileInputStream downloaded = new FileInputStream(newPath.toFile())) {
-      String originalHash = DigestUtils.sha256Hex(original);
-      String downloadedHash = DigestUtils.sha256Hex(downloaded);
-      assertEquals(
-          "Sha256 does not match between original file and downloaded file.",
-          originalHash, downloadedHash);
-    }
-  }
-
-  @Test
-  public void testPutAndGetKey() throws Exception {
-    runTestPutAndGetKey(new PutHelper(client, path));
-    String delimiter = RandomStringUtils.randomAscii(1);
-    runTestPutAndGetKey(new PutHelper(client, path,
-        getMultiPartKey(delimiter)));
-  }
-
-  static void runTestPutAndGetKey(PutHelper helper) throws Exception {
-    final ClientProtocol helperClient = helper.client;
-
-    String keyName = helper.putKey();
-    assertNotNull(helper.getBucket());
-    assertNotNull(helper.getFile());
-
-    final String newFileName1 = helper.dir + "/"
-        + OzoneUtils.getRequestID().toLowerCase();
-    final String newFileName2 = helper.dir + "/"
-        + OzoneUtils.getRequestID().toLowerCase();
-
-    Path newPath1 = Paths.get(newFileName1);
-    Path newPath2 = Paths.get(newFileName2);
-
-    try (
-        FileOutputStream newOutputStream = new FileOutputStream(
-            newPath1.toString());
-        OzoneInputStream ozoneInputStream = helper.getBucket()
-            .readKey(keyName)) {
-      IOUtils.copy(ozoneInputStream, newOutputStream);
-    }
-
-    // read the key a second time to verify repeated reads return the same data
-    try (
-        FileOutputStream newOutputStream = new FileOutputStream(
-            newPath2.toString());
-        OzoneInputStream ozoneInputStream = helper.getBucket()
-            .readKey(keyName)) {
-      IOUtils.copy(ozoneInputStream, newOutputStream);
-    }
-
-    try (FileInputStream original = new FileInputStream(helper.getFile());
-        FileInputStream downloaded1 = new FileInputStream(newPath1.toFile());
-        FileInputStream downloaded2 = new FileInputStream(newPath2.toFile())) {
-      String originalHash = DigestUtils.sha256Hex(original);
-      String downloadedHash1 = DigestUtils.sha256Hex(downloaded1);
-      String downloadedHash2 = DigestUtils.sha256Hex(downloaded2);
-
-      assertEquals(
-          "Sha256 does not match between original file and downloaded file.",
-          originalHash, downloadedHash1);
-      assertEquals(
-          "Sha256 does not match between original file and downloaded file.",
-          originalHash, downloadedHash2);
-
-      // test new get key with invalid volume/bucket name
-      OzoneTestUtils.expectOmException(ResultCodes.KEY_NOT_FOUND,
-          () -> helperClient.getKey(
-              "invalid-volume", helper.getBucket().getName(), keyName));
-
-      OzoneTestUtils.expectOmException(ResultCodes.KEY_NOT_FOUND,
-          () -> helperClient.getKey(
-              helper.getVol().getName(), "invalid-bucket", keyName));
-    }
-  }
-
-  @Test
-  public void testPutAndDeleteKey() throws Exception {
-    runTestPutAndDeleteKey(new PutHelper(client, path));
-    String delimiter = RandomStringUtils.randomAscii(1);
-    runTestPutAndDeleteKey(new PutHelper(client, path,
-        getMultiPartKey(delimiter)));
-  }
-
-  static void runTestPutAndDeleteKey(PutHelper helper) throws Exception {
-    String keyName = helper.putKey();
-    assertNotNull(helper.getBucket());
-    assertNotNull(helper.getFile());
-    helper.getBucket().deleteKey(keyName);
-
-    OzoneTestUtils.expectOmException(ResultCodes.KEY_NOT_FOUND, () -> {
-      helper.getBucket().getKey(keyName);
-    });
-  }
-
-  @Test
-  public void testPutAndListKey() throws Exception {
-    runTestPutAndListKey(new PutHelper(client, path));
-    String delimiter = RandomStringUtils.randomAscii(1);
-    runTestPutAndListKey(new PutHelper(client, path,
-        getMultiPartKey(delimiter)));
-  }
-
-  static void runTestPutAndListKey(PutHelper helper) throws Exception {
-    ClientProtocol helperClient = helper.client;
-    helper.putKey();
-    assertNotNull(helper.getBucket());
-    assertNotNull(helper.getFile());
-
-    // add keys [list-key0, list-key1, ..., list-key9]
-    for (int x = 0; x < 10; x++) {
-      String newkeyName = "list-key" + x;
-      try (
-          OzoneOutputStream ozoneOutputStream = helper.getBucket()
-              .createKey(newkeyName, 0, replicationType, replicationFactor,
-                  new HashMap<>());
-          InputStream fileInputStream = new FileInputStream(helper.getFile())) {
-        IOUtils.copy(fileInputStream, ozoneOutputStream);
-      }
-    }
-
-    List<OzoneKey> keyList1 =
-        IteratorUtils.toList(helper.getBucket().listKeys(null, null));
-    // test list key using a more efficient call
-    List<OzoneKey> keyList2 = helperClient
-        .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
-            null, 100);
-
-    Assert.assertEquals(11, keyList1.size());
-    Assert.assertEquals(11, keyList2.size());
-    // Verify the key creation/modification times. Compare at second
-    // granularity, since re-parsing the date string to milliseconds
-    // loses precision.
-    for (OzoneKey key : keyList1) {
-      assertTrue((key.getCreationTime() / 1000) >= (currentTime / 1000));
-      assertTrue((key.getModificationTime() / 1000) >= (currentTime / 1000));
-    }
-
-    for (OzoneKey key : keyList2) {
-      assertTrue((key.getCreationTime() / 1000) >= (currentTime / 1000));
-      assertTrue((key.getModificationTime() / 1000) >= (currentTime / 1000));
-    }
-
-    // test maxLength parameter of list keys
-    keyList2 = helperClient
-        .listKeys(helper.getVol().getName(), helper.getBucket().getName(), null,
-            null, 1);
-    Assert.assertEquals(1, keyList2.size());
-
-    // test startKey parameter of list keys
-    keyList1 = IteratorUtils
-        .toList(helper.getBucket().listKeys("list-key", "list-key4"));
-    keyList2 = helperClient
-        .listKeys(helper.getVol().getName(), helper.getBucket().getName(),
-            "list-key", "list-key4", 100);
-    Assert.assertEquals(5, keyList1.size());
-    Assert.assertEquals(5, keyList2.size());
-
-    // test prefix parameter of list keys
-    keyList1 =
-        IteratorUtils.toList(helper.getBucket().listKeys("list-key2", null));
-    keyList2 = helperClient
-        .listKeys(helper.getVol().getName(), helper.getBucket().getName(),
-            "list-key2", null, 100);
-    Assert.assertTrue(
-        keyList1.size() == 1 && keyList1.get(0).getName().equals("list-key2"));
-    Assert.assertTrue(
-        keyList2.size() == 1 && keyList2.get(0).getName().equals("list-key2"));
-
-    // test new list keys with invalid volume/bucket name
-    OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND, () -> {
-      helperClient.listKeys("invalid-volume", helper.getBucket().getName(),
-          null, null, 100);
-    });
-
-    OzoneTestUtils.expectOmException(ResultCodes.BUCKET_NOT_FOUND, () -> {
-      helperClient.listKeys(helper.getVol().getName(), "invalid-bucket", null,
-          null, 100);
-    });
-  }
-
-  @Test
-  public void testGetKeyInfo() throws Exception {
-    runTestGetKeyInfo(new PutHelper(client, path));
-    String delimiter = RandomStringUtils.randomAscii(1);
-    runTestGetKeyInfo(new PutHelper(client, path,
-        getMultiPartKey(delimiter)));
-  }
-
-  static void runTestGetKeyInfo(PutHelper helper) throws Exception {
-    String keyName = helper.putKey();
-    assertNotNull(helper.getBucket());
-    assertNotNull(helper.getFile());
-
-    OzoneKey keyInfo = helper.getBucket().getKey(keyName);
-    assertNotNull(keyInfo);
-    assertEquals(keyName, keyInfo.getName());
-
-    // Compare at second granularity, since re-parsing the date string to
-    // milliseconds loses precision.
-    Assert
-        .assertTrue((keyInfo.getCreationTime() / 1000) >= (currentTime / 1000));
-    Assert.assertTrue(
-        (keyInfo.getModificationTime() / 1000) >= (currentTime / 1000));
-  }
-
-  // Tracks volume, bucket, and key info used by the create/delete key tests.
-  private static class BucketKeys {
-
-    private Map<Pair<String, String>, List<String>> buckets;
-
-    BucketKeys() {
-      buckets = Maps.newHashMap();
-    }
-
-    void addKey(String volume, String bucket, String key) {
-      // check if this bucket exists
-      for (Map.Entry<Pair<String, String>, List<String>> entry :
-          buckets.entrySet()) {
-        if (entry.getKey().getValue().equals(bucket)) {
-          entry.getValue().add(key);
-          return;
-        }
-      }
-
-      // bucket does not exist yet
-      Pair<String, String> newBucket = new ImmutablePair<>(volume, bucket);
-      List<String> keyList = Lists.newArrayList();
-      keyList.add(key);
-      buckets.put(newBucket, keyList);
-    }
-
-    Set<Pair<String, String>> getAllBuckets() {
-      return buckets.keySet();
-    }
-
-    List<String> getBucketKeys(String bucketName) {
-      for (Map.Entry<Pair<String, String>, List<String>> entry : buckets
-          .entrySet()) {
-        if (entry.getKey().getValue().equals(bucketName)) {
-          return entry.getValue();
-        }
-      }
-      return Lists.newArrayList();
-    }
-
-    int totalNumOfKeys() {
-      int count = 0;
-      for (Map.Entry<Pair<String, String>, List<String>> entry : buckets
-          .entrySet()) {
-        count += entry.getValue().size();
-      }
-      return count;
-    }
-  }
-
-  private int countOmKeys(OzoneManager om) throws IOException {
-    int totalCount = 0;
-    List<OmVolumeArgs> volumes =
-        om.listAllVolumes(null, null, Integer.MAX_VALUE);
-    for (OmVolumeArgs volume : volumes) {
-      List<OmBucketInfo> buckets =
-          om.listBuckets(volume.getVolume(), null, null, Integer.MAX_VALUE);
-      for (OmBucketInfo bucket : buckets) {
-        List<OmKeyInfo> keys = om.listKeys(bucket.getVolumeName(),
-            bucket.getBucketName(), null, null, Integer.MAX_VALUE);
-        totalCount += keys.size();
-      }
-    }
-    return totalCount;
-  }
-
-  @Test
-  @Ignore("Until delete background service is fixed.")
-  public void testDeleteKey() throws Exception {
-    OzoneManager ozoneManager = ozoneCluster.getOzoneManager();
-    // To avoid interference from other test cases,
-    // we record the number of existing keys at the beginning.
-    int numOfExistedKeys = countOmKeys(ozoneManager);
-
-    // Keep tracking bucket keys info while creating them
-    PutHelper helper = new PutHelper(client, path);
-    BucketKeys bucketKeys = new BucketKeys();
-    for (int i = 0; i < 20; i++) {
-      String keyName = helper.putKey();
-      bucketKeys.addKey(helper.getVol().getName(), helper.getBucket().getName(),
-          keyName);
-    }
-
-    // There should be 20 keys in the buckets we just created.
-    Assert.assertEquals(20, bucketKeys.totalNumOfKeys());
-
-    int numOfCreatedKeys = 0;
-    OzoneContainer cm = ozoneCluster.getHddsDatanodes().get(0)
-        .getDatanodeStateMachine().getContainer();
-
-    // Expected to delete chunk file list.
-    List<File> expectedChunkFiles = Lists.newArrayList();
-    // Iterate over all buckets, and list all keys in each bucket,
-    // count the total number of created keys.
-    Set<Pair<String, String>> buckets = bucketKeys.getAllBuckets();
-    for (Pair<String, String> buk : buckets) {
-      List<OmKeyInfo> createdKeys =
-          ozoneManager.listKeys(buk.getKey(), buk.getValue(), null, null, 20);
-
-      // Record the chunks that have been created,
-      // so we can verify actual deletions at DN side later.
-      for (OmKeyInfo keyInfo : createdKeys) {
-        List<OmKeyLocationInfo> locations =
-            keyInfo.getLatestVersionLocations().getLocationList();
-        OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(),
-            ozoneCluster.getStorageContainerManager());
-        for (OmKeyLocationInfo location : locations) {
-          KeyValueHandler keyValueHandler = (KeyValueHandler) cm
-              .getDispatcher().getHandler(ContainerProtos.ContainerType
-                  .KeyValueContainer);
-          KeyValueContainer container = (KeyValueContainer) cm.getContainerSet()
-              .getContainer(location.getBlockID().getContainerID());
-          BlockData blockInfo = keyValueHandler.getBlockManager()
-              .getBlock(container, location.getBlockID());
-          KeyValueContainerData containerData =
-              (KeyValueContainerData) container.getContainerData();
-          File dataDir = new File(containerData.getChunksPath());
-          for (ContainerProtos.ChunkInfo chunkInfo : blockInfo.getChunks()) {
-            File chunkFile = dataDir.toPath()
-                .resolve(chunkInfo.getChunkName()).toFile();
-            System.out.println("Chunk File created: "
-                + chunkFile.getAbsolutePath());
-            Assert.assertTrue(chunkFile.exists());
-            expectedChunkFiles.add(chunkFile);
-          }
-        }
-      }
-      numOfCreatedKeys += createdKeys.size();
-    }
-
-    // Ensure all keys are created.
-    Assert.assertEquals(20, numOfCreatedKeys);
-
-    // Ensure all keys are visible from OM.
-    // Total number should be numOfCreated + numOfExisted
-    Assert.assertEquals(20 + numOfExistedKeys, countOmKeys(ozoneManager));
-
-    // Delete all 20 keys
-    int delCount = 20;
-    Set<Pair<String, String>> allBuckets = bucketKeys.getAllBuckets();
-    for (Pair<String, String> bucketInfo : allBuckets) {
-      List<String> bks = bucketKeys.getBucketKeys(bucketInfo.getValue());
-      for (String keyName : bks) {
-        if (delCount > 0) {
-          OmKeyArgs arg =
-              new OmKeyArgs.Builder().setVolumeName(bucketInfo.getKey())
-                  .setBucketName(bucketInfo.getValue()).setKeyName(keyName)
-                  .build();
-          ozoneManager.deleteKey(arg);
-          delCount--;
-        }
-      }
-    }
-
-    // Keys should disappear from the OM namespace quickly, because the
-    // actual block deletion happens asynchronously.
-    GenericTestUtils.waitFor(() -> {
-      try {
-        int num = countOmKeys(ozoneManager);
-        return num == (numOfExistedKeys);
-      } catch (IOException e) {
-        return false;
-      }
-    }, 1000, 10000);
-
-    // It might take a while until all blocks are actually deleted;
-    // verify that all chunk files created earlier are removed from disk.
-    GenericTestUtils.waitFor(
-        () -> expectedChunkFiles.stream().allMatch(file -> !file.exists()),
-        1000, 60000);
-  }
-}

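With the REST protocol removed, client code reaches Ozone exclusively over the RPC transport. A minimal sketch of the surviving entry point, assuming the OzoneClientFactory and ObjectStore APIs of the hadoop-ozone client module (volume and bucket names are illustrative):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;

public final class RpcClientSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // The factory hands out RPC-backed clients; the REST variant is gone.
    try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
      ObjectStore store = client.getObjectStore();
      store.createVolume("vol1");                       // illustrative name
      store.getVolume("vol1").createBucket("bucket1");  // illustrative name
    }
  }
}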
+ 0 - 126
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeysRatis.java

@@ -1,126 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-package org.apache.hadoop.ozone.web.client;
-
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.RatisTestHelper;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
-import static org.apache.hadoop.ozone.web.client
-    .TestKeys.PutHelper;
-import static org.apache.hadoop.ozone.web.client
-    .TestKeys.getMultiPartKey;
-import static org.apache.hadoop.ozone.web.client
-    .TestKeys.runTestGetKeyInfo;
-import static org.apache.hadoop.ozone.web.client
-    .TestKeys.runTestPutAndDeleteKey;
-import static org.apache.hadoop.ozone.web.client
-    .TestKeys.runTestPutAndGetKey;
-import static org.apache.hadoop.ozone.web.client
-    .TestKeys.runTestPutAndGetKeyWithDnRestart;
-import static org.apache.hadoop.ozone.web.client
-    .TestKeys.runTestPutAndListKey;
-import static org.apache.hadoop.ozone.web.client
-    .TestKeys.runTestPutKey;
-
-/** The same as {@link TestKeys} except that this test is Ratis enabled. */
-public class TestKeysRatis {
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-  private static RatisTestHelper.RatisTestSuite suite;
-  private static MiniOzoneCluster ozoneCluster = null;
-  private static String path;
-  private static ClientProtocol client = null;
-
-  @BeforeClass
-  public static void init() throws Exception {
-    suite = new RatisTestHelper.RatisTestSuite();
-    path = GenericTestUtils.getTempPath(TestKeysRatis.class.getSimpleName());
-    ozoneCluster = suite.getCluster();
-    ozoneCluster.waitForClusterToBeReady();
-    client = suite.newOzoneClient();
-  }
-
-  /**
-   * Shut down the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (suite != null) {
-      suite.close();
-    }
-  }
-
-  @Test
-  public void testPutKey() throws Exception {
-    runTestPutKey(new PutHelper(client, path));
-    String delimiter = RandomStringUtils.randomAlphanumeric(1);
-    runTestPutKey(new PutHelper(client, path,
-        getMultiPartKey(delimiter)));
-  }
-
-  @Test
-  public void testPutAndGetKeyWithDnRestart() throws Exception {
-    runTestPutAndGetKeyWithDnRestart(
-        new PutHelper(client, path), ozoneCluster);
-    String delimiter = RandomStringUtils.randomAlphanumeric(1);
-    runTestPutAndGetKeyWithDnRestart(
-        new PutHelper(client, path, getMultiPartKey(delimiter)),
-        ozoneCluster);
-  }
-
-  @Test
-  public void testPutAndGetKey() throws Exception {
-    runTestPutAndGetKey(new PutHelper(client, path));
-    String delimiter = RandomStringUtils.randomAlphanumeric(1);
-    runTestPutAndGetKey(new PutHelper(client, path,
-        getMultiPartKey(delimiter)));
-  }
-
-  @Test
-  public void testPutAndDeleteKey() throws Exception  {
-    runTestPutAndDeleteKey(new PutHelper(client, path));
-    String delimiter = RandomStringUtils.randomAlphanumeric(1);
-    runTestPutAndDeleteKey(new PutHelper(client, path,
-        getMultiPartKey(delimiter)));
-  }
-
-  @Test
-  public void testPutAndListKey() throws Exception {
-    runTestPutAndListKey(new PutHelper(client, path));
-    String delimiter = RandomStringUtils.randomAlphanumeric(1);
-    runTestPutAndListKey(new PutHelper(client, path,
-        getMultiPartKey(delimiter)));
-  }
-
-  @Test
-  public void testGetKeyInfo() throws Exception {
-    runTestGetKeyInfo(new PutHelper(client, path));
-    String delimiter = RandomStringUtils.randomAlphanumeric(1);
-    runTestGetKeyInfo(new PutHelper(client, path,
-        getMultiPartKey(delimiter)));
-  }
-}

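RatisTestHelper.RatisTestSuite hides the cluster construction; as far as these tests are concerned, its effect is to enable Ratis replication before the mini cluster starts. A rough sketch of the equivalent direct setup, using the config key and builder calls that appear in TestVolumeRatis later in this change (the helper's internals may differ):

OzoneConfiguration conf = new OzoneConfiguration();
// Enable Ratis-replicated containers before building the cluster.
conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true);
MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
    .setNumDatanodes(3)   // Ratis replication needs a quorum of datanodes
    .build();
cluster.waitForClusterToBeReady();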
+ 0 - 304
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java

@@ -1,304 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.client;
-
-import io.netty.bootstrap.Bootstrap;
-import io.netty.channel.Channel;
-import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelInitializer;
-import io.netty.channel.ChannelOption;
-import io.netty.channel.ChannelPipeline;
-import io.netty.channel.EventLoopGroup;
-import io.netty.channel.SimpleChannelInboundHandler;
-import io.netty.channel.nio.NioEventLoopGroup;
-import io.netty.channel.socket.SocketChannel;
-import io.netty.channel.socket.nio.NioSocketChannel;
-import io.netty.handler.codec.http.DefaultFullHttpRequest;
-import io.netty.handler.codec.http.FullHttpRequest;
-import io.netty.handler.codec.http.HttpClientCodec;
-import io.netty.handler.codec.http.HttpContent;
-import io.netty.handler.codec.http.HttpContentDecompressor;
-import io.netty.handler.codec.http.HttpMethod;
-import io.netty.handler.codec.http.HttpObject;
-import io.netty.handler.codec.http.HttpResponse;
-import io.netty.handler.codec.http.HttpResponseStatus;
-import io.netty.handler.codec.http.HttpVersion;
-import io.netty.handler.codec.http.LastHttpContent;
-import io.netty.handler.logging.LogLevel;
-import io.netty.handler.logging.LoggingHandler;
-
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Time;
-import org.apache.http.HttpEntity;
-import org.apache.http.client.methods.CloseableHttpResponse;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.utils.URIBuilder;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClients;
-import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
-import org.apache.http.util.EntityUtils;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import javax.ws.rs.core.HttpHeaders;
-import java.io.IOException;
-import java.net.HttpURLConnection;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.Locale;
-import java.util.UUID;
-
-import static io.netty.util.CharsetUtil.UTF_8;
-
-/**
- * Unit tests for Ozone client connection reuse with the Apache HttpClient
- * and a Netty-based HTTP client.
- */
-public class TestOzoneClient {
-  private static Logger log = Logger.getLogger(TestOzoneClient.class);
-  private static int testVolumeCount = 5;
-  private static MiniOzoneCluster cluster = null;
-  private static String endpoint = null;
-
-  @BeforeClass
-  public static void init() throws Exception {
-    Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.ALL);
-    OzoneConfiguration conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-    int port = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails()
-        .getPort(DatanodeDetails.Port.Name.REST).getValue();
-    endpoint = String.format("http://localhost:%d", port);
-  }
-
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test(timeout = 5000)
-  public void testNewConnectionPerRequest()
-      throws IOException, URISyntaxException {
-    for (int i = 0; i < testVolumeCount; i++) {
-      try (CloseableHttpClient httpClient =
-               HttpClients.createDefault()) {
-        createVolume(getRandomVolumeName(i), httpClient);
-      }
-    }
-  }
-
-  /**
-   * The object handler should be able to serve multiple requests from
-   * a single HTTP client. This allows the client side to reuse HTTP
-   * connections from a connection pool instead of creating a new
-   * connection per request, which consumes resources heavily.
-   *
-   */
-  @Test(timeout = 5000)
-  public void testReuseWithApacheHttpClient()
-      throws IOException, URISyntaxException {
-
-    PoolingHttpClientConnectionManager cm =
-        new PoolingHttpClientConnectionManager();
-    cm.setMaxTotal(200);
-    cm.setDefaultMaxPerRoute(20);
-
-    try (CloseableHttpClient httpClient =
-             HttpClients.custom().setConnectionManager(cm).build()) {
-      for (int i = 0; i < testVolumeCount; i++) {
-        createVolume(getRandomVolumeName(i), httpClient);
-      }
-    }
-  }
-
-  @Test(timeout = 10000)
-  public void testReuseWithNettyHttpClient()
-      throws IOException, InterruptedException, URISyntaxException {
-    URI uri = new URI(endpoint);
-    String host = uri.getHost() == null ? "127.0.0.1" : uri.getHost();
-    int port = uri.getPort();
-
-    EventLoopGroup workerGroup = new NioEventLoopGroup();
-    try {
-      Bootstrap b = new Bootstrap();
-      b.group(workerGroup)
-          .channel(NioSocketChannel.class)
-          .option(ChannelOption.SO_KEEPALIVE, true)
-          .option(ChannelOption.SO_REUSEADDR, true)
-          .handler(new ChannelInitializer<SocketChannel>() {
-            /**
-             * This method will be called once the {@link Channel} was
-             * registered. After the method returns this instance
-             * will be removed from the {@link ChannelPipeline}
-             * of the {@link Channel}.
-             *
-             * @param ch the {@link Channel} which was registered.
-             * @throws Exception is thrown if an error occurs.
-             * In that case the {@link Channel} will be closed.
-             */
-            @Override
-            public void initChannel(SocketChannel ch) {
-              ChannelPipeline p = ch.pipeline();
-
-              // Comment the following line if you don't want client http trace
-              p.addLast("log", new LoggingHandler(LogLevel.INFO));
-              p.addLast(new HttpClientCodec());
-              p.addLast(new HttpContentDecompressor());
-              p.addLast(new NettyHttpClientHandler());
-            }
-          });
-
-      Channel ch = b.connect(host, port).sync().channel();
-      for (int i = 0; i < testVolumeCount; i++) {
-        String volumeName = getRandomVolumeName(i);
-        try {
-          sendNettyCreateVolumeRequest(ch, volumeName);
-          Thread.sleep(1000);
-        } catch (Exception e) {
-          log.error("Error sending create volume request", e);
-        }
-      }
-
-      Thread.sleep(1000);
-      ch.close();
-      // Wait for the server to close the connection.
-      ch.closeFuture().sync();
-    } catch (Exception ex) {
-      log.error("Error received in client setup", ex);
-    } finally {
-      workerGroup.shutdownGracefully();
-    }
-  }
-
-  class NettyHttpClientHandler extends
-      SimpleChannelInboundHandler<HttpObject> {
-
-    @Override
-    public void channelRead0(ChannelHandlerContext ctx, HttpObject msg) {
-      if (msg instanceof HttpResponse) {
-        HttpResponse response = (HttpResponse) msg;
-        log.info("STATUS: " + response.getStatus());
-        log.info("VERSION: " + response.getProtocolVersion());
-        Assert.assertEquals(HttpResponseStatus.CREATED.code(),
-            response.getStatus().code());
-      }
-      if (msg instanceof HttpContent) {
-        HttpContent content = (HttpContent) msg;
-        log.info(content.content().toString(UTF_8));
-        if (content instanceof LastHttpContent) {
-          log.info("END OF CONTENT");
-        }
-      }
-    }
-
-    @Override
-    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
-      log.error("Exception upon channel read", cause);
-      ctx.close();
-    }
-  }
-
-  private String getRandomVolumeName(int index) {
-    UUID id = UUID.randomUUID();
-    return "test-volume-" + index + "-" + id;
-
-  }
-
-  // Prepare the HTTP request and send it over the netty channel.
-  private void sendNettyCreateVolumeRequest(Channel channel, String volumeName)
-      throws URISyntaxException, IOException {
-    URIBuilder builder = new URIBuilder(endpoint);
-    builder.setPath("/" + volumeName);
-    URI uri = builder.build();
-
-    String host = uri.getHost() == null ? "127.0.0.1" : uri.getHost();
-    FullHttpRequest request = new DefaultFullHttpRequest(
-        HttpVersion.HTTP_1_1, HttpMethod.POST, uri.getRawPath());
-
-    SimpleDateFormat format =
-        new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
-    request.headers().set(HttpHeaders.HOST, host);
-    request.headers().add(HttpHeaders.CONTENT_TYPE, "application/json");
-    request.headers().set(Header.OZONE_VERSION_HEADER,
-        Header.OZONE_V1_VERSION_HEADER);
-    request.headers().set(HttpHeaders.DATE,
-        format.format(new Date(Time.now())));
-    request.headers().set(Header.OZONE_USER,
-        UserGroupInformation.getCurrentUser().getUserName());
-    request.headers().set(HttpHeaders.AUTHORIZATION,
-        Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " "
-            + OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-
-    // Send the HTTP request via netty channel.
-    channel.writeAndFlush(request);
-  }
-
-  // It is the caller's responsibility to close the client.
-  private void createVolume(String volumeName, CloseableHttpClient httpClient)
-      throws IOException, URISyntaxException {
-    HttpPost create1 = getCreateVolumeRequest(volumeName);
-    // Let IOException propagate so a failed request fails the test instead
-    // of being swallowed; always consume the entity to free the connection.
-    try (CloseableHttpResponse response1 = httpClient.execute(create1)) {
-      Assert.assertEquals(HttpURLConnection.HTTP_CREATED,
-          response1.getStatusLine().getStatusCode());
-      EntityUtils.consumeQuietly(response1.getEntity());
-    }
-  }
-
-  private HttpPost getCreateVolumeRequest(String volumeName)
-      throws URISyntaxException, IOException {
-    URIBuilder builder = new URIBuilder(endpoint);
-    builder.setPath("/" + volumeName);
-    HttpPost httpPost = new HttpPost(builder.build().toString());
-    SimpleDateFormat format =
-        new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss ZZZ", Locale.US);
-    httpPost.addHeader(Header.OZONE_VERSION_HEADER,
-        Header.OZONE_V1_VERSION_HEADER);
-    httpPost.addHeader(HttpHeaders.DATE,
-        format.format(new Date(Time.now())));
-    httpPost.addHeader(Header.OZONE_USER,
-        UserGroupInformation.getCurrentUser().getUserName());
-    httpPost.addHeader(HttpHeaders.AUTHORIZATION,
-        Header.OZONE_SIMPLE_AUTHENTICATION_SCHEME + " "
-            + OzoneConsts.OZONE_SIMPLE_HDFS_USER);
-    return httpPost;
-  }
-
-}

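The connection-reuse pattern this test exercised is not Ozone-specific and remains useful wherever one HTTP endpoint is called repeatedly. A condensed sketch of the pooled Apache HttpClient setup from the deleted test:

PoolingHttpClientConnectionManager cm =
    new PoolingHttpClientConnectionManager();
cm.setMaxTotal(200);           // upper bound on pooled connections overall
cm.setDefaultMaxPerRoute(20);  // upper bound per host:port route
try (CloseableHttpClient httpClient =
         HttpClients.custom().setConnectionManager(cm).build()) {
  // Execute many requests here; each connection is returned to the pool
  // for reuse instead of being re-established per request.
}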
+ 0 - 381
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java

@@ -1,381 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.client;
-
-import java.io.File;
-import java.io.IOException;
-import java.text.ParseException;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
-import org.apache.hadoop.hdds.client.OzoneQuota;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneTestUtils;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.VolumeArgs;
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.RestClient;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.junit.AfterClass;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-/**
- * Test Ozone Volumes Lifecycle.
- */
-@RunWith(value = Parameterized.class)
-public class TestVolume {
-  private static MiniOzoneCluster cluster = null;
-  private static ClientProtocol client = null;
-  private static OzoneConfiguration conf;
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> clientProtocol() {
-    Object[][] params = new Object[][] {
-        {RpcClient.class}};
-    return Arrays.asList(params);
-  }
-
-  @SuppressWarnings("visibilitymodifier")
-  @Parameterized.Parameter
-  public Class clientProtocol;
-
-  /**
-   * Create a MiniOzoneCluster for testing.
-   * <p>
-   * Ozone is made active by setting OZONE_ENABLED = true
-   *
-   * @throws IOException
-   */
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-
-    String path = GenericTestUtils
-        .getTempPath(TestVolume.class.getSimpleName());
-    FileUtils.deleteDirectory(new File(path));
-
-    Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
-
-    cluster = MiniOzoneCluster.newBuilder(conf).build();
-    cluster.waitForClusterToBeReady();
-  }
-
-  @Before
-  public void setup() throws Exception {
-    if (clientProtocol.equals(RestClient.class)) {
-      client = new RestClient(conf);
-    } else {
-      client = new RpcClient(conf);
-    }
-  }
-
-  /**
-   * Shut down the MiniOzoneCluster.
-   */
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testCreateVolume() throws Exception {
-    runTestCreateVolume(client);
-  }
-
-  static void runTestCreateVolume(ClientProtocol clientProtocol)
-      throws OzoneException, IOException, ParseException {
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-
-    long currentTime = Time.now();
-
-    VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-        .setOwner("bilbo")
-        .setQuota("100TB")
-        .setAdmin("hdfs")
-        .build();
-    clientProtocol.createVolume(volumeName, volumeArgs);
-    OzoneVolume vol = clientProtocol.getVolumeDetails(volumeName);
-
-    assertEquals(vol.getName(), volumeName);
-    assertEquals(vol.getAdmin(), "hdfs");
-    assertEquals(vol.getOwner(), "bilbo");
-    assertEquals(vol.getQuota(), OzoneQuota.parseQuota("100TB").sizeInBytes());
-
-    // verify the volume creation time
-    assertTrue((vol.getCreationTime()
-        / 1000) >= (currentTime / 1000));
-
-    // Test creating a volume with an invalid volume name. We cannot use an
-    // ExpectedException Rule here because this helper method is static.
-    try {
-      String invalidVolumeName = "#" + OzoneUtils.getRequestID().toLowerCase();
-      clientProtocol.createVolume(invalidVolumeName);
-      /*
-      //TODO: RestClient and RpcClient should use HddsClientUtils to verify name
-      fail("Expected the volume creation to fail because the"
-          + " volume name starts with an invalid char #");*/
-    } catch (Exception e) {
-      assertTrue(e.getMessage().contains("Bucket or Volume name"
-          + " has an unsupported character : #"));
-    }
-  }
-
-  @Test
-  public void testCreateDuplicateVolume() throws Exception {
-    runTestCreateDuplicateVolume(client);
-  }
-
-  static void runTestCreateDuplicateVolume(ClientProtocol clientProtocol)
-      throws Exception {
-
-    clientProtocol.createVolume("testvol");
-    OzoneTestUtils.expectOmException(ResultCodes.VOLUME_ALREADY_EXISTS,
-        () -> clientProtocol.createVolume("testvol"));
-  }
-
-  @Test
-  public void testDeleteVolume() throws OzoneException, IOException {
-    runTestDeleteVolume(client);
-  }
-
-  static void runTestDeleteVolume(ClientProtocol clientProtocol)
-      throws OzoneException, IOException {
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    clientProtocol.createVolume(volumeName);
-    clientProtocol.deleteVolume(volumeName);
-  }
-
-  @Test
-  public void testChangeOwnerOnVolume() throws Exception {
-    runTestChangeOwnerOnVolume(client);
-  }
-
-  static void runTestChangeOwnerOnVolume(ClientProtocol clientProtocol)
-      throws OzoneException, ParseException, IOException {
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    clientProtocol.createVolume(volumeName);
-    clientProtocol.getVolumeDetails(volumeName);
-    clientProtocol.setVolumeOwner(volumeName, "frodo");
-    OzoneVolume newVol = clientProtocol.getVolumeDetails(volumeName);
-    assertEquals(newVol.getOwner(), "frodo");
-    // verify the creation time is not lost after the set-owner operation
-    assertTrue(newVol.getCreationTime() > 0);
-  }
-
-  @Test
-  public void testChangeQuotaOnVolume() throws Exception {
-    runTestChangeQuotaOnVolume(client);
-  }
-
-  static void runTestChangeQuotaOnVolume(ClientProtocol clientProtocol)
-      throws OzoneException, IOException, ParseException {
-    String volumeName = OzoneUtils.getRequestID().toLowerCase();
-    clientProtocol.createVolume(volumeName);
-    clientProtocol.setVolumeQuota(volumeName, OzoneQuota.parseQuota("1000MB"));
-    OzoneVolume newVol = clientProtocol.getVolumeDetails(volumeName);
-    assertEquals(newVol.getQuota(),
-        OzoneQuota.parseQuota("1000MB").sizeInBytes());
-    // verify the creation time is not lost after the set-quota operation
-    assertTrue(newVol.getCreationTime() > 0);
-  }
-
-  // The "list all volumes in the cluster" feature has to be fixed after HDDS-357.
-  // TODO: fix this
-  @Ignore
-  @Test
-  public void testListVolume() throws OzoneException, IOException {
-    runTestListVolume(client);
-  }
-
-  static void runTestListVolume(ClientProtocol clientProtocol)
-      throws OzoneException, IOException {
-    for (int x = 0; x < 10; x++) {
-      String volumeName = OzoneUtils.getRequestID().toLowerCase();
-      clientProtocol.createVolume(volumeName);
-    }
-
-    List<OzoneVolume> ovols = clientProtocol.listVolumes(
-        UserGroupInformation.getCurrentUser().getUserName(), null, null, 100);
-    assertTrue(ovols.size() >= 10);
-  }
-
-  // TODO: remove @Ignore below once the problem has been resolved.
-  @Ignore("Takes 3m to run, disable for now.")
-  @Test
-  public void testListVolumePagination() throws OzoneException, IOException {
-    runTestListVolumePagination(client);
-  }
-
-  static void runTestListVolumePagination(ClientProtocol clientProtocol)
-      throws OzoneException, IOException {
-    final int volCount = 2000;
-    final int step = 100;
-    for (int x = 0; x < volCount; x++) {
-      String volumeName = OzoneUtils.getRequestID().toLowerCase();
-      clientProtocol.createVolume(volumeName);
-    }
-    String prevKey = null;
-    int count = 0;
-    int pagecount = 0;
-    while (count < volCount) {
-      List<OzoneVolume> ovols = clientProtocol.listVolumes(null, prevKey, step);
-      count += ovols.size();
-      prevKey = ovols.get(ovols.size() - 1).getName();
-      pagecount++;
-    }
-    assertEquals(volCount / step, pagecount);
-  }
-
-  // TODO: remove @Ignore below once the problem has been resolved.
-  @Ignore
-  @Test
-  public void testListAllVolumes() throws OzoneException, IOException {
-    runTestListAllVolumes(client);
-  }
-
-  static void runTestListAllVolumes(ClientProtocol clientProtocol)
-      throws OzoneException, IOException {
-    final int volCount = 200;
-    final int step = 10;
-    for (int x = 0; x < volCount; x++) {
-      String userName =
-          "frodo" + RandomStringUtils.randomAlphabetic(5).toLowerCase();
-      String volumeName =
-          "vol" + RandomStringUtils.randomAlphabetic(5).toLowerCase();
-      VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-          .setOwner(userName)
-          .setQuota("100TB")
-          .setAdmin("hdfs")
-          .build();
-      clientProtocol.createVolume(volumeName, volumeArgs);
-      OzoneVolume vol = clientProtocol.getVolumeDetails(volumeName);
-      assertNotNull(vol);
-    }
-    String prevKey = null;
-    int count = 0;
-    int pagecount = 0;
-    while (count < volCount) {
-      List<OzoneVolume> ovols = clientProtocol.listVolumes(null, prevKey, step);
-      count += ovols.size();
-      if (ovols.size() > 0) {
-        prevKey = ovols.get(ovols.size() - 1).getName();
-      }
-      pagecount++;
-    }
-    // Because we are querying an existing ozone store, there may also be
-    // volumes created by other tests, which would yield more pages.
-    assertEquals(volCount / step, pagecount);
-  }
-
-  // The "list all volumes in the cluster" feature has to be fixed after HDDS-357.
-  // TODO: fix this
-  @Ignore
-  @Test
-  public void testListVolumes() throws Exception {
-    runTestListVolumes(client);
-  }
-
-  static void runTestListVolumes(ClientProtocol clientProtocol)
-      throws OzoneException, IOException, ParseException {
-    final int volCount = 20;
-    final String user1 = "test-user-a";
-    final String user2 = "test-user-b";
-
-    long currentTime = Time.now();
-    // Create 20 volumes, 10 for user1 and another 10 for user2.
-    for (int x = 0; x < volCount; x++) {
-      String volumeName;
-      String userName;
-
-      if (x % 2 == 0) {
-        // create volume [test-vol0, test-vol2, ..., test-vol18] for user1
-        userName = user1;
-        volumeName = "test-vol" + x;
-      } else {
-        // create volume [test-vol1, test-vol3, ..., test-vol19] for user2
-        userName = user2;
-        volumeName = "test-vol" + x;
-      }
-      VolumeArgs volumeArgs = VolumeArgs.newBuilder()
-          .setOwner(userName)
-          .setQuota("100TB")
-          .setAdmin("hdfs")
-          .build();
-      clientProtocol.createVolume(volumeName, volumeArgs);
-      OzoneVolume vol = clientProtocol.getVolumeDetails(volumeName);
-      assertNotNull(vol);
-    }
-
-    // list all the volumes belonging to user1
-    List<OzoneVolume> volumeList =
-        clientProtocol.listVolumes(user1, null, null, 100);
-    assertEquals(10, volumeList.size());
-    // verify the owner name and creation time of volume
-    for (OzoneVolume vol : volumeList) {
-      assertTrue(vol.getOwner().equals(user1));
-      assertTrue((vol.getCreationTime()
-          / 1000) >= (currentTime / 1000));
-    }
-
-    // test max key parameter of listing volumes
-    volumeList = clientProtocol.listVolumes(user1, null, null, 2);
-    assertEquals(2, volumeList.size());
-
-    // test prefix parameter of listing volumes
-    volumeList = clientProtocol.listVolumes(user1, "test-vol10", null, 10);
-    assertTrue(volumeList.size() == 1
-        && volumeList.get(0).getName().equals("test-vol10"));
-
-    volumeList = clientProtocol.listVolumes(user1, "test-vol1", null, 10);
-    assertEquals(5, volumeList.size());
-
-    // test start key parameter of listing volumes
-    volumeList = clientProtocol.listVolumes(user2, null, "test-vol15", 10);
-    assertEquals(2, volumeList.size());
-
-    String volumeName;
-    for (int x = 0; x < volCount; x++) {
-      volumeName = "test-vol" + x;
-      clientProtocol.deleteVolume(volumeName);
-    }
-  }
-}

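The volume lifecycle covered above is unchanged behind the RPC-only ClientProtocol. A compressed sketch using the same calls the deleted test exercised (conf is an OzoneConfiguration; owner and quota values are illustrative):

ClientProtocol client = new RpcClient(conf);
String volumeName = OzoneUtils.getRequestID().toLowerCase();
client.createVolume(volumeName, VolumeArgs.newBuilder()
    .setOwner("bilbo").setAdmin("hdfs").setQuota("100TB").build());
client.setVolumeOwner(volumeName, "frodo");                         // change owner
client.setVolumeQuota(volumeName, OzoneQuota.parseQuota("1000MB")); // change quota
client.deleteVolume(volumeName);                                    // clean up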
+ 0 - 155
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolumeRatis.java

@@ -1,155 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.client;
-
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-
-import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.RestClient;
-import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collection;
-
-/** The same as {@link TestVolume} except that this test is Ratis enabled. */
-@Ignore("Disabling Ratis tests for pipeline work.")
-@RunWith(value = Parameterized.class)
-public class TestVolumeRatis {
-  @Rule
-  public Timeout testTimeout = new Timeout(300000);
-  private static ClientProtocol client;
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-
-  @Parameterized.Parameters
-  public static Collection<Object[]> clientProtocol() {
-    Object[][] params = new Object[][] {
-        {RpcClient.class},
-        {RestClient.class}};
-    return Arrays.asList(params);
-  }
-
-  @Parameterized.Parameter
-  @SuppressWarnings("visibilitymodifier")
-  public Class clientProtocol;
-
-  @BeforeClass
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-
-    // This enables Ratis in the cluster.
-    conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true);
-
-    String path = GenericTestUtils
-        .getTempPath(TestVolume.class.getSimpleName());
-    FileUtils.deleteDirectory(new File(path));
-
-    Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
-
-    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
-    cluster.waitForClusterToBeReady();
-    final int port = cluster.getHddsDatanodes().get(0)
-        .getDatanodeDetails()
-        .getPort(DatanodeDetails.Port.Name.REST).getValue();
-  }
-
-  @Before
-  public void setup() throws Exception {
-    if (clientProtocol.equals(RestClient.class)) {
-      client = new RestClient(conf);
-    } else {
-      client = new RpcClient(conf);
-    }
-  }
-
-  @AfterClass
-  public static void shutdown() {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
-  }
-
-  @Test
-  public void testCreateVolume() throws Exception {
-    TestVolume.runTestCreateVolume(client);
-  }
-
-  @Test
-  public void testCreateDuplicateVolume() throws Exception {
-    TestVolume.runTestCreateDuplicateVolume(client);
-  }
-
-  @Test
-  public void testDeleteVolume() throws OzoneException, IOException {
-    TestVolume.runTestDeleteVolume(client);
-  }
-
-  @Test
-  public void testChangeOwnerOnVolume() throws Exception {
-    TestVolume.runTestChangeOwnerOnVolume(client);
-  }
-
-  @Test
-  public void testChangeQuotaOnVolume() throws Exception {
-    TestVolume.runTestChangeQuotaOnVolume(client);
-  }
-
-  // TODO: remove @Ignore below once the problem has been resolved.
-  @Ignore("listVolumes not implemented in DistributedStorageHandler")
-  @Test
-  public void testListVolume() throws OzoneException, IOException {
-    TestVolume.runTestListVolume(client);
-  }
-
-  // TODO: remove @Ignore below once the problem has been resolved.
-  @Ignore("See TestVolume.testListVolumePagination()")
-  @Test
-  public void testListVolumePagination() throws OzoneException, IOException {
-    TestVolume.runTestListVolumePagination(client);
-  }
-
-  // TODO: remove @Ignore below once the problem has been resolved.
-  @Ignore("See TestVolume.testListAllVolumes()")
-  @Test
-  public void testListAllVolumes() throws Exception {
-    TestVolume.runTestListAllVolumes(client);
-  }
-
-  @Ignore("Disabling Ratis tests for pipeline work.")
-  @Test
-  public void testListVolumes() throws Exception {
-    TestVolume.runTestListVolumes(client);
-  }
-}

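After this change, parameterized test classes like the one above have a single remaining client parameter. A sketch of the trimmed parameter list, mirroring what TestVolume earlier in this change already does:

@Parameterized.Parameters
public static Collection<Object[]> clientProtocol() {
  // Only the RPC implementation remains once the REST client is removed.
  return Arrays.asList(new Object[][] {{RpcClient.class}});
}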
+ 0 - 22
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/package-info.java

@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- *     http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-/**
- * REST client tests.
- */
-package org.apache.hadoop.ozone.web.client;

+ 0 - 22
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/package-info.java

@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- *     http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-/**
- * Rest Client Tests.
- */
-package org.apache.hadoop.ozone.web;

+ 0 - 126
hadoop-ozone/objectstore-service/pom.xml

@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-https://maven.apache.org/xsd/maven-4.0.0.xsd">
-  <modelVersion>4.0.0</modelVersion>
-  <parent>
-    <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-ozone</artifactId>
-    <version>0.5.0-SNAPSHOT</version>
-  </parent>
-  <artifactId>hadoop-ozone-objectstore-service</artifactId>
-  <version>0.5.0-SNAPSHOT</version>
-  <description>Apache Hadoop Ozone Object Store REST Service</description>
-  <name>Apache Hadoop Ozone Object Store REST Service</name>
-  <packaging>jar</packaging>
-
-  <dependencies>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-common</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-ozone-client</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <scope>provided</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <scope>provided</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs-client</artifactId>
-      <scope>provided</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>com.squareup.okhttp</groupId>
-          <artifactId>okhttp</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>io.swagger</groupId>
-      <artifactId>swagger-annotations</artifactId>
-      <version>1.5.9</version>
-      <scope>provided</scope>
-    </dependency>
-
-    <dependency>
-      <groupId>org.mockito</groupId>
-      <artifactId>mockito-core</artifactId>
-      <version>2.2.0</version>
-      <scope>test</scope>
-    </dependency>
-
-  </dependencies>
-  <build>
-    <plugins>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-shade-plugin</artifactId>
-        <version>3.1.1</version>
-        <configuration>
-          <shadedArtifactAttached>true</shadedArtifactAttached>
-          <shadedClassifierName>plugin</shadedClassifierName>
-          <artifactSet>
-            <includes>
-              <!-- artifacts with provided scope will be excluded anyway -->
-              <include>*:*:*:*</include>
-            </includes>
-          </artifactSet>
-          <filters>
-            <filter>
-              <artifact>*:*</artifact>
-              <excludes>
-                <exclude>META-INF/*.SF</exclude>
-                <exclude>META-INF/*.DSA</exclude>
-                <exclude>META-INF/*.RSA</exclude>
-              </excludes>
-            </filter>
-          </filters>
-        </configuration>
-        <executions>
-          <execution>
-            <phase>package</phase>
-            <goals>
-              <goal>shade</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-    </plugins>
-  </build>
-</project>

+ 0 - 160
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java

@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdfs.server.datanode;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
-import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.Client;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
-import org.apache.hadoop.ozone.web.ObjectStoreApplication;
-import org.apache.hadoop.ozone.web.handlers.ServiceFilter;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.netty.ObjectStoreJerseyContainer;
-import org.apache.hadoop.ozone.web.storage.DistributedStorageHandler;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import com.sun.jersey.api.container.ContainerFactory;
-import com.sun.jersey.api.core.ApplicationAdapter;
-import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE;
-import static com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRACE_ENABLED_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY;
-import org.apache.ratis.protocol.ClientId;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Implements object store handling within the DataNode process.  This class is
- * responsible for initializing and maintaining the RPC clients and servers and
- * the web application required for the object store implementation.
- */
-public final class ObjectStoreHandler implements Closeable {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(ObjectStoreHandler.class);
-
-  private final ObjectStoreJerseyContainer objectStoreJerseyContainer;
-  private final OzoneManagerProtocol ozoneManagerClient;
-  private final StorageContainerLocationProtocol
-      storageContainerLocationClient;
-
-  private final StorageHandler storageHandler;
-  private ClientId clientId = ClientId.randomId();
-
-  /**
-   * Creates a new ObjectStoreHandler.
-   *
-   * @param conf configuration
-   * @throws IOException if there is an I/O error
-   */
-  public ObjectStoreHandler(Configuration conf) throws IOException {
-    boolean ozoneTrace = conf.getBoolean(OZONE_TRACE_ENABLED_KEY,
-        OZONE_TRACE_ENABLED_DEFAULT);
-
-    // Initialize Jersey container for object store web application.
-    RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long scmVersion =
-        RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
-
-    InetSocketAddress scmAddress =
-        getScmAddressForClients(conf);
-    this.storageContainerLocationClient =
-        TracingUtil.createProxy(
-            new StorageContainerLocationProtocolClientSideTranslatorPB(
-                RPC.getProxy(StorageContainerLocationProtocolPB.class,
-                    scmVersion,
-                    scmAddress, UserGroupInformation.getCurrentUser(), conf,
-                    NetUtils.getDefaultSocketFactory(conf),
-                    Client.getRpcTimeout(conf))),
-            StorageContainerLocationProtocol.class, conf);
-
-    RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
-        ProtobufRpcEngine.class);
-    long omVersion =
-        RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
-    InetSocketAddress omAddress = getOmAddress(conf);
-    this.ozoneManagerClient =
-        TracingUtil.createProxy(
-            new OzoneManagerProtocolClientSideTranslatorPB(
-                RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
-                    omAddress, UserGroupInformation.getCurrentUser(), conf,
-                    NetUtils.getDefaultSocketFactory(conf),
-                    Client.getRpcTimeout(conf)), clientId.toString()),
-            OzoneManagerProtocol.class, conf);
-
-    storageHandler = new DistributedStorageHandler(
-        new OzoneConfiguration(conf),
-        TracingUtil.createProxy(storageContainerLocationClient,
-            StorageContainerLocationProtocol.class, conf),
-        this.ozoneManagerClient);
-    ApplicationAdapter aa =
-        new ApplicationAdapter(new ObjectStoreApplication());
-    Map<String, Object> settingsMap = new HashMap<>();
-    settingsMap.put(PROPERTY_CONTAINER_REQUEST_FILTERS,
-        ServiceFilter.class.getCanonicalName());
-    settingsMap.put(FEATURE_TRACE, ozoneTrace);
-    aa.setPropertiesAndFeatures(settingsMap);
-    this.objectStoreJerseyContainer = ContainerFactory.createContainer(
-        ObjectStoreJerseyContainer.class, aa);
-    this.objectStoreJerseyContainer.setStorageHandler(storageHandler);
-  }
-
-  /**
-   * Returns the initialized web application container.
-   *
-   * @return initialized web application container
-   */
-  public ObjectStoreJerseyContainer getObjectStoreJerseyContainer() {
-    return this.objectStoreJerseyContainer;
-  }
-
-  /**
-   * Returns the storage handler.
-   *
-   * @return returns the storage handler
-   */
-  public StorageHandler getStorageHandler() {
-    return this.storageHandler;
-  }
-
-  @Override
-  public void close() {
-    LOG.info("Closing ObjectStoreHandler.");
-    storageHandler.close();
-    IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient);
-    IOUtils.cleanupWithLogger(LOG, ozoneManagerClient);
-  }
-}

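The OM client wiring performed by this handler is the same wiring the surviving RPC client path uses. A condensed sketch of that proxy construction, copied in shape from the deleted code above (conf is a Configuration; error handling omitted):

RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
    ProtobufRpcEngine.class);
long omVersion = RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
// Wrap the raw protobuf proxy in the client-side translator, then add tracing.
OzoneManagerProtocol omClient = TracingUtil.createProxy(
    new OzoneManagerProtocolClientSideTranslatorPB(
        RPC.getProxy(OzoneManagerProtocolPB.class, omVersion,
            getOmAddress(conf), UserGroupInformation.getCurrentUser(), conf,
            NetUtils.getDefaultSocketFactory(conf),
            Client.getRpcTimeout(conf)),
        ClientId.randomId().toString()),
    OzoneManagerProtocol.class, conf);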
+ 0 - 22
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/hdfs/server/datanode/package-info.java

@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode;
-
-/**
- * Object store related service inside the datanode.
- */

+ 0 - 222
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/OzoneRestUtils.java

@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Request;
-import javax.ws.rs.core.Response;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.List;
-import java.util.Locale;
-import java.util.TimeZone;
-import java.util.UUID;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
-import org.apache.hadoop.ozone.client.io.LengthInputStream;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-import org.apache.hadoop.util.Time;
-
-import com.google.common.base.Preconditions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Set of Utility functions used in ozone.
- */
-@InterfaceAudience.Private
-public final class OzoneRestUtils {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      OzoneRestUtils.class);
-
-
-  private OzoneRestUtils() {
-    // Never constructed
-  }
-
-  /**
-   * Date format used in Ozone. The ThreadLocal wrapper makes it thread-safe.
-   */
-  private static final ThreadLocal<SimpleDateFormat> DATE_FORMAT =
-      new ThreadLocal<SimpleDateFormat>() {
-    @Override
-    protected SimpleDateFormat initialValue() {
-      SimpleDateFormat format = new SimpleDateFormat(
-          OzoneConsts.OZONE_DATE_FORMAT, Locale.US);
-      format.setTimeZone(TimeZone.getTimeZone(OzoneConsts.OZONE_TIME_ZONE));
-
-      return format;
-    }
-  };
-
-  /**
-   * Verifies that a bucket or volume name is a valid DNS name.
-   *
-   * @param resName Bucket or volume Name to be validated
-   *
-   * @throws IllegalArgumentException
-   */
-  public static void verifyResourceName(String resName)
-      throws IllegalArgumentException {
-    HddsClientUtils.verifyResourceName(resName);
-  }
-
-  /**
-   * Returns a random Request ID.
-   *
-   * Request ID is returned to the client as well as flows through the system
-   * facilitating debugging on why a certain request failed.
-   *
-   * @return String random request ID
-   */
-  public static String getRequestID() {
-    return UUID.randomUUID().toString();
-  }
-
-  /**
-   * Basic validate routine to make sure that all the
-   * required headers are in place.
-   *
-   * @param request - http request
-   * @param headers - http headers
-   * @param reqId - request id
-   * @param resource - Resource Name
-   * @param hostname - Hostname
-   *
-   * @throws OzoneException
-   */
-  public static void validate(Request request, HttpHeaders headers,
-                              String reqId, String resource, String hostname)
-      throws OzoneException {
-
-    List<String> ozHeader =
-        headers.getRequestHeader(Header.OZONE_VERSION_HEADER);
-    if (ozHeader == null) {
-      throw ErrorTable
-          .newError(ErrorTable.MISSING_VERSION, reqId, resource, hostname);
-    }
-
-    List<String> date = headers.getRequestHeader(HttpHeaders.DATE);
-    if (date == null) {
-      throw ErrorTable
-          .newError(ErrorTable.MISSING_DATE, reqId, resource, hostname);
-    }
-
-    /*
-    TODO :
-    Ignore the results for time being. Eventually we can validate if the
-    request Date time is too skewed and reject if it is so.
-    */
-    parseDate(date.get(0), reqId, resource, hostname);
-
-  }
-
-  /**
-   * Parses the Date String coming from the Users.
-   *
-   * @param dateString - Date String
-   * @param reqID - Ozone Request ID
-   * @param resource - Resource Name
-   * @param hostname - HostName
-   *
-   * @return - Date
-   *
-   * @throws OzoneException - in case of parsing error
-   */
-  public static synchronized Date parseDate(String dateString, String reqID,
-                                            String resource, String hostname)
-      throws OzoneException {
-    try {
-      return DATE_FORMAT.get().parse(dateString);
-    } catch (ParseException ex) {
-      OzoneException exp =
-          ErrorTable.newError(ErrorTable.BAD_DATE, reqID, resource, hostname);
-      exp.setMessage(ex.getMessage());
-      throw exp;
-    }
-  }
-
-  /**
-   * Returns a response with appropriate OZONE headers and payload.
-   *
-   * @param args - UserArgs or Inherited class
-   * @param statusCode - HttpStatus code
-   * @param payload - Content Body
-   *
-   * @return JAX-RS Response
-   */
-  public static Response getResponse(UserArgs args, int statusCode,
-                                     String payload) {
-    String date = DATE_FORMAT.get().format(new Date(Time.now()));
-    return Response.ok(payload)
-        .header(Header.OZONE_SERVER_NAME, args.getHostName())
-        .header(Header.OZONE_REQUEST_ID, args.getRequestID())
-        .header(HttpHeaders.DATE, date).status(statusCode).build();
-  }
-
-  /**
-   * Returns a response with appropriate OZONE headers and payload.
-   *
-   * @param args - UserArgs or Inherited class
-   * @param statusCode - HttpStatus code
-   * @param stream InputStream
-   *
-   * @return JAX-RS Response
-   */
-  public static Response getResponse(UserArgs args, int statusCode,
-                                     LengthInputStream stream) {
-    String date = DATE_FORMAT.get().format(new Date(Time.now()));
-    return Response.ok(stream, MediaType.APPLICATION_OCTET_STREAM)
-        .header(Header.OZONE_SERVER_NAME, args.getHostName())
-        .header(Header.OZONE_REQUEST_ID, args.getRequestID())
-        .header(HttpHeaders.DATE, date).status(statusCode)
-        .header(HttpHeaders.CONTENT_LENGTH, stream.getLength())
-        .build();
-
-  }
-
-  /**
-   * Converts a time in milliseconds to the human-readable format used in ozone.
-   * @return a human-readable string for the input time
-   */
-  public static String formatTime(long millis) {
-    return DATE_FORMAT.get().format(millis);
-  }
-
-  /**
-   * Converts a time in the ozone date format to milliseconds.
-   * @return time in milliseconds
-   */
-  public static long formatDate(String date) throws ParseException {
-    Preconditions.checkNotNull(date, "Date string should not be null.");
-    return DATE_FORMAT.get().parse(date).getTime();
-  }
-
-}
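For reference, the utility above kept one SimpleDateFormat per thread because that class is not thread-safe. Below is a minimal standalone sketch of the same pattern; the HTTP-style date pattern and the class name are assumptions for illustration, not the removed code itself.

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;
import java.util.TimeZone;

public final class DateFormatSketch {

  // One formatter per thread: SimpleDateFormat is not thread-safe, which is
  // why the removed utility also kept a thread-local instance.
  private static final ThreadLocal<SimpleDateFormat> DATE_FORMAT =
      ThreadLocal.withInitial(() -> {
        SimpleDateFormat format =
            new SimpleDateFormat("EEE, dd MMM yyyy HH:mm:ss zzz", Locale.US);
        format.setTimeZone(TimeZone.getTimeZone("GMT"));
        return format;
      });

  // Millis to human-readable string, mirroring formatTime above.
  public static String format(long millis) {
    return DATE_FORMAT.get().format(new Date(millis));
  }

  // String back to millis, mirroring formatDate above.
  public static long parse(String date) throws ParseException {
    return DATE_FORMAT.get().parse(date).getTime();
  }

  private DateFormatSketch() {
  }
}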

+ 0 - 22
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/package-info.java

@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone;
-
-/**
- * Ozone related generic classes.
- */

+ 0 - 59
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/ObjectStoreApplication.java

@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web;
-
-import org.apache.hadoop.ozone.client.rest.OzoneExceptionMapper;
-import org.apache.hadoop.ozone.web.handlers.BucketHandler;
-import org.apache.hadoop.ozone.web.handlers.KeyHandler;
-import org.apache.hadoop.ozone.web.handlers.ServiceFilter;
-import org.apache.hadoop.ozone.web.handlers.VolumeHandler;
-import org.apache.hadoop.ozone.web.messages.LengthInputStreamMessageBodyWriter;
-import org.apache.hadoop.ozone.web.messages.StringMessageBodyWriter;
-
-import javax.ws.rs.core.Application;
-import java.util.HashSet;
-import java.util.Set;
-
-/**
- * Ozone Application.
- */
-public class ObjectStoreApplication extends Application {
-  public ObjectStoreApplication() {
-    super();
-  }
-
-  @Override
-  public Set<Class<?>> getClasses() {
-    HashSet<Class<?>> set = new HashSet<>();
-    set.add(BucketHandler.class);
-    set.add(VolumeHandler.class);
-    set.add(KeyHandler.class);
-    set.add(OzoneExceptionMapper.class);
-    set.add(LengthInputStreamMessageBodyWriter.class);
-    set.add(StringMessageBodyWriter.class);
-    return set;
-  }
-
-  @Override
-  public Set<Object> getSingletons() {
-    HashSet<Object> set = new HashSet<>();
-    // Singletons must be registered as instances, not Class objects.
-    set.add(new ServiceFilter());
-    return set;
-  }
-}
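The Application above registers per-request handlers by class and shared components as singletons. A minimal self-contained sketch of that JAX-RS wiring follows; the resource and class names are hypothetical.

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Application;

public class SketchApplication extends Application {

  /** A trivial per-request resource. */
  @Path("/hello")
  public static class HelloResource {
    @GET
    public String hello() {
      return "hello";
    }
  }

  @Override
  public Set<Class<?>> getClasses() {
    // Classes listed here are instantiated by the container per request.
    Set<Class<?>> perRequest = new HashSet<>();
    perRequest.add(HelloResource.class);
    return perRequest;
  }

  @Override
  public Set<Object> getSingletons() {
    // Instances listed here are created once and shared across requests.
    return Collections.emptySet();
  }
}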

+ 0 - 89
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/OzoneHddsDatanodeService.java

@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package org.apache.hadoop.ozone.web;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.ozone.HddsDatanodeService;
-import org.apache.hadoop.ozone.web.netty.ObjectStoreRestHttpServer;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.util.ServicePlugin;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * DataNode service plugin implementation to start ObjectStore rest server.
- */
-public class OzoneHddsDatanodeService implements ServicePlugin {
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(OzoneHddsDatanodeService.class);
-
-  private Configuration conf;
-  private ObjectStoreHandler handler;
-  private ObjectStoreRestHttpServer objectStoreRestHttpServer;
-
-  @Override
-  public void start(Object service) {
-    if (service instanceof HddsDatanodeService) {
-      try {
-        HddsDatanodeService hddsDatanodeService = (HddsDatanodeService) service;
-        conf = hddsDatanodeService.getConf();
-        handler = new ObjectStoreHandler(conf);
-        objectStoreRestHttpServer = new ObjectStoreRestHttpServer(
-            conf, null, handler);
-        objectStoreRestHttpServer.start();
-        DatanodeDetails.Port restPort = DatanodeDetails.newPort(
-            DatanodeDetails.Port.Name.REST,
-            objectStoreRestHttpServer.getHttpAddress().getPort());
-        hddsDatanodeService.getDatanodeDetails().setPort(restPort);
-
-      } catch (IOException e) {
-        throw new RuntimeException("Can't start the Object Store Rest server",
-            e);
-      }
-    } else {
-      LOG.error("Not starting {}, as the plugin is not invoked through {}",
-          OzoneHddsDatanodeService.class.getSimpleName(),
-          HddsDatanodeService.class.getSimpleName());
-    }
-  }
-
-
-  @Override
-  public void stop() {
-    try {
-      if (handler != null) {
-        handler.close();
-      }
-    } catch (Exception e) {
-      throw new RuntimeException("Can't stop the Object Store Rest server", e);
-    }
-  }
-
-  @Override
-  public void close() {
-    IOUtils.closeQuietly(objectStoreRestHttpServer);
-    IOUtils.closeQuietly(handler);
-  }
-
-}
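The plugin above follows the usual service-plugin lifecycle: start() receives the host service and verifies its type, stop() halts the server, close() releases resources. A hedged standalone sketch of that lifecycle follows; all names below are hypothetical stand-ins, not the HDFS ServicePlugin API.

// Hypothetical stand-ins for the plugin lifecycle shown above; this is not
// the HDFS ServicePlugin API, just the same shape.
interface PluginSketch extends AutoCloseable {
  void start(Object service);
  void stop();
}

final class RestServerPluginSketch implements PluginSketch {

  private String boundHost;

  @Override
  public void start(Object service) {
    // Real plugins verify the host type before casting, as above.
    if (service instanceof String) {
      boundHost = (String) service;
      System.out.println("REST server started on " + boundHost);
    } else {
      System.err.println("Not starting: unexpected host " + service);
    }
  }

  @Override
  public void stop() {
    System.out.println("REST server stopped");
  }

  @Override
  public void close() {
    boundHost = null;
  }

  public static void main(String[] args) throws Exception {
    try (RestServerPluginSketch plugin = new RestServerPluginSketch()) {
      plugin.start("datanode-1");
      plugin.stop();
    } // close() runs here, mirroring IOUtils.closeQuietly above
  }
}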

+ 0 - 225
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/exceptions/ErrorTable.java

@@ -1,225 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.exceptions;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.web.handlers.UserArgs;
-
-import static java.net.HttpURLConnection.HTTP_BAD_REQUEST;
-import static java.net.HttpURLConnection.HTTP_CONFLICT;
-import static java.net.HttpURLConnection.HTTP_FORBIDDEN;
-import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR;
-import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
-import static java.net.HttpURLConnection.HTTP_UNAUTHORIZED;
-
-/**
- * Error Table represents the Errors from Ozone Rest API layer.
- *
- * Please note: the errors in this table are sorted by their HTTP error
- * codes; if you add new error codes to this table, please follow the same
- * convention.
- */
-@InterfaceAudience.Private
-public final class ErrorTable {
-
-  /* Error 400 */
-  public static final OzoneException MISSING_VERSION =
-      new OzoneException(HTTP_BAD_REQUEST, "missingVersion",
-                         "x-ozone-version header is required.");
-
-  public static final OzoneException MISSING_DATE =
-      new OzoneException(HTTP_BAD_REQUEST, "missingDate",
-                         "Date header is required.");
-
-  public static final OzoneException BAD_DATE =
-      new OzoneException(HTTP_BAD_REQUEST, "badDate",
-                         "Unable to parse date format.");
-
-  public static final OzoneException MALFORMED_QUOTA =
-      new OzoneException(HTTP_BAD_REQUEST, "malformedQuota",
-                         "Invalid quota specified.");
-
-  public static final OzoneException MALFORMED_ACL =
-      new OzoneException(HTTP_BAD_REQUEST, "malformedACL",
-                         "Invalid ACL specified.");
-
-
-  public static final OzoneException INVALID_VOLUME_NAME =
-      new OzoneException(HTTP_BAD_REQUEST, "invalidVolumeName",
-                         "Invalid volume name.");
-
-  public static final OzoneException INVALID_QUERY_PARAM =
-      new OzoneException(HTTP_BAD_REQUEST, "invalidQueryParam",
-                         "Invalid query parameter.");
-
-  public static final OzoneException INVALID_RESOURCE_NAME =
-      new OzoneException(HTTP_BAD_REQUEST, "invalidResourceName",
-                         "Invalid volume, bucket or key name.");
-
-  public static final OzoneException INVALID_BUCKET_NAME =
-      new OzoneException(HTTP_BAD_REQUEST, "invalidBucketName",
-                         "Invalid bucket name.");
-
-  public static final OzoneException INVALID_KEY =
-      new OzoneException(HTTP_BAD_REQUEST, "invalidKey", "Invalid key.");
-
-  public static final OzoneException INVALID_REQUEST =
-      new OzoneException(HTTP_BAD_REQUEST, "invalidRequest",
-                         "Error in request.");
-
-  public static final OzoneException MALFORMED_BUCKET_VERSION =
-      new OzoneException(HTTP_BAD_REQUEST, "malformedBucketVersion",
-                         "Malformed bucket version or version not unique.");
-
-  public static final OzoneException MALFORMED_STORAGE_TYPE =
-      new OzoneException(HTTP_BAD_REQUEST, "malformedStorageType",
-                         "Invalid storage Type specified.");
-
-  public static final OzoneException MALFORMED_STORAGE_CLASS =
-      new OzoneException(HTTP_BAD_REQUEST, "malformedStorageClass",
-                         "Invalid storage class specified.");
-
-  public static final OzoneException BAD_DIGEST =
-      new OzoneException(HTTP_BAD_REQUEST, "badDigest",
-                         "Content MD5 does not match.");
-
-  public static final OzoneException INCOMPLETE_BODY =
-      new OzoneException(HTTP_BAD_REQUEST, "incompleteBody",
-                         "Content length does not match stream size.");
-
-  public static final OzoneException BAD_AUTHORIZATION =
-      new OzoneException(HTTP_BAD_REQUEST, "badAuthorization",
-                         "Missing authorization or authorization has to be " +
-                             "unique.");
-
-  public static final OzoneException BAD_PROPERTY =
-      new OzoneException(HTTP_BAD_REQUEST, "unknownProperty",
-          "This property is not supported by this server.");
-
-  /* Error 401 */
-  public static final OzoneException UNAUTHORIZED =
-      new OzoneException(HTTP_UNAUTHORIZED, "Unauthorized",
-                         "Access token is missing or invalid token.");
-
-  /* Error 403 */
-  public static final OzoneException ACCESS_DENIED =
-      new OzoneException(HTTP_FORBIDDEN, "accessDenied", "Access denied.");
-
-  /* Error 404 */
-  public static final OzoneException USER_NOT_FOUND =
-      new OzoneException(HTTP_NOT_FOUND, "userNotFound", "Invalid user name.");
-
-  public static final OzoneException VOLUME_NOT_FOUND =
-      new OzoneException(HTTP_NOT_FOUND, "volumeNotFound", "No such volume.");
-
-  /* Error 409 */
-  public static final OzoneException VOLUME_ALREADY_EXISTS =
-      new OzoneException(HTTP_CONFLICT, "volumeAlreadyExists",
-                         "Duplicate volume name.");
-
-  public static final OzoneException BUCKET_ALREADY_EXISTS =
-      new OzoneException(HTTP_CONFLICT, "bucketAlreadyExists",
-                         "Duplicate bucket name.");
-
-  public static final OzoneException VOLUME_NOT_EMPTY =
-      new OzoneException(HTTP_CONFLICT, "volumeNotEmpty",
-                         "Volume must not have any buckets.");
-
-  public static final OzoneException BUCKET_NOT_EMPTY =
-      new OzoneException(HTTP_CONFLICT, "bucketNotEmpty",
-                         "Bucket must not have any keys.");
-
-  public static final OzoneException KEY_OPERATION_CONFLICT =
-      new OzoneException(HTTP_CONFLICT, "keyOperationConflict",
-                         "Conflicting operation on the specified key is going" +
-                             " on.");
-
-  /* Error 500 */
-  public static final OzoneException SERVER_ERROR =
-      new OzoneException(HTTP_INTERNAL_ERROR, "internalServerError",
-                         "Internal server error.");
-
-  /**
-   * Create a new instance of Error.
-   *
-   * @param e Error Template
-   * @param requestID Request ID
-   * @param resource Resource Name
-   * @param hostID hostID
-   *
-   * @return creates a new instance of error based on the template
-   */
-  public static OzoneException newError(OzoneException e, String requestID,
-                                        String resource, String hostID) {
-    OzoneException err =
-        new OzoneException(e.getHttpCode(), e.getShortMessage(),
-                           e.getMessage());
-    err.setRequestId(requestID);
-    err.setResource(resource);
-    err.setHostID(hostID);
-    return err;
-  }
-
-  /**
-   * Create new instance of Error.
-   *
-   * @param e - Error Template
-   * @param args - Args
-   *
-   * @return Ozone Exception
-   */
-  public static OzoneException newError(OzoneException e, UserArgs args) {
-    OzoneException err =
-        new OzoneException(e.getHttpCode(), e.getShortMessage(),
-                           e.getMessage());
-    if (args != null) {
-      err.setRequestId(args.getRequestID());
-      err.setResource(args.getResourceName());
-      err.setHostID(args.getHostName());
-    }
-    return err;
-  }
-
-  /**
-   * Create new instance of Error.
-   *
-   * @param e - Error Template
-   * @param args - Args
-   * @param ex Exception
-   *
-   * @return Ozone Exception
-   */
-  public static OzoneException newError(OzoneException e, UserArgs args,
-                                        Exception ex) {
-    OzoneException err =
-        new OzoneException(e.getHttpCode(), e.getShortMessage(), ex);
-
-    if(args != null) {
-      err.setRequestId(args.getRequestID());
-      err.setResource(args.getResourceName());
-      err.setHostID(args.getHostName());
-    }
-    err.setMessage(ex.getMessage());
-    return err;
-  }
-
-  private ErrorTable() {
-    // Never constructed.
-  }
-}
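Callers never throw the shared constants above directly; they clone them with per-request details through newError, which keeps the templates immutable. A short usage sketch compiled against the removed classes; the request values are hypothetical.

import org.apache.hadoop.ozone.client.rest.OzoneException;
import org.apache.hadoop.ozone.web.exceptions.ErrorTable;

public final class ErrorTableUsage {

  // Rejects a request by cloning the shared template with per-request
  // details; the static constants in ErrorTable are never mutated.
  public static void rejectInvalidBucket(String requestId, String resource,
      String hostName) throws OzoneException {
    throw ErrorTable.newError(ErrorTable.INVALID_BUCKET_NAME, requestId,
        resource, hostName);
  }

  private ErrorTableUsage() {
  }
}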

+ 0 - 22
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/exceptions/package-info.java

@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.web.exceptions;
-
-/**
- This package contains the exception classes used by the Ozone REST layer.
- */

+ 0 - 190
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketHandler.java

@@ -1,190 +0,0 @@
-/*
- *
- *  Licensed to the Apache Software Foundation (ASF) under one
- *  or more contributor license agreements.  See the NOTICE file
- *  distributed with this work for additional information
- *  regarding copyright ownership.  The ASF licenses this file
- *  to you under the Apache License, Version 2.0 (the
- *  "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.web.handlers;
-
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.OzoneRestUtils;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
-import org.apache.hadoop.ozone.web.interfaces.Bucket;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.slf4j.MDC;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Request;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import java.io.IOException;
-
-import static java.net.HttpURLConnection.HTTP_CREATED;
-import static java.net.HttpURLConnection.HTTP_OK;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_FUNCTION;
-
-
-/**
- * Bucket Class handles all ozone Bucket related actions.
- */
-public class BucketHandler implements Bucket {
-  /**
-   * createBucket call handles the POST request for Creating a Bucket.
-   *
-   * @param volume - Volume name
-   * @param bucket - Bucket Name
-   * @param req - Http request
-   * @param info - Uri Info
-   * @param headers - Http headers
-   *
-   * @return Response
-   *
-   * @throws OzoneException
-   */
-  @Override
-  public Response createBucket(String volume, String bucket, Request req,
-                               UriInfo info, HttpHeaders headers)
-      throws OzoneException {
-    MDC.put(OZONE_FUNCTION, "createBucket");
-    return new BucketProcessTemplate() {
-      @Override
-      public Response doProcess(BucketArgs args)
-          throws OzoneException, IOException {
-        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-        args.setVersioning(getVersioning(args));
-        args.setStorageType(getStorageType(args));
-        fs.createBucket(args);
-        return OzoneRestUtils.getResponse(args, HTTP_CREATED, "");
-      }
-    }.handleCall(volume, bucket, req, info, headers);
-  }
-
-  /**
-   * updateBucket call handles the PUT request for updating a Bucket.
-   *
-   * There are currently only three possible actions with updateBucket:
-   * add/remove on ACLs, bucket versioning, and storage type.
-   * A call with any other action just returns 200 OK.
-   *
-   * @param volume - Storage volume name
-   * @param bucket - Bucket name
-   * @param req - Http request
-   * @param info - Uri Info
-   * @param headers - Http headers
-   *
-   * @return Response
-   *
-   * @throws OzoneException
-   */
-  @Override
-  public Response updateBucket(String volume, String bucket, Request req,
-                               UriInfo info, HttpHeaders headers)
-      throws OzoneException {
-    MDC.put(OZONE_FUNCTION, "updateBucket");
-    return new BucketProcessTemplate() {
-      @Override
-      public Response doProcess(BucketArgs args)
-          throws OzoneException, IOException {
-        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-        args.setVersioning(getVersioning(args));
-        args.setStorageType(getStorageType(args));
-        if (args.getVersioning() != OzoneConsts.Versioning.NOT_DEFINED) {
-          fs.setBucketVersioning(args);
-        }
-
-        if (args.getStorageType() != null) {
-          fs.setBucketStorageClass(args);
-        }
-        return OzoneRestUtils.getResponse(args, HTTP_OK, "");
-      }
-    }.handleCall(volume, bucket, req, info, headers);
-  }
-
-  /**
-   * Deletes an empty bucket.
-   *
-   * @param volume Volume name
-   * @param bucket Bucket Name
-   * @param req - Http request
-   * @param info - Uri Info
-   * @param headers - Http headers
-   *
-   * @return Response
-   *
-   * @throws OzoneException
-   */
-  @Override
-  public Response deleteBucket(String volume, String bucket, Request req,
-                               UriInfo info, HttpHeaders headers)
-      throws OzoneException {
-    MDC.put(OZONE_FUNCTION, "deleteBucket");
-    return new BucketProcessTemplate() {
-      @Override
-      public Response doProcess(BucketArgs args)
-          throws OzoneException, IOException {
-        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-        fs.deleteBucket(args);
-        return OzoneRestUtils.getResponse(args, HTTP_OK, "");
-      }
-    }.handleCall(volume, bucket, req, info, headers);
-  }
-
-  /**
-   * listBucket returns either the list of keys in a bucket or the bucket's
-   * info, depending on the query parameter.
-   *
-   * @param volume - Storage Volume Name
-   * @param bucket - Bucket Name
-   * @param info - Query parameter selecting keys or bucket info
-   * @param prefix - Prefix for the keys to be fetched
-   * @param maxKeys - Maximum number of keys to return
-   * @param startPage - Continuation Token
-   * @param req - Http request
-   * @param uriInfo - Uri Info
-   * @param headers - Http headers
-   *
-   * @return - Json Body
-   *
-   * @throws OzoneException
-   */
-  @Override
-  public Response listBucket(String volume, String bucket, final String info,
-                             final String prefix, final int maxKeys,
-                             final String startPage, Request req,
-                             UriInfo uriInfo, HttpHeaders headers)
-      throws OzoneException {
-    MDC.put(OZONE_FUNCTION, "listBucket");
-    return new BucketProcessTemplate() {
-      @Override
-      public Response doProcess(BucketArgs args)
-          throws OzoneException, IOException {
-        switch (info) {
-        case Header.OZONE_INFO_QUERY_KEY:
-          ListArgs listArgs = new ListArgs(args, prefix, maxKeys, startPage);
-          return getBucketKeysList(listArgs);
-        case Header.OZONE_INFO_QUERY_BUCKET:
-          return getBucketInfoResponse(args);
-        default:
-          OzoneException ozException =
-              ErrorTable.newError(ErrorTable.INVALID_QUERY_PARAM, args);
-          ozException.setMessage("Unrecognized query param : " + info);
-          throw ozException;
-        }
-      }
-    }.handleCall(volume, bucket, req, uriInfo, headers);
-  }
-}
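Each verb above supplies only a doProcess body and inherits validation and error mapping from BucketProcessTemplate. The underlying template-method shape, reduced to a generic self-contained sketch with hypothetical names:

// The template owns the shared steps; each operation supplies only a body.
abstract class RequestTemplate<A, R> {

  final R handle(A args) {
    validate(args);             // shared pre-checks: headers, names, auth
    try {
      return doProcess(args);   // operation-specific logic
    } catch (RuntimeException e) {
      throw mapError(e);        // shared error translation
    }
  }

  void validate(A args) {
    // No-op by default; real templates validate names and headers here.
  }

  RuntimeException mapError(RuntimeException e) {
    return e;
  }

  abstract R doProcess(A args);
}

// One concrete operation, analogous to createBucket above.
class CreateBucketOp extends RequestTemplate<String, String> {
  @Override
  String doProcess(String bucketName) {
    return "created " + bucketName;
  }
}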

+ 0 - 294
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java

@@ -1,294 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.handlers;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Request;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import java.io.IOException;
-import java.nio.file.DirectoryNotEmptyException;
-import java.nio.file.FileAlreadyExistsException;
-import java.nio.file.NoSuchFileException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hdds.protocol.StorageType;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.OzoneRestUtils;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.interfaces.UserAuth;
-import org.apache.hadoop.ozone.web.response.BucketInfo;
-import org.apache.hadoop.ozone.web.response.ListKeys;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-
-import static java.net.HttpURLConnection.HTTP_OK;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_COMPONENT;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_REQUEST;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RESOURCE;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_USER;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
-
-/**
- * This class abstracts away the repetitive tasks in
- * Bucket handling code.
- */
-public abstract class BucketProcessTemplate {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(BucketProcessTemplate.class);
-
-  /**
-   * This function serves as the common error handling function
-   * for all bucket related operations.
-   *
-   * @param volume - Volume Name
-   * @param bucket - Bucket Name
-   * @param request - Http Request
-   * @param uriInfo - Http Uri
-   * @param headers - Http Headers
-   *
-   * @return Response
-   *
-   * @throws OzoneException
-   */
-  public Response handleCall(String volume, String bucket, Request request,
-                             UriInfo uriInfo, HttpHeaders headers)
-      throws OzoneException {
-    // TODO : Add logging
-    String reqID = OzoneUtils.getRequestID();
-    String hostName = OzoneUtils.getHostName();
-    MDC.put(OZONE_COMPONENT, "ozone");
-    MDC.put(OZONE_REQUEST, reqID);
-    UserArgs userArgs = null;
-    try {
-      userArgs = new UserArgs(reqID, hostName, request, uriInfo, headers);
-
-      OzoneRestUtils.validate(request, headers, reqID, bucket, hostName);
-      OzoneUtils.verifyResourceName(bucket);
-
-      UserAuth auth = UserHandlerBuilder.getAuthHandler();
-      userArgs.setUserName(auth.getUser(userArgs));
-      MDC.put(OZONE_USER, userArgs.getUserName());
-
-      BucketArgs args = new BucketArgs(volume, bucket, userArgs);
-      MDC.put(OZONE_RESOURCE, args.getResourceName());
-      Response response =  doProcess(args);
-      LOG.debug("Success");
-      MDC.clear();
-      return response;
-
-    } catch (IllegalArgumentException argEx) {
-      LOG.error("Invalid bucket.", argEx);
-      throw ErrorTable.newError(ErrorTable.INVALID_BUCKET_NAME, userArgs,
-          argEx);
-    } catch (IOException fsExp) {
-      handleIOException(bucket, reqID, hostName, fsExp);
-    }
-    return null;
-  }
-
-  /**
-   * Converts FileSystem IO exceptions to OZONE exceptions.
-   *
-   * @param bucket Name of the bucket
-   * @param reqID Request ID
-   * @param hostName Machine Name
-   * @param fsExp Exception
-   *
-   * @throws OzoneException
-   */
-  void handleIOException(String bucket, String reqID, String hostName,
-                         IOException fsExp) throws OzoneException {
-    LOG.error("IOException:", fsExp);
-
-    OzoneException exp = null;
-    if (fsExp instanceof FileAlreadyExistsException) {
-      exp = ErrorTable
-          .newError(ErrorTable.BUCKET_ALREADY_EXISTS, reqID, bucket, hostName);
-    }
-
-    if (fsExp instanceof DirectoryNotEmptyException) {
-      exp = ErrorTable
-          .newError(ErrorTable.BUCKET_NOT_EMPTY, reqID, bucket, hostName);
-    }
-
-    if (fsExp instanceof NoSuchFileException) {
-      exp = ErrorTable
-          .newError(ErrorTable.INVALID_BUCKET_NAME, reqID, bucket, hostName);
-    }
-
-    // By default we don't handle this exception yet; report an
-    // internal server error.
-    if (exp == null) {
-      exp =
-          ErrorTable.newError(ErrorTable.SERVER_ERROR, reqID, bucket, hostName);
-      if (fsExp != null) {
-        exp.setMessage(fsExp.getMessage());
-      }
-    }
-    throw exp;
-  }
-
-  /**
-   * Abstract function that gets implemented in the BucketHandler functions.
-   * This function will just deal with the core file system related logic
-   * and will rely on handleCall function for repetitive error checks
-   *
-   * @param args - parsed bucket args, name, userName, ACLs etc
-   *
-   * @return Response
-   *
-   * @throws OzoneException
-   * @throws IOException
-   */
-  public abstract Response doProcess(BucketArgs args)
-      throws OzoneException, IOException;
-
-
-  /**
-   * Returns the ACL strings if available.
-   * This function ignores all ACLs that are not prefixed with the given
-   * tag (for example ADD or REMOVE).
-   *
-   * @param args - BucketArgs
-   * @param tag - Tag for different type of acls
-   *
-   * @return List of ACLs
-   *
-   */
-  List<String> getAcls(BucketArgs args, String tag)  {
-    List<String> aclStrings =
-        args.getHeaders().getRequestHeader(Header.OZONE_ACLS);
-    List<String> filteredSet = null;
-    if (aclStrings != null) {
-      filteredSet = new ArrayList<>();
-      for (String s : aclStrings) {
-        if (s.startsWith(tag)) {
-          filteredSet.add(s.replaceFirst(tag, ""));
-        }
-      }
-    }
-    return filteredSet;
-  }
-
-  /**
-   * Returns bucket versioning info.
-   *
-   * @param args - BucketArgs
-   *
-   * @return - Versioning, or null if no versioning header is present
-   *
-   * @throws OzoneException
-   */
-  OzoneConsts.Versioning getVersioning(BucketArgs args) throws OzoneException {
-
-    List<String> versionStrings =
-        args.getHeaders().getRequestHeader(Header.OZONE_BUCKET_VERSIONING);
-    if (versionStrings == null) {
-      return null;
-    }
-
-    if (versionStrings.size() > 1) {
-      OzoneException ex =
-          ErrorTable.newError(ErrorTable.MALFORMED_BUCKET_VERSION, args);
-      ex.setMessage("Exactly one bucket version header required");
-      throw ex;
-    }
-
-    String version = versionStrings.get(0);
-    try {
-      return OzoneConsts.Versioning.valueOf(version);
-    } catch (IllegalArgumentException ex) {
-      LOG.debug("Malformed Version. version: {}", version);
-      throw ErrorTable.newError(ErrorTable.MALFORMED_BUCKET_VERSION, args, ex);
-    }
-  }
-
-
-  /**
-   * Returns the storage type if available; otherwise returns null.
-   *
-   * @param args - bucketArgs
-   *
-   * @return StorageType
-   *
-   * @throws OzoneException
-   */
-  StorageType getStorageType(BucketArgs args) throws OzoneException {
-    List<String> storageClassString = null;
-    try {
-      storageClassString =
-          args.getHeaders().getRequestHeader(Header.OZONE_STORAGE_TYPE);
-
-      if (storageClassString == null) {
-        return null;
-      }
-      if (storageClassString.size() > 1) {
-        OzoneException ex =
-            ErrorTable.newError(ErrorTable.MALFORMED_STORAGE_TYPE, args);
-        ex.setMessage("Exactly one storage class header required");
-        throw ex;
-      }
-      return StorageType.valueOf(storageClassString.get(0).toUpperCase());
-    } catch (IllegalArgumentException ex) {
-      if(storageClassString != null) {
-        LOG.debug("Malformed storage type. Type: {}",
-            storageClassString.get(0).toUpperCase());
-      }
-      throw ErrorTable.newError(ErrorTable.MALFORMED_STORAGE_TYPE, args, ex);
-    }
-  }
-
-  /**
-   * Returns BucketInfo response.
-   *
-   * @param args - BucketArgs
-   *
-   * @return BucketInfo
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  Response getBucketInfoResponse(BucketArgs args)
-      throws IOException, OzoneException {
-    StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-    BucketInfo info = fs.getBucketInfo(args);
-    return OzoneRestUtils.getResponse(args, HTTP_OK, info.toJsonString());
-  }
-
-  /**
-   * Returns list of objects in a bucket.
-   * @param args - ListArgs
-   * @return Response
-   * @throws IOException
-   * @throws OzoneException
-   */
-  Response getBucketKeysList(ListArgs args) throws IOException, OzoneException {
-    StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-    ListKeys objects = fs.listKeys(args);
-    return OzoneRestUtils.getResponse(args.getArgs(), HTTP_OK,
-        objects.toJsonString());
-  }
-
-}
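handleIOException above translates java.nio file exceptions into REST error codes. A compact sketch of just that mapping, using the numeric HTTP statuses from the ErrorTable conventions; the class name is hypothetical.

import java.io.IOException;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.NoSuchFileException;

final class IoErrorMapper {

  // Same mapping as handleIOException above, reduced to HTTP status codes.
  static int toHttpStatus(IOException e) {
    if (e instanceof FileAlreadyExistsException) {
      return 409; // bucketAlreadyExists
    }
    if (e instanceof DirectoryNotEmptyException) {
      return 409; // bucketNotEmpty
    }
    if (e instanceof NoSuchFileException) {
      return 400; // invalidBucketName
    }
    return 500;   // internalServerError, the fallback above
  }

  private IoErrorMapper() {
  }
}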

+ 0 - 302
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyHandler.java

@@ -1,302 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.handlers;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Request;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-
-import org.apache.hadoop.ozone.OzoneRestUtils;
-import org.apache.hadoop.ozone.client.io.LengthInputStream;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
-import org.apache.hadoop.ozone.web.interfaces.Keys;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.response.KeyInfo;
-
-import static java.net.HttpURLConnection.HTTP_CREATED;
-import static java.net.HttpURLConnection.HTTP_OK;
-import org.apache.commons.codec.binary.Hex;
-
-/**
- * KeyHandler deals with basic Key Operations.
- */
-public class KeyHandler implements Keys {
-
-  /**
-   * Gets the Key/key information if it exists.
-   *
-   * @param volume  Storage Volume
-   * @param bucket  Name of the bucket
-   * @param key Name of the key
-   * @param info Tag info
-   * @param req Request
-   * @param uriInfo Uri Info
-   * @param headers Http Header
-   * @return Response
-   * @throws OzoneException
-   */
-  @Override
-  public Response getKey(String volume, String bucket, String key, String info,
-      Request req, UriInfo uriInfo, HttpHeaders headers)
-      throws OzoneException {
-    return new KeyProcessTemplate() {
-      /**
-       * Abstract function that gets implemented in the KeyHandler functions.
-       * This function will just deal with the core file system related logic
-       * and will rely on handleCall function for repetitive error checks
-       *
-       * @param args - parsed bucket args, name, userName, ACLs etc
-       * @param input - The body as an Input Stream
-       * @param request - Http request
-       * @param headers - Parsed http Headers.
-       * @param uriInfo - UriInfo
-       *
-       * @return Response
-       *
-       * @throws IOException - From the file system operations
-       */
-      @Override
-      public Response doProcess(KeyArgs args, InputStream input,
-                                Request request, HttpHeaders headers,
-                                UriInfo uriInfo)
-          throws IOException, OzoneException, NoSuchAlgorithmException {
-        if (info == null) {
-          return getKey(args);
-        } else if (info.equals(Header.OZONE_INFO_QUERY_KEY)) {
-          return getKeyInfo(args);
-        } else if (info.equals(Header.OZONE_INFO_QUERY_KEY_DETAIL)) {
-          return getKeyInfoDetail(args);
-        }
-
-        OzoneException ozException = ErrorTable
-            .newError(ErrorTable.INVALID_QUERY_PARAM, args);
-        ozException.setMessage("Unrecognized query param : " + info);
-        throw ozException;
-      }
-    }.handleCall(volume, bucket, key, req, headers, uriInfo, null);
-  }
-
-  /**
-   * Gets the Key if it exists.
-   */
-  private Response getKey(KeyArgs args)
-      throws IOException, OzoneException {
-    StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-    LengthInputStream stream = fs.newKeyReader(args);
-    return OzoneRestUtils.getResponse(args, HTTP_OK, stream);
-  }
-
-  /**
-   * Gets the Key information if it exists.
-   */
-  private Response getKeyInfo(KeyArgs args)
-      throws IOException, OzoneException {
-    StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-    KeyInfo keyInfo = fs.getKeyInfo(args);
-    return OzoneRestUtils.getResponse(args, HTTP_OK, keyInfo.toJsonString());
-  }
-
-  /**
-   * Gets the Key detail information if it exists.
-   */
-  private Response getKeyInfoDetail(KeyArgs args)
-      throws IOException, OzoneException {
-    StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-    KeyInfo keyInfo = fs.getKeyInfoDetails(args);
-    return OzoneRestUtils.getResponse(args, HTTP_OK, keyInfo.toJsonString());
-  }
-
-  /**
-   * Adds a key to an existing bucket. If the object already exists, this
-   * call overwrites it, or adds a new version if bucket versioning is
-   * turned on.
-   *
-   * @param volume  Storage Volume Name
-   * @param bucket  Name of the bucket
-   * @param keys    Name of the Object
-   * @param is      InputStream or File Data
-   * @param req     Request
-   * @param info    - UriInfo
-   * @param headers http headers
-   * @return Response
-   * @throws OzoneException
-   */
-  @Override
-  public Response putKey(String volume, String bucket, String keys,
-                         InputStream is, Request req, UriInfo info,
-                         HttpHeaders headers) throws OzoneException {
-
-    return new KeyProcessTemplate() {
-      /**
-       * Abstract function that gets implemented in the KeyHandler functions.
-       * This function will just deal with the core file system related logic
-       * and will rely on handleCall function for repetitive error checks
-       *
-       * @param args - parsed bucket args, name, userName, ACLs etc
-       * @param input - The body as an Input Stream
-       * @param request - Http request
-       * @param headers - Parsed http Headers.
-       * @param info - UriInfo
-       *
-       * @return Response
-       *
-       * @throws IOException - From the file system operations
-       */
-      @Override
-      public Response doProcess(KeyArgs args, InputStream input,
-                                Request request, HttpHeaders headers,
-                                UriInfo info)
-          throws IOException, OzoneException, NoSuchAlgorithmException {
-        final int eof = -1;
-        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-
-        byte[] buffer = new byte[4 * 1024];
-        String contentLenString = getContentLength(headers, args);
-        String newLen = contentLenString.replaceAll("\"", "");
-        int contentLen = Integer.parseInt(newLen);
-        args.setSize(contentLen);
-
-        MessageDigest md5 = MessageDigest.getInstance("MD5");
-        int bytesRead = 0;
-        int len = 0;
-        OutputStream stream = fs.newKeyWriter(args);
-        while ((bytesRead < contentLen) && (len != eof)) {
-          int readSize =
-              (contentLen - bytesRead > buffer.length) ? buffer.length :
-                  contentLen - bytesRead;
-          len = input.read(buffer, 0, readSize);
-          if (len != eof) {
-            stream.write(buffer, 0, len);
-            md5.update(buffer, 0, len);
-            bytesRead += len;
-
-          }
-        }
-
-        checkFileLengthMatch(args, fs, contentLen, bytesRead);
-
-        String hashString = Hex.encodeHexString(md5.digest());
-// TODO : Enable hash value checking.
-//          String contentHash = getContentMD5(headers, args);
-//          checkFileHashMatch(args, hashString, fs, contentHash);
-        args.setHash(hashString);
-        args.setSize(bytesRead);
-        fs.commitKey(args, stream);
-        return OzoneRestUtils.getResponse(args, HTTP_CREATED, "");
-      }
-    }.handleCall(volume, bucket, keys, req, headers, info, is);
-  }
-
-  /**
-   * Deletes an existing key.
-   *
-   * @param volume  Storage Volume Name
-   * @param bucket  Name of the bucket
-   * @param keys    Name of the Object
-   * @param req     http Request
-   * @param info    - UriInfo
-   * @param headers HttpHeaders
-   * @return Response
-   * @throws OzoneException
-   */
-  @Override
-  public Response deleteKey(String volume, String bucket, String keys,
-                            Request req, UriInfo info, HttpHeaders headers)
-      throws OzoneException {
-    return new KeyProcessTemplate() {
-      /**
-       * Abstract function that gets implemented in the KeyHandler functions.
-       * This function will just deal with the core file system related logic
-       * and will rely on handleCall function for repetitive error checks
-       *
-       * @param args - parsed bucket args, name, userName, ACLs etc
-       * @param input - The body as an Input Stream
-       * @param request - Http request
-       * @param headers - Parsed http Headers.
-       * @param info - UriInfo
-       *
-       * @return Response
-       *
-       * @throws IOException - From the file system operations
-       */
-      @Override
-      public Response doProcess(KeyArgs args, InputStream input,
-                                Request request, HttpHeaders headers,
-                                UriInfo info)
-          throws IOException, OzoneException, NoSuchAlgorithmException {
-        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-        fs.deleteKey(args);
-        return OzoneRestUtils.getResponse(args, HTTP_OK, "");
-      }
-    }.handleCall(volume, bucket, keys, req, headers, info, null);
-  }
-
-  /**
-   * Renames an existing key within a bucket.
-   *
-   * @param volume      Storage Volume Name
-   * @param bucket      Name of the bucket
-   * @param key         Name of the Object
-   * @param toKeyName   New name of the Object
-   * @param req         http Request
-   * @param info        UriInfo
-   * @param headers     HttpHeaders
-   * @return Response
-   * @throws OzoneException
-   */
-  @Override
-  public Response renameKey(String volume, String bucket, String key,
-      String toKeyName, Request req, UriInfo info, HttpHeaders headers)
-      throws OzoneException {
-    return new KeyProcessTemplate() {
-      /**
-       * Abstract function that gets implemented in the KeyHandler functions.
-       * This function will just deal with the core file system related logic
-       * and will rely on handleCall function for repetitive error checks
-       *
-       * @param args - parsed bucket args, name, userName, ACLs etc
-       * @param input - The body as an Input Stream
-       * @param request - Http request
-       * @param headers - Parsed http Headers.
-       * @param info - UriInfo
-       *
-       * @return Response
-       *
-       * @throws IOException - From the file system operations
-       */
-      @Override
-      public Response doProcess(KeyArgs args, InputStream input,
-          Request request, HttpHeaders headers,
-          UriInfo info)
-          throws IOException, OzoneException, NoSuchAlgorithmException {
-        StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-        fs.renameKey(args, toKeyName);
-        return OzoneRestUtils.getResponse(args, HTTP_OK, "");
-      }
-    }.handleCall(volume, bucket, key, req, headers, info, null);
-  }
-}
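putKey above copies at most Content-Length bytes while digesting them in one pass. The same loop as a standalone sketch; the names are hypothetical, and the short-body check is folded into an immediate exception rather than the deferred comparison used above.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

final class BoundedCopy {

  // Reads at most contentLen bytes, writes them through, and digests them
  // in a single pass.
  static byte[] copyAndDigest(InputStream in, OutputStream out, int contentLen)
      throws IOException, NoSuchAlgorithmException {
    MessageDigest md5 = MessageDigest.getInstance("MD5");
    byte[] buffer = new byte[4 * 1024];
    int bytesRead = 0;
    while (bytesRead < contentLen) {
      int want = Math.min(buffer.length, contentLen - bytesRead);
      int len = in.read(buffer, 0, want);
      if (len == -1) {
        throw new IOException("Body length mismatch. Expected " + contentLen
            + " bytes, found " + bytesRead);
      }
      out.write(buffer, 0, len);
      md5.update(buffer, 0, len);
      bytesRead += len;
    }
    return md5.digest();
  }

  private BoundedCopy() {
  }
}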

+ 0 - 235
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/KeyProcessTemplate.java

@@ -1,235 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.handlers;
-
-import org.apache.commons.codec.binary.Base64;
-
-import org.apache.hadoop.ozone.OzoneRestUtils;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.interfaces.UserAuth;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Request;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import java.io.IOException;
-import java.io.InputStream;
-import java.security.NoSuchAlgorithmException;
-import java.util.List;
-
-import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.BAD_DIGEST;
-import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.INCOMPLETE_BODY;
-import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.INVALID_BUCKET_NAME;
-import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.INVALID_REQUEST;
-import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.SERVER_ERROR;
-import static org.apache.hadoop.ozone.web.exceptions.ErrorTable.newError;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_COMPONENT;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RESOURCE;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_REQUEST;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_USER;
-
-
-/**
- * This class abstracts away the repetitive tasks in key handling code.
- */
-public abstract class KeyProcessTemplate {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(KeyProcessTemplate.class);
-
-  /**
-   * This function serves as the common error handling function for all Key
-   * related operations.
-   *
-   * @param bucket  bucket Name
-   * @param key     the object name
-   * @param headers Http headers
-   * @param is      Input XML stream
-   * @throws OzoneException
-   */
-  public Response handleCall(String volume, String bucket, String key,
-                             Request request, HttpHeaders headers, UriInfo info,
-                             InputStream is) throws OzoneException {
-
-    String reqID = OzoneUtils.getRequestID();
-    String hostName = OzoneUtils.getHostName();
-    MDC.put(OZONE_COMPONENT, "ozone");
-    MDC.put(OZONE_REQUEST, reqID);
-    UserArgs userArgs = null;
-    try {
-      userArgs = new UserArgs(reqID, hostName, request, info, headers);
-      OzoneRestUtils.validate(request, headers, reqID, bucket, hostName);
-      OzoneUtils.verifyResourceName(bucket);
-
-      UserAuth auth = UserHandlerBuilder.getAuthHandler();
-      userArgs.setUserName(auth.getUser(userArgs));
-      MDC.put(OZONE_USER, userArgs.getUserName());
-
-      KeyArgs args = new KeyArgs(volume, bucket, key, userArgs);
-      MDC.put(OZONE_RESOURCE, args.getResourceName());
-      Response response =  doProcess(args, is, request, headers, info);
-      LOG.debug("Success");
-      MDC.clear();
-      return response;
-
-    } catch (IllegalArgumentException argExp) {
-      LOG.error("Invalid bucket in key call.", argExp);
-      throw newError(INVALID_BUCKET_NAME, userArgs, argExp);
-    } catch (IOException fsExp) {
-      LOG.error("IOException:", fsExp);
-      // Map KEY_NOT_FOUND to INVALID_KEY
-      if (fsExp.getMessage().endsWith(
-          OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND.name())) {
-        throw ErrorTable.newError(ErrorTable.INVALID_KEY, userArgs, fsExp);
-      }
-
-      // TODO : Handle errors from the FileSystem , let us map to server error
-      // for now.
-      throw ErrorTable.newError(ErrorTable.SERVER_ERROR, userArgs, fsExp);
-    } catch (NoSuchAlgorithmException algoEx) {
-      LOG.error("NoSuchAlgorithmException. Probably indicates an unusual java "
-          + "installation.", algoEx);
-      throw ErrorTable.newError(SERVER_ERROR, userArgs, algoEx);
-    }
-  }
-
-  /**
-   * Abstract function that gets implemented in the KeyHandler functions. This
-   * function will just deal with the core file system related logic and will
-   * rely on handleCall function for repetitive error checks
-   *
-   * @param args    - parsed bucket args, name, userName, ACLs etc
-   * @param input   - The body as an Input Stream
-   * @param request - Http request
-   * @param headers - Parsed http Headers.
-   * @param info    - UriInfo
-   * @return Response
-   * @throws IOException - From the file system operations
-   */
-  public abstract Response doProcess(KeyArgs args, InputStream input,
-                                     Request request, HttpHeaders headers,
-                                     UriInfo info)
-      throws IOException, OzoneException, NoSuchAlgorithmException;
-
-  /**
-   * Checks if the Content-MD5 header we received matches the hash we
-   * computed from the stream. If it does not match, we delete the file and
-   * throw an exception to let the user know about the hash mismatch.
-   *
-   * @param args           Object Args
-   * @param computedString MD5 hash value
-   * @param fs             Pointer to File System so we can delete the file
-   * @param contentHash    User Specified hash string
-   * @throws IOException
-   * @throws OzoneException
-   */
-  public void checkFileHashMatch(KeyArgs args, String computedString,
-                                 StorageHandler fs, String contentHash)
-      throws IOException, OzoneException {
-    if (contentHash != null) {
-      String contentString =
-          new String(Base64.decodeBase64(contentHash), OzoneUtils.ENCODING)
-              .trim();
-
-      if (!contentString.equals(computedString)) {
-        fs.deleteKey(args);
-        OzoneException ex = ErrorTable.newError(BAD_DIGEST, args);
-        String msg = String.format("MD5 Digest mismatch. Expected %s Found " +
-            "%s", contentString, computedString);
-        ex.setMessage(msg);
-        LOG.debug(msg);
-        throw ex;
-      }
-    }
-  }
-
-  /**
-   * Checks if the Content-Length matches the actual stream length. If we
-   * find a mismatch, we delete the file and throw an exception to let the
-   * user know that a length mismatch was detected.
-   *
-   * @param args       Object Args
-   * @param fs         Pointer to File System Object, to delete the file that we
-   *                   wrote
-   * @param contentLen Http Content-Length Header
-   * @param bytesRead  Actual Bytes we read from the stream
-   * @throws IOException
-   * @throws OzoneException
-   */
-  public void checkFileLengthMatch(KeyArgs args, StorageHandler fs,
-                                   int contentLen, int bytesRead)
-      throws IOException, OzoneException {
-    if (bytesRead != contentLen) {
-      fs.deleteKey(args);
-      OzoneException ex = ErrorTable.newError(INCOMPLETE_BODY, args);
-      String msg = String.format("Body length mismatch. Expected length : %d" +
-          " Found %d", contentLen, bytesRead);
-      ex.setMessage(msg);
-      LOG.debug(msg);
-      throw ex;
-    }
-  }
-
-  /**
-   * Returns the Content-Length header value.
-   *
-   * @param headers - Http Headers
-   * @return - String
-   * @throws OzoneException if the Content-Length header is missing
-   */
-  public String getContentLength(HttpHeaders headers, KeyArgs args)
-      throws OzoneException {
-    List<String> contentLengthList =
-        headers.getRequestHeader(HttpHeaders.CONTENT_LENGTH);
-    if ((contentLengthList != null) && (contentLengthList.size() > 0)) {
-      return contentLengthList.get(0);
-    }
-
-    OzoneException ex = ErrorTable.newError(INVALID_REQUEST, args);
-    ex.setMessage("Content-Length is a required header for putting a key.");
-    throw ex;
-
-  }
-
-  /**
-   * Returns Content MD5 value if available.
-   *
-   * @param headers - Http Headers
-   * @return - String, or the empty string if the header is absent
-   */
-  public String getContentMD5(HttpHeaders headers, KeyArgs args) {
-    List<String> contentLengthList =
-        headers.getRequestHeader(Header.CONTENT_MD5);
-    if ((contentLengthList != null) && (contentLengthList.size() > 0)) {
-      return contentLengthList.get(0);
-    }
-// TODO : Should we make this compulsory ?
-//    OzoneException ex = ErrorTable.newError(ErrorTable.invalidRequest, args);
-//    ex.setMessage("Content-MD5 is a required header for putting a key");
-//    throw ex;
-    return "";
-  }
-}
-
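getContentLength and getContentMD5 above illustrate two header-lookup styles: one mandatory, one optional. A generic sketch of the pair; IllegalArgumentException stands in for OzoneException and the class name is hypothetical.

import java.util.List;

import javax.ws.rs.core.HttpHeaders;

final class HeaderLookup {

  // Mandatory header: absence is an error (cf. getContentLength above).
  static String requiredHeader(HttpHeaders headers, String name) {
    List<String> values = headers.getRequestHeader(name);
    if (values == null || values.isEmpty()) {
      throw new IllegalArgumentException(name + " is a required header.");
    }
    return values.get(0);
  }

  // Optional header: absence degrades to "" (cf. getContentMD5 above).
  static String optionalHeader(HttpHeaders headers, String name) {
    List<String> values = headers.getRequestHeader(name);
    return (values == null || values.isEmpty()) ? "" : values.get(0);
  }

  private HeaderLookup() {
  }
}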

+ 0 - 61
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/ServiceFilter.java

@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.handlers;
-
-import com.sun.jersey.spi.container.ContainerRequest;
-import com.sun.jersey.spi.container.ContainerRequestFilter;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-
-import javax.ws.rs.core.UriBuilder;
-import javax.ws.rs.ext.Provider;
-
-/**
- * This class is used to intercept root URL requests and route them to
- * the volume list functionality.
- */
-@Provider
-public class ServiceFilter implements ContainerRequestFilter {
-  /**
-   * Filter the request.
-   * <p>
-   * An implementation may modify the state of the request or
-   * create a new instance.
-   *
-   * @param request the request.
-   *
-   * @return the request.
-   */
-  @Override
-  public ContainerRequest filter(ContainerRequest request) {
-    if (request.getRequestUri().getPath().length() > 1) {
-      return request;
-    }
-
-    // Just re-route it to volume handler with some hypothetical volume name.
-    // volume name is ignored.
-
-    request.setUris(request.getBaseUri(),
-        UriBuilder.fromUri(request.getRequestUri())
-        .path("/service")
-        .queryParam("info", Header.OZONE_LIST_QUERY_SERVICE)
-        .build());
-
-    return request;
-  }
-}
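
ServiceFilter is written against the proprietary Jersey 1.x
ContainerRequestFilter; the same root-URL reroute on the standard JAX-RS 2.x
API would look roughly like the sketch below. The class name is ours, and the
"service" query value is an assumption for illustration (the literal behind
Header.OZONE_LIST_QUERY_SERVICE is not part of this hunk):

import java.io.IOException;
import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.ContainerRequestFilter;
import javax.ws.rs.container.PreMatching;
import javax.ws.rs.core.UriBuilder;
import javax.ws.rs.ext.Provider;

@Provider
@PreMatching // rewriting the request URI is only legal before matching
public class RootRerouteFilter implements ContainerRequestFilter {
  @Override
  public void filter(ContainerRequestContext ctx) throws IOException {
    if (ctx.getUriInfo().getRequestUri().getPath().length() > 1) {
      return; // not a bare root request, leave it untouched
    }
    // Re-route "/" to the service-list endpoint, as ServiceFilter did.
    ctx.setRequestUri(UriBuilder.fromUri(ctx.getUriInfo().getRequestUri())
        .path("/service")
        .queryParam("info", "service")
        .build());
  }
}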

+ 0 - 80
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/StorageHandlerBuilder.java

@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.handlers;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-
-/**
- * This class is responsible for providing a {@link StorageHandler}
- * implementation to object store web handlers.
- */
-@InterfaceAudience.Private
-public final class StorageHandlerBuilder {
-
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(StorageHandlerBuilder.class);
-  private static final ThreadLocal<StorageHandler>
-      STORAGE_HANDLER_THREAD_LOCAL = new ThreadLocal<>();
-
-  /**
-   * Returns the configured StorageHandler from thread-local storage for this
-   * thread.
-   *
-   * @return StorageHandler from thread-local storage
-   */
-  public static StorageHandler getStorageHandler() throws IOException {
-    StorageHandler storageHandler = STORAGE_HANDLER_THREAD_LOCAL.get();
-    if (storageHandler != null) {
-      return storageHandler;
-    } else {
-      LOG.error("No Storage Handler Configured.");
-      throw new IOException("Invalid Handler Configuration");
-    }
-
-  }
-
-  /**
-   * Removes the configured StorageHandler from thread-local storage for this
-   * thread.
-   */
-  public static void removeStorageHandler() {
-    STORAGE_HANDLER_THREAD_LOCAL.remove();
-  }
-
-  /**
-   * Sets the configured StorageHandler in thread-local storage for this thread.
-   *
-   * @param storageHandler StorageHandler to set in thread-local storage
-   */
-  public static void setStorageHandler(StorageHandler storageHandler) {
-    STORAGE_HANDLER_THREAD_LOCAL.set(storageHandler);
-  }
-
-  /**
-   * There is no reason to instantiate this class.
-   */
-  private StorageHandlerBuilder() {
-  }
-}
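
StorageHandlerBuilder (and UserHandlerBuilder below) are both instances of
the per-request ThreadLocal holder pattern. A generic sketch, with names of
our own choosing:

/** Holds one request-scoped value per dispatcher thread. */
public final class RequestScoped<T> {
  private final ThreadLocal<T> slot = new ThreadLocal<>();

  public void set(T value) {
    slot.set(value);
  }

  public T get() throws java.io.IOException {
    T value = slot.get();
    if (value == null) {
      throw new java.io.IOException("No handler configured for this thread");
    }
    return value;
  }

  public void remove() {
    slot.remove();
  }
}

The dispatcher must pair set() with remove() in a try/finally; servlet
threads are pooled and reused, so a missed remove() leaks one request's
handler into the next:

holder.set(storageHandler);
try {
  // ... dispatch the request ...
} finally {
  holder.remove();
}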

+ 0 - 75
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/UserHandlerBuilder.java

@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.handlers;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.web.interfaces.UserAuth;
-import org.apache.hadoop.ozone.web.userauth.Simple;
-
-/**
- * This class is responsible for providing a
- * {@link org.apache.hadoop.ozone.web.interfaces.UserAuth}
- * implementation to object store web handlers.
- */
-@InterfaceAudience.Private
-public final class UserHandlerBuilder {
-
-  private static final ThreadLocal<UserAuth> USER_AUTH_THREAD_LOCAL =
-      new ThreadLocal<UserAuth>();
-
-  /**
-   * Returns the configured UserAuth from thread-local storage for this
-   * thread.
-   *
-   * @return UserAuth from thread-local storage
-   */
-  public static UserAuth getAuthHandler() {
-    UserAuth authHandler = USER_AUTH_THREAD_LOCAL.get();
-    if (authHandler != null) {
-      return authHandler;
-    } else {
-      // This only happens while using mvn jetty:run for testing.
-      return new Simple();
-    }
-  }
-
-  /**
-   * Removes the configured UserAuth from thread-local storage for this
-   * thread.
-   */
-  public static void removeAuthHandler() {
-    USER_AUTH_THREAD_LOCAL.remove();
-  }
-
-  /**
-   * Sets the configured UserAuthHandler in thread-local storage for this
-   * thread.
-   *
-   * @param authHandler authHandler to set in thread-local storage
-   */
-  public static void setAuthHandler(UserAuth authHandler) {
-    USER_AUTH_THREAD_LOCAL.set(authHandler);
-  }
-
-  /**
-   * There is no reason to instantiate this class.
-   */
-  private UserHandlerBuilder() {
-  }
-}
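
UserHandlerBuilder differs from StorageHandlerBuilder only in its fallback:
get() returns a default Simple auth instead of throwing. Since Java 8 that
fallback can be encoded directly in the holder; a sketch (AuthHolder is a
hypothetical name, Simple is the class removed alongside this one):

import java.util.function.Supplier;

public final class AuthHolder<T> {
  private final ThreadLocal<T> slot;

  public AuthHolder(Supplier<T> fallback) {
    // withInitial supplies the default whenever no value was set,
    // replacing the explicit null check in getAuthHandler().
    this.slot = ThreadLocal.withInitial(fallback);
  }

  public T get() {
    return slot.get();
  }

  public void set(T value) {
    slot.set(value);
  }

  public void remove() {
    slot.remove();
  }
}

Constructed as new AuthHolder<>(Simple::new), this keeps the original
semantics: after remove(), the next get() recreates the default.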

+ 0 - 274
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeHandler.java

@@ -1,274 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.handlers;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Request;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.OzoneRestUtils;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.interfaces.UserAuth;
-import org.apache.hadoop.ozone.web.interfaces.Volume;
-
-import static java.net.HttpURLConnection.HTTP_CREATED;
-import static java.net.HttpURLConnection.HTTP_OK;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_FUNCTION;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
-
-/**
- * VolumeHandler handles volume specific HTTP calls.
- *
- * Most functions in this file follow a simple pattern.
- * All calls are handled by VolumeProcessTemplate.handleCall, which
- * calls back into doProcess function.
- *
- * Everything common to volume handling is abstracted out into handleCall,
- * for example checking that the volume name is sane and that the request
- * carries a supported ozone version number and a valid date. The actions
- * specific to each call live inside the doProcess callback.
- */
-@InterfaceAudience.Private
-public class VolumeHandler implements Volume {
-  private static final Logger LOG = LoggerFactory.getLogger(VolumeHandler
-      .class);
-  /**
-   * Creates a volume.
-   *
-   * @param volume Volume Name, this has to be unique at Ozone cluster level
-   * @param quota Quota for this Storage Volume
-   *             - {@literal <int>(<BYTES|MB|GB|TB>)}
-   * @param req Request Object
-   * @param uriInfo URI info
-   * @param headers Http Headers
-   *
-   * @return Standard JAX-RS Response
-   *
-   * @throws OzoneException
-   */
-  @Override
-  public Response createVolume(String volume, final String quota, Request req,
-                               UriInfo uriInfo, HttpHeaders headers)
-      throws OzoneException {
-    MDC.put(OZONE_FUNCTION, "createVolume");
-    return new VolumeProcessTemplate() {
-      @Override
-      public Response doProcess(VolumeArgs args)
-          throws IOException, OzoneException {
-        UserAuth auth = UserHandlerBuilder.getAuthHandler();
-        if (auth.isAdmin(args)) {
-          args.setAdminName(args.getUserName());
-          String volumeOwner = auth.getOzoneUser(args);
-
-          if (volumeOwner == null) {
-            throw ErrorTable.newError(ErrorTable.USER_NOT_FOUND, args);
-          }
-
-          if (!auth.isUser(volumeOwner, args)) {
-            throw ErrorTable.newError(ErrorTable.USER_NOT_FOUND, args);
-          }
-
-          args.setUserName(volumeOwner);
-          args.setGroups(auth.getGroups(args));
-          if (!quota.equals(Header.OZONE_QUOTA_UNDEFINED)) {
-            setQuotaArgs(args, quota);
-          }
-          StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-          fs.createVolume(args);
-          return OzoneRestUtils.getResponse(args, HTTP_CREATED, "");
-        } else {
-          throw ErrorTable.newError(ErrorTable.ACCESS_DENIED, args);
-        }
-      }
-    }.handleCall(volume, req, uriInfo, headers);
-  }
-
-  /**
-   * Updates volume metadata.
-   *
-   * There are currently only two possible actions with updateVolume:
-   * change the volume ownership or update the quota. A call that
-   * specifies neither action simply returns 200 OK.
-   *
-   * @param volume Volume Name, this has to be unique at Ozone Level
-   * @param quota Quota for this volume
-   *             - {@literal <int>(<BYTES|MB|GB|TB>)}|remove
-   * @param req - Request Object
-   * @param uriInfo - URI info
-   * @param headers Http Headers
-   *
-   * @return Standard JAX-RS Response
-   *
-   * @throws OzoneException
-   */
-  @Override
-  public Response updateVolume(String volume, final String quota, Request req,
-                               UriInfo uriInfo, HttpHeaders headers)
-      throws OzoneException {
-    MDC.put(OZONE_FUNCTION, "updateVolume");
-    return new VolumeProcessTemplate() {
-      @Override
-      public Response doProcess(VolumeArgs args)
-          throws IOException, OzoneException {
-        UserAuth auth = UserHandlerBuilder.getAuthHandler();
-        if (auth.isAdmin(args)) {
-          StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-          args.setAdminName(args.getUserName());
-          String newVolumeOwner = auth.getOzoneUser(args);
-
-          if (newVolumeOwner != null) {
-            if (!auth.isUser(newVolumeOwner, args)) {
-              throw ErrorTable.newError(ErrorTable.USER_NOT_FOUND, args);
-            }
-            args.setUserName(newVolumeOwner);
-            fs.setVolumeOwner(args);
-          }
-
-          if (!quota.equals(Header.OZONE_QUOTA_UNDEFINED)) {
-            if (quota.equals(Header.OZONE_QUOTA_REMOVE)) {
-              // if it is remove, just tell the file system to remove quota
-              fs.setVolumeQuota(args, true);
-            } else {
-              setQuotaArgs(args, quota);
-              fs.setVolumeQuota(args, false);
-            }
-          }
-          return OzoneRestUtils.getResponse(args, HTTP_OK, "");
-        } else {
-          // Only Admins are allowed to update volumes
-          throw ErrorTable.newError(ErrorTable.ACCESS_DENIED, args);
-        }
-      }
-    }.handleCall(volume, req, uriInfo, headers);
-  }
-
-
-  /**
-   * Deletes a volume if it is empty.
-   *
-   * @param volume Volume Name
-   * @param req - Http Request
-   * @param uriInfo - http URI
-   * @param headers - http headers
-   *
-   * @return Standard JAX-RS Response
-   *
-   * @throws OzoneException
-   */
-  @Override
-  public Response deleteVolume(String volume, Request req, UriInfo uriInfo,
-                               HttpHeaders headers) throws OzoneException {
-    MDC.put(OZONE_FUNCTION, "deleteVolume");
-
-    return new VolumeProcessTemplate() {
-      @Override
-      public Response doProcess(VolumeArgs args)
-          throws IOException, OzoneException {
-        UserAuth auth = UserHandlerBuilder.getAuthHandler();
-        if (auth.isAdmin(args)) {
-          StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-          fs.deleteVolume(args);
-          return OzoneRestUtils.getResponse(args, HTTP_OK, "");
-        } else {
-          throw ErrorTable.newError(ErrorTable.ACCESS_DENIED, args);
-        }
-      }
-    }.handleCall(volume, req, uriInfo, headers);
-  }
-
-  /**
-   * Returns Volume info. This API can be invoked by the admin or the owner.
-   *
-   * @param volume  - Storage Volume Name
-   * @param info    - Info attribute
-   * @param prefix  - Prefix key
-   * @param maxKeys - Max results
-   * @param prevKey - PrevKey
-   * @param rootScan - Whether to list volumes across the cluster (admin only)
-   * @param req     - Http Req
-   * @param uriInfo - UriInfo.
-   * @param headers - Http headers
-   * @return Standard JAX-RS Response
-   * @throws OzoneException
-   */
-  @Override
-  public Response getVolumeInfo(String volume, final String info,
-                                final String prefix,
-                                final int maxKeys,
-                                final String prevKey,
-                                final boolean rootScan,
-                                Request req,
-                                final UriInfo uriInfo, HttpHeaders headers)
-      throws OzoneException {
-
-    return new VolumeProcessTemplate() {
-      @Override
-      public Response doProcess(VolumeArgs args)
-          throws IOException, OzoneException {
-
-        switch (info) {
-        case Header.OZONE_INFO_QUERY_BUCKET:
-          MDC.put(OZONE_FUNCTION, "ListBucket");
-          return getBucketsInVolume(args, prefix, maxKeys, prevKey);
-        case Header.OZONE_INFO_QUERY_VOLUME:
-          MDC.put(OZONE_FUNCTION, "InfoVolume");
-          assertNoListParamPresent(uriInfo, args);
-          return getVolumeInfoResponse(args); // Return volume info
-        case Header.OZONE_LIST_QUERY_SERVICE:
-          MDC.put(OZONE_FUNCTION, "ListVolume");
-          return getVolumesByUser(args, prefix, maxKeys, prevKey, rootScan);
-        default:
-          LOG.debug("Unrecognized query param : {} ", info);
-          OzoneException ozoneException =
-              ErrorTable.newError(ErrorTable.INVALID_QUERY_PARAM, args);
-          ozoneException.setMessage("Unrecognized query param : " + info);
-          throw ozoneException;
-        }
-      }
-    }.handleCall(volume, req, uriInfo, headers);
-  }
-
-  /**
-   * Asserts no list query param is present during this call.
-   *
-   * @param uriInfo - UriInfo
-   * @param args    - VolumeArgs
-   * @throws OzoneException
-   */
-  private void assertNoListParamPresent(final UriInfo uriInfo, VolumeArgs
-      args) throws
-      OzoneException {
-
-    String prefix = uriInfo.getQueryParameters().getFirst("prefix");
-    String maxKeys = uriInfo.getQueryParameters().getFirst("max_keys");
-    String prevKey = uriInfo.getQueryParameters().getFirst("prev_key");
-    if ((prefix != null && !prefix.equals(Header.OZONE_EMPTY_STRING)) ||
-        (maxKeys != null && !maxKeys.equals(Header.OZONE_DEFAULT_LIST_SIZE)) ||
-        (prevKey != null && !prevKey.equals(Header.OZONE_EMPTY_STRING))) {
-      throw ErrorTable.newError(ErrorTable.INVALID_QUERY_PARAM, args);
-    }
-  }
-}
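
Every endpoint in VolumeHandler follows the same template-method shape: an
anonymous VolumeProcessTemplate subclass supplies doProcess, while handleCall
owns validation, MDC bookkeeping and error mapping. With lambdas the same
split can be written without anonymous classes; a generic sketch under
hypothetical names:

import java.io.IOException;

public final class CallTemplate {

  /** The call-specific body, analogous to doProcess above. */
  @FunctionalInterface
  public interface Body<A, R> {
    R apply(A args) throws IOException;
  }

  /** Common pre-work, then the per-call body, then common cleanup. */
  public static <A, R> R handle(A args, Body<A, R> body) throws IOException {
    // common pre-work: validate the resource name, set MDC context, ...
    try {
      return body.apply(args);
    } finally {
      // common post-work: clear MDC, emit the audit record, ...
    }
  }
}

A createVolume equivalent then becomes
CallTemplate.handle(volumeArgs, a -> doCreate(a)) instead of a subclass.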

+ 0 - 276
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/VolumeProcessTemplate.java

@@ -1,276 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.web.handlers;
-
-import javax.ws.rs.core.HttpHeaders;
-import javax.ws.rs.core.Request;
-import javax.ws.rs.core.Response;
-import javax.ws.rs.core.UriInfo;
-import java.io.IOException;
-import java.nio.file.DirectoryNotEmptyException;
-import java.nio.file.FileAlreadyExistsException;
-import java.nio.file.NoSuchFileException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ozone.OzoneRestUtils;
-import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
-import org.apache.hadoop.ozone.web.interfaces.StorageHandler;
-import org.apache.hadoop.ozone.web.interfaces.UserAuth;
-import org.apache.hadoop.ozone.web.response.ListBuckets;
-import org.apache.hadoop.ozone.web.response.ListVolumes;
-import org.apache.hadoop.ozone.web.response.VolumeInfo;
-import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-
-import static java.net.HttpURLConnection.HTTP_OK;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_COMPONENT;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_REQUEST;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_RESOURCE;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_USER;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.MDC;
-
-
-/**
- * This class abstracts away the repetitive tasks in
- * handling volume-related code.
- */
-@InterfaceAudience.Private
-public abstract class VolumeProcessTemplate {
-  private static final Logger LOG =
-      LoggerFactory.getLogger(VolumeProcessTemplate.class);
-
-
-  /**
-   * The handle call is the common functionality for Volume
-   * handling code.
-   *
-   * @param volume - Name of the Volume
-   * @param request - request
-   * @param info - UriInfo
-   * @param headers - Http Headers
-   *
-   * @return Response
-   *
-   * @throws OzoneException
-   */
-  public Response handleCall(String volume, Request request, UriInfo info,
-                             HttpHeaders headers) throws OzoneException {
-    String reqID = OzoneUtils.getRequestID();
-    String hostName = OzoneUtils.getHostName();
-    MDC.put(OZONE_COMPONENT, "ozone");
-    MDC.put(OZONE_REQUEST, reqID);
-    UserArgs userArgs  = null;
-    try {
-      userArgs = new UserArgs(reqID, hostName, request, info, headers);
-      OzoneRestUtils.validate(request, headers, reqID, volume, hostName);
-
-      // we use the same logic for both bucket and volume names
-      OzoneUtils.verifyResourceName(volume);
-      UserAuth auth = UserHandlerBuilder.getAuthHandler();
-
-      userArgs.setUserName(auth.getUser(userArgs));
-      MDC.put(OZONE_USER, userArgs.getUserName());
-      VolumeArgs args = new VolumeArgs(volume, userArgs);
-
-      MDC.put(OZONE_RESOURCE, args.getResourceName());
-      Response response =  doProcess(args);
-      LOG.info("Success");
-      MDC.clear();
-      return response;
-
-    } catch (IllegalArgumentException ex) {
-      LOG.error("Illegal argument.", ex);
-      throw ErrorTable.newError(ErrorTable.INVALID_VOLUME_NAME, userArgs, ex);
-    } catch (IOException ex) {
-      handleIOException(volume, reqID, hostName, ex);
-    }
-    return null;
-  }
-
-  /**
-   * Specific handler for each call.
-   *
-   * @param args - Volume Args
-   *
-   * @return - Response
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  public abstract Response doProcess(VolumeArgs args)
-      throws IOException, OzoneException;
-
-  /**
-   * Maps Java File System Exceptions to Ozone Exceptions in the Volume path.
-   *
-   * @param volume - Name of the Volume
-   * @param reqID - Request ID
-   * @param hostName - HostName
-   * @param fsExp - Exception
-   *
-   * @throws OzoneException
-   */
-  private void handleIOException(String volume, String reqID, String hostName,
-                                 IOException fsExp) throws OzoneException {
-    LOG.error("IOException:", fsExp);
-    OzoneException exp = null;
-
-    if ((fsExp != null && fsExp.getMessage().endsWith(
-        OzoneManagerProtocolProtos.Status.VOLUME_ALREADY_EXISTS.name()))
-        || fsExp instanceof FileAlreadyExistsException) {
-      exp = ErrorTable
-          .newError(ErrorTable.VOLUME_ALREADY_EXISTS, reqID, volume, hostName);
-    }
-
-    if (fsExp instanceof DirectoryNotEmptyException) {
-      exp = ErrorTable
-          .newError(ErrorTable.VOLUME_NOT_EMPTY, reqID, volume, hostName);
-    }
-
-    if (fsExp instanceof NoSuchFileException) {
-      exp = ErrorTable
-          .newError(ErrorTable.INVALID_VOLUME_NAME, reqID, volume, hostName);
-    }
-
-    if ((fsExp != null) && (exp != null)) {
-      exp.setMessage(fsExp.getMessage());
-    }
-
-    // We don't handle that FS error yet; report an Internal Server Error.
-    if (exp == null) {
-      exp =
-          ErrorTable.newError(ErrorTable.SERVER_ERROR, reqID, volume, hostName);
-      if (fsExp != null) {
-        exp.setMessage(fsExp.getMessage());
-      }
-    }
-    throw exp;
-  }
-
-  /**
-   * Sets the user-provided quota string on args and throws an ozone
-   * exception if it is malformed.
-   *
-   * @param args - volume args
-   * @param quota - quota string
-   *
-   * @throws OzoneException
-   */
-  void setQuotaArgs(VolumeArgs args, String quota) throws OzoneException {
-    try {
-      args.setQuota(quota);
-    } catch (IllegalArgumentException ex) {
-      LOG.debug("Malformed Quota.", ex);
-      throw ErrorTable.newError(ErrorTable.MALFORMED_QUOTA, args, ex);
-    }
-  }
-
-  /**
-   * Fetches the volume info and wraps it into a JSON response.
-   *
-   * @param args - volumeArgs
-   *
-   * @return - VolumeInfo
-   *
-   * @throws IOException
-   * @throws OzoneException
-   */
-  Response getVolumeInfoResponse(VolumeArgs args)
-      throws IOException, OzoneException {
-    StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-    VolumeInfo info = fs.getVolumeInfo(args);
-    return OzoneRestUtils.getResponse(args, HTTP_OK, info.toJsonString());
-  }
-
-  /**
-   * Returns all the volumes belonging to a user.
-   *
-   * @param user - userArgs
-   * @return - Response
-   * @throws OzoneException
-   * @throws IOException
-   */
-  Response getVolumesByUser(UserArgs user, String prefix, int maxKeys,
-      String prevKey, boolean rootScan) throws OzoneException, IOException {
-
-    String validatedUser = user.getUserName();
-    try {
-      UserAuth auth = UserHandlerBuilder.getAuthHandler();
-      if(rootScan && !auth.isAdmin(user)) {
-        throw ErrorTable.newError(ErrorTable.UNAUTHORIZED, user);
-      }
-      if (auth.isAdmin(user)) {
-        validatedUser = auth.getOzoneUser(user);
-        if (validatedUser == null) {
-          validatedUser = auth.getUser(user);
-        }
-      }
-
-      UserArgs onBehalfOf =
-          new UserArgs(validatedUser, user.getRequestID(), user.getHostName(),
-              user.getRequest(), user.getUri(), user.getHeaders());
-
-      StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-      ListArgs<UserArgs> listArgs = new ListArgs<>(onBehalfOf, prefix,
-          maxKeys, prevKey);
-      listArgs.setRootScan(rootScan);
-      ListVolumes volumes = fs.listVolumes(listArgs);
-      return OzoneRestUtils.getResponse(user, HTTP_OK, volumes.toJsonString());
-    } catch (IOException ex) {
-      LOG.debug("unable to get the volume list for the user.", ex);
-      OzoneException exp = ErrorTable.newError(ErrorTable.SERVER_ERROR,
-          user, ex);
-      exp.setMessage("unable to get the volume list for the user");
-      throw exp;
-    }
-  }
-
-  /**
-   * Returns a list of Buckets in a Volume.
-   *
-   * @param args    - VolumeArgs
-   * @param prefix  - Prefix to Match
-   * @param maxKeys - Max results to return.
-   * @param prevKey - PrevKey
-   * @return List of Buckets
-   * @throws OzoneException
-   */
-  Response getBucketsInVolume(VolumeArgs args, String prefix, int maxKeys,
-                              String prevKey) throws OzoneException {
-    try {
-      // UserAuth auth = UserHandlerBuilder.getAuthHandler();
-      // TODO : Check ACLS.
-      StorageHandler fs = StorageHandlerBuilder.getStorageHandler();
-      ListArgs<VolumeArgs> listArgs = new ListArgs<>(args, prefix,
-          maxKeys, prevKey);
-      ListBuckets bucketList = fs.listBuckets(listArgs);
-      return OzoneRestUtils
-          .getResponse(args, HTTP_OK, bucketList.toJsonString());
-    } catch (IOException ex) {
-      LOG.debug("unable to get the bucket list for the specified volume.", ex);
-      OzoneException exp =
-          ErrorTable.newError(ErrorTable.SERVER_ERROR, args, ex);
-      exp.setMessage("unable to get the bucket list for the specified volume.");
-      throw exp;
-    }
-  }
-}
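
handleIOException above maps java.nio.file exceptions onto entries of the
(also removed) ErrorTable. The mapping is small enough to read as a decision
table; a sketch with HTTP status codes that are our assumption, since the
actual codes live in ErrorTable, outside this hunk:

import java.io.IOException;
import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.NoSuchFileException;

final class FsErrorMapper {
  private FsErrorMapper() {
  }

  /** Maps a filesystem exception to an HTTP-style status code. */
  static int statusFor(IOException e) {
    if (e instanceof FileAlreadyExistsException) {
      return 409; // VOLUME_ALREADY_EXISTS
    }
    if (e instanceof DirectoryNotEmptyException) {
      return 409; // VOLUME_NOT_EMPTY
    }
    if (e instanceof NoSuchFileException) {
      return 400; // INVALID_VOLUME_NAME
    }
    return 500;   // SERVER_ERROR for anything unrecognized
  }
}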

+ 0 - 22
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/handlers/package-info.java

@@ -1,22 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ozone.web.handlers;
-
-/**
- This package contains the object store web request handlers.
- */

Some files were not shown because of the large number of changed files