
HDDS-1795. Implement S3 Delete Bucket request to use Cache and DoubleBuffer. (#1097)

Bharat Viswanadham
commit f5b2f7538c
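
The commit title refers to the new OM write-request model: validateAndUpdateCache (in S3BucketDeleteRequest below) applies the delete to the in-memory table caches immediately, while the actual RocksDB deletes are deferred until the OM double buffer flushes the queued response. A minimal sketch of that deferred flush, condensed from the response and test code in this diff (the double-buffer driver itself is not part of this patch; omMetadataManager and s3BucketDeleteResponse are assumed to be in scope):

    // Sketch only: roughly what the double buffer does for each queued
    // OMClientResponse when it flushes to RocksDB.
    BatchOperation batchOperation =
        omMetadataManager.getStore().initBatchOperation();
    // For a successful DeleteS3Bucket this removes the bucketTable and
    // s3Table entries that validateAndUpdateCache already marked absent
    // in the table caches.
    s3BucketDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
    omMetadataManager.getStore().commitBatchOperation(batchOperation);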

+ 1 - 0
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java

@@ -41,6 +41,7 @@ public enum OMAction implements AuditAction {
 
   // S3 Bucket
   CREATE_S3_BUCKET,
+  DELETE_S3_BUCKET,
 
   // READ Actions
   CHECK_VOLUME_ACCESS,

+ 13 - 0
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java

@@ -129,6 +129,8 @@ public class OMMetrics {
 
   private @Metric MutableCounterLong numS3BucketCreates;
   private @Metric MutableCounterLong numS3BucketCreateFails;
+  private @Metric MutableCounterLong numS3BucketDeletes;
+  private @Metric MutableCounterLong numS3BucketDeleteFails;
 
 
   public OMMetrics() {
@@ -150,6 +152,17 @@ public class OMMetrics {
     numS3BucketCreateFails.incr();
   }
 
+  public void incNumS3BucketDeletes() {
+    numBucketOps.incr();
+    numS3BucketDeletes.incr();
+  }
+
+  public void incNumS3BucketDeleteFails() {
+    numBucketOps.incr();
+    numS3BucketDeleteFails.incr();
+  }
+
+
   public void incNumS3Buckets() {
     numS3Buckets.incr();
   }

+ 3 - 0
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
 import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest;
 import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest;
+import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketDeleteRequest;
 import org.apache.hadoop.ozone.om.request.volume.OMVolumeCreateRequest;
 import org.apache.hadoop.ozone.om.request.volume.OMVolumeDeleteRequest;
 import org.apache.hadoop.ozone.om.request.volume.OMVolumeSetOwnerRequest;
@@ -102,6 +103,8 @@ public final class OzoneManagerRatisUtils {
       return new OMKeyPurgeRequest(omRequest);
     case CreateS3Bucket:
       return new S3BucketCreateRequest(omRequest);
+    case DeleteS3Bucket:
+      return new S3BucketDeleteRequest(omRequest);
     default:
       // TODO: will update once all request types are implemented.
       return null;
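
For context, the new DeleteS3Bucket case sits in the factory that maps an incoming OMRequest type to its write-request handler; the enclosing method name is not visible in this hunk, so createClientRequest is an assumption in the sketch below, and omRequest, ozoneManager and transactionLogIndex are assumed to be in scope:

    // Sketch, not part of this patch: assumed dispatch and execution flow.
    OMClientRequest clientRequest =
        OzoneManagerRatisUtils.createClientRequest(omRequest); // S3BucketDeleteRequest
    // Leader-side validation/normalization before the request is replicated.
    OMRequest validatedRequest = clientRequest.preExecute(ozoneManager);
    // Applied after consensus; updates only the table caches, the DB write
    // happens later via the double buffer (see the sketch near the top).
    OMClientResponse omClientResponse =
        clientRequest.validateAndUpdateCache(ozoneManager, transactionLogIndex);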

+ 193 - 0
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/bucket/S3BucketDeleteRequest.java

@@ -0,0 +1,193 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.bucket;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.google.common.base.Optional;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.request.volume.OMVolumeRequest;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.s3.bucket.S3BucketDeleteResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .S3DeleteBucketRequest;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.apache.hadoop.utils.db.cache.CacheKey;
+import org.apache.hadoop.utils.db.cache.CacheValue;
+
+import static org.apache.hadoop.ozone.OzoneConsts.S3_BUCKET_MAX_LENGTH;
+import static org.apache.hadoop.ozone.OzoneConsts.S3_BUCKET_MIN_LENGTH;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.S3_BUCKET_LOCK;
+
+/**
+ * Handles Delete S3Bucket request.
+ */
+public class S3BucketDeleteRequest extends OMVolumeRequest {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(S3BucketDeleteRequest.class);
+
+  public S3BucketDeleteRequest(OMRequest omRequest) {
+    super(omRequest);
+  }
+
+  @Override
+  public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
+    S3DeleteBucketRequest s3DeleteBucketRequest =
+        getOmRequest().getDeleteS3BucketRequest();
+
+    // TODO: Do we need to enforce the bucket rules in this code path?
+    // https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
+
+    // For now, only the length is checked.
+    int bucketLength = s3DeleteBucketRequest.getS3BucketName().length();
+    if (bucketLength < S3_BUCKET_MIN_LENGTH ||
+        bucketLength >= S3_BUCKET_MAX_LENGTH) {
+      throw new OMException("S3BucketName must be at least 3 and not more " +
+          "than 63 characters long",
+          OMException.ResultCodes.S3_BUCKET_INVALID_LENGTH);
+    }
+
+    return getOmRequest().toBuilder().setUserInfo(getUserInfo()).build();
+
+  }
+
+  @Override
+  public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+      long transactionLogIndex) {
+    S3DeleteBucketRequest s3DeleteBucketRequest =
+        getOmRequest().getDeleteS3BucketRequest();
+
+    String s3BucketName = s3DeleteBucketRequest.getS3BucketName();
+
+    OMResponse.Builder omResponse = OMResponse.newBuilder().setCmdType(
+        OzoneManagerProtocolProtos.Type.DeleteS3Bucket).setStatus(
+        OzoneManagerProtocolProtos.Status.OK).setSuccess(true);
+
+    OMMetrics omMetrics = ozoneManager.getMetrics();
+    omMetrics.incNumS3BucketDeletes();
+    IOException exception = null;
+    boolean acquiredS3Lock = false;
+    boolean acquiredBucketLock = false;
+    String volumeName = null;
+    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+    try {
+      // check Acl
+      if (ozoneManager.getAclsEnabled()) {
+        checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET,
+            OzoneObj.StoreType.S3, IAccessAuthorizer.ACLType.DELETE, null,
+            s3BucketName, null);
+      }
+
+      acquiredS3Lock = omMetadataManager.getLock().acquireLock(S3_BUCKET_LOCK,
+          s3BucketName);
+
+      String s3Mapping = omMetadataManager.getS3Table().get(s3BucketName);
+
+      if (s3Mapping == null) {
+        throw new OMException("S3Bucket " + s3BucketName + " not found",
+            OMException.ResultCodes.S3_BUCKET_NOT_FOUND);
+      } else {
+        volumeName = getOzoneVolumeName(s3Mapping);
+
+        acquiredBucketLock =
+            omMetadataManager.getLock().acquireLock(BUCKET_LOCK, volumeName,
+                s3BucketName);
+
+        String bucketKey = omMetadataManager.getBucketKey(volumeName,
+            s3BucketName);
+
+        // Update bucket table cache and s3 table cache.
+        omMetadataManager.getBucketTable().addCacheEntry(
+            new CacheKey<>(bucketKey),
+            new CacheValue<>(Optional.absent(), transactionLogIndex));
+        omMetadataManager.getS3Table().addCacheEntry(
+            new CacheKey<>(s3BucketName),
+            new CacheValue<>(Optional.absent(), transactionLogIndex));
+      }
+    } catch (IOException ex) {
+      exception = ex;
+    } finally {
+      if (acquiredBucketLock) {
+        omMetadataManager.getLock().releaseLock(BUCKET_LOCK, volumeName,
+            s3BucketName);
+      }
+      if (acquiredS3Lock) {
+        omMetadataManager.getLock().releaseLock(S3_BUCKET_LOCK, s3BucketName);
+      }
+    }
+
+    // Performing audit logging outside of the lock.
+    auditLog(ozoneManager.getAuditLogger(),
+        buildAuditMessage(OMAction.DELETE_S3_BUCKET,
+            buildAuditMap(s3BucketName), exception,
+            getOmRequest().getUserInfo()));
+
+    if (exception == null) {
+      // Decrement the S3 bucket and Ozone bucket counts, since each S3
+      // bucket is backed by an Ozone volume/bucket.
+      LOG.debug("S3Bucket {} successfully deleted", s3BucketName);
+      omMetrics.decNumS3Buckets();
+      omMetrics.decNumBuckets();
+      omResponse.setDeleteS3BucketResponse(
+          OzoneManagerProtocolProtos.S3DeleteBucketResponse.newBuilder());
+      return new S3BucketDeleteResponse(s3BucketName, volumeName,
+          omResponse.build());
+    } else {
+      LOG.error("S3Bucket Deletion failed for S3Bucket:{}", s3BucketName,
+          exception);
+      omMetrics.incNumS3BucketDeleteFails();
+      return new S3BucketDeleteResponse(null, null,
+          createErrorOMResponse(omResponse, exception));
+    }
+  }
+
+  /**
+   * Extracts the Ozone volume name from an s3Table mapping value.
+   * @param s3Mapping mapping value of the form volumeName/bucketName
+   * @return volumeName
+   * @throws IOException
+   */
+  private String getOzoneVolumeName(String s3Mapping) throws IOException {
+    return s3Mapping.split("/")[0];
+  }
+
+  private Map<String, String> buildAuditMap(String s3BucketName) {
+    Map<String, String> auditMap = new HashMap<>();
+    auditMap.put(s3BucketName, OzoneConsts.S3_BUCKET);
+    return auditMap;
+  }
+
+}
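
getOzoneVolumeName above relies on the mapping value stored in the s3Table by the create path (see S3BucketCreateRequest.formatS3MappingName in the tests further down). A tiny worked example, with an assumed mapping value:

    // Assumed layout of the s3Table value: "<ozoneVolumeName>/<bucketName>".
    String s3Mapping = "s3deadbeef/mybucket";    // hypothetical table entry
    String volumeName = s3Mapping.split("/")[0]; // -> "s3deadbeef"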

+ 7 - 1
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketCreateResponse.java

@@ -22,6 +22,8 @@ import javax.annotation.Nullable;
 import java.io.IOException;
 
 import com.google.common.base.Preconditions;
+import com.google.common.annotations.VisibleForTesting;
+
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.response.OMClientResponse;
 import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse;
@@ -69,5 +71,9 @@ public class S3BucketCreateResponse extends OMClientResponse {
           s3Mapping);
     }
   }
-}
 
+  @VisibleForTesting
+  public String getS3Mapping() {
+    return s3Mapping;
+  }
+}

+ 55 - 0
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/bucket/S3BucketDeleteResponse.java

@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.s3.bucket;
+
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.utils.db.BatchOperation;
+
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import java.io.IOException;
+
+/**
+ * Response for S3Bucket Delete request.
+ */
+public class S3BucketDeleteResponse extends OMClientResponse {
+
+  private String s3BucketName;
+  private String volumeName;
+  public S3BucketDeleteResponse(@Nullable String s3BucketName,
+      @Nullable String volumeName, @Nonnull OMResponse omResponse) {
+    super(omResponse);
+    this.s3BucketName = s3BucketName;
+    this.volumeName = volumeName;
+  }
+
+  @Override
+  public void addToDBBatch(OMMetadataManager omMetadataManager,
+      BatchOperation batchOperation) throws IOException {
+
+    if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK) {
+      omMetadataManager.getBucketTable().deleteWithBatch(batchOperation,
+          omMetadataManager.getBucketKey(volumeName, s3BucketName));
+      omMetadataManager.getS3Table().deleteWithBatch(batchOperation,
+          s3BucketName);
+    }
+  }
+}

+ 1 - 0
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerHARequestHandlerImpl.java

@@ -68,6 +68,7 @@ public class OzoneManagerHARequestHandlerImpl
     case CreateFile:
     case PurgeKeys:
     case CreateS3Bucket:
+    case DeleteS3Bucket:
       //TODO: We don't need to pass transactionID, this will be removed when
       // complete write requests is changed to new model. And also we can
       // return OMClientResponse, then adding to doubleBuffer can be taken

+ 11 - 0
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java

@@ -203,6 +203,17 @@ public final class TestOMRequestUtils {
         .setClientId(UUID.randomUUID().toString()).build();
   }
 
+  public static OzoneManagerProtocolProtos.OMRequest deleteS3BucketRequest(
+      String s3BucketName) {
+    OzoneManagerProtocolProtos.S3DeleteBucketRequest request =
+        OzoneManagerProtocolProtos.S3DeleteBucketRequest.newBuilder()
+            .setS3BucketName(s3BucketName).build();
+    return OzoneManagerProtocolProtos.OMRequest.newBuilder()
+        .setDeleteS3BucketRequest(request)
+        .setCmdType(OzoneManagerProtocolProtos.Type.DeleteS3Bucket)
+        .setClientId(UUID.randomUUID().toString()).build();
+  }
+
   public static List< HddsProtos.KeyValue> getMetadataList() {
     List<HddsProtos.KeyValue> metadataList = new ArrayList<>();
     metadataList.add(HddsProtos.KeyValue.newBuilder().setKey("key1").setValue(

+ 167 - 0
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/bucket/TestS3BucketDeleteRequest.java

@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.ozone.om.request.s3.bucket;
+
+import java.util.UUID;
+
+import org.apache.commons.lang.RandomStringUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.AuditMessage;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .OMRequest;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import static org.junit.Assert.fail;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests S3BucketDelete Request.
+ */
+public class TestS3BucketDeleteRequest {
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  private OzoneManager ozoneManager;
+  private OMMetrics omMetrics;
+  private OMMetadataManager omMetadataManager;
+  private AuditLogger auditLogger;
+
+
+  @Before
+  public void setup() throws Exception {
+
+    ozoneManager = Mockito.mock(OzoneManager.class);
+    omMetrics = OMMetrics.create();
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
+        folder.newFolder().getAbsolutePath());
+    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
+    when(ozoneManager.getMetrics()).thenReturn(omMetrics);
+    when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
+    auditLogger = Mockito.mock(AuditLogger.class);
+    when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
+    Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
+  }
+
+  @After
+  public void stop() {
+    omMetrics.unRegister();
+    Mockito.framework().clearInlineMocks();
+  }
+
+  @Test
+  public void testPreExecute() throws Exception {
+    String s3BucketName = UUID.randomUUID().toString();
+    doPreExecute(s3BucketName);
+  }
+
+  @Test
+  public void testValidateAndUpdateCache() throws Exception {
+    String s3BucketName = UUID.randomUUID().toString();
+    OMRequest omRequest = doPreExecute(s3BucketName);
+
+    // Add s3Bucket to s3Bucket table.
+    TestOMRequestUtils.addS3BucketToDB("ozone", s3BucketName,
+        omMetadataManager);
+
+    S3BucketDeleteRequest s3BucketDeleteRequest =
+        new S3BucketDeleteRequest(omRequest);
+
+    OMClientResponse s3BucketDeleteResponse =
+        s3BucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1L);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+        s3BucketDeleteResponse.getOMResponse().getStatus());
+  }
+
+  @Test
+  public void testValidateAndUpdateCacheWithS3BucketNotFound()
+      throws Exception {
+    String s3BucketName = UUID.randomUUID().toString();
+    OMRequest omRequest = doPreExecute(s3BucketName);
+
+    S3BucketDeleteRequest s3BucketDeleteRequest =
+        new S3BucketDeleteRequest(omRequest);
+
+    OMClientResponse s3BucketDeleteResponse =
+        s3BucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1L);
+
+    Assert.assertEquals(OzoneManagerProtocolProtos.Status.S3_BUCKET_NOT_FOUND,
+        s3BucketDeleteResponse.getOMResponse().getStatus());
+  }
+
+  @Test
+  public void testPreExecuteInvalidBucketLength() throws Exception {
+    // Set a bucket name that is less than 3 characters long.
+    String s3BucketName = RandomStringUtils.randomAlphabetic(2);
+
+    try {
+      doPreExecute(s3BucketName);
+      fail("testPreExecuteInvalidBucketLength failed");
+    } catch (OMException ex) {
+      GenericTestUtils.assertExceptionContains("S3_BUCKET_INVALID_LENGTH", ex);
+    }
+
+    // Set a bucket name that is more than 63 characters long.
+    s3BucketName = RandomStringUtils.randomAlphabetic(65);
+
+    try {
+      doPreExecute(s3BucketName);
+      fail("testPreExecuteInvalidBucketLength failed");
+    } catch (OMException ex) {
+      GenericTestUtils.assertExceptionContains("S3_BUCKET_INVALID_LENGTH", ex);
+    }
+  }
+
+  private OMRequest doPreExecute(String s3BucketName) throws Exception {
+    OMRequest omRequest =
+        TestOMRequestUtils.deleteS3BucketRequest(s3BucketName);
+
+    S3BucketDeleteRequest s3BucketDeleteRequest =
+        new S3BucketDeleteRequest(omRequest);
+
+    OMRequest modifiedOMRequest =
+        s3BucketDeleteRequest.preExecute(ozoneManager);
+
+    // As the user info is set in preExecute, the modified request should
+    // not equal the original.
+    Assert.assertNotEquals(omRequest, modifiedOMRequest);
+
+    return modifiedOMRequest;
+  }
+}

+ 42 - 0
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestOMResponseUtils.java

@@ -20,6 +20,12 @@
 package org.apache.hadoop.ozone.om.response;
 
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest;
+import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse;
+import org.apache.hadoop.ozone.om.response.s3.bucket.S3BucketCreateResponse;
+import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
 import org.apache.hadoop.util.Time;
 
 /**
@@ -37,4 +43,40 @@ public final class TestOMResponseUtils {
             "key1", "value1").build();
 
   }
+
+  public static S3BucketCreateResponse createS3BucketResponse(String userName,
+      String volumeName, String s3BucketName) {
+    OzoneManagerProtocolProtos.OMResponse omResponse =
+        OzoneManagerProtocolProtos.OMResponse.newBuilder()
+            .setCmdType(OzoneManagerProtocolProtos.Type.CreateS3Bucket)
+            .setStatus(OzoneManagerProtocolProtos.Status.OK)
+            .setSuccess(true)
+            .setCreateS3BucketResponse(
+                OzoneManagerProtocolProtos.S3CreateBucketResponse
+                    .getDefaultInstance())
+            .build();
+
+    OzoneManagerProtocolProtos.VolumeList volumeList =
+        OzoneManagerProtocolProtos.VolumeList.newBuilder()
+            .addVolumeNames(volumeName).build();
+
+    OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
+        .setOwnerName(userName).setAdminName(userName)
+        .setVolume(volumeName).setCreationTime(Time.now()).build();
+
+    OMVolumeCreateResponse omVolumeCreateResponse =
+        new OMVolumeCreateResponse(omVolumeArgs, volumeList, omResponse);
+
+
+    OmBucketInfo omBucketInfo = TestOMResponseUtils.createBucket(
+        volumeName, s3BucketName);
+    OMBucketCreateResponse omBucketCreateResponse =
+        new OMBucketCreateResponse(omBucketInfo, omResponse);
+
+    String s3Mapping = S3BucketCreateRequest.formatS3MappingName(volumeName,
+        s3BucketName);
+    return
+        new S3BucketCreateResponse(omVolumeCreateResponse,
+            omBucketCreateResponse, s3BucketName, s3Mapping, omResponse);
+  }
 }

+ 3 - 38
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketCreateResponse.java

@@ -31,14 +31,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest;
 import org.apache.hadoop.ozone.om.response.TestOMResponseUtils;
-import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse;
-import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
-import org.apache.hadoop.util.Time;
 import org.apache.hadoop.utils.db.BatchOperation;
 
 /**
@@ -66,40 +60,11 @@ public class TestS3BucketCreateResponse {
   public void testAddToDBBatch() throws Exception {
     String userName = UUID.randomUUID().toString();
     String s3BucketName = UUID.randomUUID().toString();
-
-    OzoneManagerProtocolProtos.OMResponse omResponse =
-        OzoneManagerProtocolProtos.OMResponse.newBuilder()
-            .setCmdType(OzoneManagerProtocolProtos.Type.CreateS3Bucket)
-            .setStatus(OzoneManagerProtocolProtos.Status.OK)
-            .setSuccess(true)
-            .setCreateS3BucketResponse(
-                OzoneManagerProtocolProtos.S3CreateBucketResponse
-                    .getDefaultInstance())
-            .build();
-
     String volumeName = S3BucketCreateRequest.formatOzoneVolumeName(userName);
-    OzoneManagerProtocolProtos.VolumeList volumeList =
-        OzoneManagerProtocolProtos.VolumeList.newBuilder()
-            .addVolumeNames(volumeName).build();
-
-    OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
-        .setOwnerName(userName).setAdminName(userName)
-        .setVolume(volumeName).setCreationTime(Time.now()).build();
-
-    OMVolumeCreateResponse omVolumeCreateResponse =
-        new OMVolumeCreateResponse(omVolumeArgs, volumeList, omResponse);
-
-
-    OmBucketInfo omBucketInfo = TestOMResponseUtils.createBucket(
-        volumeName, s3BucketName);
-    OMBucketCreateResponse omBucketCreateResponse =
-        new OMBucketCreateResponse(omBucketInfo, omResponse);
 
-    String s3Mapping = S3BucketCreateRequest.formatS3MappingName(volumeName,
-        s3BucketName);
     S3BucketCreateResponse s3BucketCreateResponse =
-        new S3BucketCreateResponse(omVolumeCreateResponse,
-            omBucketCreateResponse, s3BucketName, s3Mapping, omResponse);
+        TestOMResponseUtils.createS3BucketResponse(userName, volumeName,
+            s3BucketName);
 
     s3BucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
 
@@ -107,7 +72,7 @@ public class TestS3BucketCreateResponse {
     omMetadataManager.getStore().commitBatchOperation(batchOperation);
 
     Assert.assertNotNull(omMetadataManager.getS3Table().get(s3BucketName));
-    Assert.assertEquals(s3Mapping,
+    Assert.assertEquals(s3BucketCreateResponse.getS3Mapping(),
         omMetadataManager.getS3Table().get(s3BucketName));
     Assert.assertNotNull(omMetadataManager.getVolumeTable().get(
         omMetadataManager.getVolumeKey(volumeName)));

+ 73 - 0
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/bucket/TestS3BucketDeleteResponse.java

@@ -0,0 +1,73 @@
+package org.apache.hadoop.ozone.om.response.s3.bucket;
+
+import java.util.UUID;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.request.s3.bucket.S3BucketCreateRequest;
+import org.apache.hadoop.ozone.om.response.TestOMResponseUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .S3DeleteBucketResponse;
+import org.apache.hadoop.utils.db.BatchOperation;
+
+
+
+/**
+ * Tests S3BucketDeleteResponse.
+ */
+public class TestS3BucketDeleteResponse {
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+  private OMMetadataManager omMetadataManager;
+  private BatchOperation batchOperation;
+
+  @Before
+  public void setup() throws Exception {
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
+        folder.newFolder().getAbsolutePath());
+    omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
+    batchOperation = omMetadataManager.getStore().initBatchOperation();
+  }
+
+  @Test
+  public void testAddToDBBatch() throws Exception {
+    String s3BucketName = UUID.randomUUID().toString();
+    String userName = "ozone";
+    String volumeName = S3BucketCreateRequest.formatOzoneVolumeName(userName);
+    S3BucketCreateResponse s3BucketCreateResponse =
+        TestOMResponseUtils.createS3BucketResponse(userName, volumeName,
+            s3BucketName);
+
+    s3BucketCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
+
+    OMResponse omResponse = OMResponse.newBuilder().setCmdType(
+        OzoneManagerProtocolProtos.Type.DeleteS3Bucket).setStatus(
+        OzoneManagerProtocolProtos.Status.OK).setSuccess(true)
+        .setDeleteS3BucketResponse(S3DeleteBucketResponse.newBuilder()).build();
+
+    S3BucketDeleteResponse s3BucketDeleteResponse =
+        new S3BucketDeleteResponse(s3BucketName, volumeName, omResponse);
+
+    s3BucketDeleteResponse.addToDBBatch(omMetadataManager, batchOperation);
+
+    omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+    // Verify that the S3 mapping and the backing Ozone bucket are gone.
+    Assert.assertNull(omMetadataManager.getS3Table().get(s3BucketName));
+    Assert.assertNull(omMetadataManager.getBucketTable().get(
+        omMetadataManager.getBucketKey(volumeName, s3BucketName)));
+  }
+}