HDDS-577. Support S3 buckets as first class objects in Ozone Manager - 2.
Contributed by Bharat Viswanadham.

Anu Engineer · 6 years ago · commit 5b7ba48ced

+ 48 - 0
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java

@@ -82,6 +82,54 @@ public class ObjectStore {
     proxy.createVolume(volumeName, volumeArgs);
   }
 
+  /**
+   * Creates an S3 bucket inside Ozone Manager and creates the mapping needed
+   * to access it via both S3 and Ozone.
+   * @param userName - S3 user name.
+   * @param s3BucketName - S3 bucket name.
+   * @throws IOException - Thrown on failure, for example if the bucket
+   * already exists.
+   */
+  public void createS3Bucket(String userName, String s3BucketName) throws
+      IOException {
+    proxy.createS3Bucket(userName, s3BucketName);
+  }
+
+  /**
+   * Returns the Ozone namespace for the S3 bucket, in the form
+   * ozoneVolumeName/ozoneBucketName.
+   * @param s3BucketName - S3 bucket name.
+   * @return String - The Ozone canonical name for this S3 bucket. This
+   * string is useful for mounting an OzoneFS.
+   * @throws IOException - Thrown if the S3 bucket does not exist.
+   */
+  public String getOzoneBucketMapping(String s3BucketName) throws IOException {
+    return proxy.getOzoneBucketMapping(s3BucketName);
+  }
+
+  /**
+   * Returns the corresponding Ozone volume given an S3 bucket name.
+   * @param s3BucketName - S3 bucket name.
+   * @return String - Ozone volume name.
+   * @throws IOException - Thrown if the S3 bucket does not exist.
+   */
+  public String getOzoneVolumeName(String s3BucketName) throws IOException {
+    String mapping = getOzoneBucketMapping(s3BucketName);
+    return mapping.split("/")[0];
+  }
+
+  /**
+   * Returns the corresponding Ozone bucket name for the given S3 bucket.
+   * @param s3BucketName - S3 bucket name.
+   * @return String - Ozone bucket name.
+   * @throws IOException - Thrown if the S3 bucket does not exist.
+   */
+  public String getOzoneBucketName(String s3BucketName) throws IOException {
+    String mapping = getOzoneBucketMapping(s3BucketName);
+    return mapping.split("/")[1];
+  }
+
   /**
    * Returns the volume information.
    * @param volumeName Name of the volume.
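
For reference, a minimal usage sketch of the new ObjectStore API (illustrative only, not part of this change; it assumes an OzoneClient named client obtained from OzoneClientFactory, and that "ozone" is an acceptable S3 user name):

    ObjectStore store = client.getObjectStore();
    store.createS3Bucket("ozone", "bucket1");
    // The mapping has the form "s3" + userName + "/" + s3BucketName,
    // e.g. "s3ozone/bucket1" (see TestOzoneRpcClient below).
    String mapping = store.getOzoneBucketMapping("bucket1");
    String volumeName = store.getOzoneVolumeName("bucket1");   // "s3ozone"
    String bucketName = store.getOzoneBucketName("bucket1");   // "bucket1"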

+ 35 - 0
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java

@@ -321,6 +321,41 @@ public interface ClientProtocol {
                                 String keyName)
       throws IOException;
 
+  /**
+   * Creates an S3 bucket inside Ozone Manager and creates the mapping needed
+   * to access it via both S3 and Ozone.
+   * @param userName - S3 user name.
+   * @param s3BucketName - S3 bucket name.
+   * @throws IOException - Thrown on failure, for example if the bucket
+   * already exists.
+   */
+  void createS3Bucket(String userName, String s3BucketName) throws IOException;
+
+  /**
+   * Returns the Ozone namespace for the S3 bucket, in the form
+   * ozoneVolumeName/ozoneBucketName.
+   * @param s3BucketName - S3 bucket name.
+   * @return String - The Ozone canonical name for this S3 bucket. This
+   * string is useful for mounting an OzoneFS.
+   * @throws IOException - Thrown if the S3 bucket does not exist.
+   */
+  String getOzoneBucketMapping(String s3BucketName) throws IOException;
+
+  /**
+   * Returns the corresponding Ozone volume given an S3 bucket name.
+   * @param s3BucketName - S3 bucket name.
+   * @return String - Ozone volume name.
+   * @throws IOException - Thrown if the S3 bucket does not exist.
+   */
+  String getOzoneVolumeName(String s3BucketName) throws IOException;
+
+  /**
+   * Returns the corresponding Ozone bucket name for the given S3 bucket.
+   * @param s3BucketName - S3 bucket name.
+   * @return String - Ozone bucket name.
+   * @throws IOException - Thrown if the S3 bucket does not exist.
+   */
+  String getOzoneBucketName(String s3BucketName) throws IOException;
+
   /**
    * Close and release the resources.
    */

+ 25 - 0
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rest/RestClient.java

@@ -820,6 +820,31 @@ public class RestClient implements ClientProtocol {
     }
   }
 
+  @Override
+  public void createS3Bucket(String userName, String s3BucketName)
+      throws IOException {
+    throw new UnsupportedOperationException("Ozone REST protocol does not " +
+        "support this operation.");
+  }
+
+  @Override
+  public String getOzoneBucketMapping(String s3BucketName) throws IOException {
+    throw new UnsupportedOperationException("Ozone REST protocol does not " +
+        "support this operation.");
+  }
+
+  @Override
+  public String getOzoneVolumeName(String s3BucketName) throws IOException {
+    throw new UnsupportedOperationException("Ozone REST protocol does not " +
+        "support this operation.");
+  }
+
+  @Override
+  public String getOzoneBucketName(String s3BucketName) throws IOException {
+    throw new UnsupportedOperationException("Ozone REST protocol does not " +
+        "support this operation.");
+  }
+
   /**
    * Adds Ozone headers to http request.
    *
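
Note (illustrative only): the REST-based ClientProtocol deliberately rejects the new S3 calls, so code that may be handed either protocol implementation has to be prepared for UnsupportedOperationException:

    // Hypothetical guard; "store" is an ObjectStore as in the sketch above.
    try {
      store.createS3Bucket("ozone", "bucket1");
    } catch (UnsupportedOperationException e) {
      // The ObjectStore is backed by RestClient; use the RPC client instead.
    }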

+ 32 - 0
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java

@@ -65,6 +65,7 @@ import org.apache.hadoop.hdds.scm.protocolPB
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolPB;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.logging.log4j.util.Strings;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -567,6 +568,37 @@ public class RpcClient implements ClientProtocol {
                         ozoneKeyLocations);
   }
 
+  @Override
+  public void createS3Bucket(String userName, String s3BucketName)
+      throws IOException {
+    Preconditions.checkArgument(Strings.isNotBlank(userName), "user name " +
+        "cannot be null or empty.");
+
+    Preconditions.checkArgument(Strings.isNotBlank(s3BucketName), "bucket " +
+        "name cannot be null or empty.");
+    ozoneManagerClient.createS3Bucket(userName, s3BucketName);
+  }
+
+  @Override
+  public String getOzoneBucketMapping(String s3BucketName) throws IOException {
+    Preconditions.checkArgument(Strings.isNotBlank(s3BucketName), "bucket " +
+        "name cannot be null or empty.");
+    return ozoneManagerClient.getOzoneBucketMapping(s3BucketName);
+  }
+
+  @Override
+  public String getOzoneVolumeName(String s3BucketName) throws IOException {
+    String mapping = getOzoneBucketMapping(s3BucketName);
+    return mapping.split("/")[0];
+  }
+
+  @Override
+  public String getOzoneBucketName(String s3BucketName) throws IOException {
+    String mapping = getOzoneBucketMapping(s3BucketName);
+    return mapping.split("/")[1];
+  }
+
   @Override
   public void close() throws IOException {
     IOUtils.cleanupWithLogger(LOG, storageContainerLocationClient);
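
The new RpcClient methods validate their arguments locally before any RPC is sent. As a hedged sketch (not part of this change), a blank user or bucket name fails fast with an IllegalArgumentException from the Preconditions check rather than a remote error:

    try {
      store.createS3Bucket("", "bucket1");
    } catch (IllegalArgumentException iae) {
      // "user name cannot be null or empty."
    }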

+ 25 - 0
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java

@@ -250,4 +250,29 @@ public interface OzoneManagerProtocol {
    * @throws IOException
    */
   List<ServiceInfo> getServiceList() throws IOException;
+
+  /*
+   * S3-specific functionality supported by Ozone Manager.
+   */
+
+  /**
+   * Creates an S3 bucket inside Ozone Manager and creates the mapping needed
+   * to access it via both S3 and Ozone.
+   * @param userName - S3 user name.
+   * @param s3BucketName - S3 bucket name.
+   * @throws IOException - Thrown on failure, for example if the bucket
+   * already exists.
+   */
+  void createS3Bucket(String userName, String s3BucketName) throws IOException;
+
+  /**
+   * Returns the Ozone namespace for the S3 bucket, in the form
+   * ozoneVolumeName/ozoneBucketName.
+   * @param s3BucketName - S3 bucket name.
+   * @return String - The Ozone canonical name for this S3 bucket. This
+   * string is useful for mounting an OzoneFS.
+   * @throws IOException - Thrown if the S3 bucket does not exist.
+   */
+  String getOzoneBucketMapping(String s3BucketName) throws IOException;
+
 }
+

+ 51 - 0
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java

@@ -114,6 +114,17 @@ import org.apache.hadoop.ozone.protocol.proto
     .OzoneManagerProtocolProtos.ServiceListRequest;
 import org.apache.hadoop.ozone.protocol.proto
     .OzoneManagerProtocolProtos.ServiceListResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.S3BucketRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.S3BucketResponse;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.S3BucketInfoRequest;
+import org.apache.hadoop.ozone.protocol.proto
+    .OzoneManagerProtocolProtos.S3BucketInfoResponse;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -763,6 +774,46 @@ public final class OzoneManagerProtocolClientSideTranslatorPB
     }
   }
 
+  @Override
+  public void createS3Bucket(String userName, String s3BucketName)
+      throws IOException {
+    S3BucketRequest request = S3BucketRequest.newBuilder()
+        .setUserName(userName)
+        .setS3Bucketname(s3BucketName)
+        .build();
+    final S3BucketResponse resp;
+    try {
+      resp = rpcProxy.createS3Bucket(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("Creating S3 bucket failed, error: "
+          + resp.getStatus());
+    }
+  }
+
+  @Override
+  public String getOzoneBucketMapping(String s3BucketName)
+      throws IOException {
+    S3BucketInfoRequest request = S3BucketInfoRequest.newBuilder()
+        .setS3BucketName(s3BucketName)
+        .build();
+    final S3BucketInfoResponse resp;
+    try {
+      resp = rpcProxy.getS3Bucketinfo(NULL_RPC_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    if (resp.getStatus() != Status.OK) {
+      throw new IOException("GetOzoneBucketMapping failed, error: "
+          + resp.getStatus());
+    }
+    return resp.getOzoneMapping();
+  }
+
   /**
    * Return the proxy object underlying this protocol translator.
    *
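
Note (illustrative only, not added by this patch): a non-OK status from the OM is surfaced to callers as a plain IOException, so creating the same S3 bucket twice is expected to look roughly like this:

    store.createS3Bucket("ozone", "bucket1");
    try {
      store.createS3Bucket("ozone", "bucket1");
    } catch (IOException ex) {
      // "Creating S3 bucket failed, error: S3_BUCKET_ALREADY_EXISTS"
    }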

+ 32 - 0
hadoop-ozone/common/src/main/proto/OzoneManagerProtocol.proto

@@ -59,6 +59,8 @@ enum Status {
     METADATA_ERROR = 19;
     OM_NOT_INITIALIZED = 20;
     SCM_VERSION_MISMATCH_ERROR = 21;
+    S3_BUCKET_NOT_FOUND = 22;
+    S3_BUCKET_ALREADY_EXISTS = 23;
 }
 
 
@@ -362,6 +364,24 @@ message ServiceInfo {
     repeated ServicePort servicePorts = 3;
 }
 
+message S3BucketRequest {
+    required string userName = 1;
+    required string s3bucketname = 2;
+}
+
+message S3BucketResponse {
+    required Status status = 1;
+}
+
+message S3BucketInfoRequest {
+    required string s3bucketName = 1;
+}
+
+message S3BucketInfoResponse {
+    required Status status = 1;
+    optional string ozoneMapping = 2;
+}
+
 /**
  The OM service that takes care of Ozone namespace.
 */
@@ -479,4 +499,16 @@ service OzoneManagerService {
     */
     rpc getServiceList(ServiceListRequest)
     returns(ServiceListResponse);
+
+    /**
+     Creates an S3 bucket and creates an Ozone mapping for that bucket.
+    */
+    rpc createS3Bucket(S3BucketRequest)
+    returns(S3BucketResponse);
+
+    /**
+    Gets the Ozone mapping information for the S3 bucket.
+    */
+    rpc getS3Bucketinfo(S3BucketInfoRequest)
+    returns(S3BucketInfoResponse);
 }

+ 34 - 0
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java

@@ -200,6 +200,40 @@ public class TestOzoneRpcClient {
     Assert.assertTrue(volume.getCreationTime() >= currentTime);
   }
 
+  @Test
+  public void testCreateS3Bucket()
+      throws IOException, OzoneException {
+    long currentTime = Time.now();
+    String userName = "ozone";
+    String bucketName = UUID.randomUUID().toString();
+    store.createS3Bucket(userName, bucketName);
+    String volumeName = store.getOzoneVolumeName(bucketName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    Assert.assertEquals(bucketName, bucket.getName());
+    Assert.assertTrue(bucket.getCreationTime() >= currentTime);
+    Assert.assertTrue(volume.getCreationTime() >= currentTime);
+  }
+
+  @Test
+  public void testCreateS3BucketMapping()
+      throws IOException, OzoneException {
+    String userName = "ozone";
+    String bucketName = UUID.randomUUID().toString();
+    store.createS3Bucket(userName, bucketName);
+    String volumeName = store.getOzoneVolumeName(bucketName);
+    OzoneVolume volume = store.getVolume(volumeName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    Assert.assertEquals(bucketName, bucket.getName());
+
+    String mapping = store.getOzoneBucketMapping(bucketName);
+    Assert.assertEquals("s3" + userName + "/" + bucketName, mapping);
+    Assert.assertEquals(bucketName, store.getOzoneBucketName(bucketName));
+    Assert.assertEquals("s3" + userName, store.getOzoneVolumeName(bucketName));
+  }
+
   @Test
   public void testCreateBucketWithVersioning()
       throws IOException, OzoneException {
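
A further negative-path sketch (illustrative only, not added by this patch), using the test class's existing store field: looking up the mapping for a bucket that was never created should fail, since getOzoneBucketMapping is documented to throw when the S3 bucket does not exist:

    try {
      store.getOzoneBucketMapping(UUID.randomUUID().toString());
      Assert.fail("Expected lookup of a missing S3 bucket to fail");
    } catch (IOException ex) {
      // Expected; the OM reports S3_BUCKET_NOT_FOUND.
    }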

+ 23 - 0
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java

@@ -122,6 +122,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
   private final ScmBlockLocationProtocol scmBlockClient;
   private final StorageContainerLocationProtocol scmContainerClient;
   private ObjectName omInfoBeanName;
+  private final S3BucketManager s3BucketManager;
 
   private OzoneManager(OzoneConfiguration conf) throws IOException {
     Preconditions.checkNotNull(conf);
@@ -159,6 +160,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     metadataManager = new OmMetadataManagerImpl(configuration);
     volumeManager = new VolumeManagerImpl(metadataManager, configuration);
     bucketManager = new BucketManagerImpl(metadataManager);
+    s3BucketManager = new S3BucketManagerImpl(configuration, metadataManager,
+        volumeManager, bucketManager);
     metrics = OMMetrics.create();
     keyManager =
         new KeyManagerImpl(scmBlockClient, metadataManager, configuration,
@@ -1128,6 +1131,26 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
     return services;
   }
 
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public void createS3Bucket(String userName, String s3BucketName)
+      throws IOException {
+    s3BucketManager.createS3Bucket(userName, s3BucketName);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public String getOzoneBucketMapping(String s3BucketName)
+      throws IOException {
+    return s3BucketManager.getOzoneBucketMapping(s3BucketName);
+  }
+
   /**
    * Startup options.
    */

+ 11 - 0
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManagerImpl.java

@@ -74,6 +74,10 @@ public class S3BucketManagerImpl implements S3BucketManager {
     Preconditions.checkArgument(Strings.isNotBlank(userName), "User name " +
         "cannot be null or empty.");
 
+    Preconditions.checkArgument(bucketName.length() >= 3 &&
+        bucketName.length() < 64,
+        "S3 bucket name length must be between 3 and 63 characters.");
+
     // TODO: Decide if we want to enforce S3 Bucket Creation Rules in this
     // code path?
     // https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
@@ -105,6 +109,7 @@ public class S3BucketManagerImpl implements S3BucketManager {
               bucketName.getBytes(StandardCharsets.UTF_8));
 
       if (bucket != null) {
+        LOG.debug("Bucket already exists. {}", bucketName);
         throw new OMException(
             "Unable to create S3 bucket. " + bucketName + " already exists.",
             OMException.ResultCodes.S3_BUCKET_ALREADY_EXISTS);
@@ -165,6 +170,12 @@ public class S3BucketManagerImpl implements S3BucketManager {
 
   @Override
   public String getOzoneBucketMapping(String s3BucketName) throws IOException {
+    Preconditions.checkArgument(
+        Strings.isNotBlank(s3BucketName),
+        "Bucket name cannot be null or empty.");
+    Preconditions.checkArgument(s3BucketName.length() >= 3 &&
+        s3BucketName.length() < 64,
+        "S3 bucket name length must be between 3 and 63 characters.");
     omMetadataManager.getLock().acquireS3Lock(s3BucketName);
     try {
       byte[] mapping =
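
As an illustrative note (not part of the patch), the new precondition checks reject names outside the 3-63 character range with an IllegalArgumentException before the metadata store is touched, for example:

    // Hypothetical call: a two-character name fails the length check.
    s3BucketManager.createS3Bucket("ozone", "ab");
    // -> IllegalArgumentException: S3 bucket name length must be
    //    between 3 and 63 characters.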

+ 113 - 74
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java

@@ -19,6 +19,8 @@ package org.apache.hadoop.ozone.protocolPB;
 import com.google.common.collect.Lists;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OmBucketArgs;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
@@ -29,80 +31,86 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
 import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
 import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.AllocateBlockRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.AllocateBlockResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.CommitKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.CommitKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.CreateBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.CreateBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.InfoBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.InfoBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.SetBucketPropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.SetBucketPropertyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.DeleteBucketRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.DeleteBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.CreateVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.CreateVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.LocateKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.LocateKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.RenameKeyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.RenameKeyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.KeyArgs;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.SetVolumePropertyRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.SetVolumePropertyResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.CheckVolumeAccessRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.CheckVolumeAccessResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.InfoVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.InfoVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.DeleteVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.DeleteVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.ListVolumeRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.ListVolumeResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.ListBucketsRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.ListBucketsResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.ListKeysRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.ListKeysResponse;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.Status;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.ServiceListRequest;
-import org.apache.hadoop.ozone.protocol.proto
-    .OzoneManagerProtocolProtos.ServiceListResponse;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .AllocateBlockRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .AllocateBlockResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .CheckVolumeAccessRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .CheckVolumeAccessResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .CommitKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .CommitKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .CreateBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .CreateBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .CreateVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .CreateVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .DeleteBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .DeleteBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .DeleteVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .DeleteVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .InfoBucketRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .InfoBucketResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .InfoVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .InfoVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .ListBucketsRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .ListBucketsResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .ListKeysRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .ListKeysResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .ListVolumeRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .ListVolumeResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .LocateKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .LocateKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .RenameKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .RenameKeyResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .S3BucketInfoRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .S3BucketInfoResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .S3BucketRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .S3BucketResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .ServiceListRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .ServiceListResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .SetBucketPropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .SetBucketPropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .SetVolumePropertyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .SetVolumePropertyResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .Status;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -170,6 +178,10 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements
         return Status.OM_NOT_INITIALIZED;
       case SCM_VERSION_MISMATCH_ERROR:
         return Status.SCM_VERSION_MISMATCH_ERROR;
+      case S3_BUCKET_ALREADY_EXISTS:
+        return Status.S3_BUCKET_ALREADY_EXISTS;
+      case S3_BUCKET_NOT_FOUND:
+        return Status.S3_BUCKET_NOT_FOUND;
       default:
         return Status.INTERNAL_ERROR;
       }
@@ -570,4 +582,31 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements
     }
     return resp.build();
   }
+
+  @Override
+  public S3BucketResponse createS3Bucket(RpcController controller,
+      S3BucketRequest request) throws ServiceException {
+    S3BucketResponse.Builder resp = S3BucketResponse.newBuilder();
+    try {
+      impl.createS3Bucket(request.getUserName(), request.getS3Bucketname());
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
+
+  @Override
+  public S3BucketInfoResponse getS3Bucketinfo(RpcController controller,
+      S3BucketInfoRequest request) throws ServiceException {
+    S3BucketInfoResponse.Builder resp = S3BucketInfoResponse.newBuilder();
+    try {
+      resp.setOzoneMapping(
+          impl.getOzoneBucketMapping(request.getS3BucketName()));
+      resp.setStatus(Status.OK);
+    } catch (IOException e) {
+      resp.setStatus(exceptionToResponseStatus(e));
+    }
+    return resp.build();
+  }
 }