
HDFS-12083. Ozone: KSM: previous key has to be excluded from result in listVolumes, listBuckets and listKeys. Contributed by Nandakumar.

Yiqun Lin 7 years ago
parent
commit
58e850f262
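
After this change, the prev-key/start-key parameter is exclusive in all three listing calls: the listing begins strictly after the supplied name instead of repeating it. A minimal sketch of that semantics, using an in-memory TreeMap as a stand-in for the KSM metadata store (the class and helper below are illustrative only, not part of the patch):

import java.util.ArrayList;
import java.util.List;
import java.util.SortedMap;
import java.util.TreeMap;

/**
 * Illustration only: exclusive start-key listing, with a TreeMap standing in
 * for the KSM metadata store.
 */
public class ExclusiveStartListing {

  static List<String> list(TreeMap<String, String> store, String prefix,
      String previousKey, int maxKeys) {
    // tailMap(previousKey, false) starts strictly after previousKey, which
    // matches the new behaviour: the previous key is excluded from the result.
    SortedMap<String, String> range =
        (previousKey == null || previousKey.isEmpty())
            ? store : store.tailMap(previousKey, false);
    List<String> result = new ArrayList<>();
    for (String name : range.keySet()) {
      if (result.size() >= maxKeys) {
        break;
      }
      if (prefix == null || name.startsWith(prefix)) {
        result.add(name);
      }
    }
    return result;
  }

  public static void main(String[] args) {
    TreeMap<String, String> store = new TreeMap<>();
    for (int i = 0; i < 5; i++) {
      store.put("bucket-" + i, "");
    }
    // Prints [bucket-3, bucket-4]: "bucket-2" itself is excluded.
    System.out.println(list(store, "bucket-", "bucket-2", 10));
  }
}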

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/BucketManager.java

@@ -63,7 +63,7 @@ public interface BucketManager {
    *   to return.
    * @param startBucket
    *   Optional start bucket name parameter indicating where to start
-   *   the bucket listing from.
+   *   the bucket listing from; this key is excluded from the result.
    * @param bucketPrefix
    *   Optional start key parameter, restricting the response to buckets
    *   that begin with the specified name.

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/KeyManager.java

@@ -76,6 +76,7 @@ public interface KeyManager {
    * @param startKey
    *   the start key name, only the keys whose name is
    *   after this value will be included in the result.
+   *   This key is excluded from the result.
    * @param keyPrefix
    *   key name prefix, only the keys whose name has
    *   this prefix will be included in the result.

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/MetadataManager.java

@@ -148,6 +148,7 @@ public interface MetadataManager {
    * @param startBucket
    *   the start bucket name. Only the buckets whose name is
    *   after this value will be included in the result.
+   *   This key is excluded from the result.
    * @param bucketPrefix
    *   bucket name prefix. Only the buckets whose name has
    *   this prefix will be included in the result.
@@ -171,6 +172,7 @@ public interface MetadataManager {
    * @param startKey
    *   the start key name, only the keys whose name is
    *   after this value will be included in the result.
+   *   This key is excluded from the result.
    * @param keyPrefix
    *   key name prefix, only the keys whose name has
    *   this prefix will be included in the result.
@@ -193,7 +195,8 @@ public interface MetadataManager {
    * @param prefix
    *   the volume prefix used to filter the listing result.
    * @param startKey
-   *   the start volume name determines where to start listing from.
+   *   the start volume name determines where to start listing from;
+   *   this key is excluded from the result.
    * @param maxKeys
    *   the maximum number of volumes to return.
    * @return a list of {@link KsmVolumeArgs}

+ 29 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/MetadataManagerImpl.java

@@ -305,14 +305,22 @@ public class MetadataManagerImpl implements  MetadataManager {
           ResultCodes.FAILED_VOLUME_NOT_FOUND);
     }
 
-    byte[] startKeyBytes = null;
-    if (!Strings.isNullOrEmpty(startBucket)) {
-      startKeyBytes = getBucketKey(volumeName, startBucket);
-    }
     LevelDBKeyFilter filter =
         new KeyPrefixFilter(getBucketKeyPrefix(volumeName, bucketPrefix));
-    List<Map.Entry<byte[], byte[]>> rangeResult =
-        store.getRangeKVs(startKeyBytes, maxNumOfBuckets, filter);
+
+    List<Map.Entry<byte[], byte[]>> rangeResult;
+    if (!Strings.isNullOrEmpty(startBucket)) {
+      //Since we are excluding start key from the result,
+      // the maxNumOfBuckets is incremented.
+      rangeResult = store.getRangeKVs(
+          getBucketKey(volumeName, startBucket),
+          maxNumOfBuckets + 1, filter);
+      //Remove start key from result.
+      rangeResult.remove(0);
+    } else {
+      rangeResult = store.getRangeKVs(null, maxNumOfBuckets, filter);
+    }
+
     for (Map.Entry<byte[], byte[]> entry : rangeResult) {
       KsmBucketInfo info = KsmBucketInfo.getFromProtobuf(
           BucketInfo.parseFrom(entry.getValue()));
@@ -341,14 +349,22 @@ public class MetadataManagerImpl implements  MetadataManager {
           ResultCodes.FAILED_BUCKET_NOT_FOUND);
     }
 
-    byte[] startKeyBytes = null;
-    if (!Strings.isNullOrEmpty(startKey)) {
-      startKeyBytes = getDBKeyForKey(volumeName, bucketName, startKey);
-    }
     LevelDBKeyFilter filter =
         new KeyPrefixFilter(getKeyKeyPrefix(volumeName, bucketName, keyPrefix));
-    List<Map.Entry<byte[], byte[]>> rangeResult =
-        store.getRangeKVs(startKeyBytes, maxKeys, filter);
+
+    List<Map.Entry<byte[], byte[]>> rangeResult;
+    if (!Strings.isNullOrEmpty(startKey)) {
+      //Since we are excluding start key from the result,
+      // the maxKeys is incremented.
+      rangeResult = store.getRangeKVs(
+          getDBKeyForKey(volumeName, bucketName, startKey),
+          maxKeys + 1, filter);
+      //Remove start key from result.
+      rangeResult.remove(0);
+    } else {
+      rangeResult = store.getRangeKVs(null, maxKeys, filter);
+    }
+
     for (Map.Entry<byte[], byte[]> entry : rangeResult) {
       KsmKeyInfo info = KsmKeyInfo.getFromProtobuf(
           KeyInfo.parseFrom(entry.getValue()));
@@ -382,6 +398,7 @@ public class MetadataManagerImpl implements  MetadataManager {
 
       if (!startKeyFound && volumeName.equals(startKey)) {
         startKeyFound = true;
+        continue;
       }
       if (startKeyFound && result.size() < maxKeys) {
         byte[] volumeInfo = store.get(this.getVolumeKey(volumeName));
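
In MetadataManagerImpl the exclusion is implemented by requesting one extra entry from the store and dropping the first one, which assumes the range scan returns the start key itself as its first entry. A hedged sketch of the same pattern follows; the RangeScanner interface is invented here to stand in for store.getRangeKVs, and the equality check is a defensive variation (the patch itself removes index 0 unconditionally):

import java.util.ArrayList;
import java.util.List;

public class ExclusiveRangeScan {

  /** Stand-in for the LevelDB-backed range query; illustrative only. */
  interface RangeScanner {
    List<String> getRange(String startKey, int count);
  }

  static List<String> listAfter(RangeScanner store, String startKey, int max) {
    if (startKey == null || startKey.isEmpty()) {
      return store.getRange(null, max);
    }
    // Ask for one extra entry: the first entry is expected to be the start
    // key itself and has to be excluded from the result.
    List<String> range = new ArrayList<>(store.getRange(startKey, max + 1));
    if (!range.isEmpty() && range.get(0).equals(startKey)) {
      range.remove(0);
    }
    // Guard against returning max + 1 entries if the start key was absent.
    return range.size() > max ? range.subList(0, max) : range;
  }
}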

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/ksm/VolumeManager.java

@@ -88,7 +88,8 @@ public interface VolumeManager {
    * @param prefix
    *   the volume prefix used to filter the listing result.
    * @param startKey
-   *   the start volume name determines where to start listing from.
+   *   the start volume name determines where to start listing from;
+   *   this key is excluded from the result.
    * @param maxKeys
    *   the maximum number of volumes to return.
    * @return a list of {@link KsmVolumeArgs}

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java

@@ -451,12 +451,13 @@ public class OzoneBucket {
    * List all keys in a bucket.
    *
    * @param resultLength The max length of listing result.
-   * @param startKey The start key where to start listing from.
+   * @param previousKey The key from where listing should start;
+   *                    this key is excluded from the result.
    * @param prefix The prefix that return list keys start with.
    * @return List of OzoneKeys
    * @throws OzoneException
    */
-  public List<OzoneKey> listKeys(String resultLength, String startKey,
+  public List<OzoneKey> listKeys(String resultLength, String previousKey,
       String prefix) throws OzoneException {
     HttpGet getRequest = null;
     try (CloseableHttpClient httpClient = OzoneClientUtils.newHttpClient()) {
@@ -469,8 +470,8 @@ public class OzoneBucket {
         builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS, resultLength);
       }
 
-      if (!Strings.isNullOrEmpty(startKey)) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_PREVKEY, startKey);
+      if (!Strings.isNullOrEmpty(previousKey)) {
+        builder.addParameter(Header.OZONE_LIST_QUERY_PREVKEY, previousKey);
       }
 
       if (!Strings.isNullOrEmpty(prefix)) {

+ 9 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java

@@ -213,13 +213,13 @@ public class OzoneRestClient implements Closeable {
    *   Maximum number of results to return, if the result set
    *   is smaller than requested size, it means that list is
    *   complete.
-   * @param startVolume
+   * @param previousVolume
    *   The previous volume name.
    * @return List of Volumes
    * @throws OzoneException
    */
   public List<OzoneVolume> listVolumes(String onBehalfOf, String prefix,
-      int maxKeys, String startVolume) throws OzoneException {
+      int maxKeys, String previousVolume) throws OzoneException {
     HttpGet httpGet = null;
     try (CloseableHttpClient httpClient = newHttpClient()) {
       URIBuilder builder = new URIBuilder(endPointURI);
@@ -232,9 +232,9 @@ public class OzoneRestClient implements Closeable {
             .toString(maxKeys));
       }
 
-      if (!Strings.isNullOrEmpty(startVolume)) {
+      if (!Strings.isNullOrEmpty(previousVolume)) {
         builder.addParameter(Header.OZONE_LIST_QUERY_PREVKEY,
-            startVolume);
+            previousVolume);
       }
 
       builder.setPath("/").build();
@@ -672,13 +672,14 @@ public class OzoneRestClient implements Closeable {
    * @param volumeName - Volume name
    * @param bucketName - Bucket name
    * @param resultLength The max length of listing result.
-   * @param startKey The start key where to start listing from.
+   * @param previousKey The key from where listing should start;
+   *                    this key is excluded from the result.
    * @param prefix The prefix that return list keys start with.
    *
    * @return List of OzoneKeys
    */
   public List<OzoneKey> listKeys(String volumeName, String bucketName,
-      String resultLength, String startKey, String prefix)
+      String resultLength, String previousKey, String prefix)
       throws OzoneException {
     OzoneUtils.verifyResourceName(volumeName);
     OzoneUtils.verifyResourceName(bucketName);
@@ -692,8 +693,8 @@ public class OzoneRestClient implements Closeable {
         builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS, resultLength);
       }
 
-      if (!Strings.isNullOrEmpty(startKey)) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_PREVKEY, startKey);
+      if (!Strings.isNullOrEmpty(previousKey)) {
+        builder.addParameter(Header.OZONE_LIST_QUERY_PREVKEY, previousKey);
       }
 
       if (!Strings.isNullOrEmpty(prefix)) {
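
With the renamed parameter, a caller pages through volumes by passing the last volume of the previous page as previousVolume; that volume is no longer repeated at the top of the next page. A hedged usage sketch against OzoneRestClient.listVolumes (client construction and the OzoneException package are not shown in this commit, so they are not assumed here):

import java.util.List;

import org.apache.hadoop.ozone.web.client.OzoneRestClient;
import org.apache.hadoop.ozone.web.client.OzoneVolume;

public class ListAllVolumes {

  /**
   * Prints every volume visible to onBehalfOf by paging with the exclusive
   * previousVolume parameter. The client is assumed to be configured
   * elsewhere; 'throws Exception' avoids assuming OzoneException's package.
   */
  static void printAllVolumes(OzoneRestClient client, String onBehalfOf)
      throws Exception {
    final int pageSize = 100;
    String previousVolume = null;  // null/empty means start from the beginning
    List<OzoneVolume> page;
    do {
      // previousVolume itself is excluded from the returned page.
      page = client.listVolumes(onBehalfOf, null, pageSize, previousVolume);
      for (OzoneVolume volume : page) {
        System.out.println(volume.getVolumeName());
      }
      if (!page.isEmpty()) {
        previousVolume = page.get(page.size() - 1).getVolumeName();
      }
    } while (page.size() == pageSize);
  }
}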

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneVolume.java

@@ -430,7 +430,7 @@ public class OzoneVolume {
    * @throws OzoneException
    */
   public List<OzoneBucket> listBuckets(String resultLength,
-      String startBucket, String prefix) throws OzoneException {
+      String previousBucket, String prefix) throws OzoneException {
     HttpGet getRequest = null;
     try (CloseableHttpClient httpClient = newHttpClient()) {
       URIBuilder builder = new URIBuilder(getClient().getEndPointURI());
@@ -438,8 +438,8 @@ public class OzoneVolume {
       if (!Strings.isNullOrEmpty(resultLength)) {
         builder.addParameter(Header.OZONE_LIST_QUERY_MAXKEYS, resultLength);
       }
-      if (!Strings.isNullOrEmpty(startBucket)) {
-        builder.addParameter(Header.OZONE_LIST_QUERY_PREVKEY, startBucket);
+      if (!Strings.isNullOrEmpty(previousBucket)) {
+        builder.addParameter(Header.OZONE_LIST_QUERY_PREVKEY, previousBucket);
       }
       if (!Strings.isNullOrEmpty(prefix)) {
         builder.addParameter(Header.OZONE_LIST_QUERY_PREFIX, prefix);

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/OzoneRest.md

@@ -180,7 +180,7 @@ This API allows user to list all volumes owned by themselves. Administrators can
 
 Schema:
 
-- `GET /?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<VOLUME_TO_START_LISTING_FROM>`
+- `GET /?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<PREVIOUS_VOLUME_KEY>`
 
 Query Parameter:
 
@@ -188,7 +188,7 @@ Query Parameter:
 |:---- |:---- |:----
 | prefix | string | Optional. Only volumes with this prefix are included in the result. |
 | max-keys | int | Optional. Maximum number of volumes included in the result. Default is 1024 if not specified. |
-| prev-key | string | Optional. Volume name where to start listing from. It must be a valid volume name. |
+| prev-key | string | Optional. Volume name from where listing should start; this key is excluded from the result. It must be a valid volume name. |
 | root-scan | bool | Optional. List all volumes in the cluster if this is set to true. Default false. |
 
 Sample HTTP GET request:
@@ -345,7 +345,7 @@ List buckets in a given volume.
 
 Schema:
 
-- `GET /{volume}?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<BUCKET_TO_START_LISTING_FROM>`
+- `GET /{volume}?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<PREVIOUS_BUCKET_KEY>`
 
 Query Parameters:
 
@@ -353,7 +353,7 @@ Query Parameters:
 |:---- |:---- |:----
 | prefix | string | Optional. Only buckets with this prefix are included in the result. |
 | max-keys | int | Optional. Maximum number of buckets included in the result. Default is 1024 if not specified. |
-| prev-key | string | Optional. Bucket name where to start listing from. It must be a valid bucket name. |
+| prev-key | string | Optional. Bucket name from where listing should start; this key is excluded from the result. It must be a valid bucket name. |
 
 Sample HTTP GET request:
 
@@ -504,7 +504,7 @@ This API allows user to list keys in a bucket.
 
 Schema:
 
-- `GET /{volume}/{bucket}?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<KEY_TO_START_LISTING_FROM>`
+- `GET /{volume}/{bucket}?prefix=<PREFIX>&max-keys=<MAX_RESULT_SIZE>&prev-key=<PREVIOUS_KEY>`
 
 Query Parameters:
 
@@ -512,7 +512,7 @@ Query Parameters:
 |:---- |:---- |:----
 | prefix | string | Optional. Only keys with this prefix are included in the result. |
 | max-keys | int | Optional. Maximum number of keys included in the result. Default is 1024 if not specified. |
-| prev-key | string | Optional. Key name where to start listing from. It must be a valid key name. |
+| prev-key | string | Optional. Key name from where listing should start; this key is excluded from the result. It must be a valid key name. |
 
 Sample HTTP GET request:
 
@@ -542,4 +542,4 @@ this request list keys under bucket */volume-of-bilbo/bucket-0*, the listing res
           },
           ...
        ]
-    }
+    }

+ 7 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManager.java

@@ -676,9 +676,9 @@ public class TestKeySpaceManager {
     listBucketArgs = new ListArgs(volArgs, null, 2, "bBucket_3");
     result = storageHandler.listBuckets(listBucketArgs);
     Assert.assertEquals(2, result.getBuckets().size());
-    Assert.assertEquals("bBucket_3",
-        result.getBuckets().get(0).getBucketName());
     Assert.assertEquals("bBucket_4",
+        result.getBuckets().get(0).getBucketName());
+    Assert.assertEquals("bBucket_5",
         result.getBuckets().get(1).getBucketName());
 
     // Provide an invalid bucket name as start key.
@@ -692,7 +692,7 @@ public class TestKeySpaceManager {
     }
 
     // Use all arguments.
-    listBucketArgs = new ListArgs(volArgs, "b", 5, "bBucket_8");
+    listBucketArgs = new ListArgs(volArgs, "b", 5, "bBucket_7");
     result = storageHandler.listBuckets(listBucketArgs);
     Assert.assertEquals(2, result.getBuckets().size());
     Assert.assertEquals("bBucket_8",
@@ -801,9 +801,9 @@ public class TestKeySpaceManager {
     listKeyArgs = new ListArgs(bucketArgs, null, 2, "bKey1");
     result = storageHandler.listKeys(listKeyArgs);
     Assert.assertEquals(2, result.getKeyList().size());
-    Assert.assertEquals("bKey1",
-        result.getKeyList().get(0).getKeyName());
     Assert.assertEquals("bKey11",
+        result.getKeyList().get(0).getKeyName());
+    Assert.assertEquals("bKey13",
         result.getKeyList().get(1).getKeyName());
 
     // Provide an invalid key name as start key.
@@ -908,8 +908,9 @@ public class TestKeySpaceManager {
         volumes.getVolumes().get(3).getOwner().getName());
 
     // Make sure all available fields are returned
+    final String user0vol4 = "Vol-" + user0 + "-4";
     final String user0vol5 = "Vol-" + user0 + "-5";
-    listVolumeArgs = new ListArgs(userArgs0, null, 1, user0vol5);
+    listVolumeArgs = new ListArgs(userArgs0, null, 1, user0vol4);
     listVolumeArgs.setRootScan(false);
     volumes = storageHandler.listVolumes(listVolumeArgs);
     Assert.assertEquals(1, volumes.getVolumes().size());

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java

@@ -177,7 +177,7 @@ public class TestBuckets {
     bucketList = vol.listBuckets("3", null, null);
     assertEquals(bucketList.size(), 3);
 
-    bucketList = vol.listBuckets("100", "listbucket-test-5", null);
+    bucketList = vol.listBuckets("100", "listbucket-test-4", null);
     assertEquals(bucketList.size(), 5);
 
     bucketList = vol.listBuckets("100", null, "listbucket-test-3");

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java

@@ -345,9 +345,9 @@ public class TestKeys {
     Assert.assertEquals(keyList2.size(), 1);
 
     // test startKey parameter of list keys
-    keyList1 = helper.getBucket().listKeys("100", "list-key5", null);
+    keyList1 = helper.getBucket().listKeys("100", "list-key4", null);
     keyList2 = client.listKeys(helper.getVol().getVolumeName(),
-        helper.getBucket().getBucketName(), "100", "list-key5", null);
+        helper.getBucket().getBucketName(), "100", "list-key4", null);
     Assert.assertEquals(keyList1.size(), 5);
     Assert.assertEquals(keyList2.size(), 5);
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java

@@ -276,7 +276,7 @@ public class TestVolume {
     assertEquals(5, volumeList.size());
 
     // test start key parameter of listing volumes
-    volumeList = client.listVolumes(user2, null, 100, "test-vol17");
+    volumeList = client.listVolumes(user2, null, 100, "test-vol15");
     assertEquals(2, volumeList.size());
   }