
HADOOP-19278. S3A: Remove option to delete directory markers (#7052)

S3A no longer supports the ability to delete parent directory
markers on file or directory creation; markers are now always retained.

The option "fs.s3a.directory.marker.retention" is no longer supported.

This is incompatible with all hadoop versions before 3.2.2, and with hadoop 3.3.0,
when applications using the s3a connector attempt to write to the same bucket.
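
For clusters that must interoperate with older releases, a client's marker
awareness can still be probed through the standard `PathCapabilities` API.
A minimal sketch, assuming a placeholder bucket name; the capability string
is `STORE_CAPABILITY_DIRECTORY_MARKER_AWARE`
("fs.s3a.capability.directory.marker.aware") from `Constants`:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MarkerAwareCheck {
  public static void main(String[] args) throws Exception {
    // placeholder bucket; substitute a real one
    Path bucket = new Path("s3a://example-bucket/");
    FileSystem fs = bucket.getFileSystem(new Configuration());
    // true on any client with the HADOOP-13230 changes, i.e. one which
    // can safely work with buckets where markers are retained
    boolean markerAware = fs.hasPathCapability(bucket,
        "fs.s3a.capability.directory.marker.aware");
    System.out.println("marker aware: " + markerAware);
  }
}
```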
Steve Loughran, 3 months ago
commit d44ac28e32
66 files changed, 290 insertions(+), 2875 deletions(-)
  1. hadoop-tools/hadoop-aws/pom.xml (+0 -53)
  2. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java (+24 -7)
  3. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java (+21 -28)
  4. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java (+20 -174)
  5. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java (+0 -1)
  6. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java (+2 -23)
  7. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/CredentialProviderListFactory.java (+0 -4)
  8. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/impl/CommitOperations.java (+1 -1)
  9. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/S3MagicCommitTracker.java (+2 -2)
  10. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DirMarkerTracker.java (+0 -26)
  11. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DirectoryPolicy.java (+0 -110)
  12. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DirectoryPolicyImpl.java (+0 -212)
  13. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MkdirOperation.java (+4 -11)
  14. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/PutObjectOptions.java (+7 -31)
  15. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploader.java (+1 -1)
  16. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java (+0 -6)
  17. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java (+19 -48)
  18. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerTool.java (+6 -87)
  19. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java (+0 -148)
  20. hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java (+5 -82)
  21. hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem (+0 -16)
  22. hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md (+4 -7)
  23. hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/directory_markers.md (+93 -373)
  24. hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md (+4 -4)
  25. hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/performance.md (+1 -2)
  26. hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md (+2 -5)
  27. hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3n.md (+0 -52)
  28. hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md (+0 -47)
  29. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java (+0 -51)
  30. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestDowngradeSyncable.java (+0 -5)
  31. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryption.java (+1 -1)
  32. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java (+3 -28)
  33. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java (+1 -1)
  34. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingCacheFiles.java (+0 -4)
  35. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingInputStream.java (+0 -4)
  36. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingLruEviction.java (+0 -1)
  37. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java (+0 -8)
  38. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MultipartTestUtils.java (+1 -1)
  39. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java (+0 -6)
  40. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java (+1 -9)
  41. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java (+1 -1)
  42. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditAccessChecks.java (+0 -4)
  43. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManager.java (+0 -4)
  44. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManagerDisabled.java (+0 -4)
  45. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperationCost.java (+4 -41)
  46. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java (+0 -4)
  47. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestRenameDeleteRace.java (+1 -22)
  48. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestXAttrCost.java (+0 -4)
  49. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestDirectoryMarkerPolicy.java (+0 -163)
  50. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestRequestFactory.java (+2 -4)
  51. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/AbstractS3ACostTest.java (+1 -90)
  52. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestCreateFileCost.java (+2 -4)
  53. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestDirectoryMarkerListing.java (+9 -73)
  54. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ADeleteCost.java (+22 -83)
  55. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMiscOperationCost.java (+2 -5)
  56. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMkdirCost.java (+0 -23)
  57. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AOpenCost.java (+0 -4)
  58. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ARenameCost.java (+4 -48)
  59. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestUnbufferDraining.java (+0 -7)
  60. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java (+1 -5)
  61. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestAuthoritativePath.java (+0 -132)
  62. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java (+1 -8)
  63. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalWriteOperationHelperCallbacks.java (+0 -7)
  64. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/AbstractMarkerToolTest.java (+2 -93)
  65. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerTool.java (+15 -157)
  66. hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3xLoginHelper.java (+0 -205)

+ 0 - 53
hadoop-tools/hadoop-aws/pom.xml

@@ -48,10 +48,6 @@
     <!-- Set a longer timeout for integration test (in milliseconds) -->
     <test.integration.timeout>200000</test.integration.timeout>
 
-    <!--   should directory marker retention be audited? -->
-    <fs.s3a.directory.marker.audit>false</fs.s3a.directory.marker.audit>
-    <!--    marker retention policy -->
-    <fs.s3a.directory.marker.retention></fs.s3a.directory.marker.retention>
 
     <!-- Is prefetch enabled? -->
     <fs.s3a.prefetch.enabled>unset</fs.s3a.prefetch.enabled>
@@ -126,9 +122,6 @@
                 <fs.s3a.scale.test.huge.filesize>${fs.s3a.scale.test.huge.filesize}</fs.s3a.scale.test.huge.filesize>
                 <fs.s3a.scale.test.huge.huge.partitionsize>${fs.s3a.scale.test.huge.partitionsize}</fs.s3a.scale.test.huge.huge.partitionsize>
                 <fs.s3a.scale.test.timeout>${fs.s3a.scale.test.timeout}</fs.s3a.scale.test.timeout>
-                <!-- Markers-->
-                <fs.s3a.directory.marker.retention>${fs.s3a.directory.marker.retention}</fs.s3a.directory.marker.retention>
-                <fs.s3a.directory.marker.audit>${fs.s3a.directory.marker.audit}</fs.s3a.directory.marker.audit>
                 <!-- Prefetch -->
                 <fs.s3a.prefetch.enabled>${fs.s3a.prefetch.enabled}</fs.s3a.prefetch.enabled>
               </systemPropertyVariables>
@@ -167,8 +160,6 @@
                     <fs.s3a.scale.test.huge.filesize>${fs.s3a.scale.test.huge.filesize}</fs.s3a.scale.test.huge.filesize>
                     <fs.s3a.scale.test.huge.huge.partitionsize>${fs.s3a.scale.test.huge.partitionsize}</fs.s3a.scale.test.huge.huge.partitionsize>
                     <fs.s3a.scale.test.timeout>${fs.s3a.scale.test.timeout}</fs.s3a.scale.test.timeout>
-                    <fs.s3a.directory.marker.retention>${fs.s3a.directory.marker.retention}</fs.s3a.directory.marker.retention>
-
                     <test.default.timeout>${test.integration.timeout}</test.default.timeout>
                     <!-- Prefetch -->
                     <fs.s3a.prefetch.enabled>${fs.s3a.prefetch.enabled}</fs.s3a.prefetch.enabled>
@@ -221,9 +212,6 @@
                     <fs.s3a.scale.test.huge.filesize>${fs.s3a.scale.test.huge.filesize}</fs.s3a.scale.test.huge.filesize>
                     <fs.s3a.scale.test.huge.huge.partitionsize>${fs.s3a.scale.test.huge.partitionsize}</fs.s3a.scale.test.huge.huge.partitionsize>
                     <fs.s3a.scale.test.timeout>${fs.s3a.scale.test.timeout}</fs.s3a.scale.test.timeout>
-                    <!-- Markers-->
-                    <fs.s3a.directory.marker.retention>${fs.s3a.directory.marker.retention}</fs.s3a.directory.marker.retention>
-                    <fs.s3a.directory.marker.audit>${fs.s3a.directory.marker.audit}</fs.s3a.directory.marker.audit>
                     <!-- Prefetch -->
                     <fs.s3a.prefetch.enabled>${fs.s3a.prefetch.enabled}</fs.s3a.prefetch.enabled>
                     <!-- are root tests enabled. Set to false when running parallel jobs on same bucket -->
@@ -285,9 +273,6 @@
                     <fs.s3a.scale.test.enabled>${fs.s3a.scale.test.enabled}</fs.s3a.scale.test.enabled>
                     <fs.s3a.scale.test.huge.filesize>${fs.s3a.scale.test.huge.filesize}</fs.s3a.scale.test.huge.filesize>
                     <fs.s3a.scale.test.timeout>${fs.s3a.scale.test.timeout}</fs.s3a.scale.test.timeout>
-                    <!-- Markers-->
-                    <fs.s3a.directory.marker.retention>${fs.s3a.directory.marker.retention}</fs.s3a.directory.marker.retention>
-                    <fs.s3a.directory.marker.audit>${fs.s3a.directory.marker.audit}</fs.s3a.directory.marker.audit>
                     <!-- Prefetch -->
                     <fs.s3a.prefetch.enabled>${fs.s3a.prefetch.enabled}</fs.s3a.prefetch.enabled>
                     <test.unique.fork.id>job-${job.id}</test.unique.fork.id>
@@ -314,44 +299,6 @@
       </properties>
     </profile>
 
-    <!-- Directory marker retention options, all from the -Dmarkers value-->
-    <profile>
-      <id>keep-markers</id>
-      <activation>
-        <property>
-          <name>markers</name>
-          <value>keep</value>
-        </property>
-      </activation>
-      <properties >
-        <fs.s3a.directory.marker.retention>keep</fs.s3a.directory.marker.retention>
-      </properties>
-    </profile>
-    <profile>
-      <id>delete-markers</id>
-      <activation>
-        <property>
-          <name>markers</name>
-          <value>delete</value>
-        </property>
-      </activation>
-      <properties >
-        <fs.s3a.directory.marker.retention>delete</fs.s3a.directory.marker.retention>
-      </properties>
-    </profile>
-    <profile>
-      <id>auth-markers</id>
-      <activation>
-        <property>
-          <name>markers</name>
-          <value>authoritative</value>
-        </property>
-      </activation>
-      <properties >
-        <fs.s3a.directory.marker.retention>authoritative</fs.s3a.directory.marker.retention>
-      </properties>
-    </profile>
-
     <!-- Turn on prefetching-->
     <profile>
       <id>prefetch</id>

+ 24 - 7
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Constants.java

@@ -848,6 +848,7 @@ public final class Constants {
       "fs.s3a." + Constants.AWS_SERVICE_IDENTIFIER_STS.toLowerCase()
           + ".signing-algorithm";
 
+  @Deprecated
   public static final String S3N_FOLDER_SUFFIX = "_$folder$";
   public static final String FS_S3A_BLOCK_SIZE = "fs.s3a.block.size";
   public static final String FS_S3A = "s3a";
@@ -868,10 +869,13 @@
   /**
    * Paths considered "authoritative".
    * When S3guard was supported, this skipped checks to s3 on directory listings.
-   * It is also use to optionally disable marker retentation purely on these
-   * paths -a feature which is still retained/available.
+   * It was also possible to use this to optionally disable marker retention
+   * purely on these paths - a feature which is no longer available.
+   * As no feature uses this any more, it is declared deprecated.
   * */
+  @Deprecated
   public static final String AUTHORITATIVE_PATH = "fs.s3a.authoritative.path";
+  @Deprecated
   public static final String[] DEFAULT_AUTHORITATIVE_PATH = {};
 
   /**
@@ -1339,37 +1343,44 @@
 
   /**
    * Policy for directory markers.
-   * This is a new feature of HADOOP-13230 which addresses
-   * some scale, performance and permissions issues -but
-   * at the risk of backwards compatibility.
+   * No longer supported as "keep" is the sole policy.
    */
+  @Deprecated
   public static final String DIRECTORY_MARKER_POLICY =
       "fs.s3a.directory.marker.retention";
 
   /**
-   * Delete directory markers. This is the backwards compatible option.
+   * Delete directory markers.
+   * No longer supported as "keep" is the sole policy.
    * Value: {@value}.
    */
+  @Deprecated
   public static final String DIRECTORY_MARKER_POLICY_DELETE =
       "delete";
 
   /**
    * Retain directory markers.
+   * No longer needed, so marked as deprecated to flag usages.
    * Value: {@value}.
    */
+  @Deprecated
   public static final String DIRECTORY_MARKER_POLICY_KEEP =
       "keep";
 
   /**
    * Retain directory markers in authoritative directory trees only.
+   * No longer required as "keep" is the sole policy.
    * Value: {@value}.
    */
+  @Deprecated
   public static final String DIRECTORY_MARKER_POLICY_AUTHORITATIVE =
       "authoritative";
 
   /**
    * Default retention policy: {@value}.
+   * No longer required as "keep" is the sole policy.
    */
+  @Deprecated
   public static final String DEFAULT_DIRECTORY_MARKER_POLICY =
       DIRECTORY_MARKER_POLICY_KEEP;
 
@@ -1377,7 +1388,7 @@
   /**
    * {@code PathCapabilities} probe to verify that an S3A Filesystem
    * has the changes needed to safely work with buckets where
-   * directoy markers have not been deleted.
+   * directory markers have not been deleted.
    * Value: {@value}.
    */
   public static final String STORE_CAPABILITY_DIRECTORY_MARKER_AWARE
@@ -1394,16 +1405,20 @@
   /**
    * {@code PathCapabilities} probe to indicate that the filesystem
    * deletes directory markers.
+   * Always false.
    * Value: {@value}.
    */
+  @Deprecated
   public static final String STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_DELETE
       = "fs.s3a.capability.directory.marker.policy.delete";
 
   /**
    * {@code PathCapabilities} probe to indicate that the filesystem
    * keeps directory markers in authoritative paths only.
+   * This probe always returns false.
    * Value: {@value}.
    */
+  @Deprecated
   public static final String
       STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_AUTHORITATIVE =
       "fs.s3a.capability.directory.marker.policy.authoritative";
@@ -1411,6 +1426,7 @@
   /**
    * {@code PathCapabilities} probe to indicate that a path
    * keeps directory markers.
+   * This probe always returns true.
    * Value: {@value}.
    */
   public static final String STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP
@@ -1419,6 +1435,7 @@
   /**
    * {@code PathCapabilities} probe to indicate that a path
    * deletes directory markers.
+   * This probe always returns false.
    * Value: {@value}.
    */
   public static final String STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE

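Because the constants are deprecated rather than deleted, code which sets the
old retention policy still compiles; the value is simply ignored. A hedged
sketch of such legacy configuration code (class name is illustrative):

```java
import org.apache.hadoop.conf.Configuration;

import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY;
import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE;

public class LegacyMarkerPolicy {
  public static Configuration requestDeletion() {
    Configuration conf = new Configuration();
    // Compiles (with a deprecation warning) but has no effect:
    // "keep" is now the sole marker policy.
    conf.set(DIRECTORY_MARKER_POLICY, DIRECTORY_MARKER_POLICY_DELETE);
    return conf;
  }
}
```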
+ 21 - 28
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Listing.java

@@ -50,9 +50,7 @@ import java.util.NoSuchElementException;
 import java.util.concurrent.CompletableFuture;
 import java.util.StringJoiner;
 
-import static org.apache.hadoop.fs.s3a.Constants.S3N_FOLDER_SUFFIX;
 import static org.apache.hadoop.fs.s3a.Invoker.onceInTheFuture;
-import static org.apache.hadoop.fs.s3a.S3AUtils.ACCEPT_ALL;
 import static org.apache.hadoop.fs.s3a.S3AUtils.createFileStatus;
 import static org.apache.hadoop.fs.s3a.S3AUtils.maybeAddTrailingSlash;
 import static org.apache.hadoop.fs.s3a.S3AUtils.objectRepresentsDirectory;
@@ -76,8 +74,8 @@ public class Listing extends AbstractStoreOperation {
 
   private static final Logger LOG = S3AFileSystem.LOG;
 
-  static final FileStatusAcceptor ACCEPT_ALL_BUT_S3N =
-      new AcceptAllButS3nDirs();
+  static final FileStatusAcceptor ACCEPT_ALL_OBJECTS =
+      new AcceptAllObjects();
 
   private final ListingOperationCallbacks listingOperationCallbacks;
 
@@ -116,7 +114,7 @@ public class Listing extends AbstractStoreOperation {
           S3AFileStatus[] fileStatuses) {
     return filteringRemoteIterator(
         remoteIteratorFromArray(fileStatuses),
-        Listing.ACCEPT_ALL_BUT_S3N::accept);
+        Listing.ACCEPT_ALL_OBJECTS::accept);
   }
 
   /**
@@ -132,7 +130,7 @@ public class Listing extends AbstractStoreOperation {
    * @throws IOException IO Problems
    */
   @Retries.RetryRaw
-  public FileStatusListingIterator createFileStatusListingIterator(
+  public RemoteIterator<S3AFileStatus> createFileStatusListingIterator(
       Path listPath,
       S3ListRequest request,
       PathFilter filter,
@@ -212,7 +210,7 @@ public class Listing extends AbstractStoreOperation {
                 .createListObjectsRequest(key,
                     delimiter,
                     span),
-            ACCEPT_ALL,
+            S3AUtils.ACCEPT_ALL,
             acceptor,
             span));
   }
@@ -235,7 +233,7 @@ public class Listing extends AbstractStoreOperation {
             listingOperationCallbacks
                 .createListObjectsRequest(key, "/", span),
             filter,
-            new AcceptAllButSelfAndS3nDirs(dir),
+            new AcceptAllButSelf(dir),
             span));
   }
 
@@ -263,8 +261,8 @@ public class Listing extends AbstractStoreOperation {
     return createFileStatusListingIterator(
         path,
         request,
-        ACCEPT_ALL,
-        new AcceptAllButSelfAndS3nDirs(path),
+        S3AUtils.ACCEPT_ALL,
+        new AcceptAllButSelf(path),
         span);
   }
 
@@ -462,7 +460,7 @@ public class Listing extends AbstractStoreOperation {
         if (LOG.isDebugEnabled()) {
           LOG.debug("{}: {}", keyPath, stringify(s3Object));
         }
-        // Skip over keys that are ourselves and old S3N _$folder$ files
+        // Skip over keys that are ourselves
        if (acceptor.accept(keyPath, s3Object) && filter.accept(keyPath)) {
           S3AFileStatus status = createFileStatus(keyPath, s3Object,
               blockSize, userName, s3Object.eTag(),
@@ -722,13 +720,12 @@ public class Listing extends AbstractStoreOperation {
   }
 
   /**
-   * Accept all entries except the base path and those which map to S3N
-   * pseudo directory markers.
+   * Accept all entries except the base path.
    */
   static class AcceptFilesOnly implements FileStatusAcceptor {
     private final Path qualifiedPath;
 
-    public AcceptFilesOnly(Path qualifiedPath) {
+    AcceptFilesOnly(Path qualifiedPath) {
       this.qualifiedPath = qualifiedPath;
     }
 
@@ -743,7 +740,6 @@ public class Listing extends AbstractStoreOperation {
     @Override
     public boolean accept(Path keyPath, S3Object s3Object) {
       return !keyPath.equals(qualifiedPath)
-          && !s3Object.key().endsWith(S3N_FOLDER_SUFFIX)
           && !objectRepresentsDirectory(s3Object.key());
     }
 
@@ -765,29 +761,28 @@ public class Listing extends AbstractStoreOperation {
   }
 
   /**
-   * Accept all entries except those which map to S3N pseudo directory markers.
+   * Accept all entries.
    */
-  static class AcceptAllButS3nDirs implements FileStatusAcceptor {
+  static class AcceptAllObjects implements FileStatusAcceptor {
 
     public boolean accept(Path keyPath, S3Object s3Object) {
-      return !s3Object.key().endsWith(S3N_FOLDER_SUFFIX);
+      return true;
     }
 
     public boolean accept(Path keyPath, String prefix) {
-      return !keyPath.toString().endsWith(S3N_FOLDER_SUFFIX);
+      return true;
     }
 
     public boolean accept(FileStatus status) {
-      return !status.getPath().toString().endsWith(S3N_FOLDER_SUFFIX);
+      return true;
     }
 
   }
 
   /**
-   * Accept all entries except the base path and those which map to S3N
-   * pseudo directory markers.
+   * Accept all entries except the base path.
    */
-  public static class AcceptAllButSelfAndS3nDirs implements FileStatusAcceptor {
+  public static class AcceptAllButSelf implements FileStatusAcceptor {
 
     /** Base path. */
     private final Path qualifiedPath;
@@ -796,13 +791,12 @@ public class Listing extends AbstractStoreOperation {
      * Constructor.
      * @param qualifiedPath an already-qualified path.
      */
-    public AcceptAllButSelfAndS3nDirs(Path qualifiedPath) {
+    public AcceptAllButSelf(Path qualifiedPath) {
       this.qualifiedPath = qualifiedPath;
     }
 
     /**
-     * Reject a s3Object entry if the key path is the qualified Path, or
-     * it ends with {@code "_$folder$"}.
+     * Reject a s3Object entry if the key path is the qualified Path.
      * @param keyPath key path of the entry
     * @param s3Object s3Object entry
     * @return true if the entry is accepted (i.e. that a status entry
@@ -810,8 +804,7 @@
      */
     @Override
     public boolean accept(Path keyPath, S3Object s3Object) {
-      return !keyPath.equals(qualifiedPath) &&
-          !s3Object.key().endsWith(S3N_FOLDER_SUFFIX);
+      return !keyPath.equals(qualifiedPath);
     }
 
     /**

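With the `_$folder$` checks gone, the renamed acceptors encode a single rule:
skip the listing's own key. An illustrative restatement of that surviving
predicate (a standalone sketch, not the package-private class itself):

```java
import org.apache.hadoop.fs.Path;

/** Illustrative restatement of the rule left in AcceptAllButSelf. */
final class AcceptAllButSelfRule {
  private final Path qualifiedPath;

  AcceptAllButSelfRule(Path qualifiedPath) {
    this.qualifiedPath = qualifiedPath;
  }

  /** Accept every entry except the base path; no S3N suffix check remains. */
  boolean accept(Path keyPath) {
    return !keyPath.equals(qualifiedPath);
  }
}
```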
+ 20 - 174
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java

@@ -30,7 +30,6 @@ import java.text.SimpleDateFormat;
 import java.time.Duration;
 import java.time.Instant;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
 import java.util.EnumSet;
@@ -130,8 +129,6 @@ import org.apache.hadoop.fs.s3a.impl.S3AFileSystemOperations;
 import org.apache.hadoop.fs.s3a.impl.CSEV1CompatibleS3AFileSystemOperations;
 import org.apache.hadoop.fs.s3a.impl.CSEMaterials;
 import org.apache.hadoop.fs.s3a.impl.DeleteOperation;
-import org.apache.hadoop.fs.s3a.impl.DirectoryPolicy;
-import org.apache.hadoop.fs.s3a.impl.DirectoryPolicyImpl;
 import org.apache.hadoop.fs.s3a.impl.GetContentSummaryOperation;
 import org.apache.hadoop.fs.s3a.impl.HeaderProcessing;
 import org.apache.hadoop.fs.s3a.impl.InternalConstants;
@@ -198,7 +195,6 @@ import org.apache.hadoop.fs.s3a.commit.CommitConstants;
 import org.apache.hadoop.fs.s3a.commit.PutTracker;
 import org.apache.hadoop.fs.s3a.commit.MagicCommitIntegration;
 import org.apache.hadoop.fs.s3a.impl.ChangeTracker;
-import org.apache.hadoop.fs.s3a.s3guard.S3Guard;
 import org.apache.hadoop.fs.s3a.statistics.BlockOutputStreamStatistics;
 import org.apache.hadoop.fs.s3a.statistics.CommitterStatistics;
 import org.apache.hadoop.fs.s3a.statistics.S3AInputStreamStatistics;
@@ -395,7 +391,6 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
   private ChangeDetectionPolicy changeDetectionPolicy;
   private final AtomicBoolean closed = new AtomicBoolean(false);
   private volatile boolean isClosed = false;
-  private Collection<String> allowAuthoritativePaths;
 
   /** Delegation token integration; non-empty when DT support is enabled. */
   private Optional<S3ADelegationTokens> delegationTokens = Optional.empty();
@@ -431,11 +426,6 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
    */
   private OpenFileSupport openFileHelper;
 
-  /**
-   * Directory policy.
-   */
-  private DirectoryPolicy directoryPolicy;
-
   /**
    * Context accessors for re-use.
    */
@@ -771,12 +761,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
       performanceFlags.makeImmutable();
 
       LOG.debug("{} = {}", FS_S3A_CREATE_PERFORMANCE, performanceCreation);
-      allowAuthoritativePaths = S3Guard.getAuthoritativePaths(this);
 
-      // directory policy, which may look at authoritative paths
-      directoryPolicy = DirectoryPolicyImpl.getDirectoryPolicy(conf,
-          this::allowAuthoritative);
-      LOG.debug("Directory marker retention policy is {}", directoryPolicy);
       pageSize = intOption(getConf(), BULK_DELETE_PAGE_SIZE,
           BULK_DELETE_PAGE_SIZE_DEFAULT, 0);
       checkArgument(pageSize <= InternalConstants.MAX_ENTRIES_TO_DELETE,
@@ -2010,33 +1995,6 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
       return store.uploadPart(request, body, durationTrackerFactory);
     }
 
-    /**
-     * Perform post-write actions.
-     * <p>
-     * This operation MUST be called after any PUT/multipart PUT completes
-     * successfully.
-     * <p>
-     * The actions include calling
-     * {@link #deleteUnnecessaryFakeDirectories(Path)}
-     * if directory markers are not being retained.
-     * @param eTag eTag of the written object
-     * @param versionId S3 object versionId of the written object
-     * @param key key written to
-     * @param length total length of file written
-     * @param putOptions put object options
-     */
-    @Override
-    @Retries.RetryExceptionsSwallowed
-    public void finishedWrite(
-        String key,
-        long length,
-        PutObjectOptions putOptions) {
-      S3AFileSystem.this.finishedWrite(
-          key,
-          length,
-          putOptions);
-
-    }
   }
 
   /**
@@ -2153,8 +2111,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
    * Retry policy: retrying, translated on the getFileStatus() probe.
    * No data is uploaded to S3 in this call, so no retry issues related to that.
    * The "performance" flag disables safety checks for the path being a file,
-   * parent directory existing, and doesn't attempt to delete
-   * dir markers, irrespective of FS settings.
+   * or parent directory existing.
    * If true, this method call does no IO at all.
    * @param path the file name to open
    * @param progress the progress reporter.
@@ -2213,11 +2170,9 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
         committerIntegration.createTracker(path, key, outputStreamStatistics);
     String destKey = putTracker.getDestKey();
 
-    // put options are derived from the path and the
-    // option builder.
-    boolean keep = options.isPerformance() || keepDirectoryMarkers(path);
+    // put options are derived from the option builder.
     final PutObjectOptions putOptions =
-        new PutObjectOptions(keep, null, options.getHeaders());
+        new PutObjectOptions(null, options.getHeaders());
 
     validateOutputStreamConfiguration(path, getConf());
 
@@ -2650,8 +2605,8 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
           path,
           true,
           includeSelf
-              ? Listing.ACCEPT_ALL_BUT_S3N
-              : new Listing.AcceptAllButSelfAndS3nDirs(path),
+              ? Listing.ACCEPT_ALL_OBJECTS
+              : new Listing.AcceptAllButSelf(path),
           status
       );
     }
@@ -2682,9 +2637,6 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
       Path destParent = destCreated.getParent();
       if (!sourceRenamed.getParent().equals(destParent)) {
         LOG.debug("source & dest parents are different; fix up dir markers");
-        if (!keepDirectoryMarkers(destParent)) {
-          deleteUnnecessaryFakeDirectories(destParent);
-        }
         maybeCreateFakeParentDirectory(sourceRenamed);
       }
     }
@@ -2699,7 +2651,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
           listing.createFileStatusListingIterator(path,
               createListObjectsRequest(key, null),
               ACCEPT_ALL,
-              Listing.ACCEPT_ALL_BUT_S3N,
+              Listing.ACCEPT_ALL_OBJECTS,
               auditSpan));
     }
 
@@ -3359,8 +3311,6 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
                       provider.getSize(),
                       CONTENT_TYPE_OCTET_STREAM)));
       incrementPutCompletedStatistics(true, len);
-      // apply any post-write actions.
-      finishedWrite(putObjectRequest.key(), len, putOptions);
       return response;
     } catch (SdkException e) {
       incrementPutCompletedStatistics(false, len);
@@ -3637,7 +3587,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
     // is mostly harmless to create a new one.
     if (!key.isEmpty() && !s3Exists(f, StatusProbeEnum.DIRECTORIES)) {
       LOG.debug("Creating new fake directory at {}", f);
-      createFakeDirectory(key, putOptionsForPath(f));
+      createFakeDirectory(key, PutObjectOptions.defaultOptions());
     }
   }
 
@@ -3729,25 +3679,13 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
         return listing.createProvidedFileStatusIterator(
                 stats,
                 ACCEPT_ALL,
-                Listing.ACCEPT_ALL_BUT_S3N);
+                Listing.ACCEPT_ALL_OBJECTS);
       }
     }
     // Here we have a directory which may or may not be empty.
     return statusIt;
   }
 
-  /**
-   * Is a path to be considered as authoritative?
-   * is a  store with the supplied path under
-   * one of the paths declared as authoritative.
-   * @param path path
-   * @return true if the path is auth
-   */
-  public boolean allowAuthoritative(final Path path) {
-    return S3Guard.allowAuthoritative(path, this,
-        allowAuthoritativePaths);
-  }
-
   /**
    * Create a {@code ListObjectsRequest} request against this bucket,
    * with the maximum keys returned in a query set by {@link #maxKeys}.
@@ -3873,13 +3811,11 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
     }
 
     @Override
-    public void createFakeDirectory(final Path dir, final boolean keepMarkers)
+    public void createFakeDirectory(final Path dir)
         throws IOException {
       S3AFileSystem.this.createFakeDirectory(
           pathToKey(dir),
-          keepMarkers
-              ? PutObjectOptions.keepingDirs()
-              : putOptionsForPath(dir));
+          PutObjectOptions.defaultOptions());
     }
   }
 
@@ -3927,7 +3863,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
     @Override
     public RemoteIterator<S3ALocatedFileStatus> listFilesIterator(final Path path,
         final boolean recursive) throws IOException {
-      return S3AFileSystem.this.innerListFiles(path, recursive, Listing.ACCEPT_ALL_BUT_S3N, null);
+      return S3AFileSystem.this.innerListFiles(path, recursive, Listing.ACCEPT_ALL_OBJECTS, null);
     }
   }
 
@@ -4276,7 +4212,8 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
             newPutObjectRequestBuilder(key, file.length(), false);
         final String dest = to.toString();
         S3AFileSystem.this.invoker.retry("putObject(" + dest + ")", dest, true, () ->
-            executePut(putObjectRequestBuilder.build(), null, putOptionsForPath(to), file));
+            executePut(putObjectRequestBuilder.build(), null,
+                PutObjectOptions.defaultOptions(), file));
         return null;
       });
     }
@@ -4319,15 +4256,11 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
       final File file)
       throws IOException {
     String key = putObjectRequest.key();
-    long len = getPutRequestLength(putObjectRequest);
     ProgressableProgressListener listener =
-        new ProgressableProgressListener(store, putObjectRequest.key(), progress);
+        new ProgressableProgressListener(store, key, progress);
     UploadInfo info = putObject(putObjectRequest, file, listener);
     PutObjectResponse result = store.waitForUploadCompletion(key, info).response();
     listener.uploadCompleted(info.getFileUpload());
-
-    // post-write actions
-    finishedWrite(key, len, putOptions);
     return result;
   }
 
@@ -4579,7 +4512,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
 
   /**
    * Copy a single object in the bucket via a COPY operation.
-   * There's no update of metadata, directory markers, etc.
+   * There's no update of metadata, etc.
    * Callers must implement.
   * @param srcKey source object path
   * @param dstKey destination object path
@@ -4711,10 +4644,6 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
    * <p>
    * This operation MUST be called after any PUT/multipart PUT completes
    * successfully.
-   * <p>
-   * The actions include calling
-   * {@link #deleteUnnecessaryFakeDirectories(Path)}
-   * if directory markers are not being retained.
   * @param key key written to
   * @param length total length of file written
   * @param putOptions put object options
@@ -4728,70 +4657,6 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
     LOG.debug("Finished write to {}, len {}.",
         key, length);
     Preconditions.checkArgument(length >= 0, "content length is negative");
-    if (!putOptions.isKeepMarkers()) {
-      Path p = keyToQualifiedPath(key);
-      deleteUnnecessaryFakeDirectories(p.getParent());
-    }
-  }
-
-  /**
-   * Should we keep directory markers under the path being created
-   * by mkdir/file creation/rename?
-   * This is done if marker retention is enabled for the path,
-   * or it is under a magic path where we are saving IOPs
-   * knowing that all committers are on the same code version and
-   * therefore marker aware.
-   * @param path path to probe
-   * @return true if the markers MAY be retained,
-   * false if they MUST be deleted
-   */
-  private boolean keepDirectoryMarkers(Path path) {
-    return directoryPolicy.keepDirectoryMarkers(path)
-        || isUnderMagicCommitPath(path);
-  }
-
-  /**
-   * Should we keep directory markers under the path being created
-   * by mkdir/file creation/rename?
-   * See {@link #keepDirectoryMarkers(Path)} for the policy.
-   *
-   * @param path path to probe
-   * @return the options to use with the put request
-   */
-  private PutObjectOptions putOptionsForPath(Path path) {
-    return keepDirectoryMarkers(path)
-        ? PutObjectOptions.keepingDirs()
-        : PutObjectOptions.deletingDirs();
-  }
-
-  /**
-   * Delete mock parent directories which are no longer needed.
-   * Retry policy: retrying; exceptions swallowed.
-   * @param path path
-   *
-   */
-  @Retries.RetryExceptionsSwallowed
-  private void deleteUnnecessaryFakeDirectories(Path path) {
-    List<ObjectIdentifier> keysToRemove = new ArrayList<>();
-    while (!path.isRoot()) {
-      String key = pathToKey(path);
-      key = (key.endsWith("/")) ? key : (key + "/");
-      LOG.trace("To delete unnecessary fake directory {} for {}", key, path);
-      keysToRemove.add(ObjectIdentifier.builder().key(key).build());
-      path = path.getParent();
-    }
-    try {
-      removeKeys(keysToRemove, true);
-    } catch (AwsServiceException | IOException e) {
-      instrumentation.errorIgnored();
-      if (LOG.isDebugEnabled()) {
-        StringBuilder sb = new StringBuilder();
-        for (ObjectIdentifier objectIdentifier : keysToRemove) {
-          sb.append(objectIdentifier.key()).append(",");
-        }
-        LOG.debug("While deleting keys {} ", sb.toString(), e);
-      }
-    }
   }
 
   /**
@@ -4810,8 +4675,6 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
 
   /**
    * Used to create an empty file that represents an empty directory.
-   * The policy for deleting parent dirs depends on the path, dir
-   * status and the putOptions value.
    * Retry policy: retrying; translated.
   * @param objectName object to create
   * @param putOptions put object options
@@ -4842,14 +4705,6 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
     return getConf().getLongBytes(FS_S3A_BLOCK_SIZE, DEFAULT_BLOCKSIZE);
   }
 
-  /**
-   * Get the directory marker policy of this filesystem.
-   * @return the marker policy.
-   */
-  public DirectoryPolicy getDirectoryMarkerPolicy() {
-    return directoryPolicy;
-  }
-
   @Override
   public String toString() {
     final StringBuilder sb = new StringBuilder(
@@ -4879,7 +4734,6 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
       sb.append(", blockFactory=").append(blockFactory);
     }
     sb.append(", auditManager=").append(auditManager);
-    sb.append(", authoritativePath=").append(allowAuthoritativePaths);
     sb.append(", useListV1=").append(useListV1);
     if (committerIntegration != null) {
       sb.append(", magicCommitter=").append(isMagicCommitEnabled());
@@ -4889,7 +4743,6 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
     sb.append(", credentials=").append(credentials);
     sb.append(", delegation tokens=")
         .append(delegationTokens.map(Objects::toString).orElse("disabled"));
-    sb.append(", ").append(directoryPolicy);
     // if logging at debug, toString returns the entire IOStatistics set.
     if (getInstrumentation() != null) {
       sb.append(", instrumentation {")
@@ -5237,7 +5090,7 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
     final Path path = qualify(f);
     return trackDurationAndSpan(INVOCATION_LIST_FILES, path, () ->
         innerListFiles(path, recursive,
-            Listing.ACCEPT_ALL_BUT_S3N,
+            Listing.ACCEPT_ALL_OBJECTS,
             null));
   }
 
@@ -5549,20 +5402,13 @@ public class S3AFileSystem extends FileSystem implements StreamCapabilities,
     case CommonPathCapabilities.VIRTUAL_BLOCK_LOCATIONS:
       return true;
 
-       /*
-     * Marker policy capabilities are handed off.
-     */
     case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_KEEP:
-    case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_DELETE:
-    case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_AUTHORITATIVE:
-      return getDirectoryMarkerPolicy().hasPathCapability(path, cap);
-
-     // keep for a magic path or if the policy retains it
     case STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP:
-      return keepDirectoryMarkers(path);
-    // delete is the opposite of keep
+      return true;
+    // never true
+    case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_AUTHORITATIVE:
    case STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE:
-      return !keepDirectoryMarkers(path);
+      return false;
 
     case STORE_CAPABILITY_DIRECTORY_MARKER_MULTIPART_UPLOAD_ENABLED:
       return isMultipartUploadEnabled();

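After this change the marker-related capability probes have fixed answers, so
cross-version code that probes before acting remains safe. A hedged sketch
using the constants above (bucket name is a placeholder):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE;
import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP;

public class MarkerCapabilityProbe {
  public static void main(String[] args) throws Exception {
    Path bucket = new Path("s3a://example-bucket/");  // placeholder
    FileSystem fs = bucket.getFileSystem(new Configuration());
    // per the switch above: the "keep" probes are always true...
    System.out.println(fs.hasPathCapability(bucket,
        STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP));    // true
    // ...and the delete/authoritative probes are always false.
    System.out.println(fs.hasPathCapability(bucket,
        STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE));  // false
  }
}
```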
+ 0 - 1
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java

@@ -724,7 +724,6 @@ public final class S3AUtils {
    */
   public static S3xLoginHelper.Login getAWSAccessKeys(URI name,
       Configuration conf) throws IOException {
-    S3xLoginHelper.rejectSecretsInURIs(name);
     Configuration c = ProviderUtils.excludeIncompatibleCredentialProviders(
         conf, S3AFileSystem.class);
     String bucket = name != null ? name.getHost() : "";

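The removed `rejectSecretsInURIs()` call guarded against the long-deprecated
practice of embedding credentials in the filesystem URI; with the S3N login
helper gone, there is nothing left to reject. Credentials belong in
configuration or a credential provider. A minimal sketch with placeholder
values:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class S3ACredentialsFromConf {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // placeholder values: prefer a JCEKS credential provider or an IAM role
    conf.set("fs.s3a.access.key", "ACCESS_KEY_PLACEHOLDER");
    conf.set("fs.s3a.secret.key", "SECRET_KEY_PLACEHOLDER");
    FileSystem fs = new Path("s3a://example-bucket/").getFileSystem(conf);
    System.out.println("connected to " + fs.getUri());
  }
}
```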
+ 2 - 23
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java

@@ -288,9 +288,7 @@ public class WriteOperationHelper implements WriteOperations {
 
   /**
    * Finalize a multipart PUT operation.
-   * This completes the upload, and, if that works, calls
-   * {@link WriteOperationHelperCallbacks#finishedWrite(String, long, PutObjectOptions)}
-   * to update the filesystem.
+   * This completes the upload.
    * Retry policy: retrying, translated.
   * @param destKey destination of the commit
   * @param uploadId multipart operation Id
@@ -324,8 +322,6 @@
                     destKey, uploadId, partETags);
             return writeOperationHelperCallbacks.completeMultipartUpload(requestBuilder.build());
           });
-      writeOperationHelperCallbacks.finishedWrite(destKey, length,
-          putOptions);
       return uploadResult;
     }
   }
@@ -547,8 +543,6 @@
   /**
    * This completes a multipart upload to the destination key via
    * {@code finalizeMultipartUpload()}.
-   * Markers are never deleted on commit; this avoids having to
-   * issue many duplicate deletions.
    * Retry policy: retrying, translated.
    * Retries increment the {@code errorCount} counter.
   * @param destKey destination
@@ -574,7 +568,7 @@
         uploadId,
         partETags,
         length,
-        PutObjectOptions.keepingDirs(),
+        PutObjectOptions.defaultOptions(),
         Invoker.NO_OP);
   }
 
@@ -670,21 +664,6 @@
         RequestBody body,
         DurationTrackerFactory durationTrackerFactory)
         throws AwsServiceException, UncheckedIOException;
-
-    /**
-     * Perform post-write actions.
-     * <p>
-     * This operation MUST be called after any PUT/multipart PUT completes
-     * successfully.
-     * @param key key written to
-     * @param length total length of file written
-     * @param putOptions put object options
-     */
-    @Retries.RetryExceptionsSwallowed
-    void finishedWrite(
-        String key,
-        long length,
-        PutObjectOptions putOptions);
   }
 
 }

+ 0 - 4
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/CredentialProviderListFactory.java

@@ -47,7 +47,6 @@ import org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider;
import org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider;
import org.apache.hadoop.fs.s3a.adapter.AwsV1BindingSupport;
import org.apache.hadoop.fs.s3a.impl.InstantiationIOException;
-import org.apache.hadoop.fs.s3native.S3xLoginHelper;
import org.apache.hadoop.fs.store.LogExactlyOnce;

import static org.apache.hadoop.fs.s3a.Constants.AWS_CREDENTIALS_PROVIDER;
@@ -139,15 +138,12 @@ public final class CredentialProviderListFactory {
  public static AWSCredentialProviderList createAWSCredentialProviderList(
      @Nullable URI binding,
      Configuration conf) throws IOException {
-    // this will reject any user:secret entries in the URI
-    S3xLoginHelper.rejectSecretsInURIs(binding);
    AWSCredentialProviderList credentials =
        buildAWSProviderList(binding,
            conf,
            AWS_CREDENTIALS_PROVIDER,
            STANDARD_AWS_PROVIDERS,
            new HashSet<>());
-    // make sure the logging message strips out any auth details
    LOG.debug("For URI {}, using credentials {}",
        binding, credentials);
    return credentials;

+ 1 - 1
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/impl/CommitOperations.java

@@ -547,7 +547,7 @@ public class CommitOperations extends AbstractStoreOperation

      statistics.commitCreated();
      uploadId = writeOperations.initiateMultiPartUpload(destKey,
-          PutObjectOptions.keepingDirs());
+          PutObjectOptions.defaultOptions());
      long length = localFile.length();

      SinglePendingCommit commitData = new SinglePendingCommit();

+ 2 - 2
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/commit/magic/S3MagicCommitTracker.java

@@ -79,7 +79,7 @@ public class S3MagicCommitTracker extends MagicCommitTracker {
    PutObjectRequest originalDestPut = getWriter().createPutObjectRequest(
        getOriginalDestKey(),
        0,
-        new PutObjectOptions(true, null, headers));
+        new PutObjectOptions(null, headers));
    upload(originalDestPut, EMPTY);

    // build the commit summary
@@ -117,7 +117,7 @@ public class S3MagicCommitTracker extends MagicCommitTracker {
  @Retries.RetryTranslated
  private void upload(PutObjectRequest request, byte[] bytes) throws IOException {
    trackDurationOfInvocation(getTrackerStatistics(), COMMITTER_MAGIC_MARKER_PUT.getSymbol(),
-        () -> getWriter().putObject(request, PutObjectOptions.keepingDirs(),
+        () -> getWriter().putObject(request, PutObjectOptions.defaultOptions(),
            new S3ADataBlocks.BlockUploadData(bytes, null), null));
  }
}

+ 0 - 26
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DirMarkerTracker.java

@@ -19,7 +19,6 @@
package org.apache.hadoop.fs.s3a.impl;

import java.util.ArrayList;
-import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
@@ -267,31 +266,6 @@ public class DirMarkerTracker {
        '}';
  }

-  /**
-   * Scan the surplus marker list and remove from it all where the directory
-   * policy says "keep". This is useful when auditing
-   * @param policy policy to use when auditing markers for
-   * inclusion/exclusion.
-   * @return list of markers stripped
-   */
-  public List<Path> removeAllowedMarkers(DirectoryPolicy policy) {
-    List<Path> removed = new ArrayList<>();
-    Iterator<Map.Entry<Path, Marker>> entries =
-        surplusMarkers.entrySet().iterator();
-    while (entries.hasNext()) {
-      Map.Entry<Path, Marker> entry = entries.next();
-      Path path = entry.getKey();
-      if (policy.keepDirectoryMarkers(path)) {
-        // there's a match
-        // remove it from the map.
-        entries.remove();
-        LOG.debug("Removing {}", entry.getValue());
-        removed.add(path);
-      }
-    }
-    return removed;
-  }
-
  /**
   * This is a marker entry stored in the map and
   * returned as markers are deleted.

+ 0 - 110
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DirectoryPolicy.java

@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a.impl;
-
-import org.apache.hadoop.fs.Path;
-
-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_AUTHORITATIVE;
-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE;
-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_KEEP;
-
-/**
- * Interface for Directory Marker policies to implement.
- */
-
-public interface DirectoryPolicy {
-
-
-
-  /**
-   * Should a directory marker be retained?
-   * @param path path a file/directory is being created with.
-   * @return true if the marker MAY be kept, false if it MUST be deleted.
-   */
-  boolean keepDirectoryMarkers(Path path);
-
-  /**
-   * Get the marker policy.
-   * @return policy.
-   */
-  MarkerPolicy getMarkerPolicy();
-
-  /**
-   * Describe the policy for marker tools and logs.
-   * @return description of the current policy.
-   */
-  String describe();
-
-  /**
-   * Does a specific path have the relevant option.
-   * This is to be forwarded from the S3AFileSystem.hasPathCapability
-   * But only for those capabilities related to markers*
-   * @param path path
-   * @param capability capability
-   * @return true if the capability is supported, false if not
-   * @throws IllegalArgumentException if the capability is unknown.
-   */
-  boolean hasPathCapability(Path path, String capability);
-
-  /**
-   * Supported retention policies.
-   */
-  enum MarkerPolicy {
-
-    /**
-     * Delete markers.
-     * <p>
-     * This is the classic S3A policy,
-     */
-    Delete(DIRECTORY_MARKER_POLICY_DELETE),
-
-    /**
-     * Keep markers.
-     * <p>
-     * This is <i>Not backwards compatible</i>.
-     */
-    Keep(DIRECTORY_MARKER_POLICY_KEEP),
-
-    /**
-     * Keep markers in authoritative paths only.
-     * <p>
-     * This is <i>Not backwards compatible</i> within the
-     * auth paths, but is outside these.
-     */
-    Authoritative(DIRECTORY_MARKER_POLICY_AUTHORITATIVE);
-
-    /**
-     * The name of the option as allowed in configuration files
-     * and marker-aware tooling.
-     */
-    private final String optionName;
-
-    MarkerPolicy(final String optionName) {
-      this.optionName = optionName;
-    }
-
-    /**
-     * Get the option name.
-     * @return name of the option
-     */
-    public String getOptionName() {
-      return optionName;
-    }
-  }
-}

+ 0 - 212
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/DirectoryPolicyImpl.java

@@ -1,212 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a.impl;
-
-
-import java.util.EnumSet;
-import java.util.Locale;
-import java.util.Set;
-import java.util.function.Predicate;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-
-import static org.apache.hadoop.fs.s3a.Constants.DEFAULT_DIRECTORY_MARKER_POLICY;
-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY;
-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_AUTHORITATIVE;
-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE;
-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_KEEP;
-import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_AWARE;
-import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE;
-import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP;
-import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_AUTHORITATIVE;
-import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_DELETE;
-import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_KEEP;
-
-/**
- * Implementation of directory policy.
- */
-public final class DirectoryPolicyImpl
-    implements DirectoryPolicy {
-
-  private static final Logger LOG = LoggerFactory.getLogger(
-      DirectoryPolicyImpl.class);
-
-  /**
-   * Error string when unable to parse the marker policy option.
-   */
-  public static final String UNKNOWN_MARKER_POLICY =
-      "Unknown policy in "
-      + DIRECTORY_MARKER_POLICY + ": ";
-
-  /**
-   * All available policies.
-   */
-  private static final Set<MarkerPolicy> AVAILABLE_POLICIES =
-      EnumSet.allOf(MarkerPolicy.class);
-
-  /**
-   * Keep all markers.
-   */
-  public static final DirectoryPolicy KEEP = new DirectoryPolicyImpl(
-      MarkerPolicy.Keep, (p) -> false);
-
-  /**
-   * Delete all markers.
-   */
-  public static final DirectoryPolicy DELETE = new DirectoryPolicyImpl(
-      MarkerPolicy.Delete, (p) -> false);
-
-  /**
-   * Chosen marker policy.
-   */
-  private final MarkerPolicy markerPolicy;
-
-  /**
-   * Callback to evaluate authoritativeness of a
-   * path.
-   */
-  private final Predicate<Path> authoritativeness;
-
-  /**
-   * Constructor.
-   * @param markerPolicy marker policy
-   * @param authoritativeness function for authoritativeness
-   */
-  public DirectoryPolicyImpl(final MarkerPolicy markerPolicy,
-      final Predicate<Path> authoritativeness) {
-    this.markerPolicy = markerPolicy;
-    this.authoritativeness = authoritativeness;
-  }
-
-  @Override
-  public boolean keepDirectoryMarkers(final Path path) {
-    switch (markerPolicy) {
-    case Keep:
-      return true;
-    case Authoritative:
-      return authoritativeness.test(path);
-    case Delete:
-    default:   // which cannot happen
-      return false;
-    }
-  }
-
-  @Override
-  public MarkerPolicy getMarkerPolicy() {
-    return markerPolicy;
-  }
-
-  @Override
-  public String describe() {
-    return markerPolicy.getOptionName();
-  }
-
-  @Override
-  public String toString() {
-    final StringBuilder sb = new StringBuilder(
-        "DirectoryMarkerRetention{");
-    sb.append("policy='").append(markerPolicy.getOptionName()).append('\'');
-    sb.append('}');
-    return sb.toString();
-  }
-
-  /**
-   * Return path policy for store and paths.
-   * @param path path
-   * @param capability capability
-   * @return true if a capability is active
-   */
-  @Override
-  public boolean hasPathCapability(final Path path, final String capability) {
-
-    switch (capability) {
-    /*
-     * Marker policy is dynamically determined for the given path.
-     */
-    case STORE_CAPABILITY_DIRECTORY_MARKER_AWARE:
-      return true;
-
-    case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_KEEP:
-      return markerPolicy == MarkerPolicy.Keep;
-
-    case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_DELETE:
-      return markerPolicy == MarkerPolicy.Delete;
-
-    case STORE_CAPABILITY_DIRECTORY_MARKER_POLICY_AUTHORITATIVE:
-      return markerPolicy == MarkerPolicy.Authoritative;
-
-    case STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP:
-      return keepDirectoryMarkers(path);
-
-    case STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE:
-      return !keepDirectoryMarkers(path);
-
-    default:
-      throw new IllegalArgumentException("Unknown capability " + capability);
-    }
-  }
-
-  /**
-   * Create/Get the policy for this configuration.
-   * @param conf config
-   * @param authoritativeness Callback to evaluate authoritativeness of a
-   * path.
-   * @return a policy
-   */
-  public static DirectoryPolicy getDirectoryPolicy(
-      final Configuration conf,
-      final Predicate<Path> authoritativeness) {
-    DirectoryPolicy policy;
-    String option = conf.getTrimmed(DIRECTORY_MARKER_POLICY,
-        DEFAULT_DIRECTORY_MARKER_POLICY);
-    switch (option.toLowerCase(Locale.ENGLISH)) {
-    case DIRECTORY_MARKER_POLICY_DELETE:
-      // backwards compatible.
-      LOG.debug("Directory markers will be deleted");
-      policy = DELETE;
-      break;
-    case DIRECTORY_MARKER_POLICY_KEEP:
-      LOG.debug("Directory markers will be kept");
-      policy = KEEP;
-      break;
-    case DIRECTORY_MARKER_POLICY_AUTHORITATIVE:
-      LOG.debug("Directory markers will be kept on authoritative"
-          + " paths");
-      policy = new DirectoryPolicyImpl(MarkerPolicy.Authoritative,
-          authoritativeness);
-      break;
-    default:
-      throw new IllegalArgumentException(UNKNOWN_MARKER_POLICY + option);
-    }
-    return policy;
-  }
-
-  /**
-   * Enumerate all available policies.
-   * @return set of the policies.
-   */
-  public static Set<MarkerPolicy> availablePolicies() {
-    return AVAILABLE_POLICIES;
-  }
-
-}

+ 4 - 11
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/MkdirOperation.java

@@ -145,8 +145,7 @@ public class MkdirOperation extends ExecutingStoreOperation<Boolean> {
    // If so, we declare success without looking any further
    if (isMagicPath) {
      // Create the marker file immediately,
-      // and don't delete markers
-      callbacks.createFakeDirectory(dir, true);
+      callbacks.createFakeDirectory(dir);
      return true;
    }

@@ -159,9 +158,8 @@ public class MkdirOperation extends ExecutingStoreOperation<Boolean> {
    // if we get here there is no directory at the destination.
    // so create one.

-    // Create the marker file, delete the parent entries
-    // if the filesystem isn't configured to retain them
-    callbacks.createFakeDirectory(dir, false);
+    // Create the directory marker file
+    callbacks.createFakeDirectory(dir);
    return true;
  }

@@ -264,15 +262,10 @@ public class MkdirOperation extends ExecutingStoreOperation<Boolean> {
    /**
     * Create a fake directory, always ending in "/".
     * Retry policy: retrying; translated.
-     * the keepMarkers flag controls whether or not markers
-     * are automatically kept (this is set when creating
-     * directories under a magic path, always)
     * @param dir dir to create
-     * @param keepMarkers always keep markers
-     *
     * @throws IOException IO failure
     */
    @Retries.RetryTranslated
-    void createFakeDirectory(Path dir, boolean keepMarkers) throws IOException;
+    void createFakeDirectory(Path dir) throws IOException;
  }
}

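Reviewer note: the "fake directory" this callback creates is simply a zero-byte object whose key ends in `/`. A rough sketch in raw SDK v2 terms, with illustrative bucket and key names; the connector's real implementation adds auditing, retries and statistics:

```java
import software.amazon.awssdk.core.sync.RequestBody;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.PutObjectRequest;

// Illustrative sketch only: a directory marker is a zero-byte object
// whose key ends in "/". After this change the marker is created and
// left in place; no parent-marker cleanup follows.
try (S3Client s3 = S3Client.create()) {
  s3.putObject(
      PutObjectRequest.builder()
          .bucket("example-bucket")   // illustrative bucket
          .key("data/dir/")           // the trailing "/" marks a directory
          .contentLength(0L)
          .build(),
      RequestBody.empty());
}
```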
+ 7 - 31
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/PutObjectOptions.java

@@ -26,11 +26,6 @@ import javax.annotation.Nullable;
 */
public final class PutObjectOptions {

-  /**
-   * Can the PUT operation skip marker deletion?
-   */
-  private final boolean keepMarkers;
-
  /**
   * Storage class, if not null.
   */
@@ -43,27 +38,16 @@ public final class PutObjectOptions {

  /**
   * Constructor.
-   * @param keepMarkers Can the PUT operation skip marker deletion?
   * @param storageClass Storage class, if not null.
   * @param headers Headers; may be null.
   */
  public PutObjectOptions(
-      final boolean keepMarkers,
      @Nullable final String storageClass,
      @Nullable final Map<String, String> headers) {
-    this.keepMarkers = keepMarkers;
    this.storageClass = storageClass;
    this.headers = headers;
  }

-  /**
-   * Get the marker retention flag.
-   * @return true if markers are to be retained.
-   */
-  public boolean isKeepMarkers() {
-    return keepMarkers;
-  }
-
  /**
   * Headers for the put/post request.
   * @return headers or null.
@@ -75,30 +59,22 @@ public final class PutObjectOptions {
  @Override
  public String toString() {
    return "PutObjectOptions{" +
-        "keepMarkers=" + keepMarkers +
        ", storageClass='" + storageClass + '\'' +
        '}';
  }

-  private static final PutObjectOptions KEEP_DIRS = new PutObjectOptions(true,
-      null, null);
-  private static final PutObjectOptions DELETE_DIRS = new PutObjectOptions(false,
-      null, null);
-
  /**
-   * Get the options to keep directories.
-   * @return an instance which keeps dirs
+   * Empty options.
   */
-  public static PutObjectOptions keepingDirs() {
-    return KEEP_DIRS;
-  }
+  private static final PutObjectOptions EMPTY_OPTIONS = new PutObjectOptions(
+      null, null);

  /**
-   * Get the options to delete directory markers.
-   * @return an instance which deletes dirs
+   * Get the default options.
+   * @return an instance with no storage class or headers.
   */
-  public static PutObjectOptions deletingDirs() {
-    return DELETE_DIRS;
+  public static PutObjectOptions defaultOptions() {
+    return EMPTY_OPTIONS;
  }

}

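Reviewer note: a sketch of the narrowed API, as changed above; the header and storage class values are illustrative:

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.fs.s3a.impl.PutObjectOptions;

// The keepMarkers flag is gone: callers supply only storage class and headers.
Map<String, String> headers = new HashMap<>();
headers.put("x-amz-meta-owner", "etl");          // illustrative header
PutObjectOptions custom =
    new PutObjectOptions("REDUCED_REDUNDANCY",   // illustrative storage class
        headers);

// Call sites which previously used keepingDirs() or deletingDirs() now use:
PutObjectOptions defaults = PutObjectOptions.defaultOptions();
```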
+ 1 - 1
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploader.java

@@ -128,7 +128,7 @@ class S3AMultipartUploader extends AbstractMultipartUploader {
    return context.submit(new CompletableFuture<>(),
        trackDurationOfCallable(statistics, OBJECT_MULTIPART_UPLOAD_INITIATED.getSymbol(), () -> {
          String uploadId = writeOperations.initiateMultiPartUpload(key,
-              PutObjectOptions.keepingDirs());
+              PutObjectOptions.defaultOptions());
          statistics.uploadStarted();
          return BBUploadHandle.from(ByteBuffer.wrap(
              uploadId.getBytes(StandardCharsets.UTF_8)));

+ 0 - 6
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3Guard.java

@@ -121,12 +121,6 @@ public final class S3Guard {
    return true;
  }

-  public static Collection<String> getAuthoritativePaths(S3AFileSystem fs) {
-    return getAuthoritativePaths(
-        fs.getUri(),
-        fs.getConf(),
-        p -> fs.maybeAddTrailingSlash(fs.qualify(p).toString()));
-  }

  /**
   * Get the authoritative paths of a filesystem.

+ 19 - 48
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java

@@ -25,15 +25,13 @@ import java.io.PrintStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
-import java.util.Collection;
import java.util.Date;
import java.util.List;
+import java.util.Locale;
import java.util.Scanner;
import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;

import software.amazon.awssdk.services.s3.model.MultipartUpload;
-
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -55,8 +53,6 @@ import org.apache.hadoop.fs.s3a.auth.RolePolicies;
import org.apache.hadoop.fs.s3a.auth.delegation.S3ADelegationTokens;
import org.apache.hadoop.fs.s3a.commit.CommitConstants;
import org.apache.hadoop.fs.s3a.commit.InternalCommitterConstants;
-import org.apache.hadoop.fs.s3a.impl.DirectoryPolicy;
-import org.apache.hadoop.fs.s3a.impl.DirectoryPolicyImpl;
import org.apache.hadoop.fs.s3a.select.SelectConstants;
import org.apache.hadoop.fs.s3a.tools.BucketTool;
import org.apache.hadoop.fs.s3a.tools.MarkerTool;
@@ -389,8 +385,7 @@ public abstract class S3GuardTool extends Configured implements Tool,

    @VisibleForTesting
    public static final String IS_MARKER_AWARE =
-        "\tThe S3A connector is compatible with buckets where"
-            + " directory markers are not deleted";
+        "\tThe S3A connector does not delete markers";

    public static final String CAPABILITY_FORMAT = "\t%s %s%n";

@@ -446,16 +441,6 @@ public abstract class S3GuardTool extends Configured implements Tool,
        fs.listXAttrs(new Path("/"));
      }

-      // print any auth paths for directory marker info
-      final Collection<String> authoritativePaths
-          = S3Guard.getAuthoritativePaths(fs);
-      if (!authoritativePaths.isEmpty()) {
-        println(out, "Qualified Authoritative Paths:");
-        for (String path : authoritativePaths) {
-          println(out, "\t%s", path);
-        }
-        println(out, "");
-      }
      println(out, "%nS3A Client");
      printOption(out, "\tSigning Algorithm", SIGNING_ALGORITHM, "(unset)");
      String endpoint = conf.getTrimmed(ENDPOINT, "");
@@ -556,7 +541,7 @@ public abstract class S3GuardTool extends Configured implements Tool,
      }

      // directory markers
-      processMarkerOption(out, fs,
+      processMarkerOption(out,
          getCommandFormat().getOptValue(MARKERS_FLAG));

      // and check for capabilities
@@ -583,43 +568,29 @@ public abstract class S3GuardTool extends Configured implements Tool,
    /**
     * Validate the marker options.
     * @param out output stream
-     * @param fs filesystem
     * @param marker desired marker option -may be null.
     */
    private void processMarkerOption(final PrintStream out,
-        final S3AFileSystem fs,
        final String marker) {
-      println(out, "%nDirectory Markers");
-      DirectoryPolicy markerPolicy = fs.getDirectoryMarkerPolicy();
-      String desc = markerPolicy.describe();
-      println(out, "\tThe directory marker policy is \"%s\"", desc);
-
-      String pols = DirectoryPolicyImpl.availablePolicies()
-          .stream()
-          .map(DirectoryPolicy.MarkerPolicy::getOptionName)
-          .collect(Collectors.joining(", "));
-      println(out, "\tAvailable Policies: %s", pols);
-      printOption(out, "\tAuthoritative paths",
-          AUTHORITATIVE_PATH, "");
-      DirectoryPolicy.MarkerPolicy mp = markerPolicy.getMarkerPolicy();
+      println(out, "%nThis version of Hadoop always retains directory markers");
+

      String desiredMarker = marker == null
          ? ""
-          : marker.trim();
-      final String optionName = mp.getOptionName();
-      if (!desiredMarker.isEmpty()) {
-        if (MARKERS_AWARE.equalsIgnoreCase(desiredMarker)) {
-          // simple awareness test -provides a way to validate compatibility
-          // on the command line
-          println(out, IS_MARKER_AWARE);
-        } else {
-          // compare with current policy
-          if (!optionName.equalsIgnoreCase(desiredMarker)) {
-            throw badState("Bucket %s: required marker policy is \"%s\""
-                    + " but actual policy is \"%s\"",
-                fs.getUri(), desiredMarker, optionName);
-          }
-        }
+          : marker.trim().toLowerCase(Locale.ROOT);
+      switch(desiredMarker) {
+      case "":
+      case DIRECTORY_MARKER_POLICY_KEEP:
+        break;
+
+      case MARKERS_AWARE:
+        // simple awareness test -provides a way to validate compatibility
+        // on the command line
+        println(out, IS_MARKER_AWARE);
+        break;
+
+      default:
+        throw badState("Unsupported Marker Policy \"%s\"", desiredMarker);
      }
    }


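Reviewer note: on the command line this means `-markers keep` and `-markers aware` still succeed while the retired policies are rejected. A hedged example; the bucket name is illustrative:

```bash
# succeeds: "keep" is the only (and implicit) policy, "aware" is a probe
hadoop s3guard bucket-info -markers keep s3a://example-bucket/
hadoop s3guard bucket-info -markers aware s3a://example-bucket/

# now fails with 'Unsupported Marker Policy "delete"'
hadoop s3guard bucket-info -markers delete s3a://example-bucket/
```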
+ 6 - 87
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/tools/MarkerTool.java

@@ -53,8 +53,6 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem;
import org.apache.hadoop.fs.s3a.S3ALocatedFileStatus;
import org.apache.hadoop.fs.s3a.UnknownStoreException;
import org.apache.hadoop.fs.s3a.impl.DirMarkerTracker;
-import org.apache.hadoop.fs.s3a.impl.DirectoryPolicy;
-import org.apache.hadoop.fs.s3a.impl.DirectoryPolicyImpl;
import org.apache.hadoop.fs.s3a.impl.MultiObjectDeleteException;
import org.apache.hadoop.fs.s3a.impl.StoreContext;
import org.apache.hadoop.fs.s3a.s3guard.S3GuardTool;
@@ -62,8 +60,6 @@ import org.apache.hadoop.fs.shell.CommandFormat;
import org.apache.hadoop.util.DurationInfo;
import org.apache.hadoop.util.ExitUtil;

-
-import static org.apache.hadoop.fs.s3a.Constants.AUTHORITATIVE_PATH;
import static org.apache.hadoop.fs.s3a.Constants.BULK_DELETE_PAGE_SIZE;
import static org.apache.hadoop.fs.s3a.Constants.BULK_DELETE_PAGE_SIZE_DEFAULT;
import static org.apache.hadoop.fs.s3a.Invoker.once;
@@ -175,7 +171,6 @@ public final class MarkerTool extends S3GuardTool {
      + " [-" + OPT_MAX + " <count>]"
      + " [-" + OPT_OUT + " <filename>]"
      + " [-" + OPT_LIMIT + " <limit>]"
-      + " [-" + OPT_NONAUTH + "]"
      + " [-" + VERBOSE + "]"

      + " <PATH>\n"
@@ -207,8 +202,7 @@ public final class MarkerTool extends S3GuardTool {
    super(conf,
        OPT_AUDIT,
        OPT_CLEAN,
-        VERBOSE,
-        OPT_NONAUTH);
+        VERBOSE);
    CommandFormat format = getCommandFormat();
    format.addOptionWithValue(OPT_MIN);
    format.addOptionWithValue(OPT_MAX);
@@ -275,7 +269,6 @@ public final class MarkerTool extends S3GuardTool {
      path = new Path(path, "/");
    }
    FileSystem fs = path.getFileSystem(getConf());
-    boolean nonAuth = command.getOpt(OPT_NONAUTH);
    ScanResult result;
    try {
      result = execute(
@@ -286,7 +279,6 @@ public final class MarkerTool extends S3GuardTool {
                      .withMinMarkerCount(expectedMin)
                      .withMaxMarkerCount(expectedMax)
                      .withLimit(limit)
-                      .withNonAuth(nonAuth)
                      .build());
    } catch (UnknownStoreException ex) {
      // bucket doesn't exist.
@@ -356,21 +348,6 @@ public final class MarkerTool extends S3GuardTool {

    // extract the callbacks needed for the rest of the work
    storeContext = fs.createStoreContext();
-    // filesystem policy.
-    // if the -nonauth option is set, this is used to filter
-    // out surplus markers from the results.
-    DirectoryPolicy activePolicy = fs.getDirectoryMarkerPolicy();
-    DirectoryPolicy.MarkerPolicy policy = activePolicy
-        .getMarkerPolicy();
-    println(out, "The directory marker policy of %s is \"%s\"",
-        storeContext.getFsURI(),
-        policy);
-    String authPath = storeContext.getConfiguration()
-        .getTrimmed(AUTHORITATIVE_PATH, "");
-    if (policy == DirectoryPolicy.MarkerPolicy.Authoritative) {
-      // in auth mode, note the auth paths.
-      println(out, "Authoritative path list is \"%s\"", authPath);
-    }
    // qualify the path
    Path path = scanArgs.getPath();
    Path target = path.makeQualified(fs.getUri(), new Path("/"));
@@ -389,26 +366,8 @@ public final class MarkerTool extends S3GuardTool {
    }

    // the default filter policy is that all entries should be deleted
-    DirectoryPolicy filterPolicy;
-    if (scanArgs.isNonAuth()) {
-      filterPolicy = new DirectoryPolicyImpl(
-          DirectoryPolicy.MarkerPolicy.Authoritative,
-          fs::allowAuthoritative);
-    } else {
-      filterPolicy = null;
-    }
    int minMarkerCount = scanArgs.getMinMarkerCount();
    int maxMarkerCount = scanArgs.getMaxMarkerCount();
-    if (minMarkerCount > maxMarkerCount) {
-      // swap min and max if they are wrong.
-      // this is to ensure any test scripts written to work around
-      // HADOOP-17332 and min/max swapping continue to work.
-      println(out, "Swapping -min (%d) and -max (%d) values",
-          minMarkerCount, maxMarkerCount);
-      int m = minMarkerCount;
-      minMarkerCount = maxMarkerCount;
-      maxMarkerCount = m;
-    }
    // extract the callbacks needed for the rest of the work
    operations = fs.createMarkerToolOperations(
        target.toString());
@@ -416,8 +375,8 @@ public final class MarkerTool extends S3GuardTool {
        scanArgs.isDoPurge(),
        minMarkerCount,
        maxMarkerCount,
-        scanArgs.getLimit(),
-        filterPolicy);
+        scanArgs.getLimit()
+    );
  }

  /**
@@ -520,7 +479,6 @@ public final class MarkerTool extends S3GuardTool {
   * @param minMarkerCount min marker count (ignored on purge)
   * @param maxMarkerCount max marker count (ignored on purge)
   * @param limit limit of files to scan; 0 for 'unlimited'
-   * @param filterPolicy filter policy on a nonauth scan; may be null
   * @return result.
   * @throws IOException IO failure
   * @throws ExitUtil.ExitException explicitly raised failure
@@ -531,8 +489,7 @@ public final class MarkerTool extends S3GuardTool {
      final boolean doPurge,
      final int minMarkerCount,
      final int maxMarkerCount,
-      final int limit,
-      final DirectoryPolicy filterPolicy)
+      final int limit)
      throws IOException, ExitUtil.ExitException {

    // safety check: min and max are correctly ordered at this point.
@@ -598,21 +555,6 @@ public final class MarkerTool extends S3GuardTool {
      result.purgeSummary = purgeMarkers(tracker, deletePageSize);
    } else {
      // this is an audit, so validate the marker count
-
-      if (filterPolicy != null) {
-        // if a filter policy is supplied, filter out all markers
-        // under the auth path
-        List<Path> allowed = tracker.removeAllowedMarkers(filterPolicy);
-        int allowedMarkers =  allowed.size();
-        println(out, "%nIgnoring %d marker%s in authoritative paths",
-            allowedMarkers, suffix(allowedMarkers));
-        if (verbose) {
-          allowed.forEach(p -> println(out, p.toString()));
-        }
-        // recalculate the marker size
-        markerCount = surplusMarkers.size();
-        result.filteredMarkerCount = markerCount;
-      }
      if (markerCount < minMarkerCount || markerCount > maxMarkerCount) {
        // failure
        return failScan(result, EXIT_NOT_ACCEPTABLE,
@@ -902,9 +844,6 @@ public final class MarkerTool extends S3GuardTool {
    /** Limit of files to scan; 0 for 'unlimited'. */
    private final int limit;

-    /** Consider only markers in nonauth paths as errors. */
-    private final boolean nonAuth;
-
    /**
     * @param sourceFS source FS; must be or wrap an S3A FS.
     * @param path path to scan.
@@ -912,22 +851,19 @@ public final class MarkerTool extends S3GuardTool {
     * @param minMarkerCount min marker count (ignored on purge)
     * @param maxMarkerCount max marker count (ignored on purge)
     * @param limit limit of files to scan; 0 for 'unlimited'
-     * @param nonAuth consider only markers in nonauth paths as errors
     */
    private ScanArgs(final FileSystem sourceFS,
        final Path path,
        final boolean doPurge,
        final int minMarkerCount,
        final int maxMarkerCount,
-        final int limit,
-        final boolean nonAuth) {
+        final int limit) {
      this.sourceFS = sourceFS;
      this.path = path;
      this.doPurge = doPurge;
      this.minMarkerCount = minMarkerCount;
      this.maxMarkerCount = maxMarkerCount;
      this.limit = limit;
-      this.nonAuth = nonAuth;
    }

    FileSystem getSourceFS() {
@@ -954,9 +890,6 @@ public final class MarkerTool extends S3GuardTool {
      return limit;
    }

-    boolean isNonAuth() {
-      return nonAuth;
-    }
  }

  /**
@@ -982,9 +915,6 @@ public final class MarkerTool extends S3GuardTool {
    /** Limit of files to scan; 0 for 'unlimited'. */
    private int limit = UNLIMITED_LISTING;

-    /** Consider only markers in nonauth paths as errors. */
-    private boolean nonAuth = false;
-
    /**
     * Source FS; must be or wrap an S3A FS.
     * @param source Source FileSystem
@@ -1045,16 +975,6 @@ public final class MarkerTool extends S3GuardTool {
      return this;
    }

-    /**
-     * Consider only markers in non-authoritative paths as errors.
-     * @param b True if tool should only consider markers in non-authoritative paths
-     * @return builder class for method chaining
-     */
-    public ScanArgsBuilder withNonAuth(final boolean b) {
-      this.nonAuth = b;
-      return this;
-    }
-
    /**
     * Build the actual argument instance.
     * @return the arguments to pass in
@@ -1065,8 +985,7 @@ public final class MarkerTool extends S3GuardTool {
          doPurge,
          minMarkerCount,
          maxMarkerCount,
-          limit,
-          nonAuth);
+          limit);
    }
  }
}

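Reviewer note: with the `-nonauth` option gone, an audit or purge run is simply the following; bucket and path are illustrative, flags as declared above:

```bash
# audit: exits non-zero if the surplus marker count is outside -min/-max
hadoop s3guard markers -audit -verbose s3a://example-bucket/

# clean: delete surplus markers in pages of bulk deletes
hadoop s3guard markers -clean s3a://example-bucket/tables/
```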
+ 0 - 148
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java

@@ -1,148 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3native;
-
-import java.io.IOException;
-import java.net.URI;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.util.Progressable;
-
-/**
- * This is a stub filesystem purely present to fail meaningfully when
- * someone who explicitly declares
- * {@code fs.s3n.impl=org.apache.hadoop.fs.s3native.NativeS3FileSystem}
- * and then tries to create a filesystem off an s3n:// URL.
- *
- * The {@link #initialize(URI, Configuration)} method will throw
- * an IOException informing the user of their need to migrate.
- * @deprecated Replaced by the S3A client.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public final class NativeS3FileSystem extends FileSystem {
-  
-  public static final Logger LOG =
-      LoggerFactory.getLogger(NativeS3FileSystem.class);
-
-  /**
-   * Message in thrown exceptions: {@value}.
-   */
-  private static final String UNSUPPORTED =
-      "The s3n:// client to Amazon S3 is no longer available:"
-          + " please migrate to the s3a:// client";
-
-  public NativeS3FileSystem() {
-  }
-
-  /**
-   * Return the protocol scheme for the FileSystem.
-   *
-   * @return <code>s3n</code>
-   */
-  @Override
-  public String getScheme() {
-    return "s3n";
-  }
-
-  /**
-   * Always fail to initialize.
-   * @throws IOException always.
-   */
-  @Override
-  public void initialize(URI uri, Configuration conf) throws IOException {
-    super.initialize(uri, conf);
-    throw new IOException(UNSUPPORTED);
-  }
-
-  @Override
-  public FileStatus getFileStatus(Path f) throws IOException {
-    throw new UnsupportedOperationException(UNSUPPORTED);
-  }
-
-  @Override
-  public URI getUri() {
-    throw new UnsupportedOperationException(UNSUPPORTED);
-  }
-
-  @Override
-  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
-    throw new UnsupportedOperationException(UNSUPPORTED);
-  }
-
-  @Override
-  public FSDataOutputStream create(Path f,
-      FsPermission permission,
-      boolean overwrite,
-      int bufferSize,
-      short replication,
-      long blockSize,
-      Progressable progress) throws IOException {
-    throw new UnsupportedOperationException(UNSUPPORTED);
-  }
-
-  @Override
-  public FSDataOutputStream append(Path f,
-      int bufferSize,
-      Progressable progress) throws IOException {
-    throw new UnsupportedOperationException(UNSUPPORTED);
-  }
-
-  @Override
-  public boolean rename(Path src, Path dst) throws IOException {
-    throw new UnsupportedOperationException(UNSUPPORTED);
-  }
-
-  @Override
-  public boolean delete(Path f, boolean recursive) throws IOException {
-    throw new UnsupportedOperationException(UNSUPPORTED);
-  }
-
-  @Override
-  public FileStatus[] listStatus(Path f)
-      throws IOException {
-    throw new UnsupportedOperationException(UNSUPPORTED);
-  }
-
-  @Override
-  public void setWorkingDirectory(Path new_dir) {
-    throw new UnsupportedOperationException(UNSUPPORTED);
-  }
-
-  @Override
-  public Path getWorkingDirectory() {
-    throw new UnsupportedOperationException(UNSUPPORTED);
-  }
-
-  @Override
-  public boolean mkdirs(Path f, FsPermission permission) throws IOException {
-    throw new UnsupportedOperationException(UNSUPPORTED);
-  }
-}

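Reviewer note: with the stub class deleted here and its service registration removed below, an `s3n://` URL should now fail at scheme resolution rather than with the stub's migration message. A hedged sketch of what callers will likely see:

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

// Hedged illustration: without the stub or its META-INF/services entry,
// filesystem lookup for "s3n" fails (e.g. "No FileSystem for scheme" or an
// UnsupportedFileSystemException), unless fs.s3n.impl is explicitly mapped
// to some other class.
try {
  FileSystem.get(URI.create("s3n://example-bucket/"), new Configuration());
} catch (Exception e) {
  System.err.println("s3n is gone; migrate to s3a:// -- " + e);
}
```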
+ 5 - 82
hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java

@@ -22,11 +22,6 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.util.Objects;

-import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.util.Preconditions;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -41,9 +36,8 @@ import static org.apache.commons.lang3.StringUtils.equalsIgnoreCase;
 * It is in S3N so that it can be used across all S3 filesystems.
 *
 * The core function of this class was the extraction and decoding of user:secret
- * information from filesystems URIs. As this is no longer supported,
- * its role has been reduced to checking for secrets in the URI and rejecting
- * them where found.
+ * information from filesystems URIs.
+ * All that is left now is some URI canonicalization and checking.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
@@ -52,87 +46,20 @@ public final class S3xLoginHelper {
  private S3xLoginHelper() {
  }

-  public static final String LOGIN_WARNING =
-      "The Filesystem URI contains login details."
-      +" This authentication mechanism is no longer supported.";
-
  /**
   * Build the filesystem URI.
   * @param uri filesystem uri
   * @return the URI to use as the basis for FS operation and qualifying paths.
-   * @throws IllegalArgumentException if the URI is in some way invalid.
+   * @throws NullPointerException if the URI has null parts.
   */
  public static URI buildFSURI(URI uri) {
    // look for login secrets and fail if they are present.
-    rejectSecretsInURIs(uri);
    Objects.requireNonNull(uri, "null uri");
    Objects.requireNonNull(uri.getScheme(), "null uri.getScheme()");
-    if (uri.getHost() == null && uri.getAuthority() != null) {
-      Objects.requireNonNull(uri.getHost(), "null uri host.");
-    }
    Objects.requireNonNull(uri.getHost(), "null uri host.");
    return URI.create(uri.getScheme() + "://" + uri.getHost());
  }

-  /**
-   * Create a stripped down string value for error messages.
-   * @param pathUri URI
-   * @return a shortened schema://host/path value
-   */
-  public static String toString(URI pathUri) {
-    return pathUri != null
-        ? String.format("%s://%s/%s",
-        pathUri.getScheme(), pathUri.getHost(), pathUri.getPath())
-        : "(null URI)";
-  }
-
-  /**
-   * Extract the login details from a URI, raising an exception if
-   * the URI contains them.
-   * @param name URI of the filesystem, can be null
-   * @throws IllegalArgumentException if there is a secret in the URI.
-   */
-  public static void rejectSecretsInURIs(URI name) {
-    Login login = extractLoginDetails(name);
-    Preconditions.checkArgument(!login.hasLogin(), LOGIN_WARNING);
-  }
-
-  /**
-   * Extract the login details from a URI.
-   * @param name URI of the filesystem, may be null
-   * @return a login tuple, possibly empty.
-   */
-  @VisibleForTesting
-  static Login extractLoginDetails(URI name) {
-    if (name == null) {
-      return Login.EMPTY;
-    }
-
-    String authority = name.getAuthority();
-    if (authority == null) {
-      return Login.EMPTY;
-    }
-    int loginIndex = authority.indexOf('@');
-    if (loginIndex < 0) {
-      // no login
-      return Login.EMPTY;
-    }
-    String login = authority.substring(0, loginIndex);
-    int loginSplit = login.indexOf(':');
-    if (loginSplit > 0) {
-      String user = login.substring(0, loginSplit);
-      String encodedPassword = login.substring(loginSplit + 1);
-      return new Login(user, encodedPassword.isEmpty()? "": "password removed");
-    } else if (loginSplit == 0) {
-      // there is no user, just a password. In this case, there's no login
-      return Login.EMPTY;
-    } else {
-      // loginSplit < 0: there is no ":".
-      // return a login with a null password
-      return new Login(login, "");
-    }
-  }
-
  /**
   * Canonicalize the given URI.
   *
@@ -164,8 +91,7 @@ public final class S3xLoginHelper {

  /**
   * Check the path, ignoring authentication details.
-   * See {@link FileSystem#checkPath(Path)} for the operation of this.
-   *
+   * See {@code FileSystem.checkPath(Path)} for the operation of this.
   * Essentially
   * <ol>
   *   <li>The URI is canonicalized.</li>
@@ -221,8 +147,7 @@ public final class S3xLoginHelper {
    }
    // make sure the exception strips out any auth details
    throw new IllegalArgumentException(
-        "Wrong FS " + S3xLoginHelper.toString(pathUri)
-            + " -expected " + fsUri);
+        "Wrong FS " + pathUri + " -expected " + fsUri);
  }

  /**
@@ -232,8 +157,6 @@ public final class S3xLoginHelper {
    private final String user;
    private final String password;

-    public static final Login EMPTY = new Login();
-
    /**
     * Create an instance with no login details.
     * Calls to {@link #hasLogin()} return false.

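Reviewer note: a sketch of what survives in this helper, based on the `buildFSURI()` body kept above; the URI is illustrative:

```java
import java.net.URI;

import org.apache.hadoop.fs.s3native.S3xLoginHelper;

// buildFSURI() now just strips the URI down to scheme://host, failing
// fast on a null scheme or host; secret rejection no longer happens here.
URI fsUri = S3xLoginHelper.buildFSURI(
    URI.create("s3a://example-bucket/path/to/data"));
System.out.println(fsUri);  // prints s3a://example-bucket
```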
+ 0 - 16
hadoop-tools/hadoop-aws/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem

@@ -1,16 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-org.apache.hadoop.fs.s3native.NativeS3FileSystem

+ 4 - 7
hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/assumed_roles.md

@@ -295,13 +295,10 @@ views or operations.
 Particular troublespots are "directory markers" and
 failures of non-atomic operations, particularly `rename()` and `delete()`.

-A directory marker such as `/users/` will not be deleted if the user `alice`
-creates a directory `/users/alice` *and* she only has access to `/users/alice`.
-
-When a path or directory is deleted, the parent directory may not exist afterwards.
-In the example above, if `alice` deletes `/users/alice` and there are no
-other entries under `/users/alice`, then the directory marker `/users/` cannot
-be created. The directory `/users` will not exist in listings,
+If `alice` deletes `/users/alice` and there is neither another entry
+nor a directory marker under `/users/`, then the directory marker
+`/users/` cannot be created.
+The directory `/users` will not exist in listings,
 `getFileStatus("/users")` or similar.

 Rename will fail if it cannot delete the items it has just copied, that is

+ 93 - 373
hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/directory_markers.md

@@ -12,112 +12,104 @@
   limitations under the License. See accompanying LICENSE file.
 -->

-# Controlling the S3A Directory Marker Behavior
+# S3A Directory Marker Behavior
-This document discusses a performance feature of the S3A
-connector: directory markers are not deleted unless the
-client is explicitly configured to do so.
-
-## <a name="compatibility"></a> Critical: this is not backwards compatible!
+This document discusses directory markers and a change to the S3A
+connector: surplus directory markers are no longer deleted.

 This document shows how the performance of S3 I/O, especially applications
 creating many files (for example Apache Hive) or working with versioned S3 buckets can
 increase performance by changing the S3A directory marker retention policy.

-The default policy in this release of hadoop is "keep",
-which _is not backwards compatible_ with hadoop versions
+This release always retains markers
+and _is potentially not backwards compatible_ with hadoop versions
 released before 2021.

 The compatibility table of older releases is as follows:

-| Branch     | Compatible Since | Supported | Released |
-|------------|------------------|-----------|----------|
-| Hadoop 2.x | 2.10.2           | Read-only | 05/2022  |
-| Hadoop 3.0 | n/a              | WONTFIX   |          |
-| Hadoop 3.1 | n/a              | WONTFIX   |          |
-| Hadoop 3.2 | 3.2.2            | Read-only | 01/2022  |
-| Hadoop 3.3 | 3.3.1            | Done      | 01/2021  |
+| Branch     | Compatible Since | Support                               | Released |
+|------------|------------------|---------------------------------------|----------|
+| Hadoop 2.x | 2.10.2           | Partial                               | 05/2022  |
+| Hadoop 3.0 | n/a              | WONTFIX                               |          |
+| Hadoop 3.1 | n/a              | WONTFIX                               |          |
+| Hadoop 3.2 | 3.2.2            | Partial                               | 01/2022  |
+| Hadoop 3.3 | 3.3.1            | Full: deletion is enabled             | 01/2021  |
+| Hadoop 3.4 | 3.4.0            | Full: deletion is disabled            | 03/2024  |
+| Hadoop 3.5 | 3.5.0            | Full: markers will always be retained |          |

+*Full*

-*WONTFIX*
+These releases are fully marker-aware and will not delete markers on
+file/directory creation, though in some of these releases that is only optional.

-The Hadoop 3.0 and 3.1 lines will have no further releases, so will
-not be upgraded.
-The compatibility patch "HADOOP-17199. S3A Directory Marker HADOOP-13230 backport"
-is present in both source code branches, for anyone wishing to make a private
-release.
+*Partial*
-*Read-only*
+These branches have partial compatibility, and are safe to use with hadoop versions
+that do not delete markers.
-These branches have read-only compatibility.
-
-* They may list directories with directory markers, and correctly identify when
+* They can list directories with surplus directory markers, and correctly identify when
   such directories have child entries.
-* They will open files under directories with such markers.
-
-## How to re-enable backwards compatibility
+* They can open files under directories with such markers.
+* They will always delete parent directory markers when creating their own files and directories.
-The option `fs.s3a.directory.marker.retention` can be changed to "delete" to re-enable
-the original policy.
+All these branches are no longer supported by Apache with bug and security fixes;
+users should upgrade to more recent versions for those reasons alone.

-```xml
-  <property>
-    <name>fs.s3a.directory.marker.retention</name>
-    <value>delete</value>
-  </property>
-```
-## Verifying read compatibility.
+*WONTFIX*
-The `s3guard bucket-info` tool [can be used to verify support](#bucket-info).
-This allows for a command line check of compatibility, including
-in scripts.
+These releases may mistake a surplus directory marker for an empty directory.
+This does not happen during listing, but it can during rename operations.

-External Hadoop-based applications should also be assumed to be incompatible
-unless otherwise stated/known.
+The compatibility patch "HADOOP-17199. S3A Directory Marker HADOOP-13230 backport"
+is present in both the Hadoop 3.0.x and 3.1.x source branches, for anyone wishing to make a private
+release.
+However, there will be no further Apache releases of the obsolete branches.
-It is only safe change the directory marker policy if the following
- conditions are met:
+Everyone using these branches should upgrade to a supported version.
-1. You know exactly which applications are writing to and reading from
-   (including backing up) an S3 bucket.
-2. You know all applications which read data from the bucket are compatible.
+## History
+### Hadoop 3.3.1: directory marker retention is optional
-### <a name="backups"></a> Applications backing up data.
+[HADOOP-13230](https://issues.apache.org/jira/browse/HADOOP-13230)
+ _S3A to optionally retain directory markers_
-It is not enough to have a version of Apache Hadoop which is compatible, any
-application which backs up an S3 bucket or copies elsewhere must have an S3
-connector which is compatible. For the Hadoop codebase, that means that if
-distcp is used, it _must_ be from a compatible hadoop version.
+### Hadoop 3.4.0: markers are not deleted by default
-### <a name="fallure-mode"></a> How will incompatible applications/versions fail?
+[HADOOP-18752](https://issues.apache.org/jira/browse/HADOOP-18752)
+_Change fs.s3a.directory.marker.retention to "keep"_ changed the default
+policy.
-Applications using an incompatible version of the S3A connector will mistake
-directories containing data for empty directories. This means that:
+Marker deletion can still be enabled.
-* Listing directories/directory trees may exclude files which exist.
-* Queries across the data will miss data files.
-* Renaming a directory to a new location may exclude files underneath.
+Since this release there have been no reports of incompatibilities
+surfacing "in the wild". That is: out-of-date hadoop versions are not
+being used to write to the same parts of S3 buckets as modern releases.

-The failures are silent: there is no error message, stack trace or
-other warning that files may have been missed. They simply aren't
-found.
+### Hadoop 3.5: markers are never deleted
-### <a name="recovery"></a> If an application has updated a directory tree incompatibly-- what can be done?
+[HADOOP-19278](https://issues.apache.org/jira/browse/HADOOP-19278)
+_S3A: remove option to delete directory markers_
-There's a tool on the hadoop command line, [marker tool](#marker-tool) which can audit
-a bucket/path for markers, and clean up any markers which were found.
-It can be used to make a bucket compatible with older applications.
+Surplus directory markers are neither checked for nor deleted.
-Now that this is all clear, let's explain the problem.
+Removing the option to delete markers simplifies the code and significantly improves testing:
+* There is no need to parameterize many tests based on the marker policy.
+* Tests which make assertions about the number of HTTP requests which take place no longer have to
+  contain separate assertions for the keeping/deleting options.
+Notes:
+* During the directory tree copy which takes place in a rename, surplus directory
+markers are not copied. They are, after all, surplus.
+* The `hadoop s3guard markers` command (see below) can purge directory markers from a bucket or path.

 ## <a name="background"></a> Background: Directory Markers: what and why?

-Amazon S3 is not a filesystem, it is an object store.
+Amazon S3 is not a POSIX-like filesystem; it is an object store.

 The S3A connector not only provides a hadoop-compatible API to interact with
-data in S3, it tries to maintain the filesystem metaphor.
+data in S3, it tries to maintain the filesystem metaphor for applications
+written to expect it.

 One key aspect of the metaphor of a file system is "directories"

@@ -157,7 +149,6 @@ returned)

 1. Files and other directories can be created in it.

-
 Lots of code contains a big assumption here: after you create a directory it
 exists. They also assume that after files in a directory are deleted, the
 directory still exists.
@@ -172,10 +163,13 @@ in `_$folder$` was considered to be a sign that a directory existed. A call to
 The S3A also has directory markers, but it just appends a "/" to the directory
 name, so `mkdir(s3a://bucket/a/b)` will create a new marker object `a/b/` .

-When a file is created under a path, the directory marker is deleted. And when a
-file is deleted, if it was the last file in the directory, the marker is
+In older versions of Hadoop, when a file was created under a path,
+the directory marker was deleted. And when a file was deleted,
+if it was the last file in the directory, the marker was
 recreated.

+This release does not delete directory markers.
+
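To make this concrete, here is a minimal sketch of the lifecycle against the Hadoop `FileSystem` API; the bucket `s3a://example-bucket/` and the paths are illustrative only, and credentials are assumed to be configured elsewhere.

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MarkerLifecycleSketch {
  public static void main(String[] args) throws Exception {
    // bind to the (illustrative) bucket
    FileSystem fs = FileSystem.get(
        new URI("s3a://example-bucket/"), new Configuration());

    // creates a zero-byte marker object with the key "a/b/"
    fs.mkdirs(new Path("/a/b"));

    // writes the object "a/b/file1"; in this release the markers
    // "a/" and "a/b/" are left in place, where older releases
    // deleted them at this point
    fs.create(new Path("/a/b/file1")).close();
  }
}
```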
 And, historically, when a path is listed, if a marker to that path is found, *it
 has been interpreted as an empty directory.*

@@ -186,11 +180,11 @@ It is that little detail which is the cause of the incompatibility issues.
 Creating, deleting and the listing directory markers adds overhead and can slow
 down applications.

-Whenever a file is created we have to delete any marker which could exist in
+Whenever a file was created, the S3A client had to delete any marker which could exist in the
 parent directory _or any parent paths_. Rather than do a sequence of probes for
-parent markers existing, the connector issues a single request to S3 to delete
+parent markers existing, the connector issued a single request to S3 to delete
 all parents. For example, if a file `/a/b/file1` is created, a multi-object
-`DELETE` request containing the keys `/a/` and `/a/b/` is issued.
+`DELETE` request containing the keys `/a/` and `/a/b/` was issued.
 If no markers exists, this is harmless.

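To illustrate that single bulk request, here is a sketch of the kind of multi-object `DELETE` older releases issued after creating `/a/b/file1`, written against the AWS SDK for Java v2. The bucket name and client construction are illustrative; this is not the connector's actual code path.

```java
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.s3.model.Delete;
import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
import software.amazon.awssdk.services.s3.model.ObjectIdentifier;

public class ParentMarkerDeleteSketch {
  public static void main(String[] args) {
    try (S3Client s3 = S3Client.create()) {
      // one request covers every possible parent marker of "a/b/file1"
      DeleteObjectsRequest request = DeleteObjectsRequest.builder()
          .bucket("example-bucket")
          .delete(Delete.builder()
              .objects(
                  ObjectIdentifier.builder().key("a/").build(),
                  ObjectIdentifier.builder().key("a/b/").build())
              .build())
          .build();
      s3.deleteObjects(request);
    }
  }
}
```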
 When a file is deleted, a check for the parent directory continuing to exist
@@ -226,182 +220,21 @@ The tombstone markers have follow-on consequences -it makes listings against
 S3 versioned buckets slower.
 This can have adverse effects on those large directories, again.

-## <a name="solutions"></a> Strategies to avoid marker-related problems.
-
-###  Presto: every path is a directory
-
-In the Presto [S3 connector](https://prestodb.io/docs/current/connector/hive.html#amazon-s3-configuration),
-`mkdirs()` is a no-op.
-Whenever it lists any path which isn't an object or a prefix of one more objects, it returns an
-empty listing. That is:;  by default, every path is an empty directory.
-
-Provided no code probes for a directory existing and fails if it is there, this
-is very efficient. That's a big requirement however, -one Presto can pull off
-because they know how their file uses data in S3.
-
-
-###  Hadoop 3.3.1+: marker deletion is now optional
-
-From Hadoop 3.3.1 onwards, the S3A client can be configured to skip deleting
-directory markers when creating files under paths. This removes all scalability
-problems caused by deleting these markers -however, it is achieved at the expense
-of backwards compatibility.
-
-## <a name="marker-retention"></a> Controlling marker retention with `fs.s3a.directory.marker.retention`
-
-There is now an option `fs.s3a.directory.marker.retention` which controls how
-markers are managed when new files are created
-
-1. `delete`: a request is issued to delete any parental directory markers
-whenever a file or directory is created.
-2. `keep`: No delete request is issued.
-Any directory markers which exist are not deleted.
-This is *not* backwards compatible
-3. `authoritative`: directory markers are deleted _except for files created
-in "authoritative" directories_. This is backwards compatible _outside authoritative directories_.
-
-The setting, `fs.s3a.directory.marker.retention = delete` is compatible with
-every shipping Hadoop release; that of `keep` compatible with
-all releases since 2021.
-
-##  <a name="s3guard"></a> Directory Markers and Authoritative paths
-
-
-The now-deleted S3Guard feature included the concept of "authoritative paths";
-paths where all clients were required to be using S3Guard and sharing the
-same metadata store.
-In such a setup, listing authoritative paths would skip all queries of the S3
-store -potentially being much faster.
-
-In production, authoritative paths were usually only ever for Hive managed
-tables, where access was strictly restricted to the Hive services.
-
-
-When the S3A client is configured to treat some directories as "Authoritative"
-then an S3A connector with a retention policy of `fs.s3a.directory.marker.retention` of
-`authoritative` will omit deleting markers in authoritative directories.
-
-```xml
-<property>
-  <name>fs.s3a.bucket.hive.authoritative.path</name>
-  <value>/tables</value>
-</property>
-```
-This an option to consider if not 100% confident that all
-applications interacting with a store are using an S3A client
-which is marker aware.
-
 ## <a name="bucket-info"></a> Verifying marker policy with `s3guard bucket-info`

-The `bucket-info` command has been enhanced to support verification from the command
+Although it is now moot, the `bucket-info` command has been enhanced to support verification from the command
 line of bucket policies via the `-marker` option

-
-| option                   | verifies                                              |
-|--------------------------|-------------------------------------------------------|
-| `-markers aware`         | the hadoop release is "aware" of directory markers    |
-| `-markers delete`        | directory markers are deleted                         |
-| `-markers keep`          | directory markers are kept (not backwards compatible) |
-| `-markers authoritative` | directory markers are kept in authoritative paths     |
+| option                   | verifies                                                         | result |
+|--------------------------|------------------------------------------------------------------|--------|
+| `-markers aware`         | The hadoop release is "aware" of directory markers. Always true  | 0      |
+| `-markers keep`          | Directory markers are kept. Always true                          | 0      |
+| `-markers delete`        | Directory markers are deleted. Always false                      | 1      |
+| `-markers authoritative` | Directory markers are kept in authoritative paths.  Always false | 1      |

 All releases of Hadoop which have been updated to be marker aware will support the `-markers aware` option.


-1. Updated releases which do not support switching marker retention policy will also support the
-`-markers delete` option.
-
-
-Example: `s3guard bucket-info -markers aware` on a compatible release.
-
-```
-> hadoop s3guard bucket-info -markers aware s3a://noaa-isd-pds/
-Filesystem s3a://noaa-isd-pds
-
-...
-
-Directory Markers
-        The directory marker policy is "keep"
-        Available Policies: delete, keep, authoritative
-        Authoritative paths: fs.s3a.authoritative.path=
-        The S3A connector is compatible with buckets where directory markers are not deleted
-
-...
-```
-
-The same command will fail on older releases, because the `-markers` option
-is unknown
-
-```
-> hadoop s3guard bucket-info -markers aware s3a://noaa-isd-pds/
-Illegal option -markers
-Usage: hadoop bucket-info [OPTIONS] s3a://BUCKET
-    provide/check information about a specific bucket
-
-Common options:
-  -magic - Require the S3 filesystem to be support the "magic" committer
-  -encryption -require {none, sse-s3, sse-kms} - Require encryption policy
-
-When possible and not overridden by more specific options, metadata
-repository information will be inferred from the S3A URL (if provided)
-
-Generic options supported are:
-  -conf <config file> - specify an application configuration file
-  -D <property=value> - define a value for a given property
-
-2020-08-12 16:47:16,579 [main] INFO  util.ExitUtil (ExitUtil.java:terminate(210)) - Exiting with status 42: Illegal option -markers
-````
-
-A specific policy check verifies that the connector is configured as desired
-
-```
-> hadoop s3guard bucket-info -markers keep s3a://noaa-isd-pds/
-Filesystem s3a://noaa-isd-pds
-
-...
-
-Directory Markers
-        The directory marker policy is "keep"
-        Available Policies: delete, keep, authoritative
-        Authoritative paths: fs.s3a.authoritative.path=
-
-```
-
-When probing for a specific policy, the error code "46" is returned if the active policy
-does not match that requested:
-
-```
-> hadoop s3guard bucket-info -markers delete s3a://noaa-isd-pds/
-Filesystem s3a://noaa-isd-pds
-
-S3A Client
-        Signing Algorithm: fs.s3a.signing-algorithm=(unset)
-        Endpoint: fs.s3a.endpoint=s3.amazonaws.com
-        Encryption: fs.s3a.encryption.algorithm=none
-        Input seek policy: fs.s3a.experimental.input.fadvise=normal
-        Change Detection Source: fs.s3a.change.detection.source=etag
-        Change Detection Mode: fs.s3a.change.detection.mode=server
-
-S3A Committers
-        The "magic" committer is supported in the filesystem
-        S3A Committer factory class: mapreduce.outputcommitter.factory.scheme.s3a=org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory
-        S3A Committer name: fs.s3a.committer.name=magic
-        Store magic committer integration: fs.s3a.committer.magic.enabled=true
-
-Security
-        Delegation token support is disabled
-
-Directory Markers
-        The directory marker policy is "delete"
-        Available Policies: delete, keep, authoritative
-        Authoritative paths: fs.s3a.authoritative.path=
-
-2021-11-22 16:03:59,175 [main] INFO  util.ExitUtil (ExitUtil.java:terminate(210))
- -Exiting with status 46: 46: Bucket s3a://noaa-isd-pds: required marker polic is
-  "keep" but actual policy is "delete"
-
-```
-
-
 ##  <a name="marker-tool"></a> The marker tool: `hadoop s3guard markers`

 The marker tool aims to help migration by scanning/auditing directory trees
@@ -415,7 +248,6 @@ Syntax
 > hadoop s3guard markers -verbose -nonauth
 markers (-audit | -clean) [-min <count>] [-max <count>] [-out <filename>] [-limit <limit>] [-nonauth] [-verbose] <PATH>
         View and manipulate S3 directory markers
-
 ```

 *Options*
@@ -446,7 +278,6 @@ All other non-zero status code also indicate errors of some form or other.

 Audit the path and fail if any markers were found.

-
 ```
 > hadoop s3guard markers -limit 8000 -audit s3a://noaa-isd-pds/

@@ -499,35 +330,6 @@ Found 5 empty directory 'leaf' markers under s3a://london/
 This fails because surplus markers were found. This S3A bucket would *NOT* be safe for older Hadoop versions
 to use.

-The `-nonauth` option does not treat markers under authoritative paths as errors:
-
-```
-bin/hadoop s3guard markers -nonauth -audit s3a://london/
-
-The directory marker policy of s3a://london is "Authoritative"
-Authoritative path list is "/tables"
-2020-08-05 18:31:19,210 [main] INFO  tools.MarkerTool (DurationInfo.java:<init>(77)) - Starting: marker scan s3a://london/
-2020-08-05 18:31:22,240 [main] INFO  tools.MarkerTool (DurationInfo.java:close(98)) - marker scan s3a://london/: duration 0:03.031s
-Listed 8 objects under s3a://london/
-
-Found 3 surplus directory markers under s3a://london/
-    s3a://london/tables/
-    s3a://london/tables/tables-4/
-    s3a://london/tables/tables-4/tables-5/
-Found 5 empty directory 'leaf' markers under s3a://london/
-    s3a://london/tables/tables-2/
-    s3a://london/tables/tables-3/
-    s3a://london/tables/tables-4/tables-5/06/
-    s3a://london/tables2/
-    s3a://london/tables3/
-These are required to indicate empty directories
-
-Ignoring 3 markers in authoritative paths
-```
-
-All of this S3A bucket _other_ than the authoritative path `/tables` will be safe for
-incompatible Hadoop releases to use.
-

 ###  <a name="marker-tool-clean"></a>`markers clean`

@@ -535,9 +337,9 @@ The `markers clean` command will clean the directory tree of all surplus markers
 The `-verbose` option prints more detail on the operation as well as some IO statistics

 ```
-bin/hadoop s3guard markers -clean -verbose s3a://stevel-london/
-The directory marker policy of s3a://stevel-london is "Keep"
-2023-06-06 17:15:52,110 [main] INFO  tools.MarkerTool (DurationInfo.java:<init>(77)) - Starting: marker scan s3a://stevel-london/
+bin/hadoop s3guard markers -clean -verbose s3a://london/
+The directory marker policy of s3a://london is "Keep"
+2023-06-06 17:15:52,110 [main] INFO  tools.MarkerTool (DurationInfo.java:<init>(77)) - Starting: marker scan s3a://london/
   Directory Marker user/stevel/target/test/data/4so7pZebRx/
   Directory Marker user/stevel/target/test/data/OKvfC3oxlD/
   Directory Marker user/stevel/target/test/data/VSTQ1O4dMi/
@@ -550,38 +352,20 @@ maximums=((object_continue_list_request.failures.max=-1) (object_list_request.fa
 means=((object_list_request.mean=(samples=1, sum=540, mean=540.0000)) (object_continue_list_request.failures.mean=(samples=0, sum=0, mean=0.0000)) (object_list_request.failures.mean=(samples=0, sum=0, mean=0.0000)) (object_continue_list_request.mean=(samples=0, sum=0, mean=0.0000)));


-2023-06-06 17:15:52,662 [main] INFO  tools.MarkerTool (DurationInfo.java:close(98)) - marker scan s3a://stevel-london/: duration 0:00.553s
-Listed 3 objects under s3a://stevel-london/
+2023-06-06 17:15:52,662 [main] INFO  tools.MarkerTool (DurationInfo.java:close(98)) - marker scan s3a://london/: duration 0:00.553s
+Listed 3 objects under s3a://london/
 
 
-No surplus directory markers were found under s3a://stevel-london/
-Found 3 empty directory 'leaf' markers under s3a://stevel-london/
-    s3a://stevel-london/user/stevel/target/test/data/4so7pZebRx/
-    s3a://stevel-london/user/stevel/target/test/data/OKvfC3oxlD/
-    s3a://stevel-london/user/stevel/target/test/data/VSTQ1O4dMi/
+No surplus directory markers were found under s3a://london/
+Found 3 empty directory 'leaf' markers under s3a://london/
+    s3a://london/user/stevel/target/test/data/4so7pZebRx/
+    s3a://london/user/stevel/target/test/data/OKvfC3oxlD/
+    s3a://london/user/stevel/target/test/data/VSTQ1O4dMi/
 These are required to indicate empty directories

 0 markers to delete in 0 pages of 250 keys/page
 2023-06-06 17:15:52,664 [main] INFO  tools.MarkerTool (DurationInfo.java:<init>(77)) - Starting: Deleting markers
 2023-06-06 17:15:52,664 [main] INFO  tools.MarkerTool (DurationInfo.java:close(98)) - Deleting markers: duration 0:00.000s

-IO Statistics for s3a://stevel-london
-
-counters=((audit_request_execution=1)
-(audit_span_creation=3)
-(object_list_request=1)
-(op_get_file_status=1)
-(store_io_request=1));
-
-gauges=();
-
-minimums=((object_list_request.min=540)
-(op_get_file_status.min=2));
-
-maximums=((object_list_request.max=540)
-(op_get_file_status.max=2));
-
-means=((object_list_request.mean=(samples=1, sum=540, mean=540.0000))
-(op_get_file_status.mean=(samples=1, sum=2, mean=2.0000)));
 ```

 The `markers -clean` command _does not_ delete markers above empty directories -only those which have
@@ -600,16 +384,14 @@ An instance of the filesystem can be probed for its directory marker retention a
 policy can be probed for through the `org.apache.hadoop.fs.PathCapabilities` interface,
 which all FileSystem classes have supported since Hadoop 3.3.

-
-| Probe                   | Meaning                 |
-|-------------------------|-------------------------|
-| `fs.s3a.capability.directory.marker.aware`  | Does the filesystem support surplus directory markers? |
-| `fs.s3a.capability.directory.marker.policy.delete` | Is the bucket policy "delete"? |
-| `fs.s3a.capability.directory.marker.policy.keep`   | Is the bucket policy "keep"? |
-| `fs.s3a.capability.directory.marker.policy.authoritative` | Is the bucket policy "authoritative"? |
-| `fs.s3a.capability.directory.marker.action.delete` | If a file was created at this path, would directory markers be deleted? |
-| `fs.s3a.capability.directory.marker.action.keep`   | If a file was created at this path, would directory markers be retained? |
-
+| Probe                                                     | Meaning                                                                  | Current value |
+|-----------------------------------------------------------|--------------------------------------------------------------------------|---------------|
+| `fs.s3a.capability.directory.marker.aware`                | Does the filesystem support surplus directory markers?                   | true          |
+| `fs.s3a.capability.directory.marker.policy.delete`        | Is the bucket policy "delete"?                                           | false         |
+| `fs.s3a.capability.directory.marker.policy.keep`          | Is the bucket policy "keep"?                                             | true          |
+| `fs.s3a.capability.directory.marker.policy.authoritative` | Is the bucket policy "authoritative"?                                    | false         |
+| `fs.s3a.capability.directory.marker.action.delete`        | If a file was created at this path, would directory markers be deleted?  | false         |
+| `fs.s3a.capability.directory.marker.action.keep`          | If a file was created at this path, would directory markers be retained? | true          |

 The probe `fs.s3a.capability.directory.marker.aware` allows for a filesystem to be
 probed to determine if its file listing policy is "aware" of directory marker retention
@@ -625,68 +407,6 @@ be kept or deleted?
 The `S3AFileSystem` class also implements the `org.apache.hadoop.fs.StreamCapabilities` interface, which
 can be used to probe for marker awareness via the `fs.s3a.capability.directory.marker.aware` capability.

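A minimal sketch of both probe mechanisms follows; the bucket and path are illustrative, and the expected values are those listed in the table above.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilities;

public class MarkerCapabilityProbe {
  public static void main(String[] args) throws Exception {
    Path path = new Path("s3a://example-bucket/tables");
    FileSystem fs = path.getFileSystem(new Configuration());

    // PathCapabilities probe: true on every marker-aware release
    boolean aware = fs.hasPathCapability(path,
        "fs.s3a.capability.directory.marker.aware");

    // would a file created at this path retain parent markers?
    // in this release: always true
    boolean keep = fs.hasPathCapability(path,
        "fs.s3a.capability.directory.marker.action.keep");

    // the same awareness probe through StreamCapabilities
    boolean streamAware = ((StreamCapabilities) fs)
        .hasCapability("fs.s3a.capability.directory.marker.aware");

    System.out.println("aware=" + aware
        + ", keep=" + keep + ", streamAware=" + streamAware);
  }
}
```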
-Again, this will be true if-and-only-if the S3A connector is safe to work with S3A buckets/paths where
-directories are retained.
-
-*If an S3A instance, probed by `PathCapabilities` or `StreamCapabilities` for the capability
-`fs.s3a.capability.directory.marker.aware` and it returns false, *it is not safe to be used with
-S3A paths where markers have been retained*.
-
-This is programmatic probe -however it can be accessed on the command line via the
-external [`cloudstore`](https://github.com/steveloughran/cloudstore) tool:
-
-```
-> hadoop jar cloudstore-1.0.jar pathcapability fs.s3a.capability.directory.marker.aware  s3a://london/
-
-Probing s3a://london/ for capability fs.s3a.capability.directory.marker.aware
-
-Using filesystem s3a://london
-Path s3a://london/ has capability fs.s3a.capability.directory.marker.aware
-```
-
-If the exit code of the command is `0`, then the S3A is safe to work with buckets
-where markers have not been deleted.
-
-The same tool can be used to dynamically probe for the policy.
-
-Take a bucket with a retention policy of "authoritative" -only paths under `/tables` will have markers retained.
-
-```xml
-  <property>
-    <name>fs.s3a.bucket.london.directory.marker.retention</name>
-    <value>authoritative</value>
-  </property>
-  <property>
-    <name>fs.s3a.bucket.london.authoritative.path</name>
-    <value>/tables</value>
-  </property>
-```
-
-With this policy the path capability `fs.s3a.capability.directory.marker.action.keep` will hold under
-the path `s3a://london/tables`
-
-```
-bin/hadoop jar cloudstore-1.0.jar pathcapability fs.s3a.capability.directory.marker.action.keep s3a://london/tables
-Probing s3a://london/tables for capability fs.s3a.capability.directory.marker.action.keep
-2020-08-11 22:03:31,658 [main] INFO  impl.DirectoryPolicyImpl (DirectoryPolicyImpl.java:getDirectoryPolicy(143))
- - Directory markers will be kept on authoritative paths
-Using filesystem s3a://london
-Path s3a://london/tables has capability fs.s3a.capability.directory.marker.action.keep
-```
-
-However it will not hold for other paths, so indicating that older Hadoop versions will be safe
-to work with data written there by this S3A client.
-
-```
-bin/hadoop jar cloudstore-1.0.jar pathcapability fs.s3a.capability.directory.marker.action.keep s3a://london/tempdir
-Probing s3a://london/tempdir for capability fs.s3a.capability.directory.marker.action.keep
-2020-08-11 22:06:56,300 [main] INFO  impl.DirectoryPolicyImpl (DirectoryPolicyImpl.java:getDirectoryPolicy(143))
- - Directory markers will be kept on authoritative paths
-Using filesystem s3a://london
-Path s3a://london/tempdir lacks capability fs.s3a.capability.directory.marker.action.keep
-2020-08-11 22:06:56,308 [main] INFO  util.ExitUtil (ExitUtil.java:terminate(210)) - Exiting with status -1:
-```
-

 ## <a name="glossary"></a> Glossary


+ 4 - 4
hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/index.md

@@ -23,12 +23,12 @@

 ###  <a name="directory-marker-compatibility"></a> Directory Marker Compatibility

-This release does not delete directory markers when creating
+This release never deletes directory markers when creating
 files or directories underneath.
 This is incompatible with versions of the Hadoop S3A client released
 before 2021.

-Consult [Controlling the S3A Directory Marker Behavior](directory_markers.html) for
+Consult [S3A and Directory Markers](directory_markers.html) for
 full details.

 ## <a name="documents"></a> Documents
@@ -40,7 +40,7 @@ full details.
 * [Working with Third-party S3 Stores](./third_party_stores.html)
 * [Troubleshooting](./troubleshooting_s3a.html)
 * [Prefetching](./prefetching.html)
-* [Controlling the S3A Directory Marker Behavior](directory_markers.html).
+* [S3A and Directory Markers](directory_markers.html).
 * [Auditing](./auditing.html).
 * [Committing work to S3 with the "S3A Committers"](./committers.html)
 * [S3A Committers Architecture](./committer_architecture.html)
@@ -77,7 +77,7 @@ and compatible implementations.

 * Directly reads and writes S3 objects.
 * Compatible with standard S3 clients.
-* Compatible with files created by the older `s3n://` client and Amazon EMR's `s3://` client.
+* Compatible with files created by Amazon EMR's `s3://` client (EMRFS).
 * Supports partitioned uploads for many-GB objects.
 * Offers a high-performance random IO mode for working with columnar data such
 as Apache ORC and Apache Parquet files.

+ 1 - 2
hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/performance.md

@@ -110,8 +110,7 @@ it is by default, then the page size is limited to that defined in
   will, if the path references an object, cause that object to be deleted.
 * If the path does not reference an object: the path will not be deleted
   "This is for deleting objects, not directories"
-* No probes for the existence of parent directories will take place; no
-  parent directory markers will be created.
+* No probes for the existence of parent directories will take place.
   "If you need parent directories, call mkdir() yourself"
 * The list of failed keys listed in the `DeleteObjectsResponse` response
   are converted into paths and returned along with their error messages.

+ 2 - 5
hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3guard.md

@@ -141,7 +141,7 @@ Options
 |----------------------|---------------------------------------------------------------------|
 | `-fips`              | Require FIPS endopint to be in use                                  |
 | `-magic`             | Require the S3 filesystem to be support the "magic" committer       |
-| `-markers`           | Directory marker status: `aware`, `keep`, `delete`, `authoritative` |
+| `-markers`           | Directory marker status: `aware`, `keep`                            |
 | `-encryption <type>` | Require a specific encryption algorithm                             |

 The server side encryption options are not directly related to S3Guard, but
@@ -172,10 +172,7 @@ S3A Committers
 Security
         Delegation token support is disabled

-Directory Markers
-        The directory marker policy is "keep"
-        Available Policies: delete, keep, authoritative
-        Authoritative paths: fs.s3a.authoritative.path=
+This version of Hadoop always retains directory markers

 ```


+ 0 - 52
hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/s3n.md

@@ -1,52 +0,0 @@
-<!---
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-# The S3N Client
-
-<!-- MACRO{toc|fromDepth=0|toDepth=5} -->
-
-S3N was a Hadoop filesystem client which can read or write data stored
-in Amazon S3. It uses URLs with the schema `s3n://`.
-
-- - -
-
-**Hadoop's S3N client for Amazon S3 has been superceded by
-the S3A connector**
-
-**Please upgrade to S3A for a supported, higher-performance S3 Client**
-
-- - -
-
-
-## <a name="migrating"></a> How to migrate to the S3A client
-
-1. Keep the `hadoop-aws` JAR on your classpath.
-
-1. Add the `aws-java-sdk-bundle.jar` JAR which Hadoop ships
-with to your classpath.
-
-1. Change the authentication keys
-
-    | old key | new key |
-    |---------|---------|
-    | `fs.s3n.awsAccessKeyId` | `fs.s3a.access.key` |
-    | `fs.s3n.awsSecretAccessKey` | `fs.s3a.secret.key` |
-
-    Do make sure the property names are correct. For S3A, they are
-    `fs.s3a.access.key` and `fs.s3a.secret.key` —you cannot just copy the S3N
-    properties and replace `s3n` with `s3a`.
-
-1. Replace URLs which began with `s3n://` with `s3a://`
-
-1. You may now remove the `jets3t` JAR, as it is no longer needed.

+ 0 - 47
hadoop-tools/hadoop-aws/src/site/markdown/tools/hadoop-aws/testing.md

@@ -337,53 +337,6 @@ then delete.
 Once a bucket is converted to being versioned, it cannot be converted back
 to being unversioned.

-
-## <a name="marker"></a> Testing Different Marker Retention Policy
-
-Hadoop supports [different policies for directory marker retention](directory_markers.html)
--essentially the classic "delete" and the higher-performance "keep" options; "authoritative"
-is just "keep" restricted to a part of the bucket.
-
-
-Example: test with `markers=keep`
-
-```
-mvn verify -Dparallel-tests -DtestsThreadCount=4 -Dmarkers=keep
-```
-
-This is the default and does not need to be explicitly set.
-
-Example: test with `markers=delete`
-
-```
-mvn verify -Dparallel-tests -DtestsThreadCount=4 -Dmarkers=delete
-```
-
-Example: test with `markers=authoritative`
-
-```
-mvn verify -Dparallel-tests -DtestsThreadCount=4 -Dmarkers=authoritative
-```
-
-This final option is of limited use unless paths in the bucket have actually been configured to be
-of mixed status; unless anything is set up then the outcome should equal that of "delete"
-
-### Enabling auditing of markers
-
-To enable an audit of the output directory of every test suite,
-enable the option `fs.s3a.directory.marker.audit`
-
-```
--Dfs.s3a.directory.marker.audit=true
-```
-
-When set, if the marker policy is to delete markers under the test output directory, then
-the marker tool audit command will be run. This will fail if a marker was found.
-
-This adds extra overhead to every operation, but helps verify that the connector is
-not keeping markers where it needs to be deleting them -and hence backwards compatibility
-is maintained.
-
 ## <a name="enabling-prefetch"></a> Enabling prefetch for all tests

 The tests are run with prefetch if the `prefetch` property is set in the

+ 0 - 51
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/AbstractS3ATestBase.java

@@ -25,7 +25,6 @@ import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.contract.s3a.S3AContract;
-import org.apache.hadoop.fs.s3a.tools.MarkerTool;
 import org.apache.hadoop.fs.statistics.IOStatisticsSnapshot;
 import org.apache.hadoop.fs.statistics.IOStatisticsContext;
 import org.apache.hadoop.fs.store.audit.AuditSpan;
@@ -37,15 +36,11 @@ import org.junit.Assume;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicReference;

 import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.writeDataset;
-import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestPropertyBool;
-import static org.apache.hadoop.fs.s3a.S3AUtils.E_FS_CLOSED;
-import static org.apache.hadoop.fs.s3a.tools.MarkerTool.UNLIMITED_LISTING;
 import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToPrettyString;
 import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.snapshotIOStatistics;

@@ -119,8 +114,6 @@ public abstract class AbstractS3ATestBase extends AbstractFSContractTestBase
   public void teardown() throws Exception {
     Thread.currentThread().setName("teardown");

-    maybeAuditTestPath();
-
     super.teardown();
     if (getFileSystem() != null) {
       FILESYSTEM_IOSTATS.aggregate(getFileSystem().getIOStatistics());
@@ -138,50 +131,6 @@ public abstract class AbstractS3ATestBase extends AbstractFSContractTestBase
         ioStatisticsToPrettyString(FILESYSTEM_IOSTATS));
   }

-  /**
-   * Audit the FS under {@link #methodPath()} if
-   * the test option {@link #DIRECTORY_MARKER_AUDIT} is
-   * true.
-   */
-  public void maybeAuditTestPath() {
-    final S3AFileSystem fs = getFileSystem();
-    if (fs != null) {
-      try {
-        boolean audit = getTestPropertyBool(fs.getConf(),
-            DIRECTORY_MARKER_AUDIT, false);
-        Path methodPath = methodPath();
-        if (audit
-            && !fs.getDirectoryMarkerPolicy()
-            .keepDirectoryMarkers(methodPath)
-            && fs.isDirectory(methodPath)) {
-          MarkerTool.ScanResult result = MarkerTool.execMarkerTool(
-              new MarkerTool.ScanArgsBuilder()
-                  .withSourceFS(fs)
-                  .withPath(methodPath)
-                  .withDoPurge(true)
-                  .withMinMarkerCount(0)
-                  .withMaxMarkerCount(0)
-                  .withLimit(UNLIMITED_LISTING)
-                  .withNonAuth(false)
-                  .build());
-          final String resultStr = result.toString();
-          assertEquals("Audit of " + methodPath + " failed: "
-                  + resultStr,
-              0, result.getExitCode());
-          assertEquals("Marker Count under " + methodPath
-                  + " non-zero: " + resultStr,
-              0, result.getFilteredMarkerCount());
-        }
-      } catch (FileNotFoundException ignored) {
-      } catch (Exception e) {
-        // If is this is not due to the FS being closed: log.
-        if (!e.toString().contains(E_FS_CLOSED)) {
-          LOG.warn("Marker Tool Failure", e);
-        }
-      }
-    }
-  }
-
   @Override
   protected int getTestTimeoutMillis() {
     return S3A_TEST_TIMEOUT;

+ 0 - 5
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestDowngradeSyncable.java

@@ -42,11 +42,6 @@ public class ITestDowngradeSyncable extends AbstractS3ACostTest {
   protected static final Logger LOG =
       LoggerFactory.getLogger(ITestDowngradeSyncable.class);

-
-  public ITestDowngradeSyncable() {
-    super(true);
-  }
-
   @Override
   public Configuration createConfiguration() {
     final Configuration conf = super.createConfiguration();

+ 1 - 1
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AClientSideEncryption.java

@@ -336,7 +336,7 @@ public abstract class ITestS3AClientSideEncryption extends AbstractS3ATestBase {
         putObjectRequestBuilder.contentLength(Long.parseLong(String.valueOf(SMALL_FILE_SIZE)));
         putObjectRequestBuilder.metadata(metadata);
         fs.putObjectDirect(putObjectRequestBuilder.build(),
-            PutObjectOptions.deletingDirs(),
+            PutObjectOptions.defaultOptions(),
             new S3ADataBlocks.BlockUploadData(new byte[SMALL_FILE_SIZE], null),
             null);


+ 3 - 28
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileOperationCost.java

@@ -21,12 +21,11 @@ package org.apache.hadoop.fs.s3a;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.s3a.api.PerformanceFlagEnum;
 import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum;
 import org.apache.hadoop.fs.s3a.performance.AbstractS3ACostTest;

 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 import org.assertj.core.api.Assertions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -34,8 +33,6 @@ import org.slf4j.LoggerFactory;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.net.URI;
-import java.util.Arrays;
-import java.util.Collection;
 import java.util.EnumSet;


@@ -48,39 +45,17 @@ import static org.apache.hadoop.test.LambdaTestUtils.intercept;

 /**
  * Use metrics to assert about the cost of file API calls.
- * Parameterized on directory marker keep vs delete.
- * When the FS is instantiated with creation performance, things
- * behave differently...its value is that of the marker keep flag,
- * so deletion costs are the same.
  */
  */
 public class ITestS3AFileOperationCost extends AbstractS3ACostTest {
 public class ITestS3AFileOperationCost extends AbstractS3ACostTest {

   private static final Logger LOG =
       LoggerFactory.getLogger(ITestS3AFileOperationCost.class);

-   * Parameterization.
-   */
-  @Parameterized.Parameters(name = "{0}")
-  public static Collection<Object[]> params() {
-    return Arrays.asList(new Object[][]{
-        {"keep-markers", true},
-        {"delete-markers", false},
-    });
-  }
-
-  public ITestS3AFileOperationCost(
-      final String name,
-      final boolean keepMarkers) {
-    super(keepMarkers);
-  }
-
   @Override
   public Configuration createConfiguration() {
     return setPerformanceFlags(
         super.createConfiguration(),
-        isKeepingMarkers() ? "create" : "");
+        PerformanceFlagEnum.Create.toString());
   }

   /**
@@ -132,7 +107,7 @@ public class ITestS3AFileOperationCost extends AbstractS3ACostTest {

   @Test
   public void testCostOfListFilesOnEmptyDir() throws Throwable {
-    describe("Perpforming listFiles() on an empty dir with marker");
+    describe("Performing listFiles() on an empty dir with marker");
     // this attem
     Path dir = path(getMethodName());
     S3AFileSystem fs = getFileSystem();

+ 1 - 1
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AMiscOperations.java

@@ -112,7 +112,7 @@ public class ITestS3AMiscOperations extends AbstractS3ATestBase {
       LambdaTestUtils.intercept(IllegalStateException.class,
           () -> fs.putObjectDirect(
               putObjectRequestBuilder.build(),
-              PutObjectOptions.keepingDirs(),
+              PutObjectOptions.defaultOptions(),
               new S3ADataBlocks.BlockUploadData("PUT".getBytes(), null),
               null));
       assertPathDoesNotExist("put object was created", path);

+ 0 - 4
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingCacheFiles.java

@@ -68,10 +68,6 @@ public class ITestS3APrefetchingCacheFiles extends AbstractS3ACostTest {

   private String bufferDir;

-  public ITestS3APrefetchingCacheFiles() {
-    super(true);
-  }
-
   @Before
   public void setUp() throws Exception {
     super.setup();

+ 0 - 4
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingInputStream.java

@@ -53,10 +53,6 @@ import static org.apache.hadoop.fs.statistics.StreamStatisticNames.STREAM_READ_P
  */
 public class ITestS3APrefetchingInputStream extends AbstractS3ACostTest {

-  public ITestS3APrefetchingInputStream() {
-    super(true);
-  }
-
   private static final Logger LOG =
       LoggerFactory.getLogger(ITestS3APrefetchingInputStream.class);


+ 0 - 1
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3APrefetchingLruEviction.java

@@ -69,7 +69,6 @@ public class ITestS3APrefetchingLruEviction extends AbstractS3ACostTest {
   }

   public ITestS3APrefetchingLruEviction(final String maxBlocks) {
-    super(true);
     this.maxBlocks = maxBlocks;
   }


+ 0 - 8
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MockS3AFileSystem.java

@@ -41,7 +41,6 @@ import org.apache.hadoop.fs.s3a.audit.AuditTestSupport;
 import org.apache.hadoop.fs.s3a.auth.delegation.EncryptionSecrets;
 import org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase;
 import org.apache.hadoop.fs.s3a.impl.ClientManager;
-import org.apache.hadoop.fs.s3a.impl.PutObjectOptions;
 import org.apache.hadoop.fs.s3a.impl.RequestFactoryImpl;
 import org.apache.hadoop.fs.s3a.impl.StoreContext;
 import org.apache.hadoop.fs.s3a.impl.StoreContextBuilder;
@@ -235,13 +234,6 @@ public class MockS3AFileSystem extends S3AFileSystem {
     return mock.exists(f);
   }

-  @Override
-  void finishedWrite(String key,
-      long length,
-      final PutObjectOptions putOptions) {
-
-  }
-
   @Override
   @Override
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     event("open(%s)", f);
     event("open(%s)", f);

+ 1 - 1
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/MultipartTestUtils.java

@@ -90,7 +90,7 @@ public final class MultipartTestUtils {
       WriteOperationHelper writeHelper = fs.getWriteOperationHelper();
       byte[] data = dataset(len, 'a', 'z');
       InputStream in = new ByteArrayInputStream(data);
-      String uploadId = writeHelper.initiateMultiPartUpload(key, PutObjectOptions.keepingDirs());
+      String uploadId = writeHelper.initiateMultiPartUpload(key, PutObjectOptions.defaultOptions());
       UploadPartRequest req = writeHelper.newUploadPartRequestBuilder(key, uploadId,
           partNo, true, len).build();
       RequestBody body = RequestBody.fromInputStream(in, len);
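Multipart uploads follow the same rule as simple PUTs: there is no marker policy left to choose, so the only options object passed to the write helper is defaultOptions(). A hedged sketch of the call site, reusing the writeHelper and key variables from the hunk above:

    // initiate a multipart upload; directory markers are always retained now
    String uploadId = writeHelper.initiateMultiPartUpload(
        key, PutObjectOptions.defaultOptions());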

+ 0 - 6
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestConstants.java

@@ -246,12 +246,6 @@ public interface S3ATestConstants {
   Duration TEST_SESSION_TOKEN_DURATION = Duration.ofSeconds(
       TEST_SESSION_TOKEN_DURATION_SECONDS);
 
-  /**
-   * Test option to enable audits of the method path after
-   * every test case.
-   */
-  String DIRECTORY_MARKER_AUDIT = "fs.s3a.directory.marker.audit";
-
   /**
    * Constant bytes being written when Client side encryption KMS is enabled
    * for a test. This bytes written takes into account "EncryptionContext",

+ 1 - 9
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java

@@ -722,13 +722,6 @@ public final class S3ATestUtils {
     }
     conf.set(BUFFER_DIR, tmpDir);
 
-    // directory marker policy
-    String directoryRetention = getTestProperty(
-        conf,
-        DIRECTORY_MARKER_POLICY,
-        DEFAULT_DIRECTORY_MARKER_POLICY);
-    conf.set(DIRECTORY_MARKER_POLICY, directoryRetention);
-
     boolean prefetchEnabled =
         getTestPropertyBool(conf, PREFETCH_ENABLED_KEY, PREFETCH_ENABLED_DEFAULT);
     conf.setBoolean(PREFETCH_ENABLED_KEY, prefetchEnabled);
@@ -1088,8 +1081,7 @@ public final class S3ATestUtils {
     List<CompletableFuture<Path>> futures = new ArrayList<>(paths.size()
         + dirs.size());
 
-    // create directories. With dir marker retention, that adds more entries
-    // to cause deletion issues
+    // create directories.
     try (DurationInfo ignore =
              new DurationInfo(LOG, "Creating %d directories", dirs.size())) {
       for (Path path : dirs) {

+ 1 - 1
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestS3ABlockOutputStream.java

@@ -68,7 +68,7 @@ public class TestS3ABlockOutputStream extends AbstractS3AMockTest {
             .withProgress(progressable)
             .withPutTracker(putTracker)
             .withWriteOperations(oHelper)
-            .withPutOptions(PutObjectOptions.keepingDirs())
+            .withPutOptions(PutObjectOptions.defaultOptions())
             .withIOStatisticsAggregator(
                 IOStatisticsContext.getCurrentIOStatisticsContext()
                     .getAggregator());

+ 0 - 4
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditAccessChecks.java

@@ -59,10 +59,6 @@ public class ITestAuditAccessChecks extends AbstractS3ACostTest {
 
 
   private AccessCheckingAuditor auditor;
 
-  public ITestAuditAccessChecks() {
-    super(true);
-  }
-
   @Override
   public Configuration createConfiguration() {
     Configuration conf = super.createConfiguration();

+ 0 - 4
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManager.java

@@ -49,10 +49,6 @@ import static org.apache.hadoop.test.LambdaTestUtils.intercept;
  */
 public class ITestAuditManager extends AbstractS3ACostTest {
 
-  public ITestAuditManager() {
-    super(true);
-  }
-
   @Override
   public Configuration createConfiguration() {
     Configuration conf = super.createConfiguration();

+ 0 - 4
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/audit/ITestAuditManagerDisabled.java

@@ -35,10 +35,6 @@ import static org.apache.hadoop.fs.s3a.audit.S3AAuditConstants.AUDIT_ENABLED;
  */
 public class ITestAuditManagerDisabled extends AbstractS3ACostTest {
 
-  public ITestAuditManagerDisabled() {
-    super(true);
-  }
-
   @Override
   public Configuration createConfiguration() {
     Configuration conf = super.createConfiguration();

+ 4 - 41
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperationCost.java

@@ -44,7 +44,6 @@ import static org.apache.hadoop.fs.s3a.Statistic.ACTION_HTTP_GET_REQUEST;
 import static org.apache.hadoop.fs.s3a.Statistic.COMMITTER_MAGIC_FILES_CREATED;
 import static org.apache.hadoop.fs.s3a.Statistic.COMMITTER_MAGIC_MARKER_PUT;
 import static org.apache.hadoop.fs.s3a.Statistic.DIRECTORIES_CREATED;
-import static org.apache.hadoop.fs.s3a.Statistic.FAKE_DIRECTORIES_DELETED;
 import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_BULK_DELETE_REQUEST;
 import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_DELETE_REQUEST;
 import static org.apache.hadoop.fs.s3a.Statistic.OBJECT_LIST_REQUEST;
@@ -64,8 +63,6 @@ import static org.apache.hadoop.util.functional.RemoteIterators.toList;
 /**
  * Assert cost of commit operations;
  * <ol>
- *   <li>Even on marker deleting filesystems,
- *       operations under magic dirs do not trigger marker deletion.</li>
  *   <li>Loading pending files from FileStatus entries skips HEAD checks.</li>
  *   <li>Mkdir under magic dirs doesn't check ancestor or dest type</li>
  * </ol>
@@ -80,13 +77,6 @@ public class ITestCommitOperationCost extends AbstractS3ACostTest {
    */
   private CommitterTestHelper testHelper;
 
-  /**
-   * Create with markers kept, always.
-   */
-  public ITestCommitOperationCost() {
-    super(false);
-  }
-
   @Override
   public void setup() throws Exception {
     super.setup();
@@ -122,37 +112,12 @@ public class ITestCommitOperationCost extends AbstractS3ACostTest {
     return ioStatisticsToPrettyString(getFileSystem().getIOStatistics());
   }
 
-  @Test
-  public void testMagicMkdir() throws Throwable {
-    describe("Mkdirs 'MAGIC PATH' always skips dir marker deletion");
-    S3AFileSystem fs = getFileSystem();
-    Path baseDir = methodPath();
-    // create dest dir marker, always
-    fs.mkdirs(baseDir);
-    Path magicDir = new Path(baseDir, MAGIC_PATH_PREFIX + JOB_ID);
-    verifyMetrics(() -> {
-      fs.mkdirs(magicDir);
-      return fileSystemIOStats();
-    },
-        with(OBJECT_BULK_DELETE_REQUEST, 0),
-        with(OBJECT_DELETE_REQUEST, 0),
-        with(DIRECTORIES_CREATED, 1));
-    verifyMetrics(() -> {
-      fs.delete(magicDir, true);
-      return fileSystemIOStats();
-    },
-        with(OBJECT_BULK_DELETE_REQUEST, 0),
-        with(OBJECT_DELETE_REQUEST, 1),
-        with(DIRECTORIES_CREATED, 0));
-    assertPathExists("parent", baseDir);
-  }
-
   /**
    * When a magic subdir is deleted, parent dirs are not recreated.
    */
   @Test
   public void testMagicMkdirs() throws Throwable {
-    describe("Mkdirs __magic_job-<jobId>/subdir always skips dir marker deletion");
+    describe("Mkdirs __magic_job-<jobId>/subdir always skips dir marker recreation");
     S3AFileSystem fs = getFileSystem();
     Path baseDir = methodPath();
     Path magicDir = new Path(baseDir, MAGIC_PATH_PREFIX + JOB_ID);
@@ -202,7 +167,7 @@ public class ITestCommitOperationCost extends AbstractS3ACostTest {
 
 
   @Test
   public void testCostOfCreatingMagicFile() throws Throwable {
-    describe("Files created under magic paths skip existence checks and marker deletes");
+    describe("Files created under magic paths skip existence checks");
     S3AFileSystem fs = getFileSystem();
     Path destFile = methodSubPath("file.txt");
     fs.delete(destFile.getParent(), true);
@@ -223,9 +188,8 @@ public class ITestCommitOperationCost extends AbstractS3ACostTest {
 
 
       stream.write("hello".getBytes(StandardCharsets.UTF_8));
 
-      // when closing, there will be no directories deleted
-      // we do expect two PUT requests, because the marker and manifests
-      // are both written
+      // when closing, we expect two PUT requests,
+      // because the marker and manifests are both written
       LOG.info("closing magic stream to {}", magicDest);
       verifyMetrics(() -> {
         stream.close();
@@ -266,7 +230,6 @@ public class ITestCommitOperationCost extends AbstractS3ACostTest {
           commitOperations.getIOStatistics());
     },
         always(NO_HEAD_OR_LIST),  // no probes for the dest path
-        with(FAKE_DIRECTORIES_DELETED, 0),  // no fake dirs
         with(OBJECT_DELETE_REQUEST, 0)); // no deletes
 
     LOG.info("Final Statistics {}",

+ 0 - 4
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestPartialRenamesDeletes.java

@@ -307,13 +307,9 @@ public class ITestPartialRenamesDeletes extends AbstractS3ATestBase {
     removeBucketOverrides(bucketName, conf,
         MAX_THREADS,
         MAXIMUM_CONNECTIONS,
-        DIRECTORY_MARKER_POLICY,
         BULK_DELETE_PAGE_SIZE);
     conf.setInt(MAX_THREADS, EXECUTOR_THREAD_COUNT);
     conf.setInt(MAXIMUM_CONNECTIONS, EXECUTOR_THREAD_COUNT * 2);
-    // use the keep policy to ensure that surplus markers exist
-    // to complicate failures
-    conf.set(DIRECTORY_MARKER_POLICY, DIRECTORY_MARKER_POLICY_KEEP);
     // set the delete page size to its maximum to ensure that all
     // entries are included in the same large delete, even on
     // scale runs. This is needed for assertions on the result.

+ 1 - 22
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestRenameDeleteRace.java

@@ -23,25 +23,18 @@ import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 
-import software.amazon.awssdk.core.exception.SdkException;
-
 import org.assertj.core.api.Assertions;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.core.exception.SdkException;
 
-
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.util.BlockingThreadPoolExecutorService;
 
-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY;
-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE;
-import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestBucketName;
-import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides;
 import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.submit;
 import static org.apache.hadoop.fs.s3a.impl.CallableSupplier.waitForCompletion;
 import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;
@@ -72,20 +65,6 @@ public class ITestRenameDeleteRace extends AbstractS3ATestBase {
           30, TimeUnit.SECONDS,
           "test-operations");
 
-  @Override
-  protected Configuration createConfiguration() {
-    Configuration conf = super.createConfiguration();
-
-    // use the keep policy to ensure that surplus markers exist
-    // to complicate failures
-    conf.set(DIRECTORY_MARKER_POLICY, DIRECTORY_MARKER_POLICY_DELETE);
-    removeBaseAndBucketOverrides(getTestBucketName(conf),
-        conf,
-        DIRECTORY_MARKER_POLICY);
-
-    return conf;
-  }
-
   /**
    * This test uses a subclass of S3AFileSystem to recreate the race between
    * subdirectory delete and rename.

+ 0 - 4
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/ITestXAttrCost.java

@@ -58,10 +58,6 @@ public class ITestXAttrCost extends AbstractS3ACostTest {
   private static final int GET_METADATA_ON_OBJECT = 1;
   private static final int GET_METADATA_ON_DIR = GET_METADATA_ON_OBJECT * 2;
 
-  public ITestXAttrCost() {
-    super(true);
-  }
-
   @Test
   public void testXAttrRoot() throws Throwable {
     describe("Test xattr on root");

+ 0 - 163
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestDirectoryMarkerPolicy.java

@@ -1,163 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a.impl;
-
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.function.Predicate;
-
-import org.assertj.core.api.Assertions;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.test.AbstractHadoopTestBase;
-
-import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE;
-import static org.apache.hadoop.fs.s3a.Constants.STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP;
-
-/**
- * Unit tests for directory marker policies.
- */
-@RunWith(Parameterized.class)
-public class TestDirectoryMarkerPolicy extends AbstractHadoopTestBase {
-
-  @Parameterized.Parameters(name = "{0}")
-  public static Collection<Object[]> data() {
-    return Arrays.asList(new Object[][]{
-        {
-            DirectoryPolicy.MarkerPolicy.Delete,
-            FAIL_IF_INVOKED,
-            false, false
-        },
-        {
-            DirectoryPolicy.MarkerPolicy.Keep,
-            FAIL_IF_INVOKED,
-            true, true
-        },
-        {
-            DirectoryPolicy.MarkerPolicy.Authoritative,
-            AUTH_PATH_ONLY,
-            false, true
-        }
-    });
-  }
-
-  private final DirectoryPolicy directoryPolicy;
-
-  private final boolean expectNonAuthDelete;
-
-  private final boolean expectAuthDelete;
-
-  public TestDirectoryMarkerPolicy(
-      final DirectoryPolicy.MarkerPolicy markerPolicy,
-      final Predicate<Path> authoritativeness,
-      final boolean expectNonAuthDelete,
-      final boolean expectAuthDelete) {
-    this.directoryPolicy = newPolicy(markerPolicy, authoritativeness);
-    this.expectNonAuthDelete = expectNonAuthDelete;
-    this.expectAuthDelete = expectAuthDelete;
-  }
-
-  /**
-   * Create a new retention policy.
-   * @param markerPolicy policy option
-   * @param authoritativeness predicate for determining if
-   * a path is authoritative.
-   * @return the retention policy.
-   */
-  private DirectoryPolicy newPolicy(
-      DirectoryPolicy.MarkerPolicy markerPolicy,
-      Predicate<Path> authoritativeness) {
-    return new DirectoryPolicyImpl(markerPolicy, authoritativeness);
-  }
-
-  private static final Predicate<Path> AUTH_PATH_ONLY =
-      (p) -> p.toUri().getPath().startsWith("/auth/");
-
-  private static final Predicate<Path> FAIL_IF_INVOKED = (p) -> {
-    throw new RuntimeException("failed");
-  };
-
-  private final Path nonAuthPath = new Path("s3a://bucket/nonauth/data");
-
-  private final Path authPath = new Path("s3a://bucket/auth/data1");
-
-  private final Path deepAuth = new Path("s3a://bucket/auth/d1/d2/data2");
-
-  /**
-   * Assert that a path has a retention outcome.
-   * @param path path
-   * @param retain should the marker be retained
-   */
-  private void assertMarkerRetention(Path path, boolean retain) {
-    Assertions.assertThat(directoryPolicy.keepDirectoryMarkers(path))
-        .describedAs("Retention of path %s by %s", path, directoryPolicy)
-        .isEqualTo(retain);
-  }
-
-  /**
-   * Assert that a path has a capability.
-   */
-  private void assertPathCapability(Path path,
-      String capability,
-      boolean outcome) {
-    Assertions.assertThat(directoryPolicy)
-        .describedAs("%s support for capability %s by path %s"
-                + " expected as %s",
-            directoryPolicy, capability, path, outcome)
-        .matches(p -> p.hasPathCapability(path, capability) == outcome,
-            "pathCapability");
-  }
-
-  @Test
-  public void testNonAuthPath() throws Throwable {
-    assertMarkerRetention(nonAuthPath, expectNonAuthDelete);
-    assertPathCapability(nonAuthPath,
-        STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE,
-        !expectNonAuthDelete);
-    assertPathCapability(nonAuthPath,
-        STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP,
-        expectNonAuthDelete);
-  }
-
-  @Test
-  public void testAuthPath() throws Throwable {
-    assertMarkerRetention(authPath, expectAuthDelete);
-    assertPathCapability(authPath,
-        STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE,
-        !expectAuthDelete);
-    assertPathCapability(authPath,
-        STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP,
-        expectAuthDelete);
-  }
-
-  @Test
-  public void testDeepAuthPath() throws Throwable {
-    assertMarkerRetention(deepAuth, expectAuthDelete);
-    assertPathCapability(deepAuth,
-        STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_DELETE,
-        !expectAuthDelete);
-    assertPathCapability(deepAuth,
-        STORE_CAPABILITY_DIRECTORY_MARKER_ACTION_KEEP,
-        expectAuthDelete);
-  }
-
-}

+ 2 - 4
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestRequestFactory.java

@@ -179,9 +179,7 @@ public class TestRequestFactory extends AbstractHadoopTestBase {
     a(factory.newListObjectsV2RequestBuilder(path, "/", 1));
     a(factory.newMultipartUploadRequestBuilder(path, null));
     a(factory.newPutObjectRequestBuilder(path,
-        PutObjectOptions.keepingDirs(), -1, true));
-    a(factory.newPutObjectRequestBuilder(path,
-        PutObjectOptions.deletingDirs(), 1024, false));
+        PutObjectOptions.defaultOptions(), -1, true));
   }
 
   /**
@@ -265,7 +263,7 @@ public class TestRequestFactory extends AbstractHadoopTestBase {
 
 
     // A simple PUT
     final PutObjectRequest put = factory.newPutObjectRequestBuilder(path,
-        PutObjectOptions.deletingDirs(), 1024, false).build();
+        PutObjectOptions.defaultOptions(), 1024, false).build();
     assertApiTimeouts(partDuration, put);
 
     // multipart part

+ 1 - 90
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/AbstractS3ACostTest.java

@@ -36,7 +36,6 @@ import org.apache.hadoop.fs.s3a.S3AFileStatus;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.fs.s3a.Statistic;
 import org.apache.hadoop.fs.s3a.Tristate;
-import org.apache.hadoop.fs.s3a.impl.DirectoryPolicy;
 import org.apache.hadoop.fs.s3a.impl.InternalConstants;
 import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum;
 import org.apache.hadoop.fs.s3a.statistics.StatisticTypeEnum;
@@ -58,15 +57,6 @@ import static org.apache.hadoop.test.AssertExtensions.dynamicDescription;
  */
 public class AbstractS3ACostTest extends AbstractS3ATestBase {
 
-  /**
-   * Parameter: should directory markers be retained?
-   */
-  private final boolean keepMarkers;
-
-  private boolean isKeeping;
-
-  private boolean isDeleting;
-
   private OperationCostValidator costValidator;
 
   /**
@@ -84,18 +74,8 @@ public class AbstractS3ACostTest extends AbstractS3ATestBase {
 
 
   /**
    * Constructor for parameterized tests.
-   * @param keepMarkers should markers be tested.
    */
-  protected AbstractS3ACostTest(
-      final boolean keepMarkers) {
-    this.keepMarkers = keepMarkers;
-  }
-
-  /**
-   * Constructor with markers kept.
-   */
-  public AbstractS3ACostTest() {
-    this(true);
+  protected AbstractS3ACostTest() {
   }
 
   @Override
@@ -106,15 +86,8 @@ public class AbstractS3ACostTest extends AbstractS3ATestBase {
     String arn = conf.getTrimmed(arnKey, "");
 
     removeBaseAndBucketOverrides(bucketName, conf,
-        DIRECTORY_MARKER_POLICY,
-        AUTHORITATIVE_PATH,
         FS_S3A_CREATE_PERFORMANCE,
         FS_S3A_PERFORMANCE_FLAGS);
-    // directory marker options
-    conf.set(DIRECTORY_MARKER_POLICY,
-        keepMarkers
-            ? DIRECTORY_MARKER_POLICY_KEEP
-            : DIRECTORY_MARKER_POLICY_DELETE);
     disableFilesystemCaching(conf);
 
     // AccessPoint ARN is the only per bucket configuration that must be kept.
@@ -129,17 +102,7 @@ public class AbstractS3ACostTest extends AbstractS3ATestBase {
   public void setup() throws Exception {
     super.setup();
     S3AFileSystem fs = getFileSystem();
-    isKeeping = isKeepingMarkers();
 
-    isDeleting = !isKeeping;
-
-    // check that the FS has the expected state
-    DirectoryPolicy markerPolicy = fs.getDirectoryMarkerPolicy();
-    Assertions.assertThat(markerPolicy.getMarkerPolicy())
-        .describedAs("Marker policy for filesystem %s", fs)
-        .isEqualTo(isKeepingMarkers()
-            ? DirectoryPolicy.MarkerPolicy.Keep
-            : DirectoryPolicy.MarkerPolicy.Delete);
     setupCostValidator();
 
     // determine bulk delete settings
@@ -164,14 +127,6 @@ public class AbstractS3ACostTest extends AbstractS3ATestBase {
     costValidator = builder.build();
   }
 
-  public boolean isDeleting() {
-    return isDeleting;
-  }
-
-  public boolean isKeepingMarkers() {
-    return keepMarkers;
-  }
-
   /**
    * A special object whose toString() value is the current
    * state of the metrics.
@@ -379,26 +334,6 @@ public class AbstractS3ACostTest extends AbstractS3ATestBase {
     return OperationCostValidator.always();
   }
 
-  /**
-   * A metric diff which must hold when the fs is keeping markers.
-   * @param cost expected cost
-   * @return the diff.
-   */
-  protected OperationCostValidator.ExpectedProbe whenKeeping(
-      OperationCost cost) {
-    return expect(isKeepingMarkers(), cost);
-  }
-
-  /**
-   * A metric diff which must hold when the fs is keeping markers.
-   * @param cost expected cost
-   * @return the diff.
-   */
-  protected OperationCostValidator.ExpectedProbe whenDeleting(
-      OperationCost cost) {
-    return expect(isDeleting(), cost);
-  }
-
   /**
    * Execute a closure expecting a specific number of HEAD/LIST calls.
    * The operation is always evaluated.
@@ -506,30 +441,6 @@ public class AbstractS3ACostTest extends AbstractS3ATestBase {
     return probe(stat, expected);
   }
 
-  /**
-   * A metric diff which must hold when the fs is keeping markers.
-   * @param stat metric source
-   * @param expected expected value.
-   * @return the diff.
-   */
-  protected OperationCostValidator.ExpectedProbe withWhenKeeping(
-      final Statistic stat,
-      final int expected) {
-    return probe(isKeepingMarkers(), stat, expected);
-  }
-
-  /**
-   * A metric diff which must hold when the fs is keeping markers.
-   * @param stat metric source
-   * @param expected expected value.
-   * @return the diff.
-   */
-  protected OperationCostValidator.ExpectedProbe withWhenDeleting(
-      final Statistic stat,
-      final int expected) {
-    return probe(isDeleting(), stat, expected);
-  }
-
   /**
    * Assert the empty directory status of a file is as expected.
    * The raised assertion message includes a list of the path.
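With isKeepingMarkers()/isDeleting() and the whenKeeping/whenDeleting probe factories gone, cost assertions stop forking on marker policy and collapse to unconditional probes. A representative before/after, using the with() validator that remains (statistic names as in the tests below; the counts are illustrative):

    // before: two expectations, only one of which applied per test run
    //   withWhenKeeping(OBJECT_DELETE_REQUEST, DELETE_OBJECT_REQUEST),
    //   withWhenDeleting(OBJECT_DELETE_REQUEST, DELETE_OBJECT_REQUEST + directoriesInPath),

    // after: a single expectation holds for every store
    with(OBJECT_DELETE_REQUEST, DELETE_OBJECT_REQUEST),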

+ 2 - 4
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestCreateFileCost.java

@@ -83,12 +83,10 @@ public class ITestCreateFileCost extends AbstractS3ACostTest {
   private final boolean createPerformance;
 
   /**
-   * Create with markers kept, always.
+   * Create.
+   * @param createPerformance use the performance flag
    */
   public ITestCreateFileCost(final boolean createPerformance) {
-    // keep markers to permit assertions that create performance
-    // always skips marker deletion.
-    super(false);
     this.createPerformance = createPerformance;
   }
 

+ 9 - 73
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestDirectoryMarkerListing.java

@@ -22,21 +22,18 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.stream.Collectors;
 
 import org.assertj.core.api.Assertions;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import software.amazon.awssdk.core.exception.SdkException;
 import software.amazon.awssdk.core.sync.RequestBody;
 import software.amazon.awssdk.services.s3.S3Client;
 import software.amazon.awssdk.services.s3.model.HeadObjectResponse;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -50,11 +47,7 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.fs.s3a.S3AUtils;
 import org.apache.hadoop.fs.store.audit.AuditSpan;
 
-
 import static org.apache.hadoop.fs.contract.ContractTestUtils.touch;
-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY;
-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE;
-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_KEEP;
 import static org.apache.hadoop.fs.s3a.Constants.FS_S3A_CREATE_PERFORMANCE;
 import static org.apache.hadoop.fs.s3a.Constants.FS_S3A_PERFORMANCE_FLAGS;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestBucketName;
@@ -85,7 +78,6 @@ import static org.apache.hadoop.util.functional.RemoteIterators.foreach;
  * <p></p>
  * s3a create performance is disabled for consistent assertions.
  */
-@RunWith(Parameterized.class)
 public class ITestDirectoryMarkerListing extends AbstractS3ATestBase {
 
   private static final Logger LOG =
@@ -99,17 +91,6 @@ public class ITestDirectoryMarkerListing extends AbstractS3ATestBase {
 
 
   private static final String MARKER_PEER = "markerpeer";
 
-  /**
-   * Parameterization.
-   */
-  @Parameterized.Parameters(name = "{0}")
-  public static Collection<Object[]> params() {
-    return Arrays.asList(new Object[][]{
-        {"keep-markers",  true},
-        {"delete-markers", false},
-    });
-  }
-
   /**
    * Does rename copy markers?
    * Value: {@value}
@@ -120,21 +101,6 @@ public class ITestDirectoryMarkerListing extends AbstractS3ATestBase {
    */
   private static final boolean RENAME_COPIES_MARKERS = false;
 
-  /**
-   * Test configuration name.
-   */
-  private final String name;
-
-  /**
-   * Does this test configuration keep markers?
-   */
-  private final boolean keepMarkers;
-
-  /**
-   * Is this FS deleting markers?
-   */
-  private final boolean isDeletingMarkers;
-
   /**
    * Path to a directory which has a marker.
    */
@@ -187,27 +153,14 @@ public class ITestDirectoryMarkerListing extends AbstractS3ATestBase {
    */
   private String markerPeerKey;
 
-  public ITestDirectoryMarkerListing(final String name,
-      final boolean keepMarkers) {
-    this.name = name;
-    this.keepMarkers = keepMarkers;
-    this.isDeletingMarkers = !keepMarkers;
-  }
-
   @Override
   protected Configuration createConfiguration() {
     Configuration conf = super.createConfiguration();
     String bucketName = getTestBucketName(conf);
 
-    // directory marker options
     removeBaseAndBucketOverrides(bucketName, conf,
-        DIRECTORY_MARKER_POLICY,
         FS_S3A_CREATE_PERFORMANCE,
         FS_S3A_PERFORMANCE_FLAGS);
-    conf.set(DIRECTORY_MARKER_POLICY,
-        keepMarkers
-            ? DIRECTORY_MARKER_POLICY_KEEP
-            : DIRECTORY_MARKER_POLICY_DELETE);
     conf.setBoolean(FS_S3A_CREATE_PERFORMANCE, false);
     return conf;
   }
@@ -449,10 +402,6 @@ public class ITestDirectoryMarkerListing extends AbstractS3ATestBase {
 
 
   /**
    * Rename the base directory, expect the source files to move.
-   * <p></p>
-   * Whether or not the marker itself is copied depends on whether
-   * the release's rename operation explicitly skips
-   * markers on renames.
    */
   @Test
   public void testRenameBase() throws Throwable {
@@ -480,14 +429,8 @@ public class ITestDirectoryMarkerListing extends AbstractS3ATestBase {
     assertIsFile(destMarkerPeer);
     head(destFileKeyUnderMarker);
 
-    // probe for the marker based on expected rename
-    // behavior
-    if (RENAME_COPIES_MARKERS) {
-      head(destMarkerKeySlash);
-    } else {
-      head404(destMarkerKeySlash);
-    }
-
+    // rename doesn't copy non-leaf markers
+    head404(destMarkerKeySlash);
   }
 
   /**
@@ -520,11 +463,7 @@ public class ITestDirectoryMarkerListing extends AbstractS3ATestBase {
     assertRenamed(src, dest);
     assertIsFile(new Path(dest, file));
     assertIsDirectory(srcDir);
-    if (isDeletingMarkers) {
-      head404(markerKeySlash);
-    } else {
-      head(markerKeySlash);
-    }
+    head(markerKeySlash);
   }
 
   /**
@@ -557,11 +496,7 @@ public class ITestDirectoryMarkerListing extends AbstractS3ATestBase {
     assertRenamed(src, dest);
     assertIsFile(dest);
     assertIsDirectory(srcDir);
-    if (isDeletingMarkers) {
-      head404(markerKeySlash);
-    } else {
-      head(markerKeySlash);
-    }
+    head(markerKeySlash);
   }
 
   /**
@@ -615,6 +550,7 @@ public class ITestDirectoryMarkerListing extends AbstractS3ATestBase {
         s3client.putObject(b -> b.bucket(bucket).key(key),
             RequestBody.fromString(content)));
   }
+
   /**
    * Delete an object; exceptions are swallowed.
    * @param key key
@@ -650,7 +586,7 @@ public class ITestDirectoryMarkerListing extends AbstractS3ATestBase {
   private void head404(final String key) throws Exception {
     intercept(FileNotFoundException.class, "",
         "Expected 404 of " + key, () ->
-        head(key));
+            head(key));
   }
 
   /**
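Taken together, the listing-test edits encode the now-unconditional marker semantics: renaming a child leaves the source directory's marker object in place, and rename never copies a non-leaf marker to the destination. Condensed from the assertions above (head() and head404() are this test's own helpers):

    // after rename(src, dest) of a file or subdirectory under a marker:
    head(markerKeySlash);          // the source marker object is still present

    // after rename of the base directory itself:
    head404(destMarkerKeySlash);   // no marker object copied under the destination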

+ 22 - 83
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ADeleteCost.java

@@ -21,12 +21,9 @@ package org.apache.hadoop.fs.s3a.performance;
 
 
 import java.io.FileNotFoundException;
 import java.util.Arrays;
-import java.util.Collection;
 
 import org.assertj.core.api.Assertions;
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -36,6 +33,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.s3a.S3AFileStatus;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.s3a.Statistic;
 import org.apache.hadoop.fs.s3a.Tristate;
 import org.apache.hadoop.fs.s3a.impl.StatusProbeEnum;
 
@@ -47,35 +45,17 @@ import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 
 
 /**
  * Use metrics to assert about the cost of file API calls.
- * <p></p>
- * Parameterized on directory marker keep vs delete.
+ * <p>
  * There's extra complexity related to bulk/non-bulk delete calls.
  * If bulk deletes are disabled, many more requests are made to delete
  * parent directories. The counters of objects deleted are constant
  * irrespective of the delete mode.
  */
-@RunWith(Parameterized.class)
 public class ITestS3ADeleteCost extends AbstractS3ACostTest {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(ITestS3ADeleteCost.class);
 
-  /**
-   * Parameterization.
-   */
-  @Parameterized.Parameters(name = "{0}")
-  public static Collection<Object[]> params() {
-    return Arrays.asList(new Object[][]{
-        {"keep-markers", true},
-        {"delete-markers", false},
-    });
-  }
-
-  public ITestS3ADeleteCost(final String name,
-      final boolean keepMarkers) {
-    super(keepMarkers);
-  }
-
   @Override
   public Configuration createConfiguration() {
     return setPerformanceFlags(
@@ -85,11 +65,9 @@ public class ITestS3ADeleteCost extends AbstractS3ACostTest {
 
 
   @Override
   public void teardown() throws Exception {
-    if (isKeepingMarkers()) {
-      // do this ourselves to avoid audits teardown failing
-      // when surplus markers are found
-      deleteTestDirInTeardown();
-    }
+    // do this ourselves to avoid audits teardown failing
+    // when surplus markers are found
+    deleteTestDirInTeardown();
     super.teardown();
   }
 
@@ -104,22 +82,15 @@ public class ITestS3ADeleteCost extends AbstractS3ACostTest {
     S3AFileSystem fs = getFileSystem();
     // creates the marker
     Path dir = dir(methodPath());
-    // file creation may have deleted that marker, but it may
-    // still be there
     Path simpleFile = file(new Path(dir, "simple.txt"));
 
-    boolean keeping = !isDeleting();
-    boolean deleting = isDeleting();
     boolean bulkDelete = isBulkDelete();
     verifyMetrics(() -> {
           fs.delete(simpleFile, false);
           return "after fs.delete(simpleFile) " + getMetricSummary();
         },
-        probe(keeping, OBJECT_METADATA_REQUESTS,
+        probe(OBJECT_METADATA_REQUESTS,
             FILESTATUS_FILE_PROBE_H),
-        // if deleting markers, look for the parent too
-        probe(deleting, OBJECT_METADATA_REQUESTS,
-            FILESTATUS_FILE_PROBE_H + FILESTATUS_DIR_PROBE_H),
         with(OBJECT_LIST_REQUEST,
             FILESTATUS_FILE_PROBE_L + FILESTATUS_DIR_PROBE_L),
         with(DIRECTORIES_DELETED, 0),
@@ -130,17 +101,9 @@ public class ITestS3ADeleteCost extends AbstractS3ACostTest {
         probe(!bulkDelete, OBJECT_DELETE_REQUEST,
             DELETE_OBJECT_REQUEST + DELETE_MARKER_REQUEST),
 
-        // keeping: create no parent dirs or delete parents
-        withWhenKeeping(DIRECTORIES_CREATED, 0),
-        withWhenKeeping(OBJECT_BULK_DELETE_REQUEST, 0),
-
-        // deleting: create a parent and delete any of its parents
-        withWhenDeleting(DIRECTORIES_CREATED, 1),
-        // a bulk delete for all parents is issued.
-        // the number of objects in it depends on the depth of the tree;
-        // don't worry about that
-        probe(deleting && bulkDelete, OBJECT_BULK_DELETE_REQUEST,
-            DELETE_MARKER_REQUEST)
+        // create no parent dirs or delete parents
+        with(DIRECTORIES_CREATED, 0),
+        with(OBJECT_BULK_DELETE_REQUEST, 0)
     );
 
     // there is an empty dir for a parent
@@ -164,18 +127,13 @@ public class ITestS3ADeleteCost extends AbstractS3ACostTest {
     Path file1 = file(new Path(dir, "file1.txt"));
     Path file2 = file(new Path(dir, "file2.txt"));
 
-    boolean rawAndKeeping = !isDeleting();
-    boolean rawAndDeleting = isDeleting();
     verifyMetrics(() -> {
       fs.delete(file1, false);
       return "after fs.delete(file1) " + getMetricSummary();
     },
-        // delete file. For keeping: that's it
-        probe(rawAndKeeping, OBJECT_METADATA_REQUESTS,
+        // delete file.
+        probe(OBJECT_METADATA_REQUESTS,
             FILESTATUS_FILE_PROBE_H),
-        // if deleting markers, look for the parent too
-        probe(rawAndDeleting, OBJECT_METADATA_REQUESTS,
-            FILESTATUS_FILE_PROBE_H + FILESTATUS_DIR_PROBE_H),
         with(OBJECT_LIST_REQUEST,
             FILESTATUS_FILE_PROBE_L + FILESTATUS_DIR_PROBE_L),
         with(DIRECTORIES_DELETED, 0),
@@ -184,12 +142,8 @@ public class ITestS3ADeleteCost extends AbstractS3ACostTest {
         // no need to create a parent
         with(DIRECTORIES_CREATED, 0),
 
-        // keeping: create no parent dirs or delete parents
-        withWhenKeeping(OBJECT_DELETE_REQUEST, DELETE_OBJECT_REQUEST),
-
-        // deleting: create a parent and delete any of its parents
-        withWhenDeleting(OBJECT_DELETE_REQUEST,
-            DELETE_OBJECT_REQUEST));
+        // create no parent dirs or delete parents
+        with(OBJECT_DELETE_REQUEST, DELETE_OBJECT_REQUEST));
   }
 
   @Test
@@ -216,19 +170,15 @@ public class ITestS3ADeleteCost extends AbstractS3ACostTest {
     LOG.info("creating sub directory {}", subDir);
     LOG.info("creating sub directory {}", subDir);
     // one dir created, possibly a parent removed
     // one dir created, possibly a parent removed
     final int fakeDirectoriesToDelete = directoriesInPath(subDir) - 1;
     final int fakeDirectoriesToDelete = directoriesInPath(subDir) - 1;
+    final Statistic stat = getDeleteMarkerStatistic();
     verifyMetrics(() -> {
     verifyMetrics(() -> {
       mkdirs(subDir);
       mkdirs(subDir);
       return "after mkdir(subDir) " + getMetricSummary();
       return "after mkdir(subDir) " + getMetricSummary();
     },
     },
         with(DIRECTORIES_CREATED, 1),
         with(DIRECTORIES_CREATED, 1),
         with(DIRECTORIES_DELETED, 0),
         with(DIRECTORIES_DELETED, 0),
-        withWhenKeeping(getDeleteMarkerStatistic(), 0),
-        withWhenKeeping(FAKE_DIRECTORIES_DELETED, 0),
-        withWhenDeleting(getDeleteMarkerStatistic(),
-            isBulkDelete() ? DELETE_MARKER_REQUEST : fakeDirectoriesToDelete),
-        // delete all possible fake dirs above the subdirectory
-        withWhenDeleting(FAKE_DIRECTORIES_DELETED,
-            fakeDirectoriesToDelete));
+        with(stat, 0),
+        with(FAKE_DIRECTORIES_DELETED, 0));
 
 
     LOG.info("About to delete {}", parent);
     LOG.info("About to delete {}", parent);
     // now delete the deep tree.
     // now delete the deep tree.
@@ -237,12 +187,9 @@ public class ITestS3ADeleteCost extends AbstractS3ACostTest {
       return "deleting parent dir " + parent + " " + getMetricSummary();
       return "deleting parent dir " + parent + " " + getMetricSummary();
     },
     },
 
 
-        // keeping: the parent dir marker needs deletion alongside
+        // the parent dir marker needs deletion alongside
         // the subdir one.
         // the subdir one.
-        withWhenKeeping(OBJECT_DELETE_OBJECTS, dirsCreated),
-
-        // deleting: only the marker at the bottom needs deleting
-        withWhenDeleting(OBJECT_DELETE_OBJECTS, 1));
+        with(OBJECT_DELETE_OBJECTS, dirsCreated));
 
 
     // followup with list calls to make sure all is clear.
     // followup with list calls to make sure all is clear.
     verifyNoListing(parent);
     verifyNoListing(parent);
@@ -274,10 +221,7 @@ public class ITestS3ADeleteCost extends AbstractS3ACostTest {
 
 
     Path srcDir = dir(new Path(srcBaseDir, "1/2/3/4/5/6"));
 
-    // creating a file should trigger demise of the src dir marker
-    // unless markers are being kept
-
-    final int directories = directoriesInPath(srcDir);
+    final Statistic stat = getDeleteMarkerStatistic();
     verifyMetrics(() -> {
       final Path srcPath = new Path(srcDir, "source.txt");
       file(srcPath);
@@ -286,14 +230,9 @@ public class ITestS3ADeleteCost extends AbstractS3ACostTest {
     },
         with(DIRECTORIES_CREATED, 0),
         with(DIRECTORIES_DELETED, 0),
-        // keeping: no delete operations.
-        withWhenKeeping(getDeleteMarkerStatistic(), 0),
-        withWhenKeeping(FAKE_DIRECTORIES_DELETED, 0),
-        // delete all possible fake dirs above the file
-        withWhenDeleting(FAKE_DIRECTORIES_DELETED,
-            directories),
-        withWhenDeleting(getDeleteMarkerStatistic(),
-            isBulkDelete() ? 1: directories));
+        // no delete operations.
+        with(stat, 0),
+        with(FAKE_DIRECTORIES_DELETED, 0));
   }
 
 }

+ 2 - 5
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMiscOperationCost.java

@@ -49,7 +49,6 @@ import static org.apache.hadoop.fs.s3a.performance.OperationCostValidator.probe;
 
 
 /**
  * Use metrics to assert about the cost of misc operations.
- * Parameterized on directory marker keep vs delete
  */
 @RunWith(Parameterized.class)
 public class ITestS3AMiscOperationCost extends AbstractS3ACostTest {
@@ -68,15 +67,13 @@ public class ITestS3AMiscOperationCost extends AbstractS3ACostTest {
   @Parameterized.Parameters(name = "{0}")
   public static Collection<Object[]> params() {
     return Arrays.asList(new Object[][]{
-        {"keep-markers-auditing", true, true},
-        {"delete-markers-unaudited", false, false}
+        {"auditing", true},
+        {"unaudited", false}
     });
   }
 
   public ITestS3AMiscOperationCost(final String name,
-      final boolean keepMarkers,
       final boolean auditing) {
-    super(keepMarkers);
     this.auditing = auditing;
   }
 

+ 0 - 23
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AMkdirCost.java

@@ -18,12 +18,7 @@
 
 
 package org.apache.hadoop.fs.s3a.performance;
 
-import java.util.Arrays;
-import java.util.Collection;
-
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -40,30 +35,12 @@ import static org.apache.hadoop.fs.s3a.performance.OperationCost.FILESTATUS_FILE
 
 
 /**
  * Use metrics to assert about the cost of mkdirs.
- * Parameterized directory marker keep vs delete
  */
-@RunWith(Parameterized.class)
 public class ITestS3AMkdirCost extends AbstractS3ACostTest {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(ITestS3AMkdirCost.class);
 
-  /**
-   * Parameterization.
-   */
-  @Parameterized.Parameters(name = "{0}")
-  public static Collection<Object[]> params() {
-    return Arrays.asList(new Object[][]{
-        {"keep-markers", true},
-        {"delete-markers", false}
-    });
-  }
-
-  public ITestS3AMkdirCost(final String name,
-      final boolean keepMarkers) {
-    super(keepMarkers);
-  }
-
   /**
    * Common operation which should be low cost as possible.
    */

+ 0 - 4
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3AOpenCost.java

@@ -93,10 +93,6 @@ public class ITestS3AOpenCost extends AbstractS3ACostTest {
    */
   private boolean prefetching;
 
-  public ITestS3AOpenCost() {
-    super(true);
-  }
-
   @Override
   public Configuration createConfiguration() {
     Configuration conf = super.createConfiguration();

+ 4 - 48
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestS3ARenameCost.java

@@ -18,15 +18,10 @@
 
 
 package org.apache.hadoop.fs.s3a.performance;
 
-
-import java.util.Arrays;
-import java.util.Collection;
 import java.util.UUID;
 
 import org.assertj.core.api.Assertions;
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -35,36 +30,15 @@ import org.apache.hadoop.fs.s3a.S3AFileSystem;
 
 
 import static org.apache.hadoop.fs.s3a.Statistic.*;
 import static org.apache.hadoop.fs.s3a.performance.OperationCost.*;
-import static org.apache.hadoop.fs.s3a.performance.OperationCostValidator.probe;
 
 /**
  * Use metrics to assert about the cost of file API calls.
- * <p></p>
- * Parameterized on directory marker keep vs delete
  */
-@RunWith(Parameterized.class)
 public class ITestS3ARenameCost extends AbstractS3ACostTest {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(ITestS3ARenameCost.class);
 
-  /**
-   * Parameterization.
-   */
-  @Parameterized.Parameters(name = "{0}")
-  public static Collection<Object[]> params() {
-    return Arrays.asList(new Object[][]{
-        {"keep-markers", true},
-        {"delete-markers", false},
-    });
-  }
-
-  public ITestS3ARenameCost(final String name,
-      final boolean keepMarkers) {
-    super(keepMarkers);
-
-  }
-
   @Test
   @Test
   public void testRenameFileToDifferentDirectory() throws Throwable {
   public void testRenameFileToDifferentDirectory() throws Throwable {
     describe("rename a file to a different directory, "
     describe("rename a file to a different directory, "
@@ -101,28 +75,10 @@ public class ITestS3ARenameCost extends AbstractS3ACostTest {
         always(RENAME_SINGLE_FILE_DIFFERENT_DIR),
         with(DIRECTORIES_CREATED, 0),
         with(DIRECTORIES_DELETED, 0),
-        // keeping: only the core delete operation is issued.
-        withWhenKeeping(OBJECT_DELETE_REQUEST, DELETE_OBJECT_REQUEST),
-        withWhenKeeping(FAKE_DIRECTORIES_DELETED, 0),
-        withWhenKeeping(OBJECT_DELETE_OBJECTS, 1),
-
-        // deleting: delete any fake marker above the destination.
-        // the actual request count depends on whether bulk delete is
-        // enabled or not
-
-        // no bulk delete: multiple marker calls
-        probe(isDeleting() && !isBulkDelete(), OBJECT_DELETE_REQUEST,
-            DELETE_OBJECT_REQUEST + directoriesInPath),
-
-        // bulk delete: split up
-        probe(isDeleting() && isBulkDelete(), OBJECT_DELETE_REQUEST,
-                DELETE_OBJECT_REQUEST),
-        probe(isDeleting() && isBulkDelete(), OBJECT_BULK_DELETE_REQUEST,
-            DELETE_MARKER_REQUEST),
-        withWhenDeleting(FAKE_DIRECTORIES_DELETED,
-            directoriesInPath),
-        withWhenDeleting(OBJECT_DELETE_OBJECTS,
-            directoriesInPath + 1));
+        // only the core delete operation is issued.
+        with(OBJECT_DELETE_REQUEST, DELETE_OBJECT_REQUEST),
+        with(FAKE_DIRECTORIES_DELETED, 0),
+        with(OBJECT_DELETE_OBJECTS, 1));

     assertIsFile(destFilePath);
     assertIsDirectory(srcDir);

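The collapsed probes encode the new invariant directly: with markers always retained, renaming a single file issues exactly one object delete and never touches fake directory entries. A sketch of that expectation in the probe style used above; verifyMetrics stands in for whichever assertion entry point the cost-test base class provides, and fs, src and dst are assumed test fixtures:

    // renaming one file to a different directory should cost
    // a single DELETE of the source key and nothing else:
    // no directories created or deleted, no fake directory cleanup.
    verifyMetrics(() -> fs.rename(src, dst),
        always(RENAME_SINGLE_FILE_DIFFERENT_DIR),
        with(DIRECTORIES_CREATED, 0),
        with(DIRECTORIES_DELETED, 0),
        with(OBJECT_DELETE_REQUEST, DELETE_OBJECT_REQUEST),
        with(FAKE_DIRECTORIES_DELETED, 0),
        with(OBJECT_DELETE_OBJECTS, 1));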
+ 0 - 7
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/performance/ITestUnbufferDraining.java

@@ -96,13 +96,6 @@ public class ITestUnbufferDraining extends AbstractS3ACostTest {
    */
   private FileSystem brittleFS;

-  /**
-   * Create with markers kept, always.
-   */
-  public ITestUnbufferDraining() {
-    super(false);
-  }
-
   @Override
   public Configuration createConfiguration() {
     Configuration conf = super.createConfiguration();

+ 1 - 5
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/AbstractS3GuardToolTestBase.java

@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.s3a.UnknownStoreException;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.ExitUtil;

-import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.BucketInfo.IS_MARKER_AWARE;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.E_S3GUARD_UNSUPPORTED;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.INVALID_ARGUMENT;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.SUCCESS;
@@ -169,12 +168,9 @@ public abstract class AbstractS3GuardToolTestBase extends AbstractS3ATestBase {

     // run a bucket info command
     S3GuardTool.BucketInfo infocmd = toClose(new S3GuardTool.BucketInfo(conf));
-    String info = exec(infocmd, S3GuardTool.BucketInfo.NAME,
+    exec(infocmd, S3GuardTool.BucketInfo.NAME,
         "-" + MARKERS, S3GuardTool.BucketInfo.MARKERS_AWARE,
         fsUri.toString());
-
-    assertTrue("Output should contain information about S3A client " + info,
-        info.contains(IS_MARKER_AWARE));
   }

   /**

+ 0 - 132
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/TestAuthoritativePath.java

@@ -1,132 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3a.s3guard;
-
-import java.net.URI;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.stream.Collectors;
-
-import org.junit.Before;
-import org.junit.Test;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.s3a.S3AUtils;
-import org.apache.hadoop.test.AbstractHadoopTestBase;
-
-import static org.apache.hadoop.fs.s3a.Constants.AUTHORITATIVE_PATH;
-import static org.apache.hadoop.fs.s3a.S3ATestConstants.UNIT_TEST_EXAMPLE_PATH;
-import static org.assertj.core.api.Assertions.assertThat;
-
-/**
- * Unit tests of auth path resolution.
- */
-public class TestAuthoritativePath extends AbstractHadoopTestBase {
-
-  private final Path root = new Path("/");
-
-  private URI fsUri;
-
-  private static final String BASE = "s3a://bucket";
-
-  @Before
-  public void setup() throws Exception {
-    fsUri = new URI(BASE +"/");
-  }
-
-  private Configuration authPathsConf(String... paths) {
-    Configuration conf = new Configuration(false);
-    conf.set(AUTHORITATIVE_PATH, String.join(",", paths));
-    return conf;
-  }
-
-  @Test
-  public void testResolution() throws Throwable {
-    assertAuthPaths(l("/one"), "/one/");
-  }
-
-  @Test
-  public void testResolutionWithFQP() throws Throwable {
-    assertAuthPaths(l("/one/",
-        BASE + "/two/"),
-        "/one/", "/two/");
-  }
-  @Test
-  public void testOtherBucket() throws Throwable {
-    assertAuthPaths(l("/one/",
-        UNIT_TEST_EXAMPLE_PATH,
-        BASE + "/two/"),
-        "/one/", "/two/");
-  }
-
-  @Test
-  public void testOtherScheme() throws Throwable {
-    assertAuthPaths(l("/one/",
-         UNIT_TEST_EXAMPLE_PATH,
-        "http://bucket/two/"),
-        "/one/");
-  }
-
-  /**
-   * Get the auth paths; qualification is through
-   * Path.makeQualified not the FS near-equivalent.
-   * @param conf configuration
-   * @return list of auth paths.
-   */
-  private Collection<String> getAuthoritativePaths(
-      Configuration conf) {
-
-    return S3Guard.getAuthoritativePaths(fsUri, conf,
-        p -> {
-          Path q = p.makeQualified(fsUri, root);
-          assertThat(q.toUri().getAuthority())
-              .describedAs("Path %s", q)
-              .isEqualTo(fsUri.getAuthority());
-          return S3AUtils.maybeAddTrailingSlash(q.toString());
-        });
-  }
-
-  /**
-   * take a varargs list and and return as an array.
-   * @param s source
-   * @return the values
-   */
-  private String[] l(String...s) {
-    return s;
-  }
-
-  /**
-   * Assert that the authoritative paths from a source list
-   * are that expected.
-   * @param src source entries to set as auth paths
-   * @param expected the list of auth paths for a filesystem
-   */
-  private void assertAuthPaths(String[] src, String...expected) {
-    Configuration conf = authPathsConf(src);
-    List<String> collect = Arrays.stream(expected)
-        .map(s -> BASE + s)
-        .collect(Collectors.toList());
-    Collection<String> paths = getAuthoritativePaths(conf);
-    assertThat(paths)
-        .containsExactlyInAnyOrderElementsOf(collect);
-  }
-
-}

+ 1 - 8
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/ITestS3ADirectoryPerformance.java

@@ -53,8 +53,6 @@ import java.util.concurrent.Executors;
 import software.amazon.awssdk.services.s3.model.PutObjectRequest;
 import software.amazon.awssdk.services.s3.model.PutObjectResponse;

-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY;
-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_KEEP;
 import static org.apache.hadoop.fs.s3a.Statistic.*;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.*;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
@@ -227,11 +225,6 @@ public class ITestS3ADirectoryPerformance extends S3AScaleTestBase {
     final Configuration conf =
             getConfigurationWithConfiguredBatchSize(batchSize);

-    removeBaseAndBucketOverrides(conf,
-        DIRECTORY_MARKER_POLICY);
-    // force directory markers = keep to save delete requests on every
-    // file created.
-    conf.set(DIRECTORY_MARKER_POLICY, DIRECTORY_MARKER_POLICY_KEEP);
     S3AFileSystem fs = (S3AFileSystem) FileSystem.get(dir.toUri(), conf);

     final List<String> originalListOfFiles = new ArrayList<>();
@@ -260,7 +253,7 @@ public class ITestS3ADirectoryPerformance extends S3AScaleTestBase {
                 null, 0, false);
         futures.add(submit(executorService,
             () -> writeOperationHelper.putObject(putObjectRequestBuilder.build(),
-                PutObjectOptions.keepingDirs(),
+                PutObjectOptions.defaultOptions(),
                 new S3ADataBlocks.BlockUploadData(new byte[0], null), null)));
       }
       LOG.info("Waiting for PUTs to complete");

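The PutObjectOptions change above is mechanical: the marker-policy choice that keepingDirs() encoded is gone, and defaultOptions() is all a writer needs. A hedged sketch of the new call shape, reusing the putObject signature visible in the hunk; the class and method names are illustrative, not part of the patch:

import java.io.IOException;

import software.amazon.awssdk.services.s3.model.PutObjectRequest;

import org.apache.hadoop.fs.s3a.S3ADataBlocks;
import org.apache.hadoop.fs.s3a.WriteOperationHelper;
import org.apache.hadoop.fs.s3a.impl.PutObjectOptions;

/** Sketch: PUT an empty object without any marker-policy decision. */
final class EmptyObjectPut {

  private EmptyObjectPut() {
  }

  /**
   * Upload a zero-byte object described by the given request.
   * Before HADOOP-19278 callers chose between marker-keeping and
   * marker-deleting option sets; now defaultOptions() is the only
   * variant this code path requires.
   */
  static void putEmpty(WriteOperationHelper helper, PutObjectRequest request)
      throws IOException {
    helper.putObject(request,
        PutObjectOptions.defaultOptions(),
        new S3ADataBlocks.BlockUploadData(new byte[0], null),
        null);
  }
}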
+ 0 - 7
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/test/MinimalWriteOperationHelperCallbacks.java

@@ -30,7 +30,6 @@ import software.amazon.awssdk.services.s3.model.UploadPartRequest;
 import software.amazon.awssdk.services.s3.model.UploadPartResponse;

 import org.apache.hadoop.fs.s3a.WriteOperationHelper;
-import org.apache.hadoop.fs.s3a.impl.PutObjectOptions;
 import org.apache.hadoop.fs.statistics.DurationTrackerFactory;

 /**
@@ -70,11 +69,5 @@ public class MinimalWriteOperationHelperCallbacks
     return s3clientSupplier.get().uploadPart(request, body);
   }

-  @Override
-  public void finishedWrite(final String key,
-      final long length,
-      final PutObjectOptions putOptions) {
-
-  }
 }


+ 2 - 93
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/AbstractMarkerToolTest.java

@@ -21,7 +21,6 @@ package org.apache.hadoop.fs.s3a.tools;
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
-import java.net.URI;
 import java.util.List;

 import org.assertj.core.api.Assertions;
@@ -32,23 +31,19 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.s3a.AbstractS3ATestBase;
-import org.apache.hadoop.fs.s3a.S3AFileSystem;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.StringUtils;

 import static org.apache.hadoop.fs.s3a.Constants.*;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.disableFilesystemCaching;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.getTestBucketName;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBaseAndBucketOverrides;
-import static org.apache.hadoop.fs.s3a.S3ATestUtils.removeBucketOverrides;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.VERBOSE;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardToolTestHelper.runS3GuardCommand;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardToolTestHelper.runS3GuardCommandToFailure;
 import static org.apache.hadoop.fs.s3a.tools.MarkerTool.UNLIMITED_LISTING;

 /**
- * Class for marker tool tests -sets up keeping/deleting filesystems,
- * has methods to invoke.
+ * Class for marker tool tests.
  */
 public class AbstractMarkerToolTest extends AbstractS3ATestBase {

@@ -58,26 +53,15 @@ public class AbstractMarkerToolTest extends AbstractS3ATestBase {
   /** the -verbose option. */
   protected static final String V = AbstractMarkerToolTest.m(VERBOSE);

-  /** FS which keeps markers. */
-  private S3AFileSystem keepingFS;
-
-  /** FS which deletes markers. */
-  private S3AFileSystem deletingFS;
-
-  /** FS which mixes markers; only created in some tests. */
-  private S3AFileSystem mixedFS;
   @Override
   protected Configuration createConfiguration() {
     Configuration conf = super.createConfiguration();
     String bucketName = getTestBucketName(conf);
     removeBaseAndBucketOverrides(bucketName, conf,
         S3A_BUCKET_PROBE,
-        DIRECTORY_MARKER_POLICY,
         AUTHORITATIVE_PATH,
         FS_S3A_CREATE_PERFORMANCE,
         FS_S3A_PERFORMANCE_FLAGS);
-    // base FS is legacy
-    conf.set(DIRECTORY_MARKER_POLICY, DIRECTORY_MARKER_POLICY_DELETE);
     conf.setBoolean(FS_S3A_CREATE_PERFORMANCE, false);

     // turn off bucket probes for a bit of speedup in the connectors we create.
@@ -86,54 +70,14 @@ public class AbstractMarkerToolTest extends AbstractS3ATestBase {
     return conf;
   }

-  @Override
-  public void setup() throws Exception {
-    super.setup();
-    setKeepingFS(createFS(DIRECTORY_MARKER_POLICY_KEEP, null));
-    setDeletingFS(createFS(DIRECTORY_MARKER_POLICY_DELETE, null));
-  }
-
   @Override
   public void teardown() throws Exception {
     // do this ourselves to avoid audits teardown failing
     // when surplus markers are found
     deleteTestDirInTeardown();
     super.teardown();
-    IOUtils.cleanupWithLogger(LOG, getKeepingFS(),
-        getMixedFS(), getDeletingFS());
-
   }

-  /**
-   * FS which deletes markers.
-   */
-  public S3AFileSystem getDeletingFS() {
-    return deletingFS;
-  }
-
-  public void setDeletingFS(final S3AFileSystem deletingFS) {
-    this.deletingFS = deletingFS;
-  }
-
-  /**
-   * FS which keeps markers.
-   */
-  protected S3AFileSystem getKeepingFS() {
-    return keepingFS;
-  }
-
-  private void setKeepingFS(S3AFileSystem keepingFS) {
-    this.keepingFS = keepingFS;
-  }
-
-  /** only created on demand. */
-  private S3AFileSystem getMixedFS() {
-    return mixedFS;
-  }
-
-  protected void setMixedFS(S3AFileSystem mixedFS) {
-    this.mixedFS = mixedFS;
-  }

   /**
    * Get a filename for a temp file.
@@ -178,53 +122,19 @@ public class AbstractMarkerToolTest extends AbstractS3ATestBase {
     }
   }

-  /**
-   * Create a new FS with given marker policy and path.
-   * This filesystem MUST be closed in test teardown.
-   * @param markerPolicy markers
-   * @param authPath authoritative path. If null: no path.
-   * @return a new FS.
-   */
-  protected S3AFileSystem createFS(String markerPolicy,
-      String authPath) throws Exception {
-    S3AFileSystem testFS = getFileSystem();
-    Configuration conf = new Configuration(testFS.getConf());
-    URI testFSUri = testFS.getUri();
-    String bucketName = getTestBucketName(conf);
-    removeBucketOverrides(bucketName, conf,
-        DIRECTORY_MARKER_POLICY,
-        BULK_DELETE_PAGE_SIZE,
-        AUTHORITATIVE_PATH);
-    if (authPath != null) {
-      conf.set(AUTHORITATIVE_PATH, authPath);
-    }
-    // Use a very small page size to force the paging
-    // code to be tested.
-    conf.setInt(BULK_DELETE_PAGE_SIZE, 2);
-    conf.set(DIRECTORY_MARKER_POLICY, markerPolicy);
-    S3AFileSystem fs2 = new S3AFileSystem();
-    fs2.initialize(testFSUri, conf);
-    LOG.info("created new filesystem with policy {} and auth path {}",
-        markerPolicy,
-        (authPath == null ? "(null)": authPath));
-    return fs2;
-  }
-
   /**
    * Execute the marker tool, expecting the execution to succeed.
    * @param sourceFS filesystem to use
    * @param path path to scan
-   * @param doPurge should markers be purged
    * @param expectedMarkerCount number of markers expected
    * @return the result
    */
   protected MarkerTool.ScanResult markerTool(
       final FileSystem sourceFS,
       final Path path,
-      final boolean doPurge,
       final int expectedMarkerCount)
       throws IOException {
-    return markerTool(0, sourceFS, path, doPurge,
+    return markerTool(0, sourceFS, path, false,
         expectedMarkerCount,
         UNLIMITED_LISTING, false);
   }
@@ -315,7 +225,6 @@ public class AbstractMarkerToolTest extends AbstractS3ATestBase {
             .withMinMarkerCount(expectedMarkers)
             .withMaxMarkerCount(expectedMarkers)
             .withLimit(limit)
-            .withNonAuth(nonAuth)
             .build());
     Assertions.assertThat(result.getExitCode())
         .describedAs("Exit code of marker(%s, %s, %d) -> %s",

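A knock-on effect for test code: the short markerTool overload above has lost its doPurge flag, so an audit-only scan is now a plain three-argument call. A minimal sketch under that assumption (hypothetical test class in the same package; markerTool, getFileSystem, methodPath and LOG come from the base classes shown here):

import org.junit.Test;

import org.apache.hadoop.fs.Path;

/** Sketch: audit-only marker scan with the simplified helper. */
public class ITestMarkerAuditExample extends AbstractMarkerToolTest {

  @Test
  public void testNoSurplusMarkersUnderTestPath() throws Throwable {
    Path path = methodPath();
    // scan the test path and require exactly zero surplus markers.
    // purging is no longer selectable through this overload; it
    // always delegates with purge disabled, as the hunk above shows.
    MarkerTool.ScanResult result = markerTool(getFileSystem(), path, 0);
    LOG.info("scan result: {}", result);
  }
}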
+ 15 - 157
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/tools/ITestMarkerTool.java

@@ -35,12 +35,9 @@ import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.s3a.test.PublicDatasetTestUtils;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;

-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_AUTHORITATIVE;
-import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_DELETE;
 import static org.apache.hadoop.fs.s3a.Constants.DIRECTORY_MARKER_POLICY_KEEP;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardTool.BucketInfo.BUCKET_INFO;
 import static org.apache.hadoop.fs.s3a.s3guard.S3GuardToolTestHelper.runS3GuardCommand;
-import static org.apache.hadoop.fs.s3a.s3guard.S3GuardToolTestHelper.runS3GuardCommandToFailure;
 import static org.apache.hadoop.fs.s3a.tools.MarkerTool.*;
 import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_INTERRUPTED;
 import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_NOT_ACCEPTABLE;
@@ -48,8 +45,7 @@ import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_NOT_FOUN
 import static org.apache.hadoop.service.launcher.LauncherExitCodes.EXIT_USAGE;

 /**
- * Test the marker tool and use it to compare the behavior
- * of keeping vs legacy S3A FS instances.
+ * Test the marker tools.
  */
 public class ITestMarkerTool extends AbstractMarkerToolTest {

@@ -81,126 +77,40 @@ public class ITestMarkerTool extends AbstractMarkerToolTest {
    */
   private int expectedMarkersWithBaseDir;

-
-  @Test
-  public void testCleanMarkersLegacyDir() throws Throwable {
-    describe("Clean markers under a deleting FS -expect none");
-    CreatedPaths createdPaths = createPaths(getDeletingFS(), methodPath());
-    markerTool(getDeletingFS(), createdPaths.base, false, 0);
-    markerTool(getDeletingFS(), createdPaths.base, true, 0);
-  }
-
   @Test
   public void testCleanMarkersFileLimit() throws Throwable {
-    describe("Clean markers under a keeping FS -with file limit");
-    CreatedPaths createdPaths = createPaths(getKeepingFS(), methodPath());
+    describe("Clean markers with file limit");
+    CreatedPaths createdPaths = createPaths(getFileSystem(), methodPath());

     // audit will be interrupted
-    markerTool(EXIT_INTERRUPTED, getDeletingFS(),
+    markerTool(EXIT_INTERRUPTED, getFileSystem(),
         createdPaths.base, false, 0, 1, false);
   }

   @Test
-  public void testCleanMarkersKeepingDir() throws Throwable {
-    describe("Audit then clean markers under a deleting FS "
-        + "-expect markers to be found and then cleaned up");
-    CreatedPaths createdPaths = createPaths(getKeepingFS(), methodPath());
-
-    // audit will find the expected entries
-    int expectedMarkerCount = createdPaths.dirs.size();
-    S3AFileSystem fs = getDeletingFS();
-    LOG.info("Auditing a directory with retained markers -expect failure");
-    markerTool(EXIT_NOT_ACCEPTABLE, fs,
-        createdPaths.base, false, 0, UNLIMITED_LISTING, false);
-
-    LOG.info("Auditing a directory expecting retained markers");
-    markerTool(fs, createdPaths.base, false,
-        expectedMarkerCount);
-
-    // we require that a purge didn't take place, so run the
-    // audit again.
-    LOG.info("Auditing a directory expecting retained markers");
-    markerTool(fs, createdPaths.base, false,
-        expectedMarkerCount);
-
-    LOG.info("Purging a directory of retained markers");
-    // purge cleans up
-    assertMarkersDeleted(expectedMarkerCount,
-        markerTool(fs, createdPaths.base, true, expectedMarkerCount));
-    // and a rerun doesn't find markers
-    LOG.info("Auditing a directory with retained markers -expect success");
-    assertMarkersDeleted(0,
-        markerTool(fs, createdPaths.base, true, 0));
-  }
-
-  @Test
-  public void testRenameKeepingFS() throws Throwable {
-    describe("Rename with the keeping FS -verify that no markers"
+  public void testRenameDoesntCopyMarkers() throws Throwable {
+    describe("Rename with and verify that no markers"
         + " exist at far end");
         + " exist at far end");
     Path base = methodPath();
     Path base = methodPath();
     Path source = new Path(base, "source");
     Path source = new Path(base, "source");
     Path dest = new Path(base, "dest");
     Path dest = new Path(base, "dest");
 
 
-    S3AFileSystem fs = getKeepingFS();
+    S3AFileSystem fs = getFileSystem();
     CreatedPaths createdPaths = createPaths(fs, source);
     CreatedPaths createdPaths = createPaths(fs, source);
 
 
     // audit will find three entries
     // audit will find three entries
     int expectedMarkerCount = createdPaths.dirs.size();
     int expectedMarkerCount = createdPaths.dirs.size();
 
 
-    markerTool(fs, source, false, expectedMarkerCount);
+    markerTool(fs, source, expectedMarkerCount);
     fs.rename(source, dest);
     fs.rename(source, dest);
     assertIsDirectory(dest);
     assertIsDirectory(dest);
 
 
     // there are no markers
     // there are no markers
-    markerTool(fs, dest, false, 0);
+    markerTool(fs, dest, 0);
     LOG.info("Auditing destination paths");
     LOG.info("Auditing destination paths");
     verifyRenamed(dest, createdPaths);
     verifyRenamed(dest, createdPaths);
   }
   }
 
 
-  /**
-   * Create a FS where only dir2 in the source tree keeps markers;
-   * verify all is good.
-   */
-  @Test
-  public void testAuthPathIsMixed() throws Throwable {
-    describe("Create a source tree with mixed semantics");
-    Path base = methodPath();
-    Path source = new Path(base, "source");
-    Path dest = new Path(base, "dest");
-    Path dir2 = new Path(source, "dir2");
-    S3AFileSystem mixedFSDir2 = createFS(DIRECTORY_MARKER_POLICY_AUTHORITATIVE,
-        dir2.toUri().toString());
-    // line up for close in teardown
-    setMixedFS(mixedFSDir2);
-    // some of these paths will retain markers, some will not
-    CreatedPaths createdPaths = createPaths(mixedFSDir2, source);
-
-    // markers are only under dir2
-    markerTool(mixedFSDir2, toPath(source, "dir1"), false, 0);
-    markerTool(mixedFSDir2, source, false, expectedMarkersUnderDir2);
-
-    // full scan of source will fail
-    markerTool(EXIT_NOT_ACCEPTABLE,
-        mixedFSDir2, source, false, 0, 0, false);
-
-    // but add the -nonauth option and the markers under dir2 are skipped
-    markerTool(0, mixedFSDir2, source, false, 0, 0, true);
-
-    // if we now rename, all will be good
-    LOG.info("Executing rename");
-    mixedFSDir2.rename(source, dest);
-    assertIsDirectory(dest);
-
-    // there are no markers
-    MarkerTool.ScanResult scanResult = markerTool(mixedFSDir2, dest, false, 0);
-    // there are exactly the files we want
-    Assertions.assertThat(scanResult)
-        .describedAs("Scan result %s", scanResult)
-        .extracting(s -> s.getTracker().getFilesFound())
-        .isEqualTo(expectedFileCount);
-    verifyRenamed(dest, createdPaths);
-  }
-
   /**
   /**
    * Assert that an expected number of markers were deleted.
    * Assert that an expected number of markers were deleted.
    * @param expected expected count.
    * @param expected expected count.
@@ -247,8 +157,7 @@ public class ITestMarkerTool extends AbstractMarkerToolTest {
   @Test
   public void testRunAuditWithExpectedMarkers() throws Throwable {
     describe("Run a verbose audit expecting some markers");
-    // a run under the keeping FS will create paths
-    CreatedPaths createdPaths = createPaths(getKeepingFS(), methodPath());
+    CreatedPaths createdPaths = createPaths(getFileSystem(), methodPath());
     final File audit = tempAuditFile();
     run(MARKERS, V,
         AUDIT,
@@ -260,28 +169,10 @@ public class ITestMarkerTool extends AbstractMarkerToolTest {
     expectMarkersInOutput(audit, expectedMarkersWithBaseDir);
   }

-  @Test
-  public void testRunAuditWithExpectedMarkersSwappedMinMax() throws Throwable {
-    describe("Run a verbose audit with the min/max ranges swapped;"
-        + " see HADOOP-17332");
-    // a run under the keeping FS will create paths
-    CreatedPaths createdPaths = createPaths(getKeepingFS(), methodPath());
-    final File audit = tempAuditFile();
-    run(MARKERS, V,
-        AUDIT,
-        m(OPT_LIMIT), 0,
-        m(OPT_OUT), audit,
-        m(OPT_MIN), expectedMarkersWithBaseDir + 1,
-        m(OPT_MAX), expectedMarkersWithBaseDir - 1,
-        createdPaths.base);
-    expectMarkersInOutput(audit, expectedMarkersWithBaseDir);
-  }
-
   @Test
   public void testRunAuditWithExcessMarkers() throws Throwable {
     describe("Run a verbose audit failing as surplus markers were found");
-    // a run under the keeping FS will create paths
-    CreatedPaths createdPaths = createPaths(getKeepingFS(), methodPath());
+    CreatedPaths createdPaths = createPaths(getFileSystem(), methodPath());
     final File audit = tempAuditFile();
     runToFailure(EXIT_NOT_ACCEPTABLE, MARKERS, V,
         AUDIT,
@@ -293,7 +184,7 @@ public class ITestMarkerTool extends AbstractMarkerToolTest {
   @Test
   public void testRunLimitedAudit() throws Throwable {
     describe("Audit with a limited number of files (2)");
-    CreatedPaths createdPaths = createPaths(getKeepingFS(), methodPath());
+    CreatedPaths createdPaths = createPaths(getFileSystem(), methodPath());
     runToFailure(EXIT_INTERRUPTED,
         MARKERS, V,
         m(OPT_LIMIT), 2,
@@ -325,47 +216,14 @@ public class ITestMarkerTool extends AbstractMarkerToolTest {
   }

   @Test
-  public void testBucketInfoKeepingOnDeleting() throws Throwable {
-    describe("Run bucket info with the keeping config on the deleting fs");
-    runS3GuardCommandToFailure(uncachedFSConfig(getDeletingFS()),
-        EXIT_NOT_ACCEPTABLE,
-        BUCKET_INFO,
-        m(MARKERS), DIRECTORY_MARKER_POLICY_KEEP,
-        methodPath());
-  }
-
-  @Test
-  public void testBucketInfoKeepingOnKeeping() throws Throwable {
-    describe("Run bucket info with the keeping config on the keeping fs");
-    runS3GuardCommand(uncachedFSConfig(getKeepingFS()),
+  public void testBucketInfoKeeping() throws Throwable {
+    describe("Run bucket info with the keeping option");
+    runS3GuardCommand(uncachedFSConfig(getFileSystem()),
         BUCKET_INFO,
         m(MARKERS), DIRECTORY_MARKER_POLICY_KEEP,
         methodPath());
   }

-  @Test
-  public void testBucketInfoDeletingOnDeleting() throws Throwable {
-    describe("Run bucket info with the deleting config on the deleting fs");
-    runS3GuardCommand(uncachedFSConfig(getDeletingFS()),
-        BUCKET_INFO,
-        m(MARKERS), DIRECTORY_MARKER_POLICY_DELETE,
-        methodPath());
-  }
-
-  @Test
-  public void testBucketInfoAuthOnAuth() throws Throwable {
-    describe("Run bucket info with the auth FS");
-    Path base = methodPath();
-
-    S3AFileSystem authFS = createFS(DIRECTORY_MARKER_POLICY_AUTHORITATIVE,
-        base.toUri().toString());
-    // line up for close in teardown
-    setMixedFS(authFS);
-    runS3GuardCommand(uncachedFSConfig(authFS),
-        BUCKET_INFO,
-        m(MARKERS), DIRECTORY_MARKER_POLICY_AUTHORITATIVE,
-        methodPath());
-  }

   /**
    * Tracker of created paths.
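Of the four bucket-info probes, only the keep form survives; a hedged sketch of that remaining check, written in the style of the tests above (a method-level fragment assuming the same ITestMarkerTool helpers: uncachedFSConfig, m and runS3GuardCommand):

  @Test
  public void testBucketInfoReportsMarkerRetention() throws Throwable {
    // "-markers keep" is now the only marker policy bucket-info
    // can be asked to verify, and it should hold for any S3A store.
    runS3GuardCommand(uncachedFSConfig(getFileSystem()),
        BUCKET_INFO,
        m(MARKERS), DIRECTORY_MARKER_POLICY_KEEP,
        methodPath());
  }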

+ 0 - 205
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3xLoginHelper.java

@@ -1,205 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs.s3native;
-
-import java.net.URI;
-import java.net.URISyntaxException;
-
-import org.junit.Assert;
-import org.junit.Test;
-
-
-import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-
-/**
- * Test how URIs and login details are extracted from URIs.
- */
-public class TestS3xLoginHelper extends Assert {
-  public static final String BUCKET = "s3a://bucket";
-  private static final URI ENDPOINT = uri(BUCKET);
-  public static final String S = "%2f";
-  public static final String P = "%2b";
-  public static final String P_RAW = "+";
-  public static final String USER = "user";
-  public static final String PASLASHSLASH = "pa" + S + S;
-  public static final String PAPLUS = "pa" + P;
-  public static final String PAPLUS_RAW = "pa" + P_RAW;
-
-  public static final URI WITH_USER_AND_PASS = uri("s3a://user:pass@bucket");
-
-  public static final URI WITH_SLASH_IN_PASS = uri(
-      "s3a://user:" + PASLASHSLASH + "@bucket");
-  public static final URI WITH_PLUS_IN_PASS = uri(
-      "s3a://user:" + PAPLUS + "@bucket");
-  public static final URI WITH_PLUS_RAW_IN_PASS = uri(
-      "s3a://user:" + PAPLUS_RAW + "@bucket");
-  public static final URI USER_NO_PASS = uri("s3a://user@bucket");
-  public static final URI WITH_USER_AND_COLON = uri("s3a://user:@bucket");
-  public static final URI NO_USER = uri("s3a://:pass@bucket");
-  public static final URI NO_USER_NO_PASS = uri("s3a://:@bucket");
-  public static final URI NO_USER_NO_PASS_TWO_COLON = uri("s3a://::@bucket");
-
-  /**
-   * Construct a URI; raises an RTE if it won't parse.
-   * This allows it to be used in static constructors.
-   * @param s URI string
-   * @return the URI
-   * @throws RuntimeException on a URI syntax problem
-   */
-  private static URI uri(String s) {
-    try {
-      return new URI(s);
-    } catch (URISyntaxException e) {
-      throw new RuntimeException(e.toString(), e);
-    }
-  }
-
-  /**
-   * Assert that a built up FS URI matches the endpoint.
-   * @param uri URI to build the FS URI from
-   */
-  private void assertMatchesEndpoint(URI uri) {
-    assertEquals("Source " + uri,
-        ENDPOINT, S3xLoginHelper.buildFSURI(uri));
-  }
-
-  /**
-   * Assert that the supplied FS URI is invalid as it contains
-   * username:password secrets.
-   * @param uri URI to build the FS URI from
-   */
-  private void assertInvalid(URI uri) throws Exception {
-    intercept(IllegalArgumentException.class,
-    S3xLoginHelper.LOGIN_WARNING,
-    () -> S3xLoginHelper.buildFSURI(uri));
-  }
-
-  /**
-   * Assert that the login/pass details from a URI match that expected.
-   * @param user username
-   * @param pass password
-   * @param uri URI to build login details from
-   * @return the login tuple
-   */
-  private S3xLoginHelper.Login assertMatchesLogin(String user,
-      String pass, URI uri) {
-    S3xLoginHelper.Login expected = new S3xLoginHelper.Login(user, pass);
-    S3xLoginHelper.Login actual = S3xLoginHelper.extractLoginDetails(uri);
-    if (!expected.equals(actual)) {
-      Assert.fail("Source " + uri
-          + " login expected=:" + toString(expected)
-          + " actual=" + toString(actual));
-    }
-    return actual;
-  }
-
-  @Test
-  public void testSimpleFSURI() throws Throwable {
-    assertMatchesEndpoint(ENDPOINT);
-  }
-
-  @Test
-  public void testLoginSimple() throws Throwable {
-    S3xLoginHelper.Login login = assertMatchesLogin("", "", ENDPOINT);
-    assertFalse("Login of " + login, login.hasLogin());
-  }
-
-  @Test
-  public void testLoginWithUser() throws Throwable {
-    assertMatchesLogin(USER, "", USER_NO_PASS);
-  }
-
-  @Test
-  public void testLoginWithUserAndColon() throws Throwable {
-    assertMatchesLogin(USER, "", WITH_USER_AND_COLON);
-  }
-
-  @Test
-  public void testLoginNoUser() throws Throwable {
-    assertMatchesLogin("", "", NO_USER);
-  }
-
-  @Test
-  public void testLoginNoUserNoPass() throws Throwable {
-    assertMatchesLogin("", "", NO_USER_NO_PASS);
-  }
-
-  @Test
-  public void testLoginNoUserNoPassTwoColon() throws Throwable {
-    assertMatchesLogin("", "", NO_USER_NO_PASS_TWO_COLON);
-  }
-
-  @Test
-  public void testFsUriWithUserAndPass() throws Throwable {
-    assertInvalid(WITH_USER_AND_PASS);
-  }
-
-  @Test
-  public void testFsUriWithSlashInPass() throws Throwable {
-    assertInvalid(WITH_SLASH_IN_PASS);
-  }
-
-  @Test
-  public void testFsUriWithPlusInPass() throws Throwable {
-    assertInvalid(WITH_PLUS_IN_PASS);
-  }
-
-  @Test
-  public void testFsUriWithPlusRawInPass() throws Throwable {
-    assertInvalid(WITH_PLUS_RAW_IN_PASS);
-  }
-
-  @Test
-  public void testFsUriWithUser() throws Throwable {
-    assertInvalid(USER_NO_PASS);
-  }
-
-  @Test
-  public void testFsUriWithUserAndColon() throws Throwable {
-    assertInvalid(WITH_USER_AND_COLON);
-  }
-
-  @Test
-  public void testFsiNoUser() throws Throwable {
-    assertMatchesEndpoint(NO_USER);
-  }
-
-  @Test
-  public void testFsUriNoUserNoPass() throws Throwable {
-    assertMatchesEndpoint(NO_USER_NO_PASS);
-  }
-
-  @Test
-  public void testFsUriNoUserNoPassTwoColon() throws Throwable {
-    assertMatchesEndpoint(NO_USER_NO_PASS_TWO_COLON);
-  }
-
-  /**
-   * Stringifier. Kept in the code to avoid accidental logging in production
-   * code.
-   * @return login details for assertions.
-   */
-  public String toString(S3xLoginHelper.Login login) {
-    final StringBuilder sb = new StringBuilder("LoginTuple{");
-    sb.append("<'").append(login.getUser()).append('\'');
-    sb.append(", '").append(login.getPassword()).append('\'');
-    sb.append('>');
-    return sb.toString();
-  }
-}