Browse Source

HADOOP-19415. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-common Part6. (#7419)

* HADOOP-19415. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-common Part6.

Co-authored-by: Anuj Modi <anujmodi2011@gmail.com>
Co-authored-by: Hualong Zhang <hualong.z@hotmail.com>
Reviewed-by: Anuj Modi <anujmodi2011@gmail.com>
Reviewed-by: Hualong Zhang <hualong.z@hotmail.com>
Signed-off-by: Shilun Fan <slfan1989@apache.org>
slfan1989 4 weeks ago
parent
commit
8345235b66
100 changed files with 716 additions and 612 deletions
  1. 3 2
      hadoop-cloud-storage-project/hadoop-huaweicloud/src/test/java/org/apache/hadoop/fs/obs/TestOBSContractAppend.java
  2. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSCopyFromLocal.java
  3. 3 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java
  4. 3 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractBulkDeleteTest.java
  5. 3 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java
  6. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractContentSummaryTest.java
  7. 12 10
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java
  8. 19 19
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
  9. 7 7
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java
  10. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java
  11. 30 30
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetEnclosingRoot.java
  12. 27 26
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java
  13. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractLeaseRecoveryTest.java
  14. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
  15. 12 11
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java
  16. 22 24
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
  17. 55 41
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java
  18. 16 17
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java
  19. 11 9
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java
  20. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSafeModeTest.java
  21. 16 12
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java
  22. 3 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java
  23. 5 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractStreamIOStatisticsTest.java
  24. 11 9
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java
  25. 102 62
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java
  26. 19 27
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java
  27. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractCreate.java
  28. 5 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractLoaded.java
  29. 17 9
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractVectoredRead.java
  30. 1 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractVectoredRead.java
  31. 1 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java
  32. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractRename.java
  33. 5 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/wrappedio/impl/TestWrappedIO.java
  34. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppend.java
  35. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppendSecure.java
  36. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcat.java
  37. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcatSecure.java
  38. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreate.java
  39. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreateSecure.java
  40. 8 12
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java
  41. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelete.java
  42. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDeleteSecure.java
  43. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatus.java
  44. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatusSecure.java
  45. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdir.java
  46. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdirSecure.java
  47. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpen.java
  48. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpenSecure.java
  49. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRename.java
  50. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRenameSecure.java
  51. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectory.java
  52. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java
  53. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeek.java
  54. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeekSecure.java
  55. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimes.java
  56. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimesSecure.java
  57. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractAppend.java
  58. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractConcat.java
  59. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractCreate.java
  60. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractDelete.java
  61. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractMkdir.java
  62. 5 5
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractOpen.java
  63. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRename.java
  64. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java
  65. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractSeek.java
  66. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestDFSWrappedIO.java
  67. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java
  68. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractBulkDelete.java
  69. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java
  70. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java
  71. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java
  72. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java
  73. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractLeaseRecovery.java
  74. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java
  75. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java
  76. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java
  77. 5 8
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java
  78. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java
  79. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java
  80. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSafeMode.java
  81. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java
  82. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java
  83. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java
  84. 5 6
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractVectoredRead.java
  85. 5 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/AbstractManifestCommitterTest.java
  86. 3 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestCleanupStage.java
  87. 3 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestCommitTaskStage.java
  88. 3 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestCreateOutputDirectoriesStage.java
  89. 6 4
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestJobThroughManifestCommitter.java
  90. 5 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestLoadManifestsStage.java
  91. 7 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestManifestCommitProtocol.java
  92. 6 4
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestRenameStageFailure.java
  93. 3 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestTaskManifestFileIO.java
  94. 5 5
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/impl/TestEntryFileIO.java
  95. 1 1
      hadoop-project/pom.xml
  96. 2 0
      hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractGetFileStatusV1List.java
  97. 1 1
      hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractSeek.java
  98. 8 5
      hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/yarn/TestOSSMiniYarnCluster.java
  99. 8 2
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractAnalyticsStreamVectoredRead.java
  100. 23 15
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractBulkDelete.java

+ 3 - 2
hadoop-cloud-storage-project/hadoop-huaweicloud/src/test/java/org/apache/hadoop/fs/obs/TestOBSContractAppend.java

@@ -21,7 +21,8 @@ package org.apache.hadoop.fs.obs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.Assume;
+
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 /**
  * Append test cases on obs file system.
@@ -35,6 +36,6 @@ public class TestOBSContractAppend extends AbstractContractAppendTest {
 
   @Override
   public void testRenameFileBeingAppended() {
-    Assume.assumeTrue("unsupport.", false);
+    assumeTrue(false, "unsupport.");
   }
 }

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSCopyFromLocal.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.fs;
 
 import java.io.File;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractCopyFromLocalTest;

+ 3 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractAppendTest.java

@@ -22,7 +22,8 @@ import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -41,6 +42,7 @@ public abstract class AbstractContractAppendTest extends AbstractFSContractTestB
   private Path testPath;
   private Path target;
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();

+ 3 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractBulkDeleteTest.java

@@ -25,7 +25,8 @@ import java.util.List;
 import java.util.Map;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -74,6 +75,7 @@ public abstract class AbstractContractBulkDeleteTest extends AbstractFSContractT
    */
   private DynamicWrappedIO dynamicWrappedIO;
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();

+ 3 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractConcatTest.java

@@ -20,7 +20,8 @@ package org.apache.hadoop.fs.contract;
 
 import org.apache.hadoop.fs.Path;
 
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -44,6 +45,7 @@ public abstract class AbstractContractConcatTest extends AbstractFSContractTestB
   private Path zeroByteFile;
   private Path target;
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractContentSummaryTest.java

@@ -23,7 +23,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.io.FileNotFoundException;
 

+ 12 - 10
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCopyFromLocalTest.java

@@ -25,7 +25,8 @@ import java.nio.charset.Charset;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.io.IOUtils;
@@ -43,6 +44,7 @@ public abstract class AbstractContractCopyFromLocalTest extends
   private static final Charset ASCII = StandardCharsets.US_ASCII;
   private File file;
 
+  @AfterEach
   @Override
   public void teardown() throws Exception {
     super.teardown();
@@ -65,12 +67,12 @@ public abstract class AbstractContractCopyFromLocalTest extends
     Path dest = copyFromLocal(file, true);
 
     assertPathExists("uploaded file not found", dest);
-    assertTrue("source file deleted", Files.exists(file.toPath()));
+    assertTrue(Files.exists(file.toPath()), "source file deleted");
 
     FileSystem fs = getFileSystem();
     FileStatus status = fs.getFileStatus(dest);
-    assertEquals("File length not equal " + status,
-        message.getBytes(ASCII).length, status.getLen());
+    assertEquals(message.getBytes(ASCII).length, status.getLen(),
+        "File length not equal " + status);
     assertFileTextEquals(dest, message);
   }
 
@@ -109,7 +111,7 @@ public abstract class AbstractContractCopyFromLocalTest extends
     file = createTempFile("test");
     copyFromLocal(file, false, true);
 
-    assertFalse("Source file not deleted", Files.exists(file.toPath()));
+    assertFalse(Files.exists(file.toPath()), "Source file not deleted");
   }
 
   @Test
@@ -215,7 +217,7 @@ public abstract class AbstractContractCopyFromLocalTest extends
     copyFromLocal(source, false, true);
     Path dest = fileToPath(child, source.getParentFile());
 
-    assertFalse("Directory not deleted", Files.exists(source.toPath()));
+    assertFalse(Files.exists(source.toPath()), "Directory not deleted");
     assertFileTextEquals(dest, contents);
   }
 
@@ -258,8 +260,8 @@ public abstract class AbstractContractCopyFromLocalTest extends
     Path dst = path(srcDir.getFileName().toString());
     getFileSystem().copyFromLocalFile(true, true, src, dst);
 
-    assertFalse("Source directory was not deleted",
-        Files.exists(srcDir));
+    assertFalse(Files.exists(srcDir),
+        "Source directory was not deleted");
   }
 
   @Test
@@ -330,7 +332,7 @@ public abstract class AbstractContractCopyFromLocalTest extends
 
   private void assertFileTextEquals(Path path, String expected)
       throws IOException {
-    assertEquals("Wrong data in " + path,
-        expected, IOUtils.toString(getFileSystem().open(path), ASCII));
+    assertEquals(expected, IOUtils.toString(getFileSystem().open(path), ASCII),
+        "Wrong data in " + path);
   }
 }

+ 19 - 19
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java

@@ -27,7 +27,7 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StreamCapabilities;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.junit.AssumptionViolatedException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -278,8 +278,8 @@ public abstract class AbstractContractCreateTest extends
     FileSystem fs = getFileSystem();
 
     long rootPath = fs.getDefaultBlockSize(path("/"));
-    assertTrue("Root block size is invalid " + rootPath,
-        rootPath > 0);
+    assertTrue(rootPath > 0,
+        "Root block size is invalid " + rootPath);
 
     Path path = path("testFileStatusBlocksizeNonEmptyFile");
     byte[] data = dataset(256, 'a', 'z');
@@ -303,13 +303,13 @@ public abstract class AbstractContractCreateTest extends
     FileStatus status =
         getFileStatusEventually(fs, path, CREATE_TIMEOUT);
     String statusDetails = status.toString();
-    assertTrue("File status block size too low:  " + statusDetails
-            + " min value: " + minValue,
-        status.getBlockSize() >= minValue);
+    assertTrue(status.getBlockSize() >= minValue,
+        "File status block size too low:  " + statusDetails
+        + " min value: " + minValue);
     long defaultBlockSize = fs.getDefaultBlockSize(path);
-    assertTrue("fs.getDefaultBlockSize(" + path + ") size " +
-            defaultBlockSize + " is below the minimum of " + minValue,
-        defaultBlockSize >= minValue);
+    assertTrue(defaultBlockSize >= minValue,
+        "fs.getDefaultBlockSize(" + path + ") size " +
+        defaultBlockSize + " is below the minimum of " + minValue);
   }
 
   @Test
@@ -320,14 +320,14 @@ public abstract class AbstractContractCreateTest extends
     Path parent = new Path(grandparent, "parent");
     Path child = new Path(parent, "child");
     touch(fs, child);
-    assertEquals("List status of parent should include the 1 child file",
-        1, fs.listStatus(parent).length);
-    assertTrue("Parent directory does not appear to be a directory",
-        fs.getFileStatus(parent).isDirectory());
-    assertEquals("List status of grandparent should include the 1 parent dir",
-        1, fs.listStatus(grandparent).length);
-    assertTrue("Grandparent directory does not appear to be a directory",
-        fs.getFileStatus(grandparent).isDirectory());
+    assertEquals(1, fs.listStatus(parent).length,
+        "List status of parent should include the 1 child file");
+    assertTrue(fs.getFileStatus(parent).isDirectory(),
+        "Parent directory does not appear to be a directory");
+    assertEquals(1, fs.listStatus(grandparent).length,
+        "List status of grandparent should include the 1 parent dir");
+    assertTrue(fs.getFileStatus(grandparent).isDirectory(),
+        "Grandparent directory does not appear to be a directory");
   }
 
   @Test
@@ -531,8 +531,8 @@ public abstract class AbstractContractCreateTest extends
         final FileStatus st = fs.getFileStatus(path);
         if (metadataUpdatedOnHSync) {
           // not all stores reliably update it, HDFS/webHDFS in particular
-          assertEquals("Metadata not updated during write " + st,
-              2, st.getLen());
+          assertEquals(2, st.getLen(),
+              "Metadata not updated during write " + st);
         }
 
         // there's no way to verify durability, but we can

+ 7 - 7
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractDeleteTest.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.fs.contract;
 
 import org.apache.hadoop.fs.Path;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 
@@ -49,9 +49,9 @@ public abstract class AbstractContractDeleteTest extends
     Path path = path("testDeleteNonexistentPathRecursive");
     assertPathDoesNotExist("leftover", path);
     ContractTestUtils.rejectRootOperation(path);
-    assertFalse("Returned true attempting to recursively delete"
-                + " a nonexistent path " + path,
-                getFileSystem().delete(path, true));
+    assertFalse(getFileSystem().delete(path, true),
+        "Returned true attempting to recursively delete"
+        + " a nonexistent path " + path);
   }
 
   @Test
@@ -59,9 +59,9 @@ public abstract class AbstractContractDeleteTest extends
     Path path = path("testDeleteNonexistentPathNonRecursive");
     assertPathDoesNotExist("leftover", path);
     ContractTestUtils.rejectRootOperation(path);
-    assertFalse("Returned true attempting to non recursively delete"
-                + " a nonexistent path " + path,
-                getFileSystem().delete(path, false));
+    assertFalse(getFileSystem().delete(path, false),
+        "Returned true attempting to non recursively delete"
+        + " a nonexistent path " + path);
   }
 
   @Test

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractEtagTest.java

@@ -22,7 +22,7 @@ import java.nio.charset.StandardCharsets;
 
 import org.assertj.core.api.Assertions;
 import org.junit.Assume;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 

+ 30 - 30
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetEnclosingRoot.java

@@ -22,7 +22,7 @@ import java.security.PrivilegedExceptionAction;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -36,23 +36,20 @@ public abstract class AbstractContractGetEnclosingRoot extends AbstractFSContrac
     Path root = path("/");
     Path foobar = path("/foo/bar");
 
-    assertEquals("Ensure getEnclosingRoot on the root directory returns the root directory",
-        root, fs.getEnclosingRoot(foobar));
-    assertEquals("Ensure getEnclosingRoot called on itself returns the root directory",
-        root, fs.getEnclosingRoot(fs.getEnclosingRoot(foobar)));
-    assertEquals(
+    assertEquals(root, fs.getEnclosingRoot(foobar),
+        "Ensure getEnclosingRoot on the root directory returns the root directory");
+    assertEquals(root, fs.getEnclosingRoot(fs.getEnclosingRoot(foobar)),
+        "Ensure getEnclosingRoot called on itself returns the root directory");
+    assertEquals(fs.getEnclosingRoot(root), fs.getEnclosingRoot(foobar),
         "Ensure getEnclosingRoot for different paths in the same enclosing root "
-            + "returns the same path",
-        fs.getEnclosingRoot(root), fs.getEnclosingRoot(foobar));
-    assertEquals("Ensure getEnclosingRoot on a path returns the root directory",
-        root, fs.getEnclosingRoot(methodPath()));
-    assertEquals("Ensure getEnclosingRoot called on itself on a path returns the root directory",
-        root, fs.getEnclosingRoot(fs.getEnclosingRoot(methodPath())));
-    assertEquals(
+        + "returns the same path");
+    assertEquals(root, fs.getEnclosingRoot(methodPath()),
+        "Ensure getEnclosingRoot on a path returns the root directory");
+    assertEquals(root, fs.getEnclosingRoot(fs.getEnclosingRoot(methodPath())),
+        "Ensure getEnclosingRoot called on itself on a path returns the root directory");
+    assertEquals(fs.getEnclosingRoot(root), fs.getEnclosingRoot(methodPath()),
         "Ensure getEnclosingRoot for different paths in the same enclosing root "
-            + "returns the same path",
-        fs.getEnclosingRoot(root),
-        fs.getEnclosingRoot(methodPath()));
+        + "returns the same path");
   }
 
 
@@ -63,11 +60,12 @@ public abstract class AbstractContractGetEnclosingRoot extends AbstractFSContrac
     Path foobar = methodPath();
     fs.mkdirs(foobar);
 
-    assertEquals(
-        "Ensure getEnclosingRoot returns the root directory when the root directory exists",
-        root, fs.getEnclosingRoot(foobar));
-    assertEquals("Ensure getEnclosingRoot returns the root directory when the directory exists",
-        root, fs.getEnclosingRoot(foobar));
+    assertEquals(root, fs.getEnclosingRoot(foobar),
+        "Ensure getEnclosingRoot returns the root directory " +
+        "when the root directory exists");
+    assertEquals(root, fs.getEnclosingRoot(foobar),
+        "Ensure getEnclosingRoot returns the root directory " +
+        "when the directory exists");
   }
 
   @Test
@@ -77,12 +75,12 @@ public abstract class AbstractContractGetEnclosingRoot extends AbstractFSContrac
     Path root = path("/");
 
     // .
-    assertEquals(
-        "Ensure getEnclosingRoot returns the root directory even when the path does not exist",
-        root, fs.getEnclosingRoot(foobar));
-    assertEquals(
-        "Ensure getEnclosingRoot returns the root directory even when the path does not exist",
-        root, fs.getEnclosingRoot(methodPath()));
+    assertEquals(root, fs.getEnclosingRoot(foobar),
+        "Ensure getEnclosingRoot returns the root directory " +
+        "even when the path does not exist");
+    assertEquals(root, fs.getEnclosingRoot(methodPath()),
+        "Ensure getEnclosingRoot returns the root directory " +
+        "even when the path does not exist");
   }
 
   @Test
@@ -90,14 +88,16 @@ public abstract class AbstractContractGetEnclosingRoot extends AbstractFSContrac
     FileSystem fs = getFileSystem();
     Path root = path("/");
 
-    assertEquals("Ensure getEnclosingRoot returns the root directory when the directory exists",
-        root, fs.getEnclosingRoot(new Path("/foo/bar")));
+    assertEquals(root, fs.getEnclosingRoot(new Path("/foo/bar")),
+        "Ensure getEnclosingRoot returns the root directory " +
+        "when the directory exists");
 
     UserGroupInformation ugi = UserGroupInformation.createRemoteUser("foo");
     Path p = ugi.doAs((PrivilegedExceptionAction<Path>) () -> {
       FileSystem wFs = getContract().getTestFileSystem();
       return wFs.getEnclosingRoot(new Path("/foo/bar"));
     });
-    assertEquals("Ensure getEnclosingRoot works correctly within a wrapped FileSystem", root, p);
+    assertEquals(root, p, "Ensure getEnclosingRoot works correctly " +
+        "within a wrapped FileSystem");
   }
 }

+ 27 - 26
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java

@@ -33,7 +33,8 @@ import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
@@ -55,6 +56,7 @@ public abstract class AbstractContractGetFileStatusTest extends
   private static final int TREE_FILES = 4;
   private static final int TREE_FILESIZE = 512;
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();
@@ -284,10 +286,9 @@ public abstract class AbstractContractGetFileStatusTest extends
     treeWalk.assertFieldsEquivalent("files", listing,
         treeWalk.getFiles(),
         listing.getFiles());
-    assertEquals("Size of status list through next() calls",
-        count,
-        toListThroughNextCallsAlone(
-            fs.listFiles(tree.getBasePath(), true)).size());
+    assertEquals(count, toListThroughNextCallsAlone(
+        fs.listFiles(tree.getBasePath(), true)).size(),
+        "Size of status list through next() calls");
   }
 
   @Test
@@ -398,12 +399,12 @@ public abstract class AbstractContractGetFileStatusTest extends
     Path f = touchf("listfilesfile");
     List<LocatedFileStatus> statusList = toList(
         getFileSystem().listFiles(f, false));
-    assertEquals("size of file list returned", 1, statusList.size());
+    assertEquals(1, statusList.size(), "size of file list returned");
     assertIsNamedFile(f, statusList.get(0));
     List<LocatedFileStatus> statusList2 = toListThroughNextCallsAlone(
         getFileSystem().listFiles(f, false));
-    assertEquals("size of file list returned through next() calls",
-        1, statusList2.size());
+    assertEquals(1, statusList2.size(),
+        "size of file list returned through next() calls");
     assertIsNamedFile(f, statusList2.get(0));
   }
 
@@ -413,11 +414,11 @@ public abstract class AbstractContractGetFileStatusTest extends
     Path f = touchf("listfilesRecursive");
     List<LocatedFileStatus> statusList = toList(
         getFileSystem().listFiles(f, true));
-    assertEquals("size of file list returned", 1, statusList.size());
+    assertEquals(1, statusList.size(), "size of file list returned");
     assertIsNamedFile(f, statusList.get(0));
     List<LocatedFileStatus> statusList2 = toListThroughNextCallsAlone(
         getFileSystem().listFiles(f, true));
-    assertEquals("size of file list returned", 1, statusList2.size());
+    assertEquals(1, statusList2.size(),
+        "size of file list returned through next() calls");
   }
 
   @Test
@@ -426,12 +427,12 @@ public abstract class AbstractContractGetFileStatusTest extends
     Path f = touchf("listLocatedStatus");
     List<LocatedFileStatus> statusList = toList(
         getFileSystem().listLocatedStatus(f));
-    assertEquals("size of file list returned", 1, statusList.size());
+    assertEquals(1, statusList.size(), "size of file list returned");
     assertIsNamedFile(f, statusList.get(0));
     List<LocatedFileStatus> statusList2 = toListThroughNextCallsAlone(
         getFileSystem().listLocatedStatus(f));
-    assertEquals("size of file list returned through next() calls",
-        1, statusList2.size());
+    assertEquals(1, statusList2.size(),
+        "size of file list returned through next() calls");
   }
 
   /**
@@ -451,8 +452,8 @@ public abstract class AbstractContractGetFileStatusTest extends
    * @param fileStatus status to validate
    */
   private void assertIsNamedFile(Path f, FileStatus fileStatus) {
-    assertEquals("Wrong pathname in " + fileStatus, f, fileStatus.getPath());
-    assertTrue("Not a file: " + fileStatus, fileStatus.isFile());
+    assertEquals(f, fileStatus.getPath(), "Wrong pathname in " + fileStatus);
+    assertTrue(fileStatus.isFile(), "Not a file: " + fileStatus);
   }
 
   /**
@@ -515,10 +516,10 @@ public abstract class AbstractContractGetFileStatusTest extends
       count++;
       LocatedFileStatus next = results.next();
       FileStatus fileStatus = getFileSystem().getFileStatus(next.getPath());
-      assertEquals("isDirectory", fileStatus.isDirectory(), next.isDirectory());
-      assertEquals("isFile", fileStatus.isFile(), next.isFile());
-      assertEquals("getLen", fileStatus.getLen(), next.getLen());
-      assertEquals("getOwner", fileStatus.getOwner(), next.getOwner());
+      assertEquals(fileStatus.isDirectory(), next.isDirectory(), "isDirectory");
+      assertEquals(fileStatus.isFile(), next.isFile(), "isFile");
+      assertEquals(fileStatus.getLen(), next.getLen(), "getLen");
+      assertEquals(fileStatus.getOwner(), next.getOwner(), "getOwner");
     }
     return count;
   }
@@ -604,9 +605,9 @@ public abstract class AbstractContractGetFileStatusTest extends
       Path path,
       PathFilter filter) throws IOException {
     FileStatus[] result = getFileSystem().listStatus(path, filter);
-    assertEquals("length of listStatus(" + path + ", " + filter + " ) " +
-        Arrays.toString(result),
-        expected, result.length);
+    assertEquals(expected, result.length,
+        "length of listStatus(" + path + ", " + filter + " ) " +
+        Arrays.toString(result));
     return result;
   }
 
@@ -626,8 +627,8 @@ public abstract class AbstractContractGetFileStatusTest extends
       PathFilter filter) throws IOException {
     RemoteIterator<LocatedFileStatus> it = xfs.listLocatedStatus(path, filter);
     List<LocatedFileStatus> result = toList(it);
-    assertEquals("length of listLocatedStatus(" + path + ", " + filter + " )",
-        expected, result.size());
+    assertEquals(expected, result.size(),
+        "length of listLocatedStatus(" + path + ", " + filter + " )");
     return result;
   }
 
@@ -650,8 +651,8 @@ public abstract class AbstractContractGetFileStatusTest extends
       PathFilter filter) throws IOException {
     RemoteIterator<LocatedFileStatus> it = xfs.listLocatedStatus(path, filter);
     List<LocatedFileStatus> result = toListThroughNextCallsAlone(it);
-    assertEquals("length of listLocatedStatus(" + path + ", " + filter + " )",
-        expected, result.size());
+    assertEquals(expected, result.size(),
+        "length of listLocatedStatus(" + path + ", " + filter + " )");
     return result;
   }
 

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractLeaseRecoveryTest.java

@@ -22,7 +22,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LeaseRecoverable;

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java

@@ -22,7 +22,7 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 

+ 12 - 11
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java

@@ -30,8 +30,8 @@ import java.util.Random;
 import java.util.concurrent.CompletableFuture;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Assume;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -55,6 +55,7 @@ import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;
 import static org.apache.hadoop.test.LambdaTestUtils.eventually;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.apache.hadoop.util.functional.FutureIO.awaitFuture;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 /**
  * Tests of multipart uploads.
@@ -83,15 +84,16 @@ public abstract class AbstractContractMultipartUploaderTest extends
   private UploadHandle activeUpload;
   private Path activeUploadPath;
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();
 
     final FileSystem fs = getFileSystem();
     Path testPath = getContract().getTestPath();
-    Assume.assumeTrue("Multipart uploader is not supported",
-        fs.hasPathCapability(testPath,
-            CommonPathCapabilities.FS_MULTIPART_UPLOADER));
+    assumeTrue(fs.hasPathCapability(testPath,
+        CommonPathCapabilities.FS_MULTIPART_UPLOADER),
+        "Multipart uploader is not supported");
     uploader0 = fs.createMultipartUploader(testPath).build();
     uploader1 = fs.createMultipartUploader(testPath).build();
   }
@@ -264,8 +266,8 @@ public abstract class AbstractContractMultipartUploaderTest extends
     } else {
       // otherwise, the same or other uploader can try again.
       PathHandle fd2 = complete(completer, uploadHandle, file, partHandles);
-      assertArrayEquals("Path handles differ", fd.toByteArray(),
-          fd2.toByteArray());
+      assertArrayEquals(fd.toByteArray(),
+          fd2.toByteArray(), "Path handles differ");
     }
   }
 
@@ -791,9 +793,8 @@ public abstract class AbstractContractMultipartUploaderTest extends
     UploadHandle upload2;
     try {
       upload2 = startUpload(file);
-      Assume.assumeTrue(
-          "The Filesystem is unexpectedly supporting concurrent uploads",
-          concurrent);
+      assumeTrue(concurrent,
+          "The Filesystem is unexpectedly supporting concurrent uploads");
     } catch (IOException e) {
       if (!concurrent) {
         // this is expected, so end the test
@@ -805,7 +806,7 @@ public abstract class AbstractContractMultipartUploaderTest extends
     }
     Map<Integer, PartHandle> partHandles2 = new HashMap<>();
 
-    assertNotEquals("Upload handles match", upload1, upload2);
+    assertNotEquals(upload1, upload2, "Upload handles match");
 
     // put part 1
     partHandles1.put(partId1, putPart(file, upload1, partId1, false, payload1));

+ 22 - 24
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java

@@ -47,7 +47,8 @@ import static org.apache.hadoop.test.LambdaTestUtils.interceptFuture;
 import static org.apache.hadoop.util.functional.FutureIO.awaitFuture;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test Open operations.
@@ -64,6 +65,7 @@ public abstract class AbstractContractOpenTest
     return conf;
   }
 
+  @AfterEach
   @Override
   public void teardown() throws Exception {
     IOUtils.closeStream(instream);
@@ -84,13 +86,12 @@ public abstract class AbstractContractOpenTest
 
   @Test
   public void testFsIsEncrypted() throws Exception {
-      describe("create an empty file and call FileStatus.isEncrypted()");
-      final Path path = path("file");
-      createFile(getFileSystem(), path, false, new byte[0]);
-      final FileStatus stat = getFileSystem().getFileStatus(path);
-      assertEquals("Result wrong for for isEncrypted() in " + stat,
-          areZeroByteFilesEncrypted(),
-          stat.isEncrypted());
+    describe("create an empty file and call FileStatus.isEncrypted()");
+    final Path path = path("file");
+    createFile(getFileSystem(), path, false, new byte[0]);
+    final FileStatus stat = getFileSystem().getFileStatus(path);
+    assertEquals(areZeroByteFilesEncrypted(),
+        stat.isEncrypted(), "Result wrong for isEncrypted() in " + stat);
   }
 
   /**
@@ -155,10 +156,10 @@ public abstract class AbstractContractOpenTest
       int c = instream1.read();
       assertEquals(0,c);
       instream2 = getFileSystem().open(path);
-      assertEquals("first read of instream 2", 0, instream2.read());
-      assertEquals("second read of instream 1", 1, instream1.read());
+      assertEquals(0, instream2.read(), "first read of instream 2");
+      assertEquals(1, instream1.read(), "second read of instream 1");
       instream1.close();
-      assertEquals("second read of instream 2", 1, instream2.read());
+      assertEquals(1, instream2.read(), "second read of instream 2");
       //close instream1 again
       instream1.close();
     } finally {
@@ -241,8 +242,8 @@ public abstract class AbstractContractOpenTest
     FutureDataInputStreamBuilder builder =
         getFileSystem().openFile(path("testOpenFileFailExceptionally"))
             .opt("fs.test.something", true);
-    assertNull("exceptional uprating",
-        builder.build().exceptionally(ex -> null).get());
+    assertNull(builder.build().exceptionally(ex -> null).get(),
+        "exceptional uprating");
   }
 
   @Test
@@ -306,9 +307,8 @@ public abstract class AbstractContractOpenTest
         .withFileStatus(st)
         .build()
         .thenApply(ContractTestUtils::readStream);
-    assertEquals("Wrong number of bytes read value",
-        len,
-        (long) readAllBytes.get());
+    assertEquals(len, (long) readAllBytes.get(),
+        "Wrong number of bytes read value");
     // now reattempt with a new FileStatus and a different path
     // other than the final name element
     // implementations MUST use path in openFile() call
@@ -322,13 +322,12 @@ public abstract class AbstractContractOpenTest
         st.getOwner(),
         st.getGroup(),
         new Path("gopher:///localhost:/" + path.getName()));
-    assertEquals("Wrong number of bytes read value",
-        len,
+    assertEquals(len,
         (long) fs.openFile(path)
-            .withFileStatus(st2)
-            .build()
-            .thenApply(ContractTestUtils::readStream)
-            .get());
+        .withFileStatus(st2)
+        .build()
+        .thenApply(ContractTestUtils::readStream)
+        .get(), "Wrong number of bytes read value");
   }
 
   @Test
@@ -347,8 +346,7 @@ public abstract class AbstractContractOpenTest
       accepted.set(true);
       return ContractTestUtils.readStream(stream);
     }).get();
-    assertTrue("async accept operation not invoked",
-        accepted.get());
+    assertTrue(accepted.get(), "async accept operation not invoked");
     Assertions.assertThat(bytes)
         .describedAs("bytes read from stream")
         .isEqualTo(len);

+ 55 - 41
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractPathHandleTest.java

@@ -44,9 +44,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
 import static org.apache.hadoop.test.LambdaTestUtils.interceptFuture;
 
 import org.apache.hadoop.fs.RawPathHandle;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 
 /**
  * Test {@link PathHandle} operations and semantics.
@@ -56,26 +55,26 @@ import org.junit.runners.Parameterized;
  * @see org.apache.hadoop.fs.FileSystem#open(PathHandle)
  * @see org.apache.hadoop.fs.FileSystem#open(PathHandle, int)
  */
-@RunWith(Parameterized.class)
 public abstract class AbstractContractPathHandleTest
     extends AbstractFSContractTestBase {
 
-  private final HandleOpt[] opts;
-  private final boolean serialized;
+  private HandleOpt[] opts;
+  private boolean serialized;
 
   private static final byte[] B1 = dataset(TEST_FILE_LEN, 43, 255);
   private static final byte[] B2 = dataset(TEST_FILE_LEN, 44, 255);
 
   /**
    * Create an instance of the test from {@link #params()}.
-   * @param testname Name of the set of options under test
-   * @param opts Set of {@link HandleOpt} params under test.
-   * @param serialized Serialize the handle before using it.
+   * @param pTestname Name of the set of options under test
+   * @param pOpts Set of {@link HandleOpt} params under test.
+   * @param pSerialized Serialize the handle before using it.
    */
-  public AbstractContractPathHandleTest(String testname, HandleOpt[] opts,
-      boolean serialized) {
-    this.opts = opts;
-    this.serialized = serialized;
+  public void initAbstractContractPathHandleTest(
+      String pTestname, HandleOpt[] pOpts,
+      boolean pSerialized) {
+    this.opts = pOpts;
+    this.serialized = pSerialized;
   }
 
   /**
@@ -83,7 +82,6 @@ public abstract class AbstractContractPathHandleTest
    * after converting the PathHandle to bytes and back.
    * @return
    */
-  @Parameterized.Parameters(name="Test{0}")
   public static Collection<Object[]> params() {
     return Arrays.asList(
         Arrays.asList("Exact", HandleOpt.exact()),
@@ -108,8 +106,11 @@ public abstract class AbstractContractPathHandleTest
     return conf;
   }
 
-  @Test
-  public void testIdent() throws IOException {
+  @MethodSource("params")
+  @ParameterizedTest
+  public void testIdent(String pTestname, HandleOpt[] pOpts,
+      boolean pSerialized) throws IOException {
+    initAbstractContractPathHandleTest(pTestname, pOpts, pSerialized);
     describe("verify simple open, no changes");
     FileStatus stat = testFile(B1);
     PathHandle fd = getHandleOrSkip(stat);
@@ -120,8 +121,11 @@ public abstract class AbstractContractPathHandleTest
     }
   }
 
-  @Test
-  public void testChanged() throws IOException {
+  @MethodSource("params")
+  @ParameterizedTest
+  public void testChanged(String pTestname, HandleOpt[] pOpts,
+      boolean pSerialized) throws IOException {
+    initAbstractContractPathHandleTest(pTestname, pOpts, pSerialized);
     describe("verify open(PathHandle, changed(*))");
     assumeSupportsContentCheck();
     HandleOpt.Data data = HandleOpt.getOpt(HandleOpt.Data.class, opts)
@@ -143,15 +147,18 @@ public abstract class AbstractContractPathHandleTest
     PathHandle fd = getHandleOrSkip(stat);
 
     try (FSDataInputStream in = getFileSystem().open(fd)) {
-      assertTrue("Failed to detect content change", data.allowChange());
+      assertTrue(data.allowChange(), "Failed to detect content change");
       verifyRead(in, b12, 0, b12.length);
     } catch (InvalidPathHandleException e) {
-      assertFalse("Failed to allow content change", data.allowChange());
+      assertFalse(data.allowChange(), "Failed to allow content change");
     }
   }
 
-  @Test
-  public void testMoved() throws IOException {
+  @MethodSource("params")
+  @ParameterizedTest
+  public void testMoved(String pTestname, HandleOpt[] pOpts,
+      boolean pSerialized) throws IOException {
+    initAbstractContractPathHandleTest(pTestname, pOpts, pSerialized);
     describe("verify open(PathHandle, moved(*))");
     assumeSupportsFileReference();
     HandleOpt.Location loc = HandleOpt.getOpt(HandleOpt.Location.class, opts)
@@ -164,15 +171,18 @@ public abstract class AbstractContractPathHandleTest
     PathHandle fd = getHandleOrSkip(stat);
 
     try (FSDataInputStream in = getFileSystem().open(fd)) {
-      assertTrue("Failed to detect location change", loc.allowChange());
+      assertTrue(loc.allowChange(), "Failed to detect location change");
       verifyRead(in, B1, 0, B1.length);
     } catch (InvalidPathHandleException e) {
-      assertFalse("Failed to allow location change", loc.allowChange());
+      assertFalse(loc.allowChange(), "Failed to allow location change");
     }
   }
 
-  @Test
-  public void testChangedAndMoved() throws IOException {
+  @MethodSource("params")
+  @ParameterizedTest
+  public void testChangedAndMoved(String pTestname, HandleOpt[] pOpts,
+      boolean pSerialized) throws IOException {
+    initAbstractContractPathHandleTest(pTestname, pOpts, pSerialized);
     describe("verify open(PathHandle, changed(*), moved(*))");
     assumeSupportsFileReference();
     assumeSupportsContentCheck();
@@ -189,15 +199,15 @@ public abstract class AbstractContractPathHandleTest
     byte[] b12 = Arrays.copyOf(B1, B1.length + B2.length);
     System.arraycopy(B2, 0, b12, B1.length, B2.length);
     try (FSDataInputStream in = getFileSystem().open(fd)) {
-      assertTrue("Failed to detect location change", loc.allowChange());
-      assertTrue("Failed to detect content change", data.allowChange());
+      assertTrue(loc.allowChange(), "Failed to detect location change");
+      assertTrue(data.allowChange(), "Failed to detect content change");
       verifyRead(in, b12, 0, b12.length);
     } catch (InvalidPathHandleException e) {
       if (data.allowChange()) {
-        assertFalse("Failed to allow location change", loc.allowChange());
+        assertFalse(loc.allowChange(), "Failed to allow location change");
       }
       if (loc.allowChange()) {
-        assertFalse("Failed to allow content change", data.allowChange());
+        assertFalse(data.allowChange(), "Failed to allow content change");
       }
     }
   }
@@ -255,7 +265,8 @@ public abstract class AbstractContractPathHandleTest
   }
 
 
-  @Test
-  public void testOpenFileApplyRead() throws Throwable {
+  @MethodSource("params")
+  @ParameterizedTest
+  public void testOpenFileApplyRead(String pTestname, HandleOpt[] pOpts,
+      boolean pSerialized) throws Throwable {
+    initAbstractContractPathHandleTest(pTestname, pOpts, pSerialized);
     describe("use the apply sequence to read a whole file");
     CompletableFuture<Long> readAllBytes = getFileSystem()
@@ -264,13 +275,15 @@ public abstract class AbstractContractPathHandleTest
                 testFile(B1)))
         .build()
         .thenApply(ContractTestUtils::readStream);
-    assertEquals("Wrong number of bytes read value",
-        TEST_FILE_LEN,
-        (long) readAllBytes.get());
+    assertEquals(TEST_FILE_LEN,
+        (long) readAllBytes.get(), "Wrong number of bytes read value");
   }
 
-  @Test
-  public void testOpenFileDelete() throws Throwable {
+  @MethodSource("params")
+  @ParameterizedTest
+  public void testOpenFileDelete(String pTestname, HandleOpt[] pOpts,
+      boolean pSerialized) throws Throwable {
+    initAbstractContractPathHandleTest(pTestname, pOpts, pSerialized);
     describe("use the apply sequence to read a whole file");
     FileStatus testFile = testFile(B1);
     PathHandle handle = getHandleOrSkip(testFile);
@@ -295,8 +308,10 @@ public abstract class AbstractContractPathHandleTest
     }
   }
 
-  @Test
-  public void testOpenFileLazyFail() throws Throwable {
+  @MethodSource("params")
+  @ParameterizedTest
+  public void testOpenFileLazyFail(String pTestname, HandleOpt[] pOpts,
+      boolean pSerialized) throws Throwable {
+    initAbstractContractPathHandleTest(pTestname, pOpts, pSerialized);
     describe("openFile fails on a misssng file in the get() and not before");
     FileStatus stat = testFile(B1);
     CompletableFuture<Long> readAllBytes = getFileSystem()
@@ -305,9 +320,8 @@ public abstract class AbstractContractPathHandleTest
                 stat))
         .build()
         .thenApply(ContractTestUtils::readStream);
-    assertEquals("Wrong number of bytes read value",
-        TEST_FILE_LEN,
-        (long) readAllBytes.get());
+    assertEquals(TEST_FILE_LEN,
+        (long) readAllBytes.get(), "Wrong number of bytes read value");
   }
 
 }

+ 16 - 17
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRenameTest.java

@@ -21,7 +21,7 @@ package org.apache.hadoop.fs.contract;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -43,8 +43,8 @@ public abstract class AbstractContractRenameTest extends
     writeDataset(getFileSystem(), renameSrc,
         data, data.length, 1024 * 1024, false);
     boolean rename = rename(renameSrc, renameTarget);
-    assertTrue("rename("+renameSrc+", "+ renameTarget+") returned false",
-        rename);
+    assertTrue(rename,
+        "rename("+renameSrc+", "+ renameTarget+") returned false");
     assertListStatusFinds(getFileSystem(),
         renameTarget.getParent(), renameTarget);
     verifyFileContents(getFileSystem(), renameTarget, data);
@@ -70,7 +70,7 @@ public abstract class AbstractContractRenameTest extends
         // at least one FS only returns false here, if that is the case
         // warn but continue
         getLogger().warn("Rename returned {} renaming a nonexistent file", renamed);
-        assertFalse("Renaming a missing file returned true", renamed);
+        assertFalse(renamed, "Renaming a missing file returned true");
       }
     } catch (FileNotFoundException e) {
       if (renameReturnsFalseOnFailure) {
@@ -105,9 +105,9 @@ public abstract class AbstractContractRenameTest extends
     boolean renameOverwritesDest = isSupported(RENAME_OVERWRITES_DEST);
     boolean renameReturnsFalseOnRenameDestExists =
         isSupported(RENAME_RETURNS_FALSE_IF_DEST_EXISTS);
-    assertFalse(RENAME_OVERWRITES_DEST + " and " +
-        RENAME_RETURNS_FALSE_IF_DEST_EXISTS + " cannot be both supported",
-        renameOverwritesDest && renameReturnsFalseOnRenameDestExists);
+    assertFalse(renameOverwritesDest && renameReturnsFalseOnRenameDestExists,
+        RENAME_OVERWRITES_DEST + " and " +
+        RENAME_RETURNS_FALSE_IF_DEST_EXISTS + " cannot be both supported");
     String expectedTo = "expected rename(" + srcFile + ", " + destFile + ") to ";
 
     boolean destUnchanged = true;
@@ -117,11 +117,10 @@ public abstract class AbstractContractRenameTest extends
       destUnchanged = !renamed;
 
       if (renameOverwritesDest) {
-        assertTrue(expectedTo + "overwrite destination, but got false",
-            renamed);
+        assertTrue(renamed, expectedTo + "overwrite destination, but got false");
       } else if (renameReturnsFalseOnRenameDestExists) {
-        assertFalse(expectedTo + "be rejected with false, but destination " +
-            "was overwritten", renamed);
+        assertFalse(renamed, expectedTo + "be rejected with false, but destination " +
+            "was overwritten");
       } else if (renamed) {
         String destDirLS = generateAndLogErrorListing(srcFile, destFile);
         getLogger().error("dest dir {}", destDirLS);
@@ -133,10 +132,10 @@ public abstract class AbstractContractRenameTest extends
     } catch (FileAlreadyExistsException e) {
       // rename(file, file2) should throw exception iff
       // it neither overwrites nor returns false
-      assertFalse(expectedTo + "overwrite destination, but got exception",
-          renameOverwritesDest);
-      assertFalse(expectedTo + "be rejected with false, but got exception",
-          renameReturnsFalseOnRenameDestExists);
+      assertFalse(renameOverwritesDest,
+          expectedTo + "overwrite destination, but got exception");
+      assertFalse(renameReturnsFalseOnRenameDestExists,
+          expectedTo + "be rejected with false, but got exception");
 
       handleExpectedException(e);
     }
@@ -170,7 +169,7 @@ public abstract class AbstractContractRenameTest extends
     assertIsFile(destFilePath);
     assertIsDirectory(renamedSrc);
     verifyFileContents(fs, destFilePath, destData);
-    assertTrue("rename returned false though the contents were copied", rename);
+    assertTrue(rename, "rename returned false though the contents were copied");
   }
 
   @Test
@@ -348,7 +347,7 @@ public abstract class AbstractContractRenameTest extends
       outcome = "rename raised an exception: " + e;
     }
     assertPathDoesNotExist("after " + outcome, renameTarget);
-    assertFalse(outcome, renamed);
+    assertFalse(renamed, outcome);
     assertPathExists(action, renameSrc);
   }
 

+ 11 - 9
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractRootDirectoryTest.java

@@ -21,7 +21,8 @@ package org.apache.hadoop.fs.contract;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.assertj.core.api.Assertions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -56,6 +57,7 @@ public abstract class AbstractContractRootDirectoryTest extends AbstractFSContra
       LoggerFactory.getLogger(AbstractContractRootDirectoryTest.class);
   public static final int OBJECTSTORE_RETRY_TIMEOUT = 30000;
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();
@@ -230,16 +232,16 @@ public abstract class AbstractContractRootDirectoryTest extends AbstractFSContra
         fs.listLocatedStatus(root));
     String locatedStatusResult = join(locatedStatusList, "\n");
 
-    assertEquals("listStatus(/) vs listLocatedStatus(/) with \n"
-            + "listStatus =" + listStatusResult
-            +" listLocatedStatus = " + locatedStatusResult,
-        statuses.length, locatedStatusList.size());
+    assertEquals(statuses.length,
+        locatedStatusList.size(), "listStatus(/) vs listLocatedStatus(/) with \n"
+        + "listStatus =" + listStatusResult
+        +" listLocatedStatus = " + locatedStatusResult);
     List<LocatedFileStatus> fileList = toList(fs.listFiles(root, false));
     String listFilesResult = join(fileList, "\n");
-    assertTrue("listStatus(/) vs listFiles(/, false) with \n"
-            + "listStatus = " + listStatusResult
-            + "listFiles = " + listFilesResult,
-        fileList.size() <= statuses.length);
+    assertTrue(fileList.size() <= statuses.length,
+        "listStatus(/) vs listFiles(/, false) with \n"
+        + "listStatus = " + listStatusResult
+        + "listFiles = " + listFilesResult);
     List<FileStatus> statusList = (List<FileStatus>) iteratorToList(
             fs.listStatusIterator(root));
     Assertions.assertThat(statusList)

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSafeModeTest.java

@@ -19,7 +19,7 @@
 package org.apache.hadoop.fs.contract;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.SafeMode;

+ 16 - 12
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSeekTest.java

@@ -24,7 +24,9 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -51,6 +53,7 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
   private Path zeroByteFile;
   private FSDataInputStream instream;
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();
@@ -72,6 +75,7 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
     return conf;
   }
 
+  @AfterEach
   @Override
   public void teardown() throws Exception {
     IOUtils.closeStream(instream);
@@ -225,8 +229,8 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
     //expect that seek to 0 works
     //go just before the end
     instream.seek(TEST_FILE_LEN - 2);
-    assertTrue("Premature EOF", instream.read() != -1);
-    assertTrue("Premature EOF", instream.read() != -1);
+    assertTrue(instream.read() != -1, "Premature EOF");
+    assertTrue(instream.read() != -1, "Premature EOF");
     assertMinusOne("read past end of file", instream.read());
   }
 
@@ -260,7 +264,7 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
     }
     //now go back and try to read from a valid point in the file
     instream.seek(1);
-    assertTrue("Premature EOF", instream.read() != -1);
+    assertTrue(instream.read() != -1, "Premature EOF");
   }
 
   /**
@@ -284,13 +288,13 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
 
     //do seek 32KB ahead
     instream.seek(32768);
-    assertEquals("@32768", block[32768], (byte) instream.read());
+    assertEquals(block[32768], (byte) instream.read(), "@32768");
     instream.seek(40000);
-    assertEquals("@40000", block[40000], (byte) instream.read());
+    assertEquals(block[40000], (byte) instream.read(), "@40000");
     instream.seek(8191);
-    assertEquals("@8191", block[8191], (byte) instream.read());
+    assertEquals(block[8191], (byte) instream.read(), "@8191");
     instream.seek(0);
-    assertEquals("@0", 0, (byte) instream.read());
+    assertEquals(0, (byte) instream.read(), "@0");
 
     // try read & readFully
     instream.seek(0);
@@ -321,10 +325,10 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
     //have gone back
     assertEquals(40000, instream.getPos());
     //content is the same too
-    assertEquals("@40000", block[40000], (byte) instream.read());
+    assertEquals(block[40000], (byte) instream.read(), "@40000");
     //now verify the picked up data
     for (int i = 0; i < 256; i++) {
-      assertEquals("@" + i, block[i + 128], readBuffer[i]);
+      assertEquals(block[i + 128], readBuffer[i], "@" + i);
     }
   }
 
@@ -585,7 +589,7 @@ public abstract class AbstractContractSeekTest extends AbstractFSContractTestBas
     describe("read at the end of the file");
     instream = getFileSystem().open(smallSeekFile);
     instream.seek(TEST_FILE_LEN -1);
-    assertTrue("read at last byte", instream.read() > 0);
-    assertEquals("read just past EOF", -1, instream.read());
+    assertTrue(instream.read() > 0, "read at last byte");
+    assertEquals(-1, instream.read(), "read just past EOF");
   }
 }

+ 3 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractSetTimesTest.java

@@ -21,7 +21,8 @@ package org.apache.hadoop.fs.contract;
 import java.io.FileNotFoundException;
 
 import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -36,6 +37,7 @@ public abstract class AbstractContractSetTimesTest extends
   private Path testPath;
   private Path target;
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();

+ 5 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractStreamIOStatisticsTest.java

@@ -22,8 +22,9 @@ import java.util.Collections;
 import java.util.List;
 
 import org.assertj.core.api.Assertions;
-import org.junit.AfterClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -65,6 +66,7 @@ public abstract class AbstractContractStreamIOStatisticsTest
   protected static final IOStatisticsSnapshot FILESYSTEM_IOSTATS =
       snapshotIOStatistics();
 
+  @AfterEach
   @Override
   public void teardown() throws Exception {
     final FileSystem fs = getFileSystem();
@@ -77,7 +79,7 @@ public abstract class AbstractContractStreamIOStatisticsTest
   /**
    * Dump the filesystem statistics after the class if contains any values.
    */
-  @AfterClass
+  @AfterAll
   public static void dumpFileSystemIOStatistics() {
     if (!FILESYSTEM_IOSTATS.counters().isEmpty()) {
       // if there is at least one counter

+ 11 - 9
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java

@@ -18,7 +18,8 @@
 
 package org.apache.hadoop.fs.contract;
 
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -37,6 +38,7 @@ public abstract class AbstractContractUnbufferTest extends AbstractFSContractTes
   private Path file;
   private byte[] fileBytes;
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();
@@ -115,16 +117,16 @@ public abstract class AbstractContractUnbufferTest extends AbstractFSContractTes
       unbuffer(stream);
       validateFileContents(stream, TEST_FILE_LEN / 2, TEST_FILE_LEN / 2);
       unbuffer(stream);
-      assertEquals("stream should be at end of file", TEST_FILE_LEN,
-              stream.getPos());
+      assertEquals(TEST_FILE_LEN,
+          stream.getPos(), "stream should be at end of file");
     }
   }
 
   private void unbuffer(FSDataInputStream stream) throws IOException {
     long pos = stream.getPos();
     stream.unbuffer();
-    assertEquals("unbuffer unexpectedly changed the stream position", pos,
-            stream.getPos());
+    assertEquals(pos,
+        stream.getPos(), "unbuffer unexpectedly changed the stream position");
   }
 
   protected void validateFullFileContents(FSDataInputStream stream)
@@ -136,9 +138,9 @@ public abstract class AbstractContractUnbufferTest extends AbstractFSContractTes
                                       int startIndex)
           throws IOException {
     byte[] streamData = new byte[length];
-    assertEquals("failed to read expected number of bytes from "
-            + "stream. This may be transient",
-        length, stream.read(streamData));
+    assertEquals(length, stream.read(streamData),
+        "failed to read expected number of bytes from "
+        + "stream. This may be transient");
     byte[] validateFileBytes;
     if (startIndex == 0 && length == fileBytes.length) {
       validateFileBytes = fileBytes;
@@ -146,7 +148,7 @@ public abstract class AbstractContractUnbufferTest extends AbstractFSContractTes
       validateFileBytes = Arrays.copyOfRange(fileBytes, startIndex,
               startIndex + length);
     }
-    assertArrayEquals("invalid file contents", validateFileBytes, streamData);
+    assertArrayEquals(validateFileBytes, streamData, "invalid file contents");
   }
 
   protected Path getFile() {

+ 102 - 62
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractVectoredReadTest.java

@@ -22,6 +22,7 @@ import java.io.EOFException;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CountDownLatch;
@@ -33,9 +34,9 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.IntFunction;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -48,7 +49,6 @@ import org.apache.hadoop.io.ElasticByteBufferPool;
 import org.apache.hadoop.io.WeakReferencedElasticByteBufferPool;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
 
-import static java.util.Arrays.asList;
 import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_LENGTH;
 import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_READ_POLICY;
 import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_READ_POLICY_VECTOR;
@@ -58,7 +58,7 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.range;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.returnBuffersToPoolPostRead;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.validateVectoredReadResult;
-  import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.apache.hadoop.test.LambdaTestUtils.interceptFuture;
 import static org.apache.hadoop.util.functional.FutureIO.awaitFuture;
 
@@ -68,7 +68,6 @@ import static org.apache.hadoop.util.functional.FutureIO.awaitFuture;
  * Both the original readVectored(allocator) and the readVectored(allocator, release)
  * operations are tested.
  */
-@RunWith(Parameterized.class)
 public abstract class AbstractContractVectoredReadTest extends AbstractFSContractTestBase {
 
   private static final Logger LOG =
@@ -81,15 +80,15 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
   /**
    * Buffer allocator for vector IO.
    */
-  private final IntFunction<ByteBuffer> allocate;
+  protected IntFunction<ByteBuffer> allocate;
 
   /**
    * Buffer pool for vector IO.
    */
-  private final ElasticByteBufferPool pool =
+  protected final ElasticByteBufferPool pool =
           new WeakReferencedElasticByteBufferPool();
 
-  private final String bufferType;
+  protected String bufferType;
 
   /**
    * Path to the vector file.
@@ -103,13 +102,12 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
    */
   private final AtomicInteger bufferReleases = new AtomicInteger();
 
-  @Parameterized.Parameters(name = "Buffer type : {0}")
   public static List<String> params() {
-    return asList("direct", "array");
+    return Arrays.asList("direct", "array");
   }
 
-  protected AbstractContractVectoredReadTest(String bufferType) {
-    this.bufferType = bufferType;
+  public void initAbstractContractVectoredReadTest(String pBufferType) {
+    this.bufferType = pBufferType;
     final boolean isDirect = !"array".equals(bufferType);
     this.allocate = size -> pool.getBuffer(isDirect, size);
   }
@@ -140,6 +138,7 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
     return pool;
   }
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();
@@ -178,8 +177,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
             .build());
   }
 
-  @Test
-  public void testVectoredReadMultipleRanges() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testVectoredReadMultipleRanges(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     List<FileRange> fileRanges = new ArrayList<>();
     for (int i = 0; i < 10; i++) {
       FileRange fileRange = FileRange.createFileRange(i * 100, 100);
@@ -200,8 +201,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
     }
   }
 
-  @Test
-  public void testVectoredReadAndReadFully()  throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testVectoredReadAndReadFully(String pBufferType)  throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     List<FileRange> fileRanges = new ArrayList<>();
     range(fileRanges, 100, 100);
     try (FSDataInputStream in = openVectorFile()) {
@@ -216,8 +219,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
     }
   }
 
-  @Test
-  public void testVectoredReadWholeFile()  throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testVectoredReadWholeFile(String pBufferType)  throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     describe("Read the whole file in one single vectored read");
     List<FileRange> fileRanges = new ArrayList<>();
     range(fileRanges, 0, DATASET_LEN);
@@ -235,8 +240,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
    * As the minimum seek value is 4*1024,none of the below ranges
    * will get merged.
    */
-  @Test
-  public void testDisjointRanges() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testDisjointRanges(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     List<FileRange> fileRanges = new ArrayList<>();
     range(fileRanges, 0, 100);
     range(fileRanges, 4_000 + 101, 100);
@@ -252,8 +259,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
    * As the minimum seek value is 4*1024, all the below ranges
    * will get merged into one.
    */
-  @Test
-  public void testAllRangesMergedIntoOne() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testAllRangesMergedIntoOne(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     List<FileRange> fileRanges = new ArrayList<>();
     final int length = 100;
     range(fileRanges, 0, length);
@@ -270,8 +279,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
    * As the minimum seek value is 4*1024, the first three ranges will be
    * merged into and other two will remain as it is.
    */
-  @Test
-  public void testSomeRangesMergedSomeUnmerged() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testSomeRangesMergedSomeUnmerged(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     FileSystem fs = getFileSystem();
     List<FileRange> fileRanges = new ArrayList<>();
     range(fileRanges, 8 * 1024, 100);
@@ -295,8 +306,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
    * Most file systems won't support overlapping ranges.
    * Currently, only Raw Local supports it.
    */
-  @Test
-  public void testOverlappingRanges() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testOverlappingRanges(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     if (!isSupported(VECTOR_IO_OVERLAPPING_RANGES)) {
       verifyExceptionalVectoredRead(
               getSampleOverlappingRanges(),
@@ -314,8 +327,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
   /**
    * Same ranges are special case of overlapping.
    */
-  @Test
-  public void testSameRanges() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testSameRanges(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     if (!isSupported(VECTOR_IO_OVERLAPPING_RANGES)) {
       verifyExceptionalVectoredRead(
               getSampleSameRanges(),
@@ -333,8 +348,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
   /**
    * A null range is not permitted.
    */
-  @Test
-  public void testNullRange() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testNullRange(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     List<FileRange> fileRanges = new ArrayList<>();
     range(fileRanges, 500, 100);
     fileRanges.add(null);
@@ -345,15 +362,19 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
   /**
    * A null range is not permitted.
    */
-  @Test
-  public void testNullRangeList() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testNullRangeList(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     verifyExceptionalVectoredRead(
         null,
         NullPointerException.class);
   }
 
-  @Test
-  public void testSomeRandomNonOverlappingRanges() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testSomeRandomNonOverlappingRanges(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     List<FileRange> fileRanges = new ArrayList<>();
     range(fileRanges, 500, 100);
     range(fileRanges, 1000, 200);
@@ -366,8 +387,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
     }
   }
 
-  @Test
-  public void testConsecutiveRanges() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testConsecutiveRanges(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     List<FileRange> fileRanges = new ArrayList<>();
     final int offset = 500;
     final int length = 2011;
@@ -380,8 +403,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
     }
   }
 
-  @Test
-  public void testEmptyRanges() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testEmptyRanges(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     List<FileRange> fileRanges = new ArrayList<>();
     try (FSDataInputStream in = openVectorFile()) {
       in.readVectored(fileRanges, allocate);
@@ -400,8 +425,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
    * The contract option {@link ContractOptions#VECTOR_IO_EARLY_EOF_CHECK} is used
    * to determine which check to perform.
    */
-  @Test
-  public void testEOFRanges()  throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testEOFRanges(String pBufferType)  throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     describe("Testing reading with an offset past the end of the file");
     List<FileRange> fileRanges = range(DATASET_LEN + 1, 100);
 
@@ -414,8 +441,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
   }
 
 
-  @Test
-  public void testVectoredReadWholeFilePlusOne()  throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testVectoredReadWholeFilePlusOne(String pBufferType)  throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     describe("Try to read whole file plus 1 byte");
     List<FileRange> fileRanges = range(0, DATASET_LEN + 1);
 
@@ -442,30 +471,35 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
     }
   }
 
-  @Test
-  public void testNegativeLengthRange()  throws Exception {
-
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testNegativeLengthRange(String pBufferType)  throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     verifyExceptionalVectoredRead(range(0, -50), IllegalArgumentException.class);
   }
 
-  @Test
-  public void testNegativeOffsetRange()  throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testNegativeOffsetRange(String pBufferType)  throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     verifyExceptionalVectoredRead(range(-1, 50), EOFException.class);
   }
 
-  @Test
-  public void testNullReleaseOperation()  throws Exception {
-
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testNullReleaseOperation(String pBufferType)  throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     final List<FileRange> range = range(0, 10);
-
     try (FSDataInputStream in = openVectorFile()) {
-      intercept(NullPointerException.class, () ->
-          in.readVectored(range, allocate, null));
+        intercept(NullPointerException.class, () ->
+            in.readVectored(range, allocate, null));
     }
   }
 
-  @Test
-  public void testNormalReadAfterVectoredRead() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testNormalReadAfterVectoredRead(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     List<FileRange> fileRanges = createSampleNonOverlappingRanges();
     try (FSDataInputStream in = openVectorFile()) {
       in.readVectored(fileRanges, allocate);
@@ -480,8 +514,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
     }
   }
 
-  @Test
-  public void testVectoredReadAfterNormalRead() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testVectoredReadAfterNormalRead(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     List<FileRange> fileRanges = createSampleNonOverlappingRanges();
     try (FSDataInputStream in = openVectorFile()) {
       // read starting 200 bytes
@@ -496,8 +532,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
     }
   }
 
-  @Test
-  public void testMultipleVectoredReads() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testMultipleVectoredReads(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     List<FileRange> fileRanges1 = createSampleNonOverlappingRanges();
     List<FileRange> fileRanges2 = createSampleNonOverlappingRanges();
     try (FSDataInputStream in = openVectorFile()) {
@@ -515,8 +553,10 @@ public abstract class AbstractContractVectoredReadTest extends AbstractFSContrac
    * operation and then uses a separate thread pool to process the
    * results asynchronously.
    */
-  @Test
-  public void testVectoredIOEndToEnd() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testVectoredIOEndToEnd(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     List<FileRange> fileRanges = new ArrayList<>();
     range(fileRanges, 8 * 1024, 100);
     range(fileRanges, 14 * 1024, 100);

+ 19 - 27
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java

@@ -22,20 +22,19 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
+import org.apache.hadoop.test.TestName;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Timeout;
 import org.junit.AssumptionViolatedException;
-import org.junit.rules.TestName;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.extension.RegisterExtension;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.URI;
-import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
@@ -43,7 +42,8 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.skip;
 /**
  * This is the base class for all the contract tests.
  */
-public abstract class AbstractFSContractTestBase extends Assert
+@Timeout(180)
+public abstract class AbstractFSContractTestBase extends Assertions
   implements ContractOptions {
 
   private static final Logger LOG =
@@ -74,16 +74,15 @@ public abstract class AbstractFSContractTestBase extends Assert
    */
   private Path testPath;
 
-  @Rule
+  @RegisterExtension
   public TestName methodName = new TestName();
 
-
-  @BeforeClass
+  @BeforeAll
   public static void nameTestThread() {
     Thread.currentThread().setName("JUnit");
   }
 
-  @Before
+  @BeforeEach
   public void nameThread() {
     Thread.currentThread().setName("JUnit-" + getMethodName());
   }
@@ -161,13 +160,6 @@ public abstract class AbstractFSContractTestBase extends Assert
     return new Configuration();
   }
 
-  /**
-   * Set the timeout for every test.
-   */
-  @Rule
-  public Timeout testTimeout =
-      new Timeout(getTestTimeoutMillis(), TimeUnit.MILLISECONDS);
-
   /**
    * Option for tests to override the default timeout value.
    * @return the current test timeout
@@ -181,7 +173,7 @@ public abstract class AbstractFSContractTestBase extends Assert
    * Setup: create the contract then init it.
    * @throws Exception on any failure
    */
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     Thread.currentThread().setName("setup");
     LOG.debug("== Setup ==");
@@ -191,15 +183,15 @@ public abstract class AbstractFSContractTestBase extends Assert
     assumeEnabled();
     //extract the test FS
     fileSystem = contract.getTestFileSystem();
-    assertNotNull("null filesystem", fileSystem);
+    assertNotNull(fileSystem, "null filesystem");
     URI fsURI = fileSystem.getUri();
     LOG.info("Test filesystem = {} implemented by {}",
         fsURI, fileSystem);
     //sanity check to make sure that the test FS picked up really matches
     //the scheme chosen. This is to avoid defaulting back to the localFS
     //which would be drastic for root FS tests
-    assertEquals("wrong filesystem of " + fsURI,
-                 contract.getScheme(), fsURI.getScheme());
+    assertEquals(contract.getScheme(), fsURI.getScheme(),
+        "wrong filesystem of " + fsURI);
     //create the test path
     testPath = getContract().getTestPath();
     mkdirs(testPath);
@@ -210,7 +202,7 @@ public abstract class AbstractFSContractTestBase extends Assert
    * Teardown.
    * @throws Exception on any failure
    */
-  @After
+  @AfterEach
   public void teardown() throws Exception {
     Thread.currentThread().setName("teardown");
     LOG.debug("== Teardown ==");
@@ -360,7 +352,7 @@ public abstract class AbstractFSContractTestBase extends Assert
    * @throws IOException IO problems during file operations
    */
   protected void mkdirs(Path path) throws IOException {
-    assertTrue("Failed to mkdir " + path, fileSystem.mkdirs(path));
+    assertTrue(fileSystem.mkdirs(path), "Failed to mkdir " + path);
   }
 
   /**
@@ -381,7 +373,7 @@ public abstract class AbstractFSContractTestBase extends Assert
    * @param result read result to validate
    */
   protected void assertMinusOne(String text, int result) {
-    assertEquals(text + " wrong read result " + result, -1, result);
+    assertEquals(-1, result, text + " wrong read result " + result);
   }
 
   protected boolean rename(Path src, Path dst) throws IOException {

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractCreate.java

@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.fs.contract.localfs;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.LocalFileSystem;

+ 5 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractLoaded.java

@@ -21,7 +21,7 @@ package org.apache.hadoop.fs.contract.localfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.apache.hadoop.fs.contract.AbstractFSContractTestBase;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.net.URL;
 
@@ -38,9 +38,9 @@ public class TestLocalFSContractLoaded extends AbstractFSContractTestBase {
   @Test
   public void testContractWorks() throws Throwable {
     String key = getContract().getConfKey(SUPPORTS_ATOMIC_RENAME);
-    assertNotNull("not set: " + key, getContract().getConf().get(key));
-    assertTrue("not true: " + key,
-               getContract().isSupported(SUPPORTS_ATOMIC_RENAME, false));
+    assertNotNull(getContract().getConf().get(key), "not set: " + key);
+    assertTrue(getContract().isSupported(SUPPORTS_ATOMIC_RENAME, false),
+        "not true: " + key);
   }
 
   @Test
@@ -48,6 +48,6 @@ public class TestLocalFSContractLoaded extends AbstractFSContractTestBase {
     URL url = this.getClass()
                        .getClassLoader()
                        .getResource(LocalFSContract.CONTRACT_XML);
-    assertNotNull("could not find contract resource", url);
+    assertNotNull(url, "could not find contract resource");
   }
 }

+ 17 - 9
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/localfs/TestLocalFSContractVectoredRead.java

@@ -23,7 +23,6 @@ import java.util.List;
 import java.util.concurrent.CompletableFuture;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Test;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
@@ -35,14 +34,15 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.AbstractContractVectoredReadTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.validateVectoredReadResult;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 
 public class TestLocalFSContractVectoredRead extends AbstractContractVectoredReadTest {
 
-  public TestLocalFSContractVectoredRead(String bufferType) {
-    super(bufferType);
+  public TestLocalFSContractVectoredRead() {
   }
 
   @Override
@@ -50,8 +50,11 @@ public class TestLocalFSContractVectoredRead extends AbstractContractVectoredRea
     return new LocalFSContract(conf);
   }
 
-  @Test
-  public void testChecksumValidationDuringVectoredRead() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testChecksumValidationDuringVectoredRead(
+      String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     Path testPath = path("big_range_checksum_file");
     List<FileRange> someRandomRanges = new ArrayList<>();
     someRandomRanges.add(FileRange.createFileRange(10, 1024));
@@ -64,8 +67,11 @@ public class TestLocalFSContractVectoredRead extends AbstractContractVectoredRea
    * Test for file size less than checksum chunk size.
    * {@code ChecksumFileSystem#bytesPerChecksum}.
    */
-  @Test
-  public void testChecksumValidationDuringVectoredReadSmallFile() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void testChecksumValidationDuringVectoredReadSmallFile(
+      String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     Path testPath = path("big_range_checksum_file");
     final int length = 471;
     List<FileRange> smallFileRanges = new ArrayList<>();
@@ -104,8 +110,10 @@ public class TestLocalFSContractVectoredRead extends AbstractContractVectoredRea
           () -> validateVectoredReadResult(ranges, datasetCorrupted, 0));
     }
   }
-  @Test
-  public void tesChecksumVectoredReadBoundaries() throws Exception {
+  @MethodSource("params")
+  @ParameterizedTest(name = "Buffer type : {0}")
+  public void tesChecksumVectoredReadBoundaries(String pBufferType) throws Exception {
+    initAbstractContractVectoredReadTest(pBufferType);
     Path testPath = path("boundary_range_checksum_file");
     final int length = 1071;
     LocalFileSystem localFs = (LocalFileSystem) getFileSystem();

+ 1 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawLocalContractVectoredRead.java

@@ -24,8 +24,7 @@ import org.apache.hadoop.fs.contract.AbstractFSContract;
 
 public class TestRawLocalContractVectoredRead extends AbstractContractVectoredReadTest {
 
-  public TestRawLocalContractVectoredRead(String bufferType) {
-    super(bufferType);
+  public TestRawLocalContractVectoredRead() {
   }
 
   @Override

+ 1 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractPathHandle.java

@@ -18,17 +18,13 @@
 package org.apache.hadoop.fs.contract.rawlocal;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.contract.AbstractContractPathHandleTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.apache.hadoop.fs.contract.rawlocal.RawlocalFSContract;
 
 public class TestRawlocalContractPathHandle
     extends AbstractContractPathHandleTest {
 
-  public TestRawlocalContractPathHandle(String testname,
-      Options.HandleOpt[] opts, boolean serialized) {
-    super(testname, opts, serialized);
+  public TestRawlocalContractPathHandle() {
   }
 
   @Override

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/rawlocal/TestRawlocalContractRename.java

@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestRawlocalContractRename extends AbstractContractRenameTest {
 

+ 5 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/wrappedio/impl/TestWrappedIO.java

@@ -29,8 +29,9 @@ import java.util.List;
 import java.util.Map;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -79,7 +80,7 @@ public class TestWrappedIO extends AbstractFSContractTestBase {
    */
   private DynamicWrappedStatistics statistics;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     super.setup();
 
@@ -88,6 +89,7 @@ public class TestWrappedIO extends AbstractFSContractTestBase {
     statistics.iostatisticsContext_reset();
   }
 
+  @AfterEach
   @Override
   public void teardown() throws Exception {
     super.teardown();

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppend.java

@@ -19,20 +19,20 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 /**
  * Test append operations on the Router-based FS.
  */
 public class TestRouterHDFSContractAppend extends AbstractContractAppendTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractAppendSecure.java

@@ -18,8 +18,8 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 /**
  * Test secure append operations on the Router-based FS.
@@ -27,12 +27,12 @@ import org.junit.BeforeClass;
 public class TestRouterHDFSContractAppendSecure
     extends AbstractContractAppendTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcat.java

@@ -22,8 +22,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.AbstractContractConcatTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,14 +32,14 @@ import java.io.IOException;
  */
 public class TestRouterHDFSContractConcat extends AbstractContractConcatTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
     // perform a simple operation on the cluster to verify it is up
     RouterHDFSContract.getFileSystem().getDefaultBlockSize(new Path("/"));
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractConcatSecure.java

@@ -18,8 +18,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.AbstractContractConcatTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -29,14 +29,14 @@ import java.io.IOException;
 public class TestRouterHDFSContractConcatSecure
     extends AbstractContractConcatTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
     // perform a simple operation on the cluster to verify it is up
     RouterHDFSContract.getFileSystem().getDefaultBlockSize(new Path("/"));
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreate.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestRouterHDFSContractCreate extends AbstractContractCreateTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractCreateSecure.java

@@ -17,8 +17,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -28,12 +28,12 @@ import java.io.IOException;
 public class TestRouterHDFSContractCreateSecure
     extends AbstractContractCreateTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 8 - 12
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelegationToken.java

@@ -32,11 +32,9 @@ import org.apache.hadoop.hdfs.server.federation.FederationTestUtils;
 import org.apache.hadoop.hdfs.server.federation.metrics.RouterMBean;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.Token;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test to verify router contracts for delegation token operations.
@@ -44,12 +42,12 @@ import org.junit.rules.ExpectedException;
 public class TestRouterHDFSContractDelegationToken
     extends AbstractFSContractTestBase {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(false, 1, true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }
@@ -59,9 +57,6 @@ public class TestRouterHDFSContractDelegationToken
     return new RouterHDFSContract(conf);
   }
 
-  @Rule
-  public ExpectedException exceptionRule = ExpectedException.none();
-
   @Test
   public void testRouterDelegationToken() throws Exception {
     RouterMBean bean = FederationTestUtils.getBean(
@@ -109,7 +104,8 @@ public class TestRouterHDFSContractDelegationToken
     assertEquals(0, bean.getCurrentTokensCount());
 
     // Renew a cancelled token
-    exceptionRule.expect(SecretManager.InvalidToken.class);
-    token.renew(initSecurity());
+    assertThrows(SecretManager.InvalidToken.class, () -> {
+      token.renew(initSecurity());
+    });
   }
 }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDelete.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestRouterHDFSContractDelete extends AbstractContractDeleteTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractDeleteSecure.java

@@ -18,8 +18,8 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 /**
  * Test secure delete operations on the Router-based FS.
@@ -27,12 +27,12 @@ import org.junit.BeforeClass;
 public class TestRouterHDFSContractDeleteSecure
     extends AbstractContractDeleteTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatus.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@ import java.io.IOException;
 public class TestRouterHDFSContractGetFileStatus
     extends AbstractContractGetFileStatusTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractGetFileStatusSecure.java

@@ -18,8 +18,8 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 /**
  * Test secure get file status operations on the Router-based FS.
@@ -27,12 +27,12 @@ import org.junit.BeforeClass;
 public class TestRouterHDFSContractGetFileStatusSecure
     extends AbstractContractGetFileStatusTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdir.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestRouterHDFSContractMkdir extends AbstractContractMkdirTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractMkdirSecure.java

@@ -17,8 +17,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -28,12 +28,12 @@ import java.io.IOException;
 public class TestRouterHDFSContractMkdirSecure
     extends AbstractContractMkdirTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpen.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestRouterHDFSContractOpen extends AbstractContractOpenTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractOpenSecure.java

@@ -17,8 +17,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -27,12 +27,12 @@ import java.io.IOException;
  */
 public class TestRouterHDFSContractOpenSecure extends AbstractContractOpenTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRename.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestRouterHDFSContractRename extends AbstractContractRenameTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRenameSecure.java

@@ -17,8 +17,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -28,12 +28,12 @@ import java.io.IOException;
 public class TestRouterHDFSContractRenameSecure
     extends AbstractContractRenameTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectory.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@ import java.io.IOException;
 public class TestRouterHDFSContractRootDirectory extends
     AbstractContractRootDirectoryTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractRootDirectorySecure.java

@@ -17,8 +17,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -28,12 +28,12 @@ import java.io.IOException;
 public class TestRouterHDFSContractRootDirectorySecure
     extends AbstractContractRootDirectoryTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeek.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestRouterHDFSContractSeek extends AbstractContractSeekTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSeekSecure.java

@@ -17,8 +17,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -27,12 +27,12 @@ import java.io.IOException;
  */
 public class TestRouterHDFSContractSeekSecure extends AbstractContractSeekTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimes.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSetTimesTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@ import java.io.IOException;
 public class TestRouterHDFSContractSetTimes
     extends AbstractContractSetTimesTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/TestRouterHDFSContractSetTimesSecure.java

@@ -17,8 +17,8 @@ package org.apache.hadoop.fs.contract.router;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSetTimesTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -28,12 +28,12 @@ import java.io.IOException;
 public class TestRouterHDFSContractSetTimesSecure
     extends AbstractContractSetTimesTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws Exception {
     RouterHDFSContract.createCluster(true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractAppend.java

@@ -17,8 +17,8 @@ package org.apache.hadoop.fs.contract.router.web;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -28,12 +28,12 @@ import java.io.IOException;
 public class TestRouterWebHDFSContractAppend
     extends AbstractContractAppendTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractConcat.java

@@ -22,8 +22,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.AbstractContractConcatTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -33,14 +33,14 @@ import java.io.IOException;
 public class TestRouterWebHDFSContractConcat
     extends AbstractContractConcatTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
     // perform a simple operation on the cluster to verify it is up
     RouterWebHDFSContract.getFileSystem().getDefaultBlockSize(new Path("/"));
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractCreate.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.router.web;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@ import java.io.IOException;
 public class TestRouterWebHDFSContractCreate
     extends AbstractContractCreateTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractDelete.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.router.web;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@ import java.io.IOException;
 public class TestRouterWebHDFSContractDelete
     extends AbstractContractDeleteTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractMkdir.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.router.web;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestRouterWebHDFSContractMkdir extends AbstractContractMkdirTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractOpen.java

@@ -21,9 +21,9 @@ package org.apache.hadoop.fs.contract.router.web;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@ import java.io.IOException;
  */
 public class TestRouterWebHDFSContractOpen extends AbstractContractOpenTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRename.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.router.web;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@ import java.io.IOException;
 public class TestRouterWebHDFSContractRename
     extends AbstractContractRenameTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractRootDirectory.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.router.web;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@ import java.io.IOException;
 public class TestRouterWebHDFSContractRootDirectory extends
     AbstractContractRootDirectoryTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/fs/contract/router/web/TestRouterWebHDFSContractSeek.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.router.web;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestRouterWebHDFSContractSeek extends AbstractContractSeekTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     RouterWebHDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     RouterWebHDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestDFSWrappedIO.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 
 import java.io.IOException;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
@@ -32,12 +32,12 @@ import org.apache.hadoop.io.wrappedio.impl.TestWrappedIO;
  */
 public class TestDFSWrappedIO extends TestWrappedIO {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java

@@ -17,19 +17,19 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractAppend extends AbstractContractAppendTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractBulkDelete.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 
 import java.io.IOException;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractBulkDeleteTest;
@@ -37,12 +37,12 @@ public class TestHDFSContractBulkDelete extends AbstractContractBulkDeleteTest {
     return new HDFSContract(conf);
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractConcatTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,14 +31,14 @@ import java.io.IOException;
  */
 public class TestHDFSContractConcat extends AbstractContractConcatTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
     // perform a simple operation on the cluster to verify it is up
     HDFSContract.getCluster().getFileSystem().getDefaultBlockSize();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java

@@ -21,19 +21,19 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractCreate extends AbstractContractCreateTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestHDFSContractDelete extends AbstractContractDeleteTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java

@@ -21,20 +21,20 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractGetFileStatus extends
     AbstractContractGetFileStatusTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractLeaseRecovery.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 
 import java.io.IOException;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractLeaseRecoveryTest;
@@ -32,12 +32,12 @@ import org.apache.hadoop.fs.contract.AbstractFSContract;
  */
 public class TestHDFSContractLeaseRecovery extends AbstractContractLeaseRecoveryTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestHDFSContractMkdir extends AbstractContractMkdirTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java

@@ -19,8 +19,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 
 import java.io.IOException;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -37,12 +37,12 @@ public class TestHDFSContractMultipartUploader extends
   protected static final Logger LOG =
       LoggerFactory.getLogger(TestHDFSContractMultipartUploader.class);
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestHDFSContractOpen extends AbstractContractOpenTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 5 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java

@@ -18,11 +18,10 @@
 package org.apache.hadoop.fs.contract.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.contract.AbstractContractPathHandleTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -33,17 +32,15 @@ import java.io.IOException;
 public class TestHDFSContractPathHandle
     extends AbstractContractPathHandleTest {
 
-  public TestHDFSContractPathHandle(String testname, Options.HandleOpt[] opts,
-      boolean serialized) {
-    super(testname, opts, serialized);
+  public TestHDFSContractPathHandle() {
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java

@@ -21,19 +21,19 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractRename extends AbstractContractRenameTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@ import java.io.IOException;
 public class TestHDFSContractRootDirectory extends
     AbstractContractRootDirectoryTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSafeMode.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 
 import java.io.IOException;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSafeModeTest;
@@ -32,12 +32,12 @@ import org.apache.hadoop.fs.contract.AbstractFSContract;
  */
 public class TestHDFSContractSafeMode extends AbstractContractSafeModeTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestHDFSContractSeek extends AbstractContractSeekTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java

@@ -21,19 +21,19 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSetTimesTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractSetTimes extends AbstractContractSetTimesTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java

@@ -22,19 +22,19 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractUnbuffer extends AbstractContractUnbufferTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 5 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractVectoredRead.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 
 import java.io.IOException;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractVectoredReadTest;
@@ -33,16 +33,15 @@ import org.apache.hadoop.fs.contract.AbstractFSContract;
 public class TestHDFSContractVectoredRead
     extends AbstractContractVectoredReadTest {
 
-  public TestHDFSContractVectoredRead(final String bufferType) {
-    super(bufferType);
+  public TestHDFSContractVectoredRead() {
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 5 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/AbstractManifestCommitterTest.java

@@ -36,7 +36,8 @@ import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
 import org.assertj.core.api.Assertions;
-import org.junit.AfterClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -309,6 +310,7 @@ public abstract class AbstractManifestCommitterTest
     return enableManifestCommitter(super.createConfiguration());
   }
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
 
@@ -444,7 +446,7 @@ public abstract class AbstractManifestCommitterTest
   /**
    * Make sure there's no thread leakage.
    */
-  @AfterClass
+  @AfterAll
   public static void threadLeakage() {
     THREAD_LEAK_TRACKER.assertNoThreadLeakage();
   }
@@ -452,7 +454,7 @@ public abstract class AbstractManifestCommitterTest
   /**
    * Dump the filesystem statistics after the class.
    */
-  @AfterClass
+  @AfterAll
   public static void dumpFileSystemIOStatistics() {
     LOG.info("Aggregate FileSystem Statistics {}",
         ioStatisticsToPrettyString(FILESYSTEM_IOSTATS));

+ 3 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestCleanupStage.java

@@ -20,7 +20,8 @@ package org.apache.hadoop.mapreduce.lib.output.committer.manifest;
 
 import java.util.List;
 
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.lib.output.committer.manifest.files.TaskManifest;
@@ -62,6 +63,7 @@ public class TestCleanupStage extends AbstractManifestCommitterTest {
    */
   private List<TaskManifest> manifests;
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();

+ 3 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestCommitTaskStage.java

@@ -22,7 +22,8 @@ import java.io.FileNotFoundException;
 import java.net.SocketTimeoutException;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
@@ -62,6 +63,7 @@ public class TestCommitTaskStage extends AbstractManifestCommitterTest {
   public static final String TASK1_ATTEMPT1 = String.format("%s_%02d",
       TASK1, 1);
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();

+ 3 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestCreateOutputDirectoriesStage.java

@@ -26,7 +26,8 @@ import java.util.concurrent.CompletableFuture;
 import java.util.stream.Collectors;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;
@@ -75,6 +76,7 @@ public class TestCreateOutputDirectoriesStage extends AbstractManifestCommitterT
   private StageConfig stageConfig;
   private IOStatisticsStore iostats;
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();

+ 6 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestJobThroughManifestCommitter.java

@@ -26,9 +26,10 @@ import java.util.stream.Collectors;
 
 import org.assertj.core.api.Assertions;
 import org.assertj.core.api.Assumptions;
-import org.junit.FixMethodOrder;
-import org.junit.Test;
-import org.junit.runners.MethodSorters;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.MethodOrderer;
+import org.junit.jupiter.api.TestMethodOrder;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
@@ -83,7 +84,7 @@ import static org.apache.hadoop.test.LambdaTestUtils.intercept;
  * after each test case.
  * The last test case MUST perform the cleanup.
  */
-@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+@TestMethodOrder(MethodOrderer.Alphanumeric.class)
 public class TestJobThroughManifestCommitter
     extends AbstractManifestCommitterTest {
 
@@ -152,6 +153,7 @@ public class TestJobThroughManifestCommitter
   private static LoadedManifestData
       loadedManifestData;
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();

+ 5 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestLoadManifestsStage.java

@@ -23,7 +23,9 @@ import java.util.List;
 import java.util.Set;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -71,6 +73,7 @@ public class TestLoadManifestsStage extends AbstractManifestCommitterTest {
     return ManifestCommitterTestSupport.NUMBER_OF_TASK_ATTEMPTS;
   }
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();
@@ -80,6 +83,7 @@ public class TestLoadManifestsStage extends AbstractManifestCommitterTest {
         .isGreaterThan(0);
   }
 
+  @AfterEach
   @Override
   public void teardown() throws Exception {
     if (entryFile != null) {

+ 7 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestManifestCommitProtocol.java

@@ -29,8 +29,10 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 
 import org.assertj.core.api.Assertions;
-import org.junit.AfterClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -228,6 +230,7 @@ public class TestManifestCommitProtocol
     return suitename() + "-" + super.getMethodName();
   }
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();
@@ -236,6 +239,7 @@ public class TestManifestCommitProtocol
     cleanupOutputDir();
   }
 
+  @AfterEach
   @Override
   public void teardown() throws Exception {
     describe("teardown");
@@ -254,7 +258,7 @@ public class TestManifestCommitProtocol
     super.teardown();
   }
 
-  @AfterClass
+  @AfterAll
   public static void logAggregateIOStatistics() {
     LOG.info("Final IOStatistics {}",
         ioStatisticsToPrettyString(IOSTATISTICS));

+ 6 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestRenameStageFailure.java

@@ -26,8 +26,8 @@ import java.util.Collections;
 import java.util.List;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Assume;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.commons.lang3.tuple.Triple;
 import org.apache.hadoop.fs.CommonPathCapabilities;
@@ -57,6 +57,7 @@ import static org.apache.hadoop.mapreduce.lib.output.committer.manifest.impl.Man
 import static org.apache.hadoop.mapreduce.lib.output.committer.manifest.impl.UnreliableManifestStoreOperations.SIMULATED_FAILURE;
 import static org.apache.hadoop.mapreduce.lib.output.committer.manifest.stages.AbstractJobOrTaskStage.FAILED_TO_RENAME_PREFIX;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 /**
  * Test renaming files with fault injection.
@@ -103,6 +104,7 @@ public class TestRenameStageFailure extends AbstractManifestCommitterTest {
     return etagsSupported;
   }
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();
@@ -269,8 +271,8 @@ public class TestRenameStageFailure extends AbstractManifestCommitterTest {
     describe("commit where rename() returns false for one file." +
         " Expect failure to be escalated to an IOE");
 
-    Assume.assumeTrue("not used when resilient commits are available",
-        !resilientCommit);
+    assumeTrue(!resilientCommit,
+        "not used when resilient commits are available");
     // destination directory.
     Path destDir = methodPath();
     StageConfig stageConfig = createStageConfigForJob(JOB1, destDir);

+ 3 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/TestTaskManifestFileIO.java

@@ -21,7 +21,8 @@ package org.apache.hadoop.mapreduce.lib.output.committer.manifest;
 import java.io.IOException;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.lib.output.committer.manifest.files.DirEntry;
@@ -50,6 +51,7 @@ public class TestTaskManifestFileIO extends AbstractManifestCommitterTest {
 
   private Path taPath;
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();

+ 5 - 5
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/committer/manifest/impl/TestEntryFileIO.java

@@ -26,9 +26,9 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.assertj.core.api.Assertions;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.slf4j.Logger;
@@ -73,7 +73,7 @@ public class TestEntryFileIO extends AbstractManifestCommitterTest {
   /**
    * Create an entry file during setup.
    */
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     entryFileIO = new EntryFileIO(new Configuration());
     createEntryFile();
@@ -83,7 +83,7 @@ public class TestEntryFileIO extends AbstractManifestCommitterTest {
    * Teardown deletes any entry file.
    * @throws Exception on any failure
    */
-  @After
+  @AfterEach
   public void teardown() throws Exception {
     Thread.currentThread().setName("teardown");
     if (getEntryFile() != null) {

+ 1 - 1
hadoop-project/pom.xml

@@ -186,7 +186,7 @@
       --enable-native-access=ALL-UNNAMED
     </extraJavaTestArgs>
     <!-- Plugin versions and config -->
-    <maven-surefire-plugin.argLine>-Xmx2048m -Xss2m -XX:+HeapDumpOnOutOfMemoryError ${extraJavaTestArgs}</maven-surefire-plugin.argLine>
+    <maven-surefire-plugin.argLine>-Xmx4096m -Xss2m -XX:+HeapDumpOnOutOfMemoryError ${extraJavaTestArgs}</maven-surefire-plugin.argLine>
     <maven-surefire-plugin.version>3.0.0-M4</maven-surefire-plugin.version>
     <maven-surefire-report-plugin.version>${maven-surefire-plugin.version}</maven-surefire-report-plugin.version>
     <maven-failsafe-plugin.version>${maven-surefire-plugin.version}</maven-failsafe-plugin.version>

+ 2 - 0
hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractGetFileStatusV1List.java

@@ -22,6 +22,7 @@ import org.apache.hadoop.fs.aliyun.oss.AliyunOSSTestUtils;
 import org.apache.hadoop.fs.aliyun.oss.Constants;
 import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.junit.jupiter.api.AfterEach;
 
 /**
  * Test getFileStatus and related listing operations,
@@ -35,6 +36,7 @@ public class TestAliyunOSSContractGetFileStatusV1List
     return new AliyunOSSContract(conf);
   }
 
+  @AfterEach
   @Override
   public void teardown() throws Exception {
     getLogger().info("FS details {}", getFileSystem());

+ 1 - 1
hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/contract/TestAliyunOSSContractSeek.java

@@ -24,7 +24,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;

+ 8 - 5
hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/yarn/TestOSSMiniYarnCluster.java

@@ -37,7 +37,9 @@ import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -60,6 +62,7 @@ public class TestOSSMiniYarnCluster extends AbstractFSContractTestBase {
     return new AliyunOSSContract(conf);
   }
 
+  @BeforeEach
   @Override
   public void setup() throws Exception {
     super.setup();
@@ -97,7 +100,7 @@ public class TestOSSMiniYarnCluster extends AbstractFSContractTestBase {
     FileOutputFormat.setOutputPath(job, output);
 
     int exitCode = (job.waitForCompletion(true) ? 0 : 1);
-    assertEquals("Returned error code.", 0, exitCode);
+    assertEquals(0, exitCode, "Returned error code.");
 
     assertTrue(fs.exists(new Path(output, "_SUCCESS")));
     String outputAsStr = readStringFromFile(new Path(output, "part-r-00000"));
@@ -118,9 +121,8 @@ public class TestOSSMiniYarnCluster extends AbstractFSContractTestBase {
     Map<String, Integer> result = new HashMap<>();
     for (String line : outputAsStr.split("\n")) {
       String[] tokens = line.split("\t");
-      assertTrue("Not enough tokens in in string \" "
-              + line + "\" from output \"" + outputAsStr + "\"",
-          tokens.length > 1);
+      assertTrue(tokens.length > 1, "Not enough tokens in in string \" "
+          + line + "\" from output \"" + outputAsStr + "\"");
       result.put(tokens[0], Integer.parseInt(tokens[1]));
     }
     return result;
@@ -149,6 +151,7 @@ public class TestOSSMiniYarnCluster extends AbstractFSContractTestBase {
     }
   }
 
+  @AfterEach
   @Override
   public void teardown() throws Exception {
     if (yarnCluster != null) {

+ 8 - 2
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractAnalyticsStreamVectoredRead.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.fs.contract.s3a;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractVectoredReadTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.junit.jupiter.api.BeforeEach;
 
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.enableAnalyticsAccelerator;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.skipForAnyEncryptionExceptSSES3;
@@ -35,8 +36,13 @@ import static org.apache.hadoop.fs.s3a.S3ATestUtils.skipForAnyEncryptionExceptSS
  */
 public class ITestS3AContractAnalyticsStreamVectoredRead extends AbstractContractVectoredReadTest {
 
-  public ITestS3AContractAnalyticsStreamVectoredRead(String bufferType) {
-    super(bufferType);
+  public ITestS3AContractAnalyticsStreamVectoredRead() {
+  }
+
+  @BeforeEach
+  @Override
+  public void setup() throws Exception {
+    super.setup();
   }
 
   /**

+ 23 - 15
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/contract/s3a/ITestS3AContractBulkDelete.java

@@ -23,9 +23,8 @@ import java.util.Arrays;
 import java.util.List;
 
 import org.assertj.core.api.Assertions;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -54,7 +53,7 @@ import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 /**
  * Contract tests for bulk delete operation for S3A Implementation.
  */
-@RunWith(Parameterized.class)
+
 public class ITestS3AContractBulkDelete extends AbstractContractBulkDeleteTest {
 
   private static final Logger LOG = LoggerFactory.getLogger(ITestS3AContractBulkDelete.class);
@@ -67,9 +66,8 @@ public class ITestS3AContractBulkDelete extends AbstractContractBulkDeleteTest {
    */
   private static final int DELETE_PAGE_SIZE = 20;
 
-  private final boolean enableMultiObjectDelete;
+  private boolean enableMultiObjectDelete;
 
-  @Parameterized.Parameters(name = "enableMultiObjectDelete = {0}")
   public static Iterable<Object[]> enableMultiObjectDelete() {
     return Arrays.asList(new Object[][]{
             {true},
@@ -77,8 +75,8 @@ public class ITestS3AContractBulkDelete extends AbstractContractBulkDeleteTest {
     });
   }
 
-  public ITestS3AContractBulkDelete(boolean enableMultiObjectDelete) {
-    this.enableMultiObjectDelete = enableMultiObjectDelete;
+  public void initITestS3AContractBulkDelete(boolean pEnableMultiObjectDelete) {
+    this.enableMultiObjectDelete = pEnableMultiObjectDelete;
   }
 
   @Override
@@ -119,8 +117,11 @@ public class ITestS3AContractBulkDelete extends AbstractContractBulkDeleteTest {
             .isEqualTo(getExpectedPageSize());
   }
 
-  @Test
-  public void testBulkDeleteZeroPageSizePrecondition() throws Exception {
+  @MethodSource("enableMultiObjectDelete")
+  @ParameterizedTest(name = "enableMultiObjectDelete = {0}")
+  public void testBulkDeleteZeroPageSizePrecondition(
+      boolean pEnableMultiObjectDelete) throws Exception {
+    initITestS3AContractBulkDelete(pEnableMultiObjectDelete);
     if (!enableMultiObjectDelete) {
       // if multi-object delete is disabled, skip this test as
       // page size is always 1.
@@ -135,8 +136,11 @@ public class ITestS3AContractBulkDelete extends AbstractContractBulkDeleteTest {
     }
   }
 
-  @Test
-  public void testPageSizeWhenMultiObjectsDisabled() throws Exception {
+  @MethodSource("enableMultiObjectDelete")
+  @ParameterizedTest(name = "enableMultiObjectDelete = {0}")
+  public void testPageSizeWhenMultiObjectsDisabled(
+      boolean pEnableMultiObjectDelete) throws Exception {
+    initITestS3AContractBulkDelete(pEnableMultiObjectDelete);
     Configuration conf = getContract().getConf();
     conf.setBoolean(Constants.ENABLE_MULTI_DELETE, false);
     Path testPath = path(getMethodName());
@@ -165,8 +169,11 @@ public class ITestS3AContractBulkDelete extends AbstractContractBulkDeleteTest {
     assertIsDirectory(dirPath);
   }
 
-  @Test
-  public void testBulkDeleteParentDirectoryWithDirectories() throws Exception {
+  @MethodSource("enableMultiObjectDelete")
+  @ParameterizedTest(name = "enableMultiObjectDelete = {0}")
+  public void testBulkDeleteParentDirectoryWithDirectories(
+      boolean pEnableMultiObjectDelete) throws Exception {
+    initITestS3AContractBulkDelete(pEnableMultiObjectDelete);
     List<Path> paths = new ArrayList<>();
     Path dirPath = new Path(basePath, "dir");
     fs.mkdirs(dirPath);
@@ -195,7 +202,8 @@ public class ITestS3AContractBulkDelete extends AbstractContractBulkDeleteTest {
   }
 
 
-  @Test
+  @MethodSource("enableMultiObjectDelete")
+  @ParameterizedTest(name = "enableMultiObjectDelete = {0}")
   public void testRateLimiting() throws Exception {
     if (!enableMultiObjectDelete) {
       skip("Multi-object delete is disabled so hard to trigger rate limiting");

Some files were not shown because too many files changed in this diff