
HADOOP-19415. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-common Part9. (#7668)

* HADOOP-19415. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-common Part9.

Co-authored-by: Chris Nauroth <cnauroth@apache.org>
Co-authored-by: Hualong Zhang <hualong.z@hotmail.com>
Reviewed-by: Chris Nauroth <cnauroth@apache.org>
Reviewed-by: Hualong Zhang <hualong.z@hotmail.com>
Signed-off-by: Shilun Fan <slfan1989@apache.org>
slfan1989 1 month ago
parent
commit
49d12ad4e4
51 files changed with 1002 additions and 918 deletions
  1. 12 12
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java
  2. 84 88
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
  3. 8 7
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java
  4. 14 11
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestWrapper.java
  5. 9 9
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFs.java
  6. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFcLocalFsUtil.java
  7. 14 8
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java
  8. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetEnclosingRoot.java
  9. 8 8
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java
  10. 7 7
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/impl/TestFutureIO.java
  11. 45 45
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/impl/TestVectoredReadUtils.java
  12. 4 4
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMoreWeakReferencedElasticByteBufferPool.java
  13. 68 62
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWeakReferencedElasticByteBufferPool.java
  14. 16 15
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java
  15. 31 29
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCloseableReferenceCount.java
  16. 13 18
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIntrusiveCollection.java
  17. 6 8
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJsonSerialization.java
  18. 13 14
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLimitInputStream.java
  19. 15 18
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestUTF8ByteArrayUtils.java
  20. 75 58
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/functional/TestTaskPool.java
  21. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
  22. 15 15
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java
  23. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemContract.java
  24. 15 12
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
  25. 21 20
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemContract.java
  26. 7 7
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestArnResource.java
  27. 31 32
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestDataBlocks.java
  28. 45 36
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestInvoker.java
  29. 21 21
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestStreamChangeTracker.java
  30. 8 10
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/TestMarshalledCredentials.java
  31. 18 13
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java
  32. 13 13
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestDirectoryCommitterScale.java
  33. 2 2
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestPaths.java
  34. 199 154
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java
  35. 5 4
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingDirectoryOutputCommitter.java
  36. 9 11
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedFileListing.java
  37. 7 4
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedJobCommit.java
  38. 7 7
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedTaskCommit.java
  39. 2 2
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextUtil.java
  40. 6 6
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestCreateFileBuilder.java
  41. 15 15
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestHeaderProcessing.java
  42. 7 7
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestOpenFileSupport.java
  43. 16 16
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestS3AMultipartUploaderSupport.java
  44. 6 6
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestSDKStreamDrainer.java
  45. 5 6
      hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/mapreduce/filecache/TestS3AResourceScope.java
  46. 7 6
      hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java
  47. 8 8
      hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractEmulator.java
  48. 15 16
      hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractLive.java
  49. 16 15
      hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractPageBlobLive.java
  50. 9 9
      hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java
  51. 10 9
      hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAzureBlobFileSystemBasics.java

+ 12 - 12
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextUtilBase.java

@@ -19,15 +19,15 @@ package org.apache.hadoop.fs;
 
 import static org.apache.hadoop.fs.FileContextTestHelper.readFile;
 import static org.apache.hadoop.fs.FileContextTestHelper.writeFile;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.util.Arrays;
 
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 /**
@@ -57,12 +57,12 @@ public abstract class FileContextUtilBase {
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fc.mkdir(fileContextTestHelper.getTestRootPath(fc), FileContext.DEFAULT_PERM, true);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (fc != null) {
       fc.delete(fileContextTestHelper.getTestRootPath(fc), true);
@@ -80,10 +80,10 @@ public abstract class FileContextUtilBase {
     fc.util().copy(file1, file2);
 
     // verify that newly copied file2 exists
-    assertTrue("Failed to copy file2  ", fc.util().exists(file2));
+    assertTrue(fc.util().exists(file2), "Failed to copy file2  ");
     // verify that file2 contains test string
-    assertTrue("Copied files does not match ",Arrays.equals(ts.getBytes(),
-        readFile(fc,file2,ts.getBytes().length)));
+    assertTrue(Arrays.equals(ts.getBytes(),
+         readFile(fc, file2, ts.getBytes().length)), "Copied files does not match ");
   }
 
   @Test
@@ -103,9 +103,9 @@ public abstract class FileContextUtilBase {
     fc.util().copy(dir1, dir2);
 
     // verify that newly copied file2 exists
-    assertTrue("Failed to copy file2  ", fc.util().exists(file2));
+    assertTrue(fc.util().exists(file2), "Failed to copy file2  ");
     // verify that file2 contains test string
-    assertTrue("Copied files does not match ",Arrays.equals(ts.getBytes(),
-        readFile(fc,file2,ts.getBytes().length)));
+    assertTrue(Arrays.equals(ts.getBytes(),
+        readFile(fc, file2, ts.getBytes().length)), "Copied files does not match ");
   }
 }
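
This first file shows the two mechanical patterns that account for most of the commit: the JUnit 4 lifecycle annotations (@Before/@After) become @BeforeEach/@AfterEach, and assertion failure messages move from the first argument to the last. A minimal sketch of the resulting style, using a hypothetical test class rather than the Hadoop code itself:

// Illustrative JUnit 5 test class (hypothetical, not from the Hadoop tree).
// JUnit 4 equivalents: org.junit.Before, org.junit.After and
// org.junit.Assert.assertTrue(String message, boolean condition).
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

public class ExampleLifecycleTest {

  private StringBuilder scratch;

  @BeforeEach   // JUnit 4: @Before
  public void setUp() {
    scratch = new StringBuilder("data");
  }

  @AfterEach    // JUnit 4: @After
  public void tearDown() {
    scratch = null;
  }

  @Test
  public void testContent() {
    // JUnit 5 takes the failure message as the last argument; JUnit 4 took it first.
    assertTrue(scratch.toString().startsWith("da"), "unexpected content");
  }
}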

+ 84 - 88
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.concurrent.TimeUnit;
 
+import org.junit.jupiter.api.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -31,13 +31,15 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.StringUtils;
 
-import static org.junit.Assert.*;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
-import org.junit.After;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * <p>
@@ -52,6 +54,7 @@ import org.junit.rules.Timeout;
  * {@link FileSystem} instance variable.
  * </p>
  */
+@Timeout(30)
 public abstract class FileSystemContractBaseTest {
   private static final Logger LOG =
       LoggerFactory.getLogger(FileSystemContractBaseTest.class);
@@ -60,10 +63,6 @@ public abstract class FileSystemContractBaseTest {
   protected FileSystem fs;
   protected byte[] data = dataset(getBlockSize() * 2, 0, 255);
 
-  @Rule
-  public Timeout globalTimeout =
-      new Timeout(getGlobalTimeout(), TimeUnit.MILLISECONDS);
-
   /**
    * Get the timeout in milliseconds for each test case.
    * @return a time in milliseconds.
@@ -72,7 +71,7 @@ public abstract class FileSystemContractBaseTest {
     return 30 * 1000;
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (fs != null) {
       // some cases use this absolute path
@@ -195,7 +194,7 @@ public abstract class FileSystemContractBaseTest {
     assertTrue(fs.mkdirs(testDir));
 
     assertTrue(fs.exists(testDir));
-    assertTrue("Should be a directory", fs.isDirectory(testDir));
+    assertTrue(fs.isDirectory(testDir), "Should be a directory");
     assertFalse(fs.isFile(testDir));
 
     Path parentDir = testDir.getParent();
@@ -365,8 +364,8 @@ public abstract class FileSystemContractBaseTest {
 
     createFile(path);
     
-    assertTrue("Exists", fs.exists(path));
-    assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
+    assertTrue(fs.exists(path), "Exists");
+    assertEquals(data.length, fs.getFileStatus(path).getLen(), "Length");
     
     try {
       fs.create(path, false).close();
@@ -379,27 +378,27 @@ public abstract class FileSystemContractBaseTest {
     out.write(data, 0, data.length);
     out.close();
     
-    assertTrue("Exists", fs.exists(path));
-    assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
+    assertTrue(fs.exists(path), "Exists");
+    assertEquals(data.length, fs.getFileStatus(path).getLen(), "Length");
     
   }
 
   @Test
   public void testWriteInNonExistentDirectory() throws IOException {
     Path path = path("testWriteInNonExistentDirectory/file");
-    assertFalse("Parent exists", fs.exists(path.getParent()));
+    assertFalse(fs.exists(path.getParent()), "Parent exists");
     createFile(path);
     
-    assertTrue("Exists", fs.exists(path));
-    assertEquals("Length", data.length, fs.getFileStatus(path).getLen());
-    assertTrue("Parent exists", fs.exists(path.getParent()));
+    assertTrue(fs.exists(path), "Exists");
+    assertEquals(data.length, fs.getFileStatus(path).getLen(), "Length");
+    assertTrue(fs.exists(path.getParent()), "Parent exists");
   }
 
   @Test
   public void testDeleteNonExistentFile() throws IOException {
     Path path = path("testDeleteNonExistentFile/file");
-    assertFalse("Path exists: " + path, fs.exists(path));
-    assertFalse("No deletion", fs.delete(path, true));
+    assertFalse(fs.exists(path), "Path exists: " + path);
+    assertFalse(fs.delete(path, true), "No deletion");
   }
 
   @Test
@@ -409,11 +408,11 @@ public abstract class FileSystemContractBaseTest {
     Path subdir = path("testDeleteRecursively/subdir");
     
     createFile(file);
-    assertTrue("Created subdir", fs.mkdirs(subdir));
+    assertTrue(fs.mkdirs(subdir), "Created subdir");
     
-    assertTrue("File exists", fs.exists(file));
-    assertTrue("Dir exists", fs.exists(dir));
-    assertTrue("Subdir exists", fs.exists(subdir));
+    assertTrue(fs.exists(file), "File exists");
+    assertTrue(fs.exists(dir), "Dir exists");
+    assertTrue(fs.exists(subdir), "Subdir exists");
     
     try {
       fs.delete(dir, false);
@@ -421,23 +420,23 @@ public abstract class FileSystemContractBaseTest {
     } catch (IOException e) {
       // expected
     }
-    assertTrue("File still exists", fs.exists(file));
-    assertTrue("Dir still exists", fs.exists(dir));
-    assertTrue("Subdir still exists", fs.exists(subdir));
+    assertTrue(fs.exists(file), "File still exists");
+    assertTrue(fs.exists(dir), "Dir still exists");
+    assertTrue(fs.exists(subdir), "Subdir still exists");
     
-    assertTrue("Deleted", fs.delete(dir, true));
-    assertFalse("File doesn't exist", fs.exists(file));
-    assertFalse("Dir doesn't exist", fs.exists(dir));
-    assertFalse("Subdir doesn't exist", fs.exists(subdir));
+    assertTrue(fs.delete(dir, true), "Deleted");
+    assertFalse(fs.exists(file), "File doesn't exist");
+    assertFalse(fs.exists(dir), "Dir doesn't exist");
+    assertFalse(fs.exists(subdir), "Subdir doesn't exist");
   }
 
   @Test
   public void testDeleteEmptyDirectory() throws IOException {
     Path dir = path("testDeleteEmptyDirectory");
     assertTrue(fs.mkdirs(dir));
-    assertTrue("Dir exists", fs.exists(dir));
-    assertTrue("Deleted", fs.delete(dir, false));
-    assertFalse("Dir doesn't exist", fs.exists(dir));
+    assertTrue(fs.exists(dir), "Dir exists");
+    assertTrue(fs.delete(dir, false), "Deleted");
+    assertFalse(fs.exists(dir), "Dir doesn't exist");
   }
 
   @Test
@@ -516,14 +515,14 @@ public abstract class FileSystemContractBaseTest {
     fs.mkdirs(dst.getParent());
     rename(src, dst, true, false, true);
     
-    assertFalse("Nested file1 exists",
-        fs.exists(path(src + "/file1")));
-    assertFalse("Nested file2 exists",
-        fs.exists(path(src + "/subdir/file2")));
-    assertTrue("Renamed nested file1 exists",
-        fs.exists(path(dst + "/file1")));
-    assertTrue("Renamed nested exists",
-        fs.exists(path(dst + "/subdir/file2")));
+    assertFalse(fs.exists(path(src + "/file1")),
+        "Nested file1 exists");
+    assertFalse(fs.exists(path(src + "/subdir/file2")),
+        "Nested file2 exists");
+    assertTrue(fs.exists(path(dst + "/file1")),
+        "Renamed nested file1 exists");
+    assertTrue(fs.exists(path(dst + "/subdir/file2")),
+        "Renamed nested exists");
   }
 
   @Test
@@ -548,16 +547,16 @@ public abstract class FileSystemContractBaseTest {
     final Path dst = path("testRenameDirectoryAsExistingDirectoryNew/newdir");
     fs.mkdirs(dst);
     rename(src, dst, true, false, true);
-    assertTrue("Destination changed",
-        fs.exists(path(dst + "/dir")));
-    assertFalse("Nested file1 exists",
-        fs.exists(path(src + "/file1")));
-    assertFalse("Nested file2 exists",
-        fs.exists(path(src + "/dir/subdir/file2")));
-    assertTrue("Renamed nested file1 exists",
-        fs.exists(path(dst + "/dir/file1")));
-    assertTrue("Renamed nested exists",
-        fs.exists(path(dst + "/dir/subdir/file2")));
+    assertTrue(fs.exists(path(dst + "/dir")),
+        "Destination changed");
+    assertFalse(fs.exists(path(src + "/file1")),
+        "Nested file1 exists");
+    assertFalse(fs.exists(path(src + "/dir/subdir/file2")),
+        "Nested file2 exists");
+    assertTrue(fs.exists(path(dst + "/dir/file1")),
+        "Renamed nested file1 exists");
+    assertTrue(fs.exists(path(dst + "/dir/subdir/file2")),
+        "Renamed nested exists");
   }
 
   @Test
@@ -590,9 +589,9 @@ public abstract class FileSystemContractBaseTest {
   
   protected void rename(Path src, Path dst, boolean renameSucceeded,
       boolean srcExists, boolean dstExists) throws IOException {
-    assertEquals("Rename result", renameSucceeded, fs.rename(src, dst));
-    assertEquals("Source exists", srcExists, fs.exists(src));
-    assertEquals("Destination exists" + dst, dstExists, fs.exists(dst));
+    assertEquals(renameSucceeded, fs.rename(src, dst), "Rename result");
+    assertEquals(srcExists, fs.exists(src), "Source exists");
+    assertEquals(dstExists, fs.exists(dst), "Destination exists" + dst);
   }
 
   /**
@@ -633,27 +632,26 @@ public abstract class FileSystemContractBaseTest {
     String mixedCaseFilename = "testFilesystemIsCaseSensitive";
     Path upper = path(mixedCaseFilename);
     Path lower = path(StringUtils.toLowerCase(mixedCaseFilename));
-    assertFalse("File exists" + upper, fs.exists(upper));
-    assertFalse("File exists" + lower, fs.exists(lower));
+    assertFalse(fs.exists(upper), "File exists" + upper);
+    assertFalse(fs.exists(lower), "File exists" + lower);
     FSDataOutputStream out = fs.create(upper);
     out.writeUTF("UPPER");
     out.close();
     FileStatus upperStatus = fs.getFileStatus(upper);
-    assertTrue("File does not exist" + upper, fs.exists(upper));
+    assertTrue(fs.exists(upper), "File does not exist" + upper);
     //verify the lower-case version of the filename doesn't exist
-    assertFalse("File exists" + lower, fs.exists(lower));
+    assertFalse(fs.exists(lower), "File exists" + lower);
     //now overwrite the lower case version of the filename with a
     //new version.
     out = fs.create(lower);
     out.writeUTF("l");
     out.close();
-    assertTrue("File does not exist" + lower, fs.exists(lower));
+    assertTrue(fs.exists(lower), "File does not exist" + lower);
     //verify the length of the upper file hasn't changed
     FileStatus newStatus = fs.getFileStatus(upper);
-    assertEquals("Expected status:" + upperStatus
-                 + " actual status " + newStatus,
-                 upperStatus.getLen(),
-                 newStatus.getLen()); }
+    assertEquals(upperStatus.getLen(),
+        newStatus.getLen(), "Expected status:" + upperStatus
+        + " actual status " + newStatus); }
 
   /**
    * Asserts that a zero byte file has a status of file and not
@@ -693,7 +691,7 @@ public abstract class FileSystemContractBaseTest {
     fs.getFileStatus(path("/"));
     //this catches overrides of the base exists() method that don't
     //use getFileStatus() as an existence probe
-    assertTrue("FileSystem.exists() fails for root", fs.exists(path("/")));
+    assertTrue(fs.exists(path("/")), "FileSystem.exists() fails for root");
   }
 
   /**
@@ -789,8 +787,8 @@ public abstract class FileSystemContractBaseTest {
     Path parent = testdir.getParent();
     //the outcome here is ambiguous, so is not checked
     fs.rename(testdir, parent);
-    assertEquals("Source exists: " + testdir, true, fs.exists(testdir));
-    assertEquals("Destination exists" + parent, true, fs.exists(parent));
+    assertEquals(true, fs.exists(testdir), "Source exists: " + testdir);
+    assertEquals(true, fs.exists(parent), "Destination exists" + parent);
   }
 
   /**
@@ -855,9 +853,8 @@ public abstract class FileSystemContractBaseTest {
         found = true;
       }
     }
-    assertTrue("Path " + subdir
-               + " not found in directory " + dir + ":" + builder,
-               found);
+    assertTrue(found, "Path " + subdir
+        + " not found in directory " + dir + ":" + builder);
   }
 
   protected void assertListStatusFinds(Path dir, Path subdir)
@@ -871,9 +868,8 @@ public abstract class FileSystemContractBaseTest {
         found = true;
       }
     }
-    assertTrue("Path " + subdir
-               + " not found in directory " + dir + ":" + builder,
-               found);
+    assertTrue(found, "Path " + subdir
+        + " not found in directory " + dir + ":" + builder);
   }
 
 
@@ -884,14 +880,14 @@ public abstract class FileSystemContractBaseTest {
    * @throws IOException IO problems during file operations
    */
   private void assertIsFile(Path filename) throws IOException {
-    assertTrue("Does not exist: " + filename, fs.exists(filename));
+    assertTrue(fs.exists(filename), "Does not exist: " + filename);
     FileStatus status = fs.getFileStatus(filename);
     String fileInfo = filename + "  " + status;
-    assertTrue("Not a file " + fileInfo, status.isFile());
-    assertFalse("File claims to be a symlink " + fileInfo,
-                status.isSymlink());
-    assertFalse("File claims to be a directory " + fileInfo,
-                status.isDirectory());
+    assertTrue(status.isFile(), "Not a file " + fileInfo);
+    assertFalse(status.isSymlink(),
+        "File claims to be a symlink " + fileInfo);
+    assertFalse(status.isDirectory(),
+        "File claims to be a directory " + fileInfo);
   }
 
   /**
@@ -918,8 +914,8 @@ public abstract class FileSystemContractBaseTest {
   protected void writeAndRead(Path path, byte[] src, int len,
                               boolean overwrite,
                               boolean delete) throws IOException {
-    assertTrue("Not enough data in source array to write " + len + " bytes",
-               src.length >= len);
+    assertTrue(src.length >= len,
+        "Not enough data in source array to write " + len + " bytes");
     fs.mkdirs(path.getParent());
 
     FSDataOutputStream out = fs.create(path, overwrite,
@@ -929,8 +925,8 @@ public abstract class FileSystemContractBaseTest {
     out.write(src, 0, len);
     out.close();
 
-    assertTrue("Exists", fs.exists(path));
-    assertEquals("Length", len, fs.getFileStatus(path).getLen());
+    assertTrue(fs.exists(path), "Exists");
+    assertEquals(len, fs.getFileStatus(path).getLen(), "Length");
 
     FSDataInputStream in = fs.open(path);
     byte[] buf = new byte[len];
@@ -978,8 +974,8 @@ public abstract class FileSystemContractBaseTest {
 
     if (delete) {
       boolean deleted = fs.delete(path, false);
-      assertTrue("Deleted", deleted);
-      assertFalse("No longer exists", fs.exists(path));
+      assertTrue(deleted, "Deleted");
+      assertFalse(fs.exists(path), "No longer exists");
     }
   }
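
Beyond the assertion changes, FileSystemContractBaseTest drops the JUnit 4 @Rule based Timeout (configured in milliseconds) in favour of the class-level org.junit.jupiter.api.Timeout annotation, whose default unit is seconds, and switches from org.junit.Assume to org.junit.jupiter.api.Assumptions. A rough sketch of the same pattern on a hypothetical class:

// Hypothetical sketch of the timeout and assumption migration (not the Hadoop class).
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assumptions.assumeTrue;

import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

// JUnit 4:
//   @Rule
//   public Timeout globalTimeout = new Timeout(30_000, TimeUnit.MILLISECONDS);
// JUnit 5: a single class-level annotation; TimeUnit.SECONDS is the default unit.
@Timeout(value = 30, unit = TimeUnit.SECONDS)
public class ExampleTimeoutTest {

  @Test
  public void testWithAssumption() {
    // JUnit 4: Assume.assumeTrue("requires native support", nativeAvailable());
    // JUnit 5: the message moves to the last argument.
    assumeTrue(nativeAvailable(), "requires native support");
    assertEquals(4, 2 + 2);
  }

  private boolean nativeAvailable() {
    return true;
  }
}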
 

+ 8 - 7
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java

@@ -25,9 +25,10 @@ import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.mock;
 
 /**
@@ -241,15 +242,15 @@ public class FileSystemTestHelper {
   public static void checkFileStatus(FileSystem aFs, String path,
       fileType expectedType) throws IOException {
     FileStatus s = aFs.getFileStatus(new Path(path));
-    Assert.assertNotNull(s);
+    assertNotNull(s);
     if (expectedType == fileType.isDir) {
-      Assert.assertTrue(s.isDirectory());
+      assertTrue(s.isDirectory());
     } else if (expectedType == fileType.isFile) {
-      Assert.assertTrue(s.isFile());
+      assertTrue(s.isFile());
     } else if (expectedType == fileType.isSymlink) {
-      Assert.assertTrue(s.isSymlink());
+      assertTrue(s.isSymlink());
     }
-    Assert.assertEquals(aFs.makeQualified(new Path(path)), s.getPath());
+    assertEquals(aFs.makeQualified(new Path(path)), s.getPath());
   }
   
   /**

+ 14 - 11
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestWrapper.java

@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.fs;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 import java.io.DataInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -29,7 +33,6 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
-import org.junit.Assert;
 
 /**
  * Helper class for unit tests.
@@ -170,29 +173,29 @@ public final class FileSystemTestWrapper extends FSTestWrapper {
   public void checkFileStatus(String path, fileType expectedType)
       throws IOException {
     FileStatus s = fs.getFileStatus(new Path(path));
-    Assert.assertNotNull(s);
+    assertNotNull(s);
     if (expectedType == fileType.isDir) {
-      Assert.assertTrue(s.isDirectory());
+      assertTrue(s.isDirectory());
     } else if (expectedType == fileType.isFile) {
-      Assert.assertTrue(s.isFile());
+      assertTrue(s.isFile());
     } else if (expectedType == fileType.isSymlink) {
-      Assert.assertTrue(s.isSymlink());
+      assertTrue(s.isSymlink());
     }
-    Assert.assertEquals(fs.makeQualified(new Path(path)), s.getPath());
+    assertEquals(fs.makeQualified(new Path(path)), s.getPath());
   }
 
   public void checkFileLinkStatus(String path, fileType expectedType)
       throws IOException {
     FileStatus s = fs.getFileLinkStatus(new Path(path));
-    Assert.assertNotNull(s);
+    assertNotNull(s);
     if (expectedType == fileType.isDir) {
-      Assert.assertTrue(s.isDirectory());
+      assertTrue(s.isDirectory());
     } else if (expectedType == fileType.isFile) {
-      Assert.assertTrue(s.isFile());
+      assertTrue(s.isFile());
     } else if (expectedType == fileType.isSymlink) {
-      Assert.assertTrue(s.isSymlink());
+      assertTrue(s.isSymlink());
     }
-    Assert.assertEquals(fs.makeQualified(new Path(path)), s.getPath());
+    assertEquals(fs.makeQualified(new Path(path)), s.getPath());
   }
 
   //

+ 9 - 9
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestChecksumFs.java

@@ -21,9 +21,9 @@ package org.apache.hadoop.fs;
 import java.io.IOException;
 import java.util.EnumSet;
 
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -40,7 +40,7 @@ public class TestChecksumFs extends HadoopTestBase {
   private Path testRootDirPath;
   private FileContext fc;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = getTestConfiguration();
     fc = FileContext.getFileContext(conf);
@@ -49,7 +49,7 @@ public class TestChecksumFs extends HadoopTestBase {
     mkdirs(testRootDirPath);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (fc != null) {
       fc.delete(testRootDirPath, true);
@@ -101,11 +101,11 @@ public class TestChecksumFs extends HadoopTestBase {
 
     // ensure file + checksum are moved
     createTestFile(fs, srcPath, 1);
-    assertTrue("Checksum file doesn't exist for source file - " + srcPath,
-        fc.util().exists(fs.getChecksumFile(srcPath)));
+    assertTrue(fc.util().exists(fs.getChecksumFile(srcPath)),
+        "Checksum file doesn't exist for source file - " + srcPath);
     fs.rename(srcPath, dstPath, renameOpt);
-    assertTrue("Checksum file doesn't exist for dest file - " + srcPath,
-        fc.util().exists(fs.getChecksumFile(dstPath)));
+    assertTrue(fc.util().exists(fs.getChecksumFile(dstPath)),
+        "Checksum file doesn't exist for dest file - " + srcPath);
     try (FSDataInputStream is = fs.open(dstPath)) {
       assertEquals(1, is.readInt());
     }

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFcLocalFsUtil.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
-import org.junit.Before;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Test Util for localFs using FileContext API.
@@ -26,7 +26,7 @@ public class TestFcLocalFsUtil extends
   FileContextUtilBase {
 
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fc = FileContext.getLocalFSFileContext();
     super.setUp();

+ 14 - 8
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCaching.java

@@ -37,13 +37,20 @@ import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Listenable
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService;
 import org.apache.hadoop.util.BlockingThreadPoolExecutorService;
 
-import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_CREATION_PARALLEL_COUNT;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.mockito.Mockito.*;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.anyBoolean;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.reset;
 
 public class TestFileSystemCaching extends HadoopTestBase {
 
@@ -355,7 +362,7 @@ public class TestFileSystemCaching extends HadoopTestBase {
   public void testCacheSingleSemaphoredConstruction() throws Exception {
     FileSystem.Cache cache = semaphoredCache(1);
     createFileSystems(cache, 10);
-    Assertions.assertThat(cache.getDiscardedInstances())
+    assertThat(cache.getDiscardedInstances())
         .describedAs("Discarded FS instances")
         .isEqualTo(0);
   }
@@ -374,7 +381,7 @@ public class TestFileSystemCaching extends HadoopTestBase {
   public void testCacheDualSemaphoreConstruction() throws Exception {
     FileSystem.Cache cache = semaphoredCache(2);
     createFileSystems(cache, 10);
-    Assertions.assertThat(cache.getDiscardedInstances())
+    assertThat(cache.getDiscardedInstances())
         .describedAs("Discarded FS instances")
         .isEqualTo(1);
   }
@@ -393,7 +400,7 @@ public class TestFileSystemCaching extends HadoopTestBase {
     FileSystem.Cache cache = semaphoredCache(999);
     int count = 10;
     createFileSystems(cache, count);
-    Assertions.assertThat(cache.getDiscardedInstances())
+    assertThat(cache.getDiscardedInstances())
         .describedAs("Discarded FS instances")
         .isEqualTo(count -1);
   }
@@ -450,8 +457,7 @@ public class TestFileSystemCaching extends HadoopTestBase {
     // verify all the others are the same instance
     for (int i = 1; i < count; i++) {
       FileSystem fs = futures.get(i).get();
-      Assertions.assertThat(fs)
-          .isSameAs(createdFS);
+      assertThat(fs).isSameAs(createdFS);
     }
   }
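
The TestFileSystemCaching hunks are mostly import hygiene: the wildcard import static org.mockito.Mockito.* is expanded into explicit static imports, and the qualified Assertions.assertThat calls become a static import of AssertJ's assertThat. A small hypothetical example of that style:

// Hypothetical sketch of the explicit-static-import style adopted in this commit.
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import java.util.List;

import org.junit.jupiter.api.Test;

public class ExampleMockingTest {

  @Test
  public void testMockedLookup() {
    @SuppressWarnings("unchecked")
    List<String> mocked = mock(List.class);
    when(mocked.get(0)).thenReturn("hello");

    assertThat(mocked.get(0))
        .describedAs("value returned by the mock")
        .isEqualTo("hello");

    verify(mocked).get(0);
  }
}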
 

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestGetEnclosingRoot.java

@@ -22,7 +22,7 @@ import java.security.PrivilegedExceptionAction;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.HadoopTestBase;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestGetEnclosingRoot extends HadoopTestBase {
   @Test

+ 8 - 8
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java

@@ -27,11 +27,11 @@ import org.apache.hadoop.test.StatUtils;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.Shell;
 
-import org.junit.Before;
-import org.junit.Test;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assume.assumeTrue;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -60,7 +60,7 @@ public class TestRawLocalFileSystemContract extends FileSystemContractBaseTest {
     return HAS_DRIVE_LETTER_SPECIFIER.matcher(filesys).find();
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     Configuration conf = new Configuration();
     fs = FileSystem.getLocal(conf).getRawFileSystem();
@@ -129,8 +129,8 @@ public class TestRawLocalFileSystemContract extends FileSystemContractBaseTest {
   @Test
   @SuppressWarnings("deprecation")
   public void testPermission() throws Exception {
-    assumeTrue("No native library",
-        NativeCodeLoader.isNativeCodeLoaded());
+    assumeTrue(NativeCodeLoader.isNativeCodeLoaded(),
+        "No native library");
     Path testDir = getTestBaseDir();
     String testFilename = "teststat2File";
     Path path = new Path(testDir, testFilename);

+ 7 - 7
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/impl/TestFutureIO.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.impl;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.test.HadoopTestBase;
 import org.apache.hadoop.util.LambdaUtils;
@@ -35,7 +35,7 @@ public class TestFutureIO extends HadoopTestBase {
 
   private ThreadLocal<AtomicInteger> local;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     local = ThreadLocal.withInitial(() -> new AtomicInteger(1));
   }
@@ -50,8 +50,8 @@ public class TestFutureIO extends HadoopTestBase {
         () -> {
           return getLocal().addAndGet(2);
         });
-    assertEquals("Thread local value", 3, getLocalValue());
-    assertEquals("Evaluated Value", 3, eval.get().intValue());
+    assertEquals(3, getLocalValue(), "Thread local value");
+    assertEquals(3, eval.get().intValue(), "Evaluated Value");
   }
 
   /**
@@ -61,8 +61,8 @@ public class TestFutureIO extends HadoopTestBase {
   public void testEvalAsync() throws Throwable {
     final CompletableFuture<Integer> eval = CompletableFuture.supplyAsync(
         () -> getLocal().addAndGet(2));
-    assertEquals("Thread local value", 1, getLocalValue());
-    assertEquals("Evaluated Value", 3, eval.get().intValue());
+    assertEquals(1, getLocalValue(), "Thread local value");
+    assertEquals(3, eval.get().intValue(), "Evaluated Value");
   }
 
 

+ 45 - 45
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/impl/TestVectoredReadUtils.java

@@ -30,10 +30,9 @@ import java.util.concurrent.CompletableFuture;
 import java.util.function.Consumer;
 import java.util.function.IntFunction;
 
-import org.assertj.core.api.Assertions;
 import org.assertj.core.api.ListAssert;
 import org.assertj.core.api.ObjectAssert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.ArgumentMatchers;
 import org.mockito.Mockito;
 
@@ -57,6 +56,7 @@ import static org.apache.hadoop.fs.VectoredReadUtils.validateAndSortRanges;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.apache.hadoop.test.MoreAsserts.assertFutureCompletedSuccessfully;
 import static org.apache.hadoop.test.MoreAsserts.assertFutureFailedExceptionally;
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * Test behavior of {@link VectoredReadUtils}.
@@ -78,11 +78,11 @@ public class TestVectoredReadUtils extends HadoopTestBase {
     // ensure we don't make unnecessary slices
     ByteBuffer slice = VectoredReadUtils.sliceTo(buffer, 100,
         createFileRange(100, size));
-    Assertions.assertThat(buffer)
+    assertThat(buffer)
             .describedAs("Slicing on the same offset shouldn't " +
                     "create a new buffer")
             .isEqualTo(slice);
-    Assertions.assertThat(slice.position())
+    assertThat(slice.position())
         .describedAs("Slicing should return buffers starting from position 0")
         .isEqualTo(0);
 
@@ -93,19 +93,19 @@ public class TestVectoredReadUtils extends HadoopTestBase {
     slice = VectoredReadUtils.sliceTo(buffer, offset,
         createFileRange(offset + sliceStart, sliceLength));
     // make sure they aren't the same, but use the same backing data
-    Assertions.assertThat(buffer)
+    assertThat(buffer)
         .describedAs("Slicing on new offset should create a new buffer")
         .isNotEqualTo(slice);
-    Assertions.assertThat(buffer.array())
+    assertThat(buffer.array())
         .describedAs("Slicing should use the same underlying data")
         .isEqualTo(slice.array());
-    Assertions.assertThat(slice.position())
+    assertThat(slice.position())
         .describedAs("Slicing should return buffers starting from position 0")
         .isEqualTo(0);
     // test the contents of the slice
     intBuffer = slice.asIntBuffer();
     for(int i=0; i < sliceLength / Integer.BYTES; ++i) {
-      assertEquals("i = " + i, i + sliceStart / Integer.BYTES, intBuffer.get());
+      assertEquals(i + sliceStart / Integer.BYTES, intBuffer.get(), "i = " + i);
     }
   }
 
@@ -116,11 +116,11 @@ public class TestVectoredReadUtils extends HadoopTestBase {
   @Test
   public void testRounding() {
     for (int i = 5; i < 10; ++i) {
-      assertEquals("i = " + i, 5, VectoredReadUtils.roundDown(i, 5));
-      assertEquals("i = " + i, 10, VectoredReadUtils.roundUp(i + 1, 5));
+      assertEquals(5, VectoredReadUtils.roundDown(i, 5), "i = " + i);
+      assertEquals(10, VectoredReadUtils.roundUp(i + 1, 5), "i = " + i);
     }
-    assertEquals("Error while roundDown", 13, VectoredReadUtils.roundDown(13, 1));
-    assertEquals("Error while roundUp", 13, VectoredReadUtils.roundUp(13, 1));
+    assertEquals(13, VectoredReadUtils.roundDown(13, 1), "Error while roundDown");
+    assertEquals(13, VectoredReadUtils.roundUp(13, 1), "Error while roundUp");
   }
 
   /**
@@ -135,32 +135,32 @@ public class TestVectoredReadUtils extends HadoopTestBase {
     CombinedFileRange mergeBase = new CombinedFileRange(2000, 3000, base);
 
     // test when the gap between is too big
-    assertFalse("Large gap ranges shouldn't get merged", mergeBase.merge(5000, 6000,
-        createFileRange(5000, 1000), 2000, 4000));
+    assertFalse(mergeBase.merge(5000, 6000,
+        createFileRange(5000, 1000), 2000, 4000), "Large gap ranges shouldn't get merged");
     assertUnderlyingSize(mergeBase,
         "Number of ranges in merged range shouldn't increase",
         1);
     assertFileRange(mergeBase, 2000, 1000);
 
     // test when the total size gets exceeded
-    assertFalse("Large size ranges shouldn't get merged",
+    assertFalse(
         mergeBase.merge(5000, 6000,
-        createFileRange(5000, 1000), 2001, 3999));
-    assertEquals("Number of ranges in merged range shouldn't increase",
-        1, mergeBase.getUnderlying().size());
+        createFileRange(5000, 1000), 2001, 3999), "Large size ranges shouldn't get merged");
+    assertEquals(1, mergeBase.getUnderlying().size(),
+        "Number of ranges in merged range shouldn't increase");
     assertFileRange(mergeBase, 2000, 1000);
 
     // test when the merge works
-    assertTrue("ranges should get merged ", mergeBase.merge(5000, 6000,
+    assertTrue(mergeBase.merge(5000, 6000,
         createFileRange(5000, 1000, tracker2),
-        2001, 4000));
+        2001, 4000), "ranges should get merged ");
     assertUnderlyingSize(mergeBase, "merge list after merge", 2);
     assertFileRange(mergeBase, 2000, 4000);
 
-    Assertions.assertThat(mergeBase.getUnderlying().get(0).getReference())
+    assertThat(mergeBase.getUnderlying().get(0).getReference())
         .describedAs("reference of range %s", mergeBase.getUnderlying().get(0))
         .isSameAs(tracker1);
-    Assertions.assertThat(mergeBase.getUnderlying().get(1).getReference())
+    assertThat(mergeBase.getUnderlying().get(1).getReference())
         .describedAs("reference of range %s", mergeBase.getUnderlying().get(1))
         .isSameAs(tracker2);
 
@@ -168,8 +168,8 @@ public class TestVectoredReadUtils extends HadoopTestBase {
     mergeBase = new CombinedFileRange(200, 300, base);
     assertFileRange(mergeBase, 200, 100);
 
-    assertTrue("ranges should get merged ", mergeBase.merge(500, 600,
-        createFileRange(5000, 1000), 201, 400));
+    assertTrue(mergeBase.merge(500, 600,
+        createFileRange(5000, 1000), 201, 400), "ranges should get merged ");
     assertUnderlyingSize(mergeBase, "merge list after merge", 2);
     assertFileRange(mergeBase, 200, 400);
   }
@@ -184,7 +184,7 @@ public class TestVectoredReadUtils extends HadoopTestBase {
       final CombinedFileRange combinedFileRange,
       final String description,
       final int expected) {
-    return Assertions.assertThat(combinedFileRange.getUnderlying())
+    return assertThat(combinedFileRange.getUnderlying())
         .describedAs(description)
         .hasSize(expected);
   }
@@ -267,13 +267,13 @@ public class TestVectoredReadUtils extends HadoopTestBase {
   private static <ELEMENT extends FileRange> void assertFileRange(
       ELEMENT range, long start, int length) {
 
-    Assertions.assertThat(range)
+    assertThat(range)
         .describedAs("file range %s", range)
         .isNotNull();
-    Assertions.assertThat(range.getOffset())
+    assertThat(range.getOffset())
         .describedAs("offset of %s", range)
         .isEqualTo(start);
-    Assertions.assertThat(range.getLength())
+    assertThat(range.getLength())
         .describedAs("length of %s", range)
         .isEqualTo(length);
   }
@@ -291,10 +291,10 @@ public class TestVectoredReadUtils extends HadoopTestBase {
         );
     final FileRange[] rangeArray = sortRanges(input);
     final List<? extends FileRange> rangeList = sortRangeList(input);
-    Assertions.assertThat(rangeArray)
+    assertThat(rangeArray)
         .describedAs("range array from sortRanges()")
         .isSortedAccordingTo(Comparator.comparingLong(FileRange::getOffset));
-    Assertions.assertThat(rangeList.toArray(new FileRange[0]))
+    assertThat(rangeList.toArray(new FileRange[0]))
         .describedAs("range from sortRangeList()")
         .isEqualTo(rangeArray);
   }
@@ -311,7 +311,7 @@ public class TestVectoredReadUtils extends HadoopTestBase {
       ELEMENT range, long offset, int length, Object reference) {
 
     assertFileRange(range, offset, length);
-    Assertions.assertThat(range.getReference())
+    assertThat(range.getReference())
         .describedAs("reference field of file range %s", range)
         .isEqualTo(reference);
   }
@@ -342,7 +342,7 @@ public class TestVectoredReadUtils extends HadoopTestBase {
   private static <ELEMENT extends FileRange> ListAssert<ELEMENT> assertRangeListSize(
       final List<ELEMENT> ranges,
       final int size) {
-    return Assertions.assertThat(ranges)
+    return assertThat(ranges)
         .describedAs("coalesced ranges")
         .hasSize(size);
   }
@@ -357,7 +357,7 @@ public class TestVectoredReadUtils extends HadoopTestBase {
   private static <ELEMENT extends FileRange> ListAssert<ELEMENT> assertRangesCountAtLeast(
       final List<ELEMENT> ranges,
       final int size) {
-    return Assertions.assertThat(ranges)
+    return assertThat(ranges)
         .describedAs("coalesced ranges")
         .hasSizeGreaterThanOrEqualTo(size);
   }
@@ -392,7 +392,7 @@ public class TestVectoredReadUtils extends HadoopTestBase {
       List<? extends FileRange> input,
       int chunkSize,
       int minimumSeek) {
-    Assertions.assertThat(isOrderedDisjoint(input, chunkSize, minimumSeek))
+    assertThat(isOrderedDisjoint(input, chunkSize, minimumSeek))
         .describedAs("ranges are ordered and disjoint")
         .isTrue();
   }
@@ -407,7 +407,7 @@ public class TestVectoredReadUtils extends HadoopTestBase {
       List<ELEMENT> input,
       int chunkSize,
       int minimumSeek) {
-    Assertions.assertThat(isOrderedDisjoint(input, chunkSize, minimumSeek))
+    assertThat(isOrderedDisjoint(input, chunkSize, minimumSeek))
         .describedAs("Ranges are non disjoint/ordered")
         .isFalse();
   }
@@ -426,7 +426,7 @@ public class TestVectoredReadUtils extends HadoopTestBase {
     assertIsNotOrderedDisjoint(input, 100, 800);
     List<CombinedFileRange> outputList = mergeSortedRanges(
             sortRangeList(input), 1, 1001, 2500);
-    Assertions.assertThat(outputList)
+    assertThat(outputList)
             .describedAs("merged range size")
             .hasSize(1);
     CombinedFileRange output = outputList.get(0);
@@ -551,10 +551,10 @@ public class TestVectoredReadUtils extends HadoopTestBase {
         ByteBuffer::allocate);
     assertFutureCompletedSuccessfully(result);
     ByteBuffer buffer = result.get();
-    assertEquals("Size of result buffer", 100, buffer.remaining());
+    assertEquals(100, buffer.remaining(), "Size of result buffer");
     byte b = 0;
     while (buffer.remaining() > 0) {
-      assertEquals("remain = " + buffer.remaining(), b++, buffer.get());
+      assertEquals(b++, buffer.get(), "remain = " + buffer.remaining());
     }
   }
 
@@ -597,7 +597,7 @@ public class TestVectoredReadUtils extends HadoopTestBase {
             allocate);
     assertFutureCompletedSuccessfully(result);
     ByteBuffer buffer = result.get();
-    assertEquals("Size of result buffer", 100, buffer.remaining());
+    assertEquals(100, buffer.remaining(), "Size of result buffer");
     validateBuffer("buffer", buffer, 0);
 
 
@@ -639,8 +639,8 @@ public class TestVectoredReadUtils extends HadoopTestBase {
   private static void validateBuffer(String message, ByteBuffer buffer, int start) {
     byte expected = (byte) start;
     while (buffer.remaining() > 0) {
-      assertEquals(message + " remain: " + buffer.remaining(), expected,
-          buffer.get());
+      assertEquals(expected,
+          buffer.get(), message + " remain: " + buffer.remaining());
       // increment with wrapping.
       expected = (byte) (expected + 1);
     }
@@ -668,7 +668,7 @@ public class TestVectoredReadUtils extends HadoopTestBase {
     runAndValidateVectoredRead(input);
     // look up by name and validate.
     final FileRange r1 = retrieve(input, "1");
-    Assertions.assertThat(r1.getData().get().limit())
+    assertThat(r1.getData().get().limit())
         .describedAs("Data limit of %s", r1)
         .isEqualTo(0);
   }
@@ -688,7 +688,7 @@ public class TestVectoredReadUtils extends HadoopTestBase {
   }
 
   /**
-   * Mock run a vectored read and validate the results with the assertions.
+   * Mock run a vectored read and validate the results with the
    * <ol>
    *   <li> {@code ByteBufferPositionedReadable.readFully()} is invoked once per range.</li>
    *   <li> The buffers are filled with data</li>
@@ -833,7 +833,7 @@ public class TestVectoredReadUtils extends HadoopTestBase {
 
     // inlined lambda to assert the pool size
     Consumer<Integer> assertPoolSizeEquals = (size) -> {
-      Assertions.assertThat(elasticByteBufferPool.size(false))
+      assertThat(elasticByteBufferPool.size(false))
           .describedAs("Pool size")
           .isEqualTo(size);
     };
@@ -855,7 +855,7 @@ public class TestVectoredReadUtils extends HadoopTestBase {
 
     // expect the returned buffer back
     ByteBuffer b3 = vectorBuffers.getBuffer(true, 100);
-    Assertions.assertThat(b3)
+    assertThat(b3)
         .describedAs("buffer returned from a get after a previous one was returned")
         .isSameAs(b1);
     assertPoolSizeEquals.accept(0);

+ 4 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMoreWeakReferencedElasticByteBufferPool.java

@@ -21,12 +21,12 @@ package org.apache.hadoop.io;
 import java.nio.BufferOverflowException;
 import java.nio.ByteBuffer;
 
-import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.test.HadoopTestBase;
 
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * Non parameterized tests for {@code WeakReferencedElasticByteBufferPool}.
@@ -87,10 +87,10 @@ public class TestMoreWeakReferencedElasticByteBufferPool
   private void assertBufferCounts(WeakReferencedElasticByteBufferPool pool,
                                   int numDirectBuffersExpected,
                                   int numHeapBuffersExpected) {
-    Assertions.assertThat(pool.getCurrentBuffersCount(true))
+    assertThat(pool.getCurrentBuffersCount(true))
             .describedAs("Number of direct buffers in pool")
             .isEqualTo(numDirectBuffersExpected);
-    Assertions.assertThat(pool.getCurrentBuffersCount(false))
+    assertThat(pool.getCurrentBuffersCount(false))
             .describedAs("Number of heap buffers in pool")
             .isEqualTo(numHeapBuffersExpected);
   }
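
The next diff, TestWeakReferencedElasticByteBufferPool, carries the most structural change in the commit: the JUnit 4 Parameterized runner with constructor injection is replaced by @ParameterizedTest plus @MethodSource, with each test invoking an explicit init method. A minimal hypothetical sketch of that pattern:

// Hypothetical sketch of the Parameterized-runner migration (not the Hadoop class).
import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.Arrays;
import java.util.List;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

public class ExampleParameterizedTest {

  // JUnit 4: annotated with @Parameterized.Parameters(name = "Buffer type : {0}")
  // and consumed through the test class constructor.
  public static List<String> params() {
    return Arrays.asList("direct", "array");
  }

  private boolean isDirect;

  // JUnit 5 parameterized tests receive the value as a method argument,
  // so the former constructor logic moves into an init method.
  private void init(String type) {
    this.isDirect = !"array".equals(type);
  }

  @ParameterizedTest(name = "Buffer type : {0}")
  @MethodSource("params")
  public void testBufferType(String type) {
    init(type);
    assertEquals(!"array".equals(type), isDirect);
  }
}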

+ 68 - 62
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWeakReferencedElasticByteBufferPool.java

@@ -23,153 +23,157 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.Random;
 
-import org.assertj.core.api.Assertions;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
 import org.apache.hadoop.test.HadoopTestBase;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * Unit tests for {@code WeakReferencedElasticByteBufferPool}.
  */
-@RunWith(Parameterized.class)
 public class TestWeakReferencedElasticByteBufferPool
         extends HadoopTestBase {
 
-  private final boolean isDirect;
+  private boolean isDirect;
 
-  private final String type;
+  private String type;
 
-  @Parameterized.Parameters(name = "Buffer type : {0}")
   public static List<String> params() {
     return Arrays.asList("direct", "array");
   }
 
-  public TestWeakReferencedElasticByteBufferPool(String type) {
-    this.type = type;
-    this.isDirect = !"array".equals(type);
+  public void initTestWeakReferencedElasticByteBufferPool(String pType) {
+    this.type = pType;
+    this.isDirect = !"array".equals(pType);
   }
 
-  @Test
-  public void testGetAndPutBasic() {
+  @ParameterizedTest(name = "Buffer type : {0}")
+  @MethodSource("params")
+  public void testGetAndPutBasic(String pType) {
+    initTestWeakReferencedElasticByteBufferPool(pType);
     WeakReferencedElasticByteBufferPool pool = new WeakReferencedElasticByteBufferPool();
     int bufferSize = 5;
     ByteBuffer buffer = pool.getBuffer(isDirect, bufferSize);
-    Assertions.assertThat(buffer.isDirect())
-            .describedAs("Buffered returned should be of correct type {}", type)
-            .isEqualTo(isDirect);
-    Assertions.assertThat(buffer.capacity())
-            .describedAs("Initial capacity of returned buffer from pool")
-            .isEqualTo(bufferSize);
-    Assertions.assertThat(buffer.position())
-            .describedAs("Initial position of returned buffer from pool")
-            .isEqualTo(0);
+    assertThat(buffer.isDirect())
+        .describedAs("Buffered returned should be of correct type {}", type)
+        .isEqualTo(isDirect);
+    assertThat(buffer.capacity())
+        .describedAs("Initial capacity of returned buffer from pool")
+        .isEqualTo(bufferSize);
+    assertThat(buffer.position())
+        .describedAs("Initial position of returned buffer from pool")
+        .isEqualTo(0);
 
     byte[] arr = createByteArray(bufferSize);
     buffer.put(arr, 0, arr.length);
     buffer.flip();
     validateBufferContent(buffer, arr);
-    Assertions.assertThat(buffer.position())
-            .describedAs("Buffer's position after filling bytes in it")
-            .isEqualTo(bufferSize);
+    assertThat(buffer.position())
+        .describedAs("Buffer's position after filling bytes in it")
+        .isEqualTo(bufferSize);
     // releasing buffer to the pool.
     pool.putBuffer(buffer);
-    Assertions.assertThat(buffer.position())
-            .describedAs("Position should be reset to 0 after returning buffer to the pool")
-            .isEqualTo(0);
-
+    assertThat(buffer.position())
+        .describedAs("Position should be reset to 0 after returning buffer to the pool")
+        .isEqualTo(0);
   }
 
-  @Test
-  public void testPoolingWithDifferentSizes() {
+  @ParameterizedTest(name = "Buffer type : {0}")
+  @MethodSource("params")
+  public void testPoolingWithDifferentSizes(String pType) {
+    initTestWeakReferencedElasticByteBufferPool(pType);
     WeakReferencedElasticByteBufferPool pool = new WeakReferencedElasticByteBufferPool();
     ByteBuffer buffer = pool.getBuffer(isDirect, 5);
     ByteBuffer buffer1 = pool.getBuffer(isDirect, 10);
     ByteBuffer buffer2 = pool.getBuffer(isDirect, 15);
 
-    Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+    assertThat(pool.getCurrentBuffersCount(isDirect))
             .describedAs("Number of buffers in the pool")
             .isEqualTo(0);
 
     pool.putBuffer(buffer1);
     pool.putBuffer(buffer2);
-    Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+    assertThat(pool.getCurrentBuffersCount(isDirect))
             .describedAs("Number of buffers in the pool")
             .isEqualTo(2);
     ByteBuffer buffer3 = pool.getBuffer(isDirect, 12);
-    Assertions.assertThat(buffer3.capacity())
+    assertThat(buffer3.capacity())
             .describedAs("Pooled buffer should have older capacity")
             .isEqualTo(15);
-    Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+    assertThat(pool.getCurrentBuffersCount(isDirect))
             .describedAs("Number of buffers in the pool")
             .isEqualTo(1);
     pool.putBuffer(buffer);
     ByteBuffer buffer4 = pool.getBuffer(isDirect, 6);
-    Assertions.assertThat(buffer4.capacity())
+    assertThat(buffer4.capacity())
             .describedAs("Pooled buffer should have older capacity")
             .isEqualTo(10);
-    Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+    assertThat(pool.getCurrentBuffersCount(isDirect))
             .describedAs("Number of buffers in the pool")
             .isEqualTo(1);
 
     pool.release();
-    Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+    assertThat(pool.getCurrentBuffersCount(isDirect))
             .describedAs("Number of buffers in the pool post release")
             .isEqualTo(0);
   }
 
-  @Test
-  public void testPoolingWithDifferentInsertionTime() {
+  @ParameterizedTest(name = "Buffer type : {0}")
+  @MethodSource("params")
+  public void testPoolingWithDifferentInsertionTime(String pType) {
+    initTestWeakReferencedElasticByteBufferPool(pType);
     WeakReferencedElasticByteBufferPool pool = new WeakReferencedElasticByteBufferPool();
     ByteBuffer buffer = pool.getBuffer(isDirect, 10);
     ByteBuffer buffer1 = pool.getBuffer(isDirect, 10);
     ByteBuffer buffer2 = pool.getBuffer(isDirect, 10);
 
-    Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+    assertThat(pool.getCurrentBuffersCount(isDirect))
             .describedAs("Number of buffers in the pool")
             .isEqualTo(0);
 
     pool.putBuffer(buffer1);
     pool.putBuffer(buffer2);
-    Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+    assertThat(pool.getCurrentBuffersCount(isDirect))
             .describedAs("Number of buffers in the pool")
             .isEqualTo(2);
     ByteBuffer buffer3 = pool.getBuffer(isDirect, 10);
     // As buffer1 is returned to the pool before buffer2, it should
     // be returned when buffer of same size is asked again from
     // the pool. Memory references must match not just content
-    // that is why {@code Assertions.isSameAs} is used here rather
-    // than usual {@code Assertions.isEqualTo}.
-    Assertions.assertThat(buffer3)
+    // that is why {@code isSameAs} is used here rather
+    // than usual {@code isEqualTo}.
+    assertThat(buffer3)
             .describedAs("Buffers should be returned in order of their " +
                     "insertion time")
             .isSameAs(buffer1);
     pool.putBuffer(buffer);
     ByteBuffer buffer4 = pool.getBuffer(isDirect, 10);
-    Assertions.assertThat(buffer4)
+    assertThat(buffer4)
             .describedAs("Buffers should be returned in order of their " +
                     "insertion time")
             .isSameAs(buffer2);
   }
 
-  @Test
-  public void testGarbageCollection() {
+  @ParameterizedTest(name = "Buffer type : {0}")
+  @MethodSource("params")
+  public void testGarbageCollection(String pType) {
+    initTestWeakReferencedElasticByteBufferPool(pType);
     WeakReferencedElasticByteBufferPool pool = new WeakReferencedElasticByteBufferPool();
     ByteBuffer buffer = pool.getBuffer(isDirect, 5);
     ByteBuffer buffer1 = pool.getBuffer(isDirect, 10);
     ByteBuffer buffer2 = pool.getBuffer(isDirect, 15);
-    Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+    assertThat(pool.getCurrentBuffersCount(isDirect))
             .describedAs("Number of buffers in the pool")
             .isEqualTo(0);
     pool.putBuffer(buffer1);
     pool.putBuffer(buffer2);
-    Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+    assertThat(pool.getCurrentBuffersCount(isDirect))
             .describedAs("Number of buffers in the pool")
             .isEqualTo(2);
     // Before GC.
     ByteBuffer buffer4 = pool.getBuffer(isDirect, 12);
-    Assertions.assertThat(buffer4.capacity())
+    assertThat(buffer4.capacity())
             .describedAs("Pooled buffer should have older capacity")
             .isEqualTo(15);
     pool.putBuffer(buffer4);
@@ -179,14 +183,16 @@ public class TestWeakReferencedElasticByteBufferPool
     buffer4 = null;
     System.gc();
     ByteBuffer buffer3 = pool.getBuffer(isDirect, 12);
-    Assertions.assertThat(buffer3.capacity())
+    assertThat(buffer3.capacity())
             .describedAs("After garbage collection new buffer should be " +
                     "returned with fixed capacity")
             .isEqualTo(12);
   }
 
-  @Test
-  public void testWeakReferencesPruning() {
+  @ParameterizedTest(name = "Buffer type : {0}")
+  @MethodSource("params")
+  public void testWeakReferencesPruning(String pType) {
+    initTestWeakReferencedElasticByteBufferPool(pType);
     WeakReferencedElasticByteBufferPool pool = new WeakReferencedElasticByteBufferPool();
     ByteBuffer buffer1 = pool.getBuffer(isDirect, 5);
     ByteBuffer buffer2 = pool.getBuffer(isDirect, 10);
@@ -194,7 +200,7 @@ public class TestWeakReferencedElasticByteBufferPool
 
     pool.putBuffer(buffer2);
     pool.putBuffer(buffer3);
-    Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+    assertThat(pool.getCurrentBuffersCount(isDirect))
             .describedAs("Number of buffers in the pool")
             .isEqualTo(2);
 
@@ -204,10 +210,10 @@ public class TestWeakReferencedElasticByteBufferPool
     ByteBuffer buffer4 = pool.getBuffer(isDirect, 10);
     // Number of buffers in the pool is 0 as one got garbage
     // collected and other got returned in above call.
-    Assertions.assertThat(pool.getCurrentBuffersCount(isDirect))
+    assertThat(pool.getCurrentBuffersCount(isDirect))
             .describedAs("Number of buffers in the pool")
             .isEqualTo(0);
-    Assertions.assertThat(buffer4.capacity())
+    assertThat(buffer4.capacity())
             .describedAs("After gc, pool should return next greater than " +
                     "available buffer")
             .isEqualTo(15);
@@ -216,10 +222,10 @@ public class TestWeakReferencedElasticByteBufferPool
 
   private void validateBufferContent(ByteBuffer buffer, byte[] arr) {
     for (int i=0; i<arr.length; i++) {
-      Assertions.assertThat(buffer.get())
-              .describedAs("Content of buffer at index {} should match " +
-                      "with content of byte array", i)
-              .isEqualTo(arr[i]);
+      assertThat(buffer.get())
+          .describedAs("Content of buffer at index %d should match " +
+              "the content of the byte array", i)
+          .isEqualTo(arr[i]);
     }
   }
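
The migration pattern in this file — a static params() factory referenced by @MethodSource, with each @ParameterizedTest forwarding its argument to an init method that replaces the JUnit 4 parameterized constructor — recurs across the parameterized tests in this change. A minimal, self-contained sketch of the pattern (class, field, and method names here are illustrative, not taken from the patch):

import java.util.Arrays;
import java.util.Collection;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

import static org.junit.jupiter.api.Assertions.assertEquals;

public class ParameterizedMigrationSketch {

  // In JUnit 4 this was a final field set by the parameterized constructor.
  private String bufferType;

  // Replaces @Parameterized.Parameters: a static factory named by @MethodSource.
  public static Collection<Object[]> params() {
    return Arrays.asList(new Object[][]{
        {"direct"},
        {"array"}
    });
  }

  // Replaces the constructor: every test forwards its parameter here first.
  private void initSketch(String pBufferType) {
    this.bufferType = pBufferType;
  }

  @ParameterizedTest(name = "bufferType={0}")
  @MethodSource("params")
  public void testParameterIsApplied(String pBufferType) {
    initSketch(pBufferType);
    assertEquals(pBufferType, bufferType, "field should be set by the init method");
  }
}

The init call has to be the first statement in each test method, since JUnit 5 no longer constructs one test instance per parameter tuple.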
 

+ 16 - 15
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/HadoopTestBase.java

@@ -17,14 +17,15 @@
  */
 package org.apache.hadoop.test;
 
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.extension.RegisterExtension;
+
 import java.util.concurrent.TimeUnit;
 
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.rules.TestName;
-import org.junit.rules.Timeout;
+import static org.apache.hadoop.test.HadoopTestBase.TEST_DEFAULT_TIMEOUT_VALUE;
 
 /**
  * A base class for JUnit4 tests that sets a default timeout for all tests
@@ -33,7 +34,8 @@ import org.junit.rules.Timeout;
  * Threads are named to the method being executed, for ease of diagnostics
  * in logs and thread dumps.
  */
-public abstract class HadoopTestBase extends Assert {
+@Timeout(value = TEST_DEFAULT_TIMEOUT_VALUE, unit = TimeUnit.MILLISECONDS)
+public abstract class HadoopTestBase extends Assertions {
 
   /**
    * System property name to set the test timeout: {@value}.
@@ -51,8 +53,7 @@ public abstract class HadoopTestBase extends Assert {
   /**
    * The JUnit rule that sets the default timeout for tests.
    */
-  @Rule
-  public Timeout defaultTimeout = retrieveTestTimeout();
+  private int defaultTimeout = retrieveTestTimeout();
 
   /**
    * Retrieve the test timeout from the system property
@@ -61,7 +62,7 @@ public abstract class HadoopTestBase extends Assert {
    * property is not defined.
    * @return the recommended timeout for tests
    */
-  protected Timeout retrieveTestTimeout() {
+  protected int retrieveTestTimeout() {
     String propval = System.getProperty(PROPERTY_TEST_DEFAULT_TIMEOUT,
                                          Integer.toString(
                                            TEST_DEFAULT_TIMEOUT_VALUE));
@@ -72,14 +73,14 @@ public abstract class HadoopTestBase extends Assert {
       //fall back to the default value, as the property cannot be parsed
       millis = TEST_DEFAULT_TIMEOUT_VALUE;
     }
-    return new Timeout(millis, TimeUnit.MILLISECONDS);
+    return millis;
   }
 
   /**
    * The method name.
    */
-  @Rule
-  public TestName methodName = new TestName();
+  @RegisterExtension
+  private TestName methodName = new TestName();
 
   /**
    * Get the method name; defaults to the value of {@link #methodName}.
@@ -93,7 +94,7 @@ public abstract class HadoopTestBase extends Assert {
   /**
    * Static initializer names this thread "JUnit".
    */
-  @BeforeClass
+  @BeforeAll
   public static void nameTestThread() {
     Thread.currentThread().setName("JUnit");
   }
@@ -101,7 +102,7 @@ public abstract class HadoopTestBase extends Assert {
   /**
    * Before each method, the thread is renamed to match the method name.
    */
-  @Before
+  @BeforeEach
   public void nameThreadToMethod() {
     Thread.currentThread().setName("JUnit-" + getMethodName());
   }
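
After this change, subclasses pick up the class-level @Timeout and the registered TestName extension by inheritance; they only need JUnit 5 imports of their own. A hypothetical subclass, purely for illustration:

import org.junit.jupiter.api.Test;

import org.apache.hadoop.test.HadoopTestBase;

public class ExampleHadoopTest extends HadoopTestBase {

  @Test
  public void testThreadIsNamedAfterMethod() {
    // nameThreadToMethod() in the base class runs as @BeforeEach and renames
    // the current thread to "JUnit-" + getMethodName().
    assertTrue(Thread.currentThread().getName().startsWith("JUnit-"),
        "thread name should carry the JUnit prefix");
    assertEquals("testThreadIsNamedAfterMethod", getMethodName(),
        "method name reported by the TestName extension");
  }
}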

+ 31 - 29
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestCloseableReferenceCount.java

@@ -20,20 +20,16 @@ package org.apache.hadoop.util;
 
 import java.nio.channels.ClosedChannelException;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.test.HadoopTestBase;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 public class TestCloseableReferenceCount extends HadoopTestBase {
   @Test
   public void testReference() throws ClosedChannelException {
     CloseableReferenceCount clr = new CloseableReferenceCount();
     clr.reference();
-    assertEquals("Incorrect reference count", 1, clr.getReferenceCount());
+    assertEquals(1, clr.getReferenceCount(), "Incorrect reference count");
   }
 
   @Test
@@ -41,9 +37,9 @@ public class TestCloseableReferenceCount extends HadoopTestBase {
     CloseableReferenceCount clr = new CloseableReferenceCount();
     clr.reference();
     clr.reference();
-    assertFalse("New reference count should not equal STATUS_CLOSED_MASK",
-        clr.unreference());
-    assertEquals("Incorrect reference count", 1, clr.getReferenceCount());
+    assertFalse(clr.unreference(),
+        "New reference count should not equal STATUS_CLOSED_MASK");
+    assertEquals(1, clr.getReferenceCount(), "Incorrect reference count");
   }
 
   @Test
@@ -52,40 +48,46 @@ public class TestCloseableReferenceCount extends HadoopTestBase {
     clr.reference();
     clr.reference();
     clr.unreferenceCheckClosed();
-    assertEquals("Incorrect reference count", 1, clr.getReferenceCount());
+    assertEquals(1, clr.getReferenceCount(), "Incorrect reference count");
   }
 
   @Test
   public void testSetClosed() throws ClosedChannelException {
     CloseableReferenceCount clr = new CloseableReferenceCount();
-    assertTrue("Reference count should be open", clr.isOpen());
+    assertTrue(clr.isOpen(), "Reference count should be open");
     clr.setClosed();
-    assertFalse("Reference count should be closed", clr.isOpen());
+    assertFalse(clr.isOpen(), "Reference count should be closed");
   }
 
-  @Test(expected = ClosedChannelException.class)
+  @Test
   public void testReferenceClosedReference() throws ClosedChannelException {
-    CloseableReferenceCount clr = new CloseableReferenceCount();
-    clr.setClosed();
-    assertFalse("Reference count should be closed", clr.isOpen());
-    clr.reference();
+    assertThrows(ClosedChannelException.class, () -> {
+      CloseableReferenceCount clr = new CloseableReferenceCount();
+      clr.setClosed();
+      assertFalse(clr.isOpen(), "Reference count should be closed");
+      clr.reference();
+    });
   }
 
-  @Test(expected = ClosedChannelException.class)
+  @Test
   public void testUnreferenceClosedReference() throws ClosedChannelException {
-    CloseableReferenceCount clr = new CloseableReferenceCount();
-    clr.reference();
-    clr.setClosed();
-    assertFalse("Reference count should be closed", clr.isOpen());
-    clr.unreferenceCheckClosed();
+    assertThrows(ClosedChannelException.class, () -> {
+      CloseableReferenceCount clr = new CloseableReferenceCount();
+      clr.reference();
+      clr.setClosed();
+      assertFalse(clr.isOpen(), "Reference count should be closed");
+      clr.unreferenceCheckClosed();
+    });
   }
 
-  @Test(expected = ClosedChannelException.class)
+  @Test
   public void testDoubleClose() throws ClosedChannelException {
-    CloseableReferenceCount clr = new CloseableReferenceCount();
-    assertTrue("Reference count should be open", clr.isOpen());
-    clr.setClosed();
-    assertFalse("Reference count should be closed", clr.isOpen());
-    clr.setClosed();
+    assertThrows(ClosedChannelException.class, () -> {
+      CloseableReferenceCount clr = new CloseableReferenceCount();
+      assertTrue(clr.isOpen(), "Reference count should be open");
+      clr.setClosed();
+      assertFalse(clr.isOpen(), "Reference count should be closed");
+      clr.setClosed();
+    });
   }
 }
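
The @Test(expected = …) attribute has no JUnit 5 equivalent; the statements expected to fail move into an assertThrows lambda, which also hands back the exception for any follow-up checks. A standalone sketch of the pattern (the thrown exception here is just an example):

import java.nio.channels.ClosedChannelException;

import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;

public class AssertThrowsMigrationSketch {

  @Test
  public void testExpectedExceptionIsCaptured() {
    // JUnit 4: @Test(expected = ClosedChannelException.class) on the method.
    // JUnit 5: the failing code goes into the lambda and the exception is returned.
    ClosedChannelException thrown =
        assertThrows(ClosedChannelException.class, () -> {
          throw new ClosedChannelException();
        });
    assertNotNull(thrown, "assertThrows should return the thrown exception");
  }
}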

+ 13 - 18
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestIntrusiveCollection.java

@@ -30,15 +30,11 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.test.HadoopTestBase;
 import org.apache.hadoop.util.IntrusiveCollection.Element;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 public class TestIntrusiveCollection extends HadoopTestBase {
   static class SimpleElement implements IntrusiveCollection.Element {
     private Map<IntrusiveCollection<? extends Element>, Element>
@@ -111,10 +107,10 @@ public class TestIntrusiveCollection extends HadoopTestBase {
     SimpleElement element = new SimpleElement();
     intrusiveCollection.add(element);
 
-    assertFalse("Collection should not be empty",
-        intrusiveCollection.isEmpty());
-    assertTrue("Collection should contain added element",
-        intrusiveCollection.contains(element));
+    assertFalse(intrusiveCollection.isEmpty(),
+        "Collection should not be empty");
+    assertTrue(intrusiveCollection.contains(element),
+        "Collection should contain added element");
   }
 
   /**
@@ -135,9 +131,9 @@ public class TestIntrusiveCollection extends HadoopTestBase {
 
     intrusiveCollection.remove(element);
 
-    assertTrue("Collection should be empty", intrusiveCollection.isEmpty());
-    assertFalse("Collection should not contain removed element",
-        intrusiveCollection.contains(element));
+    assertTrue(intrusiveCollection.isEmpty(), "Collection should be empty");
+    assertFalse(intrusiveCollection.contains(element),
+        "Collection should not contain removed element");
   }
 
   /**
@@ -159,7 +155,7 @@ public class TestIntrusiveCollection extends HadoopTestBase {
 
     intrusiveCollection.clear();
 
-    assertTrue("Collection should be empty", intrusiveCollection.isEmpty());
+    assertTrue(intrusiveCollection.isEmpty(), "Collection should be empty");
   }
 
   /**
@@ -184,10 +180,9 @@ public class TestIntrusiveCollection extends HadoopTestBase {
 
     Iterator<SimpleElement> iterator = intrusiveCollection.iterator();
 
-    assertEquals("First element returned is incorrect", elem1, iterator.next());
-    assertEquals("Second element returned is incorrect", elem2,
-        iterator.next());
-    assertEquals("Third element returned is incorrect", elem3, iterator.next());
-    assertFalse("Iterator should not have next element", iterator.hasNext());
+    assertEquals(elem1, iterator.next(), "First element returned is incorrect");
+    assertEquals(elem2, iterator.next(), "Second element returned is incorrect");
+    assertEquals(elem3, iterator.next(), "Third element returned is incorrect");
+    assertFalse(iterator.hasNext(), "Iterator should not have next element");
   }
 }

+ 6 - 8
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJsonSerialization.java

@@ -25,7 +25,7 @@ import java.io.Serializable;
 import java.util.Objects;
 
 import com.fasterxml.jackson.core.JsonParseException;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -106,7 +106,7 @@ public class TestJsonSerialization extends HadoopTestBase {
   public void testStringRoundTrip() throws Throwable {
     String wire = serDeser.toJson(source);
     KeyVal unmarshalled = serDeser.fromJson(wire);
-    assertEquals("Failed to unmarshall: " + wire, source, unmarshalled);
+    assertEquals(source, unmarshalled, "Failed to unmarshall: " + wire);
   }
 
   @Test
@@ -164,12 +164,10 @@ public class TestJsonSerialization extends HadoopTestBase {
     LocalFileSystem fs = FileSystem.getLocal(new Configuration());
     try {
       serDeser.save(fs, tempPath, source, false);
-      assertEquals("JSON loaded with load(fs, path)",
-          source,
-          serDeser.load(fs, tempPath));
-      assertEquals("JSON loaded with load(fs, path, status)",
-          source,
-          serDeser.load(fs, tempPath, fs.getFileStatus(tempPath)));
+      assertEquals(source, serDeser.load(fs, tempPath),
+          "JSON loaded with load(fs, path)");
+      assertEquals(source, serDeser.load(fs, tempPath, fs.getFileStatus(tempPath)),
+          "JSON loaded with load(fs, path, status)");
     } finally {
       fs.delete(tempPath, false);
     }

+ 13 - 14
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLimitInputStream.java

@@ -22,13 +22,10 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.Random;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.test.HadoopTestBase;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-
 public class TestLimitInputStream extends HadoopTestBase {
   static class RandomInputStream extends InputStream {
     private Random rn = new Random(0);
@@ -41,22 +38,24 @@ public class TestLimitInputStream extends HadoopTestBase {
   public void testRead() throws IOException {
     try (LimitInputStream limitInputStream =
       new LimitInputStream(new RandomInputStream(), 0)) {
-      assertEquals("Reading byte after reaching limit should return -1", -1,
-          limitInputStream.read());
+      assertEquals(-1, limitInputStream.read(),
+          "Reading byte after reaching limit should return -1");
     }
     try (LimitInputStream limitInputStream =
       new LimitInputStream(new RandomInputStream(), 4)) {
-      assertEquals("Incorrect byte returned", new Random(0).nextInt(),
-          limitInputStream.read());
+      assertEquals(new Random(0).nextInt(),
+          limitInputStream.read(), "Incorrect byte returned");
     }
   }
 
-  @Test(expected = IOException.class)
+  @Test
   public void testResetWithoutMark() throws IOException {
-    try (LimitInputStream limitInputStream =
-      new LimitInputStream(new RandomInputStream(), 128)) {
-      limitInputStream.reset();
-    }
+    assertThrows(IOException.class, () -> {
+      try (LimitInputStream limitInputStream =
+          new LimitInputStream(new RandomInputStream(), 128)) {
+        limitInputStream.reset();
+      }
+    });
   }
 
   @Test
@@ -68,7 +67,7 @@ public class TestLimitInputStream extends HadoopTestBase {
       byte[] expected = { (byte) r.nextInt(), (byte) r.nextInt(),
                           (byte) r.nextInt(), (byte) r.nextInt() };
       limitInputStream.read(data, 0, 4);
-      assertArrayEquals("Incorrect bytes returned", expected, data);
+      assertArrayEquals(expected, data, "Incorrect bytes returned");
     }
   }
 }

+ 15 - 18
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestUTF8ByteArrayUtils.java

@@ -18,40 +18,37 @@
 
 package org.apache.hadoop.util;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.test.HadoopTestBase;
 
-import static org.junit.Assert.assertEquals;
-
 public class TestUTF8ByteArrayUtils extends HadoopTestBase {
   @Test
   public void testFindByte() {
     byte[] data = "Hello, world!".getBytes();
-    assertEquals("Character 'a' does not exist in string", -1,
-        UTF8ByteArrayUtils.findByte(data, 0, data.length, (byte) 'a'));
-    assertEquals("Did not find first occurrence of character 'o'", 4,
-        UTF8ByteArrayUtils.findByte(data, 0, data.length, (byte) 'o'));
+    assertEquals(-1, UTF8ByteArrayUtils.findByte(data, 0, data.length, (byte) 'a'),
+        "Character 'a' does not exist in string");
+    assertEquals(4, UTF8ByteArrayUtils.findByte(data, 0, data.length, (byte) 'o'),
+        "Did not find first occurrence of character 'o'");
   }
 
   @Test
   public void testFindBytes() {
     byte[] data = "Hello, world!".getBytes();
-    assertEquals("Did not find first occurrence of pattern 'ello'", 1,
-        UTF8ByteArrayUtils.findBytes(data, 0, data.length, "ello".getBytes()));
-    assertEquals(
-        "Substring starting at position 2 does not contain pattern 'ello'", -1,
-        UTF8ByteArrayUtils.findBytes(data, 2, data.length, "ello".getBytes()));
+    assertEquals(1, UTF8ByteArrayUtils.findBytes(data, 0, data.length, "ello".getBytes()),
+        "Did not find first occurrence of pattern 'ello'");
+    assertEquals(-1, UTF8ByteArrayUtils.findBytes(data, 2, data.length, "ello".getBytes()),
+        "Substring starting at position 2 does not contain pattern 'ello'");
   }
 
   @Test
   public void testFindNthByte() {
     byte[] data = "Hello, world!".getBytes();
-    assertEquals("Did not find 2nd occurrence of character 'l'", 3,
-        UTF8ByteArrayUtils.findNthByte(data, 0, data.length, (byte) 'l', 2));
-    assertEquals("4th occurrence of character 'l' does not exist", -1,
-        UTF8ByteArrayUtils.findNthByte(data, 0, data.length, (byte) 'l', 4));
-    assertEquals("Did not find 3rd occurrence of character 'l'", 10,
-        UTF8ByteArrayUtils.findNthByte(data, (byte) 'l', 3));
+    assertEquals(3, UTF8ByteArrayUtils.findNthByte(data, 0, data.length, (byte) 'l', 2),
+        "Did not find 2nd occurrence of character 'l'");
+    assertEquals(-1, UTF8ByteArrayUtils.findNthByte(data, 0, data.length, (byte) 'l', 4),
+        "4th occurrence of character 'l' does not exist");
+    assertEquals(10, UTF8ByteArrayUtils.findNthByte(data, (byte) 'l', 3),
+        "Did not find 3rd occurrence of character 'l'");
   }
 }
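
Most of the remaining churn in this change is the same mechanical swap: JUnit 4's Assert takes the failure message as the first argument, JUnit 5's Assertions takes it last, while the expected/actual pair keeps its order. A minimal sketch of the before/after (the values are illustrative):

import org.junit.jupiter.api.Test;

import static org.junit.jupiter.api.Assertions.assertEquals;

public class AssertionMessageOrderSketch {

  @Test
  public void testMessageIsTheLastArgument() {
    byte[] data = "Hello, world!".getBytes();

    // JUnit 4 (shown only as a comment): assertEquals("unexpected length", 13, data.length);
    // JUnit 5: expected, actual, then the message.
    assertEquals(13, data.length, "unexpected length");
  }
}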

+ 75 - 58
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/functional/TestTaskPool.java

@@ -32,11 +32,10 @@ import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -48,7 +47,6 @@ import static org.apache.hadoop.test.LambdaTestUtils.intercept;
  * Test Task Pool class.
  * This is pulled straight out of the S3A version.
  */
-@RunWith(Parameterized.class)
 public class TestTaskPool extends HadoopTestBase {
 
   private static final Logger LOG =
@@ -58,7 +56,7 @@ public class TestTaskPool extends HadoopTestBase {
 
   private static final int FAILPOINT = 8;
 
-  private final int numThreads;
+  private int numThreads;
 
   /**
    * Thread pool for task execution.
@@ -89,7 +87,6 @@ public class TestTaskPool extends HadoopTestBase {
    * more checks on single thread than parallel ops.
    * @return a list of parameter tuples.
    */
-  @Parameterized.Parameters(name = "threads={0}")
   public static Collection<Object[]> params() {
     return Arrays.asList(new Object[][]{
         {0},
@@ -104,10 +101,10 @@ public class TestTaskPool extends HadoopTestBase {
 
   /**
    * Construct the parameterized test.
-   * @param numThreads number of threads
+   * @param pNumThreads number of threads
    */
-  public TestTaskPool(int numThreads) {
-    this.numThreads = numThreads;
+  public void initTestTaskPool(int pNumThreads) {
+    this.numThreads = pNumThreads;
   }
 
   /**
@@ -118,7 +115,7 @@ public class TestTaskPool extends HadoopTestBase {
     return numThreads > 1;
   }
 
-  @Before
+  @BeforeEach
   public void setup() {
     items = IntStream.rangeClosed(1, ITEM_COUNT)
         .mapToObj(i -> new Item(i,
@@ -138,7 +135,7 @@ public class TestTaskPool extends HadoopTestBase {
 
   }
 
-  @After
+  @AfterEach
   public void teardown() {
     if (threadPool != null) {
       threadPool.shutdown();
@@ -166,13 +163,13 @@ public class TestTaskPool extends HadoopTestBase {
   private void assertRun(TaskPool.Builder<Item> builder,
       CounterTask task) throws IOException {
     boolean b = builder.run(task);
-    assertTrue("Run of " + task + " failed", b);
+    assertTrue(b, "Run of " + task + " failed");
   }
 
   private void assertFailed(TaskPool.Builder<Item> builder,
       CounterTask task) throws IOException {
     boolean b = builder.run(task);
-    assertFalse("Run of " + task + " unexpectedly succeeded", b);
+    assertFalse(b, "Run of " + task + " unexpectedly succeeded");
   }
 
   private String itemsToString() {
@@ -180,22 +177,28 @@ public class TestTaskPool extends HadoopTestBase {
         .collect(Collectors.joining("\n")) + "]";
   }
 
-  @Test
-  public void testSimpleInvocation() throws Throwable {
+  @ParameterizedTest(name = "threads={0}")
+  @MethodSource("params")
+  public void testSimpleInvocation(int pNumThreads) throws Throwable {
+    initTestTaskPool(pNumThreads);
     CounterTask t = new CounterTask("simple", 0, Item::commit);
     assertRun(builder(), t);
     t.assertInvoked("", ITEM_COUNT);
   }
 
-  @Test
-  public void testFailNoStoppingSuppressed() throws Throwable {
+  @ParameterizedTest(name = "threads={0}")
+  @MethodSource("params")
+  public void testFailNoStoppingSuppressed(int pNumThreads) throws Throwable {
+    initTestTaskPool(pNumThreads);
     assertFailed(builder().suppressExceptions(), failingTask);
     failingTask.assertInvoked("Continued through operations", ITEM_COUNT);
     items.forEach(Item::assertCommittedOrFailed);
   }
 
-  @Test
-  public void testFailFastSuppressed() throws Throwable {
+  @ParameterizedTest(name = "threads={0}")
+  @MethodSource("params")
+  public void testFailFastSuppressed(int pNumThreads) throws Throwable {
+    initTestTaskPool(pNumThreads);
     assertFailed(builder()
             .suppressExceptions()
             .stopOnFailure(),
@@ -207,8 +210,10 @@ public class TestTaskPool extends HadoopTestBase {
     }
   }
 
-  @Test
-  public void testFailedCallAbortSuppressed() throws Throwable {
+  @ParameterizedTest(name = "threads={0}")
+  @MethodSource("params")
+  public void testFailedCallAbortSuppressed(int pNumThreads) throws Throwable {
+    initTestTaskPool(pNumThreads);
     assertFailed(builder()
             .stopOnFailure()
             .suppressExceptions()
@@ -221,12 +226,14 @@ public class TestTaskPool extends HadoopTestBase {
       items.stream().filter(i -> !i.committed)
           .map(Item::assertAborted);
       items.stream().filter(i -> i.committed)
-          .forEach(i -> assertFalse(i.toString(), i.aborted));
+          .forEach(i -> assertFalse(i.aborted, i.toString()));
     }
   }
 
-  @Test
-  public void testFailedCalledWhenNotStoppingSuppressed() throws Throwable {
+  @ParameterizedTest(name = "threads={0}")
+  @MethodSource("params")
+  public void testFailedCalledWhenNotStoppingSuppressed(int pNumThreads) throws Throwable {
+    initTestTaskPool(pNumThreads);
     assertFailed(builder()
             .suppressExceptions()
             .onFailure(failures),
@@ -236,8 +243,10 @@ public class TestTaskPool extends HadoopTestBase {
     failures.assertInvoked("failure event", 1);
   }
 
-  @Test
-  public void testFailFastCallRevertSuppressed() throws Throwable {
+  @ParameterizedTest(name = "threads={0}")
+  @MethodSource("params")
+  public void testFailFastCallRevertSuppressed(int pNumThreads) throws Throwable {
+    initTestTaskPool(pNumThreads);
     assertFailed(builder()
             .stopOnFailure()
             .revertWith(reverter)
@@ -264,8 +273,10 @@ public class TestTaskPool extends HadoopTestBase {
     failures.assertInvoked("failure event", 1);
   }
 
-  @Test
-  public void testFailSlowCallRevertSuppressed() throws Throwable {
+  @ParameterizedTest(name = "threads={0}")
+  @MethodSource("params")
+  public void testFailSlowCallRevertSuppressed(int pNumThreads) throws Throwable {
+    initTestTaskPool(pNumThreads);
     assertFailed(builder()
             .suppressExceptions()
             .revertWith(reverter)
@@ -287,8 +298,10 @@ public class TestTaskPool extends HadoopTestBase {
     failures.assertInvoked("failure event", 1);
   }
 
-  @Test
-  public void testFailFastExceptions() throws Throwable {
+  @ParameterizedTest(name = "threads={0}")
+  @MethodSource("params")
+  public void testFailFastExceptions(int pNumThreads) throws Throwable {
+    initTestTaskPool(pNumThreads);
     intercept(IOException.class,
         () -> builder()
             .stopOnFailure()
@@ -300,8 +313,10 @@ public class TestTaskPool extends HadoopTestBase {
     }
   }
 
-  @Test
-  public void testFailSlowExceptions() throws Throwable {
+  @ParameterizedTest(name = "threads={0}")
+  @MethodSource("params")
+  public void testFailSlowExceptions(int pNumThreads) throws Throwable {
+    initTestTaskPool(pNumThreads);
     intercept(IOException.class,
         () -> builder()
             .run(failingTask));
@@ -309,8 +324,10 @@ public class TestTaskPool extends HadoopTestBase {
     items.forEach(Item::assertCommittedOrFailed);
   }
 
-  @Test
-  public void testFailFastExceptionsWithAbortFailure() throws Throwable {
+  @ParameterizedTest(name = "threads={0}")
+  @MethodSource("params")
+  public void testFailFastExceptionsWithAbortFailure(int pNumThreads) throws Throwable {
+    initTestTaskPool(pNumThreads);
     CounterTask failFirst = new CounterTask("task", 1, Item::commit);
     CounterTask a = new CounterTask("aborter", 1, Item::abort);
     intercept(IOException.class,
@@ -324,8 +341,10 @@ public class TestTaskPool extends HadoopTestBase {
     }
   }
 
-  @Test
-  public void testFailFastExceptionsWithAbortFailureStopped() throws Throwable {
+  @ParameterizedTest(name = "threads={0}")
+  @MethodSource("params")
+  public void testFailFastExceptionsWithAbortFailureStopped(int pNumThreads) throws Throwable {
+    initTestTaskPool(pNumThreads);
     CounterTask failFirst = new CounterTask("task", 1, Item::commit);
     CounterTask a = new CounterTask("aborter", 1, Item::abort);
     intercept(IOException.class,
@@ -345,8 +364,10 @@ public class TestTaskPool extends HadoopTestBase {
    * The actual ID of the last task has to be picked up from the
    * failure callback, as in the pool it may be any of them.
    */
-  @Test
-  public void testRevertAllSuppressed() throws Throwable {
+  @ParameterizedTest(name = "threads={0}")
+  @MethodSource("params")
+  public void testRevertAllSuppressed(int pNumThreads) throws Throwable {
+    initTestTaskPool(pNumThreads);
     CounterTask failLast = new CounterTask("task", ITEM_COUNT, Item::commit);
 
     assertFailed(builder()
@@ -417,30 +438,27 @@ public class TestTaskPool extends HadoopTestBase {
     }
 
     public Item assertCommitted() {
-      assertTrue(toString() + " was not committed in\n"
-              + itemsToString(),
-          committed);
+      assertTrue(committed, toString() + " was not committed in\n"
+          + itemsToString());
       return this;
     }
 
     public Item assertCommittedOrFailed() {
-      assertTrue(toString() + " was not committed nor failed in\n"
-              + itemsToString(),
-          committed || failed);
+      assertTrue(committed || failed,
+          toString() + " was not committed nor failed in\n"
+          + itemsToString());
       return this;
     }
 
     public Item assertAborted() {
-      assertTrue(toString() + " was not aborted in\n"
-              + itemsToString(),
-          aborted);
+      assertTrue(aborted, toString() + " was not aborted in\n"
+          + itemsToString());
       return this;
     }
 
     public Item assertReverted() {
-      assertTrue(toString() + " was not reverted in\n"
-              + itemsToString(),
-          reverted);
+      assertTrue(reverted, toString() + " was not reverted in\n"
+          + itemsToString());
       return this;
     }
 
@@ -519,16 +537,15 @@ public class TestTaskPool extends HadoopTestBase {
     }
 
     void assertInvoked(String text, int expected) {
-      assertEquals(toString() + ": " + text, expected, getCount());
+      assertEquals(expected, getCount(), toString() + ": " + text);
     }
 
     void assertInvokedAtLeast(String text, int expected) {
       int actual = getCount();
-      assertTrue(toString() + ": " + text
-              + "-expected " + expected
-              + " invocations, but got " + actual
-              + " in " + itemsToString(),
-          expected <= actual);
+      assertTrue(expected <= actual, toString() + ": " + text
+          + "-expected " + expected
+          + " invocations, but got " + actual
+          + " in " + itemsToString());
     }
 
     @Override

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java

@@ -29,9 +29,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.SafeMode;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.fs.CommonPathCapabilities.LEASE_RECOVERABLE;
 import static org.assertj.core.api.Assertions.assertThat;
@@ -41,7 +41,7 @@ public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
   private MiniDFSCluster cluster;
   private String defaultWorkingDirectory;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
@@ -54,7 +54,7 @@ public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
            UserGroupInformation.getCurrentUser().getShortUserName();
   }
   
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     super.tearDown();
     if (cluster != null) {

+ 15 - 15
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java

@@ -66,21 +66,21 @@ public class TestMultipleNNPortQOP extends SaslDataTransferTestCase {
     clusterConf = createSecureConfig(
         "authentication,integrity,privacy");
     clusterConf.set(DFS_NAMENODE_RPC_ADDRESS_AUXILIARY_KEY,
-        "12000,12100,12200");
+        "12001,12101,12201");
     // explicitly setting service rpc for datanode. This is because
     // DFSUtil.getNNServiceRpcAddressesForCluster looks up the client-facing
     // port and the service port at the same time, and if there is no service
     // rpc setting it would return the client port, which in this case would be
     // the auxiliary port for the datanode. That is not what the auxiliary port
     // is for, so the service rpc port is set explicitly to avoid this.
-    clusterConf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "localhost:9020");
+    clusterConf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "localhost:9021");
     clusterConf.set(
         CommonConfigurationKeys.HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
         "org.apache.hadoop.security.IngressPortBasedResolver");
-    clusterConf.set("ingress.port.sasl.configured.ports", "12000,12100,12200");
-    clusterConf.set("ingress.port.sasl.prop.12000", "authentication");
-    clusterConf.set("ingress.port.sasl.prop.12100", "integrity");
-    clusterConf.set("ingress.port.sasl.prop.12200", "privacy");
+    clusterConf.set("ingress.port.sasl.configured.ports", "12001,12101,12201");
+    clusterConf.set("ingress.port.sasl.prop.12001", "authentication");
+    clusterConf.set("ingress.port.sasl.prop.12101", "integrity");
+    clusterConf.set("ingress.port.sasl.prop.12201", "privacy");
     clusterConf.setBoolean(DFS_NAMENODE_SEND_QOP_ENABLED, true);
   }
 
@@ -106,11 +106,11 @@ public class TestMultipleNNPortQOP extends SaslDataTransferTestCase {
 
       URI currentURI = cluster.getURI();
       URI uriAuthPort = new URI(currentURI.getScheme() + "://" +
-              currentURI.getHost() + ":12000");
+              currentURI.getHost() + ":12001");
       URI uriIntegrityPort = new URI(currentURI.getScheme() + "://" +
-              currentURI.getHost() + ":12100");
+              currentURI.getHost() + ":12101");
       URI uriPrivacyPort = new URI(currentURI.getScheme() +
-          "://" + currentURI.getHost() + ":12200");
+          "://" + currentURI.getHost() + ":12201");
 
       // If connecting to primary port, block token should not include
       // handshake secret
@@ -183,11 +183,11 @@ public class TestMultipleNNPortQOP extends SaslDataTransferTestCase {
 
       URI currentURI = cluster.getURI();
       URI uriAuthPort = new URI(currentURI.getScheme() +
-          "://" + currentURI.getHost() + ":12000");
+          "://" + currentURI.getHost() + ":12001");
       URI uriIntegrityPort = new URI(currentURI.getScheme() +
-          "://" + currentURI.getHost() + ":12100");
+          "://" + currentURI.getHost() + ":12101");
       URI uriPrivacyPort = new URI(currentURI.getScheme() +
-          "://" + currentURI.getHost() + ":12200");
+          "://" + currentURI.getHost() + ":12201");
 
       clientConf.set(HADOOP_RPC_PROTECTION, "privacy");
       FileSystem fsPrivacy = FileSystem.get(uriPrivacyPort, clientConf);
@@ -243,13 +243,13 @@ public class TestMultipleNNPortQOP extends SaslDataTransferTestCase {
       URI currentURI = cluster.getURI();
       URI uriAuthPort =
           new URI(currentURI.getScheme() + "://" +
-              currentURI.getHost() + ":12000");
+              currentURI.getHost() + ":12001");
       URI uriIntegrityPort =
           new URI(currentURI.getScheme() + "://" +
-              currentURI.getHost() + ":12100");
+              currentURI.getHost() + ":12101");
       URI uriPrivacyPort =
           new URI(currentURI.getScheme() + "://" +
-              currentURI.getHost() + ":12200");
+              currentURI.getHost() + ":12201");
 
       clientConf.set(HADOOP_RPC_PROTECTION, "privacy");
       FileSystem fsPrivacy = FileSystem.get(uriPrivacyPort, clientConf);

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestViewDistributedFileSystemContract.java

@@ -26,10 +26,10 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.io.File;
 import java.io.IOException;
@@ -41,7 +41,7 @@ public class TestViewDistributedFileSystemContract
   private static String defaultWorkingDirectory;
   private static Configuration conf = new HdfsConfiguration();
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws IOException {
     final File basedir = GenericTestUtils.getRandomizedTestDir();
     conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
@@ -53,7 +53,7 @@ public class TestViewDistributedFileSystemContract
         "/user/" + UserGroupInformation.getCurrentUser().getShortUserName();
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf.set("fs.hdfs.impl", ViewDistributedFileSystem.class.getName());
     URI defaultFSURI =
@@ -65,7 +65,7 @@ public class TestViewDistributedFileSystemContract
     fs = FileSystem.get(conf);
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownAfter() throws Exception {
     if (cluster != null) {
       cluster.shutdown();

+ 15 - 12
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java

@@ -18,7 +18,11 @@
 
 package org.apache.hadoop.hdfs.web;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.BufferedReader;
 import java.io.FileNotFoundException;
@@ -53,9 +57,8 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
   private static final Configuration conf = new Configuration();
@@ -77,7 +80,7 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     //get file system as a non-superuser
     final UserGroupInformation current = UserGroupInformation.getCurrentUser();
@@ -154,13 +157,13 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
       String names2[] = computed[i].getNames();
       Arrays.sort(names1);
       Arrays.sort(names2);
-      Assert.assertArrayEquals("Names differ", names1, names2);
+      assertArrayEquals(names1, names2, "Names differ");
       // Check topology
       String topos1[] = expected[i].getTopologyPaths();
       String topos2[] = computed[i].getTopologyPaths();
       Arrays.sort(topos1);
       Arrays.sort(topos2);
-      Assert.assertArrayEquals("Topology differs", topos1, topos2);
+      assertArrayEquals(topos1, topos2, "Topology differs");
     }
   }
 
@@ -243,8 +246,8 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
       in.close();
   
       for (int i = 0; i < buf.length; i++) {
-        assertEquals("Position " + i + ", offset=" + offset + ", length=" + len,
-            mydata[i + offset], buf[i]);
+        assertEquals(mydata[i + offset], buf[i],
+            "Position " + i + ", offset=" + offset + ", length=" + len);
       }
     }
 
@@ -258,8 +261,8 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
       in.close();
   
       for (int i = 0; i < buf.length; i++) {
-        assertEquals("Position " + i + ", offset=" + offset + ", length=" + len,
-            mydata[i + offset], buf[i]);
+        assertEquals(mydata[i + offset], buf[i],
+            "Position " + i + ", offset=" + offset + ", length=" + len);
       }
     }
   }
@@ -272,7 +275,7 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
     final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
     final URL url = webhdfs.toUrl(GetOpParam.Op.NULL, root);
     WebHdfsFileSystem.LOG.info("null url=" + url);
-    Assert.assertTrue(url.toString().contains("v1"));
+    assertTrue(url.toString().contains("v1"));
 
     //test root permission
     final FileStatus status = fs.getFileStatus(root);

+ 21 - 20
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3AFileSystemContract.java

@@ -21,11 +21,10 @@ package org.apache.hadoop.fs.s3a;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 
-import org.assertj.core.api.Assertions;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
+import org.apache.hadoop.test.TestName;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -39,8 +38,10 @@ import static org.apache.hadoop.fs.s3a.S3ATestUtils.isCreatePerformanceEnabled;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.setPerformanceFlags;
 import static org.apache.hadoop.fs.s3a.S3ATestUtils.skipIfAnalyticsAcceleratorEnabled;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.junit.Assume.*;
-import static org.junit.Assert.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 /**
  *  Tests a live S3 system. If your keys and bucket aren't specified, all tests
@@ -53,8 +54,8 @@ public class ITestS3AFileSystemContract extends FileSystemContractBaseTest {
 
   private Path basePath;
 
-  @Rule
-  public TestName methodName = new TestName();
+  @RegisterExtension
+  private TestName methodName = new TestName();
 
   private void nameThread() {
     Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
@@ -65,7 +66,7 @@ public class ITestS3AFileSystemContract extends FileSystemContractBaseTest {
     return S3ATestConstants.S3A_TEST_TIMEOUT;
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     nameThread();
     Configuration conf = setPerformanceFlags(
@@ -73,7 +74,7 @@ public class ITestS3AFileSystemContract extends FileSystemContractBaseTest {
         "");
 
     fs = S3ATestUtils.createTestFileSystem(conf);
-    assumeNotNull(fs);
+    assumeTrue(fs != null);
     basePath = fs.makeQualified(
         S3ATestUtils.createTestPath(new Path("s3afilesystemcontract")));
   }
@@ -100,14 +101,14 @@ public class ITestS3AFileSystemContract extends FileSystemContractBaseTest {
     Path dst = path("testRenameDirectoryAsExistingNew/newdir");
     fs.mkdirs(dst);
     rename(src, dst, true, false, true);
-    assertFalse("Nested file1 exists",
-        fs.exists(path(src + "/file1")));
-    assertFalse("Nested file2 exists",
-        fs.exists(path(src + "/subdir/file2")));
-    assertTrue("Renamed nested file1 exists",
-        fs.exists(path(dst + "/file1")));
-    assertTrue("Renamed nested exists",
-        fs.exists(path(dst + "/subdir/file2")));
+    assertFalse(fs.exists(path(src + "/file1")),
+        "Nested file1 exists");
+    assertFalse(fs.exists(path(src + "/subdir/file2")),
+        "Nested file2 exists");
+    assertTrue(fs.exists(path(dst + "/file1")),
+        "Renamed nested file1 exists");
+    assertTrue(fs.exists(path(dst + "/subdir/file2")),
+        "Renamed nested file2 exists");
   }
 
   @Test
@@ -151,7 +152,7 @@ public class ITestS3AFileSystemContract extends FileSystemContractBaseTest {
     boolean createPerformance = isCreatePerformanceEnabled(fs);
     try {
       super.testOverwrite();
-      Assertions.assertThat(createPerformance)
+      assertThat(createPerformance)
           .describedAs("create performance enabled")
           .isFalse();
     } catch (AssertionError e) {

+ 7 - 7
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestArnResource.java

@@ -19,8 +19,7 @@
 package org.apache.hadoop.fs.s3a;
 
 import software.amazon.awssdk.regions.Region;
-import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -28,6 +27,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.test.HadoopTestBase;
 
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * Verifies the mapping of ARN declaration of resource to the associated
@@ -58,9 +58,9 @@ public class TestArnResource extends HadoopTestBase {
       String partition = testPair[1];
 
       ArnResource resource = getArnResourceFrom(partition, "s3", region, MOCK_ACCOUNT, accessPoint);
-      assertEquals("Access Point name does not match", accessPoint, resource.getName());
-      assertEquals("Account Id does not match", MOCK_ACCOUNT, resource.getOwnerAccountId());
-      assertEquals("Region does not match", region, resource.getRegion());
+      assertEquals(accessPoint, resource.getName(), "Access Point name does not match");
+      assertEquals(MOCK_ACCOUNT, resource.getOwnerAccountId(), "Account Id does not match");
+      assertEquals(region, resource.getRegion(), "Region does not match");
     }
   }
 
@@ -72,7 +72,7 @@ public class TestArnResource extends HadoopTestBase {
         "test");
     String expected = "s3-accesspoint.eu-west-1.amazonaws.com";
 
-    Assertions.assertThat(accessPoint.getEndpoint())
+    assertThat(accessPoint.getEndpoint())
         .describedAs("Endpoint has invalid format. Access Point requests will not work")
         .isEqualTo(expected);
   }
@@ -85,7 +85,7 @@ public class TestArnResource extends HadoopTestBase {
         "test");
     String expected = "s3-outposts.eu-west-1.amazonaws.com";
 
-    Assertions.assertThat(accessPoint.getEndpoint())
+    assertThat(accessPoint.getEndpoint())
         .describedAs("Endpoint has invalid format. Access Point requests will not work")
         .isEqualTo(expected);
   }

+ 31 - 32
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestDataBlocks.java

@@ -20,37 +20,34 @@ package org.apache.hadoop.fs.s3a;
 
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Optional;
 
-import org.assertj.core.api.Assertions;
 import org.assertj.core.data.Index;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.s3a.impl.UploadContentProviders;
 import org.apache.hadoop.fs.store.ByteBufferInputStream;
 import org.apache.hadoop.test.HadoopTestBase;
+import org.junit.jupiter.api.io.TempDir;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 
 import static java.util.Optional.empty;
 import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER_ARRAY;
 import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BUFFER_DISK;
 import static org.apache.hadoop.fs.s3a.Constants.FAST_UPLOAD_BYTEBUFFER;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * Unit tests for {@link S3ADataBlocks}.
  * Parameterized on the buffer type.
  */
-@RunWith(Parameterized.class)
 public class TestDataBlocks extends HadoopTestBase {
 
-  @Parameterized.Parameters(name = "{0}")
   public static Collection<Object[]> params() {
     return Arrays.asList(new Object[][]{
         {FAST_UPLOAD_BUFFER_DISK},
@@ -59,16 +56,16 @@ public class TestDataBlocks extends HadoopTestBase {
     });
   }
 
-  @Rule
-  public final TemporaryFolder tempDir = new TemporaryFolder();
+  @TempDir
+  private Path tempDir;
 
   /**
    * Buffer type.
    */
-  private final String bufferType;
+  private String bufferType;
 
-  public TestDataBlocks(final String bufferType) {
-    this.bufferType = bufferType;
+  public void initTestDataBlocks(final String pBufferType) {
+    this.bufferType = pBufferType;
   }
 
   /**
@@ -80,7 +77,7 @@ public class TestDataBlocks extends HadoopTestBase {
     // this one passed in a file allocation function
     case FAST_UPLOAD_BUFFER_DISK:
       return new S3ADataBlocks.DiskBlockFactory((i, l) ->
-          tempDir.newFile("file" + i));
+          tempDir.resolve("file" + i).toFile());
     case FAST_UPLOAD_BUFFER_ARRAY:
       return new S3ADataBlocks.ArrayBlockFactory(null);
     case FAST_UPLOAD_BYTEBUFFER:
@@ -95,8 +92,10 @@ public class TestDataBlocks extends HadoopTestBase {
    * they produce.
    * There are extra assertions on the {@link ByteBufferInputStream}.
    */
-  @Test
-  public void testBlockFactoryIO() throws Throwable {
+  @ParameterizedTest(name = "BufferType : {0}")
+  @MethodSource("params")
+  public void testBlockFactoryIO(String pBufferType) throws Throwable {
+    initTestDataBlocks(pBufferType);
     try (S3ADataBlocks.BlockFactory factory = createFactory()) {
       int limit = 128;
       S3ADataBlocks.DataBlock block
@@ -107,11 +106,11 @@ public class TestDataBlocks extends HadoopTestBase {
       int bufferLen = buffer.length;
       block.write(buffer, 0, bufferLen);
       assertEquals(bufferLen, block.dataSize());
-      assertEquals("capacity in " + block,
-          limit - bufferLen, block.remainingCapacity());
-      assertTrue("hasCapacity(64) in " + block, block.hasCapacity(64));
-      assertTrue("No capacity in " + block,
-          block.hasCapacity(limit - bufferLen));
+      assertEquals(limit - bufferLen, block.remainingCapacity(),
+          "capacity in " + block);
+      assertTrue(block.hasCapacity(64), "hasCapacity(64) in " + block);
+      assertTrue(block.hasCapacity(limit - bufferLen),
+          "No capacity in " + block);
 
       // now start the write
       S3ADataBlocks.BlockUploadData blockUploadData = block.startUpload();
@@ -122,7 +121,7 @@ public class TestDataBlocks extends HadoopTestBase {
       InputStream stream = cp.newStream();
 
       assertStreamCreationCount(cp, 1);
-      Assertions.assertThat(stream.markSupported())
+      assertThat(stream.markSupported())
           .describedAs("markSupported() of %s", stream)
           .isTrue();
 
@@ -132,7 +131,7 @@ public class TestDataBlocks extends HadoopTestBase {
               : empty();
 
       bbStream.ifPresent(bb -> {
-        Assertions.assertThat(bb.hasRemaining())
+        assertThat(bb.hasRemaining())
             .describedAs("hasRemaining() in %s", bb)
             .isTrue();
       });
@@ -170,7 +169,7 @@ public class TestDataBlocks extends HadoopTestBase {
       assertAvailableValue(stream, 0);
 
       bbStream.ifPresent(bb -> {
-        Assertions.assertThat(bb.hasRemaining())
+        assertThat(bb.hasRemaining())
             .describedAs("hasRemaining() in %s", bb)
             .isFalse();
       });
@@ -189,17 +188,17 @@ public class TestDataBlocks extends HadoopTestBase {
 
       // this must close the old stream
       bbStream.ifPresent(bb -> {
-        Assertions.assertThat(bb.isOpen())
+        assertThat(bb.isOpen())
             .describedAs("stream %s is open", bb)
             .isFalse();
       });
 
       // do a read(byte[]) of everything
       byte[] readBuffer = new byte[bufferLen];
-      Assertions.assertThat(stream2.read(readBuffer))
+      assertThat(stream2.read(readBuffer))
           .describedAs("number of bytes read from stream %s", stream2)
           .isEqualTo(bufferLen);
-      Assertions.assertThat(readBuffer)
+      assertThat(readBuffer)
           .describedAs("data read into buffer")
           .isEqualTo(buffer);
 
@@ -220,21 +219,21 @@ public class TestDataBlocks extends HadoopTestBase {
 
   private static void assertByteAtIndex(final byte[] bytes,
       final int index, final char expected) {
-    Assertions.assertThat(bytes)
+    assertThat(bytes)
         .contains(expected, Index.atIndex(index));
   }
 
   private static void assertReadEquals(final InputStream stream,
       final int ch)
       throws IOException {
-    Assertions.assertThat(stream.read())
+    assertThat(stream.read())
         .describedAs("read() in %s", stream)
         .isEqualTo(ch);
   }
 
   private static void assertAvailableValue(final InputStream stream,
       final int expected) throws IOException {
-    Assertions.assertThat(stream.available())
+    assertThat(stream.available())
         .describedAs("wrong available() in %s", stream)
         .isEqualTo(expected);
   }
@@ -242,7 +241,7 @@ public class TestDataBlocks extends HadoopTestBase {
   private static void assertStreamCreationCount(
       final UploadContentProviders.BaseContentProvider<?> cp,
       final int count) {
-    Assertions.assertThat(cp.getStreamCreationCount())
+    assertThat(cp.getStreamCreationCount())
         .describedAs("stream creation count of %s", cp)
         .isEqualTo(count);
   }
@@ -261,7 +260,7 @@ public class TestDataBlocks extends HadoopTestBase {
     if (factory instanceof S3ADataBlocks.ByteBufferBlockFactory) {
       S3ADataBlocks.ByteBufferBlockFactory bufferFactory =
           (S3ADataBlocks.ByteBufferBlockFactory) factory;
-      Assertions.assertThat(bufferFactory.getOutstandingBufferCount())
+      assertThat(bufferFactory.getOutstandingBufferCount())
           .describedAs("outstanding buffers in %s", factory)
           .isEqualTo(expectedCount);
     }

+ 45 - 36
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestInvoker.java

@@ -26,13 +26,12 @@ import java.util.concurrent.CompletionException;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.assertj.core.api.Assertions;
 import software.amazon.awssdk.awscore.exception.AwsServiceException;
 import software.amazon.awssdk.core.exception.SdkClientException;
 import software.amazon.awssdk.core.exception.SdkException;
 import software.amazon.awssdk.services.s3.model.S3Exception;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.retry.RetryPolicy;
@@ -49,6 +48,7 @@ import static org.apache.hadoop.fs.s3a.impl.InternalConstants.SC_501_NOT_IMPLEME
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.SC_503_SERVICE_UNAVAILABLE;
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.SC_504_GATEWAY_TIMEOUT;
 import static org.apache.hadoop.test.LambdaTestUtils.*;
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * Test the {@link Invoker} code and the associated {@link S3ARetryPolicy}.
@@ -153,7 +153,7 @@ public class TestInvoker extends HadoopTestBase {
       SC_400_BAD_REQUEST,
       "bad request");
 
-  @Before
+  @BeforeEach
   public void setup() {
     resetCounters();
   }
@@ -225,7 +225,7 @@ public class TestInvoker extends HadoopTestBase {
         verifyTranslated(AWSStatus500Exception.class, ex);
 
     // the status code is preserved
-    Assertions.assertThat(ex500.statusCode())
+    assertThat(ex500.statusCode())
         .describedAs("status code of %s", ex)
         .isEqualTo(SC_500_INTERNAL_SERVER_ERROR);
 
@@ -234,7 +234,7 @@ public class TestInvoker extends HadoopTestBase {
         RETRY_POLICY, RetryPolicy.RetryAction.RETRY,
         ex, 0, true);
 
-    Assertions.assertThat(invoker.getRetryPolicy()
+    assertThat(invoker.getRetryPolicy()
         .shouldRetry(ex500, 1, 0, false).action)
         .describedAs("should retry %s", ex500)
         .isEqualTo(RetryPolicy.RetryAction.RETRY.action);
@@ -258,7 +258,7 @@ public class TestInvoker extends HadoopTestBase {
     assertRetryAction("Expected failure first throttle",
         RETRY_POLICY_NO_500_ERRORS, RetryPolicy.RetryAction.FAIL,
         ex, 0, true);
-    Assertions.assertThat(failingInvoker.getRetryPolicy()
+    assertThat(failingInvoker.getRetryPolicy()
         .shouldRetry(ex500, 1, 0, false).action)
         .describedAs("should retry %s", ex500)
         .isEqualTo(RetryPolicy.RetryAction.FAIL.action);
@@ -276,10 +276,10 @@ public class TestInvoker extends HadoopTestBase {
             invoker.retry("ex", null, true, () -> {
               throw ex;
             }));
-    Assertions.assertThat(ex501.statusCode())
+    assertThat(ex501.statusCode())
         .describedAs("status code of %s", ex)
         .isEqualTo(501);
-    Assertions.assertThat(retryCount)
+    assertThat(retryCount)
         .describedAs("retry count")
         .isEqualTo(0);
   }
@@ -375,40 +375,45 @@ public class TestInvoker extends HadoopTestBase {
     assertEquals(ACTIVE_RETRY_LIMIT, counter.get());
   }
 
-  @Test(expected = org.apache.hadoop.net.ConnectTimeoutException.class)
+  @Test
   public void testExtractConnectTimeoutException() throws Throwable {
-    throw extractException("", "",
-        new ExecutionException(
-            SdkException.builder()
-                .cause(LOCAL_CONNECTION_TIMEOUT_EX)
-                .build()));
+    assertThrows(org.apache.hadoop.net.ConnectTimeoutException.class, () -> {
+      throw extractException("", "", new ExecutionException(
+        SdkException.builder().cause(LOCAL_CONNECTION_TIMEOUT_EX).build()));
+    });
   }
 
-  @Test(expected = SocketTimeoutException.class)
+  @Test
   public void testExtractSocketTimeoutException() throws Throwable {
-    throw extractException("", "",
+    assertThrows(SocketTimeoutException.class, () -> {
+      throw extractException("", "",
         new ExecutionException(
             SdkException.builder()
-                .cause(SOCKET_TIMEOUT_EX)
-                .build()));
+            .cause(SOCKET_TIMEOUT_EX)
+            .build()));
+    });
   }
 
-  @Test(expected = org.apache.hadoop.net.ConnectTimeoutException.class)
+  @Test
   public void testExtractConnectTimeoutExceptionFromCompletionException() throws Throwable {
-    throw extractException("", "",
+    assertThrows(org.apache.hadoop.net.ConnectTimeoutException.class, () -> {
+      throw extractException("", "",
         new CompletionException(
-            SdkException.builder()
-                .cause(LOCAL_CONNECTION_TIMEOUT_EX)
-                .build()));
+          SdkException.builder()
+          .cause(LOCAL_CONNECTION_TIMEOUT_EX)
+          .build()));
+    });
   }
 
-  @Test(expected = SocketTimeoutException.class)
+  @Test
   public void testExtractSocketTimeoutExceptionFromCompletionException() throws Throwable {
-    throw extractException("", "",
+    assertThrows(SocketTimeoutException.class, () -> {
+      throw extractException("", "",
         new CompletionException(
             SdkException.builder()
-                .cause(SOCKET_TIMEOUT_EX)
-                .build()));
+            .cause(SOCKET_TIMEOUT_EX)
+            .build()));
+    });
   }
 
   /**
@@ -430,7 +435,7 @@ public class TestInvoker extends HadoopTestBase {
       boolean idempotent) throws Exception {
     RetryPolicy.RetryAction outcome = policy.shouldRetry(ex, retries, 0,
         idempotent);
-    Assertions.assertThat(outcome.action)
+    assertThat(outcome.action)
         .describedAs("%s Expected action %s from shouldRetry(%s, %s, %s)",
                       text, expected, ex.toString(), retries, idempotent)
         .isEqualTo(expected.action);
@@ -477,12 +482,14 @@ public class TestInvoker extends HadoopTestBase {
    * Non-idempotent operations fail on anything which isn't a throttle
    * or connectivity problem.
    */
-  @Test(expected = AWSBadRequestException.class)
+  @Test
   public void testNoRetryOfBadRequestNonIdempotent() throws Throwable {
-    invoker.retry("test", null, false,
-        () -> {
+    assertThrows(AWSBadRequestException.class, () -> {
+      invoker.retry("test", null, false,
+          () -> {
           throw serviceException(400, "bad request");
         });
+    });
   }
 
   /**
@@ -503,12 +510,14 @@ public class TestInvoker extends HadoopTestBase {
   /**
    * Repeatedly retry until eventually a bad request succeeds.
    */
-  @Test(expected = AWSBadRequestException.class)
+  @Test
   public void testRetryBadRequestNotIdempotent() throws Throwable {
-    invoker.retry("test", null, false,
-        () -> {
+    assertThrows(AWSBadRequestException.class, () -> {
+      invoker.retry("test", null, false,
+          () -> {
           throw BAD_REQUEST;
         });
+    });
   }
 
   @Test
@@ -585,7 +594,7 @@ public class TestInvoker extends HadoopTestBase {
         RETRY_POLICY, RetryPolicy.RetryAction.FAIL,
         new NullPointerException("oops"), 1, true);
     // catch notification didn't see it
-    assertEquals("retry count ", 0, retryCount);
+    assertEquals(0, retryCount, "retry count ");
   }
 
   /**
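
The TestInvoker changes above replace the JUnit 4 @Test(expected = ...) attribute with JUnit 5's assertThrows. A minimal sketch of the pattern, assuming a hypothetical test class and a simulated timeout rather than the Invoker plumbing:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.net.SocketTimeoutException;
import org.junit.jupiter.api.Test;

public class ExpectedExceptionMigrationExample {

  // JUnit 4: @Test(expected = SocketTimeoutException.class) on the method itself.
  // JUnit 5: plain @Test plus assertThrows around the code that should fail.
  @Test
  public void readTimesOut() {
    SocketTimeoutException thrown =
        assertThrows(SocketTimeoutException.class, () -> {
          throw new SocketTimeoutException("simulated timeout");
        });
    // assertThrows returns the exception, so follow-up assertions stay in the test body.
    assertEquals("simulated timeout", thrown.getMessage());
  }
}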

+ 21 - 21
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestStreamChangeTracker.java

@@ -25,7 +25,7 @@ import software.amazon.awssdk.services.s3.model.CopyObjectResponse;
 import software.amazon.awssdk.services.s3.model.CopyObjectResult;
 import software.amazon.awssdk.services.s3.model.GetObjectRequest;
 import software.amazon.awssdk.services.s3.model.GetObjectResponse;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -68,8 +68,8 @@ public class TestStreamChangeTracker extends HadoopTestBase {
         ChangeDetectionPolicy.Mode.Client,
         ChangeDetectionPolicy.Source.VersionId,
         false);
-    assertFalse("Tracker should not have applied contraints " + tracker,
-        tracker.maybeApplyConstraint(newGetObjectRequestBuilder()));
+    assertFalse(tracker.maybeApplyConstraint(newGetObjectRequestBuilder()),
+        "Tracker should not have applied contraints " + tracker);
     tracker.processResponse(
         newResponse(null, null),
         "", 0);
@@ -96,8 +96,8 @@ public class TestStreamChangeTracker extends HadoopTestBase {
         ChangeDetectionPolicy.Mode.Warn,
         ChangeDetectionPolicy.Source.ETag,
         false);
-    assertFalse("Tracker should not have applied constraints " + tracker,
-        tracker.maybeApplyConstraint(newGetObjectRequestBuilder()));
+    assertFalse(tracker.maybeApplyConstraint(newGetObjectRequestBuilder()),
+        "Tracker should not have applied constraints " + tracker);
     tracker.processResponse(
         newResponse("e1", null),
         "", 0);
@@ -122,8 +122,8 @@ public class TestStreamChangeTracker extends HadoopTestBase {
         ChangeDetectionPolicy.Mode.Client,
         ChangeDetectionPolicy.Source.VersionId,
         false);
-    assertFalse("Tracker should not have applied constraints " + tracker,
-        tracker.maybeApplyConstraint(newGetObjectRequestBuilder()));
+    assertFalse(tracker.maybeApplyConstraint(newGetObjectRequestBuilder()),
+        "Tracker should not have applied constraints " + tracker);
     tracker.processResponse(
         newResponse(null, "rev1"),
         "", 0);
@@ -149,8 +149,8 @@ public class TestStreamChangeTracker extends HadoopTestBase {
         ChangeDetectionPolicy.Mode.Server,
         ChangeDetectionPolicy.Source.VersionId,
         false);
-    assertFalse("Tracker should not have applied contraints " + tracker,
-        tracker.maybeApplyConstraint(newGetObjectRequestBuilder()));
+    assertFalse(tracker.maybeApplyConstraint(newGetObjectRequestBuilder()),
+        "Tracker should not have applied contraints " + tracker);
     tracker.processResponse(
         newResponse(null, "rev1"),
         "", 0);
@@ -209,8 +209,8 @@ public class TestStreamChangeTracker extends HadoopTestBase {
         ChangeDetectionPolicy.Source.VersionId,
         false,
         objectAttributes("etag1", "versionid1"));
-    assertFalse("Tracker should not have applied contraints " + tracker,
-        tracker.maybeApplyConstraint(newCopyObjectRequest()));
+    assertFalse(tracker.maybeApplyConstraint(newCopyObjectRequest()),
+        "Tracker should not have applied contraints " + tracker);
   }
 
   @Test
@@ -264,14 +264,14 @@ public class TestStreamChangeTracker extends HadoopTestBase {
 
   protected void assertConstraintApplied(final ChangeTracker tracker,
       final GetObjectRequest.Builder builder) {
-    assertTrue("Tracker should have applied contraints " + tracker,
-        tracker.maybeApplyConstraint(builder));
+    assertTrue(tracker.maybeApplyConstraint(builder),
+        "Tracker should have applied contraints " + tracker);
   }
 
   protected void assertConstraintApplied(final ChangeTracker tracker,
       final CopyObjectRequest.Builder requestBuilder) throws PathIOException {
-    assertTrue("Tracker should have applied contraints " + tracker,
-        tracker.maybeApplyConstraint(requestBuilder));
+    assertTrue(tracker.maybeApplyConstraint(requestBuilder),
+        "Tracker should have applied contraints " + tracker);
   }
 
   protected RemoteFileChangedException expectChangeException(
@@ -352,16 +352,16 @@ public class TestStreamChangeTracker extends HadoopTestBase {
 
   protected void assertRevisionId(final ChangeTracker tracker,
       final String revId) {
-    assertEquals("Wrong revision ID in " + tracker,
-        revId, tracker.getRevisionId());
+    assertEquals(revId, tracker.getRevisionId(),
+        "Wrong revision ID in " + tracker);
   }
 
 
   protected void assertTrackerMismatchCount(
       final ChangeTracker tracker,
       final int expectedCount) {
-    assertEquals("counter in tracker " + tracker,
-        expectedCount, tracker.getVersionMismatches());
+    assertEquals(expectedCount, tracker.getVersionMismatches(),
+        "counter in tracker " + tracker);
   }
 
   /**
@@ -391,8 +391,8 @@ public class TestStreamChangeTracker extends HadoopTestBase {
         new CountingChangeTracker(), objectAttributes);
     if (objectAttributes.getVersionId() == null
         && objectAttributes.getETag() == null) {
-      assertFalse("Tracker should not have applied constraints " + tracker,
-          tracker.maybeApplyConstraint(newGetObjectRequestBuilder()));
+      assertFalse(tracker.maybeApplyConstraint(newGetObjectRequestBuilder()),
+          "Tracker should not have applied constraints " + tracker);
     }
     return tracker;
   }
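
The TestStreamChangeTracker hunks above move the assertion message from the first to the last parameter, which is the JUnit 5 signature for assertEquals, assertTrue and assertFalse. A minimal sketch with hypothetical values:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;

import org.junit.jupiter.api.Test;

public class AssertionMessageOrderExample {

  @Test
  public void messageIsLastArgument() {
    int expectedCount = 0;
    int actualCount = 0;
    // JUnit 4: assertEquals("counter", expectedCount, actualCount);
    // JUnit 5: the failure message moves to the trailing parameter.
    assertEquals(expectedCount, actualCount, "counter");
    assertFalse(actualCount > 0, "no mismatches expected, found " + actualCount);
  }
}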

+ 8 - 10
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/TestMarshalledCredentials.java

@@ -22,8 +22,8 @@ import java.net.URI;
 import java.net.URISyntaxException;
 
 import software.amazon.awssdk.auth.credentials.AwsCredentials;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.s3a.S3AEncryptionMethods;
@@ -44,7 +44,7 @@ public class TestMarshalledCredentials extends HadoopTestBase {
 
   private URI bucketURI;
 
-  @Before
+  @BeforeEach
   public void createSessionToken() throws URISyntaxException {
     bucketURI = new URI("s3a://bucket1");
     credentials = new MarshalledCredentials("accessKey",
@@ -84,7 +84,7 @@ public class TestMarshalledCredentials extends HadoopTestBase {
         "encryptionContext");
     EncryptionSecrets result = S3ATestUtils.roundTrip(secrets,
         new Configuration());
-    assertEquals("round trip", secrets, result);
+    assertEquals(secrets, result, "round trip");
   }
 
   @Test
@@ -96,12 +96,10 @@ public class TestMarshalledCredentials extends HadoopTestBase {
         credentials,
         MarshalledCredentials.CredentialTypeRequired.SessionOnly);
     AwsCredentials aws = provider.resolveCredentials();
-    assertEquals(credentials.toString(),
-        credentials.getAccessKey(),
-        aws.accessKeyId());
-    assertEquals(credentials.toString(),
-        credentials.getSecretKey(),
-        aws.secretAccessKey());
+    assertEquals(credentials.getAccessKey(),
+        aws.accessKeyId(), credentials.toString());
+    assertEquals(credentials.getSecretKey(),
+        aws.secretAccessKey(), credentials.toString());
     // because the credentials are set to full only, creation will fail
   }
 

+ 18 - 13
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/StagingTestBase.java

@@ -48,10 +48,9 @@ import org.apache.hadoop.fs.s3a.S3AInternals;
 import org.apache.hadoop.fs.s3a.S3AStore;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
 import org.mockito.invocation.InvocationOnMock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -83,9 +82,15 @@ import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
 import org.apache.hadoop.service.ServiceOperations;
 import org.apache.hadoop.test.HadoopTestBase;
 
-
-import static org.mockito.ArgumentMatchers.*;
-import static org.mockito.Mockito.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
 
 /**
  * Test base for mock tests of staging committers:
@@ -203,8 +208,8 @@ public class StagingTestBase {
       StagingCommitter committer,
       JobContext job,
       ConflictResolution mode) {
-    Assert.assertEquals("Conflict resolution mode in " + committer,
-        mode, committer.getConflictResolutionMode(job, new Configuration()));
+    assertEquals(mode, committer.getConflictResolutionMode(job, new Configuration()),
+        "Conflict resolution mode in " + committer);
   }
 
   public static void pathsExist(FileSystem mockS3, String... children)
@@ -316,7 +321,7 @@ public class StagingTestBase {
      * Setup the mini HDFS cluster.
      * @throws IOException Failure
      */
-    @BeforeClass
+    @BeforeAll
     @SuppressWarnings("deprecation")
     public static void setupHDFS() throws IOException {
       if (hdfs == null) {
@@ -329,7 +334,7 @@ public class StagingTestBase {
     }
 
     @SuppressWarnings("ThrowableNotThrown")
-    @AfterClass
+    @AfterAll
     public static void teardownFS() throws IOException {
       ServiceOperations.stopQuietly(hdfs);
       conf = null;
@@ -357,7 +362,7 @@ public class StagingTestBase {
     private StagingTestBase.ClientErrors errors = null;
     private S3Client mockClient = null;
 
-    @Before
+    @BeforeEach
     public void setupJob() throws Exception {
       this.jobConf = createJobConf();
 
@@ -424,7 +429,7 @@ public class StagingTestBase {
     private TaskAttemptContext tac = null;
     private File tempDir;
 
-    @Before
+    @BeforeEach
     public void setupTask() throws Exception {
       this.jobCommitter = newJobCommitter();
       jobCommitter.setupJob(getJob());
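
StagingTestBase above swaps the JUnit 4 lifecycle annotations for their JUnit 5 counterparts: @BeforeClass/@AfterClass become @BeforeAll/@AfterAll (still static) and @Before becomes @BeforeEach. A minimal sketch of the lifecycle, with hypothetical fields standing in for the mini-HDFS setup:

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

public class LifecycleAnnotationExample {

  private static StringBuilder sharedLog;
  private int perTestCounter;

  // JUnit 4 @BeforeClass / @AfterClass become @BeforeAll / @AfterAll and stay static.
  @BeforeAll
  public static void setupOnce() {
    sharedLog = new StringBuilder();
  }

  @AfterAll
  public static void teardownOnce() {
    sharedLog = null;
  }

  // JUnit 4 @Before becomes @BeforeEach and runs before every test method.
  @BeforeEach
  public void setup() {
    perTestCounter = 0;
  }

  @Test
  public void counterStartsAtZero() {
    sharedLog.append("ran\n");
    assertEquals(0, perTestCounter, "per-test counter must be reset by @BeforeEach");
  }
}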

+ 13 - 13
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestDirectoryCommitterScale.java

@@ -27,15 +27,14 @@ import java.util.Map;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.MethodOrderer;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestMethodOrder;
 import software.amazon.awssdk.services.s3.model.CompletedPart;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
-import org.assertj.core.api.Assertions;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.FixMethodOrder;
-import org.junit.Test;
-import org.junit.runners.MethodSorters;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -66,6 +65,7 @@ import static org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase.BUCKET;
 import static org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase.getOutputPath;
 import static org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase.getOutputPathUri;
 import static org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase.pathIsDirectory;
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * Scale test of the directory committer: if there are many, many files
@@ -74,7 +74,7 @@ import static org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase.pathIsDire
  * it does use a lot of local filesystem files though so as to
  * simulate real large scale deployment better.
  */
-@FixMethodOrder(MethodSorters.NAME_ASCENDING)
+@TestMethodOrder(MethodOrderer.MethodName.class)
 public class TestDirectoryCommitterScale
     extends StagingTestBase.JobCommitterTest<DirectoryStagingCommitter> {
 
@@ -105,7 +105,7 @@ public class TestDirectoryCommitterScale
         createTaskAttemptForJob());
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void setupStaging() throws Exception {
     stagingDir = File.createTempFile("staging", null);
     stagingDir.delete();
@@ -115,7 +115,7 @@ public class TestDirectoryCommitterScale
   }
 
 
-  @AfterClass
+  @AfterAll
   public static void teardownStaging() throws IOException {
     try {
       if (stagingDir != null) {
@@ -228,7 +228,7 @@ public class TestDirectoryCommitterScale
         null, COMMITTER_THREAD_COUNT)) {
       AbstractS3ACommitter.ActiveCommit activeCommit
           = committer.listPendingUploadsToCommit(commitContext);
-      Assertions.assertThat(activeCommit.getSourceFiles())
+      assertThat(activeCommit.getSourceFiles())
           .describedAs("Source files of %s", activeCommit)
           .hasSize(TASKS);
     }
@@ -250,15 +250,15 @@ public class TestDirectoryCommitterScale
       committer.commitJob(getJob());
     }
 
-    Assertions.assertThat(results.getCommits())
+    assertThat(results.getCommits())
         .describedAs("commit count")
         .hasSize(TOTAL_COMMIT_COUNT);
     AbstractS3ACommitter.ActiveCommit activeCommit = committer.activeCommit;
-    Assertions.assertThat(activeCommit.getCommittedObjects())
+    assertThat(activeCommit.getCommittedObjects())
         .describedAs("committed objects in active commit")
         .hasSize(Math.min(TOTAL_COMMIT_COUNT,
             CommitConstants.SUCCESS_MARKER_FILE_LIMIT));
-    Assertions.assertThat(activeCommit.getCommittedFileCount())
+    assertThat(activeCommit.getCommittedFileCount())
         .describedAs("committed objects in active commit")
         .isEqualTo(TOTAL_COMMIT_COUNT);
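
TestDirectoryCommitterScale above replaces @FixMethodOrder(MethodSorters.NAME_ASCENDING) with the JUnit 5 equivalent @TestMethodOrder(MethodOrderer.MethodName.class). A minimal sketch, with hypothetical step methods standing in for the committer tests:

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.ArrayList;
import java.util.List;
import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestMethodOrder;

// JUnit 4: @FixMethodOrder(MethodSorters.NAME_ASCENDING)
// JUnit 5: MethodOrderer.MethodName sorts the test methods by name.
@TestMethodOrder(MethodOrderer.MethodName.class)
public class MethodOrderExample {

  private static final List<String> EXECUTED = new ArrayList<>();

  @Test
  public void step1Prepare() {
    EXECUTED.add("step1");
  }

  @Test
  public void step2Verify() {
    // Runs after step1Prepare because of the name-based ordering.
    assertEquals(List.of("step1"), EXECUTED, "step1 must have run first");
  }
}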
 

+ 2 - 2
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestPaths.java

@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.fs.s3a.commit.staging;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -79,7 +79,7 @@ public class TestPaths extends HadoopTestBase {
   }
 
   private void assertUUIDAdded(String path, String expected) {
-    assertEquals("from " + path, expected, addUUID(path, "UUID"));
+    assertEquals(expected, addUUID(path, "UUID"), "from " + path);
   }
 
   private static final String DATA = UNIT_TEST_EXAMPLE_PATH;

+ 199 - 154
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingCommitter.java

@@ -31,17 +31,14 @@ import java.util.Set;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 import software.amazon.awssdk.services.s3.S3Client;
 import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest;
 import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
 
 import org.apache.hadoop.util.Sets;
-import org.assertj.core.api.Assertions;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.api.AfterEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -78,12 +75,12 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
 import static org.apache.hadoop.fs.s3a.commit.staging.Paths.*;
 import static org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase.*;
 import static org.apache.hadoop.test.LambdaTestUtils.*;
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * The main unit test suite of the staging committer.
  * Parameterized on thread count and unique filename policy.
  */
-@RunWith(Parameterized.class)
 public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
 
   private static final JobID JOB_ID = new JobID("job", 1);
@@ -97,8 +94,8 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestStagingCommitter.class);
 
-  private final int numThreads;
-  private final boolean uniqueFilenames;
+  private int numThreads;
+  private boolean uniqueFilenames;
   private JobContext job = null;
   private TaskAttemptContext tac = null;
   private Configuration conf = null;
@@ -129,7 +126,6 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
    * whether or not filenames are unique.
    * @return a list of parameter tuples.
    */
-  @Parameterized.Parameters(name="threads-{0}-unique-{1}")
   public static Collection<Object[]> params() {
     return Arrays.asList(new Object[][] {
         {0, false},
@@ -138,12 +134,12 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
     });
   }
 
-  public TestStagingCommitter(int numThreads, boolean uniqueFilenames) {
-    this.numThreads = numThreads;
-    this.uniqueFilenames = uniqueFilenames;
+  public void initTestStagingCommitter(int pNumThreads, boolean pNniqueFilenames) throws Exception {
+    this.numThreads = pNumThreads;
+    this.uniqueFilenames = pNniqueFilenames;
+    setupCommitter();
   }
 
-  @Before
   public void setupCommitter() throws Exception {
     JobConf jobConf = getConfiguration();
     jobConf.setInt(FS_S3A_COMMITTER_THREADS, numThreads);
@@ -187,7 +183,7 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
     Paths.resetTempFolderCache();
   }
 
-  @After
+  @AfterEach
   public void cleanup() {
     try {
       if (tmpDir != null) {
@@ -202,35 +198,40 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
     return new Configuration(false);
   }
 
-  @Test
-  public void testMockFSclientWiredUp() throws Throwable {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testMockFSclientWiredUp(int pNumThreads, boolean pNniqueFilenames) throws Throwable {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     final S3Client client = mockFS.getS3AInternals().getAmazonS3Client("test");
-    Assertions.assertThat(client)
+    assertThat(client)
         .describedAs("S3Client from FS")
         .isNotNull()
         .isSameAs(mockClient);
   }
 
-  @Test
-  public void testUUIDPropagation() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testUUIDPropagation(int pNumThreads, boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     Configuration config = newConfig();
     String uuid = uuid();
     config.set(SPARK_WRITE_UUID, uuid);
     config.setBoolean(FS_S3A_COMMITTER_REQUIRE_UUID, true);
     Pair<String, AbstractS3ACommitter.JobUUIDSource> t3 = AbstractS3ACommitter
         .buildJobUUID(config, JOB_ID);
-    assertEquals("Job UUID", uuid, t3.getLeft());
-    assertEquals("Job UUID source: " + t3,
-        AbstractS3ACommitter.JobUUIDSource.SparkWriteUUID,
-        t3.getRight());
+    assertEquals(uuid, t3.getLeft(), "Job UUID");
+    assertEquals(AbstractS3ACommitter.JobUUIDSource.SparkWriteUUID,
+        t3.getRight(), "Job UUID source: " + t3);
   }
 
   /**
    * If the Spark UUID is required, then binding will fail
    * if a UUID did not get passed in.
    */
-  @Test
-  public void testUUIDValidation() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testUUIDValidation(int pNumThreads, boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     Configuration config = newConfig();
     config.setBoolean(FS_S3A_COMMITTER_REQUIRE_UUID, true);
     intercept(PathCommitException.class, E_NO_SPARK_UUID, () ->
@@ -240,8 +241,10 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
   /**
    * Validate ordering of UUID retrieval.
    */
-  @Test
-  public void testUUIDLoadOrdering() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testUUIDLoadOrdering(int pNumThreads, boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     Configuration config = newConfig();
     config.setBoolean(FS_S3A_COMMITTER_REQUIRE_UUID, true);
     String uuid = uuid();
@@ -250,24 +253,24 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
     config.set(SPARK_WRITE_UUID, "something");
     Pair<String, AbstractS3ACommitter.JobUUIDSource> t3 = AbstractS3ACommitter
         .buildJobUUID(config, JOB_ID);
-    assertEquals("Job UUID", uuid, t3.getLeft());
-    assertEquals("Job UUID source: " + t3,
-        AbstractS3ACommitter.JobUUIDSource.CommitterUUIDProperty,
-        t3.getRight());
+    assertEquals(uuid, t3.getLeft(), "Job UUID");
+    assertEquals(AbstractS3ACommitter.JobUUIDSource.CommitterUUIDProperty,
+        t3.getRight(), "Job UUID source: " + t3);
   }
 
   /**
    * Verify that unless the config enables self-generation, JobIDs
    * are used.
    */
-  @Test
-  public void testJobIDIsUUID() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testJobIDIsUUID(int pNumThreads, boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     Configuration config = newConfig();
     Pair<String, AbstractS3ACommitter.JobUUIDSource> t3 = AbstractS3ACommitter
         .buildJobUUID(config, JOB_ID);
-    assertEquals("Job UUID source: " + t3,
-        AbstractS3ACommitter.JobUUIDSource.JobID,
-        t3.getRight());
+    assertEquals(AbstractS3ACommitter.JobUUIDSource.JobID,
+        t3.getRight(), "Job UUID source: " + t3);
     // parse it as a JobID
     JobID.forName(t3.getLeft());
   }
@@ -276,15 +279,16 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
    * Verify self-generated UUIDs are supported when enabled,
    * and come before JobID.
    */
-  @Test
-  public void testSelfGeneratedUUID() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testSelfGeneratedUUID(int pNumThreads, boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     Configuration config = newConfig();
     config.setBoolean(FS_S3A_COMMITTER_GENERATE_UUID, true);
     Pair<String, AbstractS3ACommitter.JobUUIDSource> t3 = AbstractS3ACommitter
         .buildJobUUID(config, JOB_ID);
-    assertEquals("Job UUID source: " + t3,
-        AbstractS3ACommitter.JobUUIDSource.GeneratedLocally,
-        t3.getRight());
+    assertEquals(AbstractS3ACommitter.JobUUIDSource.GeneratedLocally,
+        t3.getRight(), "Job UUID source: " + t3);
     // parse it
     UUID.fromString(t3.getLeft());
   }
@@ -308,21 +312,27 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
     return UUID.randomUUID().toString();
   }
 
-  @Test
-  public void testAttemptPathConstructionNoSchema() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testAttemptPathConstructionNoSchema(int pNumThreads, boolean pNniqueFilenames)
+      throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     Configuration config = newConfig();
     final String jobUUID = addUUID(config);
     config.set(BUFFER_DIR, "/tmp/mr-local-0,/tmp/mr-local-1");
     String commonPath = "file:/tmp/mr-local-";
-    Assertions.assertThat(getLocalTaskAttemptTempDir(config,
+    assertThat(getLocalTaskAttemptTempDir(config,
         jobUUID, tac.getTaskAttemptID()).toString())
         .describedAs("Missing scheme should produce local file paths")
         .startsWith(commonPath)
         .contains(jobUUID);
   }
 
-  @Test
-  public void testAttemptPathsDifferentByTaskAttempt() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testAttemptPathsDifferentByTaskAttempt(int pNumThreads, boolean pNniqueFilenames)
+      throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     Configuration config = newConfig();
     final String jobUUID = addUUID(config);
     config.set(BUFFER_DIR, "file:/tmp/mr-local-0");
@@ -330,13 +340,16 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
         jobUUID, AID).toString();
     String attempt2Path = getLocalTaskAttemptTempDir(config,
         jobUUID, AID2).toString();
-    Assertions.assertThat(attempt2Path)
+    assertThat(attempt2Path)
         .describedAs("local task attempt dir of TA1 must not match that of TA2")
         .isNotEqualTo(attempt1Path);
   }
 
-  @Test
-  public void testAttemptPathConstructionWithSchema() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testAttemptPathConstructionWithSchema(int pNumThreads, boolean pNniqueFilenames)
+      throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     Configuration config = newConfig();
     final String jobUUID = addUUID(config);
     String commonPath = "file:/tmp/mr-local-";
@@ -344,58 +357,66 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
     config.set(BUFFER_DIR,
         "file:/tmp/mr-local-0,file:/tmp/mr-local-1");
 
-    Assertions.assertThat(
+    assertThat(
         getLocalTaskAttemptTempDir(config,
             jobUUID, tac.getTaskAttemptID()).toString())
         .describedAs("Path should be the same with file scheme")
         .startsWith(commonPath);
   }
 
-  @Test
-  public void testAttemptPathConstructionWrongSchema() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testAttemptPathConstructionWrongSchema(int pNumThreads,
+      boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     Configuration config = newConfig();
     final String jobUUID = addUUID(config);
     config.set(BUFFER_DIR,
         "hdfs://nn:8020/tmp/mr-local-0,hdfs://nn:8020/tmp/mr-local-1");
     intercept(IllegalArgumentException.class, "Wrong FS",
         () -> getLocalTaskAttemptTempDir(config, jobUUID,
-                tac.getTaskAttemptID()));
+        tac.getTaskAttemptID()));
   }
 
-  @Test
-  public void testCommitPathConstruction() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testCommitPathConstruction(int pNumThreads,
+      boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     Path committedTaskPath = committer.getCommittedTaskPath(tac);
-    assertEquals("Path should be in HDFS: " + committedTaskPath,
-        "hdfs", committedTaskPath.toUri().getScheme());
+    assertEquals("hdfs", committedTaskPath.toUri().getScheme(),
+        "Path should be in HDFS: " + committedTaskPath);
     String ending = STAGING_UPLOADS + "/_temporary/0/task_job_0001_r_000002";
-    assertTrue("Did not end with \"" + ending +"\" :" + committedTaskPath,
-        committedTaskPath.toString().endsWith(ending));
+    assertTrue(committedTaskPath.toString().endsWith(ending),
+        "Did not end with \"" + ending +"\" :" + committedTaskPath);
   }
 
-  @Test
-  public void testSingleTaskCommit() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testSingleTaskCommit(int pNumThreads,
+      boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     Path file = new Path(commitTask(committer, tac, 1).iterator().next());
 
     List<String> uploads = results.getUploads();
-    assertEquals("Should initiate one upload: " + results, 1, uploads.size());
+    assertEquals(1, uploads.size(), "Should initiate one upload: " + results);
 
     Path committedPath = committer.getCommittedTaskPath(tac);
     FileSystem dfs = committedPath.getFileSystem(conf);
 
-    assertEquals("Should commit to HDFS: "+ committer, getDFS(), dfs);
+    assertEquals(getDFS(), dfs, "Should commit to HDFS: "+ committer);
 
     FileStatus[] stats = dfs.listStatus(committedPath);
-    assertEquals("Should produce one commit file: " + results, 1, stats.length);
-    assertEquals("Should name the commits file with the task ID: " + results,
-        "task_job_0001_r_000002", stats[0].getPath().getName());
+    assertEquals(1, stats.length, "Should produce one commit file: " + results);
+    assertEquals("task_job_0001_r_000002", stats[0].getPath().getName(),
+        "Should name the commits file with the task ID: " + results);
 
     PendingSet pending = PersistentCommitData.load(dfs, stats[0], PendingSet.serializer());
-    assertEquals("Should have one pending commit", 1, pending.size());
+    assertEquals(1, pending.size(), "Should have one pending commit");
     SinglePendingCommit commit = pending.getCommits().get(0);
-    assertEquals("Should write to the correct bucket:" + results,
-        BUCKET, commit.getBucket());
-    assertEquals("Should write to the correct key: " + results,
-        OUTPUT_PREFIX + "/" + file.getName(), commit.getDestinationKey());
+    assertEquals(BUCKET, commit.getBucket(), "Should write to the correct bucket:" + results);
+    assertEquals(OUTPUT_PREFIX + "/" + file.getName(), commit.getDestinationKey(),
+        "Should write to the correct key: " + results);
 
     assertValidUpload(results.getTagsByUpload(), commit);
   }
@@ -404,8 +425,11 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
    * This originally verified that empty files weren't PUT. They are now.
    * @throws Exception on a failure
    */
-  @Test
-  public void testSingleTaskEmptyFileCommit() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testSingleTaskEmptyFileCommit(int pNumThreads,
+      boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     committer.setupTask(tac);
 
     Path attemptPath = committer.getTaskAttemptPath(tac);
@@ -416,60 +440,65 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
     committer.commitTask(tac);
 
     List<String> uploads = results.getUploads();
-    assertEquals("Should initiate one upload", 1, uploads.size());
+    assertEquals(1, uploads.size(), "Should initiate one upload");
 
     Path committedPath = committer.getCommittedTaskPath(tac);
     FileSystem dfs = committedPath.getFileSystem(conf);
 
-    assertEquals("Should commit to HDFS", getDFS(), dfs);
+    assertEquals(getDFS(), dfs, "Should commit to HDFS");
 
     assertIsFile(dfs, committedPath);
     FileStatus[] stats = dfs.listStatus(committedPath);
-    assertEquals("Should produce one commit file", 1, stats.length);
-    assertEquals("Should name the commits file with the task ID",
-        "task_job_0001_r_000002", stats[0].getPath().getName());
+    assertEquals(1, stats.length, "Should produce one commit file");
+    assertEquals("task_job_0001_r_000002", stats[0].getPath().getName(),
+        "Should name the commits file with the task ID");
 
     PendingSet pending = PersistentCommitData.load(dfs, stats[0], PendingSet.serializer());
-    assertEquals("Should have one pending commit", 1, pending.size());
+    assertEquals(1, pending.size(), "Should have one pending commit");
   }
 
-  @Test
-  public void testSingleTaskMultiFileCommit() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testSingleTaskMultiFileCommit(int pNumThreads,
+      boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     int numFiles = 3;
     Set<String> files = commitTask(committer, tac, numFiles);
 
     List<String> uploads = results.getUploads();
-    assertEquals("Should initiate multiple uploads", numFiles, uploads.size());
+    assertEquals(numFiles, uploads.size(), "Should initiate multiple uploads");
 
     Path committedPath = committer.getCommittedTaskPath(tac);
     FileSystem dfs = committedPath.getFileSystem(conf);
 
-    assertEquals("Should commit to HDFS", getDFS(), dfs);
+    assertEquals(getDFS(), dfs, "Should commit to HDFS");
     assertIsFile(dfs, committedPath);
     FileStatus[] stats = dfs.listStatus(committedPath);
-    assertEquals("Should produce one commit file", 1, stats.length);
-    assertEquals("Should name the commits file with the task ID",
-        "task_job_0001_r_000002", stats[0].getPath().getName());
+    assertEquals(1, stats.length, "Should produce one commit file");
+    assertEquals("task_job_0001_r_000002", stats[0].getPath().getName(),
+        "Should name the commits file with the task ID");
 
     List<SinglePendingCommit> pending =
         PersistentCommitData.load(dfs, stats[0], PendingSet.serializer()).getCommits();
-    assertEquals("Should have correct number of pending commits",
-        files.size(), pending.size());
+    assertEquals(files.size(), pending.size(),
+        "Should have correct number of pending commits");
 
     Set<String> keys = Sets.newHashSet();
     for (SinglePendingCommit commit : pending) {
-      assertEquals("Should write to the correct bucket: " + commit,
-          BUCKET, commit.getBucket());
+      assertEquals(BUCKET, commit.getBucket(),
+          "Should write to the correct bucket: " + commit);
       assertValidUpload(results.getTagsByUpload(), commit);
       keys.add(commit.getDestinationKey());
     }
 
-    assertEquals("Should write to the correct key",
-        files, keys);
+    assertEquals(files, keys, "Should write to the correct key");
   }
 
-  @Test
-  public void testTaskInitializeFailure() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testTaskInitializeFailure(int pNumThreads,
+      boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     committer.setupTask(tac);
 
     errors.failOnInit(1);
@@ -487,18 +516,20 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
         "Should fail during init",
         () -> committer.commitTask(tac));
 
-    assertEquals("Should have initialized one file upload",
-        1, results.getUploads().size());
-    assertEquals("Should abort the upload",
-        new HashSet<>(results.getUploads()),
-        getAbortedIds(results.getAborts()));
+    assertEquals(1, results.getUploads().size(),
+        "Should have initialized one file upload");
+    assertEquals(new HashSet<>(results.getUploads()),
+        getAbortedIds(results.getAborts()), "Should abort the upload");
     assertPathDoesNotExist(fs,
         "Should remove the attempt path",
         attemptPath);
   }
 
-  @Test
-  public void testTaskSingleFileUploadFailure() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testTaskSingleFileUploadFailure(int pNumThreads,
+      boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     describe("Set up a single file upload to fail on upload 2");
     committer.setupTask(tac);
 
@@ -518,17 +549,19 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
           return committer.toString();
         });
 
-    assertEquals("Should have attempted one file upload",
-        1, results.getUploads().size());
-    assertEquals("Should abort the upload",
-        results.getUploads().get(0),
-        results.getAborts().get(0).uploadId());
+    assertEquals(1, results.getUploads().size(),
+        "Should have attempted one file upload");
+    assertEquals(results.getUploads().get(0),
+        results.getAborts().get(0).uploadId(), "Should abort the upload");
     assertPathDoesNotExist(fs, "Should remove the attempt path",
         attemptPath);
   }
 
-  @Test
-  public void testTaskMultiFileUploadFailure() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testTaskMultiFileUploadFailure(int pNumThreads,
+      boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     committer.setupTask(tac);
 
     errors.failOnUpload(5);
@@ -549,17 +582,19 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
           return committer.toString();
         });
 
-    assertEquals("Should have attempted two file uploads",
-        2, results.getUploads().size());
-    assertEquals("Should abort the upload",
-        new HashSet<>(results.getUploads()),
-        getAbortedIds(results.getAborts()));
+    assertEquals(2, results.getUploads().size(),
+        "Should have attempted two file uploads");
+    assertEquals(new HashSet<>(results.getUploads()),
+        getAbortedIds(results.getAborts()), "Should abort the upload");
     assertPathDoesNotExist(fs, "Should remove the attempt path",
         attemptPath);
   }
 
-  @Test
-  public void testTaskUploadAndAbortFailure() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testTaskUploadAndAbortFailure(int pNumThreads,
+      boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     committer.setupTask(tac);
 
     errors.failOnUpload(5);
@@ -581,16 +616,18 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
             return committer.toString();
         });
 
-    assertEquals("Should have attempted two file uploads",
-        2, results.getUploads().size());
-    assertEquals("Should not have succeeded with any aborts",
-        new HashSet<>(),
-        getAbortedIds(results.getAborts()));
+    assertEquals(2, results.getUploads().size(),
+        "Should have attempted two file uploads");
+    assertEquals(new HashSet<>(), getAbortedIds(results.getAborts()),
+        "Should not have succeeded with any aborts");
     assertPathDoesNotExist(fs, "Should remove the attempt path", attemptPath);
   }
 
-  @Test
-  public void testSingleTaskAbort() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testSingleTaskAbort(int pNumThreads,
+      boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     committer.setupTask(tac);
 
     Path attemptPath = committer.getTaskAttemptPath(tac);
@@ -601,17 +638,19 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
 
     committer.abortTask(tac);
 
-    assertEquals("Should not upload anything",
-        0, results.getUploads().size());
-    assertEquals("Should not upload anything",
-        0, results.getParts().size());
+    assertEquals(0, results.getUploads().size(),
+        "Should not upload anything");
+    assertEquals(0, results.getParts().size(), "Should not upload anything");
     assertPathDoesNotExist(fs, "Should remove all attempt data", outPath);
     assertPathDoesNotExist(fs, "Should remove the attempt path", attemptPath);
 
   }
 
-  @Test
-  public void testJobCommit() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testJobCommit(int pNumThreads,
+      boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     Path jobAttemptPath = jobCommitter.getJobAttemptPath(job);
     FileSystem fs = jobAttemptPath.getFileSystem(conf);
 
@@ -621,21 +660,24 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
     assertPathExists(fs, "No job attempt path", jobAttemptPath);
 
     jobCommitter.commitJob(job);
-    assertEquals("Should have aborted no uploads",
-        0, results.getAborts().size());
+    assertEquals(0, results.getAborts().size(),
+        "Should have aborted no uploads");
 
-    assertEquals("Should have deleted no uploads",
-        0, results.getDeletes().size());
+    assertEquals(0, results.getDeletes().size(),
+        "Should have deleted no uploads");
 
-    assertEquals("Should have committed all uploads",
-        uploads, getCommittedIds(results.getCommits()));
+    assertEquals(uploads, getCommittedIds(results.getCommits()),
+        "Should have committed all uploads");
 
     assertPathDoesNotExist(fs, "jobAttemptPath not deleted", jobAttemptPath);
 
   }
 
-  @Test
-  public void testJobCommitFailure() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testJobCommitFailure(int pNumThreads,
+      boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     Path jobAttemptPath = jobCommitter.getJobAttemptPath(job);
     FileSystem fs = jobAttemptPath.getFileSystem(conf);
 
@@ -666,24 +708,27 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
             "s3a://" + delete.bucket() + "/" + delete.key())
         .collect(Collectors.toSet());
 
-    Assertions.assertThat(commits)
+    assertThat(commits)
         .describedAs("Committed objects compared to deleted paths %s", results)
         .containsExactlyInAnyOrderElementsOf(deletes);
 
-    Assertions.assertThat(results.getAborts())
+    assertThat(results.getAborts())
         .describedAs("aborted count in %s", results)
         .hasSize(7);
     Set<String> uploadIds = getCommittedIds(results.getCommits());
     uploadIds.addAll(getAbortedIds(results.getAborts()));
-    Assertions.assertThat(uploadIds)
+    assertThat(uploadIds)
         .describedAs("Combined commit/delete and aborted upload IDs")
         .containsExactlyInAnyOrderElementsOf(uploads);
 
     assertPathDoesNotExist(fs, "jobAttemptPath not deleted", jobAttemptPath);
   }
 
-  @Test
-  public void testJobAbort() throws Exception {
+  @ParameterizedTest(name = "threads-{0}-unique-{1}")
+  @MethodSource("params")
+  public void testJobAbort(int pNumThreads,
+      boolean pNniqueFilenames) throws Exception {
+    initTestStagingCommitter(pNumThreads, pNniqueFilenames);
     Path jobAttemptPath = jobCommitter.getJobAttemptPath(job);
     FileSystem fs = jobAttemptPath.getFileSystem(conf);
 
@@ -691,14 +736,14 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
 
     assertPathExists(fs, "No job attempt path", jobAttemptPath);
     jobCommitter.abortJob(job, JobStatus.State.KILLED);
-    assertEquals("Should have committed no uploads: " + jobCommitter,
-        0, results.getCommits().size());
+    assertEquals(0, results.getCommits().size(),
+        "Should have committed no uploads: " + jobCommitter);
 
-    assertEquals("Should have deleted no uploads: " + jobCommitter,
-        0, results.getDeletes().size());
+    assertEquals(0, results.getDeletes().size(),
+        "Should have deleted no uploads: " + jobCommitter);
 
-    assertEquals("Should have aborted all uploads: " + jobCommitter,
-        uploads, getAbortedIds(results.getAborts()));
+    assertEquals(uploads, getAbortedIds(results.getAborts()),
+        "Should have aborted all uploads: " + jobCommitter);
 
     assertPathDoesNotExist(fs, "jobAttemptPath not deleted", jobAttemptPath);
   }
@@ -771,16 +816,16 @@ public class TestStagingCommitter extends StagingTestBase.MiniDFSTest {
 
   private static void assertValidUpload(Map<String, List<String>> parts,
                                         SinglePendingCommit commit) {
-    assertTrue("Should commit a valid uploadId",
-        parts.containsKey(commit.getUploadId()));
+    assertTrue(parts.containsKey(commit.getUploadId()),
+        "Should commit a valid uploadId");
 
     List<String> tags = parts.get(commit.getUploadId());
-    assertEquals("Should commit the correct number of file parts",
-        tags.size(), commit.getPartCount());
+    assertEquals(tags.size(), commit.getPartCount(),
+        "Should commit the correct number of file parts");
 
     for (int i = 0; i < tags.size(); i += 1) {
-      assertEquals("Should commit the correct part tags",
-          tags.get(i), commit.getEtags().get(i).getEtag());
+      assertEquals(tags.get(i), commit.getEtags().get(i).getEtag(),
+          "Should commit the correct part tags");
     }
   }
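
The TestStagingCommitter rewrite above is the standard JUnit 4 @RunWith(Parameterized.class) to JUnit 5 @ParameterizedTest/@MethodSource conversion: the constructor becomes an init method that every test calls with its injected parameters. A minimal sketch of the shape, with hypothetical field names and a trivial assertion in place of the committer setup:

import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.Arrays;
import java.util.Collection;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

public class ParameterizedMigrationExample {

  private int numThreads;
  private boolean uniqueFilenames;

  // Replaces the JUnit 4 @Parameterized.Parameters method; the signature is unchanged.
  public static Collection<Object[]> params() {
    return Arrays.asList(new Object[][] {
        {0, false},
        {1, true},
    });
  }

  // Replaces the JUnit 4 constructor: each test invokes it with its own parameters.
  public void init(int pNumThreads, boolean pUniqueFilenames) {
    this.numThreads = pNumThreads;
    this.uniqueFilenames = pUniqueFilenames;
  }

  @ParameterizedTest(name = "threads-{0}-unique-{1}")
  @MethodSource("params")
  public void testThreadCountNonNegative(int pNumThreads, boolean pUniqueFilenames) {
    init(pNumThreads, pUniqueFilenames);
    assertTrue(numThreads >= 0, "thread count for unique=" + uniqueFilenames);
  }
}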
 

+ 5 - 4
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingDirectoryOutputCommitter.java

@@ -21,7 +21,7 @@ package org.apache.hadoop.fs.s3a.commit.staging;
 import java.util.Arrays;
 import java.util.stream.Collectors;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -39,7 +39,8 @@ import org.apache.hadoop.fs.statistics.IOStatisticsContext;
 import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*;
 import static org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase.*;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.reset;
 
 /** Mocking test of directory committer. */
 public class TestStagingDirectoryOutputCommitter
@@ -199,7 +200,7 @@ public class TestStagingDirectoryOutputCommitter
     LOG.info("source of conflict mode {}", sourceStr);
     String baseConfVal = baseConf
         .getTrimmed(FS_S3A_COMMITTER_STAGING_CONFLICT_MODE);
-    assertEquals("conflict mode in core config from " + sourceStr,
-        CONFLICT_MODE_APPEND, baseConfVal);
+    assertEquals(CONFLICT_MODE_APPEND, baseConfVal,
+        "conflict mode in core config from " + sourceStr);
   }
 }

+ 9 - 11
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedFileListing.java

@@ -29,8 +29,8 @@ import java.util.UUID;
 import java.util.stream.Collectors;
 
 import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -39,8 +39,7 @@ import org.apache.hadoop.fs.Path;
 import static org.apache.hadoop.fs.contract.ContractTestUtils.*;
 import static org.apache.hadoop.fs.s3a.S3AUtils.*;
 import static org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase.*;
-import static org.hamcrest.CoreMatchers.allOf;
-import static org.hamcrest.CoreMatchers.hasItem;
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * Test partitioned staging committer's logic for putting data in the right
@@ -63,7 +62,7 @@ public class TestStagingPartitionedFileListing
   private FileSystem attemptFS;
   private Path attemptPath;
 
-  @After
+  @AfterEach
   public void cleanupAttempt() {
     cleanup("teardown", attemptFS, attemptPath);
   }
@@ -96,7 +95,7 @@ public class TestStagingPartitionedFileListing
           .collect(Collectors.toList());
       Collections.sort(expectedFiles);
       Collections.sort(actualFiles);
-      assertEquals("File sets should match", expectedFiles, actualFiles);
+      assertEquals(expectedFiles, actualFiles, "File sets should match");
     } finally {
       deleteQuietly(attemptFS, attemptPath, true);
     }
@@ -136,7 +135,7 @@ public class TestStagingPartitionedFileListing
           .collect(Collectors.toList());
       Collections.sort(expectedFiles);
       Collections.sort(actualFiles);
-      assertEquals("File sets should match", expectedFiles, actualFiles);
+      assertEquals(expectedFiles, actualFiles, "File sets should match");
     } finally {
       deleteQuietly(attemptFS, attemptPath, true);
     }
@@ -158,14 +157,13 @@ public class TestStagingPartitionedFileListing
     String oct2017 = "year=2017/month=10";
     Path octLog = new Path(attemptPath, oct2017 + "/log-2017-10-04.txt");
     touch(attemptFS, octLog);
-    assertThat(listPartitions(attemptFS, attemptPath), hasItem(oct2017));
+    assertThat(listPartitions(attemptFS, attemptPath)).contains(oct2017);
 
     // add a root entry and it ends up under the table_root entry
     Path rootFile = new Path(attemptPath, "root.txt");
     touch(attemptFS, rootFile);
-    assertThat(listPartitions(attemptFS, attemptPath),
-        allOf(hasItem(oct2017),
-            hasItem(StagingCommitterConstants.TABLE_ROOT)));
+    assertThat(listPartitions(attemptFS, attemptPath))
+        .contains(oct2017, StagingCommitterConstants.TABLE_ROOT);
   }
 
   /**
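
TestStagingPartitionedFileListing above moves from Hamcrest matchers to AssertJ. A minimal sketch of the collection assertions involved, using hypothetical partition strings: AssertJ's contains(...) requires every listed value to be present (the analogue of allOf(hasItem(...), hasItem(...))), whereas containsAnyOf(...) only requires one of them.

import static org.assertj.core.api.Assertions.assertThat;

import java.util.Arrays;
import java.util.List;
import org.junit.jupiter.api.Test;

public class HamcrestToAssertJExample {

  @Test
  public void listContainsBothPartitions() {
    List<String> partitions = Arrays.asList("year=2017/month=10", "table_root");
    // Hamcrest: assertThat(partitions,
    //     allOf(hasItem("year=2017/month=10"), hasItem("table_root")));
    // AssertJ: contains(...) checks that both values are present, in any order.
    assertThat(partitions)
        .contains("year=2017/month=10", "table_root");
  }
}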

+ 7 - 4
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedJobCommit.java

@@ -24,7 +24,8 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.UUID;
 
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
@@ -39,7 +40,8 @@ import org.apache.hadoop.fs.s3a.commit.impl.CommitContext;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.reset;
 import static org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase.*;
 import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*;
 
@@ -47,6 +49,7 @@ import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*;
 public class TestStagingPartitionedJobCommit
     extends StagingTestBase.JobCommitterTest<PartitionedStagingCommitter> {
 
+  @BeforeEach
   @Override
   public void setupJob() throws Exception {
     super.setupJob();
@@ -255,8 +258,8 @@ public class TestStagingPartitionedJobCommit
 
     verifyReplaceCommitActions(mockS3);
     verifyDeleted(mockS3, "dateint=20161116/hour=14");
-    assertTrue("Should have aborted",
-        ((PartitionedStagingCommitterForTesting) committer).aborted);
+    assertTrue(((PartitionedStagingCommitterForTesting) committer).aborted,
+        "Should have aborted");
     verifyCompletion(mockS3);
   }
 

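The @BeforeEach added to the overridden setupJob() is needed because JUnit 5 does not treat an overriding method as a lifecycle method unless it carries the annotation itself; without it, neither the parent's nor the child's setup would run. A minimal sketch with hypothetical class names:

import org.junit.jupiter.api.BeforeEach;

abstract class JobCommitterTestSketch {
  @BeforeEach
  public void setupJob() throws Exception {
    // shared mock wiring lives in the base class
  }
}

class PartitionedJobCommitTestSketch extends JobCommitterTestSketch {
  @BeforeEach   // must be re-declared: the annotation is not inherited by an override
  @Override
  public void setupJob() throws Exception {
    super.setupJob();       // keep the base behaviour
    // partition-specific setup goes here
  }
}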
+ 7 - 7
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/staging/TestStagingPartitionedTaskCommit.java

@@ -27,9 +27,8 @@ import java.util.UUID;
 import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Sets;
-import org.assertj.core.api.Assertions;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -39,7 +38,8 @@ import org.apache.hadoop.mapreduce.JobContext;
 
 import static org.apache.hadoop.fs.s3a.commit.CommitConstants.*;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.mockito.Mockito.*;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.reset;
 import static org.apache.hadoop.fs.s3a.commit.staging.StagingTestBase.*;
 
 /** Mocking test of the partitioned committer. */
@@ -60,7 +60,7 @@ public class TestStagingPartitionedTaskCommit
   // The set of files used by this test
   private static List<String> relativeFiles = Lists.newArrayList();
 
-  @BeforeClass
+  @BeforeAll
   public static void createRelativeFileList() {
     for (String dateint : Arrays.asList("20161115", "20161116")) {
       for (String hour : Arrays.asList("14", "15")) {
@@ -152,12 +152,12 @@ public class TestStagingPartitionedTaskCommit
       assertEquals(BUCKET, request.bucket());
       files.add(request.key());
     }
-    Assertions.assertThat(files)
+    assertThat(files)
         .describedAs("Should have the right number of uploads")
         .hasSize(relativeFiles.size());
 
     Set<String> expected = buildExpectedList(committer);
-    Assertions.assertThat(files)
+    assertThat(files)
         .describedAs("Should have correct paths")
         .containsExactlyInAnyOrderElementsOf(expected);
   }

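@BeforeClass maps to @BeforeAll, which still has to be a static method unless the test class declares @TestInstance(Lifecycle.PER_CLASS). A small sketch with made-up data:

import static org.junit.jupiter.api.Assertions.assertFalse;

import java.util.ArrayList;
import java.util.List;

import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

class RelativeFileListSketch {
  private static final List<String> RELATIVE_FILES = new ArrayList<>();

  @BeforeAll
  public static void createRelativeFileList() {
    // built once per class, before any test method runs
    RELATIVE_FILES.add("dateint=20161115/hour=14/part-0000");
  }

  @Test
  void listIsVisibleToEveryTest() {
    assertFalse(RELATIVE_FILES.isEmpty(),
        "list built in @BeforeAll should already be populated");
  }
}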
+ 2 - 2
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/fileContext/ITestS3AFileContextUtil.java

@@ -17,14 +17,14 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContextUtilBase;
 import org.apache.hadoop.fs.s3a.S3ATestUtils;
-import org.junit.Before;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * S3A implementation of FileContextUtilBase.
  */
 public class ITestS3AFileContextUtil extends FileContextUtilBase {
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException, Exception {
     Configuration conf = new Configuration();
     fc = S3ATestUtils.createTestFileContext(conf);

+ 6 - 6
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestCreateFileBuilder.java

@@ -22,8 +22,7 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.util.Map;
 
-import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
@@ -38,6 +37,7 @@ import static org.apache.hadoop.fs.s3a.Constants.FS_S3A_CREATE_HEADER;
 import static org.apache.hadoop.fs.s3a.Constants.FS_S3A_CREATE_PERFORMANCE;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.apache.hadoop.fs.s3a.impl.AWSHeaders.IF_NONE_MATCH;
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * Unit test of {@link CreateFileBuilder}.
@@ -55,7 +55,7 @@ public class TestCreateFileBuilder extends HadoopTestBase {
 
   private BuilderOutputStream unwrap(FSDataOutputStream out) {
     OutputStream s = out.getWrappedStream();
-    Assertions.assertThat(s)
+    assertThat(s)
         .isInstanceOf(BuilderOutputStream.class);
     return (BuilderOutputStream) s;
   }
@@ -67,7 +67,7 @@ public class TestCreateFileBuilder extends HadoopTestBase {
 
   @Test
   public void testSimpleBuild() throws Throwable {
-    Assertions.assertThat(build(mkBuilder().create()))
+    assertThat(build(mkBuilder().create()))
         .matches(p -> !p.isOverwrite())
         .matches(p -> !p.isPerformance());
   }
@@ -82,7 +82,7 @@ public class TestCreateFileBuilder extends HadoopTestBase {
   public void testPerformanceSupport() throws Throwable {
     CreateFileBuilder builder = mkBuilder().create();
     builder.must(FS_S3A_CREATE_PERFORMANCE, true);
-    Assertions.assertThat(build(builder))
+    assertThat(build(builder))
         .matches(p -> p.isPerformance());
   }
 
@@ -93,7 +93,7 @@ public class TestCreateFileBuilder extends HadoopTestBase {
         .must(FS_S3A_CREATE_HEADER + "." + IF_NONE_MATCH, "*")
         .opt(FS_S3A_CREATE_HEADER + ".owner", "engineering");
     final Map<String, String> headers = build(builder).getHeaders();
-    Assertions.assertThat(headers)
+    assertThat(headers)
         .containsEntry("retention", "permanent")
         .containsEntry("owner", "engineering")
         .containsEntry(IF_NONE_MATCH, "*");

+ 15 - 15
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestHeaderProcessing.java

@@ -29,10 +29,9 @@ import java.util.Map;
 import software.amazon.awssdk.services.s3.model.CopyObjectRequest;
 import software.amazon.awssdk.services.s3.model.HeadBucketResponse;
 import software.amazon.awssdk.services.s3.model.HeadObjectResponse;
-import org.assertj.core.api.Assertions;
 import org.assertj.core.util.Lists;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.s3a.MockS3AFileSystem;
@@ -51,6 +50,7 @@ import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.decodeBytes;
 import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.encodeBytes;
 import static org.apache.hadoop.fs.s3a.impl.HeaderProcessing.extractXAttrLongValue;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * Unit tests of header processing logic in {@link HeaderProcessing}.
@@ -92,7 +92,7 @@ public class TestHeaderProcessing extends HadoopTestBase {
       XA_LAST_MODIFIED
   };
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     CONTEXT_ACCESSORS.len = FILE_LENGTH;
     CONTEXT_ACCESSORS.userHeaders.put(
@@ -105,7 +105,7 @@ public class TestHeaderProcessing extends HadoopTestBase {
 
   @Test
   public void testByteRoundTrip() throws Throwable {
-    Assertions.assertThat(decodeBytes(encodeBytes(VALUE)))
+    assertThat(decodeBytes(encodeBytes(VALUE)))
         .describedAs("encoding of " + VALUE)
         .isEqualTo(VALUE);
   }
@@ -125,9 +125,9 @@ public class TestHeaderProcessing extends HadoopTestBase {
    */
   @Test
   public void testGetDateXAttr() throws Throwable {
-    Assertions.assertThat(
+    assertThat(
         decodeBytes(headerProcessing.getXAttr(MAGIC_PATH,
-            XA_LAST_MODIFIED)))
+        XA_LAST_MODIFIED)))
         .describedAs("XAttribute " + XA_LAST_MODIFIED)
         .isEqualTo(CONTEXT_ACCESSORS.date.toString());
   }
@@ -148,7 +148,7 @@ public class TestHeaderProcessing extends HadoopTestBase {
   @Test
   public void testGetAllXAttrs() throws Throwable {
     Map<String, byte[]> xAttrs = headerProcessing.getXAttrs(MAGIC_PATH);
-    Assertions.assertThat(xAttrs.keySet())
+    assertThat(xAttrs.keySet())
         .describedAs("Attribute keys")
         .contains(RETRIEVED_XATTRS);
   }
@@ -160,7 +160,7 @@ public class TestHeaderProcessing extends HadoopTestBase {
   @Test
   public void testListXAttrKeys() throws Throwable {
     List<String> xAttrs = headerProcessing.listXAttrs(MAGIC_PATH);
-    Assertions.assertThat(xAttrs)
+    assertThat(xAttrs)
         .describedAs("Attribute keys")
         .contains(RETRIEVED_XATTRS);
   }
@@ -172,7 +172,7 @@ public class TestHeaderProcessing extends HadoopTestBase {
   public void testGetFilteredXAttrs() throws Throwable {
     Map<String, byte[]> xAttrs = headerProcessing.getXAttrs(MAGIC_PATH,
         Lists.list(XA_MAGIC_MARKER, XA_CONTENT_LENGTH, "unknown"));
-    Assertions.assertThat(xAttrs.keySet())
+    assertThat(xAttrs.keySet())
         .describedAs("Attribute keys")
         .containsExactlyInAnyOrder(XA_MAGIC_MARKER, XA_CONTENT_LENGTH);
     // and the values are good
@@ -193,7 +193,7 @@ public class TestHeaderProcessing extends HadoopTestBase {
   public void testFilterEmptyXAttrs() throws Throwable {
     Map<String, byte[]> xAttrs = headerProcessing.getXAttrs(MAGIC_PATH,
         Lists.list());
-    Assertions.assertThat(xAttrs.keySet())
+    assertThat(xAttrs.keySet())
         .describedAs("Attribute keys")
         .isEmpty();
   }
@@ -211,17 +211,17 @@ public class TestHeaderProcessing extends HadoopTestBase {
     final HeadObjectResponse source = CONTEXT_ACCESSORS
         .getObjectMetadata(MAGIC_KEY);
     final Map<String, String> sourceUserMD = source.metadata();
-    Assertions.assertThat(sourceUserMD.get(owner))
+    assertThat(sourceUserMD.get(owner))
         .describedAs("owner header in copied MD")
         .isEqualTo(root);
 
     Map<String, String> destUserMetadata = new HashMap<>();
     headerProcessing.cloneObjectMetadata(source, destUserMetadata, CopyObjectRequest.builder());
 
-    Assertions.assertThat(destUserMetadata.get(X_HEADER_MAGIC_MARKER))
+    assertThat(destUserMetadata.get(X_HEADER_MAGIC_MARKER))
         .describedAs("Magic marker header in copied MD")
         .isNull();
-    Assertions.assertThat(destUserMetadata.get(owner))
+    assertThat(destUserMetadata.get(owner))
         .describedAs("owner header in copied MD")
         .isEqualTo(root);
   }
@@ -236,7 +236,7 @@ public class TestHeaderProcessing extends HadoopTestBase {
       final String key,
       final byte[] bytes,
       final long expected) {
-    Assertions.assertThat(extractXAttrLongValue(bytes))
+    assertThat(extractXAttrLongValue(bytes))
         .describedAs("XAttribute " + key)
         .isNotEmpty()
         .hasValue(expected);

+ 7 - 7
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestOpenFileSupport.java

@@ -25,9 +25,8 @@ import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
 
-import org.assertj.core.api.Assertions;
 import org.assertj.core.api.ObjectAssert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
@@ -64,6 +63,7 @@ import static org.apache.hadoop.fs.s3a.S3AInputPolicy.Normal;
 import static org.apache.hadoop.fs.s3a.S3AInputPolicy.Random;
 import static org.apache.hadoop.fs.s3a.S3AInputPolicy.Sequential;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * Unit tests for {@link OpenFileSupport} and the associated
@@ -120,7 +120,7 @@ public class TestOpenFileSupport extends HadoopTestBase {
    */
   private ObjectAssert<OpenFileSupport.OpenFileInformation> assertFileInfo(
       final OpenFileSupport.OpenFileInformation fi) {
-    return Assertions.assertThat(fi)
+    return assertThat(fi)
         .describedAs("File Information %s", fi);
   }
 
@@ -209,21 +209,21 @@ public class TestOpenFileSupport extends HadoopTestBase {
     Configuration conf = conf(FS_OPTION_OPENFILE_READ_POLICY, plist);
     Collection<String> options = conf.getTrimmedStringCollection(
         FS_OPTION_OPENFILE_READ_POLICY);
-    Assertions.assertThat(S3AInputPolicy.getFirstSupportedPolicy(options, null))
+    assertThat(S3AInputPolicy.getFirstSupportedPolicy(options, null))
         .describedAs("Policy from " + plist)
         .isEqualTo(Random);
   }
 
   @Test
   public void testAdaptiveSeekPolicyRecognized() throws Throwable {
-    Assertions.assertThat(S3AInputPolicy.getPolicy("adaptive", null))
+    assertThat(S3AInputPolicy.getPolicy("adaptive", null))
         .describedAs("adaptive")
         .isEqualTo(Normal);
   }
 
   @Test
   public void testUnknownSeekPolicyFallback() throws Throwable {
-    Assertions.assertThat(S3AInputPolicy.getPolicy("unknown", null))
+    assertThat(S3AInputPolicy.getPolicy("unknown", null))
         .describedAs("unknown policy")
         .isNull();
   }
@@ -251,7 +251,7 @@ public class TestOpenFileSupport extends HadoopTestBase {
     };
     for (Object[] mapping : policyMapping) {
       String name = (String) mapping[0];
-      Assertions.assertThat(S3AInputPolicy.getPolicy(name, null))
+      assertThat(S3AInputPolicy.getPolicy(name, null))
           .describedAs("Policy %s", name)
           .isEqualTo(mapping[1]);
     }

+ 16 - 16
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestS3AMultipartUploaderSupport.java

@@ -22,8 +22,7 @@ import java.io.EOFException;
 import java.io.IOException;
 import java.util.Map;
 
-import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import software.amazon.awssdk.services.s3.model.UploadPartResponse;
 
 import org.apache.hadoop.test.HadoopTestBase;
@@ -33,6 +32,7 @@ import static org.apache.hadoop.fs.s3a.impl.S3AMultipartUploader.buildPartHandle
 import static org.apache.hadoop.fs.s3a.impl.S3AMultipartUploader.parsePartHandlePayload;
 import static org.apache.hadoop.fs.s3a.impl.S3AMultipartUploader.extractChecksum;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * Unit test of multipart upload support methods and classes.
@@ -51,9 +51,9 @@ public class TestS3AMultipartUploaderSupport extends HadoopTestBase {
     assertEquals(999, result.getPartNumber());
     assertEquals("tag", result.getEtag());
     assertEquals(1, result.getLen());
-    Assertions.assertThat(result.getChecksumAlgorithm())
+    assertThat(result.getChecksumAlgorithm())
         .describedAs("Checksum algorithm must not be present").isNull();
-    Assertions.assertThat(result.getChecksum())
+    assertThat(result.getChecksum())
         .describedAs("Checksum must not be generated").isNull();
   }
 
@@ -65,9 +65,9 @@ public class TestS3AMultipartUploaderSupport extends HadoopTestBase {
     assertEquals(1, result.getPartNumber());
     assertEquals("11223344", result.getEtag());
     assertEquals(len, result.getLen());
-    Assertions.assertThat(result.getChecksumAlgorithm())
+    assertThat(result.getChecksumAlgorithm())
         .describedAs("Checksum algorithm must not be present").isNull();
-    Assertions.assertThat(result.getChecksum())
+    assertThat(result.getChecksum())
         .describedAs("Checksum must not be generated").isNull();
   }
 
@@ -80,10 +80,10 @@ public class TestS3AMultipartUploaderSupport extends HadoopTestBase {
     assertEquals(999, result.getPartNumber());
     assertEquals("tag", result.getEtag());
     assertEquals(1, result.getLen());
-    Assertions.assertThat(result.getChecksumAlgorithm())
+    assertThat(result.getChecksumAlgorithm())
         .describedAs("Expect the checksum algorithm to be SHA256")
         .isEqualTo("SHA256");
-    Assertions.assertThat(result.getChecksum())
+    assertThat(result.getChecksum())
         .describedAs("Checksum must be set")
         .isEqualTo("checksum");
   }
@@ -135,10 +135,10 @@ public class TestS3AMultipartUploaderSupport extends HadoopTestBase {
         .checksumCRC32("checksum")
         .build();
     final Map.Entry<String, String> checksum = extractChecksum(uploadPartResponse);
-    Assertions.assertThat(checksum.getKey())
+    assertThat(checksum.getKey())
         .describedAs("Expect the checksum algorithm to be CRC32")
         .isEqualTo("CRC32");
-    Assertions.assertThat(checksum.getValue())
+    assertThat(checksum.getValue())
         .describedAs("Checksum must be set")
         .isEqualTo("checksum");
   }
@@ -149,10 +149,10 @@ public class TestS3AMultipartUploaderSupport extends HadoopTestBase {
         .checksumCRC32C("checksum")
         .build();
     final Map.Entry<String, String> checksum = extractChecksum(uploadPartResponse);
-    Assertions.assertThat(checksum.getKey())
+    assertThat(checksum.getKey())
         .describedAs("Expect the checksum algorithm to be CRC32C")
         .isEqualTo("CRC32C");
-    Assertions.assertThat(checksum.getValue())
+    assertThat(checksum.getValue())
         .describedAs("Checksum must be set")
         .isEqualTo("checksum");
   }
@@ -163,10 +163,10 @@ public class TestS3AMultipartUploaderSupport extends HadoopTestBase {
         .checksumSHA1("checksum")
         .build();
     final Map.Entry<String, String> checksum = extractChecksum(uploadPartResponse);
-    Assertions.assertThat(checksum.getKey())
+    assertThat(checksum.getKey())
         .describedAs("Expect the checksum algorithm to be SHA1")
         .isEqualTo("SHA1");
-    Assertions.assertThat(checksum.getValue())
+    assertThat(checksum.getValue())
         .describedAs("Checksum must be set")
         .isEqualTo("checksum");
   }
@@ -177,10 +177,10 @@ public class TestS3AMultipartUploaderSupport extends HadoopTestBase {
         .checksumSHA256("checksum")
         .build();
     final Map.Entry<String, String> checksum = extractChecksum(uploadPartResponse);
-    Assertions.assertThat(checksum.getKey())
+    assertThat(checksum.getKey())
         .describedAs("Expect the checksum algorithm to be SHA256")
         .isEqualTo("SHA256");
-    Assertions.assertThat(checksum.getValue())
+    assertThat(checksum.getValue())
         .describedAs("Checksum must be set")
         .isEqualTo("checksum");
   }

+ 6 - 6
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/impl/TestSDKStreamDrainer.java

@@ -22,8 +22,7 @@ import java.io.IOException;
 import java.io.InputStream;
 
 import software.amazon.awssdk.http.Abortable;
-import org.assertj.core.api.Assertions;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.test.HadoopTestBase;
 
@@ -31,6 +30,7 @@ import org.apache.hadoop.test.HadoopTestBase;
 import static org.apache.hadoop.fs.s3a.impl.InternalConstants.DRAIN_BUFFER_SIZE;
 import static org.apache.hadoop.fs.s3a.statistics.impl.EmptyS3AStatisticsContext.EMPTY_INPUT_STREAM_STATISTICS;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.assertj.core.api.Assertions.assertThat;
 
 /**
  * Unit tests for stream draining.
@@ -165,7 +165,7 @@ public class TestSDKStreamDrainer extends HadoopTestBase {
     while (stream.read() > 0) {
       count++;
     }
-    Assertions.assertThat(count)
+    assertThat(count)
         .describedAs("bytes read from %s", stream)
         .isEqualTo(BYTES);
   }
@@ -198,7 +198,7 @@ public class TestSDKStreamDrainer extends HadoopTestBase {
    * @return the drainer.
    */
   private SDKStreamDrainer assertAborted(SDKStreamDrainer drainer) {
-    Assertions.assertThat(drainer)
+    assertThat(drainer)
         .matches(SDKStreamDrainer::aborted, "aborted");
     return drainer;
   }
@@ -209,7 +209,7 @@ public class TestSDKStreamDrainer extends HadoopTestBase {
    * @return the drainer.
    */
   private SDKStreamDrainer assertNotAborted(SDKStreamDrainer drainer) {
-    Assertions.assertThat(drainer)
+    assertThat(drainer)
         .matches(d -> !d.aborted(), "is not aborted");
     return drainer;
   }
@@ -233,7 +233,7 @@ public class TestSDKStreamDrainer extends HadoopTestBase {
    */
   private static SDKStreamDrainer assertBytesRead(final SDKStreamDrainer drainer,
       final int bytes) {
-    Assertions.assertThat(drainer)
+    assertThat(drainer)
         .describedAs("bytes read by %s", drainer)
         .extracting(SDKStreamDrainer::getDrained)
         .isEqualTo(bytes);

+ 5 - 6
hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/mapreduce/filecache/TestS3AResourceScope.java

@@ -23,7 +23,7 @@ import java.net.URI;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
@@ -42,14 +42,14 @@ public class TestS3AResourceScope extends HadoopTestBase {
   @Test
   public void testS3AFilesArePrivate() throws Throwable {
     S3AFileStatus status = new S3AFileStatus(false, PATH, "self");
-    assertTrue("Not encrypted: " + status, status.isEncrypted());
+    assertTrue(status.isEncrypted(), "Not encrypted: " + status);
     assertNotExecutable(status);
   }
 
   @Test
   public void testS3AFilesArePrivateOtherContstructor() throws Throwable {
     S3AFileStatus status = new S3AFileStatus(0, 0, PATH, 1, "self", null, null);
-    assertTrue("Not encrypted: " + status, status.isEncrypted());
+    assertTrue(status.isEncrypted(), "Not encrypted: " + status);
     assertNotExecutable(status);
   }
 
@@ -57,8 +57,7 @@ public class TestS3AResourceScope extends HadoopTestBase {
       throws IOException {
     Map<URI, FileStatus> cache = new HashMap<>();
     cache.put(PATH.toUri(), status);
-    assertFalse("Should not have been executable " + status,
-        ClientDistributedCacheManager.ancestorsHaveExecutePermissions(
-            null, PATH, cache));
+    assertFalse(ClientDistributedCacheManager.ancestorsHaveExecutePermissions(
+        null, PATH, cache), "Should not have been executable " + status);
   }
 }

+ 7 - 6
hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/live/TestAdlFileSystemContractLive.java

@@ -19,12 +19,13 @@
 
 package org.apache.hadoop.fs.adl.live;
 
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import static org.junit.Assume.*;
-import org.junit.Before;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 import java.io.IOException;
 
@@ -34,17 +35,17 @@ import java.io.IOException;
 public class TestAdlFileSystemContractLive extends FileSystemContractBaseTest {
   private FileSystem adlStore;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     skipTestCheck();
     adlStore = AdlStorageConfiguration.createStorageConnector();
     if (AdlStorageConfiguration.isContractTestEnabled()) {
       fs = adlStore;
     }
-    assumeNotNull(fs);
+    assumeTrue(fs != null);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (AdlStorageConfiguration.isContractTestEnabled()) {
       cleanup();

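JUnit 5's Assumptions class has no assumeNotNull, so the check becomes assumeTrue(fs != null); when the assumption fails the test is reported as skipped rather than failed. A self-contained sketch with a hypothetical stand-in for the store connector:

import static org.junit.jupiter.api.Assumptions.assumeTrue;

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class AssumptionSketch {
  private Object fs;                    // stands in for the FileSystem under test

  @BeforeEach
  void setUp() {
    fs = maybeCreateStore();            // may legitimately return null when unconfigured
    assumeTrue(fs != null, "store not configured, skipping");
  }

  @Test
  void needsTheStore() {
    // only executes when the assumption above held
  }

  private Object maybeCreateStore() {
    // hypothetical probe: a real test would build the connector from configuration
    return System.getenv("TEST_STORE_URI");
  }
}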
+ 8 - 8
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractEmulator.java

@@ -18,15 +18,15 @@
 
 package org.apache.hadoop.fs.azure;
 
-import static org.junit.Assume.assumeNotNull;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+import org.apache.hadoop.test.TestName;
 
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.rules.TestName;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.extension.RegisterExtension;
 
 /**
  * Run the {@code FileSystemContractBaseTest} tests against the emulator
@@ -36,21 +36,21 @@ public class ITestNativeAzureFileSystemContractEmulator extends
   private AzureBlobStorageTestAccount testAccount;
   private Path basePath;
 
-  @Rule
-  public TestName methodName = new TestName();
+  @RegisterExtension
+  public TestName methodName = new TestName();
 
   private void nameThread() {
     Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     nameThread();
     testAccount = AzureBlobStorageTestAccount.createForEmulator();
     if (testAccount != null) {
       fs = testAccount.getFileSystem();
     }
-    assumeNotNull(fs);
+    assumeTrue(fs != null);
     basePath = fs.makeQualified(
         AzureTestUtils.createTestPath(
             new Path("ITestNativeAzureFileSystemContractEmulator")));

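The JUnit 4 TestName rule is replaced by Hadoop's own org.apache.hadoop.test.TestName, registered programmatically with @RegisterExtension. Only getMethodName() is used in these tests; a minimal sketch of how such an extension can be written (an assumption, the real class may be implemented differently):

import java.lang.reflect.Method;

import org.junit.jupiter.api.extension.BeforeEachCallback;
import org.junit.jupiter.api.extension.ExtensionContext;

class TestNameSketch implements BeforeEachCallback {
  private volatile String methodName;

  @Override
  public void beforeEach(ExtensionContext context) {
    // capture the current test method before each test runs
    methodName = context.getTestMethod().map(Method::getName).orElse("unknown");
  }

  String getMethodName() {
    return methodName;
  }
}

// usage inside a test class:
//   @RegisterExtension
//   public TestNameSketch methodName = new TestNameSketch();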
+ 15 - 16
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractLive.java

@@ -18,19 +18,18 @@
 
 package org.apache.hadoop.fs.azure;
 
-import static org.junit.Assume.assumeNotNull;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
 import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
+import org.apache.hadoop.test.TestName;
 
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
 
 /**
  * Run the {@link FileSystemContractBaseTest} test suite against azure storage.
@@ -40,21 +39,21 @@ public class ITestNativeAzureFileSystemContractLive extends
   private AzureBlobStorageTestAccount testAccount;
   private Path basePath;
 
-  @Rule
-  public TestName methodName = new TestName();
+  @RegisterExtension
+  public TestName methodName = new TestName();
 
   private void nameThread() {
     Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     nameThread();
     testAccount = AzureBlobStorageTestAccount.create();
     if (testAccount != null) {
       fs = testAccount.getFileSystem();
     }
-    assumeNotNull(fs);
+    assumeTrue(fs != null);
     basePath = fs.makeQualified(
         AzureTestUtils.createTestPath(
             new Path("NativeAzureFileSystemContractLive")));
@@ -81,27 +80,27 @@ public class ITestNativeAzureFileSystemContractLive extends
    * file system code needs to be modified to make them pass.
    * A separate work item has been opened for this.
    */
-  @Ignore
+  @Disabled
   @Test
   public void testMoveFileUnderParent() throws Throwable {
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testRenameFileToSelf() throws Throwable {
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testRenameChildDirForbidden() throws Exception {
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testMoveDirUnderParent() throws Throwable {
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testRenameDirToSelf() throws Throwable {
   }

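@Ignore becomes @Disabled, and the existing pattern of overriding an inherited contract test with an empty body is kept to switch individual base-class tests off. A sketch with hypothetical class names (a reason string is optional but keeps the skip visible in reports):

import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

abstract class ContractBaseSketch {
  @Test
  void testRenameFileToSelf() throws Throwable {
    // the real contract check lives in the base class
  }
}

class AzureContractSketch extends ContractBaseSketch {
  @Disabled("rename-to-self semantics not supported by this store")
  @Test
  @Override
  void testRenameFileToSelf() throws Throwable {
    // intentionally empty: the inherited behaviour is not exercised here
  }
}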
+ 16 - 15
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemContractPageBlobLive.java

@@ -18,18 +18,19 @@
 
 package org.apache.hadoop.fs.azure;
 
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.integration.AzureTestConstants;
 import org.apache.hadoop.fs.azure.integration.AzureTestUtils;
 
-import static org.junit.Assume.assumeNotNull;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
+import org.apache.hadoop.test.TestName;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.RegisterExtension;
 
 /**
  * Run the {@link FileSystemContractBaseTest} test suite against azure
@@ -39,8 +40,8 @@ public class ITestNativeAzureFileSystemContractPageBlobLive extends
     FileSystemContractBaseTest {
   private AzureBlobStorageTestAccount testAccount;
   private Path basePath;
-  @Rule
-  public TestName methodName = new TestName();
+  @RegisterExtension
+  public TestName methodName = new TestName();
 
   private void nameThread() {
     Thread.currentThread().setName("JUnit-" + methodName.getMethodName());
@@ -59,10 +60,10 @@ public class ITestNativeAzureFileSystemContractPageBlobLive extends
     return AzureBlobStorageTestAccount.create(conf);
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     testAccount = createTestAccount();
-    assumeNotNull(testAccount);
+    assumeTrue(testAccount != null);
     fs = testAccount.getFileSystem();
     basePath = AzureTestUtils.pathForTests(fs, "filesystemcontractpageblob");
   }
@@ -87,27 +88,27 @@ public class ITestNativeAzureFileSystemContractPageBlobLive extends
    * file system code needs to be modified to make them pass.
    * A separate work item has been opened for this.
    */
-  @Ignore
+  @Disabled
   @Test
   public void testMoveFileUnderParent() throws Throwable {
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testRenameFileToSelf() throws Throwable {
   }
   
-  @Ignore
+  @Disabled
   @Test
   public void testRenameChildDirForbidden() throws Exception {
   }
   
-  @Ignore
+  @Disabled
   @Test
   public void testMoveDirUnderParent() throws Throwable {
   }
   
-  @Ignore
+  @Disabled
   @Test
   public void testRenameDirToSelf() throws Throwable {
   }

+ 9 - 9
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestNativeAzureFileSystemContractMocked.java

@@ -19,9 +19,9 @@
 package org.apache.hadoop.fs.azure;
 
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
 
 /**
  * Mocked testing of FileSystemContractBaseTest.
@@ -29,7 +29,7 @@ import org.junit.Test;
 public class TestNativeAzureFileSystemContractMocked extends
     FileSystemContractBaseTest {
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fs = AzureBlobStorageTestAccount.createMock().getFileSystem();
   }
@@ -39,27 +39,27 @@ public class TestNativeAzureFileSystemContractMocked extends
    * file system code needs to be modified to make them pass.
    * A separate work item has been opened for this.
    */
-  @Ignore
+  @Disabled
   @Test
   public void testMoveFileUnderParent() throws Throwable {
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testRenameFileToSelf() throws Throwable {
   }
   
-  @Ignore
+  @Disabled
   @Test
   public void testRenameChildDirForbidden() throws Exception {
   }
   
-  @Ignore
+  @Disabled
   @Test
   public void testMoveDirUnderParent() throws Throwable {
   }
   
-  @Ignore
+  @Disabled
   @Test
   public void testRenameDirToSelf() throws Throwable {
   }

+ 10 - 9
hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/contract/ITestAzureBlobFileSystemBasics.java

@@ -18,24 +18,26 @@
 package org.apache.hadoop.fs.azurebfs.contract;
 
 import java.io.IOException;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import static org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys.TEST_TIMEOUT;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Basic Contract test for Azure BlobFileSystem.
  */
+@Timeout(value = TEST_TIMEOUT, unit = TimeUnit.MILLISECONDS)
 public class ITestAzureBlobFileSystemBasics extends FileSystemContractBaseTest {
   private final ABFSContractTestBinding binding;
 
@@ -43,11 +45,10 @@ public class ITestAzureBlobFileSystemBasics extends FileSystemContractBaseTest {
     // If all contract tests are running in parallel, some root level tests in FileSystemContractBaseTest will fail
     // due to the race condition. Hence for this contract test it should be tested in different container
     binding = new ABFSContractTestBinding(false);
-    globalTimeout = Timeout.millis(TEST_TIMEOUT);
   }
 
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     binding.setup();
     fs = binding.getFileSystem();
@@ -102,7 +103,7 @@ public class ITestAzureBlobFileSystemBasics extends FileSystemContractBaseTest {
   }
 
   @Override
-  @Ignore("Not implemented in ABFS yet")
+  @Disabled("Not implemented in ABFS yet")
   public void testMkdirsWithUmask() throws Exception {
   }
 }
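The JUnit 4 Timeout rule assigned in the constructor is replaced by a class-level @Timeout annotation, which applies the limit to each test method individually. A sketch with a made-up constant (the real value comes from TestConfigurationKeys.TEST_TIMEOUT):

import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

@Timeout(value = TimeoutSketch.TEST_TIMEOUT_MS, unit = TimeUnit.MILLISECONDS)
class TimeoutSketch {
  // hypothetical limit: fifteen minutes, expressed in milliseconds
  static final long TEST_TIMEOUT_MS = 15 * 60 * 1000;

  @Test
  void finishesWellWithinTheLimit() throws InterruptedException {
    Thread.sleep(10);   // far below the limit, so the test passes
  }
}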