
HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part8. (#7805)

Co-authored-by: Shilun Fan <slfan1989@apache.org>
Signed-off-by: Shilun Fan <slfan1989@apache.org>
zhtttylz · 1 week ago
Parent
Current commit
ed05d79b8e
50 changed files with 782 additions and 768 deletions
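
The patch applies the usual JUnit 4 to JUnit 5 (Jupiter) migration recipe across the fifty test files listed below. As a rough orientation before the per-file diffs, here is a condensed before/after sketch of the pattern; this is illustrative code written for this summary, not code taken from the patch, and the class and method names are made up.

// JUnit 4 (before):
//   @Rule public TemporaryFolder baseDir = new TemporaryFolder();
//   @Rule public Timeout timeout = new Timeout(600, TimeUnit.SECONDS);
//   @Before public void setup() { ... }    @After public void teardown() { ... }
//   @Test(timeout = 90000) public void testFoo() { ... }
//   assertEquals("message", expected, actual);
//
// JUnit 5 (after), the shape used throughout this patch:
import static org.junit.jupiter.api.Assertions.assertEquals;

import java.nio.file.Path;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.io.TempDir;

@Timeout(600)                 // class-level timeout replaces the Timeout @Rule
public class ExampleMigratedTest {

  @TempDir
  public Path baseDir;        // replaces the TemporaryFolder @Rule

  @BeforeEach                 // was @Before
  public void setup() { /* ... */ }

  @AfterEach                  // was @After
  public void teardown() { /* ... */ }

  @Test
  @Timeout(value = 90)        // was @Test(timeout = 90000); value is in seconds
  public void testFoo() {
    // the failure message moves from the first to the last argument
    assertEquals(4, 2 + 2, "message");
  }
}
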
  1. 16 18
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
  2. 44 47
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
  3. 14 13
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailure.java
  4. 1 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailureWithRandomECPolicy.java
  5. 14 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
  6. 10 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java
  7. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
  8. 18 15
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
  9. 85 85
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
  10. 3 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithBackoffMonitor.java
  11. 6 6
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
  12. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
  13. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java
  14. 13 13
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
  15. 10 10
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java
  16. 3 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHAAuxiliaryPort.java
  17. 24 25
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java
  18. 52 46
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
  19. 15 17
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
  20. 14 16
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
  21. 30 24
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java
  22. 9 6
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHttpPolicy.java
  23. 2 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
  24. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
  25. 12 11
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestKeyProviderCache.java
  26. 12 12
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java
  27. 62 59
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
  28. 12 11
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java
  29. 30 29
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
  30. 12 10
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java
  31. 10 10
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java
  32. 8 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
  33. 14 15
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocatedBlocksRefresher.java
  34. 36 46
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
  35. 13 12
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceWithStriped.java
  36. 31 25
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
  37. 8 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
  38. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
  39. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
  40. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java
  41. 10 10
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
  42. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitLegacyRead.java
  43. 9 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitRead.java
  44. 9 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadNoChecksum.java
  45. 9 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java
  46. 9 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelUnixDomainRead.java
  47. 7 8
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java
  48. 20 22
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecodingCorruptData.java
  49. 18 21
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecodingDeletedData.java
  50. 21 20
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java

+ 16 - 18
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java

@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -28,12 +28,10 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.util.Lists;
-import org.junit.Rule;
-import org.junit.rules.TemporaryFolder;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -51,12 +49,13 @@ import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * This class provide utilities for testing of the admin operations of nodes.
  */
+@Timeout(600)
 public class AdminStatesBaseTest {
   public static final Logger LOG =
       LoggerFactory.getLogger(AdminStatesBaseTest.class);
@@ -69,10 +68,9 @@ public class AdminStatesBaseTest {
 
   final private Random myrand = new Random();
 
-  @Rule
-  public TemporaryFolder baseDir = new TemporaryFolder();
-  @Rule
-  public Timeout timeout = new Timeout(600, TimeUnit.SECONDS);
+  @SuppressWarnings("checkstyle:VisibilityModifier")
+  @TempDir
+  public java.nio.file.Path baseDir;
 
   private HostsFileWriter hostsFileWriter;
   private Configuration conf;
@@ -91,7 +89,7 @@ public class AdminStatesBaseTest {
     return cluster;
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     // Set up the hosts/exclude files.
     hostsFileWriter = new HostsFileWriter();
@@ -118,7 +116,7 @@
 
   }
 
-  @After
+  @AfterEach
   public void teardown() throws IOException {
     hostsFileWriter.cleanup();
     shutdownCluster();
@@ -391,7 +389,7 @@
   protected static void validateCluster(DFSClient client, int numDNs)
       throws IOException {
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
-    assertEquals("Number of Datanodes ", numDNs, info.length);
+    assertEquals(numDNs, info.length, "Number of Datanodes ");
   }
 
   /** Start a MiniDFSCluster.
@@ -406,7 +404,7 @@
   protected void startCluster(int numNameNodes, int numDatanodes,
       boolean setupHostsFile, long[] nodesCapacity,
       boolean checkDataNodeHostConfig, boolean federation) throws IOException {
-    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
+    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf, baseDir.toFile())
         .numDataNodes(numDatanodes);
     if (federation) {
       builder.nnTopology(
@@ -441,7 +439,7 @@
 
 
   protected void startSimpleHACluster(int numDatanodes) throws IOException {
-    cluster = new MiniDFSCluster.Builder(conf, baseDir.getRoot())
+    cluster = new MiniDFSCluster.Builder(conf, baseDir.toFile())
        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(
        numDatanodes).build();
     cluster.transitionToActive(0);

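Most of the mechanical churn in DFSTestUtil.java below, and in the remaining files, comes from the different parameter order of the assertion and assumption overloads: org.junit.Assert puts the failure message first, while org.junit.jupiter.api.Assertions and Assumptions take it as the trailing argument. A stand-alone illustration follows; the class name and values are made up for this summary, not taken from the patch.

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assumptions.assumeTrue;

import org.junit.jupiter.api.Test;

public class AssertionMessageOrderExample {

  @Test
  public void messageMovesToLastArgument() {
    int expected = 3;
    int actual = 3;

    // JUnit 4: assertEquals("Number of Datanodes ", expected, actual);
    // JUnit 5: the failure message becomes the trailing argument.
    assertEquals(expected, actual, "Number of Datanodes ");

    // The same flip applies to assertTrue/assertFalse ...
    assertTrue(actual == expected, "values should match");

    // ... and to assumptions, which also move from org.junit.Assume
    // to org.junit.jupiter.api.Assumptions.
    assumeTrue(actual >= 0, "skipped when actual is negative");
  }
}
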
+ 44 - 47
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -25,10 +25,11 @@ import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import java.io.BufferedOutputStream;
 import java.io.BufferedReader;
@@ -190,8 +191,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.VersionInfo;
-import org.junit.Assert;
-import org.junit.Assume;
 import org.apache.hadoop.util.ToolRunner;
 
 import org.apache.hadoop.classification.VisibleForTesting;
@@ -1672,15 +1671,14 @@ public class DFSTestUtil {
   }
 
   public static void checkComponentsEquals(byte[][] expected, byte[][] actual) {
-    assertEquals("expected: " + DFSUtil.byteArray2PathString(expected)
-        + ", actual: " + DFSUtil.byteArray2PathString(actual), expected.length,
-        actual.length);
+    assertEquals(expected.length, actual.length,
+        "expected: " + DFSUtil.byteArray2PathString(expected) + ", actual: "
+            + DFSUtil.byteArray2PathString(actual));
     int i = 0;
     for (byte[] e : expected) {
       byte[] actualComponent = actual[i++];
-      assertTrue("expected: " + DFSUtil.bytes2String(e) + ", actual: "
-          + DFSUtil.bytes2String(actualComponent),
-          Arrays.equals(e, actualComponent));
+      assertTrue(Arrays.equals(e, actualComponent), "expected: " + DFSUtil.bytes2String(e)
+          + ", actual: " + DFSUtil.bytes2String(actualComponent));
     }
   }
 
@@ -1699,7 +1697,7 @@
       this.sockDir = new TemporarySocketDirectory();
       DomainSocket.disableBindPathValidation();
       formerTcpReadsDisabled = DFSInputStream.tcpReadsDisabledForTesting;
-      Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
+      assumeTrue(DomainSocket.getLoadingFailureReason() == null);
     }
     
     public Configuration newConfiguration() {
@@ -1737,7 +1735,7 @@
     try (FSDataInputStream in1 = fs.open(p1);
          FSDataInputStream in2 = fs.open(p2)) {
       for (int i = 0; i < len; i++) {
-        assertEquals("Mismatch at byte " + i, in1.read(), in2.read());
+        assertEquals(in1.read(), in2.read(), "Mismatch at byte " + i);
       }
     }
   }
@@ -1813,32 +1811,31 @@
         client.getReplicatedBlockStats();
     ECBlockGroupStats ecBlockGroupStats = client.getECBlockGroupStats();
 
-    assertEquals("Under replicated stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
-        aggregatedStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
-    assertEquals("Low redundancy stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
+    assertEquals(aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
+        aggregatedStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX],
+        "Under replicated stats not matching!");
+    assertEquals(aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
         replicatedBlockStats.getLowRedundancyBlocks() +
-            ecBlockGroupStats.getLowRedundancyBlockGroups());
-    assertEquals("Corrupt blocks stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX],
-        replicatedBlockStats.getCorruptBlocks() +
-            ecBlockGroupStats.getCorruptBlockGroups());
-    assertEquals("Missing blocks stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX],
+            ecBlockGroupStats.getLowRedundancyBlockGroups(),
+        "Low redundancy stats not matching!");
+    assertEquals(aggregatedStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX],
+        replicatedBlockStats.getCorruptBlocks() + ecBlockGroupStats.getCorruptBlockGroups(),
+        "Corrupt blocks stats not matching!");
+    assertEquals(aggregatedStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX],
         replicatedBlockStats.getMissingReplicaBlocks() +
-            ecBlockGroupStats.getMissingBlockGroups());
-    assertEquals("Missing blocks with replication factor one not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX],
-        replicatedBlockStats.getMissingReplicationOneBlocks());
-    assertEquals("Bytes in future blocks stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX],
+            ecBlockGroupStats.getMissingBlockGroups(),
+        "Missing blocks stats not matching!");
+    assertEquals(aggregatedStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX],
+        replicatedBlockStats.getMissingReplicationOneBlocks(),
+        "Missing blocks with replication factor one not matching!");
+    assertEquals(aggregatedStats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX],
         replicatedBlockStats.getBytesInFutureBlocks() +
-            ecBlockGroupStats.getBytesInFutureBlockGroups());
-    assertEquals("Pending deletion blocks stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX],
+            ecBlockGroupStats.getBytesInFutureBlockGroups(),
+        "Bytes in future blocks stats not matching!");
+    assertEquals(aggregatedStats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX],
         replicatedBlockStats.getPendingDeletionBlocks() +
-            ecBlockGroupStats.getPendingDeletionBlocks());
+            ecBlockGroupStats.getPendingDeletionBlocks(),
+        "Pending deletion blocks stats not matching!");
   }
 
   /**
@@ -1911,8 +1908,8 @@
       ExtendedBlock blk) {
     BlockManager bm0 = nn.getNamesystem().getBlockManager();
     BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
-    assertTrue("Block " + blk + " should be under construction, " +
-        "got: " + storedBlock, !storedBlock.isComplete());
+    assertTrue(!storedBlock.isComplete(),
+        "Block " + blk + " should be under construction, " + "got: " + storedBlock);
     // We expect that the replica with the most recent heart beat will be
     // the one to be in charge of the synchronization / recovery protocol.
     final DatanodeStorageInfo[] storages = storedBlock
@@ -1960,8 +1957,8 @@
     }
     assertEquals(retcode, ret);
     if (contain != null) {
-      assertTrue("The real output is: " + output + ".\n It should contain: "
-          + contain, output.contains(contain));
+      assertTrue(output.contains(contain),
+          "The real output is: " + output + ".\n It should contain: " + contain);
     }
   }
 
@@ -2365,23 +2362,23 @@
 
   public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
       Path trashPath, boolean shouldExistInTrash) throws Exception {
-    assertTrue(path + " file does not exist", fs.exists(path));
+    assertTrue(fs.exists(path), path + " file does not exist");
 
     // Verify that trashPath has a path component named ".Trash"
     Path checkTrash = trashPath;
     while (!checkTrash.isRoot() && !checkTrash.getName().equals(".Trash")) {
       checkTrash = checkTrash.getParent();
     }
-    assertEquals("No .Trash component found in trash path " + trashPath,
-        ".Trash", checkTrash.getName());
+    assertEquals(".Trash", checkTrash.getName(),
+        "No .Trash component found in trash path " + trashPath);
 
     String[] argv = new String[]{"-rm", "-r", path.toString()};
     int res = ToolRunner.run(shell, argv);
-    assertEquals("rm failed", 0, res);
+    assertEquals(0, res, "rm failed");
     if (shouldExistInTrash) {
-      assertTrue("File not in trash : " + trashPath, fs.exists(trashPath));
+      assertTrue(fs.exists(trashPath), "File not in trash : " + trashPath);
     } else {
-      assertFalse("File in trash : " + trashPath, fs.exists(trashPath));
+      assertFalse(fs.exists(trashPath), "File in trash : " + trashPath);
     }
   }
 
@@ -2590,7 +2587,7 @@
       Path filePath, int namenodeCount, boolean createMoverPath)
           throws IOException {
     final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
-    Assert.assertEquals(namenodeCount, namenodes.size());
+    assertEquals(namenodeCount, namenodes.size());
     NameNodeConnector.checkOtherInstanceRunning(createMoverPath);
     while (true) {
       try {

+ 14 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailure.java

@@ -17,23 +17,22 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.api.Timeout;
 
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 /**
  * Test striped file write operation with data node failures with parameterized
  * test cases.
  */
-@RunWith(Parameterized.class)
 public class ParameterizedTestDFSStripedOutputStreamWithFailure extends
     TestDFSStripedOutputStreamWithFailureBase{
   public static final Logger LOG = LoggerFactory.getLogger(
@@ -41,7 +40,6 @@ public class ParameterizedTestDFSStripedOutputStreamWithFailure extends
 
   private int base;
 
-  @Parameterized.Parameters
   public static Collection<Object[]> data() {
     List<Object[]> parameters = new ArrayList<>();
     for (int i = 0; i <= 10; i++) {
@@ -50,21 +48,24 @@ public class ParameterizedTestDFSStripedOutputStreamWithFailure extends
     return parameters;
   }
 
-  public ParameterizedTestDFSStripedOutputStreamWithFailure(int base) {
-    this.base = base;
+  public void initParameterizedTestDFSStripedOutputStreamWithFailure(int pBase) {
+    this.base = pBase;
   }
 
-  @Test(timeout = 240000)
-  public void runTestWithSingleFailure() {
+  @ParameterizedTest
+  @MethodSource("data")
+  @Timeout(value = 240)
+  public void runTestWithSingleFailure(int pBase) {
+    initParameterizedTestDFSStripedOutputStreamWithFailure(pBase);
     assumeTrue(base >= 0);
     if (base > lengths.size()) {
       base = base % lengths.size();
     }
     final int i = base;
     final Integer length = getLength(i);
-    assumeTrue("Skip test " + i + " since length=null.", length != null);
-    assumeTrue("Test " + i + ", length=" + length
-        + ", is not chosen to run.", RANDOM.nextInt(16) != 0);
+    assumeTrue(length != null, "Skip test " + i + " since length=null.");
+    assumeTrue(RANDOM.nextInt(16) != 0, "Test " + i + ", length=" + length
+        + ", is not chosen to run.");
    System.out.println("Run test " + i + ", length=" + length);
    runTest(length);
   }

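The parameterized test above no longer injects its parameter through the constructor; each case now arrives as a method argument. In Jupiter, @RunWith(Parameterized.class) plus @Parameterized.Parameters becomes @ParameterizedTest with a @MethodSource factory, and the per-method timeout moves to @Timeout. A minimal sketch of that shape follows; the class name and the assertion are illustrative and not taken from the patch.

import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

public class ParameterizedMigrationExample {

  // Replaces the JUnit 4 @Parameterized.Parameters factory.
  public static Collection<Object[]> data() {
    List<Object[]> parameters = new ArrayList<>();
    for (int i = 0; i <= 10; i++) {
      parameters.add(new Object[]{i});
    }
    return parameters;
  }

  // The constructor argument of the JUnit 4 runner becomes a test-method parameter.
  @ParameterizedTest
  @MethodSource("data")
  @Timeout(value = 240)   // was @Test(timeout = 240000)
  public void runWithParameter(int base) {
    assertTrue(base >= 0);
  }
}
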
+ 1 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailureWithRandomECPolicy.java

@@ -35,9 +35,7 @@ public class
       ParameterizedTestDFSStripedOutputStreamWithFailureWithRandomECPolicy
           .class.getName());
 
-  public ParameterizedTestDFSStripedOutputStreamWithFailureWithRandomECPolicy(
-      int base) {
-    super(base);
+  public ParameterizedTestDFSStripedOutputStreamWithFailureWithRandomECPolicy() {
     schema = StripedFileTestUtil.getRandomNonDefaultECPolicy().getSchema();
     LOG.info(schema.toString());
   }

+ 14 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java

@@ -25,7 +25,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -33,8 +34,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test striped file write operation with data node failures with fixed
@@ -45,7 +46,8 @@ public class TestDFSStripedOutputStreamWithFailure extends
   public static final Logger LOG = LoggerFactory.getLogger(
       TestDFSStripedOutputStreamWithFailure.class);
 
-  @Test(timeout=300000)
+  @Test
+  @Timeout(value = 300)
   public void testMultipleDatanodeFailure56() throws Exception {
     runTestWithMultipleFailure(getLength(56));
   }
@@ -62,7 +64,8 @@ public class TestDFSStripedOutputStreamWithFailure extends
     runTestWithMultipleFailure(getLength(lenIndex));
   }
 
-  @Test(timeout=240000)
+  @Test
+  @Timeout(value = 240)
   public void testBlockTokenExpired() throws Exception {
     // Make sure killPos is greater than the length of one stripe
     final int length = dataBlocks * cellSize * 3;
@@ -86,7 +89,8 @@ public class TestDFSStripedOutputStreamWithFailure extends
     }
   }
 
-  @Test(timeout = 90000)
+  @Test
+  @Timeout(value = 90)
   public void testAddBlockWhenNoSufficientDataBlockNumOfNodes()
       throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
@@ -105,7 +109,7 @@ public class TestDFSStripedOutputStreamWithFailure extends
       cluster.triggerHeartbeats();
       DatanodeInfo[] info = dfs.getClient().datanodeReport(
           DatanodeReportType.LIVE);
-      assertEquals("Mismatches number of live Dns", numDatanodes, info.length);
+      assertEquals(numDatanodes, info.length, "Mismatches number of live Dns");
       final Path dirFile = new Path(dir, "ecfile");
       LambdaTestUtils.intercept(
           IOException.class,
@@ -200,7 +204,8 @@ public class TestDFSStripedOutputStreamWithFailure extends
     }
   }
 
-  @Test(timeout = 90000)
+  @Test
+  @Timeout(value = 90)
   public void testAddBlockWhenNoSufficientParityNumOfNodes()
       throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
@@ -218,7 +223,7 @@ public class TestDFSStripedOutputStreamWithFailure extends
       cluster.triggerHeartbeats();
       DatanodeInfo[] info = dfs.getClient().datanodeReport(
           DatanodeReportType.LIVE);
-      assertEquals("Mismatches number of live Dns", numDatanodes, info.length);
+      assertEquals(numDatanodes, info.length, "Mismatches number of live Dns");
       Path srcPath = new Path(dir, "testAddBlockWhenNoSufficientParityNodes");
       int fileLength = cellSize - 1000;
       final byte[] expected = StripedFileTestUtil.generateBytes(fileLength);

+ 10 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java

@@ -41,8 +41,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
-import org.junit.Assert;
-import org.junit.Before;
+import org.junit.jupiter.api.BeforeEach;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -54,8 +53,10 @@ import java.util.Random;
 import java.util.Stack;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * Base class for test striped file write operation.
@@ -95,7 +96,7 @@ public class TestDFSStripedOutputStreamWithFailureBase {
   /*
    * Initialize erasure coding policy.
    */
-  @Before
+  @BeforeEach
   public void init() {
     ecPolicy = new ErasureCodingPolicy(getEcSchema(), cellSize);
     dataBlocks = ecPolicy.getNumDataUnits();
@@ -245,7 +246,7 @@ public class TestDFSStripedOutputStreamWithFailureBase {
         final String err = "failed, dn=" + dn + ", length=" + length
             + StringUtils.stringifyException(e);
         LOG.error(err);
-        Assert.fail(err);
+        fail(err);
       } finally {
         tearDown();
       }
@@ -389,15 +390,15 @@
       }
 
       if (datanodes != null) {
-        Assert.assertEquals(1, datanodes.length);
-        Assert.assertNotNull(datanodes[0]);
+        assertEquals(1, datanodes.length);
+        assertNotNull(datanodes[0]);
         return datanodes[0];
       }
 
       try {
         Thread.sleep(100);
       } catch (InterruptedException ie) {
-        Assert.fail(StringUtils.stringifyException(ie));
+        fail(StringUtils.stringifyException(ie));
         return null;
       }
     }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java

@@ -21,7 +21,7 @@ import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.junit.jupiter.api.Assertions.fail;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import java.io.File;
 import java.io.IOException;

+ 18 - 15
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java

@@ -39,7 +39,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.VersionInfo;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.util.function.Supplier;
 
@@ -47,8 +47,8 @@ import java.net.InetSocketAddress;
 import java.security.Permission;
 import java.util.concurrent.TimeoutException;
 
-import static org.junit.Assert.*;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 
@@ -83,7 +83,8 @@ public class TestDatanodeRegistration {
     try {
       System.setSecurityManager(sm);
     } catch (UnsupportedOperationException e) {
-      assumeTrue("Test is skipped because SecurityManager cannot be set (JEP 411)", false);
+      assumeTrue(false,
+          "Test is skipped because SecurityManager cannot be set (JEP 411)");
     }
 
     MiniDFSCluster cluster = null;
@@ -93,7 +94,7 @@
       cluster.waitActive();
       
       int initialLookups = sm.lookups;
-      assertTrue("dns security manager is active", initialLookups != 0);
+      assertTrue(initialLookups != 0, "dns security manager is active");
      
       DatanodeManager dm =
           cluster.getNamesystem().getBlockManager().getDatanodeManager();
@@ -203,7 +204,7 @@
       rpcServer.registerDatanode(dnReg);
 
       DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
-      assertEquals("Expected a registered datanode", 1, report.length);
+      assertEquals(1, report.length, "Expected a registered datanode");
 
       // register the same datanode again with a different storage ID
       dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
@@ -214,8 +215,8 @@
       rpcServer.registerDatanode(dnReg);
 
       report = client.datanodeReport(DatanodeReportType.ALL);
-      assertEquals("Datanode with changed storage ID not recognized",
-          1, report.length);
+      assertEquals(1, report.length,
+          "Datanode with changed storage ID not recognized");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -247,6 +248,7 @@
       doReturn(123).when(mockDnReg).getXferPort();
       doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
       doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
+      doReturn("localhost").when(mockDnReg).getHostName();
       
       // Should succeed when software versions are the same.
       doReturn("3.0.0").when(mockDnReg).getSoftwareVersion();
@@ -301,6 +303,7 @@
       doReturn(VersionInfo.getVersion()).when(mockDnReg).getSoftwareVersion();
       doReturn("127.0.0.1").when(mockDnReg).getIpAddr();
       doReturn(123).when(mockDnReg).getXferPort();
+      doReturn("localhost").when(mockDnReg).getHostName();
       rpcServer.registerDatanode(mockDnReg);
       
       // Should succeed when software versions are the same and CTimes are
@@ -371,16 +374,16 @@
       waitForHeartbeat(dn, dnd);
       assertTrue(dnd.isRegistered());
       assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
-      assertTrue("block report is not processed for DN " + dnd,
-          waitForBlockReport(dn, dnd));
+      assertTrue(waitForBlockReport(dn, dnd),
+          "block report is not processed for DN " + dnd);
       assertTrue(dnd.isRegistered());
       assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
 
       // check that block report is not processed and registration didn't
       // change.
       dnd.setForceRegistration(true);
-      assertFalse("block report is processed for DN " + dnd,
-          waitForBlockReport(dn, dnd));
+      assertFalse(waitForBlockReport(dn, dnd),
+          "block report is processed for DN " + dnd);
       assertFalse(dnd.isRegistered());
       assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
 
@@ -391,8 +394,8 @@
       newReg = dn.getDNRegistrationForBP(bpId);
       assertNotSame(lastReg, newReg);
       lastReg = newReg;
-      assertTrue("block report is not processed for DN " + dnd,
-          waitForBlockReport(dn, dnd));
+      assertTrue(waitForBlockReport(dn, dnd),
+          "block report is not processed for DN " + dnd);
       assertTrue(dnd.isRegistered());
       assertSame(lastReg, dn.getDNRegistrationForBP(bpId));
 
@@ -412,7 +415,7 @@
       } catch (NullPointerException npe) {
         failed = true;
       }
-      assertTrue("didn't fail", failed);
+      assertTrue(failed, "didn't fail");
       assertFalse(dnd.isRegistered());
 
       // should remain unregistered until next heartbeat.

+ 85 - 85
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java

@@ -18,11 +18,12 @@
 package org.apache.hadoop.hdfs;
 
 import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -86,9 +87,8 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.ToolRunner;
 
-import org.junit.Assert;
-import org.junit.Ignore;
-import org.junit.Test;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
 import org.eclipse.jetty.util.ajax.JSON;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -116,8 +116,7 @@ public class TestDecommission extends AdminStatesBaseTest {
     String downnode, int numDatanodes) throws IOException {
     boolean isNodeDown = (downnode != null);
     // need a raw stream
-    assertTrue("Not HDFS:"+fileSys.getUri(),
-        fileSys instanceof DistributedFileSystem);
+    assertTrue(fileSys instanceof DistributedFileSystem, "Not HDFS:" + fileSys.getUri());
     HdfsDataInputStream dis = (HdfsDataInputStream)
         fileSys.open(name);
     Collection<LocatedBlock> dinfo = dis.getAllBlocks();
@@ -170,8 +169,7 @@ public class TestDecommission extends AdminStatesBaseTest {
 
 
       // For decommissioning nodes, ensure capacity of the DN and dfsUsed
       //  is no longer counted towards total
-      assertEquals(newStats[0],
-          decommissioning ? 0 : info.getCapacity());
+      assertEquals(newStats[0], decommissioning ? 0 : info.getCapacity());
 
       // Ensure cluster used capacity is counted for normal nodes only
       assertEquals(newStats[1], decommissioning ? 0 : info.getDfsUsed());
@@ -232,8 +230,8 @@ public class TestDecommission extends AdminStatesBaseTest {
 
 
     // Ensure decommissioned datanode is not automatically shutdown
     DFSClient client = getDfsClient(0);
-    assertEquals("All datanodes must be alive", numDatanodes,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "All datanodes must be alive");
     assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(),
         numDatanodes));
     cleanupFile(fileSys, file1);
@@ -406,8 +404,8 @@ public class TestDecommission extends AdminStatesBaseTest {
 
 
         // Ensure decommissioned datanode is not automatically shutdown
         DFSClient client = getDfsClient(i);
-        assertEquals("All datanodes must be alive", numDatanodes, 
-            client.datanodeReport(DatanodeReportType.LIVE).length);
+        assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+            "All datanodes must be alive");
         // wait for the block to be replicated
         int tries = 0;
         while (tries++ < 20) {
@@ -420,8 +418,8 @@ public class TestDecommission extends AdminStatesBaseTest {
           } catch (InterruptedException ie) {
           }
         }
-        assertTrue("Checked if block was replicated after decommission, tried "
-            + tries + " times.", tries < 20);
+        assertTrue(tries < 20,
+            "Checked if block was replicated after decommission, tried " + tries + " times.");
         cleanupFile(fileSys, file1);
       }
     }
@@ -454,8 +452,8 @@ public class TestDecommission extends AdminStatesBaseTest {
 
 
       // Decommission one of the datanodes with a replica
       BlockLocation loc = fileSys.getFileBlockLocations(file1, 0, 1)[0];
-      assertEquals("Unexpected number of replicas from getFileBlockLocations",
-          replicas, loc.getHosts().length);
+      assertEquals(replicas, loc.getHosts().length,
+          "Unexpected number of replicas from getFileBlockLocations");
       final String toDecomHost = loc.getNames()[0];
       String toDecomUuid = null;
       for (DataNode d : getCluster().getDataNodes()) {
@@ -464,7 +462,7 @@ public class TestDecommission extends AdminStatesBaseTest {
           break;
         }
       }
-      assertNotNull("Could not find a dn with the block!", toDecomUuid);
+      assertNotNull(toDecomUuid, "Could not find a dn with the block!");
       final DatanodeInfo decomNode = takeNodeOutofService(0, toDecomUuid,
           0, decommissionedNodes, AdminStates.DECOMMISSIONED);
       decommissionedNodes.add(decomNode);
@@ -476,8 +474,8 @@ public class TestDecommission extends AdminStatesBaseTest {
 
 
       // Ensure decommissioned datanode is not automatically shutdown
       DFSClient client = getDfsClient(0);
-      assertEquals("All datanodes must be alive", numDatanodes,
-          client.datanodeReport(DatanodeReportType.LIVE).length);
+      assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+          "All datanodes must be alive");
 
       // wait for the block to be replicated
       final ExtendedBlock b = DFSTestUtil.getFirstBlock(fileSys, file1);
@@ -566,7 +564,7 @@ public class TestDecommission extends AdminStatesBaseTest {
         break;
       }
     }
-    assertNotNull("Could not find decomNode in cluster!", decomNode);
+    assertNotNull(decomNode, "Could not find decomNode in cluster!");
     return decomNode;
   }
 
@@ -612,14 +610,14 @@ public class TestDecommission extends AdminStatesBaseTest {
         Thread.sleep(HEARTBEAT_INTERVAL * 1000);
         info = client.datanodeReport(DatanodeReportType.LIVE);
       }
-      assertEquals("Number of live nodes should be 0", 0, info.length);
+      assertEquals(0, info.length, "Number of live nodes should be 0");
       
       // Test that bogus hostnames are considered "dead".
       // The dead report should have an entry for the bogus entry in the hosts
       // file.  The original datanode is excluded from the report because it
       // is no longer in the included list.
       info = client.datanodeReport(DatanodeReportType.DEAD);
-      assertEquals("There should be 1 dead node", 1, info.length);
+      assertEquals(1, info.length, "There should be 1 dead node");
       assertEquals(bogusIp, info[0].getHostName());
     }
   }
@@ -772,7 +770,7 @@ public class TestDecommission extends AdminStatesBaseTest {
 
 
               toolOut.reset();
               assertEquals(0,
-                  ToolRunner.run(dfsAdmin, new String[] {"-listOpenFiles",
+                  ToolRunner.run(dfsAdmin, new String[]{"-listOpenFiles",
                       "-blockingDecommission", "-path", firstOpenFile}));
               toolOut.flush();
               result2 = verifyOpenFilesListing(
@@ -950,7 +948,7 @@ public class TestDecommission extends AdminStatesBaseTest {
           OpenFilesIterator.FILTER_PATH_DEFAULT);
       assertEquals(0, batchedListEntries.size());
     } catch (NullPointerException e) {
-      Assert.fail("Should not throw NPE when the file is not under " +
+      fail("Should not throw NPE when the file is not under " +
           "construction but has lease!");
     }
     initExcludeHost("");
@@ -1176,8 +1174,8 @@ public class TestDecommission extends AdminStatesBaseTest {
     getCluster().startDataNodes(getConf(), 1, true, null, null, null, null);
     numDatanodes+=1;
 
-    assertEquals("Number of datanodes should be 2 ", 2,
-        getCluster().getDataNodes().size());
+    assertEquals(2, getCluster().getDataNodes().size(),
+        "Number of datanodes should be 2 ");
     //Restart the namenode
     getCluster().restartNameNode();
     DatanodeInfo datanodeInfo = NameNodeAdapter.getDatanode(
@@ -1185,11 +1183,11 @@ public class TestDecommission extends AdminStatesBaseTest {
     waitNodeState(datanodeInfo, AdminStates.DECOMMISSIONED);
 
     // Ensure decommissioned datanode is not automatically shutdown
-    assertEquals("All datanodes must be alive", numDatanodes, 
-        client.datanodeReport(DatanodeReportType.LIVE).length);
-    assertTrue("Checked if block was replicated after decommission.",
-        checkFile(fileSys, file1, replicas, datanodeInfo.getXferAddr(),
-        numDatanodes) == null);
+    assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "All datanodes must be alive");
+    assertTrue(checkFile(fileSys, file1, replicas, datanodeInfo.getXferAddr(),
+            numDatanodes) == null,
+        "Checked if block was replicated after decommission.");
 
     cleanupFile(fileSys, file1);
     // Restart the cluster and ensure recommissioned datanodes
@@ -1227,10 +1225,10 @@ public class TestDecommission extends AdminStatesBaseTest {
    //Restart the namenode
    getCluster().restartNameNode();

-    assertEquals("There should be one node alive", 1,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
-    assertEquals("There should be one node dead", 1,
-        client.datanodeReport(DatanodeReportType.DEAD).length);
+    assertEquals(1, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "There should be one node alive");
+    assertEquals(1, client.datanodeReport(DatanodeReportType.DEAD).length,
+        "There should be one node dead");
  }

  /**
@@ -1247,7 +1245,7 @@ public class TestDecommission extends AdminStatesBaseTest {
   * It is not recommended to use a registration name which is not also a
   * valid DNS hostname for the DataNode.  See HDFS-5237 for background.
   */
-  @Ignore
+  @Disabled
  @Test
  public void testIncludeByRegistrationName() throws Exception {
    // Any IPv4 address starting with 127 functions as a "loopback" address
@@ -1301,8 +1299,8 @@ public class TestDecommission extends AdminStatesBaseTest {
        try {
          DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.LIVE);
          if (info.length == 1) {
-            Assert.assertFalse(info[0].isDecommissioned());
-            Assert.assertFalse(info[0].isDecommissionInProgress());
+            assertFalse(info[0].isDecommissioned());
+            assertFalse(info[0].isDecommissionInProgress());
            assertEquals(registrationName, info[0].getHostName());
            return true;
          }
@@ -1358,8 +1356,8 @@ public class TestDecommission extends AdminStatesBaseTest {
    }
    // Run decom scan and check
    BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
-    assertEquals("Unexpected # of nodes checked", expectedNumCheckedNodes, 
-        decomManager.getNumNodesChecked());
+    assertEquals(expectedNumCheckedNodes, decomManager.getNumNodesChecked(),
+        "Unexpected # of nodes checked");
    // Recommission all nodes
    for (DatanodeInfo dn : decommissionedNodes) {
      putNodeInService(0, dn);
@@ -1470,10 +1468,10 @@ public class TestDecommission extends AdminStatesBaseTest {
 
 
  private void assertTrackedAndPending(DatanodeAdminManager decomManager,
      int tracked, int pending) {
-    assertEquals("Unexpected number of tracked nodes", tracked,
-        decomManager.getNumTrackedNodes());
-    assertEquals("Unexpected number of pending nodes", pending,
-        decomManager.getNumPendingNodes());
+    assertEquals(tracked, decomManager.getNumTrackedNodes(),
+        "Unexpected number of tracked nodes");
+    assertEquals(pending, decomManager.getNumPendingNodes(),
+        "Unexpected number of pending nodes");
  }

  /**
@@ -1628,12 +1626,12 @@ public class TestDecommission extends AdminStatesBaseTest {
    long newTotalCapacity = datanodeStatistics.getCapacityTotal();
    long newBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();

-    assertTrue("DfsUsedCapacity should not be the same after a node has " +
-        "been decommissioned!", initialUsedCapacity != newUsedCapacity);
-    assertTrue("TotalCapacity should not be the same after a node has " +
-        "been decommissioned!", initialTotalCapacity != newTotalCapacity);
-    assertTrue("BlockPoolUsed should not be the same after a node has " +
-        "been decommissioned!",initialBlockPoolUsed != newBlockPoolUsed);
+    assertTrue(initialUsedCapacity != newUsedCapacity,
+        "DfsUsedCapacity should not be the same after a node has " + "been decommissioned!");
+    assertTrue(initialTotalCapacity != newTotalCapacity,
+        "TotalCapacity should not be the same after a node has " + "been decommissioned!");
+    assertTrue(initialBlockPoolUsed != newBlockPoolUsed,
+        "BlockPoolUsed should not be the same after a node has " + "been decommissioned!");
  }

  /**
@@ -1712,15 +1710,16 @@ public class TestDecommission extends AdminStatesBaseTest {
    while (Instant.now().isBefore(checkUntil)) {
      BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
      assertEquals(
-          "Unexpected number of decommissioning nodes queued in DatanodeAdminManager.",
-          0, decomManager.getNumPendingNodes());
+          0, decomManager.getNumPendingNodes(),
+          "Unexpected number of decommissioning nodes queued in DatanodeAdminManager.");
      assertEquals(
-          "Unexpected number of decommissioning nodes tracked in DatanodeAdminManager.",
-          numDeadNodes, decomManager.getNumTrackedNodes());
+          numDeadNodes, decomManager.getNumTrackedNodes(),
+          "Unexpected number of decommissioning nodes tracked in DatanodeAdminManager.");
      assertTrue(
-          "Dead decommissioning nodes unexpectedly transitioned out of DECOMMISSION_INPROGRESS.",
+
          deadNodeProps.keySet().stream()
-              .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)));
+              .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)),
+          "Dead decommissioning nodes unexpectedly transitioned out of DECOMMISSION_INPROGRESS.");
      Thread.sleep(500);
    }

@@ -1759,35 +1758,36 @@ public class TestDecommission extends AdminStatesBaseTest {
       // "processPendingNodes" to de-queue the live nodes & decommission them
       // "processPendingNodes" to de-queue the live nodes & decommission them
       BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
       BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
       assertEquals(
       assertEquals(
-          "DatanodeAdminBackoffMonitor did not re-queue dead decommissioning nodes as expected.",
-          2, decomManager.getNumPendingNodes());
+          2, decomManager.getNumPendingNodes(),
+          "DatanodeAdminBackoffMonitor did not re-queue dead decommissioning nodes as expected.");
       assertEquals(
       assertEquals(
-          "DatanodeAdminBackoffMonitor did not re-queue dead decommissioning nodes as expected.",
-          0, decomManager.getNumTrackedNodes());
+          0, decomManager.getNumTrackedNodes(),
+          "DatanodeAdminBackoffMonitor did not re-queue dead decommissioning nodes as expected.");
     } else {
     } else {
       // For TestDecommission a single tick/execution of the DatanodeAdminDefaultMonitor
       // For TestDecommission a single tick/execution of the DatanodeAdminDefaultMonitor
       // will re-queue the dead nodes. A seconds tick is needed to de-queue the live nodes
       // will re-queue the dead nodes. A seconds tick is needed to de-queue the live nodes
       // & decommission them
       // & decommission them
       BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
       BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
       assertEquals(
       assertEquals(
-          "DatanodeAdminDefaultMonitor did not re-queue dead decommissioning nodes as expected.",
-          4, decomManager.getNumPendingNodes());
+          4, decomManager.getNumPendingNodes(),
+          "DatanodeAdminDefaultMonitor did not re-queue dead decommissioning nodes as expected.");
       assertEquals(
       assertEquals(
-          "DatanodeAdminDefaultMonitor did not re-queue dead decommissioning nodes as expected.",
-          0, decomManager.getNumTrackedNodes());
+          0, decomManager.getNumTrackedNodes(),
+          "DatanodeAdminDefaultMonitor did not re-queue dead decommissioning nodes as expected.");
       BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
       BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
       assertEquals(
       assertEquals(
-          "DatanodeAdminDefaultMonitor did not decommission live nodes as expected.",
-          2, decomManager.getNumPendingNodes());
+          2, decomManager.getNumPendingNodes(),
+          "DatanodeAdminDefaultMonitor did not decommission live nodes as expected.");
       assertEquals(
       assertEquals(
-          "DatanodeAdminDefaultMonitor did not decommission live nodes as expected.",
-          0, decomManager.getNumTrackedNodes());
+          0, decomManager.getNumTrackedNodes(),
+          "DatanodeAdminDefaultMonitor did not decommission live nodes as expected.");
     }
     }
-    assertTrue("Live nodes not DECOMMISSIONED as expected.", liveDecommNodes.stream()
-        .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSIONED)));
-    assertTrue("Dead nodes not DECOMMISSION_INPROGRESS as expected.",
-        deadNodeProps.keySet().stream()
-            .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)));
+    assertTrue(liveDecommNodes.stream()
+            .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSIONED)),
+        "Live nodes not DECOMMISSIONED as expected.");
+    assertTrue(deadNodeProps.keySet().stream()
+            .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)),
+        "Dead nodes not DECOMMISSION_INPROGRESS as expected.");
    assertThat(deadNodeProps.keySet())
        .as("Check all dead decommissioning nodes queued in DatanodeAdminManager")
        .containsAll(decomManager.getPendingNodes());
@@ -1798,15 +1798,15 @@ public class TestDecommission extends AdminStatesBaseTest {
    while (Instant.now().isBefore(checkUntil)) {
      BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
      assertEquals(
-          "Unexpected number of decommissioning nodes queued in DatanodeAdminManager.",
-          0, decomManager.getNumPendingNodes());
+          0, decomManager.getNumPendingNodes(),
+          "Unexpected number of decommissioning nodes queued in DatanodeAdminManager.");
      assertEquals(
-          "Unexpected number of decommissioning nodes tracked in DatanodeAdminManager.",
-          numDeadNodes, decomManager.getNumTrackedNodes());
+          numDeadNodes, decomManager.getNumTrackedNodes(),
+          "Unexpected number of decommissioning nodes tracked in DatanodeAdminManager.");
      assertTrue(
-          "Dead decommissioning nodes unexpectedly transitioned out of DECOMMISSION_INPROGRESS.",
          deadNodeProps.keySet().stream()
-              .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)));
+              .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)),
+          "Dead decommissioning nodes unexpectedly transitioned out of DECOMMISSION_INPROGRESS.");
      Thread.sleep(500);
    }

@@ -1844,8 +1844,8 @@ public class TestDecommission extends AdminStatesBaseTest {
      final List<DatanodeDescriptor> liveNodes, final int numDeadNodes,
      final Map<DatanodeDescriptor, MiniDFSCluster.DataNodeProperties> deadNodeProps,
      final ArrayList<DatanodeInfo> decommissionedNodes, final Path filePath) throws Exception {
-    assertTrue("Must have numLiveNode > 0", numLiveNodes > 0);
-    assertTrue("Must have numDeadNode > 0", numDeadNodes > 0);
+    assertTrue(numLiveNodes > 0, "Must have numLiveNode > 0");
+    assertTrue(numDeadNodes > 0, "Must have numDeadNode > 0");
    int numNodes = numLiveNodes + numDeadNodes;

    // Allow "numDeadNodes" datanodes to be decommissioned at a time
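
The dominant change in TestDecommission above is mechanical: in org.junit.jupiter.api.Assertions the optional failure message moves from the first argument to the last, and @Ignore becomes @Disabled. A minimal standalone sketch of that shape (class name and values are illustrative, not taken from the Hadoop tests):

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

class MessageLastSketchTest {

  @Test
  void messageMovesToTheLastArgument() {
    int[] info = new int[1];
    // JUnit 4: assertEquals("There should be 1 dead node", 1, info.length);
    // JUnit 5: expected and actual come first, the message comes last.
    assertEquals(1, info.length, "There should be 1 dead node");
    assertTrue(info.length > 0, "array should not be empty");
  }

  @Disabled("JUnit 5 replacement for JUnit 4's @Ignore")
  @Test
  void skippedTest() {
    // Never runs; kept only to show the @Ignore -> @Disabled rename.
  }
}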

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithBackoffMonitor.java

@@ -22,7 +22,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement
    .DatanodeAdminBackoffMonitor;
import org.apache.hadoop.hdfs.server.blockmanagement
    .DatanodeAdminMonitorInterface;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;

import java.io.IOException;

@@ -35,6 +36,7 @@ import java.io.IOException;
public class TestDecommissionWithBackoffMonitor extends TestDecommission {

  @Override
+  @BeforeEach
  public void setup() throws IOException {
    super.setup();
    Configuration conf = getConf();
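
TestDecommissionWithBackoffMonitor only gains a @BeforeEach on its overridden setup(); on my reading of the Jupiter lifecycle rules, an override does not inherit the superclass method's lifecycle annotation, so the subclass re-declares it. A standalone sketch of that shape (class names are made up):

import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class BaseSetupSketchTest {
  protected boolean baseReady;

  @BeforeEach
  public void setup() {
    baseReady = true;
  }
}

class DerivedSetupSketchTest extends BaseSetupSketchTest {
  private boolean derivedReady;

  @Override
  @BeforeEach  // re-declared so Jupiter still runs the override before each test
  public void setup() {
    super.setup();
    derivedReady = true;
  }

  @Test
  void bothSetupsRan() {
    assertTrue(baseReady && derivedReady);
  }
}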

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java

@@ -18,8 +18,8 @@
package org.apache.hadoop.hdfs;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.util.Random;
@@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;

/**
 * This class tests if FSOutputSummer works correctly.
@@ -90,9 +90,9 @@ public class TestFSOutputSummer {
  private void checkAndEraseData(byte[] actual, int from, byte[] expected,
      String message) throws Exception {
    for (int idx = 0; idx < actual.length; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-                        expected[from+idx]+" actual "+actual[idx],
-                        actual[idx], expected[from+idx]);
+      assertEquals(actual[idx], expected[from + idx],
+          message + " byte " + (from + idx)
+          + " differs. expected " + expected[from + idx] + " actual " + actual[idx]);
      actual[idx] = 0;
    }
  }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -39,7 +39,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
import static org.mockito.Mockito.doReturn;

import java.io.BufferedReader;
@@ -400,8 +400,8 @@ public class TestFileCreation {
       
       
      // verify the disk space the file occupied
      long diskSpace = dfs.getContentSummary(file1.getParent()).getLength();
-      assertEquals(fileSize, diskSpace, file1 + " should take " + fileSize + " bytes disk space " +
-          "but found to take " + diskSpace + " bytes");
+      assertEquals(fileSize, diskSpace, file1 + " should take " + fileSize + " bytes disk space "
+          + "but found to take " + diskSpace + " bytes");

      // Check storage usage 
      // can't check capacities for real storage since the OS file system may be changing under us.
@@ -409,7 +409,7 @@ public class TestFileCreation {
        DataNode dn = cluster.getDataNodes().get(0);
        FsDatasetSpi<?> dataset = DataNodeTestUtils.getFSDataset(dn);
        assertEquals(fileSize, dataset.getDfsUsed());
-        assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY-fileSize,
+        assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY - fileSize,
            dataset.getRemaining());
      }
    } finally {

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFsShellPermission.java

@@ -18,7 +18,7 @@
 
 
package org.apache.hadoop.hdfs;

-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
@@ -37,7 +37,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;

/**
 * This test covers privilege related aspects of FsShell

+ 13 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java

@@ -17,7 +17,7 @@
 */
package org.apache.hadoop.hdfs;

-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;

import java.net.InetSocketAddress;
import java.util.Arrays;
@@ -60,7 +60,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.LambdaTestUtils;

-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -78,7 +78,7 @@ public class TestGetBlocks {
 
 
  /**
   * Stop the heartbeat of a datanode in the MiniDFSCluster
-   * 
+   *
   * @param cluster
   *          The MiniDFSCluster
   * @param hostName
@@ -101,7 +101,7 @@ public class TestGetBlocks {
   * when stale nodes checking is enabled. Also test during the scenario when 1)
   * stale nodes checking is enabled, 2) a writing is going on, 3) a datanode
   * becomes stale happen simultaneously
-   * 
+   *
   * @throws Exception
   */
  @Test
@@ -121,8 +121,8 @@ public class TestGetBlocks {
    List<DatanodeDescriptor> nodeInfoList = cluster.getNameNode()
        .getNamesystem().getBlockManager().getDatanodeManager()
        .getDatanodeListForReport(DatanodeReportType.LIVE);
-    assertEquals("Unexpected number of datanodes", NUM_DATA_NODES,
-        nodeInfoList.size());
+    assertEquals(NUM_DATA_NODES, nodeInfoList.size(),
+        "Unexpected number of datanodes");
    FileSystem fileSys = cluster.getFileSystem();
    FSDataOutputStream stm = null;
    try {
@@ -331,18 +331,18 @@ public class TestGetBlocks {
    String dId = cluster.getDataNodes().get(0).getDatanodeUuid();
    DatanodeDescriptor dnd = BlockManagerTestUtil.getDatanode(ns, dId);
    DatanodeStorageInfo[] storages = dnd.getStorageInfos();
-    assertEquals("DataNode should have 4 storages", 4, storages.length);
+    assertEquals(4, storages.length, "DataNode should have 4 storages");

    Iterator<BlockInfo> dnBlockIt = null;
    // check illegal start block number
    try {
      dnBlockIt = BlockManagerTestUtil.getBlockIterator(
          cluster.getNamesystem(), dId, -1);
-      assertTrue("Should throw IllegalArgumentException", false);
+      assertTrue(false, "Should throw IllegalArgumentException");
    } catch(IllegalArgumentException ei) {
      // as expected
    }
-    assertNull("Iterator should be null", dnBlockIt);
+    assertNull(dnBlockIt, "Iterator should be null");

    // form an array of all DataNode blocks
    int numBlocks = dnd.numBlocks();
@@ -356,7 +356,7 @@ public class TestGetBlocks {
        try {
          storageBlockIt.remove();
          assertTrue(
-              "BlockInfo iterator should have been unmodifiable", false);
+              false, "BlockInfo iterator should have been unmodifiable");
        } catch (UnsupportedOperationException e) {
          //expected exception
        }
@@ -367,17 +367,17 @@ public class TestGetBlocks {
    for(int i = 0; i < allBlocks.length; i++) {
      // create iterator starting from i
      dnBlockIt = BlockManagerTestUtil.getBlockIterator(ns, dId, i);
-      assertTrue("Block iterator should have next block", dnBlockIt.hasNext());
+      assertTrue(dnBlockIt.hasNext(), "Block iterator should have next block");
      // check iterator lists blocks in the desired order
      for(int j = i; j < allBlocks.length; j++) {
-        assertEquals("Wrong block order", allBlocks[j], dnBlockIt.next());
+        assertEquals(allBlocks[j], dnBlockIt.next(), "Wrong block order");
      }
    }

    // check start block number larger than numBlocks in the DataNode
    dnBlockIt = BlockManagerTestUtil.getBlockIterator(
        ns, dId, allBlocks.length + 1);
-    assertFalse("Iterator should not have next block", dnBlockIt.hasNext());
+    assertFalse(dnBlockIt.hasNext(), "Iterator should not have next block");
  }

  @Test

+ 10 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetFileChecksum.java

@@ -17,7 +17,8 @@
 */
package org.apache.hadoop.hdfs;

-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;

@@ -25,10 +26,9 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.Path;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;

public class TestGetFileChecksum {
  private static final int BLOCKSIZE = 1024;
@@ -38,7 +38,7 @@ public class TestGetFileChecksum {
  private MiniDFSCluster cluster;
  private DistributedFileSystem dfs;

-  @Before
+  @BeforeEach
  public void setUp() throws Exception {
    conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
@@ -48,7 +48,7 @@ public class TestGetFileChecksum {
    dfs = cluster.getFileSystem();
  }

-  @After
+  @AfterEach
  public void tearDown() throws Exception {
    if (cluster != null) {
      cluster.shutdown();
@@ -69,7 +69,7 @@ public class TestGetFileChecksum {
 
 
    for (int i = 0; i < appendRounds + 1; i++) {
      FileChecksum checksum = dfs.getFileChecksum(foo, appendLength * (i+1));
-      Assert.assertTrue(checksum.equals(fc[i]));
+      assertTrue(checksum.equals(fc[i]));
    }
  }
@@ -82,8 +82,8 @@ public class TestGetFileChecksum {
       fail("getFileChecksum should fail for files "
       fail("getFileChecksum should fail for files "
           + "with blocks under construction");
           + "with blocks under construction");
     } catch (IOException ie) {
     } catch (IOException ie) {
-      Assert.assertTrue(ie.getMessage().contains(
-          "Fail to get checksum, since file /testFile "
+      assertTrue(ie.getMessage()
+          .contains("Fail to get checksum, since file /testFile "
               + "is under construction."));
               + "is under construction."));
     }
     }
   }
   }
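
TestGetFileChecksum shows the per-test lifecycle renames and drops the Assert. prefix in favour of static imports from Assertions. A compact sketch of the same shape (the resource here is a plain list rather than a MiniDFSCluster):

import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.ArrayList;
import java.util.List;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class PerTestLifecycleSketchTest {
  private List<String> resource;

  @BeforeEach  // was @Before in JUnit 4
  void setUp() {
    resource = new ArrayList<>();
    resource.add("ready");
  }

  @AfterEach   // was @After in JUnit 4
  void tearDown() {
    resource.clear();
  }

  @Test
  void resourceIsInitialized() {
    // Was Assert.assertTrue(...); JUnit 5 code usually statically imports Assertions.
    assertTrue(resource.contains("ready"));
  }
}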

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHAAuxiliaryPort.java

@@ -23,14 +23,13 @@ import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_AUXILIARY_KEY;
import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;

/**
 * Test NN auxiliary port with HA.

+ 24 - 25
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSPolicyProvider.java

@@ -17,7 +17,8 @@
 */
package org.apache.hadoop.hdfs;

-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.Arrays;
import java.util.Comparator;
@@ -30,16 +31,14 @@ import org.apache.hadoop.hdfs.qjournal.server.JournalNodeRpcServer;
import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.security.authorize.Service;
+import org.apache.hadoop.test.TestName;
import org.apache.hadoop.util.Sets;

-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
+import org.junit.jupiter.api.BeforeAll;

+import org.junit.jupiter.api.extension.RegisterExtension;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -51,7 +50,6 @@ import org.slf4j.LoggerFactory;
 * HDFSPolicyProvider.  This is a parameterized test repeated for multiple HDFS
 * RPC server classes.
 */
-@RunWith(Parameterized.class)
public class TestHDFSPolicyProvider {

  private static final Logger LOG =
@@ -67,12 +65,12 @@ public class TestHDFSPolicyProvider {
        }
      };

-  @Rule
-  public TestName testName = new TestName();
+  @RegisterExtension
+  private TestName methodName = new TestName();

-  private final Class<?> rpcServerClass;
+  private Class<?> rpcServerClass;

-  @BeforeClass
+  @BeforeAll
  public static void initialize() {
    Service[] services = new HDFSPolicyProvider().getServices();
    policyProviderProtocols = new HashSet<>(services.length);
@@ -81,11 +79,11 @@ public class TestHDFSPolicyProvider {
    }
  }

-  public TestHDFSPolicyProvider(Class<?> rpcServerClass) {
-    this.rpcServerClass = rpcServerClass;
+  public void initTestHDFSPolicyProvider(Class<?> pRpcServerClass) {
+    this.rpcServerClass = pRpcServerClass;
+    initialize();
  }

-  @Parameters(name = "protocolsForServer-{0}")
  public static List<Class<?>[]> data() {
    return Arrays.asList(new Class<?>[][]{
        {NameNodeRpcServer.class},
@@ -94,8 +92,10 @@ public class TestHDFSPolicyProvider {
    });
  }

-  @Test
-  public void testPolicyProviderForServer() {
+  @ParameterizedTest(name = "protocolsForServer-{0}")
+  @MethodSource("data")
+  public void testPolicyProviderForServer(Class<?> pRpcServerClass) {
+    initTestHDFSPolicyProvider(pRpcServerClass);
    List<?> ifaces = ClassUtils.getAllInterfaces(rpcServerClass);
    Set<Class<?>> serverProtocols = new HashSet<>(ifaces.size());
    for (Object obj : ifaces) {
@@ -105,17 +105,16 @@ public class TestHDFSPolicyProvider {
      }
    }
    LOG.info("Running test {} for RPC server {}.  Found server protocols {} "
-        + "and policy provider protocols {}.", testName.getMethodName(),
+        + "and policy provider protocols {}.", methodName.getMethodName(),
        rpcServerClass.getName(), serverProtocols, policyProviderProtocols);
-    assertFalse("Expected to find at least one protocol in server.",
-        serverProtocols.isEmpty());
+    assertFalse(serverProtocols.isEmpty(),
+        "Expected to find at least one protocol in server.");
    final Set<Class<?>> differenceSet =
        Sets.difference(serverProtocols, policyProviderProtocols);
-    assertTrue(
+    assertTrue(differenceSet.isEmpty(),
        String.format("Following protocols for server %s are not defined in "
-            + "%s: %s",
+                + "%s: %s",
            rpcServerClass.getName(), HDFSPolicyProvider.class.getName(),
-            Arrays.toString(differenceSet.toArray())),
-        differenceSet.isEmpty());
+            Arrays.toString(differenceSet.toArray())));
  }
}
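
TestHDFSPolicyProvider swaps the JUnit 4 Parameterized runner (constructor injection plus a @Parameters factory) for a Jupiter @ParameterizedTest fed by @MethodSource. A minimal sketch of that pattern with a toy parameter list rather than the HDFS RPC server classes:

import static org.junit.jupiter.api.Assertions.assertNotNull;

import java.util.List;

import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

class ParameterizedSketchTest {

  // Plays the role of the old @Parameters factory; referenced by name below.
  static List<Class<?>> data() {
    return List.of(String.class, Integer.class);
  }

  // Replaces @RunWith(Parameterized.class) plus a constructor:
  // the parameter now arrives as a method argument.
  @ParameterizedTest(name = "protocolsForServer-{0}")
  @MethodSource("data")
  void everyParameterIsAClass(Class<?> clazz) {
    assertNotNull(clazz.getName());
  }
}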

+ 52 - 46
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java

@@ -29,15 +29,16 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.test.PathUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;

import java.io.File;
import java.io.IOException;
import java.net.UnknownHostException;

import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;

/**
 * This test checks correctness of port usage by hdfs components:
@@ -247,7 +248,8 @@ public class TestHDFSServerPorts {
    return true;
  }

-  @Test(timeout = 300000)
+  @Test
+  @Timeout(value = 300)
  public void testNameNodePorts() throws Exception {
    runTestNameNodePorts(false);
    runTestNameNodePorts(true);
@@ -280,7 +282,7 @@ public class TestHDFSServerPorts {
      started = canStartNameNode(conf2);

      if (withService) {
-        assertFalse("Should've failed on service port", started);
+        assertFalse(started, "Should've failed on service port");

        // reset conf2 since NameNode modifies it
        FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
@@ -298,7 +300,8 @@ public class TestHDFSServerPorts {
  /**
   * Verify datanode port usage.
   */
-  @Test(timeout = 300000)
+  @Test
+  @Timeout(value = 300)
  public void testDataNodePorts() throws Exception {
    NameNode nn = null;
    try {
@@ -334,7 +337,8 @@ public class TestHDFSServerPorts {
  /**
   * Verify secondary namenode port usage.
   */
-  @Test(timeout = 300000)
+  @Test
+  @Timeout(value = 300)
  public void testSecondaryNodePorts() throws Exception {
    NameNode nn = null;
    try {
@@ -359,44 +363,46 @@ public class TestHDFSServerPorts {
      stopNameNode(nn);
    }
  }
-    
-    /**
-     * Verify BackupNode port usage.
-     */
-    @Test(timeout = 300000)
-    public void testBackupNodePorts() throws Exception {
-      NameNode nn = null;
-      try {
-        nn = startNameNode();
-
-        Configuration backup_config = new HdfsConfiguration(config);
-        backup_config.set(
-            DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
-        // bind http server to the same port as name-node
-        backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, 
-            backup_config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
-
-        LOG.info("= Starting 1 on: " + backup_config.get(
-            DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
-
-        assertFalse("Backup started on same port as Namenode", 
-                           canStartBackupNode(backup_config)); // should fail
-
-        // reset namenode backup address because Windows does not release
-        // port used previously properly.
-        backup_config.set(
-            DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
-
-        // bind http server to a different port
-        backup_config.set(
-            DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST);
-        LOG.info("= Starting 2 on: " + backup_config.get(
-            DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
-
-        boolean started = canStartBackupNode(backup_config);
-        assertTrue("Backup Namenode should've started", started); // should start now
-      } finally {
-        stopNameNode(nn);
-      }
+
+  /**
+   * Verify BackupNode port usage.
+   */
+  @SuppressWarnings("checkstyle:localvariablename")
+  @Test
+  @Timeout(value = 300)
+  public void testBackupNodePorts() throws Exception {
+    NameNode nn = null;
+    try {
+      nn = startNameNode();
+
+      Configuration backup_config = new HdfsConfiguration(config);
+      backup_config.set(
+          DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
+      // bind http server to the same port as name-node
+      backup_config.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
+          backup_config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY));
+
+      LOG.info("= Starting 1 on: " + backup_config.get(
+          DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
+
+      assertFalse(canStartBackupNode(backup_config),
+          "Backup started on same port as Namenode"); // should fail
+
+      // reset namenode backup address because Windows does not release
+      // port used previously properly.
+      backup_config.set(
+          DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, THIS_HOST);
+
+      // bind http server to a different port
+      backup_config.set(
+          DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, THIS_HOST);
+      LOG.info("= Starting 2 on: " + backup_config.get(
+          DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY));
+
+      boolean started = canStartBackupNode(backup_config);
+      assertTrue(started, "Backup Namenode should've started"); // should start now
+    } finally {
+      stopNameNode(nn);
+    }
  }
}
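
In TestHDFSServerPorts the JUnit 4 @Test(timeout = 300000) attribute becomes a separate @Timeout annotation, whose value defaults to seconds. A sketch of both spellings (test names and sleep times are made up):

import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

class TimeoutSketchTest {

  // JUnit 4: @Test(timeout = 300000)  // milliseconds
  // JUnit 5: @Timeout defaults to seconds, so 300000 ms becomes 300 s.
  @Test
  @Timeout(value = 300)
  void finishesWithinFiveMinutes() throws InterruptedException {
    Thread.sleep(10);
  }

  // The unit can also be spelled out explicitly.
  @Test
  @Timeout(value = 300_000, unit = TimeUnit.MILLISECONDS)
  void sameDeadlineInMilliseconds() throws InterruptedException {
    Thread.sleep(10);
  }
}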

+ 15 - 17
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java

@@ -17,9 +17,9 @@
 */
package org.apache.hadoop.hdfs;

-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;

import java.io.IOException;
import java.util.UUID;
@@ -37,9 +37,9 @@ import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
import org.mockito.Mockito;

/**
@@ -66,7 +66,7 @@ public class TestHDFSTrash {
  private static UserGroupInformation user1;
  private static UserGroupInformation user2;

-  @BeforeClass
+  @BeforeAll
  public static void setUp() throws Exception {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    fs = FileSystem.get(conf);
@@ -93,7 +93,7 @@ public class TestHDFSTrash {
        null, FsAction.ALL, FsAction.ALL, FsAction.ALL);
  }

-  @AfterClass
+  @AfterAll
  public static void tearDown() {
    if (cluster != null) { cluster.shutdown(); }
  }
@@ -142,13 +142,11 @@ public class TestHDFSTrash {
    fs.mkdirs(user1Tmp);
    Trash u1Trash = getPerUserTrash(user1, fs, testConf);
    Path u1t = u1Trash.getCurrentTrashDir(user1Tmp);
-    assertTrue(String.format("Failed to move %s to trash", user1Tmp),
-        u1Trash.moveToTrash(user1Tmp));
-    assertTrue(
-        String.format(
-            "%s should be allowed to remove its own trash directory %s",
-            user1.getUserName(), u1t),
-        fs.delete(u1t, true));
+    assertTrue(u1Trash.moveToTrash(user1Tmp),
+        String.format("Failed to move %s to trash", user1Tmp));
+    assertTrue(fs.delete(u1t, true), String.format(
+        "%s should be allowed to remove its own trash directory %s",
+        user1.getUserName(), u1t));
    assertFalse(fs.exists(u1t));

    // login as user2, move something to trash
@@ -166,8 +164,8 @@ public class TestHDFSTrash {
              USER1_NAME, USER2_NAME));
    } catch (AccessControlException e) {
      assertTrue(e instanceof AccessControlException);
-      assertTrue("Permission denied messages must carry the username",
-          e.getMessage().contains(USER1_NAME));
+      assertTrue(e.getMessage().contains(USER1_NAME),
+          "Permission denied messages must carry the username");
    }
  }
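
TestHDFSTrash keeps its one-time cluster setup and teardown but renames the class-level hooks; the methods stay static, as Jupiter requires for @BeforeAll/@AfterAll under the default lifecycle. A standalone sketch with a trivial shared resource instead of a MiniDFSCluster:

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.concurrent.atomic.AtomicInteger;

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

class ClassLifecycleSketchTest {
  private static AtomicInteger sharedCounter;

  @BeforeAll   // was @BeforeClass in JUnit 4; still must be static
  static void setUp() {
    sharedCounter = new AtomicInteger(41);
  }

  @AfterAll    // was @AfterClass in JUnit 4
  static void tearDown() {
    sharedCounter = null;
  }

  @Test
  void sharedStateIsVisible() {
    assertEquals(42, sharedCounter.incrementAndGet());
  }
}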
 
 

+ 14 - 16
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java

@@ -17,9 +17,9 @@
 */
package org.apache.hadoop.hdfs;

-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;
import java.io.InterruptedIOException;
@@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.slf4j.event.Level;

/** Class contains a set of tests to verify the correctness of 
@@ -164,16 +164,15 @@ public class TestHFlush {
      ((DFSOutputStream) stm.getWrappedStream()).hsync(EnumSet
          .of(SyncFlag.END_BLOCK));
      currentFileLength = fileSystem.getFileStatus(path).getLen();
-      assertEquals(preferredBlockSize + preferredBlockSize / 2,
-          currentFileLength);
+      assertEquals(preferredBlockSize + preferredBlockSize / 2, currentFileLength);
      blocks = fileSystem.dfs.getLocatedBlocks(path.toString(), 0);
      assertEquals(2, blocks.getLocatedBlocks().size());

      stm.write(new byte[preferredBlockSize / 4]);
      stm.hsync();
      currentFileLength = fileSystem.getFileStatus(path).getLen();
-      assertEquals(preferredBlockSize + preferredBlockSize / 2
-          + preferredBlockSize / 4, currentFileLength);
+      assertEquals(preferredBlockSize + preferredBlockSize / 2 + preferredBlockSize / 4,
+          currentFileLength);
      blocks = fileSystem.dfs.getLocatedBlocks(path.toString(), 0);
      assertEquals(3, blocks.getLocatedBlocks().size());
    } finally {
@@ -337,9 +336,8 @@ public class TestHFlush {
        // Check file length if updatelength is required
        if (isSync && syncFlags.contains(SyncFlag.UPDATE_LENGTH)) {
          long currentFileLength = fileSystem.getFileStatus(path).getLen();
-          assertEquals(
-            "File size doesn't match for hsync/hflush with updating the length",
-            tenth * (i + 1), currentFileLength);
+          assertEquals(tenth * (i + 1), currentFileLength,
+              "File size doesn't match for hsync/hflush with updating the length");
        } else if (isSync && syncFlags.contains(SyncFlag.END_BLOCK)) {
          LocatedBlocks blocks = fileSystem.dfs.getLocatedBlocks(pathName, 0);
          assertEquals(i + 1, blocks.getLocatedBlocks().size());
@@ -353,7 +351,7 @@ public class TestHFlush {
        is.seek(tenth * i);
        int readBytes = is.read(toRead, 0, tenth);
        System.out.println("Has read " + readBytes);
-        assertTrue("Should've get more bytes", (readBytes > 0) && (readBytes <= tenth));
+        assertTrue((readBytes > 0) && (readBytes <= tenth), "Should've get more bytes");
        is.close();
        checkData(toRead, 0, readBytes, expected, "Partial verification");
      }
@@ -361,7 +359,8 @@ public class TestHFlush {
      stm.write(fileContent, tenth * SECTIONS, rounding);
      stm.close();

-      assertEquals("File size doesn't match ", AppendTestUtil.FILE_SIZE, fileSystem.getFileStatus(path).getLen());
+      assertEquals(AppendTestUtil.FILE_SIZE, fileSystem.getFileStatus(path).getLen(),
+          "File size doesn't match ");
      AppendTestUtil.checkFullFile(fileSystem, path, fileContent.length, fileContent, "hflush()");
    } finally {
      fileSystem.close();
@@ -371,9 +370,8 @@ public class TestHFlush {
  static void checkData(final byte[] actual, int from, int len,
                        final byte[] expected, String message) {
    for (int idx = 0; idx < len; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-                   expected[from+idx]+" actual "+actual[idx],
-                   expected[from+idx], actual[idx]);
+      assertEquals(expected[from + idx], actual[idx], message + " byte " + (from + idx)
+          + " differs. expected " + expected[from + idx] + " actual " + actual[idx]);
      actual[idx] = 0;
    }
  }

+ 30 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java

@@ -17,9 +17,12 @@
 */
package org.apache.hadoop.hdfs;

-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.File;
import java.io.IOException;
@@ -46,10 +49,10 @@ import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.util.Sets;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;

public class TestHdfsAdmin {

@@ -60,7 +63,7 @@ public class TestHdfsAdmin {
  private final Configuration conf = new Configuration();
  private MiniDFSCluster cluster;

-  @Before
+  @BeforeEach
  public void setUpCluster() throws IOException {
    conf.setLong(
        DFSConfigKeys.DFS_NAMENODE_LIST_OPENFILES_NUM_RESPONSES,
@@ -69,7 +72,7 @@ public class TestHdfsAdmin {
    cluster.waitActive();
  }

-  @After
+  @AfterEach
  public void shutDownCluster() {
    if (cluster != null) {
      cluster.shutdown();
@@ -116,9 +119,11 @@ public class TestHdfsAdmin {
  /**
   * Make sure that a non-HDFS URI throws a helpful error.
   */
-  @Test(expected = IllegalArgumentException.class)
+  @Test
  public void testHdfsAdminWithBadUri() throws IOException, URISyntaxException {
-    new HdfsAdmin(new URI("file:///bad-scheme"), conf);
+    assertThrows(IllegalArgumentException.class, () -> {
+      new HdfsAdmin(new URI("file:///bad-scheme"), conf);
+    });
  }

  /**
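
testHdfsAdminWithBadUri above trades @Test(expected = IllegalArgumentException.class) for an explicit assertThrows, which also lets a test inspect the thrown exception. A sketch with a deliberately invalid argument in place of the HdfsAdmin constructor:

import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.util.ArrayList;

import org.junit.jupiter.api.Test;

class ExpectedExceptionSketchTest {

  @Test
  void rejectsNegativeCapacity() {
    // JUnit 4 put the expected type on the @Test annotation; JUnit 5 wraps just
    // the failing call and hands back the exception as a value.
    IllegalArgumentException thrown = assertThrows(IllegalArgumentException.class,
        () -> new ArrayList<String>(-1));
    // The exact message text is JDK-specific, so only its presence is checked here.
    assertNotNull(thrown.getMessage());
  }
}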
@@ -182,9 +187,9 @@ public class TestHdfsAdmin {
       policyNamesSet2.add(policy.getName());
       policyNamesSet2.add(policy.getName());
     }
     }
     // Ensure that we got the same set of policies in both cases.
     // Ensure that we got the same set of policies in both cases.
-    Assert.assertTrue(
+    assertTrue(
         Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
-    Assert.assertTrue(
+    assertTrue(
         Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
   }
 
@@ -200,8 +205,8 @@ public class TestHdfsAdmin {
   @Test
   public void testGetKeyProvider() throws IOException {
     HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
-    Assert.assertNull("should return null for an non-encrypted cluster",
-        hdfsAdmin.getKeyProvider());
+    assertNull(hdfsAdmin.getKeyProvider(),
+        "should return null for an non-encrypted cluster");
 
     shutDownCluster();
 
@@ -213,11 +218,12 @@ public class TestHdfsAdmin {
     cluster.waitActive();
     hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
 
-    Assert.assertNotNull("should not return null for an encrypted cluster",
-        hdfsAdmin.getKeyProvider());
+    assertNotNull(hdfsAdmin.getKeyProvider(),
+        "should not return null for an encrypted cluster");
   }
 
-  @Test(timeout = 120000L)
+  @Test
+  @Timeout(120)
   public void testListOpenFiles() throws IOException {
     HashSet<Path> closedFileSet = new HashSet<>();
     HashMap<Path, FSDataOutputStream> openFileMap = new HashMap<>();
@@ -260,10 +266,10 @@ public class TestHdfsAdmin {
       HashSet<Path> openFiles) throws IOException {
     while (openFilesRemoteItr.hasNext()) {
       String filePath = openFilesRemoteItr.next().getFilePath();
-      assertFalse(filePath + " should not be listed under open files!",
-          closedFiles.contains(new Path(filePath)));
-      assertTrue(filePath + " is not listed under open files!",
-          openFiles.remove(new Path(filePath)));
+      assertFalse(closedFiles.contains(new Path(filePath)),
+          filePath + " should not be listed under open files!");
+      assertTrue(openFiles.remove(new Path(filePath)),
+          filePath + " is not listed under open files!");
     }
   }
 
@@ -275,7 +281,7 @@ public class TestHdfsAdmin {
         hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES),
             OpenFilesIterator.FILTER_PATH_DEFAULT);
     verifyOpenFilesHelper(openFilesRemoteItr, closedFiles, openFiles);
-    assertTrue("Not all open files are listed!", openFiles.isEmpty());
+    assertTrue(openFiles.isEmpty(), "Not all open files are listed!");
   }

   /**
@@ -289,6 +295,6 @@ public class TestHdfsAdmin {
     RemoteIterator<OpenFileEntry> openFilesRemoteItr =
         hdfsAdmin.listOpenFiles(EnumSet.of(OpenFilesType.ALL_OPEN_FILES));
     verifyOpenFilesHelper(openFilesRemoteItr, closedFiles, openFiles);
-    assertTrue("Not all open files are listed!", openFiles.isEmpty());
+    assertTrue(openFiles.isEmpty(), "Not all open files are listed!");
   }
 }

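Two JUnit 5 patterns recur throughout this file and the rest of the change: the assertion message moves from the first argument to the last, and @Test(expected = ...) becomes an explicit assertThrows call. A minimal, self-contained sketch of both patterns; the validate() helper is hypothetical, not taken from the patch:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertThrows;

    import org.junit.jupiter.api.Test;

    public class MigrationPatternSketchTest {

      // Hypothetical helper standing in for whatever a real test exercises.
      private static int validate(String s) {
        if (s == null) {
          throw new IllegalArgumentException("null input");
        }
        return s.length();
      }

      @Test
      public void messageIsNowTheLastArgument() {
        // JUnit 4: assertEquals("length mismatch", 3, validate("abc"));
        assertEquals(3, validate("abc"), "length mismatch");
      }

      @Test
      public void expectedExceptionBecomesAssertThrows() {
        // JUnit 4: @Test(expected = IllegalArgumentException.class)
        assertThrows(IllegalArgumentException.class, () -> validate(null));
      }
    }
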
+ 9 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHttpPolicy.java

@@ -19,14 +19,17 @@ package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
-public final class TestHttpPolicy {
+import static org.junit.jupiter.api.Assertions.assertThrows;
 
-  @Test(expected = HadoopIllegalArgumentException.class)
+public final class TestHttpPolicy {
+  @Test
   public void testInvalidPolicyValue() {
-    Configuration conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "invalid");
-    DFSUtil.getHttpPolicy(conf);
+    assertThrows(HadoopIllegalArgumentException.class, () -> {
+      Configuration conf = new Configuration();
+      conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "invalid");
+      DFSUtil.getHttpPolicy(conf);
+    });
   }
 }

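One thing assertThrows adds over @Test(expected = ...): it returns the thrown exception, so the migrated body above could also inspect it. A sketch only, assuming the corresponding static imports; the patch itself checks nothing beyond the exception type:

    HadoopIllegalArgumentException e =
        assertThrows(HadoopIllegalArgumentException.class, () -> {
          Configuration conf = new Configuration();
          conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "invalid");
          DFSUtil.getHttpPolicy(conf);
        });
    // The exception is now available for further (hypothetical) checks.
    assertNotNull(e.getMessage());
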
+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java

@@ -17,7 +17,7 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 
 import java.io.IOException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
@@ -38,8 +38,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Time;
-import org.junit.Test;
-
+import org.junit.jupiter.api.Test;
 
 
 /**
 /**
  * This class tests the replication and injection of blocks of a DFS file for simulated storage.
  * This class tests the replication and injection of blocks of a DFS file for simulated storage.

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java

@@ -17,8 +17,8 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 import java.io.IOException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
@@ -45,9 +45,9 @@ import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolClientSideTransl
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
 import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 
 /**
 /**
  * Test cases to verify that client side translators correctly implement the
  * Test cases to verify that client side translators correctly implement the
@@ -59,7 +59,7 @@ public class TestIsMethodSupported {
   private static InetSocketAddress nnAddress = null;
   private static InetSocketAddress nnAddress = null;
   private static InetSocketAddress dnAddress = null;
   private static InetSocketAddress dnAddress = null;
   
   
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
   public static void setUp() throws Exception {
     cluster = (new MiniDFSCluster.Builder(conf))
     cluster = (new MiniDFSCluster.Builder(conf))
         .numDataNodes(1).build();
         .numDataNodes(1).build();
@@ -69,7 +69,7 @@ public class TestIsMethodSupported {
                                       dn.getIpcPort());
                                       dn.getIpcPort());
   }
   }
 
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
   public static void tearDown() throws Exception {
     if (cluster != null) {
     if (cluster != null) {
       cluster.shutdown();
       cluster.shutdown();

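The class-level fixtures above follow the general JUnit 5 lifecycle mapping: @BeforeClass/@AfterClass become @BeforeAll/@AfterAll (still static, unless a test opts into @TestInstance(Lifecycle.PER_CLASS), which this change does not), and @Before/@After become @BeforeEach/@AfterEach. A generic sketch of the shape, not the actual test:

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    public class LifecycleSketchTest {

      @BeforeAll            // was @BeforeClass; runs once, must stay static
      public static void startSharedFixture() { }

      @AfterAll             // was @AfterClass; runs once, must stay static
      public static void stopSharedFixture() { }

      @BeforeEach           // was @Before; runs before every test
      public void setUp() { }

      @AfterEach            // was @After; runs after every test
      public void tearDown() { }

      @Test
      public void example() { }
    }
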
+ 12 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestKeyProviderCache.java

@@ -25,8 +25,12 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 public class TestKeyProviderCache {
 public class TestKeyProviderCache {
 
 
@@ -104,35 +108,32 @@ public class TestKeyProviderCache {
         "dummy://foo:bar@test_provider1");
         "dummy://foo:bar@test_provider1");
     KeyProvider keyProvider1 = kpCache.get(conf,
     KeyProvider keyProvider1 = kpCache.get(conf,
         getKeyProviderUriFromConf(conf));
         getKeyProviderUriFromConf(conf));
-    Assert.assertNotNull("Returned Key Provider is null !!", keyProvider1);
+    assertNotNull(keyProvider1, "Returned Key Provider is null !!");
 
 
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
         "dummy://foo:bar@test_provider1");
         "dummy://foo:bar@test_provider1");
     KeyProvider keyProvider2 = kpCache.get(conf,
     KeyProvider keyProvider2 = kpCache.get(conf,
         getKeyProviderUriFromConf(conf));
         getKeyProviderUriFromConf(conf));
 
 
-    Assert.assertTrue("Different KeyProviders returned !!",
-        keyProvider1 == keyProvider2);
+    assertTrue(keyProvider1 == keyProvider2, "Different KeyProviders returned !!");
 
 
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
         "dummy://test_provider3");
         "dummy://test_provider3");
     KeyProvider keyProvider3 = kpCache.get(conf,
     KeyProvider keyProvider3 = kpCache.get(conf,
         getKeyProviderUriFromConf(conf));
         getKeyProviderUriFromConf(conf));
 
 
-    Assert.assertFalse("Same KeyProviders returned !!",
-        keyProvider1 == keyProvider3);
+    assertFalse(keyProvider1 == keyProvider3, "Same KeyProviders returned !!");
 
 
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
         "dummy://hello:there@test_provider1");
         "dummy://hello:there@test_provider1");
     KeyProvider keyProvider4 = kpCache.get(conf,
     KeyProvider keyProvider4 = kpCache.get(conf,
         getKeyProviderUriFromConf(conf));
         getKeyProviderUriFromConf(conf));
 
 
-    Assert.assertFalse("Same KeyProviders returned !!",
-        keyProvider1 == keyProvider4);
+    assertFalse(keyProvider1 == keyProvider4, "Same KeyProviders returned !!");
 
 
     kpCache.invalidateCache();
     kpCache.invalidateCache();
-    Assert.assertEquals("Expected number of closing calls doesn't match",
-        3, DummyKeyProvider.CLOSE_CALL_COUNT);
+    assertEquals(3, DummyKeyProvider.CLOSE_CALL_COUNT,
+        "Expected number of closing calls doesn't match");
   }
   }
 
 
   private URI getKeyProviderUriFromConf(Configuration conf) {
   private URI getKeyProviderUriFromConf(Configuration conf) {

+ 12 - 12
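A side note on the message reordering seen here and throughout the patch: JUnit 5 assertions also accept a Supplier<String> as the final argument, so an expensive failure message is only built when the assertion actually fails. Not used by this change; shown only as a sketch against the variables above:

    assertTrue(keyProvider1 == keyProvider2,
        () -> "Different KeyProviders returned: " + keyProvider1 + " vs " + keyProvider2);
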
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLargeBlock.java

@@ -17,7 +17,7 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 import java.io.IOException;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Arrays;
@@ -30,7 +30,8 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 
 /**
 /**
  * This class tests that blocks can be larger than 2GB
  * This class tests that blocks can be larger than 2GB
@@ -135,13 +136,13 @@ public class TestLargeBlock {
       if (verifyData) {
       if (verifyData) {
         // verify data read
         // verify data read
         if (thisread == readSize) {
         if (thisread == readSize) {
-          assertTrue("file is corrupted at or after byte " +
-              (fileSize - bytesToRead), Arrays.equals(b, compb));
+          assertTrue(Arrays.equals(b, compb),
+              "file is corrupted at or after byte " + (fileSize - bytesToRead));
         } else {
         } else {
           // b was only partially filled by last read
           // b was only partially filled by last read
           for (int k = 0; k < thisread; k++) {
           for (int k = 0; k < thisread; k++) {
-            assertTrue("file is corrupted at or after byte " +
-                (fileSize - bytesToRead), b[k] == compb[k]);
+            assertTrue(b[k] == compb[k],
+                "file is corrupted at or after byte " + (fileSize - bytesToRead));
           }
           }
         }
         }
       }
       }
@@ -160,7 +161,8 @@ public class TestLargeBlock {
    * timeout here.
    * timeout here.
    * @throws IOException in case of errors
    * @throws IOException in case of errors
    */
    */
-  @Test (timeout = 1800000)
+  @Test
+  @Timeout(value = 1800)
   public void testLargeBlockSize() throws IOException {
   public void testLargeBlockSize() throws IOException {
     final long blockSize = 2L * 1024L * 1024L * 1024L + 512L; // 2GB + 512B
     final long blockSize = 2L * 1024L * 1024L * 1024L + 512L; // 2GB + 512B
     runTest(blockSize);
     runTest(blockSize);
@@ -190,8 +192,7 @@ public class TestLargeBlock {
           " blocksize " + blockSize);
           " blocksize " + blockSize);
 
 
       // verify that file exists in FS namespace
       // verify that file exists in FS namespace
-      assertTrue(file1 + " should be a file", 
-                  fs.getFileStatus(file1).isFile());
+      assertTrue(fs.getFileStatus(file1).isFile(), file1 + " should be a file");
 
 
       // write to file
       // write to file
       writeFile(stm, fileSize);
       writeFile(stm, fileSize);
@@ -206,9 +207,8 @@ public class TestLargeBlock {
 
 
       // verify that file size has changed
       // verify that file size has changed
       long len = fs.getFileStatus(file1).getLen();
       long len = fs.getFileStatus(file1).getLen();
-      assertTrue(file1 + " should be of size " +  fileSize +
-                 " but found to be of size " + len, 
-                  len == fileSize);
+      assertTrue(len == fileSize,
+          file1 + " should be of size " + fileSize + " but found to be of size " + len);
 
 
     } finally {
     } finally {
       cluster.shutdown();
       cluster.shutdown();

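The timeout conversion in this file changes units as well as syntax: @Test(timeout = 1800000) is milliseconds, while @Timeout defaults to seconds, hence @Timeout(value = 1800). The unit can also be spelled out explicitly; a small sketch:

    import java.util.concurrent.TimeUnit;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    public class TimeoutSketchTest {

      @Test
      @Timeout(value = 1800)  // seconds by default
      public void defaultUnit() { }

      @Test
      @Timeout(value = 1800000, unit = TimeUnit.MILLISECONDS)  // same limit, explicit unit
      public void explicitUnit() { }
    }
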
+ 62 - 59
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java

@@ -17,6 +17,10 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.ArgumentMatchers.anyShort;
 import static org.mockito.ArgumentMatchers.anyShort;
@@ -50,8 +54,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.mockito.Mockito;
 
 
 public class TestLease {
 public class TestLease {
@@ -109,7 +112,7 @@ public class TestLease {
         d_out.write(buf, 0, 1024);
         d_out.write(buf, 0, 1024);
         LOG.info("Write worked beyond the soft limit as expected.");
         LOG.info("Write worked beyond the soft limit as expected.");
       } catch (IOException e) {
       } catch (IOException e) {
-        Assert.fail("Write failed.");
+        fail("Write failed.");
       }
       }
 
 
       long hardlimit = conf.getLong(DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY,
       long hardlimit = conf.getLong(DFSConfigKeys.DFS_LEASE_HARDLIMIT_KEY,
@@ -122,14 +125,14 @@ public class TestLease {
       try {
       try {
         d_out.write(buf, 0, 1024);
         d_out.write(buf, 0, 1024);
         d_out.close();
         d_out.close();
-        Assert.fail("Write did not fail even after the fatal lease renewal failure");
+        fail("Write did not fail even after the fatal lease renewal failure");
       } catch (IOException e) {
       } catch (IOException e) {
         LOG.info("Write failed as expected. ", e);
         LOG.info("Write failed as expected. ", e);
       }
       }
 
 
       // If aborted, the renewer should be empty. (no reference to clients)
       // If aborted, the renewer should be empty. (no reference to clients)
       Thread.sleep(1000);
       Thread.sleep(1000);
-      Assert.assertTrue(originalRenewer.isEmpty());
+      assertTrue(originalRenewer.isEmpty());
 
 
       // unstub
       // unstub
       doNothing().when(spyNN).renewLease(anyString(), any());
       doNothing().when(spyNN).renewLease(anyString(), any());
@@ -138,12 +141,12 @@ public class TestLease {
       try {
       try {
         int num = c_in.read(buf, 0, 1);
         int num = c_in.read(buf, 0, 1);
         if (num != 1) {
         if (num != 1) {
-          Assert.fail("Failed to read 1 byte");
+          fail("Failed to read 1 byte");
         }
         }
         c_in.close();
         c_in.close();
       } catch (IOException e) {
       } catch (IOException e) {
-         LOG.error("Read failed with ", e);
-         Assert.fail("Read after lease renewal failure failed");
+        LOG.error("Read failed with ", e);
+        fail("Read after lease renewal failure failed");
       }
       }
 
 
       // new file writes should work.
       // new file writes should work.
@@ -153,7 +156,7 @@ public class TestLease {
         c_out.close();
         c_out.close();
       } catch (IOException e) {
       } catch (IOException e) {
         LOG.error("Write failed with ", e);
         LOG.error("Write failed with ", e);
-        Assert.fail("Write failed");
+        fail("Write failed");
       }
       }
     } finally {
     } finally {
       cluster.shutdown();
       cluster.shutdown();
@@ -173,8 +176,8 @@ public class TestLease {
       FSDataOutputStream out = fs.create(p);
       FSDataOutputStream out = fs.create(p);
       out.writeBytes("something");
       out.writeBytes("something");
       //out.hsync();
       //out.hsync();
-      Assert.assertTrue(hasLease(cluster, p));
-      Assert.assertEquals(1, leaseCount(cluster));
+      assertTrue(hasLease(cluster, p));
+      assertEquals(1, leaseCount(cluster));
       
       
       // just to ensure first fs doesn't have any logic to twiddle leases
       // just to ensure first fs doesn't have any logic to twiddle leases
       DistributedFileSystem fs2 = (DistributedFileSystem) FileSystem.newInstance(fs.getUri(), fs.getConf());
       DistributedFileSystem fs2 = (DistributedFileSystem) FileSystem.newInstance(fs.getUri(), fs.getConf());
@@ -184,24 +187,24 @@ public class TestLease {
       Path pRenamed = new Path(d, p.getName());
       Path pRenamed = new Path(d, p.getName());
       fs2.mkdirs(d);
       fs2.mkdirs(d);
       fs2.rename(p, pRenamed);
       fs2.rename(p, pRenamed);
-      Assert.assertFalse(p+" exists", fs2.exists(p));
-      Assert.assertTrue(pRenamed+" not found", fs2.exists(pRenamed));
-      Assert.assertFalse("has lease for "+p, hasLease(cluster, p));
-      Assert.assertTrue("no lease for "+pRenamed, hasLease(cluster, pRenamed));
-      Assert.assertEquals(1, leaseCount(cluster));
+      assertFalse(fs2.exists(p), p + " exists");
+      assertTrue(fs2.exists(pRenamed), pRenamed + " not found");
+      assertFalse(hasLease(cluster, p), "has lease for " + p);
+      assertTrue(hasLease(cluster, pRenamed), "no lease for " + pRenamed);
+      assertEquals(1, leaseCount(cluster));
     
     
       // rename the parent dir to a new non-existent dir
       // rename the parent dir to a new non-existent dir
       LOG.info("DMS: rename parent dir");
       LOG.info("DMS: rename parent dir");
       Path pRenamedAgain = new Path(d2, pRenamed.getName());
       Path pRenamedAgain = new Path(d2, pRenamed.getName());
       fs2.rename(d, d2);
       fs2.rename(d, d2);
       // src gone
       // src gone
-      Assert.assertFalse(d+" exists", fs2.exists(d));
-      Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
+      assertFalse(fs2.exists(d), d + " exists");
+      assertFalse(hasLease(cluster, pRenamed), "has lease for " + pRenamed);
       // dst checks
       // dst checks
-      Assert.assertTrue(d2+" not found", fs2.exists(d2));
-      Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
-      Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
-      Assert.assertEquals(1, leaseCount(cluster));
+      assertTrue(fs2.exists(d2), d2 + " not found");
+      assertTrue(fs2.exists(pRenamedAgain), pRenamedAgain + " not found");
+      assertTrue(hasLease(cluster, pRenamedAgain), "no lease for " + pRenamedAgain);
+      assertEquals(1, leaseCount(cluster));
 
 
       // rename the parent dir to existing dir
       // rename the parent dir to existing dir
       // NOTE: rename w/o options moves paths into existing dir
       // NOTE: rename w/o options moves paths into existing dir
@@ -211,26 +214,26 @@ public class TestLease {
       fs2.mkdirs(d);
       fs2.mkdirs(d);
       fs2.rename(d2, d);
       fs2.rename(d2, d);
       // src gone
       // src gone
-      Assert.assertFalse(d2+" exists", fs2.exists(d2));
-      Assert.assertFalse("no lease for "+pRenamed, hasLease(cluster, pRenamed));
+      assertFalse(fs2.exists(d2), d2 + " exists");
+      assertFalse(hasLease(cluster, pRenamed), "no lease for " + pRenamed);
       // dst checks
       // dst checks
-      Assert.assertTrue(d+" not found", fs2.exists(d));
-      Assert.assertTrue(pRenamedAgain +" not found", fs2.exists(pRenamedAgain));
-      Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
-      Assert.assertEquals(1, leaseCount(cluster));
+      assertTrue(fs2.exists(d), d + " not found");
+      assertTrue(fs2.exists(pRenamedAgain), pRenamedAgain + " not found");
+      assertTrue(hasLease(cluster, pRenamedAgain), "no lease for " + pRenamedAgain);
+      assertEquals(1, leaseCount(cluster));
       
       
       // rename with opts to non-existent dir
       // rename with opts to non-existent dir
       pRenamed = pRenamedAgain;
       pRenamed = pRenamedAgain;
       pRenamedAgain = new Path(d2, p.getName());
       pRenamedAgain = new Path(d2, p.getName());
       fs2.rename(pRenamed.getParent(), d2, Options.Rename.OVERWRITE);
       fs2.rename(pRenamed.getParent(), d2, Options.Rename.OVERWRITE);
       // src gone
       // src gone
-      Assert.assertFalse(pRenamed.getParent() +" not found", fs2.exists(pRenamed.getParent()));
-      Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
+      assertFalse(fs2.exists(pRenamed.getParent()), pRenamed.getParent() + " not found");
+      assertFalse(hasLease(cluster, pRenamed), "has lease for " + pRenamed);
       // dst checks
       // dst checks
-      Assert.assertTrue(d2+" not found", fs2.exists(d2));
-      Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
-      Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
-      Assert.assertEquals(1, leaseCount(cluster));
+      assertTrue(fs2.exists(d2), d2 + " not found");
+      assertTrue(fs2.exists(pRenamedAgain), pRenamedAgain + " not found");
+      assertTrue(hasLease(cluster, pRenamedAgain), "no lease for " + pRenamedAgain);
+      assertEquals(1, leaseCount(cluster));
 
 
       // rename with opts to existing dir
       // rename with opts to existing dir
       // NOTE: rename with options will not move paths into the existing dir
       // NOTE: rename with options will not move paths into the existing dir
@@ -238,13 +241,13 @@ public class TestLease {
       pRenamedAgain = new Path(d, p.getName());
       pRenamedAgain = new Path(d, p.getName());
       fs2.rename(pRenamed.getParent(), d, Options.Rename.OVERWRITE);
       fs2.rename(pRenamed.getParent(), d, Options.Rename.OVERWRITE);
       // src gone
       // src gone
-      Assert.assertFalse(pRenamed.getParent() +" not found", fs2.exists(pRenamed.getParent()));
-      Assert.assertFalse("has lease for "+pRenamed, hasLease(cluster, pRenamed));
+      assertFalse(fs2.exists(pRenamed.getParent()), pRenamed.getParent() + " not found");
+      assertFalse(hasLease(cluster, pRenamed), "has lease for " + pRenamed);
       // dst checks
       // dst checks
-      Assert.assertTrue(d+" not found", fs2.exists(d));
-      Assert.assertTrue(pRenamedAgain+" not found", fs2.exists(pRenamedAgain));
-      Assert.assertTrue("no lease for "+pRenamedAgain, hasLease(cluster, pRenamedAgain));
-      Assert.assertEquals(1, leaseCount(cluster));
+      assertTrue(fs2.exists(d), d + " not found");
+      assertTrue(fs2.exists(pRenamedAgain), pRenamedAgain + " not found");
+      assertTrue(hasLease(cluster, pRenamedAgain), "no lease for " + pRenamedAgain);
+      assertEquals(1, leaseCount(cluster));
       out.close();
       out.close();
     } finally {
     } finally {
       cluster.shutdown();
       cluster.shutdown();
@@ -270,8 +273,8 @@ public class TestLease {
       FileSystem fs = cluster.getFileSystem();
       FileSystem fs = cluster.getFileSystem();
       FSDataOutputStream out1 = fs.create(path1);
       FSDataOutputStream out1 = fs.create(path1);
       out1.writeBytes(contents1);
       out1.writeBytes(contents1);
-      Assert.assertTrue(hasLease(cluster, path1));
-      Assert.assertEquals(1, leaseCount(cluster));
+      assertTrue(hasLease(cluster, path1));
+      assertEquals(1, leaseCount(cluster));
 
 
       DistributedFileSystem fs2 = (DistributedFileSystem)
       DistributedFileSystem fs2 = (DistributedFileSystem)
           FileSystem.newInstance(fs.getUri(), fs.getConf());
           FileSystem.newInstance(fs.getUri(), fs.getConf());
@@ -282,14 +285,14 @@ public class TestLease {
       out2.close();
       out2.close();
 
 
       // The first file should still be open and valid
       // The first file should still be open and valid
-      Assert.assertTrue(hasLease(cluster, path2));
+      assertTrue(hasLease(cluster, path2));
       out1.close();
       out1.close();
 
 
       // Contents should be as expected
       // Contents should be as expected
       DistributedFileSystem fs3 = (DistributedFileSystem)
       DistributedFileSystem fs3 = (DistributedFileSystem)
           FileSystem.newInstance(fs.getUri(), fs.getConf());
           FileSystem.newInstance(fs.getUri(), fs.getConf());
-      Assert.assertEquals(contents1, DFSTestUtil.readFile(fs3, path2));
-      Assert.assertEquals(contents2, DFSTestUtil.readFile(fs3, path1));
+      assertEquals(contents1, DFSTestUtil.readFile(fs3, path2));
+      assertEquals(contents2, DFSTestUtil.readFile(fs3, path1));
     } finally {
     } finally {
       cluster.shutdown();
       cluster.shutdown();
     }
     }
@@ -300,7 +303,7 @@ public class TestLease {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     try {
     try {
       FileSystem fs = cluster.getFileSystem();
       FileSystem fs = cluster.getFileSystem();
-      Assert.assertTrue(fs.mkdirs(dir));
+      assertTrue(fs.mkdirs(dir));
       
       
       Path a = new Path(dir, "a");
       Path a = new Path(dir, "a");
       Path b = new Path(dir, "b");
       Path b = new Path(dir, "b");
@@ -308,30 +311,30 @@ public class TestLease {
       DataOutputStream a_out = fs.create(a);
       DataOutputStream a_out = fs.create(a);
       a_out.writeBytes("something");
       a_out.writeBytes("something");
 
 
-      Assert.assertTrue(hasLease(cluster, a));
-      Assert.assertTrue(!hasLease(cluster, b));
+      assertTrue(hasLease(cluster, a));
+      assertTrue(!hasLease(cluster, b));
       
       
       DataOutputStream b_out = fs.create(b);
       DataOutputStream b_out = fs.create(b);
       b_out.writeBytes("something");
       b_out.writeBytes("something");
 
 
-      Assert.assertTrue(hasLease(cluster, a));
-      Assert.assertTrue(hasLease(cluster, b));
+      assertTrue(hasLease(cluster, a));
+      assertTrue(hasLease(cluster, b));
 
 
       a_out.close();
       a_out.close();
       b_out.close();
       b_out.close();
 
 
-      Assert.assertTrue(!hasLease(cluster, a));
-      Assert.assertTrue(!hasLease(cluster, b));
+      assertTrue(!hasLease(cluster, a));
+      assertTrue(!hasLease(cluster, b));
 
 
       Path fileA = new Path(dir, "fileA");
       Path fileA = new Path(dir, "fileA");
       FSDataOutputStream fileA_out = fs.create(fileA);
       FSDataOutputStream fileA_out = fs.create(fileA);
       fileA_out.writeBytes("something");
       fileA_out.writeBytes("something");
-      Assert.assertTrue("Failed to get the lease!", hasLease(cluster, fileA));
+      assertTrue(hasLease(cluster, fileA), "Failed to get the lease!");
 
 
       fs.delete(dir, true);
       fs.delete(dir, true);
       try {
       try {
         fileA_out.hflush();
         fileA_out.hflush();
-        Assert.fail("Should validate file existence!");
+        fail("Should validate file existence!");
       } catch (FileNotFoundException e) {
       } catch (FileNotFoundException e) {
         // expected
         // expected
         GenericTestUtils.assertExceptionContains("File does not exist", e);
         GenericTestUtils.assertExceptionContains("File does not exist", e);
@@ -381,17 +384,17 @@ public class TestLease {
     FSDataOutputStream out1 = createFsOut(c1, "/out1");
     FSDataOutputStream out1 = createFsOut(c1, "/out1");
     final DFSClient c2 = createDFSClientAs(ugi[0], conf);
     final DFSClient c2 = createDFSClientAs(ugi[0], conf);
     FSDataOutputStream out2 = createFsOut(c2, "/out2");
     FSDataOutputStream out2 = createFsOut(c2, "/out2");
-    Assert.assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer());
+    assertEquals(c1.getLeaseRenewer(), c2.getLeaseRenewer());
     final DFSClient c3 = createDFSClientAs(ugi[1], conf);
     final DFSClient c3 = createDFSClientAs(ugi[1], conf);
     FSDataOutputStream out3 = createFsOut(c3, "/out3");
     FSDataOutputStream out3 = createFsOut(c3, "/out3");
-    Assert.assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
+    assertTrue(c1.getLeaseRenewer() != c3.getLeaseRenewer());
     final DFSClient c4 = createDFSClientAs(ugi[1], conf);
     final DFSClient c4 = createDFSClientAs(ugi[1], conf);
     FSDataOutputStream out4 = createFsOut(c4, "/out4");
     FSDataOutputStream out4 = createFsOut(c4, "/out4");
-    Assert.assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer());
+    assertEquals(c3.getLeaseRenewer(), c4.getLeaseRenewer());
     final DFSClient c5 = createDFSClientAs(ugi[2], conf);
     final DFSClient c5 = createDFSClientAs(ugi[2], conf);
     FSDataOutputStream out5 = createFsOut(c5, "/out5");
     FSDataOutputStream out5 = createFsOut(c5, "/out5");
-    Assert.assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
-    Assert.assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
+    assertTrue(c1.getLeaseRenewer() != c5.getLeaseRenewer());
+    assertTrue(c3.getLeaseRenewer() != c5.getLeaseRenewer());
   }
   }
   
   
   private FSDataOutputStream createFsOut(DFSClient dfs, String path) 
   private FSDataOutputStream createFsOut(DFSClient dfs, String path) 

+ 12 - 11
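TestLease keeps its try { ... fail(...) } catch (IOException e) blocks, which work unchanged under JUnit 5; where only the exception type matters, assertThrows is the more direct equivalent. A sketch of the same idea against the stream used above, not a change made by this patch:

    // Equivalent to: try { write; close; fail(...); } catch (IOException e) { /* expected */ }
    assertThrows(IOException.class, () -> {
      d_out.write(buf, 0, 1024);
      d_out.close();
    });
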
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery.java

@@ -16,11 +16,12 @@
  * limitations under the License.
  * limitations under the License.
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 
 import java.io.IOException;
 import java.io.IOException;
 import java.util.EnumSet;
 import java.util.EnumSet;
@@ -57,8 +58,8 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DataChecksum;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 
 public class TestLeaseRecovery {
 public class TestLeaseRecovery {
   static final int BLOCK_SIZE = 1024;
   static final int BLOCK_SIZE = 1024;
@@ -67,7 +68,7 @@ public class TestLeaseRecovery {
 
 
   private MiniDFSCluster cluster;
   private MiniDFSCluster cluster;
 
 
-  @After
+  @AfterEach
   public void shutdown() throws IOException {
   public void shutdown() throws IOException {
     if (cluster != null) {
     if (cluster != null) {
       cluster.shutdown();
       cluster.shutdown();
@@ -172,7 +173,7 @@ public class TestLeaseRecovery {
     waitLeaseRecovery(cluster);
     waitLeaseRecovery(cluster);
     // verify that we still cannot recover the lease
     // verify that we still cannot recover the lease
     LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
     LeaseManager lm = NameNodeAdapter.getLeaseManager(cluster.getNamesystem());
-    assertTrue("Found " + lm.countLease() + " lease, expected 1", lm.countLease() == 1);
+    assertTrue(lm.countLease() == 1, "Found " + lm.countLease() + " lease, expected 1");
     cluster.getNameNodeRpc().setSafeMode(
     cluster.getNameNodeRpc().setSafeMode(
         HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
         HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);
   }
   }
@@ -232,7 +233,7 @@ public class TestLeaseRecovery {
     while (++count < 10 && !newdfs.recoverLease(file)) {
     while (++count < 10 && !newdfs.recoverLease(file)) {
       Thread.sleep(1000);
       Thread.sleep(1000);
     }
     }
-    assertTrue("File should be closed", newdfs.recoverLease(file));
+    assertTrue(newdfs.recoverLease(file), "File should be closed");
 
 
     // Verify file length after lease recovery. The new file length should not
     // Verify file length after lease recovery. The new file length should not
     // include the bytes with corrupted checksum.
     // include the bytes with corrupted checksum.
@@ -282,7 +283,7 @@ public class TestLeaseRecovery {
       Thread.sleep(1000);
       Thread.sleep(1000);
     }
     }
     // The lease should have been recovered.
     // The lease should have been recovered.
-    assertTrue("File should be closed", newDfs.recoverLease(file));
+    assertTrue(newDfs.recoverLease(file), "File should be closed");
   }
   }
 
 
   /**
   /**

+ 30 - 29
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java

@@ -17,10 +17,10 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.spy;
 
 
@@ -53,10 +53,10 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.mockito.Mockito;
 import org.mockito.Mockito;
 import org.slf4j.event.Level;
 import org.slf4j.event.Level;
 
 
@@ -92,7 +92,7 @@ public class TestLeaseRecovery2 {
    * 
    * 
    * @throws IOException
    * @throws IOException
    */
    */
-  @Before
+  @BeforeEach
   public void startUp() throws IOException {
   public void startUp() throws IOException {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
@@ -109,7 +109,7 @@ public class TestLeaseRecovery2 {
    * stop the cluster
    * stop the cluster
    * @throws IOException
    * @throws IOException
    */
    */
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
   public void tearDown() throws IOException {
     if (cluster != null) {
     if (cluster != null) {
       IOUtils.closeStream(dfs);
       IOUtils.closeStream(dfs);
@@ -225,8 +225,7 @@ public class TestLeaseRecovery2 {
     }
     }
 
 
     stm.close();
     stm.close();
-    assertEquals(cluster.getNamesystem().getBlockManager().
-        getMissingBlocksCount(), 0);
+    assertEquals(cluster.getNamesystem().getBlockManager().getMissingBlocksCount(), 0);
   }
   }
 
 
   @Test
   @Test
@@ -321,10 +320,10 @@ public class TestLeaseRecovery2 {
         + "Validating its contents now...");
         + "Validating its contents now...");
 
 
     // verify that file-size matches
     // verify that file-size matches
-    assertTrue("File should be " + size + " bytes, but is actually " +
-               " found to be " + dfs.getFileStatus(filepath).getLen() +
-               " bytes",
-               dfs.getFileStatus(filepath).getLen() == size);
+    assertTrue(dfs.getFileStatus(filepath).getLen() == size,
+        "File should be " + size + " bytes, but is actually " +
+            " found to be " + dfs.getFileStatus(filepath).getLen() +
+            " bytes");
 
 
     // verify that there is enough data to read.
     // verify that there is enough data to read.
     System.out.println("File size is good. Now validating sizes from datanodes...");
     System.out.println("File size is good. Now validating sizes from datanodes...");
@@ -471,8 +470,8 @@ public class TestLeaseRecovery2 {
 
 
     // verify that file-size matches
     // verify that file-size matches
     long fileSize = dfs.getFileStatus(filepath).getLen();
     long fileSize = dfs.getFileStatus(filepath).getLen();
-    assertTrue("File should be " + size + " bytes, but is actually " +
-        " found to be " + fileSize + " bytes", fileSize == size);
+    assertTrue(fileSize == size, "File should be " + size + " bytes, but is actually "
+        + " found to be " + fileSize + " bytes");
 
 
     // verify data
     // verify data
     AppendTestUtil.LOG.info("File size is good. " +
     AppendTestUtil.LOG.info("File size is good. " +
@@ -493,17 +492,20 @@ public class TestLeaseRecovery2 {
    * 
    * 
    * @throws Exception
    * @throws Exception
    */
    */
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception {
   public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception {
     hardLeaseRecoveryRestartHelper(false, -1);
     hardLeaseRecoveryRestartHelper(false, -1);
   }
   }
 
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testHardLeaseRecoveryAfterNameNodeRestart2() throws Exception {
   public void testHardLeaseRecoveryAfterNameNodeRestart2() throws Exception {
     hardLeaseRecoveryRestartHelper(false, 1535);
     hardLeaseRecoveryRestartHelper(false, 1535);
   }
   }
 
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testHardLeaseRecoveryWithRenameAfterNameNodeRestart()
   public void testHardLeaseRecoveryWithRenameAfterNameNodeRestart()
       throws Exception {
       throws Exception {
     hardLeaseRecoveryRestartHelper(true, -1);
     hardLeaseRecoveryRestartHelper(true, -1);
@@ -530,9 +532,8 @@ public class TestLeaseRecovery2 {
     String originalLeaseHolder = NameNodeAdapter.getLeaseHolderForPath(
     String originalLeaseHolder = NameNodeAdapter.getLeaseHolderForPath(
         cluster.getNameNode(), fileStr);
         cluster.getNameNode(), fileStr);
     
     
-    assertFalse("original lease holder should not be the NN",
-        originalLeaseHolder.startsWith(
-        HdfsServerConstants.NAMENODE_LEASE_HOLDER));
+    assertFalse(originalLeaseHolder.startsWith(HdfsServerConstants.NAMENODE_LEASE_HOLDER),
+        "original lease holder should not be the NN");
 
 
     // hflush file
     // hflush file
     AppendTestUtil.LOG.info("hflush");
     AppendTestUtil.LOG.info("hflush");
@@ -540,7 +541,7 @@ public class TestLeaseRecovery2 {
     
     
     // check visible length
     // check visible length
     final HdfsDataInputStream in = (HdfsDataInputStream)dfs.open(filePath);
     final HdfsDataInputStream in = (HdfsDataInputStream)dfs.open(filePath);
-    Assert.assertEquals(size, in.getVisibleLength());
+    assertEquals(size, in.getVisibleLength());
     in.close();
     in.close();
     
     
     if (doRename) {
     if (doRename) {
@@ -628,12 +629,12 @@ public class TestLeaseRecovery2 {
   
   
   static void checkLease(String f, int size) {
   static void checkLease(String f, int size) {
     final String holder = NameNodeAdapter.getLeaseHolderForPath(
     final String holder = NameNodeAdapter.getLeaseHolderForPath(
-        cluster.getNameNode(), f); 
+        cluster.getNameNode(), f);
     if (size == 0) {
     if (size == 0) {
-      assertEquals("lease holder should null, file is closed", null, holder);
+      assertEquals(null, holder, "lease holder should null, file is closed");
     } else {
     } else {
-      assertTrue("lease holder should now be the NN",
-          holder.startsWith(HdfsServerConstants.NAMENODE_LEASE_HOLDER));
+      assertTrue(holder.startsWith(HdfsServerConstants.NAMENODE_LEASE_HOLDER),
+          "lease holder should now be the NN");
     }
     }
     
     
   }
   }

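In checkLease above, the JUnit 4 call assertEquals(msg, null, holder) becomes assertEquals(null, holder, msg). assertNull states the same expectation more directly; as a sketch only (it would need its own static import), not part of the patch:

    assertNull(holder, "lease holder should be null, file is closed");
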
+ 12 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java

@@ -39,10 +39,9 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 import org.slf4j.event.Level;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
@@ -59,6 +58,9 @@ import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.TimeoutException;
 
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
+
 public class TestLeaseRecoveryStriped {
 public class TestLeaseRecoveryStriped {
   public static final Logger LOG = LoggerFactory
   public static final Logger LOG = LoggerFactory
       .getLogger(TestLeaseRecoveryStriped.class);
       .getLogger(TestLeaseRecoveryStriped.class);
@@ -91,7 +93,7 @@ public class TestLeaseRecoveryStriped {
   final Path p = new Path(dir, "testfile");
   final Path p = new Path(dir, "testfile");
   private final int testFileLength = (stripesPerBlock - 1) * stripeSize;
   private final int testFileLength = (stripesPerBlock - 1) * stripeSize;
 
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
   public void setup() throws IOException {
     conf = new HdfsConfiguration();
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
@@ -107,7 +109,7 @@ public class TestLeaseRecoveryStriped {
     dfs.setErasureCodingPolicy(dir, ecPolicy.getName());
     dfs.setErasureCodingPolicy(dir, ecPolicy.getName());
   }
   }
 
 
-  @After
+  @AfterEach
   public void tearDown() {
   public void tearDown() {
     if (cluster != null) {
     if (cluster != null) {
       cluster.shutdown();
       cluster.shutdown();
@@ -187,7 +189,7 @@ public class TestLeaseRecoveryStriped {
         String msg = "failed testCase at i=" + i + ", blockLengths="
         String msg = "failed testCase at i=" + i + ", blockLengths="
             + blockLengths + "\n"
             + blockLengths + "\n"
             + StringUtils.stringifyException(e);
             + StringUtils.stringifyException(e);
-        Assert.fail(msg);
+        fail(msg);
       }
       }
     }
     }
   }
   }
@@ -243,7 +245,7 @@ public class TestLeaseRecoveryStriped {
         String msg = "failed testCase at i=" + i + ", blockLengths="
         String msg = "failed testCase at i=" + i + ", blockLengths="
             + blockLengths + "\n"
             + blockLengths + "\n"
             + StringUtils.stringifyException(e);
             + StringUtils.stringifyException(e);
-        Assert.fail(msg);
+        fail(msg);
       }
       }
     }
     }
   }
   }
@@ -284,7 +286,7 @@ public class TestLeaseRecoveryStriped {
       LOG.info("Trigger recover lease manually successfully.");
       LOG.info("Trigger recover lease manually successfully.");
     } catch (Throwable e) {
     } catch (Throwable e) {
       String msg = "failed testCase" + StringUtils.stringifyException(e);
       String msg = "failed testCase" + StringUtils.stringifyException(e);
-      Assert.fail(msg);
+      fail(msg);
     }
     }
   }
   }
 
 
@@ -292,7 +294,7 @@ public class TestLeaseRecoveryStriped {
     int[] blockLengths = new int[]{blockLength, blockLength, blockLength, blockLength,
     int[] blockLengths = new int[]{blockLength, blockLength, blockLength, blockLength,
         blockLength, blockLength};
         blockLength, blockLength};
     long safeLength = new BlockLengths(ecPolicy, blockLengths).getSafeLength();
     long safeLength = new BlockLengths(ecPolicy, blockLengths).getSafeLength();
-    Assert.assertEquals(expectedSafeLength, safeLength);
+    assertEquals(expectedSafeLength, safeLength);
   }
   }
 
 
   private void runTest(int[] blockLengths, long safeLength) throws Exception {
   private void runTest(int[] blockLengths, long safeLength) throws Exception {

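The fail(msg) calls above keep the JUnit 4 habit of folding the stack trace into the message with StringUtils.stringifyException. JUnit 5 also provides fail(String message, Throwable cause), which attaches the original exception directly; a hypothetical alternative, not what the patch does:

    } catch (Throwable e) {
      // Let JUnit report the cause itself instead of stringifying it into the message.
      fail("failed testCase at i=" + i + ", blockLengths=" + blockLengths, e);
    }
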
+ 10 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestListFilesInFileContext.java

@@ -17,9 +17,9 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 import java.io.IOException;
 import java.io.IOException;
 import java.util.EnumSet;
 import java.util.EnumSet;
@@ -34,10 +34,10 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 
 /**
 /**
  * This class tests the FileStatus API.
  * This class tests the FileStatus API.
@@ -56,7 +56,7 @@ public class TestListFilesInFileContext {
   final private static Path FILE2 = new Path(DIR1, "file2");
   final private static Path FILE2 = new Path(DIR1, "file2");
   final private static Path FILE3 = new Path(DIR1, "file3");
   final private static Path FILE3 = new Path(DIR1, "file3");
 
 
-  @BeforeClass
+  @BeforeAll
   public static void testSetUp() throws Exception {
   public static void testSetUp() throws Exception {
     cluster = new MiniDFSCluster.Builder(conf).build();
     cluster = new MiniDFSCluster.Builder(conf).build();
     fc = FileContext.getFileContext(cluster.getConfiguration(0));
     fc = FileContext.getFileContext(cluster.getConfiguration(0));
@@ -75,7 +75,7 @@ public class TestListFilesInFileContext {
     stm.close();
     stm.close();
   }
   }
   
   
-  @AfterClass
+  @AfterAll
   public static void testShutdown() throws Exception {
   public static void testShutdown() throws Exception {
     if (cluster != null) {
     if (cluster != null) {
       cluster.shutdown();
       cluster.shutdown();
@@ -106,7 +106,7 @@ public class TestListFilesInFileContext {
     assertEquals(1, stat.getBlockLocations().length);
     assertEquals(1, stat.getBlockLocations().length);
   }
   }
 
 
-  @After
+  @AfterEach
   public void cleanDir() throws IOException {
   public void cleanDir() throws IOException {
     fc.delete(TEST_DIR, true);
     fc.delete(TEST_DIR, true);
   }
   }

+ 8 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java

@@ -17,8 +17,8 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 import java.io.DataInputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.DataOutputStream;
@@ -28,7 +28,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 
 /**
 /**
  * This class tests the DFS class via the FileSystem interface in a single node
  * This class tests the DFS class via the FileSystem interface in a single node
@@ -66,7 +67,8 @@ public class TestLocalDFS {
   /**
   /**
    * Tests get/set working directory in DFS.
    * Tests get/set working directory in DFS.
    */
    */
-  @Test(timeout=20000)
+  @Test
+  @Timeout(value = 20)
   public void testWorkingDirectory() throws IOException {
   public void testWorkingDirectory() throws IOException {
     Configuration conf = new HdfsConfiguration();
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
@@ -106,7 +108,8 @@ public class TestLocalDFS {
   /**
   /**
    * Tests get/set working directory in DFS.
    * Tests get/set working directory in DFS.
    */
    */
-  @Test(timeout=30000)
+  @Test
+  @Timeout(value = 30)
   public void testHomeDirectory() throws IOException {
   public void testHomeDirectory() throws IOException {
     final String[] homeBases = new String[] {"/home", "/home/user"};
     final String[] homeBases = new String[] {"/home", "/home/user"};
     Configuration conf = new HdfsConfiguration();
     Configuration conf = new HdfsConfiguration();

+ 14 - 15
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocatedBlocksRefresher.java

@@ -19,14 +19,14 @@ package org.apache.hadoop.hdfs;
 
 
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_REFRESH_READ_BLOCK_LOCATIONS_MS_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_REFRESH_READ_BLOCK_LOCATIONS_MS_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNotSame;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNotSame;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 
 import java.io.IOException;
 import java.io.IOException;
 import java.util.List;
 import java.util.List;
@@ -42,9 +42,9 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -64,7 +64,7 @@ public class TestLocatedBlocksRefresher {
   private MiniDFSCluster cluster;
   private Configuration conf;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     cluster = null;
     conf = new HdfsConfiguration();
@@ -79,7 +79,7 @@ public class TestLocatedBlocksRefresher {
         dfsClientPrefetchSize);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown(true, true);
@@ -122,8 +122,7 @@ public class TestLocatedBlocksRefresher {
 
 
     try (DFSInputStream fin = client.open(fileName)) {
       LocatedBlocks locatedBlocks = fin.locatedBlocks;
-      assertEquals(dfsClientPrefetchSize / BLOCK_SIZE,
-          locatedBlocks.locatedBlockCount());
+      assertEquals(dfsClientPrefetchSize / BLOCK_SIZE, locatedBlocks.locatedBlockCount());
 
 
       // should not be tracked yet
       assertFalse(refresher.isInputStreamTracked(fin));
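
The refresher test above shows the standard lifecycle rename in this patch: @Before/@After become @BeforeEach/@AfterEach, and the assertion static imports move from org.junit.Assert to org.junit.jupiter.api.Assertions. A self-contained sketch of the target shape, with invented names:

```java
import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

public class LifecycleMigrationSketch {

  private StringBuilder fixture;

  @BeforeEach   // was @Before in JUnit 4
  public void setUp() {
    fixture = new StringBuilder("ready");
  }

  @AfterEach    // was @After in JUnit 4
  public void tearDown() {
    fixture = null;
  }

  @Test
  public void eachTestGetsAFreshFixture() {
    // assertEquals now resolves to org.junit.jupiter.api.Assertions
    assertEquals("ready", fixture.toString());
  }
}
```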

+ 36 - 46
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java

@@ -17,14 +17,10 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import static org.hamcrest.CoreMatchers.allOf;
-import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.not;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.assertj.core.api.Assertions.assertThat;
 
 
 import java.io.ByteArrayOutputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.IOException;
@@ -56,8 +52,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 
 
@@ -344,16 +339,16 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
     DFSTestUtil.waitForDatanodeState(
     DFSTestUtil.waitForDatanodeState(
         getCluster(), nodeOutofService.getDatanodeUuid(), false, 20000);
         getCluster(), nodeOutofService.getDatanodeUuid(), false, 20000);
     DFSClient client = getDfsClient(0);
     DFSClient client = getDfsClient(0);
-    assertEquals("maintenance node shouldn't be live", numDatanodes - 1,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes - 1, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "maintenance node shouldn't be live");
     assertEquals(1, ns.getNumEnteringMaintenanceDataNodes());
     assertEquals(1, ns.getNumEnteringMaintenanceDataNodes());
 
 
     getCluster().restartDataNode(dnProp, true);
     getCluster().restartDataNode(dnProp, true);
     getCluster().waitActive();
     getCluster().waitActive();
     waitNodeState(nodeOutofService, AdminStates.ENTERING_MAINTENANCE);
     waitNodeState(nodeOutofService, AdminStates.ENTERING_MAINTENANCE);
     assertEquals(1, ns.getNumEnteringMaintenanceDataNodes());
     assertEquals(1, ns.getNumEnteringMaintenanceDataNodes());
-    assertEquals("maintenance node should be live", numDatanodes,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "maintenance node should be live");
 
 
     cleanupFile(fileSys, file);
     cleanupFile(fileSys, file);
   }
   }
@@ -479,7 +474,7 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
     int fileBlockReplication = maintenanceMinRepl + 1;
     int fileBlockReplication = maintenanceMinRepl + 1;
     int numAddedDataNodes = 1;
     int numAddedDataNodes = 1;
     int numInitialDataNodes = (maintenanceMinRepl * 2 - numAddedDataNodes);
     int numInitialDataNodes = (maintenanceMinRepl * 2 - numAddedDataNodes);
-    Assert.assertTrue(maintenanceMinRepl <= defaultReplication);
+    assertTrue(maintenanceMinRepl <= defaultReplication);
     testFileBlockReplicationImpl(maintenanceMinRepl,
     testFileBlockReplicationImpl(maintenanceMinRepl,
         numInitialDataNodes, numAddedDataNodes, fileBlockReplication);
         numInitialDataNodes, numAddedDataNodes, fileBlockReplication);
 
 
@@ -557,8 +552,8 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
         AdminStates.IN_MAINTENANCE);
         AdminStates.IN_MAINTENANCE);
 
 
     DFSClient client = getDfsClient(0);
     DFSClient client = getDfsClient(0);
-    assertEquals("All datanodes must be alive", numDatanodes,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "All datanodes must be alive");
 
 
     // test 1, verify the replica in IN_MAINTENANCE state isn't in LocatedBlock
     // test 1, verify the replica in IN_MAINTENANCE state isn't in LocatedBlock
     checkWithRetry(ns, fileSys, file, replicas - 1,
     checkWithRetry(ns, fileSys, file, replicas - 1,
@@ -784,14 +779,14 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
         nodeOutofService);
         nodeOutofService);
 
 
     final DFSClient client = getDfsClient(0);
     final DFSClient client = getDfsClient(0);
-    assertEquals("All datanodes must be alive", numDatanodes,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "All datanodes must be alive");
 
 
     getCluster().stopDataNode(nodeOutofService.getXferAddr());
     getCluster().stopDataNode(nodeOutofService.getXferAddr());
     DFSTestUtil.waitForDatanodeState(
     DFSTestUtil.waitForDatanodeState(
         getCluster(), nodeOutofService.getDatanodeUuid(), false, 20000);
         getCluster(), nodeOutofService.getDatanodeUuid(), false, 20000);
-    assertEquals("maintenance node shouldn't be alive", numDatanodes - 1,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes - 1, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "maintenance node shouldn't be alive");
 
 
     // Dead maintenance node's blocks should remain in block map.
     // Dead maintenance node's blocks should remain in block map.
     checkWithRetry(ns, fileSys, file, replicas - 1,
     checkWithRetry(ns, fileSys, file, replicas - 1,
@@ -840,15 +835,15 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
         nodeOutofService);
         nodeOutofService);
 
 
     DFSClient client = getDfsClient(0);
     DFSClient client = getDfsClient(0);
-    assertEquals("All datanodes must be alive", numDatanodes,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "All datanodes must be alive");
 
 
     MiniDFSCluster.DataNodeProperties dnProp =
     MiniDFSCluster.DataNodeProperties dnProp =
         getCluster().stopDataNode(nodeOutofService.getXferAddr());
         getCluster().stopDataNode(nodeOutofService.getXferAddr());
     DFSTestUtil.waitForDatanodeState(
     DFSTestUtil.waitForDatanodeState(
         getCluster(), nodeOutofService.getDatanodeUuid(), false, 20000);
         getCluster(), nodeOutofService.getDatanodeUuid(), false, 20000);
-    assertEquals("maintenance node shouldn't be alive", numDatanodes - 1,
-        client.datanodeReport(DatanodeReportType.LIVE).length);
+    assertEquals(numDatanodes - 1, client.datanodeReport(DatanodeReportType.LIVE).length,
+        "maintenance node shouldn't be alive");
 
 
     // Dead maintenance node's blocks should remain in block map.
     // Dead maintenance node's blocks should remain in block map.
     checkWithRetry(ns, fileSys, file, replicas - 1,
     checkWithRetry(ns, fileSys, file, replicas - 1,
@@ -1026,8 +1021,7 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
       Path name, int repl, DatanodeInfo expectedExcludedNode,
       Path name, int repl, DatanodeInfo expectedExcludedNode,
       DatanodeInfo expectedMaintenanceNode) throws IOException {
       DatanodeInfo expectedMaintenanceNode) throws IOException {
     // need a raw stream
     // need a raw stream
-    assertTrue("Not HDFS:"+fileSys.getUri(),
-        fileSys instanceof DistributedFileSystem);
+    assertTrue(fileSys instanceof DistributedFileSystem, "Not HDFS:" + fileSys.getUri());
     HdfsDataInputStream dis = (HdfsDataInputStream)fileSys.open(name);
     HdfsDataInputStream dis = (HdfsDataInputStream)fileSys.open(name);
     BlockManager bm = ns.getBlockManager();
     BlockManager bm = ns.getBlockManager();
     Collection<LocatedBlock> dinfo = dis.getAllBlocks();
     Collection<LocatedBlock> dinfo = dis.getAllBlocks();
@@ -1127,8 +1121,7 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
   static private DatanodeInfo[] getFirstBlockReplicasDatanodeInfos(
   static private DatanodeInfo[] getFirstBlockReplicasDatanodeInfos(
       FileSystem fileSys, Path name) throws IOException {
       FileSystem fileSys, Path name) throws IOException {
     // need a raw stream
     // need a raw stream
-    assertTrue("Not HDFS:"+fileSys.getUri(),
-        fileSys instanceof DistributedFileSystem);
+    assertTrue(fileSys instanceof DistributedFileSystem, "Not HDFS:" + fileSys.getUri());
     HdfsDataInputStream dis = (HdfsDataInputStream)fileSys.open(name);
     HdfsDataInputStream dis = (HdfsDataInputStream)fileSys.open(name);
     Collection<LocatedBlock> dinfo = dis.getAllBlocks();
     Collection<LocatedBlock> dinfo = dis.getAllBlocks();
     if (dinfo.iterator().hasNext()) { // for the first block
     if (dinfo.iterator().hasNext()) { // for the first block
@@ -1164,13 +1157,11 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
     int ret = ToolRunner.run(dfsAdmin,
     int ret = ToolRunner.run(dfsAdmin,
         new String[] {"-report", "-enteringmaintenance", "-inmaintenance"});
         new String[] {"-report", "-enteringmaintenance", "-inmaintenance"});
     assertEquals(0, ret);
     assertEquals(0, ret);
-    assertThat(out.toString(),
-        is(allOf(containsString("Entering maintenance datanodes (0):"),
-            containsString("In maintenance datanodes (0):"),
-            not(containsString(
-                getCluster().getDataNodes().get(0).getDisplayName())),
-            not(containsString(
-                getCluster().getDataNodes().get(1).getDisplayName())))));
+    assertThat(out.toString())
+        .contains("Entering maintenance datanodes (0):")
+        .contains("In maintenance datanodes (0):")
+        .doesNotContain(getCluster().getDataNodes().get(0).getDisplayName())
+        .doesNotContain(getCluster().getDataNodes().get(1).getDisplayName());
 
 
     final Path file = new Path("/testReportMaintenanceNodes.dat");
     final Path file = new Path("/testReportMaintenanceNodes.dat");
     writeFile(fileSys, file, numNodes, 1);
     writeFile(fileSys, file, numNodes, 1);
@@ -1192,10 +1183,10 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
     ret = ToolRunner.run(dfsAdmin,
     ret = ToolRunner.run(dfsAdmin,
         new String[] {"-report", "-enteringmaintenance"});
         new String[] {"-report", "-enteringmaintenance"});
     assertEquals(0, ret);
     assertEquals(0, ret);
-    assertThat(out.toString(),
-        is(allOf(containsString("Entering maintenance datanodes (1):"),
-            containsString(nodes[0].getXferAddr()),
-            not(containsString(nodes[1].getXferAddr())))));
+    assertThat(out.toString())
+        .contains("Entering maintenance datanodes (1):")
+        .contains(nodes[0].getXferAddr())
+        .doesNotContain(nodes[1].getXferAddr());
 
 
     // reset stream
     // reset stream
     out.reset();
     out.reset();
@@ -1212,12 +1203,11 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
     ret = ToolRunner.run(dfsAdmin,
     ret = ToolRunner.run(dfsAdmin,
         new String[] {"-report", "-inmaintenance"});
         new String[] {"-report", "-inmaintenance"});
     assertEquals(0, ret);
     assertEquals(0, ret);
-    assertThat(out.toString(),
-        is(allOf(containsString("In maintenance datanodes (1):"),
-            containsString(nodes[0].getXferAddr()),
-            not(containsString(nodes[1].getXferAddr())),
-            not(containsString(
-                getCluster().getDataNodes().get(2).getDisplayName())))));
+    assertThat(out.toString()).contains("In maintenance datanodes (1):")
+        .contains(nodes[0].getXferAddr())
+        .doesNotContain(nodes[1].getXferAddr())
+        .doesNotContain(
+            getCluster().getDataNodes().get(2).getDisplayName());
 
 
     cleanupFile(getCluster().getFileSystem(), file);
     cleanupFile(getCluster().getFileSystem(), file);
   }
   }
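
Two assertion idioms change throughout TestMaintenanceState: the failure message moves from the first argument of org.junit.Assert.assertEquals to the last argument of the Jupiter version, and Hamcrest assertThat(value, is(allOf(containsString(...), not(...)))) chains are rewritten as fluent AssertJ assertions. A compact, hypothetical sketch of both; the report string and address below are made up for illustration:

```java
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;

public class AssertionMigrationSketch {

  @Test
  public void failureMessageBecomesTheTrailingArgument() {
    int liveNodes = 3;
    // JUnit 4: assertEquals("all datanodes must be alive", 3, liveNodes);
    assertEquals(3, liveNodes, "all datanodes must be alive");
  }

  @Test
  public void hamcrestMatchersBecomeFluentAssertJChains() {
    String report = "Entering maintenance datanodes (0):\nIn maintenance datanodes (0):";
    // JUnit 4 + Hamcrest:
    //   assertThat(report, is(allOf(containsString("..."), not(containsString("...")))));
    assertThat(report)
        .contains("Entering maintenance datanodes (0):")
        .contains("In maintenance datanodes (0):")
        .doesNotContain("127.0.0.1:9866");
  }
}
```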

+ 13 - 12
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceWithStriped.java

@@ -17,8 +17,8 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 import java.io.IOException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.ArrayList;
@@ -49,10 +49,10 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 
 
@@ -92,7 +92,7 @@ public class TestMaintenanceWithStriped {
     return new HdfsConfiguration();
     return new HdfsConfiguration();
   }
   }
 
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
   public void setup() throws IOException {
     // Set up the hosts/exclude files.
     // Set up the hosts/exclude files.
     hostsFileWriter = new HostsFileWriter();
     hostsFileWriter = new HostsFileWriter();
@@ -136,7 +136,7 @@ public class TestMaintenanceWithStriped {
         StripedFileTestUtil.getDefaultECPolicy().getName());
         StripedFileTestUtil.getDefaultECPolicy().getName());
   }
   }
 
 
-  @After
+  @AfterEach
   public void teardown() throws IOException {
   public void teardown() throws IOException {
     hostsFileWriter.cleanup();
     hostsFileWriter.cleanup();
     if (cluster != null) {
     if (cluster != null) {
@@ -149,14 +149,15 @@ public class TestMaintenanceWithStriped {
    * test DN maintenance with striped blocks.
    * test DN maintenance with striped blocks.
    * @throws Exception
    * @throws Exception
    */
    */
-  @Test(timeout = 120000)
+  @Test
+  @Timeout(value = 120)
   public void testInMaintenance() throws Exception {
   public void testInMaintenance() throws Exception {
     //1. create EC file
     //1. create EC file
     // d0 d1 d2 d3 d4 d5 d6 d7 d8
     // d0 d1 d2 d3 d4 d5 d6 d7 d8
     final Path ecFile = new Path(ecDir, "testInMaintenance");
     final Path ecFile = new Path(ecDir, "testInMaintenance");
     int writeBytes = cellSize * dataBlocks;
     int writeBytes = cellSize * dataBlocks;
     writeStripedFile(dfs, ecFile, writeBytes);
     writeStripedFile(dfs, ecFile, writeBytes);
-    Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
+    assertEquals(0, bm.numOfUnderReplicatedBlocks());
     FileChecksum fileChecksum1 = dfs.getFileChecksum(ecFile, writeBytes);
     FileChecksum fileChecksum1 = dfs.getFileChecksum(ecFile, writeBytes);
 
 
     final INodeFile fileNode = cluster.getNamesystem().getFSDirectory()
     final INodeFile fileNode = cluster.getNamesystem().getFSDirectory()
@@ -197,7 +198,7 @@ public class TestMaintenanceWithStriped {
     assertEquals(5, bm.countNodes(blockInfo).maintenanceNotForReadReplicas());
     assertEquals(5, bm.countNodes(blockInfo).maintenanceNotForReadReplicas());
 
 
     FileChecksum fileChecksum2 = dfs.getFileChecksum(ecFile, writeBytes);
     FileChecksum fileChecksum2 = dfs.getFileChecksum(ecFile, writeBytes);
-    Assert.assertEquals("Checksum mismatches!", fileChecksum1, fileChecksum2);
+    assertEquals(fileChecksum1, fileChecksum2, "Checksum mismatches!");
   }
   }
 
 
 
 
@@ -239,7 +240,7 @@ public class TestMaintenanceWithStriped {
           break;
           break;
         }
         }
       }
       }
-      assertTrue("Datanode: " + dn + " is not LIVE", nodeExists);
+      assertTrue(nodeExists, "Datanode: " + dn + " is not LIVE");
       maintenanceNodes.put(dn.getName(), maintenanceExpirationInMS);
       maintenanceNodes.put(dn.getName(), maintenanceExpirationInMS);
       LOG.info("Maintenance node: " + dn.getName());
       LOG.info("Maintenance node: " + dn.getName());
     }
     }

+ 31 - 25
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs;
 
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 
 import java.io.File;
 import java.io.File;
 import java.io.IOException;
 import java.io.IOException;
@@ -43,8 +43,9 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 
 
@@ -66,7 +67,8 @@ public class TestMiniDFSCluster {
   private static final String CLUSTER_4 = "cluster4";
   private static final String CLUSTER_4 = "cluster4";
   private static final String CLUSTER_5 = "cluster5";
   private static final String CLUSTER_5 = "cluster5";
   protected File testDataPath;
   protected File testDataPath;
-  @Before
+
+  @BeforeEach
   public void setUp() {
   public void setUp() {
     testDataPath = new File(PathUtils.getTestDir(getClass()), "miniclusters");
     testDataPath = new File(PathUtils.getTestDir(getClass()), "miniclusters");
   }
   }
@@ -77,7 +79,8 @@ public class TestMiniDFSCluster {
    *
    *
    * @throws Throwable on a failure
    * @throws Throwable on a failure
    */
    */
-  @Test(timeout=100000)
+  @Test
+  @Timeout(value = 100)
   public void testClusterWithoutSystemProperties() throws Throwable {
   public void testClusterWithoutSystemProperties() throws Throwable {
     String oldPrp = System.getProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
     String oldPrp = System.getProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
     System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
     System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
@@ -98,7 +101,8 @@ public class TestMiniDFSCluster {
   /**
   /**
    * Tests storage capacity setting still effective after cluster restart.
    * Tests storage capacity setting still effective after cluster restart.
    */
    */
-  @Test(timeout=100000)
+  @Test
+  @Timeout(value = 100)
   public void testClusterSetStorageCapacity() throws Throwable {
   public void testClusterSetStorageCapacity() throws Throwable {
 
 
     final Configuration conf = new HdfsConfiguration();
     final Configuration conf = new HdfsConfiguration();
@@ -211,7 +215,8 @@ public class TestMiniDFSCluster {
     return cluster;
     return cluster;
   }
   }
 
 
-  @Test(timeout=100000)
+  @Test
+  @Timeout(value = 100)
   public void testIsClusterUpAfterShutdown() throws Throwable {
   public void testIsClusterUpAfterShutdown() throws Throwable {
     Configuration conf = new HdfsConfiguration();
     Configuration conf = new HdfsConfiguration();
     File testDataCluster4 = new File(testDataPath, CLUSTER_4);
     File testDataCluster4 = new File(testDataPath, CLUSTER_4);
@@ -229,7 +234,8 @@ public class TestMiniDFSCluster {
   }
   }
 
 
   /** MiniDFSCluster should not clobber dfs.datanode.hostname if requested */
   /** MiniDFSCluster should not clobber dfs.datanode.hostname if requested */
-  @Test(timeout=100000)
+  @Test
+  @Timeout(value = 100)
   public void testClusterSetDatanodeHostname() throws Throwable {
   public void testClusterSetDatanodeHostname() throws Throwable {
     assumeTrue(System.getProperty("os.name").startsWith("Linux"));
     assumeTrue(System.getProperty("os.name").startsWith("Linux"));
     Configuration conf = new HdfsConfiguration();
     Configuration conf = new HdfsConfiguration();
@@ -240,8 +246,8 @@ public class TestMiniDFSCluster {
           .numDataNodes(1)
           .numDataNodes(1)
           .checkDataNodeHostConfig(true)
           .checkDataNodeHostConfig(true)
           .build()) {
           .build()) {
-      assertEquals("DataNode hostname config not respected", "MYHOST",
-          cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
+      assertEquals("MYHOST", cluster5.getDataNodes().get(0).getDatanodeId().getHostName(),
+          "DataNode hostname config not respected");
     }
     }
   }
   }
 
 
@@ -257,8 +263,8 @@ public class TestMiniDFSCluster {
       ArrayList<DataNode> dataNodes = cluster.getDataNodes();
       ArrayList<DataNode> dataNodes = cluster.getDataNodes();
       // Check the number of directory in DN's
       // Check the number of directory in DN's
       for (int i = 0; i < storageType.length; i++) {
       for (int i = 0; i < storageType.length; i++) {
-        assertEquals(DataNode.getStorageLocations(dataNodes.get(i).getConf())
-            .size(), storageType[i].length);
+        assertEquals(DataNode.getStorageLocations(dataNodes.get(i).getConf()).size(),
+            storageType[i].length);
       }
       }
     }
     }
   }
   }
@@ -304,18 +310,18 @@ public class TestMiniDFSCluster {
           DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"));
           DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"));
 
 
       for(NameNodeInfo nnInfo : cluster.getNameNodeInfos()) {
       for(NameNodeInfo nnInfo : cluster.getNameNodeInfos()) {
-        assertEquals(ns0nn0, nnInfo.conf.get(
-            DFSUtil.addKeySuffixes(
-            DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns0", "nn0")));
-        assertEquals(ns0nn1, nnInfo.conf.get(
-            DFSUtil.addKeySuffixes(
-            DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns0", "nn1")));
-        assertEquals(ns1nn0, nnInfo.conf.get(
-            DFSUtil.addKeySuffixes(
-            DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn0")));
-        assertEquals(ns1nn1, nnInfo.conf.get(
-            DFSUtil.addKeySuffixes(
-            DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1")));
+        assertEquals(ns0nn0,
+            nnInfo.conf.get(
+                DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns0", "nn0")));
+        assertEquals(ns0nn1,
+            nnInfo.conf.get(
+                DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns0", "nn1")));
+        assertEquals(ns1nn0,
+            nnInfo.conf.get(
+                DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn0")));
+        assertEquals(ns1nn1,
+            nnInfo.conf.get(
+                DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1")));
       }
       }
 
 
       // Shutdown namenodes individually.
       // Shutdown namenodes individually.
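
TestMiniDFSCluster also swaps org.junit.Assume for org.junit.jupiter.api.Assumptions; in both frameworks a failed assumption aborts the test rather than failing it. Minimal sketch with an invented class name:

```java
import static org.junit.jupiter.api.Assumptions.assumeTrue;

import org.junit.jupiter.api.Test;

public class AssumptionMigrationSketch {

  @Test
  public void onlyMeaningfulOnLinux() {
    // JUnit 4: org.junit.Assume.assumeTrue(...)
    // JUnit 5: the test is reported as aborted, not failed, when this is false.
    assumeTrue(System.getProperty("os.name").startsWith("Linux"));
    // platform-specific checks would follow here
  }
}
```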

+ 8 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java

@@ -27,16 +27,15 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceBlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.AvailableSpaceBlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 
 import javax.management.*;
 import javax.management.*;
 
 
 import java.io.IOException;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.ManagementFactory;
 
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 /**
 /**
  * The test makes sure that NameNode detects presense blocks that do not have
  * The test makes sure that NameNode detects presense blocks that do not have
@@ -105,8 +104,8 @@ public class TestMissingBlocksAlert {
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
       ObjectName mxbeanName = new ObjectName(
       ObjectName mxbeanName = new ObjectName(
               "Hadoop:service=NameNode,name=NameNodeInfo");
               "Hadoop:service=NameNode,name=NameNodeInfo");
-      Assert.assertEquals(1, (long)(Long) mbs.getAttribute(mxbeanName,
-                      "NumberOfMissingBlocks"));
+      assertEquals(1, (long) (Long) mbs.getAttribute(mxbeanName,
+          "NumberOfMissingBlocks"));
 
 
       // now do the reverse : remove the file expect the number of missing 
       // now do the reverse : remove the file expect the number of missing 
       // blocks to go to zero
       // blocks to go to zero
@@ -121,8 +120,8 @@ public class TestMissingBlocksAlert {
       assertEquals(2, dfs.getLowRedundancyBlocksCount());
       assertEquals(2, dfs.getLowRedundancyBlocksCount());
       assertEquals(2, bm.getUnderReplicatedNotMissingBlocks());
       assertEquals(2, bm.getUnderReplicatedNotMissingBlocks());
 
 
-      Assert.assertEquals(0, (long)(Long) mbs.getAttribute(mxbeanName,
-              "NumberOfMissingBlocks"));
+      assertEquals(0, (long) (Long) mbs.getAttribute(mxbeanName,
+          "NumberOfMissingBlocks"));
 
 
       Path replOneFile = new Path("/testMissingBlocks/replOneFile");
       Path replOneFile = new Path("/testMissingBlocks/replOneFile");
       DFSTestUtil.createFile(dfs, replOneFile, fileLen, (short)1, 0);
       DFSTestUtil.createFile(dfs, replOneFile, fileLen, (short)1, 0);
@@ -138,7 +137,7 @@ public class TestMissingBlocksAlert {
       }
       }
       in.close();
       in.close();
       assertEquals(1, dfs.getMissingReplOneBlocksCount());
       assertEquals(1, dfs.getMissingReplOneBlocksCount());
-      Assert.assertEquals(1, (long)(Long) mbs.getAttribute(mxbeanName,
+      assertEquals(1, (long) (Long) mbs.getAttribute(mxbeanName,
           "NumberOfMissingBlocksWithReplicationFactorOne"));
           "NumberOfMissingBlocksWithReplicationFactorOne"));
     } finally {
     } finally {
       if (cluster != null) {
       if (cluster != null) {

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java

@@ -17,8 +17,8 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 import java.io.IOException;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.OutputStream;
@@ -32,7 +32,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.util.ThreadUtil;
 import org.apache.hadoop.util.ThreadUtil;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 
 /**
 /**
  * This class tests the decommissioning of nodes.
  * This class tests the decommissioning of nodes.
@@ -77,7 +77,7 @@ public class TestModTime {
                                                    cluster.getNameNodePort());
                                                    cluster.getNameNodePort());
     DFSClient client = new DFSClient(addr, conf);
     DFSClient client = new DFSClient(addr, conf);
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
-    assertEquals("Number of Datanodes ", numDatanodes, info.length);
+    assertEquals(numDatanodes, info.length, "Number of Datanodes ");
     FileSystem fileSys = cluster.getFileSystem();
     FileSystem fileSys = cluster.getFileSystem();
     int replicas = numDatanodes - 1;
     int replicas = numDatanodes - 1;
     assertTrue(fileSys instanceof DistributedFileSystem);
     assertTrue(fileSys instanceof DistributedFileSystem);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java

@@ -35,7 +35,7 @@ import org.apache.hadoop.metrics2.util.SampleQuantiles;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 
 /**
 /**
  * This class tests hflushing concurrently from many threads.
  * This class tests hflushing concurrently from many threads.

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java

@@ -18,11 +18,11 @@
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 
 public class TestParallelRead extends TestParallelReadUtil {
 public class TestParallelRead extends TestParallelReadUtil {
-  @BeforeClass
+  @BeforeAll
   static public void setupCluster() throws Exception {
   static public void setupCluster() throws Exception {
     // This is a test of the normal (TCP) read path.  For this reason, we turn
     // This is a test of the normal (TCP) read path.  For this reason, we turn
     // off both short-circuit local reads and UNIX domain socket data traffic.
     // off both short-circuit local reads and UNIX domain socket data traffic.
@@ -37,7 +37,7 @@ public class TestParallelRead extends TestParallelReadUtil {
     setupCluster(DEFAULT_REPLICATION_FACTOR, conf);
     setupCluster(DEFAULT_REPLICATION_FACTOR, conf);
   }
   }
 
 
-  @AfterClass
+  @AfterAll
   static public void teardownCluster() throws Exception {
   static public void teardownCluster() throws Exception {
     TestParallelReadUtil.teardownCluster();
     TestParallelReadUtil.teardownCluster();
   }
   }
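
The parallel-read suites replace @BeforeClass/@AfterClass with @BeforeAll/@AfterAll; the methods stay static because the default per-method test lifecycle is unchanged. Sketch with invented names:

```java
import static org.junit.jupiter.api.Assertions.assertNotNull;

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

public class ClassLifecycleMigrationSketch {

  private static String sharedCluster;

  @BeforeAll   // was @BeforeClass; must remain static
  public static void setupCluster() {
    sharedCluster = "expensive shared fixture";
  }

  @AfterAll    // was @AfterClass; must remain static
  public static void teardownCluster() {
    sharedCluster = null;
  }

  @Test
  public void sharedFixtureIsVisibleToEveryTest() {
    assertNotNull(sharedCluster);
  }
}
```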

+ 10 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java

@@ -17,23 +17,23 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 
 import java.io.IOException;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.ByteBuffer;
 import java.util.Random;
 import java.util.Random;
 
 
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.jupiter.api.Disabled;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
 import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Time;
-import org.junit.Ignore;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 import org.slf4j.event.Level;
 
 
 /**
 /**
@@ -43,7 +43,7 @@ import org.slf4j.event.Level;
  * This class is marked as @Ignore so that junit doesn't try to execute the
  * This class is marked as @Ignore so that junit doesn't try to execute the
  * tests in here directly.  They are executed from subclasses.
  * tests in here directly.  They are executed from subclasses.
  */
  */
-@Ignore
+@Disabled
 public class TestParallelReadUtil {
 public class TestParallelReadUtil {
 
 
   static final Logger LOG = LoggerFactory.getLogger(TestParallelReadUtil.class);
   static final Logger LOG = LoggerFactory.getLogger(TestParallelReadUtil.class);
@@ -261,8 +261,8 @@ public class TestParallelReadUtil {
      */
      */
     private void read(int start, int len) throws Exception {
     private void read(int start, int len) throws Exception {
       assertTrue(
       assertTrue(
-          "Bad args: " + start + " + " + len + " should be <= " + fileSize,
-          start + len <= fileSize);
+          start + len <= fileSize,
+          "Bad args: " + start + " + " + len + " should be <= " + fileSize);
       readCount++;
       readCount++;
       DFSInputStream dis = testInfo.dis;
       DFSInputStream dis = testInfo.dis;
 
 
@@ -277,8 +277,8 @@ public class TestParallelReadUtil {
      */
      */
     private void pRead(int start, int len) throws Exception {
     private void pRead(int start, int len) throws Exception {
       assertTrue(
       assertTrue(
-          "Bad args: " + start + " + " + len + " should be <= " + fileSize,
-          start + len <= fileSize);
+          start + len <= fileSize,
+          "Bad args: " + start + " + " + len + " should be <= " + fileSize);
       DFSInputStream dis = testInfo.dis;
       DFSInputStream dis = testInfo.dis;
 
 
       byte buf[] = new byte[len];
       byte buf[] = new byte[len];
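
The class-level @Ignore on the shared base above becomes JUnit 5's @Disabled, which likewise keeps the engine from executing the annotated class when it is selected directly; the intent, as the class comment in the diff states, is that the tests run only through the concrete subclasses. Illustrative sketch (name invented):

```java
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

// Marked disabled so the class is skipped when launched on its own,
// mirroring the JUnit 4 @Ignore arrangement for shared test bases.
@Disabled
public class DisabledBaseSketch {

  @Test
  public void notExecutedOnTheBaseClass() {
    // intended to run only via a subclass that is not annotated with @Disabled
  }
}
```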

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitLegacyRead.java

@@ -20,11 +20,11 @@ package org.apache.hadoop.hdfs;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 
 public class TestParallelShortCircuitLegacyRead extends TestParallelReadUtil {
 public class TestParallelShortCircuitLegacyRead extends TestParallelReadUtil {
-  @BeforeClass
+  @BeforeAll
   static public void setupCluster() throws Exception {
   static public void setupCluster() throws Exception {
     DFSInputStream.tcpReadsDisabledForTesting = true;
     DFSInputStream.tcpReadsDisabledForTesting = true;
     HdfsConfiguration conf = new HdfsConfiguration();
     HdfsConfiguration conf = new HdfsConfiguration();
@@ -40,7 +40,7 @@ public class TestParallelShortCircuitLegacyRead extends TestParallelReadUtil {
     setupCluster(1, conf);
     setupCluster(1, conf);
   }
   }
 
 
-  @AfterClass
+  @AfterAll
   static public void teardownCluster() throws Exception {
   static public void teardownCluster() throws Exception {
     TestParallelReadUtil.teardownCluster();
     TestParallelReadUtil.teardownCluster();
   }
   }

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitRead.java

@@ -17,22 +17,22 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import static org.hamcrest.CoreMatchers.equalTo;
 
 
 import java.io.File;
 import java.io.File;
 
 
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
-import org.junit.AfterClass;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+
+import static org.assertj.core.api.Assumptions.assumeThat;
 
 
 public class TestParallelShortCircuitRead extends TestParallelReadUtil {
 public class TestParallelShortCircuitRead extends TestParallelReadUtil {
   private static TemporarySocketDirectory sockDir;
   private static TemporarySocketDirectory sockDir;
 
 
-  @BeforeClass
+  @BeforeAll
   static public void setupCluster() throws Exception {
   static public void setupCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     if (DomainSocket.getLoadingFailureReason() != null) return;
     DFSInputStream.tcpReadsDisabledForTesting = true;
     DFSInputStream.tcpReadsDisabledForTesting = true;
@@ -47,12 +47,12 @@ public class TestParallelShortCircuitRead extends TestParallelReadUtil {
     setupCluster(1, conf);
     setupCluster(1, conf);
   }
   }
 
 
-  @Before
+  @BeforeEach
   public void before() {
   public void before() {
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    assumeThat(DomainSocket.getLoadingFailureReason()).isNull();
   }
   }
 
 
-  @AfterClass
+  @AfterAll
   static public void teardownCluster() throws Exception {
   static public void teardownCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     if (DomainSocket.getLoadingFailureReason() != null) return;
     sockDir.close();
     sockDir.close();

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadNoChecksum.java

@@ -17,22 +17,22 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import static org.hamcrest.CoreMatchers.equalTo;
 
 
 import java.io.File;
 import java.io.File;
 
 
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
-import org.junit.AfterClass;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+
+import static org.assertj.core.api.Assumptions.assumeThat;
 
 
 public class TestParallelShortCircuitReadNoChecksum extends TestParallelReadUtil {
 public class TestParallelShortCircuitReadNoChecksum extends TestParallelReadUtil {
   private static TemporarySocketDirectory sockDir;
   private static TemporarySocketDirectory sockDir;
 
 
-  @BeforeClass
+  @BeforeAll
   static public void setupCluster() throws Exception {
   static public void setupCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     if (DomainSocket.getLoadingFailureReason() != null) return;
     DFSInputStream.tcpReadsDisabledForTesting = true;
     DFSInputStream.tcpReadsDisabledForTesting = true;
@@ -47,12 +47,12 @@ public class TestParallelShortCircuitReadNoChecksum extends TestParallelReadUtil
     setupCluster(1, conf);
     setupCluster(1, conf);
   }
   }
 
 
-  @Before
+  @BeforeEach
   public void before() {
   public void before() {
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    assumeThat(DomainSocket.getLoadingFailureReason()).isNull();
   }
   }
 
 
-  @AfterClass
+  @AfterAll
   static public void teardownCluster() throws Exception {
   static public void teardownCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     if (DomainSocket.getLoadingFailureReason() != null) return;
     sockDir.close();
     sockDir.close();

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java

@@ -17,17 +17,17 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import static org.hamcrest.CoreMatchers.equalTo;
 
 
 import java.io.File;
 import java.io.File;
 
 
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
-import org.junit.AfterClass;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+
+import static org.assertj.core.api.Assumptions.assumeThat;
 
 
 /**
 /**
  * This class tests short-circuit local reads without any FileInputStream or
  * This class tests short-circuit local reads without any FileInputStream or
@@ -36,7 +36,7 @@ import org.junit.BeforeClass;
 public class TestParallelShortCircuitReadUnCached extends TestParallelReadUtil {
 public class TestParallelShortCircuitReadUnCached extends TestParallelReadUtil {
   private static TemporarySocketDirectory sockDir;
   private static TemporarySocketDirectory sockDir;
 
 
-  @BeforeClass
+  @BeforeAll
   static public void setupCluster() throws Exception {
   static public void setupCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     if (DomainSocket.getLoadingFailureReason() != null) return;
     sockDir = new TemporarySocketDirectory();
     sockDir = new TemporarySocketDirectory();
@@ -66,12 +66,12 @@ public class TestParallelShortCircuitReadUnCached extends TestParallelReadUtil {
     setupCluster(1, conf);
     setupCluster(1, conf);
   }
   }
 
 
-  @Before
+  @BeforeEach
   public void before() {
   public void before() {
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    assumeThat(DomainSocket.getLoadingFailureReason()).isNull();
   }
   }
 
 
-  @AfterClass
+  @AfterAll
   static public void teardownCluster() throws Exception {
   static public void teardownCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     if (DomainSocket.getLoadingFailureReason() != null) return;
     sockDir.close();
     sockDir.close();

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelUnixDomainRead.java

@@ -17,22 +17,22 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import static org.hamcrest.CoreMatchers.equalTo;
 
 
 import java.io.File;
 import java.io.File;
 
 
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
-import org.junit.AfterClass;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+
+import static org.assertj.core.api.Assumptions.assumeThat;
 
 
 public class TestParallelUnixDomainRead extends TestParallelReadUtil {
 public class TestParallelUnixDomainRead extends TestParallelReadUtil {
   private static TemporarySocketDirectory sockDir;
   private static TemporarySocketDirectory sockDir;
 
 
-  @BeforeClass
+  @BeforeAll
   static public void setupCluster() throws Exception {
   static public void setupCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     if (DomainSocket.getLoadingFailureReason() != null) return;
     DFSInputStream.tcpReadsDisabledForTesting = true;
     DFSInputStream.tcpReadsDisabledForTesting = true;
@@ -46,12 +46,12 @@ public class TestParallelUnixDomainRead extends TestParallelReadUtil {
     setupCluster(1, conf);
     setupCluster(1, conf);
   }
   }
 
 
-  @Before
+  @BeforeEach
   public void before() {
   public void before() {
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    assumeThat(DomainSocket.getLoadingFailureReason()).isNull();
   }
   }
 
 
-  @AfterClass
+  @AfterAll
   static public void teardownCluster() throws Exception {
   static public void teardownCluster() throws Exception {
     if (DomainSocket.getLoadingFailureReason() != null) return;
     if (DomainSocket.getLoadingFailureReason() != null) return;
     sockDir.close();
     sockDir.close();

+ 7 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java

@@ -18,9 +18,9 @@
 
 
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 import java.io.File;
 import java.io.File;
 import java.io.IOException;
 import java.io.IOException;
@@ -44,7 +44,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 import org.slf4j.event.Level;
 
 
 /**
 /**
@@ -128,8 +128,7 @@ public class TestPersistBlocks {
       // Check that the file has no less bytes than before the restart
       // Check that the file has no less bytes than before the restart
       // This would mean that blocks were successfully persisted to the log
       // This would mean that blocks were successfully persisted to the log
       FileStatus status = fs.getFileStatus(FILE_PATH);
       FileStatus status = fs.getFileStatus(FILE_PATH);
-      assertTrue("Length too short: " + status.getLen(),
-          status.getLen() >= len);
+      assertTrue(status.getLen() >= len, "Length too short: " + status.getLen());
       
       
       // And keep writing (ensures that leases are also persisted correctly)
       // And keep writing (ensures that leases are also persisted correctly)
       stream.write(DATA_AFTER_RESTART);
       stream.write(DATA_AFTER_RESTART);
@@ -194,8 +193,8 @@ public class TestPersistBlocks {
       // Check that the file has no less bytes than before the restart
       // Check that the file has no less bytes than before the restart
       // This would mean that blocks were successfully persisted to the log
       // This would mean that blocks were successfully persisted to the log
       FileStatus status = fs.getFileStatus(FILE_PATH);
       FileStatus status = fs.getFileStatus(FILE_PATH);
-      assertTrue("Length incorrect: " + status.getLen(),
-          status.getLen() == len - BLOCK_SIZE);
+      assertTrue(status.getLen() == len - BLOCK_SIZE,
+          "Length incorrect: " + status.getLen());
 
 
       // Verify the data showed up from before restart, sans abandoned block.
       // Verify the data showed up from before restart, sans abandoned block.
       FSDataInputStream readStream = fs.open(FILE_PATH);
       FSDataInputStream readStream = fs.open(FILE_PATH);

+ 20 - 22
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecodingCorruptData.java

@@ -17,13 +17,11 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 
 
@@ -36,7 +34,7 @@ import static org.apache.hadoop.hdfs.ReadStripedFileWithDecodingHelper.tearDownC
 /**
 /**
  * Test online recovery with corrupt files. This test is parameterized.
  * Test online recovery with corrupt files. This test is parameterized.
  */
  */
-@RunWith(Parameterized.class)
+@Timeout(300)
 public class TestReadStripedFileWithDecodingCorruptData {
 public class TestReadStripedFileWithDecodingCorruptData {
   static final Logger LOG =
   static final Logger LOG =
       LoggerFactory.getLogger(TestReadStripedFileWithDecodingCorruptData.class);
       LoggerFactory.getLogger(TestReadStripedFileWithDecodingCorruptData.class);
@@ -44,21 +42,17 @@ public class TestReadStripedFileWithDecodingCorruptData {
   private static MiniDFSCluster cluster;
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem dfs;
   private static DistributedFileSystem dfs;
 
 
-  @Rule
-  public Timeout globalTimeout = new Timeout(300000);
-
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws IOException {
   public static void setup() throws IOException {
     cluster = initializeCluster();
     cluster = initializeCluster();
     dfs = cluster.getFileSystem();
     dfs = cluster.getFileSystem();
   }
   }
 
 
-  @AfterClass
-  public static void tearDown() throws IOException {
+  @AfterEach
+  public void tearDown() throws IOException {
     tearDownCluster(cluster);
     tearDownCluster(cluster);
   }
   }
 
 
-  @Parameterized.Parameters
   public static Collection<Object[]> getParameters() {
   public static Collection<Object[]> getParameters() {
     return ReadStripedFileWithDecodingHelper.getParameters();
     return ReadStripedFileWithDecodingHelper.getParameters();
   }
   }
@@ -67,19 +61,23 @@ public class TestReadStripedFileWithDecodingCorruptData {
   private int dataDelNum;
   private int dataDelNum;
   private int parityDelNum;
   private int parityDelNum;
 
 
-  public TestReadStripedFileWithDecodingCorruptData(int fileLength, int
-      dataDelNum, int parityDelNum) {
-    this.fileLength = fileLength;
-    this.dataDelNum = dataDelNum;
-    this.parityDelNum = parityDelNum;
+  public void initTestReadStripedFileWithDecodingCorruptData(int pFileLength, int
+      pDataDelNum, int pParityDelNum) {
+    this.fileLength = pFileLength;
+    this.dataDelNum = pDataDelNum;
+    this.parityDelNum = pParityDelNum;
   }
   }
 
 
   /**
   /**
    * Corrupt tolerable number of block before reading.
    * Corrupt tolerable number of block before reading.
    * Verify the decoding works correctly.
    * Verify the decoding works correctly.
    */
    */
-  @Test
-  public void testReadCorruptedData() throws IOException {
+  @ParameterizedTest
+  @MethodSource("getParameters")
+  public void testReadCorruptedData(int pFileLength, int
+      pDataDelNum, int pParityDelNum) throws IOException {
+    initTestReadStripedFileWithDecodingCorruptData(pFileLength, pDataDelNum, pParityDelNum);
+    setup();
     String src = "/corrupted_" + dataDelNum + "_" + parityDelNum;
     String src = "/corrupted_" + dataDelNum + "_" + parityDelNum;
     ReadStripedFileWithDecodingHelper.testReadWithBlockCorrupted(cluster,
     ReadStripedFileWithDecodingHelper.testReadWithBlockCorrupted(cluster,
         dfs, src, fileLength, dataDelNum, parityDelNum, false);
         dfs, src, fileLength, dataDelNum, parityDelNum, false);

+ 18 - 21
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecodingDeletedData.java

@@ -17,13 +17,11 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 
 
@@ -37,7 +35,7 @@ import static org.apache.hadoop.hdfs.ReadStripedFileWithDecodingHelper.tearDownC
  * Test online recovery with files with deleted blocks. This test is
  * Test online recovery with files with deleted blocks. This test is
  * parameterized.
  * parameterized.
  */
  */
-@RunWith(Parameterized.class)
+@Timeout(300)
 public class TestReadStripedFileWithDecodingDeletedData {
 public class TestReadStripedFileWithDecodingDeletedData {
   static final Logger LOG =
   static final Logger LOG =
       LoggerFactory.getLogger(TestReadStripedFileWithDecodingDeletedData.class);
       LoggerFactory.getLogger(TestReadStripedFileWithDecodingDeletedData.class);
@@ -45,21 +43,17 @@ public class TestReadStripedFileWithDecodingDeletedData {
   private static MiniDFSCluster cluster;
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem dfs;
   private static DistributedFileSystem dfs;
 
 
-  @Rule
-  public Timeout globalTimeout = new Timeout(300000);
-
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws IOException {
     cluster = initializeCluster();
     dfs = cluster.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws IOException {
     tearDownCluster(cluster);
   }
 
-  @Parameterized.Parameters
   public static Collection<Object[]> getParameters() {
     return ReadStripedFileWithDecodingHelper.getParameters();
   }
@@ -68,19 +62,22 @@ public class TestReadStripedFileWithDecodingDeletedData {
   private int dataDelNum;
   private int parityDelNum;
 
-  public TestReadStripedFileWithDecodingDeletedData(int fileLength, int
-      dataDelNum, int parityDelNum) {
-    this.fileLength = fileLength;
-    this.dataDelNum = dataDelNum;
-    this.parityDelNum = parityDelNum;
+  public void initTestReadStripedFileWithDecodingDeletedData(int pFileLength, int
+      pDataDelNum, int pParityDelNum) {
+    this.fileLength = pFileLength;
+    this.dataDelNum = pDataDelNum;
+    this.parityDelNum = pParityDelNum;
   }
 
   /**
    * Delete tolerable number of block before reading.
    * Verify the decoding works correctly.
    */
-  @Test
-  public void testReadCorruptedDataByDeleting() throws IOException {
+  @ParameterizedTest
+  @MethodSource("getParameters")
+  public void testReadCorruptedDataByDeleting(int pFileLength, int
+      pDataDelNum, int pParityDelNum) throws IOException {
+    initTestReadStripedFileWithDecodingDeletedData(pFileLength, pDataDelNum, pParityDelNum);
     String src = "/deleted_" + dataDelNum + "_" + parityDelNum;
     ReadStripedFileWithDecodingHelper.testReadWithBlockCorrupted(cluster,
         dfs, src, fileLength, dataDelNum, parityDelNum, true);
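Note: the removed @Rule Timeout field and the added class-level @Timeout(300) are equivalent only because the units differ: the JUnit 4 Timeout rule takes milliseconds (300000), while the JUnit 5 @Timeout annotation defaults to seconds. A minimal sketch of the replacement (the class and method names here are illustrative, not from this commit):

    import java.util.concurrent.TimeUnit;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    // A class-level @Timeout applies to every test method; 300 means 300 seconds.
    @Timeout(300)
    public class ExampleTimeoutMigration {

      // The unit can also be stated explicitly per method when needed.
      @Test
      @Timeout(value = 120, unit = TimeUnit.SECONDS)
      public void testSomething() {
        // ... test body ...
      }
    }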

+ 21 - 20
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java

@@ -17,9 +17,11 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 
 import java.io.File;
 import java.io.IOException;
@@ -68,7 +70,6 @@ import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;
@@ -344,7 +345,7 @@ public class TestReconstructStripedFile {
   void assertFileBlocksReconstruction(String fileName, int fileLen,
       ReconstructionType type, int toRecoverBlockNum) throws Exception {
     if (toRecoverBlockNum < 1 || toRecoverBlockNum > parityBlkNum) {
-      Assertions.fail("toRecoverBlockNum should be between 1 ~ " + parityBlkNum);
+      fail("toRecoverBlockNum should be between 1 ~ " + parityBlkNum);
     }
     assertTrue(fileLen > 0, "File length must be positive.");
 
@@ -439,7 +440,7 @@ public class TestReconstructStripedFile {
       byte[] replicaContentAfterReconstruction =
           DFSTestUtil.readFileAsBytes(replicaAfterReconstruction);
 
-      Assertions.assertArrayEquals(replicaContents[i], replicaContentAfterReconstruction);
+      assertArrayEquals(replicaContents[i], replicaContentAfterReconstruction);
     }
   }
 
@@ -458,7 +459,7 @@ public class TestReconstructStripedFile {
         }
       }
       if (result[i] == -1) {
-        Assertions.fail("Failed to reconstruct striped block: "
+        fail("Failed to reconstruct striped block: "
             + blocks[i].getBlockId());
       }
     }
@@ -528,8 +529,8 @@ public class TestReconstructStripedFile {
     }
 
     // Inject data-loss by tear down desired number of DataNodes.
-    assumeTrue("Ignore case where num dead DNs > num parity units",
-        policy.getNumParityUnits() >= deadDN);
+    assumeTrue(policy.getNumParityUnits() >= deadDN,
+        "Ignore case where num dead DNs > num parity units");
     List<DataNode> dataNodes = new ArrayList<>(cluster.getDataNodes());
     Collections.shuffle(dataNodes);
     for (DataNode dn : dataNodes.subList(0, deadDN)) {
@@ -631,8 +632,8 @@ public class TestReconstructStripedFile {
   @Test
   @Timeout(value = 120)
   public void testTimeoutReadBlockInReconstruction() throws Exception {
-    assumeTrue("Ignore case where num parity units <= 1",
-        ecPolicy.getNumParityUnits() > 1);
+    assumeTrue(ecPolicy.getNumParityUnits() > 1,
+        "Ignore case where num parity units <= 1");
     int stripedBufferSize = conf.getInt(
         DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY,
         cellSize);
@@ -649,7 +650,7 @@ public class TestReconstructStripedFile {
 
 
     LocatedBlocks locatedBlocks =
         StripedFileTestUtil.getLocatedBlocks(file, fs);
-    Assertions.assertEquals(1, locatedBlocks.getLocatedBlocks().size());
+    assertEquals(1, locatedBlocks.getLocatedBlocks().size());
     // The file only has one block group
     LocatedBlock lblock = locatedBlocks.get(0);
     DatanodeInfo[] datanodeinfos = lblock.getLocations();
@@ -661,7 +662,7 @@ public class TestReconstructStripedFile {
         DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY,
         DFSConfigKeys.
             DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_DEFAULT);
-    Assertions.assertTrue(
+    assertTrue(
         stripedReadTimeoutInMills > 2000,
         DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY
             + " must be greater than 2000");
@@ -684,7 +685,7 @@ public class TestReconstructStripedFile {
                 stripedReadTimeoutInMills * 3
             );
           } catch (TimeoutException e) {
-            Assertions.fail("Can't reconstruct the file's first part.");
+            fail("Can't reconstruct the file's first part.");
           } catch (InterruptedException e) {
           }
         }
@@ -720,8 +721,8 @@ public class TestReconstructStripedFile {
   @Test
   @Timeout(value = 120)
   public void testAbnormallyCloseDoesNotWriteBufferAgain() throws Exception {
-    assumeTrue("Ignore case where num parity units <= 1",
-        ecPolicy.getNumParityUnits() > 1);
+    assumeTrue(ecPolicy.getNumParityUnits() > 1,
+        "Ignore case where num parity units <= 1");
     int stripedBufferSize = conf.getInt(
         DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY,
         cellSize);
@@ -734,7 +735,7 @@ public class TestReconstructStripedFile {
 
 
     LocatedBlocks locatedBlocks =
         StripedFileTestUtil.getLocatedBlocks(file, fs);
-    Assertions.assertEquals(1, locatedBlocks.getLocatedBlocks().size());
+    assertEquals(1, locatedBlocks.getLocatedBlocks().size());
     // The file only has one block group
     LocatedBlock lblock = locatedBlocks.get(0);
     DatanodeInfo[] datanodeinfos = lblock.getLocations();
@@ -746,7 +747,7 @@ public class TestReconstructStripedFile {
         DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY,
         DFSConfigKeys.
             DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_DEFAULT);
-    Assertions.assertTrue(
+    assertTrue(
         stripedReadTimeoutInMills > 2000,
         DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_TIMEOUT_MILLIS_KEY
             + " must be greater than 2000");
@@ -778,7 +779,7 @@ public class TestReconstructStripedFile {
                 stripedReadTimeoutInMills * 3
             );
           } catch (TimeoutException e) {
-            Assertions.fail("Can't reconstruct the file's first part.");
+            fail("Can't reconstruct the file's first part.");
           } catch (InterruptedException e) {
           }
         }
@@ -793,7 +794,7 @@ public class TestReconstructStripedFile {
                 stripedReadTimeoutInMills * 3
             );
           } catch (TimeoutException e) {
-            Assertions.fail("Can't reconstruct the file's remaining part.");
+            fail("Can't reconstruct the file's remaining part.");
           } catch (InterruptedException e) {
           }
         }
@@ -819,7 +820,7 @@ public class TestReconstructStripedFile {
                 stripedReadTimeoutInMills * 3
             );
           } catch (TimeoutException e) {
-            Assertions.fail("Can't finish the file's reconstruction.");
+            fail("Can't finish the file's reconstruction.");
           } catch (InterruptedException e) {
           }
         }
@@ -847,7 +848,7 @@ public class TestReconstructStripedFile {
     while (bufferPool.size(direct) != 0) {
       // iterate all ByteBuffers in ElasticByteBufferPool
       ByteBuffer byteBuffer =  bufferPool.getBuffer(direct, 0);
-      Assertions.assertEquals(0, byteBuffer.position());
+      assertEquals(0, byteBuffer.position());
     }
   }
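Note: the TestReconstructStripedFile hunks above combine two recurring edits from this commit: qualified Assertions.xxx calls are replaced with static imports, and the failure message moves from the first argument (JUnit 4 Assert/Assume) to the last argument (JUnit 5 Assertions/Assumptions). A minimal sketch of the argument-order change (the class name and values are illustrative, not from this commit):

    import static org.junit.jupiter.api.Assertions.assertTrue;
    import static org.junit.jupiter.api.Assumptions.assumeTrue;

    import org.junit.jupiter.api.Test;

    public class ExampleArgumentOrderMigration {

      @Test
      public void testMessageMovesToLastPosition() {
        int numParityUnits = 3;                 // illustrative value only
        long stripedReadTimeoutInMills = 5000L; // illustrative value only

        // JUnit 4 placed the message first:
        //   assumeTrue("Ignore case where num parity units <= 1", numParityUnits > 1);
        // JUnit 5 places the condition first and the message last:
        assumeTrue(numParityUnits > 1, "Ignore case where num parity units <= 1");
        assertTrue(stripedReadTimeoutInMills > 2000,
            "striped read timeout must be greater than 2000");
      }
    }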