
HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part4. (#7647)

* HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part4.

Co-authored-by: Chris Nauroth <cnauroth@apache.org>
Co-authored-by: Shilun Fan <slfan1989@apache.org>
Reviewed-by: Chris Nauroth <cnauroth@apache.org>
Reviewed-by: Shilun Fan <slfan1989@apache.org>
Signed-off-by: Shilun Fan <slfan1989@apache.org>
Author: zhtttylz (1 month ago)
Commit: 1ecf53d4f0
48 changed files with 1436 additions and 1293 deletions
  1. +8 -7 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestAdminHelper.java
  2. +140 -143 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
  3. +159 -114 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java
  4. +6 -6 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
  5. +15 -16 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
  6. +18 -12 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java
  7. +38 -34 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java
  8. +81 -83 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestECAdmin.java
  9. +46 -32 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java
  10. +5 -5 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java
  11. +10 -7 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
  12. +3 -3 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java
  13. +10 -13 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java
  14. +8 -9 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java
  15. +2 -2 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestWebHDFSStoragePolicyCommands.java
  16. +42 -50 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
  17. +57 -53 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
  18. +6 -6 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java
  19. +6 -6 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForContentSummary.java
  20. +6 -6 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForErasureCodingPolicy.java
  21. +6 -6 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForStoragePolicy.java
  22. +13 -15 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java
  23. +29 -21 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java
  24. +7 -4 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestPBImageCorruption.java
  25. +19 -24 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
  26. +5 -4 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestBestEffortLongFile.java
  27. +16 -13 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java
  28. +3 -3 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCyclicIteration.java
  29. +12 -8 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java
  30. +9 -9 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java
  31. +16 -11 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java
  32. +11 -8 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
  33. +17 -16 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java
  34. +8 -13 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java
  35. +8 -6 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestXMLUtils.java
  36. +3 -3 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestAuthFilter.java
  37. +10 -8 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
  38. +44 -40 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
  39. +4 -4 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtilClient.java
  40. +185 -190 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
  41. +14 -10 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
  42. +56 -33 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
  43. +76 -63 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
  44. +42 -35 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
  45. +8 -7 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithAuthenticationFilter.java
  46. +12 -11 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java
  47. +60 -46 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithRestCsrfPreventionFilter.java
  48. +77 -75 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
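
Every file in the patch follows the same mechanical JUnit 4 to JUnit 5 (Jupiter) mapping: lifecycle annotations are renamed and imports move to the org.junit.jupiter.api package. A minimal sketch of the renamed lifecycle hooks (the class name and method bodies are illustrative, not taken from the patch):

    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    public class LifecycleSketch {
      @BeforeEach   // JUnit 4: @Before
      public void setUp() { /* allocate per-test resources */ }

      @AfterEach    // JUnit 4: @After
      public void tearDown() { /* release them */ }

      @Test         // now org.junit.jupiter.api.Test, not org.junit.Test
      public void trivial() { }
    }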

+ 8 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestAdminHelper.java

@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test class to test Admin Helper.
@@ -28,11 +30,10 @@ public class TestAdminHelper {
   @Test
   public void prettifyExceptionWithNpe() {
     String pretty = AdminHelper.prettifyException(new NullPointerException());
-    Assert.assertTrue(
+    assertTrue(pretty.startsWith("NullPointerException at org.apache.hadoop.hdfs.tools"
+            + ".TestAdminHelper.prettifyExceptionWithNpe"),
         "Prettified exception message doesn't contain the required exception "
-            + "message",
-        pretty.startsWith("NullPointerException at org.apache.hadoop.hdfs.tools"
-            + ".TestAdminHelper.prettifyExceptionWithNpe"));
+            + "message");
   }
 
   @Test
@@ -42,7 +43,7 @@ public class TestAdminHelper {
         new IllegalArgumentException("Something is wrong",
             new IllegalArgumentException("Something is illegal")));
 
-    Assert.assertEquals(
+    assertEquals(
         "IllegalArgumentException: Something is wrong",
         pretty);
 

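The TestAdminHelper hunks above show the assertion change that recurs throughout the patch: JUnit 4's Assert methods take the failure message as the first argument, while JUnit 5's Assertions take it as the last. A minimal sketch of the swap (the string values are illustrative):

    import static org.junit.jupiter.api.Assertions.assertTrue;

    // JUnit 4: Assert.assertTrue("helpful failure message", condition);
    // JUnit 5: the condition comes first and the message moves to the end.
    public class MessageOrderSketch {
      @org.junit.jupiter.api.Test
      public void messageMovesLast() {
        String pretty = "NullPointerException at SomeClass.someMethod";
        assertTrue(pretty.startsWith("NullPointerException"),
            "Prettified exception message doesn't contain the required message");
      }
    }
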
+ 140 - 143
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java

@@ -93,28 +93,25 @@ import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.ToolRunner;
 
-import org.assertj.core.api.Assertions;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.Assert;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.MethodOrderer;
+import org.junit.jupiter.api.Order;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestMethodOrder;
+import org.junit.jupiter.api.Timeout;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsAdmin.TRASH_PERMISSION;
-import static org.hamcrest.CoreMatchers.allOf;
-import static org.hamcrest.CoreMatchers.anyOf;
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.not;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.hamcrest.CoreMatchers.containsString;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -122,6 +119,7 @@ import static org.mockito.Mockito.when;
 /**
  * set/clrSpaceQuote are tested in {@link org.apache.hadoop.hdfs.TestQuota}.
  */
+@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
 public class TestDFSAdmin {
   private static final Logger LOG = LoggerFactory.getLogger(TestDFSAdmin.class);
   private Configuration conf = null;
@@ -136,7 +134,7 @@ public class TestDFSAdmin {
   private String tempResource = null;
   private static final int NUM_DATANODES = 2;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf = new Configuration();
     conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
@@ -160,7 +158,7 @@ public class TestDFSAdmin {
     err.reset();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     try {
       System.out.flush();
@@ -240,7 +238,8 @@ public class TestDFSAdmin {
     scanner.close();
   }
 
-  @Test(timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testGetDatanodeInfo() throws Exception {
     redirectStream();
     final DFSAdmin dfsAdmin = new DFSAdmin(conf);
@@ -260,18 +259,18 @@ public class TestDFSAdmin {
       final List<String> outs = Lists.newArrayList();
       scanIntoList(out, outs);
       /* verify results */
-      assertEquals(
+      assertEquals(1, outs.size(),
           "One line per DataNode like: Uptime: XXX, Software version: x.y.z,"
-              + " Config version: core-x.y.z,hdfs-x",
-          1, outs.size());
-      assertThat(outs.get(0),
-          is(allOf(containsString("Uptime:"),
-              containsString("Software version"),
-              containsString("Config version"))));
+              + " Config version: core-x.y.z,hdfs-x");
+      assertThat(outs.get(0))
+          .contains("Uptime:")
+          .contains("Software version")
+          .contains("Config version");
     }
   }
 
-  @Test(timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testTriggerBlockReport() throws Exception {
     redirectStream();
     final DFSAdmin dfsAdmin = new DFSAdmin(conf);
@@ -291,12 +290,13 @@ public class TestDFSAdmin {
 
     scanIntoList(out, outs);
     assertEquals(1, outs.size());
-    assertThat(outs.get(0),
-        is(allOf(containsString("Triggering an incremental block report on "),
-            containsString(" to namenode "))));
+    assertThat(outs.get(0)).
+        contains("Triggering an incremental block report on ").
+        contains(" to namenode ");
   }
 
-  @Test(timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testGetVolumeReport() throws Exception {
     redirectStream();
     final DFSAdmin dfsAdmin = new DFSAdmin(conf);
@@ -320,7 +320,8 @@ public class TestDFSAdmin {
    * Test that if datanode is not reachable, some DFSAdmin commands will fail
    * elegantly with non-zero ret error code along with exception error message.
    */
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testDFSAdminUnreachableDatanode() throws Exception {
     redirectStream();
     final DFSAdmin dfsAdmin = new DFSAdmin(conf);
@@ -337,13 +338,14 @@ public class TestDFSAdmin {
       assertEquals(-1, ret);
 
       scanIntoList(out, outs);
-      assertTrue("Unexpected " + command + " stdout: " + out, outs.isEmpty());
-      assertTrue("Unexpected " + command + " stderr: " + err,
-          err.toString().contains("Exception"));
+      assertTrue(outs.isEmpty(), "Unexpected " + command + " stdout: " + out);
+      assertTrue(err.toString().contains("Exception"),
+          "Unexpected " + command + " stderr: " + err);
     }
   }
 
-  @Test(timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testDataNodeGetReconfigurableProperties() throws IOException, InterruptedException {
     final int port = datanode.getIpcPort();
     final String address = "localhost:" + port;
@@ -386,23 +388,23 @@ public class TestDFSAdmin {
     final int port = datanode.getIpcPort();
     final String address = "localhost:" + port;
 
-    assertThat(admin.startReconfiguration("datanode", address), is(0));
+    assertThat(admin.startReconfiguration("datanode", address)).isEqualTo(0);
 
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     awaitReconfigurationFinished("datanode", address, outs, errs);
 
     if (expectedSuccuss) {
-      assertThat(outs.size(), is(4));
+      assertThat(outs.size()).isEqualTo(4);
     } else {
-      assertThat(outs.size(), is(6));
+      assertThat(outs.size()).isEqualTo(6);
     }
 
     List<StorageLocation> locations = DataNode.getStorageLocations(
         datanode.getConf());
     if (expectedSuccuss) {
-      assertThat(locations.size(), is(1));
-      assertThat(new File(locations.get(0).getUri()), is(newDir));
+      assertThat(locations.size()).isEqualTo(1);
+      assertThat(new File(locations.get(0).getUri())).isEqualTo(newDir);
       // Verify the directory is appropriately formatted.
       assertTrue(new File(newDir, Storage.STORAGE_DIR_CURRENT).isDirectory());
     } else {
@@ -411,27 +413,28 @@ public class TestDFSAdmin {
 
     int offset = 1;
     if (expectedSuccuss) {
-      assertThat(outs.get(offset),
-          containsString("SUCCESS: Changed property " +
-              DFS_DATANODE_DATA_DIR_KEY));
+      assertThat(outs.get(offset)).
+          contains("SUCCESS: Changed property " +
+              DFS_DATANODE_DATA_DIR_KEY);
     } else {
-      assertThat(outs.get(offset),
-          containsString("FAILED: Change property " +
-              DFS_DATANODE_DATA_DIR_KEY));
+      assertThat(outs.get(offset)).
+          contains("FAILED: Change property " +
+              DFS_DATANODE_DATA_DIR_KEY);
     }
     File dnDir0 = cluster.getInstanceStorageDir(0, 0);
     File dnDir1 = cluster.getInstanceStorageDir(0, 1);
-    assertThat(outs.get(offset + 1), is(allOf(containsString("From:"),
-                containsString(dnDir0.getName()),
-                containsString(dnDir1.getName()))));
-    assertThat(outs.get(offset + 2),
-        is(not(anyOf(containsString(dnDir0.getName()),
-            containsString(dnDir1.getName())))));
-    assertThat(outs.get(offset + 2),
-        is(allOf(containsString("To"), containsString("data_new"))));
+    assertThat(outs.get(offset + 1)).
+        contains("From:").
+        contains(dnDir0.getName())
+        .contains(dnDir1.getName());
+    assertThat(outs.get(offset + 2))
+        .doesNotContain(dnDir0.getName())
+        .doesNotContain(dnDir1.getName());
+    assertThat(outs.get(offset + 2)).contains("To").contains("data_new");
   }
 
-  @Test(timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testDataNodeGetReconfigurationStatus() throws IOException,
       InterruptedException, TimeoutException {
     testDataNodeGetReconfigurationStatus(true);
@@ -439,7 +442,8 @@ public class TestDFSAdmin {
     testDataNodeGetReconfigurationStatus(false);
   }
 
-  @Test(timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testNameNodeGetReconfigurableProperties() throws IOException, InterruptedException {
     final String address = namenode.getHostAndPort();
     final List<String> outs = Lists.newArrayList();
@@ -485,7 +489,8 @@ public class TestDFSAdmin {
     }, 100, 100 * 100);
   }
 
-  @Test(timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testPrintTopology() throws Exception {
     redirectStream();
 
@@ -518,23 +523,23 @@ public class TestDFSAdmin {
 
       /* verify results */
       assertEquals(0, ret);
-      assertEquals(
+      assertEquals(12, outs.size(),
           "There should be three lines per Datanode: the 1st line is"
               + " rack info, 2nd node info, 3rd empty line. The total"
-              + " should be as a result of 3 * numDn.",
-          12, outs.size());
-      assertThat(outs.get(0),
-          is(allOf(containsString("Rack:"), containsString("/d1/r1"))));
-      assertThat(outs.get(3),
-          is(allOf(containsString("Rack:"), containsString("/d1/r2"))));
-      assertThat(outs.get(6),
-          is(allOf(containsString("Rack:"), containsString("/d2/r1"))));
-      assertThat(outs.get(9),
-          is(allOf(containsString("Rack:"), containsString("/d2/r2"))));
+              + " should be as a result of 3 * numDn.");
+      assertThat(outs.get(0)).
+          contains("Rack:").contains("/d1/r1");
+      assertThat(outs.get(3)).
+          contains("Rack:").contains("/d1/r2");
+      assertThat(outs.get(6)).
+          contains("Rack:").contains("/d2/r1");
+      assertThat(outs.get(9)).
+          contains("Rack:").contains("/d2/r2");
     }
   }
 
-  @Test(timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testPrintTopologyWithStatus() throws Exception {
     redirectStream();
     final Configuration dfsConf = new HdfsConfiguration();
@@ -580,7 +585,8 @@ public class TestDFSAdmin {
     }
   }
 
-  @Test(timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testNameNodeGetReconfigurationStatus() throws IOException,
       InterruptedException, TimeoutException {
     ReconfigurationUtil ru = mock(ReconfigurationUtil.class);
@@ -596,35 +602,28 @@ public class TestDFSAdmin {
         "randomKey", "new123", "old456"));
     when(ru.parseChangedProperties(any(Configuration.class),
         any(Configuration.class))).thenReturn(changes);
-    assertThat(admin.startReconfiguration("namenode", address), is(0));
+    assertThat(admin.startReconfiguration("namenode", address)).isEqualTo(0);
 
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     awaitReconfigurationFinished("namenode", address, outs, errs);
 
     // verify change
-    assertEquals(
-        DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value",
-        6,
-        namenode
-          .getConf()
-          .getLong(DFS_HEARTBEAT_INTERVAL_KEY,
-                DFS_HEARTBEAT_INTERVAL_DEFAULT));
-    assertEquals(DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value",
-        6,
-        namenode
-          .getNamesystem()
-          .getBlockManager()
-          .getDatanodeManager()
-          .getHeartbeatInterval());
+    assertEquals(6, namenode
+        .getConf()
+        .getLong(DFS_HEARTBEAT_INTERVAL_KEY,
+            DFS_HEARTBEAT_INTERVAL_DEFAULT), DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value");
+    assertEquals(6, namenode
+        .getNamesystem()
+        .getBlockManager()
+        .getDatanodeManager()
+        .getHeartbeatInterval(), DFS_HEARTBEAT_INTERVAL_KEY + " has wrong value");
 
     int offset = 1;
-    assertThat(outs.get(offset), containsString("SUCCESS: Changed property "
-        + DFS_HEARTBEAT_INTERVAL_KEY));
-    assertThat(outs.get(offset + 1),
-        is(allOf(containsString("From:"), containsString("3"))));
-    assertThat(outs.get(offset + 2),
-        is(allOf(containsString("To:"), containsString("6"))));
+    assertThat(outs.get(offset)).contains("SUCCESS: Changed property "
+        + DFS_HEARTBEAT_INTERVAL_KEY);
+    assertThat(outs.get(offset + 1)).contains("From:").contains("3");
+    assertThat(outs.get(offset + 2)).contains("To:").contains("6");
   }
 
   private static String scanIntoString(final ByteArrayOutputStream baos) {
@@ -657,7 +656,8 @@ public class TestDFSAdmin {
     }, 1000, 60000);
   }
 
-  @Test(timeout = 180000)
+  @Test
+  @Timeout(value = 180)
   public void testReportCommand() throws Exception {
     tearDown();
     redirectStream();
@@ -702,8 +702,7 @@ public class TestDFSAdmin {
       LocatedBlocks lbs = miniCluster.getFileSystem().getClient().
           getNamenode().getBlockLocations(
           file.toString(), 0, fileLength);
-      assertTrue("Unexpected block type: " + lbs.get(0),
-          lbs.get(0) instanceof LocatedBlock);
+      assertTrue(lbs.get(0) instanceof LocatedBlock, "Unexpected block type: " + lbs.get(0));
       LocatedBlock locatedBlock = lbs.get(0);
       DatanodeInfo locatedDataNode = locatedBlock.getLocations()[0];
       LOG.info("Replica block located on: " + locatedDataNode);
@@ -725,7 +724,6 @@ public class TestDFSAdmin {
       resetStream();
       assertEquals(0, ToolRunner.run(dfsAdmin, new String[] {"-report"}));
       verifyNodesAndCorruptBlocks(numDn, numDn, 0, 0, client, 0L, 0L);
-
       // Choose a DataNode to shutdown
       final List<DataNode> datanodes = miniCluster.getDataNodes();
       DataNode dataNodeToShutdown = null;
@@ -736,9 +734,7 @@ public class TestDFSAdmin {
           break;
         }
       }
-      assertTrue("Unable to choose a DataNode to shutdown!",
-          dataNodeToShutdown != null);
-
+      assertTrue(dataNodeToShutdown != null, "Unable to choose a DataNode to shutdown!");
       // Shut down the DataNode not hosting the replicated block
       LOG.info("Shutting down: " + dataNodeToShutdown);
       dataNodeToShutdown.shutdown();
@@ -751,8 +747,8 @@ public class TestDFSAdmin {
       // Corrupt the replicated block
       final int blockFilesCorrupted = miniCluster
           .corruptBlockOnDataNodes(block);
-      assertEquals("Fail to corrupt all replicas for block " + block,
-          replFactor, blockFilesCorrupted);
+      assertEquals(replFactor, blockFilesCorrupted,
+          "Fail to corrupt all replicas for block " + block);
 
       try {
         IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(),
@@ -779,8 +775,7 @@ public class TestDFSAdmin {
       lbs = miniCluster.getFileSystem().getClient().
           getNamenode().getBlockLocations(
           ecFile.toString(), 0, blockGroupSize);
-      assertTrue("Unexpected block type: " + lbs.get(0),
-          lbs.get(0) instanceof LocatedStripedBlock);
+      assertTrue(lbs.get(0) instanceof LocatedStripedBlock, "Unexpected block type: " + lbs.get(0));
       LocatedStripedBlock bg =
           (LocatedStripedBlock)(lbs.get(0));
 
@@ -811,7 +806,8 @@ public class TestDFSAdmin {
     }
   }
 
-  @Test(timeout = 300000L)
+  @Test
+  @Timeout(300)
   public void testListOpenFiles() throws Exception {
     redirectStream();
 
@@ -923,7 +919,7 @@ public class TestDFSAdmin {
           new String[] {"-listOpenFiles", "-path", "/invalid_path"}));
       outStr = scanIntoString(out);
       for (Path openFilePath : openFilesMap.keySet()) {
-        assertThat(outStr, not(containsString(openFilePath.toString())));
+        assertThat(outStr).doesNotContain(openFilePath.toString());
       }
       DFSTestUtil.closeOpenFiles(openFilesMap, openFilesMap.size());
     }
@@ -935,15 +931,14 @@ public class TestDFSAdmin {
     LOG.info("dfsadmin -listOpenFiles output: \n" + out);
     if (closedFileSet != null) {
       for (Path closedFilePath : closedFileSet) {
-        assertThat(outStr,
-            not(containsString(closedFilePath.toString() +
-                System.lineSeparator())));
+        assertThat(outStr).doesNotContain(closedFilePath.toString() +
+            System.lineSeparator());
       }
     }
 
     for (Path openFilePath : openFilesMap.keySet()) {
-      assertThat(outStr, is(containsString(openFilePath.toString() +
-          System.lineSeparator())));
+      assertThat(outStr).contains(openFilePath.toString() +
+          System.lineSeparator());
     }
   }
 
@@ -979,12 +974,12 @@ public class TestDFSAdmin {
         highestPriorityLowRedundancyReplicatedBlocks);
 
     // verify nodes and corrupt blocks
-    assertThat(outStr, is(allOf(
-        containsString(expectedLiveNodesStr),
-        containsString(expectedCorruptedBlocksStr),
-        containsString(expectedCorruptedECBlockGroupsStr),
-        containsString(highestPriorityLowRedundancyReplicatedBlocksStr),
-        containsString(highestPriorityLowRedundancyECBlocksStr))));
+    assertThat(outStr).
+        contains(expectedLiveNodesStr).
+        contains(expectedCorruptedBlocksStr).
+        contains(expectedCorruptedECBlockGroupsStr).
+        contains(highestPriorityLowRedundancyReplicatedBlocksStr).
+        contains(highestPriorityLowRedundancyECBlocksStr);
 
     assertEquals(
         numDn,
@@ -1117,23 +1112,23 @@ public class TestDFSAdmin {
     assertEquals(0, ToolRunner.run(dfsAdmin,
         new String[]{"-setBalancerBandwidth", "10000"}));
     outStr = scanIntoString(out);
-    assertTrue("Did not set bandwidth!", outStr.contains("Balancer " +
-        "bandwidth is set to 10000"));
+    assertTrue(outStr.contains("Balancer " +
+        "bandwidth is set to 10000"), "Did not set bandwidth!");
 
     // Test parsing with units
     resetStream();
     assertEquals(0, ToolRunner.run(dfsAdmin,
         new String[]{"-setBalancerBandwidth", "10m"}));
     outStr = scanIntoString(out);
-    assertTrue("Did not set bandwidth!", outStr.contains("Balancer " +
-        "bandwidth is set to 10485760"));
+    assertTrue(outStr.contains("Balancer " +
+        "bandwidth is set to 10485760"), "Did not set bandwidth!");
 
     resetStream();
     assertEquals(0, ToolRunner.run(dfsAdmin,
         new String[]{"-setBalancerBandwidth", "10k"}));
     outStr = scanIntoString(out);
-    assertTrue("Did not set bandwidth!", outStr.contains("Balancer " +
-        "bandwidth is set to 10240"));
+    assertTrue(outStr.contains("Balancer " +
+        "bandwidth is set to 10240"), "Did not set bandwidth!");
 
     // Test negative numbers
     assertEquals(-1, ToolRunner.run(dfsAdmin,
@@ -1142,7 +1137,8 @@ public class TestDFSAdmin {
         new String[]{"-setBalancerBandwidth", "-10m"}));
   }
 
-  @Test(timeout = 300000L)
+  @Test
+  @Timeout(300)
   public void testCheckNumOfBlocksInReportCommand() throws Exception {
     DistributedFileSystem dfs = cluster.getFileSystem();
     Path path = new Path("/tmp.txt");
@@ -1207,9 +1203,9 @@ public class TestDFSAdmin {
         }
       });
     } catch (RemoteException re) {
-      Assert.assertTrue(re.unwrapRemoteException()
+      assertTrue(re.unwrapRemoteException()
           instanceof AccessControlException);
-      Assert.assertTrue(re.unwrapRemoteException().getMessage()
+      assertTrue(re.unwrapRemoteException().getMessage()
           .equals("User: " + realUser +
               " is not allowed to impersonate " + proxyUser));
     }
@@ -1239,6 +1235,7 @@ public class TestDFSAdmin {
   }
 
   @Test
+  @Order(1)
   public void testAllDatanodesReconfig()
       throws IOException, InterruptedException, TimeoutException {
     ReconfigurationUtil reconfigurationUtil = mock(ReconfigurationUtil.class);
@@ -1253,7 +1250,7 @@ public class TestDFSAdmin {
         any(Configuration.class))).thenReturn(changes);
 
     int result = admin.startReconfiguration("datanode", "livenodes");
-    Assertions.assertThat(result).isEqualTo(0);
+    assertThat(result).isEqualTo(0);
     final List<String> outsForStartReconf = new ArrayList<>();
     final List<String> errsForStartReconf = new ArrayList<>();
     reconfigurationOutErrFormatter("startReconfiguration", "datanode",
@@ -1261,21 +1258,21 @@ public class TestDFSAdmin {
     String started = "Started reconfiguration task on node";
     String starting =
         "Starting of reconfiguration task successful on 2 nodes, failed on 0 nodes.";
-    Assertions.assertThat(outsForStartReconf).hasSize(3);
-    Assertions.assertThat(errsForStartReconf).hasSize(0);
-    Assertions.assertThat(outsForStartReconf.get(0)).startsWith(started);
-    Assertions.assertThat(outsForStartReconf.get(1)).startsWith(started);
-    Assertions.assertThat(outsForStartReconf.get(2)).startsWith(starting);
+    assertThat(outsForStartReconf).hasSize(3);
+    assertThat(errsForStartReconf).hasSize(0);
+    assertThat(outsForStartReconf.get(0)).startsWith(started);
+    assertThat(outsForStartReconf.get(1)).startsWith(started);
+    assertThat(outsForStartReconf.get(2)).startsWith(starting);
 
     Thread.sleep(1000);
     final List<String> outs = new ArrayList<>();
     final List<String> errs = new ArrayList<>();
     awaitReconfigurationFinished("datanode", "livenodes", outs, errs);
-    Assertions.assertThat(outs).hasSize(9);
-    Assertions.assertThat(errs).hasSize(0);
+    assertThat(outs).hasSize(9);
+    assertThat(errs).hasSize(0);
     LOG.info("dfsadmin -status -livenodes output:");
     outs.forEach(s -> LOG.info("{}", s));
-    Assertions.assertThat(outs.get(0)).startsWith("Reconfiguring status for node");
+    assertThat(outs.get(0)).startsWith("Reconfiguring status for node");
 
     String success = "SUCCESS: Changed property dfs.datanode.peer.stats.enabled";
     String from = "\tFrom: \"false\"";
@@ -1283,8 +1280,8 @@ public class TestDFSAdmin {
     String retrieval =
         "Retrieval of reconfiguration status successful on 2 nodes, failed on 0 nodes.";
 
-    Assertions.assertThat(outs.subList(1, 5)).containsSubsequence(success, from, to);
-    Assertions.assertThat(outs.subList(5, 9)).containsSubsequence(success, from, to, retrieval);
+    assertThat(outs.subList(1, 5)).containsSubsequence(success, from, to);
+    assertThat(outs.subList(5, 9)).containsSubsequence(success, from, to, retrieval);
   }
 
   @Test
@@ -1324,11 +1321,11 @@ public class TestDFSAdmin {
       String started = "Started reconfiguration task on node";
       String starting =
           "Starting of reconfiguration task successful on 2 nodes, failed on 0 nodes.";
-      Assertions.assertThat(outsForStartReconf).hasSize(3);
-      Assertions.assertThat(errsForStartReconf).hasSize(0);
-      Assertions.assertThat(outsForStartReconf.get(0)).startsWith(started);
-      Assertions.assertThat(outsForStartReconf.get(1)).startsWith(started);
-      Assertions.assertThat(outsForStartReconf.get(2)).startsWith(starting);
+      assertThat(outsForStartReconf).hasSize(3);
+      assertThat(errsForStartReconf).hasSize(0);
+      assertThat(outsForStartReconf.get(0)).startsWith(started);
+      assertThat(outsForStartReconf.get(1)).startsWith(started);
+      assertThat(outsForStartReconf.get(2)).startsWith(starting);
 
       // verify getReconfigurationStatus results is as expected
       Thread.sleep(1000);
@@ -1344,9 +1341,9 @@ public class TestDFSAdmin {
       String retrieval =
           "Retrieval of reconfiguration status successful on 2 nodes, failed on 0 nodes.";
 
-      Assertions.assertThat(outsForFinishReconf.subList(1, 5)).
+      assertThat(outsForFinishReconf.subList(1, 5)).
           containsSubsequence(success, from, to);
-      Assertions.assertThat(outsForFinishReconf.subList(5, 9)).
+      assertThat(outsForFinishReconf.subList(5, 9)).
           containsSubsequence(success, from, to, retrieval);
 
       // verify refreshed decommissioningNode is as expected

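Beyond the argument-order swap, the TestDFSAdmin diff above bundles three more migration idioms: @Test(timeout = 30000) becomes @Test plus @Timeout(value = 30), since JUnit 5's @Timeout defaults to seconds rather than milliseconds; Hamcrest matchers such as is(allOf(containsString(...))) are replaced by AssertJ's fluent assertThat(...).contains(...); and testAllDatanodesReconfig gains @Order(1) under the new class-level @TestMethodOrder(MethodOrderer.OrderAnnotation.class). A condensed sketch of all three (class name and test data are illustrative):

    import static org.assertj.core.api.Assertions.assertThat;

    import org.junit.jupiter.api.MethodOrderer;
    import org.junit.jupiter.api.Order;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.TestMethodOrder;
    import org.junit.jupiter.api.Timeout;

    // Ordering is opt-in: @Order is only honored under this annotation.
    @TestMethodOrder(MethodOrderer.OrderAnnotation.class)
    public class MigrationIdiomsSketch {

      @Test
      @Timeout(value = 30) // seconds; replaces @Test(timeout = 30000) in ms
      @Order(1)            // runs before unordered tests in this class
      public void outputContainsExpectedFields() {
        String line = "Uptime: 42, Software version: 3.4.0, Config version: core-3.4";
        // AssertJ chain replacing Hamcrest's is(allOf(containsString(...), ...))
        assertThat(line)
            .contains("Uptime:")
            .contains("Software version")
            .contains("Config version");
      }
    }
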
+ 159 - 114
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java

@@ -34,13 +34,14 @@ import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class TestDFSAdminWithHA {
 
@@ -125,7 +126,7 @@ public class TestDFSAdminWithHA {
     conf.setInt(HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY, 0);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     try {
       System.out.flush();
@@ -144,91 +145,96 @@ public class TestDFSAdminWithHA {
     err.reset();
   }
 
-  @Test(timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testSetSafeMode() throws Exception {
     setUpHaCluster(false);
     // Enter safemode
     int exitCode = admin.run(new String[] {"-safemode", "enter"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Safe mode is ON in.*";
     assertOutputMatches(message + newLine + message + newLine);
 
     // Get safemode
     exitCode = admin.run(new String[] {"-safemode", "get"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = "Safe mode is ON in.*";
     assertOutputMatches(message + newLine + message + newLine);
 
     // Leave safemode
     exitCode = admin.run(new String[] {"-safemode", "leave"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = "Safe mode is OFF in.*";
     assertOutputMatches(message + newLine + message + newLine);
 
     // Get safemode
     exitCode = admin.run(new String[] {"-safemode", "get"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = "Safe mode is OFF in.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testSaveNamespace() throws Exception {
     setUpHaCluster(false);
     // Safe mode should be turned ON in order to create namespace image.
     int exitCode = admin.run(new String[] {"-safemode", "enter"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Safe mode is ON in.*";
     assertOutputMatches(message + newLine + message + newLine);
 
     exitCode = admin.run(new String[] {"-saveNamespace"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = "Save namespace successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testSaveNamespaceNN1UpNN2Down() throws Exception {
     setUpHaCluster(false);
     // Safe mode should be turned ON in order to create namespace image.
     int exitCode = admin.run(new String[] {"-safemode", "enter"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Safe mode is ON in.*";
     assertOutputMatches(message + newLine + message + newLine);
 
     cluster.getDfsCluster().shutdownNameNode(1);
 //
     exitCode = admin.run(new String[] {"-saveNamespace"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String outMessage = "Save namespace successful for.*" + newLine;
     String errMessage = "Save namespace failed for ([\\s\\S]*)" + newLine;
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testSaveNamespaceNN1DownNN2Up() throws Exception {
     setUpHaCluster(false);
     // Safe mode should be turned ON in order to create namespace image.
     int exitCode = admin.run(new String[] {"-safemode", "enter"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Safe mode is ON in.*";
     assertOutputMatches(message + newLine + message + newLine);
 
     cluster.getDfsCluster().shutdownNameNode(0);
 
     exitCode = admin.run(new String[] {"-saveNamespace"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String errMessage = "Save namespace failed for ([\\s\\S]*)" + newLine;
     String outMessage = "Save namespace successful for.*" + newLine;
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testSaveNamespaceNN1DownNN2Down() throws Exception {
     setUpHaCluster(false);
     // Safe mode should be turned ON in order to create namespace image.
     int exitCode = admin.run(new String[] {"-safemode", "enter"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Safe mode is ON in.*";
     assertOutputMatches(message + newLine + message + newLine);
 
@@ -236,172 +242,183 @@ public class TestDFSAdminWithHA {
     cluster.getDfsCluster().shutdownNameNode(1);
 
     exitCode = admin.run(new String[] {"-saveNamespace"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     message = "Save namespace failed for ([\\s\\S]*)";
     assertOutputMatches(message + newLine + message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRestoreFailedStorage() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "restoreFailedStorage is set to false for.*";
     // Default is false
     assertOutputMatches(message + newLine + message + newLine);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = "restoreFailedStorage is set to true for.*";
     assertOutputMatches(message + newLine + message + newLine);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = "restoreFailedStorage is set to false for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRestoreFailedStorageNN1UpNN2Down() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String outMessage = "restoreFailedStorage is set to false for.*" + newLine;
     String errMessage = "restoreFailedStorage failed for ([\\s\\S]*)" + newLine;
     // Default is false
     assertOutputMatches(outMessage, errMessage);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     outMessage = "restoreFailedStorage is set to true for.*" + newLine;
     errMessage = "restoreFailedStorage failed for ([\\s\\S]*)" + newLine;
     assertOutputMatches(outMessage, errMessage);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     outMessage = "restoreFailedStorage is set to false for.*" + newLine;
     errMessage = "restoreFailedStorage failed for ([\\s\\S]*)" + newLine;
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRestoreFailedStorageNN1DownNN2Up() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String errMessage = "restoreFailedStorage failed for ([\\s\\S]*)" + newLine;
     String outMessage = "restoreFailedStorage is set to false for.*" + newLine;
     // Default is false
     assertOutputMatches(outMessage, errMessage);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     errMessage = "restoreFailedStorage failed for ([\\s\\S]*)" + newLine;
     outMessage = "restoreFailedStorage is set to true for.*" + newLine;
     assertOutputMatches(outMessage, errMessage);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     errMessage = "restoreFailedStorage failed for ([\\s\\S]*)" + newLine;
     outMessage = "restoreFailedStorage is set to false for.*" + newLine;
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRestoreFailedStorageNN1DownNN2Down() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "restoreFailedStorage failed for ([\\s\\S]*)";
     // Default is false
     assertOutputMatches(message + newLine + message + newLine);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     message = "restoreFailedStorage failed for ([\\s\\S]*)";
     assertOutputMatches(message + newLine + message + newLine);
 
     exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     message = "restoreFailedStorage failed for ([\\s\\S]*)";
     assertOutputMatches(message + newLine + message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshNodes() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-refreshNodes"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Refresh nodes successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshNodesNN1UpNN2Down() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshNodes"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String outMessage = "Refresh nodes successful for .*" + newLine;
     String errMessage = "Refresh nodes failed for ([\\s\\S]*)" + newLine;
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshNodesNN1DownNN2Up() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(new String[] {"-refreshNodes"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String errMessage = "Refresh nodes failed for ([\\s\\S]*)" + newLine;
     String outMessage = "Refresh nodes successful for .*" + newLine;
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshNodesNN1DownNN2Down() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshNodes"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Refresh nodes failed for ([\\s\\S]*)";
     assertOutputMatches(message + newLine + message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testSetBalancerBandwidth() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().transitionToActive(0);
 
     int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Balancer bandwidth is set to 10";
     assertOutputMatches(message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testSetBalancerBandwidthNN1UpNN2Down() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(1);
     cluster.getDfsCluster().transitionToActive(0);
     int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Balancer bandwidth is set to 10";
     assertOutputMatches(message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testSetBalancerBandwidthNN1DownNN2Up() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().transitionToActive(1);
     int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Balancer bandwidth is set to 10";
     assertOutputMatches(message + newLine);
   }
@@ -412,25 +429,27 @@ public class TestDFSAdminWithHA {
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "Balancer bandwidth is set failed." + newLine
         + ".*" + newLine;
     assertOutputMatches(message);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testSetNegativeBalancerBandwidth() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "-10"});
-    assertEquals("Negative bandwidth value must fail the command", -1, exitCode);
+    assertEquals(-1, exitCode, "Negative bandwidth value must fail the command");
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testMetaSave() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().transitionToActive(0);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String messageFromActiveNN = "Created metasave file dfs.meta "
         + "in the log directory of namenode.*";
     String messageFromStandbyNN = "Skip Standby NameNode, since it "
@@ -439,13 +458,14 @@ public class TestDFSAdminWithHA {
         messageFromStandbyNN + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testMetaSaveNN1UpNN2Down() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().transitionToActive(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String outMessage = "Created metasave file dfs.meta in the log " +
             "directory of namenode.*" + newLine;
     String errMessage = "Created metasave file dfs.meta in the log " +
@@ -453,13 +473,14 @@ public class TestDFSAdminWithHA {
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testMetaSaveNN1DownNN2Up() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().transitionToActive(1);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String errMessage = "Created metasave file dfs.meta in the log " +
             "directory of namenode.*failed" + newLine + ".*" + newLine;
     String outMessage = "Created metasave file dfs.meta in the log " +
@@ -467,120 +488,131 @@ public class TestDFSAdminWithHA {
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testMetaSaveNN1DownNN2Down() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "([\\s\\S]*)2 exceptions([\\s\\S]*)";
     assertOutputMatches(message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshServiceAcl() throws Exception {
     setUpHaCluster(true);
     int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Refresh service acl successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshServiceAclNN1UpNN2Down() throws Exception {
     setUpHaCluster(true);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String outMessage = "Refresh service acl successful for.*" + newLine;
     String errMessage = "Refresh service acl failed for([\\s\\S]*)" + newLine;
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshServiceAclNN1DownNN2Up() throws Exception {
     setUpHaCluster(true);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String errMessage = "Refresh service acl failed for([\\s\\S]*)" + newLine;
     String outMessage = "Refresh service acl successful for.*" + newLine;
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshServiceAclNN1DownNN2Down() throws Exception {
     setUpHaCluster(true);
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "([\\s\\S]*)2 exceptions([\\s\\S]*)";
     assertOutputMatches(message + newLine);
   }
 
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshUserToGroupsMappings() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Refresh user to groups mapping successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshUserToGroupsMappingsNN1UpNN2Down() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String outMessage = "Refresh user to groups mapping successful for.*" + newLine;
     String errMessage = "Refresh user to groups mapping failed for([\\s\\S]*)" + newLine;
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshUserToGroupsMappingsNN1DownNN2Up() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String errMessage = "Refresh user to groups mapping failed for([\\s\\S]*)" + newLine;
     String outMessage = "Refresh user to groups mapping successful for.*" + newLine;
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshUserToGroupsMappingsNN1DownNN2Down() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "([\\s\\S]*)2 exceptions([\\s\\S]*)";
     assertOutputMatches(message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshSuperUserGroupsConfiguration() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(
         new String[] {"-refreshSuperUserGroupsConfiguration"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Refresh super user groups configuration successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshSuperUserGroupsConfigurationNN1UpNN2Down()
       throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(
         new String[] {"-refreshSuperUserGroupsConfiguration"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String outMessage = "Refresh super user groups configuration successful for.*"
             + newLine;
     String errMessage = "Refresh super user groups configuration failed for([\\s\\S]*)"
@@ -588,14 +620,15 @@ public class TestDFSAdminWithHA {
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshSuperUserGroupsConfigurationNN1DownNN2Up()
       throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(
         new String[] {"-refreshSuperUserGroupsConfiguration"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String errMessage = "Refresh super user groups configuration failed for([\\s\\S]*)"
             + newLine;
     String outMessage = "Refresh super user groups configuration successful for.*"
@@ -603,7 +636,8 @@ public class TestDFSAdminWithHA {
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshSuperUserGroupsConfigurationNN1DownNN2Down()
       throws Exception {
     setUpHaCluster(false);
@@ -611,104 +645,113 @@ public class TestDFSAdminWithHA {
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(
         new String[] {"-refreshSuperUserGroupsConfiguration"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "([\\s\\S]*)2 exceptions([\\s\\S]*)";
     assertOutputMatches(message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshCallQueue() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-refreshCallQueue"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     String message = "Refresh call queue successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshCallQueueNN1UpNN2Down() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshCallQueue"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String outMessage = "Refresh call queue successful for.*" + newLine;
     String errMessage = "Refresh call queue failed for([\\s\\S]*)" + newLine;
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshCallQueueNN1DownNN2Up() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     int exitCode = admin.run(new String[] {"-refreshCallQueue"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String errMessage = "Refresh call queue failed for([\\s\\S]*)" + newLine;
     String outMessage = "Refresh call queue successful for.*" + newLine;
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testRefreshCallQueueNN1DownNN2Down() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-refreshCallQueue"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "([\\s\\S]*)2 exceptions([\\s\\S]*)";
     assertOutputMatches(message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testFinalizeUpgrade() throws Exception {
     setUpHaCluster(false);
     int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = ".*Cannot finalize with no NameNode active";
     assertOutputMatches(message + newLine);
 
     cluster.getDfsCluster().transitionToActive(0);
     exitCode = admin.run(new String[] {"-finalizeUpgrade"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = "Finalize upgrade successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testFinalizeUpgradeNN1UpNN2Down() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(1);
     cluster.getDfsCluster().transitionToActive(0);
     int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String outMessage = "Finalize upgrade successful for .*" + newLine;
     String errMessage = "Finalize upgrade failed for ([\\s\\S]*)" + newLine;
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testFinalizeUpgradeNN1DownNN2Up() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().transitionToActive(1);
     int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String errMessage = "Finalize upgrade failed for ([\\s\\S]*)" + newLine;
     String outMessage = "Finalize upgrade successful for .*" + newLine;
     assertOutputMatches(outMessage, errMessage);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testFinalizeUpgradeNN1DownNN2Down() throws Exception {
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = ".*2 exceptions.*";
     assertOutputMatches(message + newLine);
   }
 
-  @Test (timeout = 300000)
+  @Test
+  @Timeout(value = 300)
   public void testUpgradeCommand() throws Exception {
     final String finalizedMsg = "Upgrade finalized for.*";
     final String notFinalizedMsg = "Upgrade not finalized for.*";
@@ -756,7 +799,7 @@ public class TestDFSAdminWithHA {
 
     // Finalize the upgrade
     int exitCode = admin.run(new String[] {"-upgrade", "finalize"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
     message = finalizeSuccessMsg + newLine + finalizeSuccessMsg + newLine;
     assertOutputMatches(message);
 
@@ -768,26 +811,28 @@ public class TestDFSAdminWithHA {
   private void verifyUpgradeQueryOutput(String message, int expected) throws
       Exception {
     int exitCode = admin.run(new String[] {"-upgrade", "query"});
-    assertEquals(err.toString().trim(), expected, exitCode);
+    assertEquals(expected, exitCode, err.toString().trim());
     assertOutputMatches(message);
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testListOpenFilesNN1UpNN2Down() throws Exception{
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(1);
     cluster.getDfsCluster().transitionToActive(0);
     int exitCode = admin.run(new String[] {"-listOpenFiles"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testListOpenFilesNN1DownNN2Up() throws Exception{
     setUpHaCluster(false);
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().transitionToActive(1);
     int exitCode = admin.run(new String[] {"-listOpenFiles"});
-    assertEquals(err.toString().trim(), 0, exitCode);
+    assertEquals(0, exitCode, err.toString().trim());
   }
 
   @Test
@@ -796,7 +841,7 @@ public class TestDFSAdminWithHA {
     cluster.getDfsCluster().shutdownNameNode(0);
     cluster.getDfsCluster().shutdownNameNode(1);
     int exitCode = admin.run(new String[] {"-listOpenFiles"});
-    assertNotEquals(err.toString().trim(), 0, exitCode);
+    assertNotEquals(0, exitCode, err.toString().trim());
     String message = "List open files failed." + newLine
             + ".*" + newLine;
     assertOutputMatches(message);
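
Note: the hunks above follow the two mechanical patterns this migration applies throughout TestDFSAdminWithHA. JUnit 4 declared timeouts in milliseconds inside the @Test annotation and took the optional assertion message as the first argument; JUnit Jupiter moves the timeout to a dedicated @Timeout annotation, whose value defaults to seconds, and moves the message to the last argument. A minimal before/after sketch, reusing the admin and err fields of this test class (the method name is illustrative):

    // JUnit 4: timeout in milliseconds, message-first assertion
    @Test (timeout = 30000)
    public void testExitCode() throws Exception {
      int exitCode = admin.run(new String[] {"-refreshCallQueue"});
      assertEquals(err.toString().trim(), 0, exitCode);
    }

    // JUnit 5: @Timeout(value = 30) defaults to TimeUnit.SECONDS, message last
    @Test
    @Timeout(value = 30)
    public void testExitCode() throws Exception {
      int exitCode = admin.run(new String[] {"-refreshCallQueue"});
      assertEquals(0, exitCode, err.toString().trim());
    }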

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java

@@ -18,9 +18,9 @@
 
 package org.apache.hadoop.hdfs.tools;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -44,8 +44,8 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.test.MockitoUtil;
 import org.apache.hadoop.util.Shell;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
 
@@ -112,7 +112,7 @@ public class TestDFSHAAdmin {
         FENCER_FALSE_COMMAND_WINDOWS : FENCER_FALSE_COMMAND_UNIX;
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     mockProtocol = MockitoUtil.mockProtocol(HAServiceProtocol.class);
     mockZkfcProtocol = MockitoUtil.mockProtocol(ZKFCProtocol.class);
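
Note: alongside the assertion imports moving from org.junit.Assert to org.junit.jupiter.api.Assertions, the lifecycle annotations are renamed one-for-one. Only @Before occurs in this file; for reference, the standard mapping (the class-level pair still requires static methods, as in JUnit 4) is:

    // JUnit 4        ->  JUnit 5 (org.junit.jupiter.api)
    // @Before        ->  @BeforeEach
    // @After         ->  @AfterEach
    // @BeforeClass   ->  @BeforeAll
    // @AfterClass    ->  @AfterAll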

+ 15 - 16
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java

@@ -18,9 +18,9 @@
 package org.apache.hadoop.hdfs.tools;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -42,9 +42,9 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
 import org.slf4j.event.Level;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.io.Files;
@@ -69,7 +69,7 @@ public class TestDFSHAAdminMiniCluster {
 
   private int nn1Port;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     conf = new Configuration();
     conf.setBoolean(DFS_HA_NN_NOT_BECOME_ACTIVE_IN_SAFEMODE, true);
@@ -84,7 +84,7 @@ public class TestDFSHAAdminMiniCluster {
     nn1Port = cluster.getNameNodePort(0);
   }
 
-  @After
+  @AfterEach
   public void shutdown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -176,13 +176,13 @@ public class TestDFSHAAdminMiniCluster {
     System.setIn(new ByteArrayInputStream("yes\n".getBytes()));
     int result = admin.run(
         new String[]{"-transitionToObserver", "-forcemanual", "nn1"});
-    assertEquals("State transition returned: " + result, -1, result);
+    assertEquals(-1, result, "State transition returned: " + result);
 
     NameNodeAdapter.leaveSafeMode(cluster.getNameNode(0));
     System.setIn(new ByteArrayInputStream("yes\n".getBytes()));
     int result1 = admin.run(
         new String[]{"-transitionToObserver", "-forcemanual", "nn1"});
-    assertEquals("State transition returned: " + result1, 0, result1);
+    assertEquals(0, result1, "State transition returned: " + result1);
     assertFalse(cluster.getNameNode(0).isInSafeMode());
   }
 
@@ -194,9 +194,8 @@ public class TestDFSHAAdminMiniCluster {
 
     NameNodeAdapter.enterSafeMode(cluster.getNameNode(0), false);
     assertEquals(-1, runTool("-failover", "nn2", "nn1"));
-    assertTrue("Bad output: " + errOutput,
-        errOutput.contains("is not ready to become active: " +
-            "The NameNode is in safemode"));
+    assertTrue(errOutput.contains("is not ready to become active: " +
+        "The NameNode is in safemode"), "Bad output: " + errOutput);
   }
     
   /**
@@ -301,8 +300,8 @@ public class TestDFSHAAdminMiniCluster {
     runTool("-transitionToActive", "nn1");
     runTool("-transitionToActive", "nn2");
 
-    assertFalse("Both namenodes cannot be active", nn1.isActiveState() 
-        && nn2.isActiveState());
+    assertFalse(nn1.isActiveState()
+        && nn2.isActiveState(), "Both namenodes cannot be active");
    
     /*  In this test case, we have deliberately shut down nn1 and this will
         cause HAAAdmin#isOtherTargetNodeActive to throw an Exception 
@@ -318,7 +317,7 @@ public class TestDFSHAAdminMiniCluster {
     assertFalse(cluster.isNameNodeUp(0));
     
     runTool("-transitionToActive", "nn2", "--forceactive");
-    assertTrue("Namenode nn2 should be active", nn2.isActiveState());
+    assertTrue(nn2.isActiveState(), "Namenode nn2 should be active");
   }
   
   private int runTool(String ... args) throws Exception {

+ 18 - 12
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java

@@ -34,9 +34,10 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
@@ -45,8 +46,8 @@ import java.util.List;
 import java.util.Random;
 
 import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil.*;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestDebugAdmin {
 
@@ -57,7 +58,7 @@ public class TestDebugAdmin {
   private MiniDFSCluster cluster;
   private DebugAdmin admin;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     final File testRoot = new File(TEST_ROOT_DIR);
     testRoot.delete();
@@ -65,7 +66,7 @@ public class TestDebugAdmin {
     admin = new DebugAdmin(conf);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -92,7 +93,8 @@ public class TestDebugAdmin {
         bytes.toString().replaceAll(System.lineSeparator(), "");
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testRecoverLease() throws Exception {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
@@ -106,7 +108,8 @@ public class TestDebugAdmin {
         runCmd(new String[]{"recoverLease", "-path", "/foo"}));
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testVerifyMetaCommand() throws Exception {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
@@ -135,7 +138,8 @@ public class TestDebugAdmin {
     );
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testComputeMetaCommand() throws Exception {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
@@ -177,7 +181,8 @@ public class TestDebugAdmin {
     assertTrue(outFile.length() > 0);
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testRecoverLeaseforFileNotFound() throws Exception {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
@@ -186,7 +191,8 @@ public class TestDebugAdmin {
         "Giving up on recoverLease for /foo after 1 try"));
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testVerifyECCommand() throws Exception {
     final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies.getByID(
         SystemErasureCodingPolicies.RS_3_2_POLICY_ID);

+ 38 - 34
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDelegationTokenFetcher.java

@@ -19,9 +19,13 @@
 package org.apache.hadoop.hdfs.tools;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.Mockito.doReturn;
@@ -45,10 +49,8 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.tools.FakeRenewer;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -58,26 +60,27 @@ public class TestDelegationTokenFetcher {
 
   private Configuration conf = new Configuration();
 
-  @Rule
-  public TemporaryFolder f = new TemporaryFolder();
   private static final String tokenFile = "token";
 
   /**
    * try to fetch token without http server with IOException
    */
-  @Test(expected = IOException.class)
-  public void testTokenFetchFail() throws Exception {
-    WebHdfsFileSystem fs = mock(WebHdfsFileSystem.class);
-    doThrow(new IOException()).when(fs).getDelegationToken(any());
-    Path p = new Path(f.getRoot().getAbsolutePath(), tokenFile);
-    DelegationTokenFetcher.saveDelegationToken(conf, fs, null, p);
+  @Test
+  public void testTokenFetchFail(@TempDir java.nio.file.Path folder) throws Exception {
+    assertThrows(IOException.class, () -> {
+      WebHdfsFileSystem fs = mock(WebHdfsFileSystem.class);
+      doThrow(new IOException()).when(fs).getDelegationToken(any());
+      Path p = new Path(folder.toAbsolutePath().toString(), tokenFile);
+      DelegationTokenFetcher.saveDelegationToken(conf, fs, null, p);
+    });
   }
 
   /**
    * Call fetch token using http server
    */
   @Test
-  public void expectedTokenIsRetrievedFromHttp() throws Exception {
+  public void expectedTokenIsRetrievedFromHttp(@TempDir java.nio.file.Path folder)
+      throws Exception {
     final Token<DelegationTokenIdentifier> testToken = new Token<DelegationTokenIdentifier>(
         "id".getBytes(), "pwd".getBytes(), FakeRenewer.KIND, new Text(
             "127.0.0.1:1234"));
@@ -85,24 +88,24 @@ public class TestDelegationTokenFetcher {
     WebHdfsFileSystem fs = mock(WebHdfsFileSystem.class);
 
     doReturn(testToken).when(fs).getDelegationToken(any());
-    Path p = new Path(f.getRoot().getAbsolutePath(), tokenFile);
+    Path p = new Path(folder.toAbsolutePath().toString(), tokenFile);
     DelegationTokenFetcher.saveDelegationToken(conf, fs, null, p);
 
     Credentials creds = Credentials.readTokenStorageFile(p, conf);
     Iterator<Token<?>> itr = creds.getAllTokens().iterator();
-    assertTrue("token not exist error", itr.hasNext());
+    assertTrue(itr.hasNext(), "token not exist error");
 
     Token<?> fetchedToken = itr.next();
-    Assert.assertArrayEquals("token wrong identifier error",
-        testToken.getIdentifier(), fetchedToken.getIdentifier());
-    Assert.assertArrayEquals("token wrong password error",
-        testToken.getPassword(), fetchedToken.getPassword());
+    assertArrayEquals(testToken.getIdentifier(), fetchedToken.getIdentifier(),
+        "token wrong identifier error");
+    assertArrayEquals(testToken.getPassword(), fetchedToken.getPassword(),
+        "token wrong password error");
 
     DelegationTokenFetcher.renewTokens(conf, p);
-    Assert.assertEquals(testToken, FakeRenewer.getLastRenewed());
+    assertEquals(testToken, FakeRenewer.getLastRenewed());
 
     DelegationTokenFetcher.cancelTokens(conf, p);
-    Assert.assertEquals(testToken, FakeRenewer.getLastCanceled());
+    assertEquals(testToken, FakeRenewer.getLastCanceled());
   }
 
   /**
@@ -110,18 +113,19 @@ public class TestDelegationTokenFetcher {
    * throw nullPointerException
    */
   @Test
-  public void testReturnedTokenIsNull() throws Exception {
+  public void testReturnedTokenIsNull(@TempDir java.nio.file.Path folder) throws Exception {
     WebHdfsFileSystem fs = mock(WebHdfsFileSystem.class);
     doReturn(null).when(fs).getDelegationToken(anyString());
-    Path p = new Path(f.getRoot().getAbsolutePath(), tokenFile);
+    Path p = new Path(folder.toAbsolutePath().toString(), tokenFile);
     DelegationTokenFetcher.saveDelegationToken(conf, fs, null, p);
     // When Token returned is null, TokenFile should not exist
-    Assert.assertFalse(p.getFileSystem(conf).exists(p));
+    assertFalse(p.getFileSystem(conf).exists(p));
 
   }
 
   @Test
-  public void testDelegationTokenWithoutRenewerViaRPC() throws Exception {
+  public void testDelegationTokenWithoutRenewerViaRPC(@TempDir java.nio.file.Path folder)
+      throws Exception {
     conf.setBoolean(DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
         .build();
@@ -130,23 +134,23 @@ public class TestDelegationTokenFetcher {
       DistributedFileSystem fs = cluster.getFileSystem();
       // Should be able to fetch token without renewer.
       LocalFileSystem localFileSystem = FileSystem.getLocal(conf);
-      Path p = new Path(f.getRoot().getAbsolutePath(), tokenFile);
+      Path p = new Path(folder.toAbsolutePath().toString(), tokenFile);
       p = localFileSystem.makeQualified(p);
       DelegationTokenFetcher.saveDelegationToken(conf, fs, null, p);
       Credentials creds = Credentials.readTokenStorageFile(p, conf);
       Iterator<Token<?>> itr = creds.getAllTokens().iterator();
-      assertTrue("token not exist error", itr.hasNext());
+      assertTrue(itr.hasNext(), "token not exist error");
       final Token token = itr.next();
-      assertNotNull("Token should be there without renewer", token);
+      assertNotNull(token, "Token should be there without renewer");
 
       // Test compatibility of DelegationTokenFetcher.printTokensToString
       String expectedNonVerbose = "Token (HDFS_DELEGATION_TOKEN token 1 for " +
           System.getProperty("user.name") + " with renewer ) for";
       String resNonVerbose =
           DelegationTokenFetcher.printTokensToString(conf, p, false);
-      assertTrue("The non verbose output is expected to start with \""
-          + expectedNonVerbose +"\"",
-          resNonVerbose.startsWith(expectedNonVerbose));
+      assertTrue(resNonVerbose.startsWith(expectedNonVerbose),
+          "The non verbose output is expected to start with \""
+              + expectedNonVerbose + "\"");
       LOG.info(resNonVerbose);
       LOG.info(
           DelegationTokenFetcher.printTokensToString(conf, p, true));
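
Note: this file shows the rule-to-parameter migration. The JUnit 4 TemporaryFolder @Rule becomes @TempDir parameter injection, and @Test(expected = IOException.class) becomes an explicit assertThrows, which scopes the expectation to the statements inside the lambda rather than the whole test method. A minimal sketch of both idioms (somethingThatThrows is a hypothetical helper):

    // JUnit 4: rule-managed folder, expected exception on the annotation
    @Rule
    public TemporaryFolder f = new TemporaryFolder();

    @Test(expected = IOException.class)
    public void testFail() throws Exception {
      somethingThatThrows(f.getRoot());
    }

    // JUnit 5: a fresh directory is injected per test method
    @Test
    public void testFail(@TempDir java.nio.file.Path folder) {
      assertThrows(IOException.class, () -> somethingThatThrows(folder.toFile()));
    }

One behavioral difference to keep in mind: TemporaryFolder.newFile() created the file on disk, whereas Path.resolve() only constructs a path, so a test that relied on the file pre-existing would need an explicit Files.createFile().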

+ 81 - 83
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestECAdmin.java

@@ -21,26 +21,25 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
-import java.util.concurrent.TimeUnit;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Tests some ECAdmin scenarios that are hard to test from
  * {@link org.apache.hadoop.cli.TestErasureCodingCLI}.
  */
+@Timeout(300)
 public class TestECAdmin {
   public static final Logger LOG = LoggerFactory.getLogger(TestECAdmin.class);
   private Configuration conf = new Configuration();
@@ -66,17 +65,13 @@ public class TestECAdmin {
       SystemErasureCodingPolicies.getByID(
           SystemErasureCodingPolicies.XOR_2_1_POLICY_ID).getName();
 
-  @Rule
-  public Timeout globalTimeout =
-      new Timeout(300000, TimeUnit.MILLISECONDS);
-
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     System.setOut(new PrintStream(out));
     System.setErr(new PrintStream(err));
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     try {
       System.out.flush();
@@ -101,7 +96,7 @@ public class TestECAdmin {
 
     cluster = DFSTestUtil.setupCluster(conf, numDataNodes, numRacks, 0);
     int ret = runCommandWithParams("-verifyClusterSetup");
-    assertEquals("Return value of the command is not successful", 2, ret);
+    assertEquals(2, ret, "Return value of the command is not successful");
     assertNotEnoughDataNodesMessage(RS_6_3, numDataNodes, expectedNumDataNodes);
   }
 
@@ -116,7 +111,7 @@ public class TestECAdmin {
     cluster.getFileSystem().disableErasureCodingPolicy(RS_6_3);
     cluster.getFileSystem().enableErasureCodingPolicy(testPolicy);
     int ret = runCommandWithParams("-verifyClusterSetup");
-    assertEquals("Return value of the command is not successful", 2, ret);
+    assertEquals(2, ret, "Return value of the command is not successful");
     assertNotEnoughRacksMessage(testPolicy, numRacks, expectedNumRacks);
   }
 
@@ -131,7 +126,7 @@ public class TestECAdmin {
     cluster.getFileSystem().disableErasureCodingPolicy(RS_6_3);
     cluster.getFileSystem().enableErasureCodingPolicy(testPolicy);
     int ret = runCommandWithParams("-verifyClusterSetup");
-    assertEquals("Return value of the command is not successful", 2, ret);
+    assertEquals(2, ret, "Return value of the command is not successful");
     assertNotEnoughRacksMessage(testPolicy, numRacks, expectedNumRacks);
   }
 
@@ -146,7 +141,7 @@ public class TestECAdmin {
     cluster.getFileSystem().disableErasureCodingPolicy(RS_6_3);
     cluster.getFileSystem().enableErasureCodingPolicy(testPolicy);
     int ret = runCommandWithParams("-verifyClusterSetup");
-    assertEquals("Return value of the command is not successful", 2, ret);
+    assertEquals(2, ret, "Return value of the command is not successful");
     assertNotEnoughRacksMessage(testPolicy, numRacks, expectedNumRacks);
   }
 
@@ -154,11 +149,12 @@ public class TestECAdmin {
   public void testRS63Good() throws Exception {
     cluster = DFSTestUtil.setupCluster(conf, 9, 3, 0);
     int ret = runCommandWithParams("-verifyClusterSetup");
-    assertEquals("Return value of the command is successful", 0, ret);
-    assertTrue("Result of cluster topology verify " +
-        "should be logged correctly", out.toString().contains(
-        "The cluster setup can support EC policies: " + RS_6_3));
-    assertTrue("Error output should be empty", err.toString().isEmpty());
+    assertEquals(0, ret, "Return value of the command is successful");
+    assertTrue(out.toString().contains(
+            "The cluster setup can support EC policies: " + RS_6_3),
+        "Result of cluster topology verify " +
+            "should be logged correctly");
+    assertTrue(err.toString().isEmpty(), "Error output should be empty");
   }
 
   @Test
@@ -166,11 +162,11 @@ public class TestECAdmin {
     cluster = DFSTestUtil.setupCluster(conf, 9, 3, 0);
     cluster.getFileSystem().disableErasureCodingPolicy(RS_6_3);
     int ret = runCommandWithParams("-verifyClusterSetup");
-    assertEquals("Return value of the command is successful", 0, ret);
-    assertTrue("Result of cluster topology verify " +
-            "should be logged correctly",
-        out.toString().contains("No erasure coding policy is given"));
-    assertTrue("Error output should be empty", err.toString().isEmpty());
+    assertEquals(0, ret, "Return value of the command is successful");
+    assertTrue(out.toString().contains("No erasure coding policy is given"),
+        "Result of cluster topology verify " +
+            "should be logged correctly");
+    assertTrue(err.toString().isEmpty(), "Error output should be empty");
   }
 
   @Test
@@ -184,16 +180,15 @@ public class TestECAdmin {
     final int ret = runCommandWithParams("-enablePolicy", "-policy",
         testPolicy);
 
-    assertEquals("Return value of the command is successful", 0, ret);
-    assertTrue("Enabling policy should be logged", out.toString()
-        .contains("Erasure coding policy " + testPolicy + " is enabled"));
-    assertTrue("Warning about cluster topology should be printed",
-        err.toString().contains("Warning: The cluster setup does not support " +
-        "EC policy " + testPolicy + ". Reason:"));
-    assertTrue("Warning about cluster topology should be printed",
-        err.toString()
-            .contains(" racks are required for the erasure coding policies: " +
-                testPolicy));
+    assertEquals(0, ret, "Return value of the command is successful");
+    assertTrue(out.toString().contains("Erasure coding policy " + testPolicy + " is enabled"),
+        "Enabling policy should be logged");
+    assertTrue(err.toString().contains("Warning: The cluster setup does not support " +
+            "EC policy " + testPolicy + ". Reason:"),
+        "Warning about cluster topology should be printed");
+    assertTrue(err.toString()
+        .contains(" racks are required for the erasure coding policies: " +
+            testPolicy), "Warning about cluster topology should be printed");
   }
 
   @Test
@@ -204,12 +199,13 @@ public class TestECAdmin {
     final int ret = runCommandWithParams("-enablePolicy", "-policy",
         testPolicy);
 
-    assertEquals("Return value of the command is successful", 0, ret);
-    assertTrue("Enabling policy should be logged", out.toString()
-        .contains("Erasure coding policy " + testPolicy + " is enabled"));
-    assertFalse("Warning about cluster topology should not be printed",
-        out.toString().contains("Warning: The cluster setup does not support"));
-    assertTrue("Error output should be empty", err.toString().isEmpty());
+    assertEquals(0, ret, "Return value of the command is successful");
+    assertTrue(out.toString()
+            .contains("Erasure coding policy " + testPolicy + " is enabled"),
+        "Enabling policy should be logged");
+    assertFalse(out.toString().contains("Warning: The cluster setup does not support"),
+        "Warning about cluster topology should not be printed");
+    assertTrue(err.toString().isEmpty(), "Error output should be empty");
   }
 
   @Test
@@ -219,12 +215,11 @@ public class TestECAdmin {
     final int ret = runCommandWithParams("-enablePolicy", "-policy",
         "NonExistentPolicy");
 
-    assertEquals("Return value of the command is unsuccessful", 2, ret);
-    assertFalse("Enabling policy should not be logged when " +
-        "it was unsuccessful", out.toString().contains("is enabled"));
-    assertTrue("Error message should be printed",
-        err.toString().contains("RemoteException: The policy name " +
-            "NonExistentPolicy does not exist"));
+    assertEquals(2, ret, "Return value of the command is unsuccessful");
+    assertFalse(out.toString().contains("is enabled"),
+        "Enabling policy should not be logged when " + "it was unsuccessful");
+    assertTrue(err.toString().contains("RemoteException: The policy name " +
+        "NonExistentPolicy does not exist"), "Error message should be printed");
   }
 
   @Test
@@ -234,30 +229,30 @@ public class TestECAdmin {
     cluster = DFSTestUtil.setupCluster(conf, numDataNodes, numRacks, 0);
 
     int ret = runCommandWithParams("-verifyClusterSetup", "-policy", RS_3_2);
-    assertEquals("Return value of the command is not successful", 2, ret);
+    assertEquals(2, ret, "Return value of the command is not successful");
     assertNotEnoughRacksMessage(RS_3_2, numRacks, 3);
 
     resetOutputs();
     ret = runCommandWithParams("-verifyClusterSetup", "-policy",
         RS_10_4, RS_3_2);
-    assertEquals("Return value of the command is not successful", 2, ret);
+    assertEquals(2, ret, "Return value of the command is not successful");
     assertNotEnoughDataNodesMessage(RS_10_4 + ", " + RS_3_2,
         numDataNodes, 14);
 
     resetOutputs();
     ret = runCommandWithParams("-verifyClusterSetup", "-policy",
         "invalidPolicy");
-    assertEquals("Return value of the command is not successful", -1, ret);
-    assertTrue("Error message should be logged", err.toString()
+    assertEquals(-1, ret, "Return value of the command is not successful");
+    assertTrue(err.toString()
         .contains("The given erasure coding policy invalidPolicy " +
-            "does not exist."));
+            "does not exist."), "Error message should be logged");
 
     resetOutputs();
     ret = runCommandWithParams("-verifyClusterSetup", "-policy");
-    assertEquals("Return value of the command is not successful", -1, ret);
-    assertTrue("Error message should be logged", err.toString()
+    assertEquals(-1, ret, "Return value of the command is not successful");
+    assertTrue(err.toString()
         .contains("NotEnoughArgumentsException: Not enough arguments: " +
-            "expected 1 but got 0"));
+            "expected 1 but got 0"), "Error message should be logged");
   }
 
   @Test
@@ -269,25 +264,27 @@ public class TestECAdmin {
     cluster.getFileSystem().enableErasureCodingPolicy(XOR_2_1);
 
     int ret = runCommandWithParams("-verifyClusterSetup", XOR_2_1);
-    assertEquals("Return value of the command is not successful", 1, ret);
-    assertTrue("Error message should be logged", err.toString().contains("Too many arguments"));
+    assertEquals(1, ret, "Return value of the command is not successful");
+    assertTrue(err.toString().contains("Too many arguments"), "Error message should be logged");
 
     resetOutputs();
     ret = runCommandWithParams("-verifyClusterSetup", "-policy");
-    assertEquals("Return value of the command is not successful", -1, ret);
-    assertTrue("Error message should be logged", err.toString()
-        .contains("NotEnoughArgumentsException: Not enough arguments: " + "expected 1 but got 0"));
+    assertEquals(-1, ret, "Return value of the command is not successful");
+    assertTrue(err.toString().contains("NotEnoughArgumentsException: Not enough arguments: " +
+            "expected 1 but got 0"),
+        "Error message should be logged");
 
     resetOutputs();
     ret = runCommandWithParams("-verifyClusterSetup", "-policy", XOR_2_1);
-    assertEquals("Return value of the command is successful", 0, ret);
-    assertTrue("Result of cluster topology verify " + "should be logged correctly",
-        out.toString().contains("The cluster setup can support EC policies: " + XOR_2_1));
-    assertTrue("Error output should be empty", err.toString().isEmpty());
+    assertEquals(0, ret, "Return value of the command is successful");
+    assertTrue(
+        out.toString().contains("The cluster setup can support EC policies: " + XOR_2_1),
+        "Result of cluster topology verify " + "should be logged correctly");
+    assertTrue(err.toString().isEmpty(), "Error output should be empty");
 
     resetOutputs();
     ret = runCommandWithParams("-verifyClusterSetup", "-policy", RS_6_3);
-    assertEquals("Return value of the command is not successful", 2, ret);
+    assertEquals(2, ret, "Return value of the command is not successful");
     assertNotEnoughDataNodesMessage(RS_6_3, numDataNodes, 9);
   }
 
@@ -299,25 +296,26 @@ public class TestECAdmin {
   private void assertNotEnoughDataNodesMessage(String policy,
                                                int numDataNodes,
                                                int expectedNumDataNodes) {
-    assertTrue("Result of cluster topology verify " +
-        "should be logged correctly", out.toString()
-        .contains(expectedNumDataNodes + " DataNodes are required " +
-            "for the erasure coding policies: " +
-            policy + ". The number of DataNodes is only " + numDataNodes));
-    assertTrue("Error output should be empty",
-        err.toString().isEmpty());
+    assertTrue(out.toString()
+            .contains(expectedNumDataNodes + " DataNodes are required " +
+                "for the erasure coding policies: " +
+                policy + ". The number of DataNodes is only " + numDataNodes),
+        "Result of cluster topology verify " +
+            "should be logged correctly");
+    assertTrue(
+        err.toString().isEmpty(), "Error output should be empty");
   }
 
   private void assertNotEnoughRacksMessage(String policy,
                                            int numRacks,
                                            int expectedNumRacks) {
-    assertTrue("Result of cluster topology verify " +
-        "should be logged correctly", out.toString()
-        .contains(expectedNumRacks + " racks are required for " +
-            "the erasure coding policies: " +
-            policy + ". The number of racks is only " + numRacks));
-    assertTrue("Error output should be empty",
-        err.toString().isEmpty());
+    assertTrue(out.toString()
+            .contains(expectedNumRacks + " racks are required for " +
+                "the erasure coding policies: " +
+                policy + ". The number of racks is only " + numRacks),
+        "Result of cluster topology verify " +
+            "should be logged correctly");
+    assertTrue(err.toString().isEmpty(), "Error output should be empty");
   }
 
   private int runCommandWithParams(String... args) throws Exception{
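
Note: TestECAdmin trades the class-wide org.junit.rules.Timeout rule for a class-level org.junit.jupiter.api.Timeout annotation, which applies to every test method in the class and lets the TimeUnit import go away, since the Jupiter annotation defaults to seconds. The shape of the change:

    // JUnit 4: rule field, value in milliseconds
    @Rule
    public Timeout globalTimeout = new Timeout(300000, TimeUnit.MILLISECONDS);

    // JUnit 5: class-level annotation, value in seconds by default
    @Timeout(300)
    public class TestECAdmin { /* ... */ }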

+ 46 - 32
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestGetConf.java

@@ -27,11 +27,11 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.PrintStream;
@@ -56,7 +56,8 @@ import org.apache.hadoop.hdfs.tools.GetConf.CommandHandler;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 
@@ -148,10 +149,9 @@ public class TestGetConf {
       int ret = ToolRunner.run(new GetConf(conf, out, out), args);
       out.flush();
       System.err.println("Output: " + o.toString());
-      assertEquals("Expected " + (success?"success":"failure") +
+      assertEquals(success, ret == 0, "Expected " + (success ? "success" : "failure") +
           " for args: " + Joiner.on(" ").join(args) + "\n" +
-          "Output: " + o.toString(),
-          success, ret == 0);
+          "Output: " + o.toString());
       return o.toString();
     } finally {
       o.close();
@@ -249,7 +249,8 @@ public class TestGetConf {
   /**
    * Test empty configuration
    */
-  @Test(timeout=10000)
+  @Test
+  @Timeout(value = 10)
   public void testEmptyConf() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration(false);
     // Verify getting addresses fails
@@ -260,7 +261,7 @@ public class TestGetConf {
     for (Command cmd : Command.values()) {
       String arg = cmd.getName();
       CommandHandler handler = Command.getHandler(arg);
-      assertNotNull("missing handler: " + cmd, handler);
+      assertNotNull(handler, "missing handler: " + cmd);
       if (handler.key != null) {
         // First test with configuration missing the required key
         String[] args = {handler.key};
@@ -272,7 +273,8 @@ public class TestGetConf {
   /**
    * Test invalid argument to the tool
    */
-  @Test(timeout=10000)
+  @Test
+  @Timeout(value = 10)
   public void testInvalidArgument() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
     String[] args = {"-invalidArgument"};
@@ -284,7 +286,8 @@ public class TestGetConf {
    * Tests to make sure the returned addresses are correct in case of default
    * configuration with no federation
    */
-  @Test(timeout=10000)
+  @Test
+  @Timeout(value = 10)
   public void testNonFederation() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration(false);
   
@@ -319,7 +322,8 @@ public class TestGetConf {
    * Tests to make sure the returned addresses are correct in case of federation
    * of setup.
    */
-  @Test(timeout=10000)
+  @Test
+  @Timeout(value = 10)
   public void testFederation() throws Exception {
     final int nsCount = 10;
     HdfsConfiguration conf = new HdfsConfiguration(false);
@@ -363,7 +367,8 @@ public class TestGetConf {
    * Tests for journal node addresses.
    * @throws Exception
    */
-  @Test(timeout=10000)
+  @Test
+  @Timeout(value = 10)
   public void testGetJournalNodes() throws Exception {
 
     final int nsCount = 3;
@@ -487,32 +492,39 @@ public class TestGetConf {
   /*
    ** Test for unknown journal node host exception.
   */
-  @Test(expected = UnknownHostException.class, timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testUnknownJournalNodeHost()
       throws URISyntaxException, IOException {
-    String journalsBaseUri = "qjournal://jn1:8020;jn2:8020;jn3:8020";
-    HdfsConfiguration conf = new HdfsConfiguration(false);
-    conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
-        journalsBaseUri + "/jndata");
-    DFSUtil.getJournalNodeAddresses(conf);
+    assertThrows(UnknownHostException.class, () -> {
+      String journalsBaseUri = "qjournal://jn1:8020;jn2:8020;jn3:8020";
+      HdfsConfiguration conf = new HdfsConfiguration(false);
+      conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
+          journalsBaseUri + "/jndata");
+      DFSUtil.getJournalNodeAddresses(conf);
+    });
   }
 
   /*
    ** Test for malformed journal node urisyntax exception.
   */
-  @Test(expected = URISyntaxException.class, timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testJournalNodeUriError()
       throws URISyntaxException, IOException {
-    final int nsCount = 3;
-    String journalsBaseUri = "qjournal://jn0 :8020;jn1:8020;jn2:8020";
-    setupStaticHostResolution(nsCount, "jn");
-    HdfsConfiguration conf = new HdfsConfiguration(false);
-    conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
-        journalsBaseUri + "/jndata");
-    DFSUtil.getJournalNodeAddresses(conf);
+    assertThrows(URISyntaxException.class, () -> {
+      final int nsCount = 3;
+      String journalsBaseUri = "qjournal://jn0 :8020;jn1:8020;jn2:8020";
+      setupStaticHostResolution(nsCount, "jn");
+      HdfsConfiguration conf = new HdfsConfiguration(false);
+      conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
+          journalsBaseUri + "/jndata");
+      DFSUtil.getJournalNodeAddresses(conf);
+    });
   }
 
-  @Test(timeout=10000)
+  @Test
+  @Timeout(value = 10)
   public void testGetSpecificKey() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.set("mykey", " myval ");
@@ -521,7 +533,8 @@ public class TestGetConf {
     assertEquals(String.format("myval%n"), toolResult);
   }
   
-  @Test(timeout=10000)
+  @Test
+  @Timeout(value = 10)
   public void testExtraArgsThrowsError() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.set("mykey", "myval");
@@ -534,7 +547,8 @@ public class TestGetConf {
    * Tests commands other than {@link Command#NAMENODE}, {@link Command#BACKUP},
    * {@link Command#SECONDARY} and {@link Command#NNRPCADDRESSES}
    */
-  @Test(timeout=10000)
+  @Test
+  @Timeout(value = 10)
   public void testTool() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration(false);
     for (Command cmd : Command.values()) {
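
Note: unlike @Test(expected = ...), Assertions.assertThrows returns the thrown exception, so a migrated test may additionally assert on its contents. The conversions in this file only need the type check; the capturing form, with a purely illustrative message assertion, would be:

    // the returned exception can be inspected after the type check
    UnknownHostException e = assertThrows(UnknownHostException.class,
        () -> DFSUtil.getJournalNodeAddresses(conf));
    assertTrue(e.getMessage().contains("jn1"));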

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicyCommands.java

@@ -31,9 +31,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test StoragePolicyAdmin commands
@@ -46,7 +46,7 @@ public class TestStoragePolicyCommands {
   protected static MiniDFSCluster cluster;
   protected static FileSystem fs;
 
-  @Before
+  @BeforeEach
   public void clusterSetUp() throws IOException, URISyntaxException {
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
@@ -59,7 +59,7 @@ public class TestStoragePolicyCommands {
     fs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void clusterShutdown() throws IOException{
     if(fs != null) {
       fs.close();

+ 10 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java

@@ -34,9 +34,10 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.sps.Context;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 /**
  * Test StoragePolicySatisfy admin commands.
@@ -50,7 +51,7 @@ public class TestStoragePolicySatisfyAdminCommands {
   private DistributedFileSystem dfs = null;
   private StoragePolicySatisfier externalSps = null;
 
-  @Before
+  @BeforeEach
   public void clusterSetUp() throws IOException, URISyntaxException {
     conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
@@ -74,7 +75,7 @@ public class TestStoragePolicySatisfyAdminCommands {
     externalSps.start(StoragePolicySatisfierMode.EXTERNAL);
   }
 
-  @After
+  @AfterEach
   public void clusterShutdown() throws IOException{
     if(dfs != null) {
       dfs.close();
@@ -89,7 +90,8 @@ public class TestStoragePolicySatisfyAdminCommands {
     }
   }
 
-  @Test(timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testStoragePolicySatisfierCommand() throws Exception {
     final String file = "/testStoragePolicySatisfierCommand";
     DFSTestUtil.createFile(dfs, new Path(file), SIZE, REPL, 0);
@@ -110,7 +112,8 @@ public class TestStoragePolicySatisfyAdminCommands {
         dfs);
   }
 
-  @Test(timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testStoragePolicySatisfierCommandWithURI() throws Exception {
     final String file = "/testStoragePolicySatisfierCommandURI";
     DFSTestUtil.createFile(dfs, new Path(file), SIZE, REPL, 0);

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFSStoragePolicyCommands.java

@@ -29,8 +29,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -40,7 +40,7 @@ import java.net.InetSocketAddress;
  */
 public class TestViewFSStoragePolicyCommands extends TestStoragePolicyCommands {
 
-  @Before
+  @BeforeEach
   public void clusterSetUp() throws IOException {
     conf = new HdfsConfiguration();
     String clusterName = "cluster";

+ 10 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithDFSAdmin.java

@@ -17,10 +17,8 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
-import static org.hamcrest.CoreMatchers.containsString;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThat;
-
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
@@ -44,10 +42,9 @@ import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.ToolRunner;
 
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests DFSAdmin with ViewFileSystemOverloadScheme with configured mount links.
@@ -71,7 +68,7 @@ public class TestViewFileSystemOverloadSchemeWithDFSAdmin {
   /**
    * Sets up the configurations and starts the MiniDFSCluster.
    */
-  @Before
+  @BeforeEach
   public void startCluster() throws IOException {
     conf = new Configuration();
     conf.setInt(
@@ -86,10 +83,10 @@ public class TestViewFileSystemOverloadSchemeWithDFSAdmin {
     defaultFSURI =
         URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY));
     localTargetDir = new File(TEST_ROOT_DIR, "/root/");
-    Assert.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
+    assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     try {
       System.out.flush();
@@ -127,13 +124,13 @@ public class TestViewFileSystemOverloadSchemeWithDFSAdmin {
   private void assertErrMsg(String errorMsg, int line) {
     final List<String> errList = Lists.newArrayList();
     scanIntoList(err, errList);
-    assertThat(errList.get(line), containsString(errorMsg));
+    assertThat(errList.get(line)).contains(errorMsg);
   }
 
   private void assertOutMsg(String outMsg, int line) {
     final List<String> errList = Lists.newArrayList();
     scanIntoList(out, errList);
-    assertThat(errList.get(line), containsString(outMsg));
+    assertThat(errList.get(line)).contains(outMsg);
   }
 
   /**
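
Note: this file swaps assertion libraries as well as JUnit versions. Hamcrest's assertThat(actual, containsString(expected)) becomes AssertJ's fluent assertThat(actual).contains(expected); JUnit Jupiter ships no assertThat of its own (org.junit.Assert.assertThat was deprecated in 4.13), so the commit standardizes on AssertJ here. The shape of the change:

    // Hamcrest matcher, JUnit 4 era
    assertThat(errList.get(line), containsString(errorMsg));

    // AssertJ fluent style, after this migration
    assertThat(errList.get(line)).contains(errorMsg);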

+ 8 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestViewFileSystemOverloadSchemeWithFSCommands.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
@@ -43,10 +43,9 @@ import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.ToolRunner;
 
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests HDFS commands with ViewFileSystemOverloadScheme with configured mount
@@ -71,7 +70,7 @@ public class TestViewFileSystemOverloadSchemeWithFSCommands {
   /**
    * Sets up the configurations and starts the MiniDFSCluster.
    */
-  @Before
+  @BeforeEach
   public void startCluster() throws IOException {
     conf = new Configuration();
     conf.setInt(
@@ -86,10 +85,10 @@ public class TestViewFileSystemOverloadSchemeWithFSCommands {
     defaultFSURI =
         URI.create(conf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY));
     localTargetDir = new File(TEST_ROOT_DIR, "/root/");
-    Assert.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
+    assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     try {
       System.out.flush();
@@ -165,7 +164,7 @@ public class TestViewFileSystemOverloadSchemeWithFSCommands {
       String msg =
           "DF was not calculated on all mounts. The left out mounts are: "
               + mounts;
-      assertEquals(msg, 0, mounts.size());
+      assertEquals(0, mounts.size(), msg);
     } finally {
       fsShell.close();
     }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestWebHDFSStoragePolicyCommands.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.tools;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
-import org.junit.Before;
+import org.junit.jupiter.api.BeforeEach;
 
 import java.io.IOException;
 import java.net.URISyntaxException;
@@ -31,7 +31,7 @@ import java.net.URISyntaxException;
 public class TestWebHDFSStoragePolicyCommands
     extends TestStoragePolicyCommands {
 
-  @Before
+  @BeforeEach
   public void clusterSetUp() throws IOException, URISyntaxException {
     super.clusterSetUp();
     fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,

+ 42 - 50
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java

@@ -18,8 +18,10 @@
 
 package org.apache.hadoop.hdfs.tools.offlineEditsViewer;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
@@ -28,9 +30,11 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
+import java.nio.file.Path;
 import java.util.Map;
 
 import org.apache.commons.io.FileUtils;
+import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -40,12 +44,9 @@ import org.apache.hadoop.hdfs.server.namenode.OfflineEditsViewerHelper;
 import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
 
@@ -81,15 +82,12 @@ public class TestOfflineEditsViewer {
     return b.build();
   }
 
-  @Rule
-  public final TemporaryFolder folder = new TemporaryFolder();
-
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     nnHelper.startCluster(buildDir + "/dfs/");
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     nnHelper.shutdownCluster();
   }
@@ -98,16 +96,16 @@ public class TestOfflineEditsViewer {
    * Test the OfflineEditsViewer
    */
   @Test
-  public void testGenerated() throws IOException {
+  public void testGenerated(@TempDir Path folder) throws IOException {
     // edits generated by nnHelper (MiniDFSCluster), should have all op codes
     // binary, XML, reparsed binary
     String edits = nnHelper.generateEdits();
     LOG.info("Generated edits=" + edits);
-    String editsParsedXml = folder.newFile("editsParsed.xml").getAbsolutePath();
-    String editsReparsed = folder.newFile("editsParsed").getAbsolutePath();
+    String editsParsedXml = folder.resolve("editsParsed.xml").toString();
+    String editsReparsed = folder.resolve("editsParsed").toString();
     // capital case extension
     String editsParsedXML_caseInSensitive =
-        folder.newFile("editsRecoveredParsed.XML").getAbsolutePath();
+        folder.resolve("editsRecoveredParsed.XML").toString();
 
     // parse to XML then back to binary
     assertEquals(0, runOev(edits, editsParsedXml, "xml", false));
@@ -118,18 +116,16 @@ public class TestOfflineEditsViewer {
 
 
     // judgment time
-    assertTrue("Edits " + edits + " should have all op codes",
-        hasAllOpCodes(edits));
+    assertTrue(hasAllOpCodes(edits), "Edits " + edits + " should have all op codes");
     LOG.info("Comparing generated file " + editsReparsed
         + " with reference file " + edits);
-    assertTrue(
-        "Generated edits and reparsed (bin to XML to bin) should be same",
-        filesEqualIgnoreTrailingZeros(edits, editsReparsed));
+    assertTrue(filesEqualIgnoreTrailingZeros(edits, editsReparsed),
+        "Generated edits and reparsed (bin to XML to bin) should be same");
   }
 
 
   @Test
-  public void testRecoveryMode() throws IOException {
+  public void testRecoveryMode(@TempDir Path folder) throws IOException {
     // edits generated by nnHelper (MiniDFSCluster), should have all op codes
     // binary, XML, reparsed binary
     String edits = nnHelper.generateEdits();
@@ -138,12 +134,12 @@ public class TestOfflineEditsViewer {
     FileChannel editsFile = os.getChannel();
     editsFile.truncate(editsFile.size() - 5);
 
-    String editsParsedXml = folder.newFile("editsRecoveredParsed.xml")
-        .getAbsolutePath();
-    String editsReparsed = folder.newFile("editsRecoveredReparsed")
-        .getAbsolutePath();
-    String editsParsedXml2 = folder.newFile("editsRecoveredParsed2.xml")
-        .getAbsolutePath();
+    String editsParsedXml = folder.resolve("editsRecoveredParsed.xml")
+        .toString();
+    String editsReparsed = folder.resolve("editsRecoveredReparsed")
+        .toString();
+    String editsParsedXml2 = folder.resolve("editsRecoveredParsed2.xml")
+        .toString();
 
     // Can't read the corrupted file without recovery mode
     assertEquals(-1, runOev(edits, editsParsedXml, "xml", false));
@@ -154,8 +150,8 @@ public class TestOfflineEditsViewer {
     assertEquals(0, runOev(editsReparsed, editsParsedXml2, "xml", false));
 
     // judgment time
-    assertTrue("Test round trip", FileUtils.contentEqualsIgnoreEOL(
-        new File(editsParsedXml), new File(editsParsedXml2), "UTF-8"));
+    assertTrue(FileUtils.contentEqualsIgnoreEOL(
+        new File(editsParsedXml), new File(editsParsedXml2), "UTF-8"), "Test round trip");
 
     os.close();
   }
@@ -178,14 +174,12 @@ public class TestOfflineEditsViewer {
         runOev(editsStoredParsedXml, editsStoredReparsed, "binary", false));
 
     // judgement time
-    assertTrue("Edits " + editsStored + " should have all op codes",
-        hasAllOpCodes(editsStored));
-    assertTrue("Reference XML edits and parsed to XML should be same",
-        FileUtils.contentEqualsIgnoreEOL(new File(editsStoredXml),
-            new File(editsStoredParsedXml), "UTF-8"));
-    assertTrue(
-        "Reference edits and reparsed (bin to XML to bin) should be same",
-        filesEqualIgnoreTrailingZeros(editsStored, editsStoredReparsed));
+    assertTrue(hasAllOpCodes(editsStored), "Edits " + editsStored + " should have all op codes");
+    assertTrue(FileUtils.contentEqualsIgnoreEOL(new File(editsStoredXml),
+            new File(editsStoredParsedXml), "UTF-8"),
+        "Reference XML edits and parsed to XML should be same");
+    assertTrue(filesEqualIgnoreTrailingZeros(editsStored, editsStoredReparsed),
+        "Reference edits and reparsed (bin to XML to bin) should be same");
   }
 
   /**
@@ -299,11 +293,9 @@ public class TestOfflineEditsViewer {
     try {
       System.setOut(out);
       int status = new OfflineEditsViewer().run(new String[] { "-h" });
-      assertTrue("" + "Exit code returned for help option is incorrect",
-          status == 0);
-      Assert.assertFalse(
-          "Invalid Command error displayed when help option is passed.", bytes
-              .toString().contains("Error parsing command-line options"));
+      assertTrue(status == 0, "Exit code returned for help option is incorrect");
+      assertFalse(bytes.toString().contains("Error parsing command-line options"),
+          "Invalid Command error displayed when help option is passed.");
     } finally {
       System.setOut(oldOut);
       IOUtils.closeStream(out);
@@ -322,7 +314,7 @@ public class TestOfflineEditsViewer {
     if (oev.go(editFilename, outFilename, "stats", new Flags(), visitor) == 0) {
       statisticsStr = visitor.getStatisticsString();
     }
-    Assert.assertNotNull(statisticsStr);
+    assertNotNull(statisticsStr);
 
     String str;
     Long count;
@@ -340,13 +332,13 @@ public class TestOfflineEditsViewer {
   }
 
   @Test
-  public void testProcessorWithSameTypeFormatFile() throws IOException {
+  public void testProcessorWithSameTypeFormatFile(@TempDir Path folder) throws IOException {
     String edits = nnHelper.generateEdits();
     LOG.info("Generated edits=" + edits);
-    String binaryEdits = folder.newFile("binaryEdits").getAbsolutePath();
-    String editsParsedXml = folder.newFile("editsParsed.xml").getAbsolutePath();
-    String editsReparsedXml = folder.newFile("editsReparsed.xml")
-        .getAbsolutePath();
+    String binaryEdits = folder.resolve("binaryEdits").toString();
+    String editsParsedXml = folder.resolve("editsParsed.xml").toString();
+    String editsReparsedXml = folder.resolve("editsReparsed.xml")
+        .toString();
 
     // Binary format input file is not allowed to be processed
     // by Binary processor.

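JUnit 5 has no TemporaryFolder rule, so the hunks above replace it with @TempDir, which injects a fresh java.nio.file.Path into each test method. A minimal sketch of that injection style (file name and content are hypothetical):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.io.TempDir;

    import static org.junit.jupiter.api.Assertions.assertTrue;

    public class TempDirExample {
      @Test
      public void testWritesIntoManagedFolder(@TempDir Path folder) throws IOException {
        // Unlike TemporaryFolder.newFile(), resolve() only builds a path;
        // nothing exists on disk until the code under test writes it.
        Path out = folder.resolve("edits.xml");
        Files.writeString(out, "<edits/>");
        assertTrue(Files.exists(out));
      }
    }

That difference is why the tests above switch from newFile(...).getAbsolutePath() to resolve(...).toString(); the tests rely on the viewer creating the output files itself.
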
+ 57 - 53
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java

@@ -109,10 +109,9 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
 import org.apache.hadoop.thirdparty.protobuf.ByteString;
 
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -143,9 +142,12 @@ import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.E
 import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_SCHEMA;
 import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_SCHEMA_CODEC_NAME;
 import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.ERASURE_CODING_SECTION_SCHEMA_OPTION;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
@@ -175,7 +177,8 @@ public class TestOfflineImageViewer {
   // data structure and store its fsimage location.
   // We only want to generate the fsimage file once and use it for
   // multiple tests.
-  @BeforeClass
+  @SuppressWarnings("checkstyle:MethodLength")
+  @BeforeAll
   public static void createOriginalFSImage() throws IOException {
     defaultTimeZone = TimeZone.getDefault();
     TimeZone.setDefault(TimeZone.getTimeZone("UTC"));
@@ -384,7 +387,7 @@ public class TestOfflineImageViewer {
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void deleteOriginalFSImage() throws IOException {
     FileUtils.deleteQuietly(tempDir);
     if (originalFsimage != null && originalFsimage.exists()) {
@@ -402,15 +405,17 @@ public class TestOfflineImageViewer {
     return hdfs.getFileStatus(new Path(file));
   }
 
-  @Test(expected = IOException.class)
+  @Test
   public void testTruncatedFSImage() throws IOException {
-    File truncatedFile = new File(tempDir, "truncatedFsImage");
-    PrintStream output = new PrintStream(NullOutputStream.INSTANCE);
-    copyPartOfFile(originalFsimage, truncatedFile);
-    try (RandomAccessFile r = new RandomAccessFile(truncatedFile, "r")) {
-      new FileDistributionCalculator(new Configuration(), 0, 0, false, output)
-        .visit(r);
-    }
+    assertThrows(IOException.class, () -> {
+      File truncatedFile = new File(tempDir, "truncatedFsImage");
+      PrintStream output = new PrintStream(NullOutputStream.INSTANCE);
+      copyPartOfFile(originalFsimage, truncatedFile);
+      try (RandomAccessFile r = new RandomAccessFile(truncatedFile, "r")) {
+        new FileDistributionCalculator(new Configuration(), 0, 0, false, output)
+            .visit(r);
+      }
+    });
   }
 
   private void copyPartOfFile(File src, File dest) throws IOException {
@@ -524,14 +529,14 @@ public class TestOfflineImageViewer {
         if (currentInodeName != null && currentInodeName.length() > 0) {
           if (currentBlockType != null && currentBlockType.equalsIgnoreCase(
               BlockType.STRIPED.name())) {
-            Assert.assertEquals("INode '"
-                    + currentInodeName + "' has unexpected EC Policy!",
-                Byte.parseByte(currentECPolicy),
-                SystemErasureCodingPolicies.XOR_2_1_POLICY_ID);
-            Assert.assertEquals("INode '"
-                    + currentInodeName + "' has unexpected replication!",
-                currentRepl,
-                Short.toString(INodeFile.DEFAULT_REPL_FOR_STRIPED_BLOCKS));
+            assertEquals(Byte.parseByte(currentECPolicy),
+                SystemErasureCodingPolicies.XOR_2_1_POLICY_ID,
+                "INode '"
+                    + currentInodeName + "' has unexpected EC Policy!");
+            assertEquals(currentRepl,
+                Short.toString(INodeFile.DEFAULT_REPL_FOR_STRIPED_BLOCKS),
+                "INode '"
+                    + currentInodeName + "' has unexpected replication!");
           }
         }
         isInode = false;
@@ -888,8 +893,7 @@ public class TestOfflineImageViewer {
     int status =
         OfflineImageViewerPB.run(new String[] { "-i",
             originalFsimage.getAbsolutePath(), "-o", "-", "-p", "invalid" });
-    assertTrue("Exit code returned for invalid processor option is incorrect",
-        status != 0);
+    assertTrue(status != 0, "Exit code returned for invalid processor option is incorrect");
   }
 
   @Test
@@ -900,40 +904,40 @@ public class TestOfflineImageViewer {
     try {
       System.setOut(out);
       int status = OfflineImageViewerPB.run(new String[] { "-h" });
-      assertTrue("Exit code returned for help option is incorrect", status == 0);
-      Assert.assertFalse(
-          "Invalid Command error displayed when help option is passed.", bytes
-              .toString().contains("Error parsing command-line options"));
+      assertTrue(status == 0, "Exit code returned for help option is incorrect");
+      assertFalse(bytes.toString().contains("Error parsing command-line options"),
+          "Invalid Command error displayed when help option is passed.");
       status =
           OfflineImageViewerPB.run(new String[] { "-h", "-i",
               originalFsimage.getAbsolutePath(), "-o", "-", "-p",
               "FileDistribution", "-maxSize", "512", "-step", "8" });
-      Assert.assertTrue(
-          "Exit code returned for help with other option is incorrect",
-          status == -1);
+      assertTrue(status == -1,
+          "Exit code returned for help with other option is incorrect");
     } finally {
       System.setOut(oldOut);
       IOUtils.closeStream(out);
     }
   }
 
-  @Test(expected = IOException.class)
+  @Test
   public void testDelimitedWithExistingFolder() throws IOException,
       InterruptedException {
-    File tempDelimitedDir = null;
-    try {
-      String tempDelimitedDirName = "tempDirDelimited";
-      String tempDelimitedDirPath = new FileSystemTestHelper().
-          getTestRootDir() + "/" + tempDelimitedDirName;
-      tempDelimitedDir = new File(tempDelimitedDirPath);
-      Assert.assertTrue("Couldn't create temp directory!",
-          tempDelimitedDir.mkdirs());
-      testPBDelimitedWriter(tempDelimitedDirPath);
-    } finally {
-      if (tempDelimitedDir != null) {
-        FileUtils.deleteDirectory(tempDelimitedDir);
+    assertThrows(IOException.class, () -> {
+      File tempDelimitedDir = null;
+      try {
+        String tempDelimitedDirName = "tempDirDelimited";
+        String tempDelimitedDirPath = new FileSystemTestHelper().
+            getTestRootDir() + "/" + tempDelimitedDirName;
+        tempDelimitedDir = new File(tempDelimitedDirPath);
+        assertTrue(tempDelimitedDir.mkdirs(),
+            "Couldn't create temp directory!");
+        testPBDelimitedWriter(tempDelimitedDirPath);
+      } finally {
+        if (tempDelimitedDir != null) {
+          FileUtils.deleteDirectory(tempDelimitedDir);
+        }
       }
-    }
+    });
   }
 
   private void testPBDelimitedWriter(String db)
@@ -1227,7 +1231,7 @@ public class TestOfflineImageViewer {
     }
     // The XML file we wrote based on the re-created fsimage should be the
     // same as the one we dumped from the original fsimage.
-    Assert.assertEquals("",
+    assertEquals("",
         GenericTestUtils.getFilesDiff(reverseImageXml, reverseImage2Xml));
   }
 
@@ -1255,8 +1259,8 @@ public class TestOfflineImageViewer {
     }
     try {
       OfflineImageReconstructor.run(imageWrongVersion.getAbsolutePath(),
-          imageWrongVersion.getAbsolutePath() + ".out"); 
-      Assert.fail("Expected OfflineImageReconstructor to fail with " +
+          imageWrongVersion.getAbsolutePath() + ".out");
+      fail("Expected OfflineImageReconstructor to fail with " +
           "version mismatch.");
     } catch (Throwable t) {
       GenericTestUtils.assertExceptionContains("Layout version mismatch.", t);
@@ -1359,7 +1363,7 @@ public class TestOfflineImageViewer {
       // and don't need to do the following operations.
       OfflineImageViewer.main(new String[] {"-i", "-", "-o", "-", "-p",
           "FileDistribution", "-maxSize", "512", "-step", "8", "-h"});
-      Assert.assertFalse(bytes.toString().contains(
+      assertFalse(bytes.toString().contains(
           "Error parsing command-line options: "));
     } finally {
       System.setOut(oldOut);
@@ -1380,7 +1384,7 @@ public class TestOfflineImageViewer {
               "FileDistribution", "-maxSize", "512", "-step", "8",
               "-format"});
       assertEquals(0, status);
-      Assert.assertTrue(bytes.toString().contains("(0 B, 8 B]"));
+      assertTrue(bytes.toString().contains("(0 B, 8 B]"));
     } finally {
       System.setOut(oldOut);
       IOUtils.closeStream(out);

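The @Test(expected = ...) attribute does not exist in Jupiter, so the hunks above wrap the failing statements in assertThrows. Besides compiling under JUnit 5, this narrows the expectation: the annotation passed if the exception escaped anywhere in the method, while the lambda scopes it to the exact statements under test. A minimal sketch (the thrown exception is a stand-in for the real parsing call):

    import java.io.IOException;

    import org.junit.jupiter.api.Test;

    import static org.junit.jupiter.api.Assertions.assertThrows;

    public class ExpectedExceptionExample {
      @Test
      public void testRejectsTruncatedInput() {
        assertThrows(IOException.class, () -> {
          throw new IOException("truncated");  // stand-in for visiting a truncated image
        });
      }
    }
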
+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java

@@ -49,9 +49,9 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.XMLUtils;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.xml.sax.InputSource;
 import org.xml.sax.helpers.DefaultHandler;
 
@@ -71,7 +71,7 @@ import static org.apache.hadoop.fs.permission.FsAction.READ_WRITE;
 import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
 import static org.apache.hadoop.fs.permission.FsAction.NONE;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Tests OfflineImageViewer if the input fsimage has HDFS ACLs
@@ -92,7 +92,7 @@ public class TestOfflineImageViewerForAcl {
    * We only want to generate the fsimage file once and use it for
    * multiple tests.
    */
-  @BeforeClass
+  @BeforeAll
   public static void createOriginalFSImage() throws IOException {
     MiniDFSCluster cluster = null;
     try {
@@ -163,7 +163,7 @@ public class TestOfflineImageViewerForAcl {
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void deleteOriginalFSImage() throws IOException {
     if (originalFsimage != null && originalFsimage.exists()) {
       originalFsimage.delete();

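@BeforeClass/@AfterClass map directly onto @BeforeAll/@AfterAll, and under Jupiter's default per-method lifecycle the methods must remain static, which fits the shared-fsimage pattern these viewer tests use. A minimal sketch (the string fixture is a hypothetical stand-in for the fsimage file):

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.Test;

    import static org.junit.jupiter.api.Assertions.assertNotNull;

    public class SharedFixtureExample {
      private static String fsimage;  // built once, shared by every test

      @BeforeAll  // JUnit 4: @BeforeClass; must be static by default
      public static void createFixture() {
        fsimage = "populated-once-per-class";
      }

      @AfterAll   // JUnit 4: @AfterClass
      public static void deleteFixture() {
        fsimage = null;
      }

      @Test
      public void testFixtureIsShared() {
        assertNotNull(fsimage);
      }
    }
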
+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForContentSummary.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import java.io.File;
 import java.io.IOException;
 import java.net.HttpURLConnection;
@@ -36,9 +36,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.net.NetUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests GETCONTENTSUMMARY operation for WebImageViewer
@@ -59,7 +59,7 @@ public class TestOfflineImageViewerForContentSummary {
    * data structure and store its fsimage location. We only want to generate
    * the fsimage file once and use it for multiple tests.
    */
-  @BeforeClass
+  @BeforeAll
   public static void createOriginalFSImage() throws IOException {
     MiniDFSCluster cluster = null;
     Configuration conf = new Configuration();
@@ -117,7 +117,7 @@ public class TestOfflineImageViewerForContentSummary {
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void deleteOriginalFSImage() {
     if (originalFsimage != null && originalFsimage.exists()) {
       originalFsimage.delete();

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForErasureCodingPolicy.java

@@ -26,9 +26,9 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,7 +39,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Tests OfflineImageViewer if the input fsimage has HDFS ErasureCodingPolicy
@@ -57,7 +57,7 @@ public class TestOfflineImageViewerForErasureCodingPolicy {
    * Create a populated namespace for later testing. Save its contents to a
    * data structure and store its fsimage location.
    */
-  @BeforeClass
+  @BeforeAll
   public static void createOriginalFSImage() throws IOException {
     MiniDFSCluster cluster = null;
     try {
@@ -137,7 +137,7 @@ public class TestOfflineImageViewerForErasureCodingPolicy {
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void deleteOriginalFSImage() throws IOException {
     if (originalFsimage != null && originalFsimage.exists()) {
       originalFsimage.delete();

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForStoragePolicy.java

@@ -27,15 +27,15 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.ALLSSD_STORAGE_POLICY_NAME;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.BufferedReader;
 import java.io.ByteArrayOutputStream;
@@ -60,7 +60,7 @@ public class TestOfflineImageViewerForStoragePolicy {
    * Create a populated namespace for later testing. Save its contents to a
    * data structure and store its fsimage location.
    */
-  @BeforeClass
+  @BeforeAll
   public static void createOriginalFSImage() throws IOException {
     MiniDFSCluster cluster = null;
     try {
@@ -134,7 +134,7 @@ public class TestOfflineImageViewerForStoragePolicy {
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void deleteOriginalFSImage() throws IOException {
     if (originalFsimage != null && originalFsimage.exists()) {
       originalFsimage.delete();

+ 13 - 15
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForXAttr.java

@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -45,9 +45,9 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.net.NetUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests OfflineImageViewer if the input fsimage has XAttributes
@@ -66,7 +66,7 @@ public class TestOfflineImageViewerForXAttr {
    * structure and store its fsimage location. We only want to generate the
    * fsimage file once and use it for multiple tests.
    */
-  @BeforeClass
+  @BeforeAll
   public static void createOriginalFSImage() throws IOException {
     MiniDFSCluster cluster = null;
     Configuration conf = new Configuration();
@@ -104,7 +104,7 @@ public class TestOfflineImageViewerForXAttr {
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void deleteOriginalFSImage() throws IOException {
     if (originalFsimage != null && originalFsimage.exists()) {
       originalFsimage.delete();
@@ -128,10 +128,10 @@ public class TestOfflineImageViewerForXAttr {
 
       String content = IOUtils.toString(connection.getInputStream(), StandardCharsets.UTF_8);
 
-      assertTrue("Missing user.attr1 in response ",
-          content.contains("user.attr1"));
-      assertTrue("Missing user.attr2 in response ",
-          content.contains("user.attr2"));
+      assertTrue(content.contains("user.attr1"),
+          "Missing user.attr1 in response ");
+      assertTrue(content.contains("user.attr2"),
+          "Missing user.attr2 in response ");
 
     }
   }
@@ -153,10 +153,8 @@ public class TestOfflineImageViewerForXAttr {
       assertEquals(HttpURLConnection.HTTP_OK, connection.getResponseCode());
       String content = IOUtils.toString(connection.getInputStream(), StandardCharsets.UTF_8);
 
-      assertTrue("Missing user.attr1 in response ",
-          content.contains("user.attr1"));
-      assertTrue("Missing user.attr2 in response ",
-          content.contains("user.attr2"));
+      assertTrue(content.contains("user.attr1"), "Missing user.attr1 in response ");
+      assertTrue(content.contains("user.attr2"), "Missing user.attr2 in response ");
     }
   }
 

+ 29 - 21
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerWithStripedBlocks.java

@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -40,9 +40,10 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 public class TestOfflineImageViewerWithStripedBlocks {
   private final ErasureCodingPolicy ecPolicy =
@@ -56,7 +57,7 @@ public class TestOfflineImageViewerWithStripedBlocks {
   private final int stripesPerBlock = 3;
   private final int blockSize = cellSize * stripesPerBlock;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     int numDNs = dataBlocks + parityBlocks + 2;
     Configuration conf = new Configuration();
@@ -72,48 +73,55 @@ public class TestOfflineImageViewerWithStripedBlocks {
     fs.mkdirs(eczone);
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testFileEqualToOneStripe() throws Exception {
     int numBytes = cellSize;
     testFileSize(numBytes);
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testFileLessThanOneStripe() throws Exception {
     int numBytes = cellSize - 100;
     testFileSize(numBytes);
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testFileHavingMultipleBlocks() throws Exception {
     int numBytes = blockSize * 3;
     testFileSize(numBytes);
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testFileLargerThanABlockGroup1() throws IOException {
     testFileSize(blockSize * dataBlocks + cellSize + 123);
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testFileLargerThanABlockGroup2() throws IOException {
     testFileSize(blockSize * dataBlocks * 3 + cellSize * dataBlocks + cellSize
         + 123);
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testFileFullBlockGroup() throws IOException {
     testFileSize(blockSize * dataBlocks);
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testFileMoreThanOneStripe() throws Exception {
     int numBytes = blockSize + blockSize / 2;
     testFileSize(numBytes);
@@ -148,23 +156,23 @@ public class TestOfflineImageViewerWithStripedBlocks {
     INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
     assertEquals(StripedFileTestUtil.getDefaultECPolicy().getId(),
         fileNode.getErasureCodingPolicyID());
-    assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
+    assertTrue(fileNode.getBlocks().length > 0, "Invalid block size");
     long actualFileSize = 0;
     for (BlockInfo blockInfo : fileNode.getBlocks()) {
-      assertTrue("Didn't find block striped information",
-          blockInfo instanceof BlockInfoStriped);
+      assertTrue(blockInfo instanceof BlockInfoStriped,
+          "Didn't find block striped information");
       actualFileSize += blockInfo.getNumBytes();
     }
 
-    assertEquals("Wrongly computed file size contains striped blocks",
-        expectedFileSize, actualFileSize);
+    assertEquals(expectedFileSize, actualFileSize,
+        "Wrongly computed file size contains striped blocks");
 
     // Verify space consumed present in filestatus
     String EXPECTED_FILE_SIZE = "\"length\":"
         + String.valueOf(expectedFileSize);
     assertTrue(
+        fileStatus.contains(EXPECTED_FILE_SIZE),
         "Wrongly computed file size contains striped blocks, file status:"
-            + fileStatus + ". Expected file size is : " + EXPECTED_FILE_SIZE,
-        fileStatus.contains(EXPECTED_FILE_SIZE));
+            + fileStatus + ". Expected file size is : " + EXPECTED_FILE_SIZE);
   }
 }

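@Test(timeout = 60000) is measured in milliseconds, while Jupiter's @Timeout defaults to seconds, hence 60000 becomes @Timeout(value = 60) above. A minimal sketch of both spellings:

    import java.util.concurrent.TimeUnit;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    public class TimeoutExample {
      @Test
      @Timeout(value = 60)  // seconds by default
      public void testFinishesWithinAMinute() throws InterruptedException {
        Thread.sleep(10);
      }

      @Test
      @Timeout(value = 60_000, unit = TimeUnit.MILLISECONDS)  // same deadline, explicit unit
      public void testSameDeadlineInMilliseconds() throws InterruptedException {
        Thread.sleep(10);
      }
    }
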
+ 7 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestPBImageCorruption.java

@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 
 /**
  * Unit tests for PBImageCorruptionType, CorruptionEntryBuilder and
@@ -34,9 +35,11 @@ public class TestPBImageCorruption {
     assertEquals("CorruptNodeWithMissingChild", ct.getType());
   }
 
-  @Test(expected = IllegalArgumentException.class)
+  @Test
   public void testImproperCorruptionTypeCreation() {
-    PBImageCorruption ct = new PBImageCorruption(210, false, false, 2);
+    assertThrows(IllegalArgumentException.class, () -> {
+      PBImageCorruption ct = new PBImageCorruption(210, false, false, 2);
+    });
   }
 
   @Test

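The block-bodied lambda above leaves ct unused; assertThrows also accepts an expression lambda, so inside the same test the check could shrink to the following sketch (same constructor arguments as in the patch):

    assertThrows(IllegalArgumentException.class,
        () -> new PBImageCorruption(210, false, false, 2));
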
+ 19 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java

@@ -18,10 +18,11 @@
 package org.apache.hadoop.hdfs.util;
 
 import static org.apache.hadoop.test.PlatformAssumptions.assumeWindows;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -33,10 +34,8 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 
@@ -48,11 +47,8 @@ public class TestAtomicFileOutputStream {
   private static final File TEST_DIR = PathUtils.getTestDir(TestAtomicFileOutputStream.class);
   
   private static final File DST_FILE = new File(TEST_DIR, "test.txt");
-
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
   
-  @Before
+  @BeforeEach
   public void cleanupTestDir() throws IOException {
     assertTrue(TEST_DIR.exists() || TEST_DIR.mkdirs());
     FileUtil.fullyDeleteContents(TEST_DIR);
@@ -80,11 +76,11 @@ public class TestAtomicFileOutputStream {
    */
   @Test
   public void testOverwriteFile() throws IOException {
-    assertTrue("Creating empty dst file", DST_FILE.createNewFile());
+    assertTrue(DST_FILE.createNewFile(), "Creating empty dst file");
     
     OutputStream fos = new AtomicFileOutputStream(DST_FILE);
     
-    assertTrue("Empty file still exists", DST_FILE.exists());
+    assertTrue(DST_FILE.exists(), "Empty file still exists");
     fos.write(TEST_STRING.getBytes());
     fos.flush();
     
@@ -121,9 +117,9 @@ public class TestAtomicFileOutputStream {
     
     // Should not have touched original file
     assertEquals(TEST_STRING_2, DFSTestUtil.readFile(DST_FILE));
-    
-    assertEquals("Temporary file should have been cleaned up",
-        DST_FILE.getName(), Joiner.on(",").join(TEST_DIR.list()));
+
+    assertEquals(DST_FILE.getName(), Joiner.on(",").join(TEST_DIR.list()),
+        "Temporary file should have been cleaned up");
   }
 
   @Test
@@ -134,13 +130,12 @@ public class TestAtomicFileOutputStream {
       fos = new AtomicFileOutputStream(DST_FILE);
       fos.write(TEST_STRING.getBytes());
       FileUtil.setWritable(TEST_DIR, false);
-      exception.expect(IOException.class);
-      exception.expectMessage("failure in native rename");
-      try {
-        fos.close();
-      } finally {
-        fos = null;
-      }
+      final OutputStream toClose = fos;
+      IOException ex = assertThrows(IOException.class, () -> {
+        toClose.close();
+      });
+      assertTrue(ex.getMessage().contains("failure in native rename"));
+      fos = null;
     } finally {
       IOUtils.cleanupWithLogger(null, fos);
       FileUtil.setWritable(TEST_DIR, true);

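The removed ExpectedException rule combined expect() and expectMessage(); assertThrows returns the thrown exception, so the message check becomes an ordinary assertion, as the hunk above does. A minimal sketch (the thrown IOException stands in for the failing close()):

    import java.io.IOException;

    import org.junit.jupiter.api.Test;

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    public class ExpectedMessageExample {
      @Test
      public void testFailureMessageIsPreserved() {
        IOException ex = assertThrows(IOException.class, () -> {
          throw new IOException("failure in native rename");  // stand-in
        });
        assertTrue(ex.getMessage().contains("failure in native rename"));
      }
    }
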
+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestBestEffortLongFile.java

@@ -23,17 +23,18 @@ import java.util.Random;
 
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestBestEffortLongFile {
 
   private static final File FILE = new File(MiniDFSCluster.getBaseDirectory() +
       File.separatorChar + "TestBestEffortLongFile");
 
-  @Before
+  @BeforeEach
   public void cleanup() {
     if (FILE.exists()) {
       assertTrue(FILE.delete());

+ 16 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCombinedHostsFileReader.java

@@ -24,14 +24,15 @@ import java.util.concurrent.Callable;
 
 import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Before;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 import org.mockito.Mock;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
 import static org.mockito.Mockito.when;
 import org.mockito.MockitoAnnotations;
 
@@ -54,12 +55,12 @@ public class TestCombinedHostsFileReader {
   @Mock
   private Callable<DatanodeAdminProperties[]> callable;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     MockitoAnnotations.initMocks(this);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     // Delete test file after running tests
     newFile.delete();
@@ -115,7 +116,7 @@ public class TestCombinedHostsFileReader {
    * When timeout is enabled, test for IOException when reading file exceeds
    * timeout limits
    */
-  @Test(expected = IOException.class)
+  @Test
   public void testReadFileWithTimeoutTimeoutException() throws Exception {
     when(callable.call()).thenAnswer(new Answer<Void>() {
       @Override
@@ -124,15 +125,16 @@ public class TestCombinedHostsFileReader {
         return null;
       }
     });
-
-    CombinedHostsFileReader.readFileWithTimeout(
-        jsonFile.getAbsolutePath(), 1);
+    assertThrows(IOException.class, () -> {
+      CombinedHostsFileReader.readFileWithTimeout(
+          jsonFile.getAbsolutePath(), 1);
+    });
   }
 
   /*
    * When timeout is enabled, test for IOException when execution is interrupted
    */
-  @Test(expected = IOException.class)
+  @Test
   public void testReadFileWithTimeoutInterruptedException() throws Exception {
     when(callable.call()).thenAnswer(new Answer<Void>() {
       @Override
@@ -140,8 +142,9 @@ public class TestCombinedHostsFileReader {
         throw new InterruptedException();
       }
     });
-
-    CombinedHostsFileReader.readFileWithTimeout(
-        jsonFile.getAbsolutePath(), 1);
+    assertThrows(IOException.class, () -> {
+      CombinedHostsFileReader.readFileWithTimeout(
+          jsonFile.getAbsolutePath(), 1);
+    });
   }
 }

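The two timeout tests above keep their Mockito Answer stubs and only move the call under test into assertThrows. A minimal self-contained sketch of that combination; readWithTimeout is a hypothetical stand-in for CombinedHostsFileReader.readFileWithTimeout, not the real API:

    import java.io.IOException;
    import java.util.concurrent.Callable;

    import org.junit.jupiter.api.Test;
    import org.mockito.Mockito;

    import static org.junit.jupiter.api.Assertions.assertThrows;

    public class TimeoutMockExample {
      // Hypothetical stand-in: run the callable, translating any failure
      // (including interruption) into IOException.
      static String readWithTimeout(Callable<String> source) throws IOException {
        try {
          return source.call();
        } catch (Exception e) {
          throw new IOException("read failed", e);
        }
      }

      @Test
      public void testInterruptedReadSurfacesAsIOException() throws Exception {
        @SuppressWarnings("unchecked")
        Callable<String> reader = Mockito.mock(Callable.class);
        Mockito.when(reader.call()).thenAnswer(invocation -> {
          throw new InterruptedException();  // simulate an interrupted read
        });
        assertThrows(IOException.class, () -> readWithTimeout(reader));
      }
    }
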
+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestCyclicIteration.java

@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs.util;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -25,7 +25,7 @@ import java.util.Map;
 import java.util.NavigableMap;
 import java.util.TreeMap;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestCyclicIteration {
   @Test
@@ -58,7 +58,7 @@ public class TestCyclicIteration {
       //verify results
       for(int i = 0; i < integers.length; i++) {
         final int j = ((start+2)/2 + i)%integers.length;
-        assertEquals("i=" + i + ", j=" + j, iteration.get(i), integers[j]);
+        assertEquals(iteration.get(i), integers[j], "i=" + i + ", j=" + j);
       }
     }
   }

+ 12 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java

@@ -29,8 +29,11 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.util.Diff;
 import org.apache.hadoop.hdfs.util.Diff.Container;
 import org.apache.hadoop.hdfs.util.Diff.UndoInfo;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test {@link Diff} with {@link INode}.
@@ -46,7 +49,8 @@ public class TestDiff {
   }
 
   /** Test directory diff. */
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testDiff() throws Exception {
     for(int startSize = 0; startSize <= 10000; startSize = nextStep(startSize)) {
       for(int m = 0; m <= 10000; m = nextStep(m)) {
@@ -192,7 +196,7 @@ public class TestDiff {
           final int j = Diff.search(previous, inode.getKey());
           final INode expected = j < 0? null: previous.get(j);
           // must be the same object (equals is not enough)
-          Assert.assertTrue(computed == expected);
+          assertTrue(computed == expected);
         }
 
         {// test accessCurrent
@@ -208,7 +212,7 @@ public class TestDiff {
           final int j = Diff.search(current, inode.getKey());
           final INode expected = j < 0? null: current.get(j);
           // must be the same object (equals is not enough)
-          Assert.assertTrue(computed == expected);
+          assertTrue(computed == expected);
         }
       }
     }
@@ -250,7 +254,7 @@ public class TestDiff {
   static void create(INode inode, final List<INode> current,
       Diff<byte[], INode> diff) {
     final int i = Diff.search(current, inode.getKey());
-    Assert.assertTrue(i < 0);
+    assertTrue(i < 0);
     current.add(-i - 1, inode);
     if (diff != null) {
       //test undo with 1/UNDO_TEST_P probability
@@ -303,7 +307,7 @@ public class TestDiff {
   static void modify(INode inode, final List<INode> current,
       Diff<byte[], INode> diff) {
     final int i = Diff.search(current, inode.getKey());
-    Assert.assertTrue(i >= 0);
+    assertTrue(i >= 0);
     final INodeDirectory oldinode = (INodeDirectory)current.get(i);
     final INodeDirectory newinode = new INodeDirectory(oldinode, false,
       oldinode.getFeatures());
@@ -333,6 +337,6 @@ public class TestDiff {
   }
   
   static void assertDiff(String s, Diff<byte[], INode> diff) {
-    Assert.assertEquals(s, diff.toString());
+    assertEquals(s, diff.toString());
   }
 }

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightHashSet.java

@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.hdfs.util;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -33,8 +33,8 @@ import java.util.Random;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.Time;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestLightWeightHashSet{
 
@@ -45,7 +45,7 @@ public class TestLightWeightHashSet{
   private LightWeightHashSet<Integer> set;
   private Random rand;
 
-  @Before
+  @BeforeEach
   public void setUp() {
     float maxF = LightWeightHashSet.DEFAULT_MAX_LOAD_FACTOR;
     float minF = LightWeightHashSet.DEFAUT_MIN_LOAD_FACTOR;

+ 16 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestLightWeightLinkedSet.java

@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.hdfs.util;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -30,8 +30,9 @@ import java.util.Random;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.Time;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 public class TestLightWeightLinkedSet {
 
@@ -42,7 +43,7 @@ public class TestLightWeightLinkedSet {
   private LightWeightLinkedSet<Integer> set;
   private Random rand;
 
-  @Before
+  @BeforeEach
   public void setUp() {
     float maxF = LightWeightLinkedSet.DEFAULT_MAX_LOAD_FACTOR;
     float minF = LightWeightLinkedSet.DEFAUT_MIN_LOAD_FACTOR;
@@ -372,7 +373,8 @@ public class TestLightWeightLinkedSet {
     LOG.info("Test capacity - DONE");
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testGetBookmarkReturnsBookmarkIterator() {
     LOG.info("Test getBookmark returns proper iterator");
     assertTrue(set.addAll(list));
@@ -389,7 +391,8 @@ public class TestLightWeightLinkedSet {
     assertEquals(bookmark2.next(), list.get(numAdvance));
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testBookmarkAdvancesOnRemoveOfSameElement() {
     LOG.info("Test that the bookmark advances if we remove its element.");
     assertTrue(set.add(list.get(0)));
@@ -403,7 +406,8 @@ public class TestLightWeightLinkedSet {
     assertEquals(it.next(), list.get(2));
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testBookmarkSetToHeadOnAddToEmpty() {
     LOG.info("Test bookmark is set after adding to previously empty set.");
     Iterator<Integer> it = set.getBookmark();
@@ -418,7 +422,8 @@ public class TestLightWeightLinkedSet {
     assertFalse(it.hasNext());
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testResetBookmarkPlacesBookmarkAtHead() {
     set.addAll(list);
     Iterator<Integer> it = set.getBookmark();

+ 11 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java

@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.hdfs.util;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.File;
 import java.io.FileOutputStream;
@@ -30,8 +31,8 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestMD5FileUtils {
   private static final File TEST_DIR = PathUtils.getTestDir(TestMD5FileUtils.class);
@@ -43,7 +44,7 @@ public class TestMD5FileUtils {
     DFSTestUtil.generateSequentialBytes(0, TEST_DATA_LEN);
   private static final MD5Hash TEST_MD5 = MD5Hash.digest(TEST_DATA);
   
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     FileUtil.fullyDelete(TEST_DIR);
     assertTrue(TEST_DIR.mkdirs());
@@ -69,9 +70,11 @@ public class TestMD5FileUtils {
   /**
    * Test when .md5 file does not exist at all
    */
-  @Test(expected=IOException.class)
+  @Test
   public void testVerifyMD5FileMissing() throws Exception {
-    MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
+    assertThrows(IOException.class, () -> {
+      MD5FileUtils.verifySavedMD5(TEST_FILE, TEST_MD5);
+    });
   }
 
   /**

+ 17 - 16
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestReferenceCountMap.java

@@ -19,8 +19,9 @@
 package org.apache.hadoop.hdfs.util;
 
 import org.apache.hadoop.hdfs.server.namenode.AclFeature;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Verify ReferenceCount map in concurrent scenarios.
@@ -37,28 +38,28 @@ public class TestReferenceCountMap {
     ReferenceCountMap<AclFeature> countMap = new ReferenceCountMap<>();
     countMap.put(aclFeature1);
     countMap.put(aclFeature2);
-    Assert.assertEquals(1, countMap.getReferenceCount(aclFeature1));
-    Assert.assertEquals(1, countMap.getReferenceCount(aclFeature2));
+    assertEquals(1, countMap.getReferenceCount(aclFeature1));
+    assertEquals(1, countMap.getReferenceCount(aclFeature2));
 
     countMap.put(aclFeature1);
     countMap.put(aclFeature2);
-    Assert.assertEquals(2, countMap.getReferenceCount(aclFeature1));
-    Assert.assertEquals(2, countMap.getReferenceCount(aclFeature2));
+    assertEquals(2, countMap.getReferenceCount(aclFeature1));
+    assertEquals(2, countMap.getReferenceCount(aclFeature2));
 
     countMap.put(aclFeature1);
-    Assert.assertEquals(3, countMap.getReferenceCount(aclFeature1));
+    assertEquals(3, countMap.getReferenceCount(aclFeature1));
     countMap.put(aclFeature1);
-    Assert.assertEquals(4, countMap.getReferenceCount(aclFeature1));
-    Assert.assertEquals(2, countMap.getReferenceCount(aclFeature2));
+    assertEquals(4, countMap.getReferenceCount(aclFeature1));
+    assertEquals(2, countMap.getReferenceCount(aclFeature2));
 
     //Delete operations:
     countMap.remove(aclFeature1);
     countMap.remove(aclFeature2);
-    Assert.assertEquals(3, countMap.getReferenceCount(aclFeature1));
-    Assert.assertEquals(1, countMap.getReferenceCount(aclFeature2));
+    assertEquals(3, countMap.getReferenceCount(aclFeature1));
+    assertEquals(1, countMap.getReferenceCount(aclFeature2));
 
     //Verify unique elements in map
-    Assert.assertEquals(2, countMap.getUniqueElementsSize());
+    assertEquals(2, countMap.getUniqueElementsSize());
   }
 
   @Test
@@ -73,15 +74,15 @@ public class TestReferenceCountMap {
 
     putThread1.join();
     putThread2.join();
-    Assert.assertEquals(2 * LOOP_COUNTER,
+    assertEquals(2 * LOOP_COUNTER,
         countMap.getReferenceCount(aclFeature1));
-    Assert.assertEquals(2 * LOOP_COUNTER,
+    assertEquals(2 * LOOP_COUNTER,
         countMap.getReferenceCount(aclFeature2));
 
     removeThread1.start();
     removeThread1.join();
-    Assert.assertEquals(LOOP_COUNTER, countMap.getReferenceCount(aclFeature1));
-    Assert.assertEquals(LOOP_COUNTER, countMap.getReferenceCount(aclFeature2));
+    assertEquals(LOOP_COUNTER, countMap.getReferenceCount(aclFeature1));
+    assertEquals(LOOP_COUNTER, countMap.getReferenceCount(aclFeature2));
   }
 
   class PutThread extends Thread {

+ 8 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java

@@ -31,17 +31,15 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.*;
 
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.nio.ByteBuffer;
 import java.util.Random;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Need to cover the following combinations:
@@ -97,10 +95,7 @@ public class TestStripedBlockUtil {
   private int[] byteRangeStartOffsets;
   private int[] byteRangeSizes;
 
-  @Rule
-  public Timeout globalTimeout = new Timeout(300000);
-
-  @Before
+  @BeforeEach
   public void setup(){
     blockGroupSizes = new int[]{1, getDelta(cellSize), cellSize,
         getDelta(dataBlocks) * cellSize,
@@ -285,8 +280,8 @@ public class TestStripedBlockUtil {
             if (hashIntToByte(brStart + i) != assembled.get(i)) {
               System.out.println("Oops");
             }
-            assertEquals("Byte at " + (brStart + i) + " should be the same",
-                hashIntToByte(brStart + i), assembled.get(i));
+            assertEquals(hashIntToByte(brStart + i), assembled.get(i),
+                "Byte at " + (brStart + i) + " should be the same");
           }
         }
       }

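The removed @Rule Timeout globalTimeout = new Timeout(300000) applied a 300-second limit to every test in the class; the patch drops it without a replacement. If the same guard were wanted under Jupiter, a class-level @Timeout covers each test method, as in this sketch:

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    @Timeout(value = 300)  // seconds; applies to every test in the class
    public class ClassTimeoutExample {
      @Test
      public void testCoveredByClassTimeout() throws InterruptedException {
        Thread.sleep(5);
      }
    }
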
+ 8 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestXMLUtils.java

@@ -18,16 +18,18 @@
 package org.apache.hadoop.hdfs.util;
 
 import org.apache.hadoop.hdfs.util.XMLUtils.UnmanglingError;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class TestXMLUtils {
   private static void testRoundTripImpl(String str, String expectedMangled,
                                     boolean encodeEntityRefs) {
     String mangled = XMLUtils.mangleXmlString(str, encodeEntityRefs);
-    Assert.assertEquals(expectedMangled, mangled);
+    assertEquals(expectedMangled, mangled);
     String unmangled = XMLUtils.unmangleXmlString(mangled, encodeEntityRefs);
-    Assert.assertEquals(str, unmangled);
+    assertEquals(str, unmangled);
   }
 
   private static void testRoundTrip(String str, String expectedMangled) {
@@ -65,13 +67,13 @@ public class TestXMLUtils {
   public void testInvalidSequence() throws Exception {
     try {
       XMLUtils.unmangleXmlString("\\000g;foo", false);
-      Assert.fail("expected an unmangling error");
+      fail("expected an unmangling error");
     } catch (UnmanglingError e) {
       // pass through
     }
     try {
       XMLUtils.unmangleXmlString("\\0", false);
-      Assert.fail("expected an unmangling error");
+      fail("expected an unmangling error");
     } catch (UnmanglingError e) {
       // pass through
     }

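This file keeps its try/fail/catch shape and only swaps the static imports. Jupiter's assertThrows can express the same checks more compactly; an equivalent sketch using the same XMLUtils calls as above:

    assertThrows(UnmanglingError.class,
        () -> XMLUtils.unmangleXmlString("\\000g;foo", false));
    assertThrows(UnmanglingError.class,
        () -> XMLUtils.unmangleXmlString("\\0", false));
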
+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestAuthFilter.java

@@ -22,14 +22,14 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
 
 public class TestAuthFilter {
 

+ 10 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java

@@ -33,10 +33,12 @@ import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestHttpsFileSystem {
   private static final String BASEDIR =
@@ -49,7 +51,7 @@ public class TestHttpsFileSystem {
   private static String sslConfDir;
   private static String nnAddr;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     conf = new Configuration();
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
@@ -78,7 +80,7 @@ public class TestHttpsFileSystem {
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr);
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -94,9 +96,9 @@ public class TestHttpsFileSystem {
     FSDataOutputStream os = fs.create(f);
     os.write(23);
     os.close();
-    Assert.assertTrue(fs.exists(f));
+    assertTrue(fs.exists(f));
     InputStream is = fs.open(f);
-    Assert.assertEquals(23, is.read());
+    assertEquals(23, is.read());
     is.close();
     fs.close();
   }

+ 44 - 40
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java

@@ -21,6 +21,11 @@ import static org.apache.hadoop.fs.permission.AclEntryScope.*;
 import static org.apache.hadoop.fs.permission.AclEntryType.*;
 import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -54,8 +59,7 @@ import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
 
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
@@ -107,9 +111,9 @@ public class TestJsonUtil {
     final FileStatus fs2 = toFileStatus(s2, parent);
     System.out.println("s2      = " + s2);
     System.out.println("fs2     = " + fs2);
-    Assert.assertEquals(status.getErasureCodingPolicy(),
+    assertEquals(status.getErasureCodingPolicy(),
         s2.getErasureCodingPolicy());
-    Assert.assertEquals(fstatus, fs2);
+    assertEquals(fstatus, fs2);
   }
 
   /**
@@ -129,7 +133,7 @@ public class TestJsonUtil {
         .isdir(true)
         .build();
 
-    Assert.assertFalse(hdfsFileStatus.isSymlink());
+    assertFalse(hdfsFileStatus.isSymlink());
     LambdaTestUtils.intercept(IOException.class,
         "Path " + hdfsFileStatus.getPath() + " is not a symbolic link",
         () -> hdfsFileStatus.getSymlink());
@@ -151,7 +155,7 @@ public class TestJsonUtil {
         .append("}")
         .toString();
 
-    Assert.assertEquals(expectString, hdfsFileStatus.toString());
+    assertEquals(expectString, hdfsFileStatus.toString());
   }
 
   @Test
@@ -173,7 +177,7 @@ public class TestJsonUtil {
         .path(DFSUtil.string2Bytes("foo"))
         .fileId(HdfsConstants.GRANDFATHER_INODE_ID)
         .build();
-    Assert.assertTrue(status.getErasureCodingPolicy() == null);
+    assertTrue(status.getErasureCodingPolicy() == null);
 
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status  = " + status);
@@ -186,7 +190,7 @@ public class TestJsonUtil {
     System.out.println("s2      = " + s2);
     System.out.println("fs2     = " + fs2);
 
-    Assert.assertEquals(fstatus, fs2);
+    assertEquals(fstatus, fs2);
   }
   
   @Test
@@ -238,15 +242,15 @@ public class TestJsonUtil {
     response.put("cacheUsed", 321l);
 
     DatanodeInfo di = JsonUtilClient.toDatanodeInfo(response);
-    Assert.assertEquals(name, di.getXferAddr());
+    assertEquals(name, di.getXferAddr());
 
     // The encoded result should contain name, ipAddr and xferPort.
     Map<String, Object> r = JsonUtil.toJsonMap(di);
-    Assert.assertEquals(name, r.get("name"));
-    Assert.assertEquals("127.0.0.1", r.get("ipAddr"));
+    assertEquals(name, r.get("name"));
+    assertEquals("127.0.0.1", r.get("ipAddr"));
     // In this test, it is Integer instead of Long since json was not actually
     // involved in constructing the map.
-    Assert.assertEquals(1004, (int)(Integer)r.get("xferPort"));
+    assertEquals(1004, (int)(Integer)r.get("xferPort"));
 
     // Invalid names
     String[] badNames = {"127.0.0.1", "127.0.0.1:", ":", "127.0.0.1:sweet", ":123"};
@@ -282,8 +286,8 @@ public class TestJsonUtil {
     aclStatusBuilder.addEntries(aclSpec);
     aclStatusBuilder.stickyBit(false);
 
-    Assert.assertEquals("Should be equal", aclStatusBuilder.build(),
-        JsonUtilClient.toAclStatus(json));
+    assertEquals(aclStatusBuilder.build(),
+        JsonUtilClient.toAclStatus(json), "Should be equal");
   }
 
   @Test
@@ -300,7 +304,7 @@ public class TestJsonUtil {
             aclEntry(ACCESS, GROUP, READ_WRITE));
 
     aclStatusBuilder.addEntries(aclSpec);
-    Assert.assertEquals(jsonString,
+    assertEquals(jsonString,
         JsonUtil.toJsonString(aclStatusBuilder.build()));
 
   }
@@ -335,7 +339,7 @@ public class TestJsonUtil {
         .snapshotDirectoryCount(snapshotDirectoryCount)
         .snapshotSpaceConsumed(snapshotSpaceConsumed).build();
 
-    Assert.assertEquals(jsonString, JsonUtil.toJsonString(contentSummary));
+    assertEquals(jsonString, JsonUtil.toJsonString(contentSummary));
   }
 
   @Test
@@ -350,8 +354,8 @@ public class TestJsonUtil {
     List<XAttr> xAttrs = Lists.newArrayList();
     xAttrs.add(xAttr1);
     xAttrs.add(xAttr2);
-    
-    Assert.assertEquals(jsonString, JsonUtil.toJsonString(xAttrs, 
+
+    assertEquals(jsonString, JsonUtil.toJsonString(xAttrs,
         XAttrCodec.HEX));
   }
   
@@ -371,11 +375,11 @@ public class TestJsonUtil {
     Map<String, byte[]> xAttrMap = XAttrHelper.buildXAttrMap(xAttrs);
     Map<String, byte[]> parsedXAttrMap = JsonUtilClient.toXAttrs(json);
     
-    Assert.assertEquals(xAttrMap.size(), parsedXAttrMap.size());
+    assertEquals(xAttrMap.size(), parsedXAttrMap.size());
     Iterator<Entry<String, byte[]>> iter = xAttrMap.entrySet().iterator();
     while(iter.hasNext()) {
       Entry<String, byte[]> entry = iter.next();
-      Assert.assertArrayEquals(entry.getValue(), 
+      assertArrayEquals(entry.getValue(),
           parsedXAttrMap.get(entry.getKey()));
     }
   }
@@ -389,7 +393,7 @@ public class TestJsonUtil {
 
     // Get xattr: user.a2
     byte[] value = JsonUtilClient.getXAttr(json, "user.a2");
-    Assert.assertArrayEquals(XAttrCodec.decodeValue("0x313131"), value);
+    assertArrayEquals(XAttrCodec.decodeValue("0x313131"), value);
   }
 
   @Test
@@ -400,7 +404,7 @@ public class TestJsonUtil {
     SnapshotDiffReportListing parsed =
         JsonUtilClient.toSnapshotDiffReportListing(json);
 
-    assertEquals(report, parsed);
+    assertSnapshotListingEquals(report, parsed);
   }
 
   @Test
@@ -428,43 +432,43 @@ public class TestJsonUtil {
     SnapshotDiffReportListing parsed =
         JsonUtilClient.toSnapshotDiffReportListing(json);
 
-    assertEquals(report, parsed);
+    assertSnapshotListingEquals(report, parsed);
   }
 
-  private void assertEquals(
+  private void assertSnapshotListingEquals(
       SnapshotDiffReportListing expected, SnapshotDiffReportListing actual) {
-    Assert.assertEquals(expected.getLastIndex(), actual.getLastIndex());
-    Assert.assertEquals(expected.getIsFromEarlier(), actual.getIsFromEarlier());
-    assertEquals(expected.getModifyList(), actual.getModifyList());
-    assertEquals(expected.getCreateList(), actual.getCreateList());
-    assertEquals(expected.getDeleteList(), actual.getDeleteList());
-    Assert.assertArrayEquals(expected.getLastPath(), actual.getLastPath());
+    assertEquals(expected.getLastIndex(), actual.getLastIndex());
+    assertEquals(expected.getIsFromEarlier(), actual.getIsFromEarlier());
+    assertEntryListEquals(expected.getModifyList(), actual.getModifyList());
+    assertEntryListEquals(expected.getCreateList(), actual.getCreateList());
+    assertEntryListEquals(expected.getDeleteList(), actual.getDeleteList());
+    assertArrayEquals(expected.getLastPath(), actual.getLastPath());
   }
 
-  private void assertEquals(
+  private void assertEntryListEquals(
       List<DiffReportListingEntry> expected, List<DiffReportListingEntry> actual) {
-    Assert.assertEquals(expected.size(), actual.size());
+    assertEquals(expected.size(), actual.size());
 
     for (int i = 0; i < expected.size(); i++) {
       DiffReportListingEntry a = expected.get(i);
       DiffReportListingEntry b = actual.get(i);
 
-      Assert.assertEquals(a.getFileId(), b.getFileId());
-      Assert.assertEquals(a.getDirId(), b.getDirId());
-      Assert.assertEquals(a.isReference(), b.isReference());
+      assertEquals(a.getFileId(), b.getFileId());
+      assertEquals(a.getDirId(), b.getDirId());
+      assertEquals(a.isReference(), b.isReference());
       if (a.getSourcePath() != null) {
-        Assert.assertArrayEquals(
+        assertArrayEquals(
             DFSUtilClient.byteArray2bytes(a.getSourcePath()),
             DFSUtilClient.byteArray2bytes(b.getSourcePath()));
       } else {
-        Assert.assertArrayEquals(a.getSourcePath(), b.getSourcePath());
+        assertArrayEquals(a.getSourcePath(), b.getSourcePath());
       }
       if (a.getTargetPath() != null) {
-        Assert.assertArrayEquals(
+        assertArrayEquals(
             DFSUtilClient.byteArray2bytes(a.getTargetPath()),
             DFSUtilClient.byteArray2bytes(b.getTargetPath()));
       } else {
-        Assert.assertArrayEquals(a.getTargetPath(), b.getTargetPath());
+        assertArrayEquals(a.getTargetPath(), b.getTargetPath());
       }
     }
   }
@@ -472,7 +476,7 @@ public class TestJsonUtil {
   private void checkDecodeFailure(Map<String, Object> map) {
     try {
       JsonUtilClient.toDatanodeInfo(map);
-      Assert.fail("Exception not thrown against bad input.");
+      fail("Exception not thrown against bad input.");
     } catch (Exception e) {
       // expected
     }
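
The two private assertEquals overloads above are renamed because the class now statically imports org.junit.jupiter.api.Assertions.assertEquals: in Java, member methods named assertEquals shadow the static import for every unqualified call in the class, so the JUnit overloads would stop resolving. A minimal sketch of the shadowing, using a hypothetical class:

    import static org.junit.jupiter.api.Assertions.assertEquals;

    class ShadowingSketch {
      // This member hides the statically imported assertEquals within the class.
      private void assertEquals(String expected, String actual) {
      }

      void demo() {
        // assertEquals(1L, 2L);  // would not compile: only the member overload
        //                        // is considered, and it takes two Strings
      }
    }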

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtilClient.java

@@ -20,15 +20,15 @@ package org.apache.hadoop.hdfs.web;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.util.JsonSerialization;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 public class TestJsonUtilClient {
   @Test
@@ -36,7 +36,7 @@ public class TestJsonUtilClient {
     List<String> strList = new ArrayList<String>(Arrays.asList("aaa", "bbb", "ccc"));
 
     String[] strArr = JsonUtilClient.toStringArray(strList);
-    assertEquals("Expected 3 items in the array", 3, strArr.length);
+    assertEquals(3, strArr.length, "Expected 3 items in the array");
     assertEquals("aaa", strArr[0]);
     assertEquals("bbb", strArr[1]);
     assertEquals("ccc", strArr[2]);

+ 185 - 190
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

@@ -30,12 +30,14 @@ import static org.apache.hadoop.hdfs.TestDistributedFileSystem.checkStatistics;
 import static org.apache.hadoop.hdfs.TestDistributedFileSystem.getOpStatistics;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.EOFException;
 import java.io.File;
@@ -141,9 +143,9 @@ import org.slf4j.event.Level;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.mockito.Mockito;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
@@ -168,7 +170,7 @@ public class TestWebHDFS {
 
   private static MiniDFSCluster cluster = null;
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (null != cluster) {
       cluster.shutdown();
@@ -212,7 +214,8 @@ public class TestWebHDFS {
     }
   }
 
-  @Test(timeout=300000)
+  @Test
+  @Timeout(value = 300)
   public void testLargeFile() throws Exception {
     largeFileTest(200L << 20); //200MB file length
   }
@@ -229,7 +232,7 @@ public class TestWebHDFS {
     final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
         WebHdfsConstants.WEBHDFS_SCHEME);
     final Path dir = new Path("/test/largeFile");
-    Assert.assertTrue(fs.mkdirs(dir));
+    assertTrue(fs.mkdirs(dir));
 
     final byte[] data = new byte[1 << 20];
     RANDOM.nextBytes(data);
@@ -255,7 +258,7 @@ public class TestWebHDFS {
     }
     t.end(fileLength);
 
-    Assert.assertEquals(fileLength, fs.getFileStatus(p).getLen());
+    assertEquals(fileLength, fs.getFileStatus(p).getLen());
 
     final long smallOffset = RANDOM.nextInt(1 << 20) + (1 << 20);
     final long largeOffset = fileLength - smallOffset;
@@ -273,7 +276,7 @@ public class TestWebHDFS {
       int j = (int)(offset % actual.length);
       for(int i = 0; i < n; i++) {
         if (expected[j] != actual[i]) {
-          Assert.fail("expected[" + j + "]=" + expected[j]
+          fail("expected[" + j + "]=" + expected[j]
               + " != actual[" + i + "]=" + actual[i]
               + ", offset=" + offset + ", remaining=" + remaining + ", n=" + n);
         }
@@ -331,14 +334,16 @@ public class TestWebHDFS {
   }
 
   /** Test client retry with namenode restarting. */
-  @Test(timeout=300000)
+  @Test
+  @Timeout(value = 300)
   public void testNamenodeRestart() throws Exception {
     GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.TRACE);
     final Configuration conf = WebHdfsTestUtil.createConf();
     TestDFSClientRetries.namenodeRestartTest(conf, true);
   }
   
-  @Test(timeout=300000)
+  @Test
+  @Timeout(value = 300)
   public void testLargeDirectory() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
     final int listLimit = 2;
@@ -367,12 +372,12 @@ public class TestWebHDFS {
             FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
                 WebHdfsConstants.WEBHDFS_SCHEME);
             Path d = new Path("/my-dir");
-            Assert.assertTrue(fs.mkdirs(d));
+            assertTrue(fs.mkdirs(d));
             // Iterator should have no items when dir is empty
             RemoteIterator<FileStatus> it = fs.listStatusIterator(d);
             assertFalse(it.hasNext());
             Path p = new Path(d, "file-" + 0);
-            Assert.assertTrue(fs.createNewFile(p));
+            assertTrue(fs.createNewFile(p));
             // Iterator should have an item when dir is not empty
             it = fs.listStatusIterator(d);
             assertTrue(it.hasNext());
@@ -380,11 +385,11 @@ public class TestWebHDFS {
             assertFalse(it.hasNext());
             for (int i = 1; i < listLimit * 3; i++) {
               p = new Path(d, "file-" + i);
-              Assert.assertTrue(fs.createNewFile(p));
+              assertTrue(fs.createNewFile(p));
             }
             // Check the FileStatus[] listing
             FileStatus[] statuses = fs.listStatus(d);
-            Assert.assertEquals(listLimit * 3, statuses.length);
+            assertEquals(listLimit * 3, statuses.length);
             // Check the iterator-based listing
             GenericTestUtils.setLogLevel(WebHdfsFileSystem.LOG, Level.TRACE);
             GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG,
@@ -393,18 +398,18 @@ public class TestWebHDFS {
             int count = 0;
             while (it.hasNext()) {
               FileStatus stat = it.next();
-              assertEquals("FileStatuses not equal", statuses[count], stat);
+              assertEquals(statuses[count], stat, "FileStatuses not equal");
               count++;
             }
-            assertEquals("Different # of statuses!", statuses.length, count);
+            assertEquals(statuses.length, count, "Different # of statuses!");
             // Do some more basic iterator tests
             it = fs.listStatusIterator(d);
             // Try advancing the iterator without calling hasNext()
             for (int i = 0; i < statuses.length; i++) {
               FileStatus stat = it.next();
-              assertEquals("FileStatuses not equal", statuses[i], stat);
+              assertEquals(statuses[i], stat, "FileStatuses not equal");
             }
-            assertFalse("No more items expected", it.hasNext());
+            assertFalse(it.hasNext(), "No more items expected");
             // Try doing next when out of items
             try {
               it.next();
@@ -464,7 +469,8 @@ public class TestWebHDFS {
     }
   }
 
-  @Test(timeout=300000)
+  @Test
+  @Timeout(value = 300)
   public void testCustomizedUserAndGroupNames() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
@@ -492,7 +498,7 @@ public class TestWebHDFS {
             FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
                 WebHdfsConstants.WEBHDFS_SCHEME);
             Path d = new Path("/my-dir");
-            Assert.assertTrue(fs.mkdirs(d));
+            assertTrue(fs.mkdirs(d));
             // Test also specifying a default ACL with a numeric username
             // and another of a groupname with '@'
             fs.modifyAclEntries(d, ImmutableList.of(new AclEntry.Builder()
@@ -509,7 +515,8 @@ public class TestWebHDFS {
    * Test for catching "no datanode" IOException, when to create a file
    * but datanode is not running for some reason.
    */
-  @Test(timeout=300000)
+  @Test
+  @Timeout(value = 300)
   public void testCreateWithNoDN() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
     try {
@@ -519,7 +526,7 @@ public class TestWebHDFS {
       FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
           WebHdfsConstants.WEBHDFS_SCHEME);
       fs.create(new Path("/testnodatanode"));
-      Assert.fail("No exception was thrown");
+      fail("No exception was thrown");
     } catch (IOException ex) {
       GenericTestUtils.assertExceptionContains("Failed to find datanode", ex);
     }
@@ -580,7 +587,7 @@ public class TestWebHDFS {
     assertTrue(webHdfs.getFileStatus(bar).isSnapshotEnabled());
     webHdfs.createSnapshot(bar, "s1");
     final Path s1path = SnapshotTestHelper.getSnapshotRoot(bar, "s1");
-    Assert.assertTrue(webHdfs.exists(s1path));
+    assertTrue(webHdfs.exists(s1path));
     SnapshottableDirectoryStatus[] snapshottableDirs =
         dfs.getSnapshottableDirListing();
     assertEquals(1, snapshottableDirs.length);
@@ -616,7 +623,8 @@ public class TestWebHDFS {
     }
   }
 
-  @Test (timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testWebHdfsErasureCodingFiles() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
@@ -641,43 +649,39 @@ public class TestWebHDFS {
 
     FileStatus expectedECDirStatus = dfs.getFileStatus(ecDir);
     FileStatus actualECDirStatus = webHdfs.getFileStatus(ecDir);
-    Assert.assertEquals(expectedECDirStatus.isErasureCoded(),
+    assertEquals(expectedECDirStatus.isErasureCoded(),
         actualECDirStatus.isErasureCoded());
     ContractTestUtils.assertErasureCoded(dfs, ecDir);
-    assertTrue(
+    assertTrue(actualECDirStatus.toString().contains("isErasureCoded=true"),
         ecDir + " should have erasure coding set in "
-            + "FileStatus#toString(): " + actualECDirStatus,
-        actualECDirStatus.toString().contains("isErasureCoded=true"));
+            + "FileStatus#toString(): " + actualECDirStatus);
 
     FileStatus expectedECFileStatus = dfs.getFileStatus(ecFile);
     FileStatus actualECFileStatus = webHdfs.getFileStatus(ecFile);
-    Assert.assertEquals(expectedECFileStatus.isErasureCoded(),
+    assertEquals(expectedECFileStatus.isErasureCoded(),
         actualECFileStatus.isErasureCoded());
     ContractTestUtils.assertErasureCoded(dfs, ecFile);
-    assertTrue(
+    assertTrue(actualECFileStatus.toString().contains("isErasureCoded=true"),
         ecFile + " should have erasure coding set in "
-            + "FileStatus#toString(): " + actualECFileStatus,
-        actualECFileStatus.toString().contains("isErasureCoded=true"));
+            + "FileStatus#toString(): " + actualECFileStatus);
 
     FileStatus expectedNormalDirStatus = dfs.getFileStatus(normalDir);
     FileStatus actualNormalDirStatus = webHdfs.getFileStatus(normalDir);
-    Assert.assertEquals(expectedNormalDirStatus.isErasureCoded(),
+    assertEquals(expectedNormalDirStatus.isErasureCoded(),
         actualNormalDirStatus.isErasureCoded());
     ContractTestUtils.assertNotErasureCoded(dfs, normalDir);
-    assertTrue(
+    assertTrue(actualNormalDirStatus.toString().contains("isErasureCoded=false"),
         normalDir + " should have erasure coding unset in "
-            + "FileStatus#toString(): " + actualNormalDirStatus,
-        actualNormalDirStatus.toString().contains("isErasureCoded=false"));
+            + "FileStatus#toString(): " + actualNormalDirStatus);
 
     FileStatus expectedNormalFileStatus = dfs.getFileStatus(normalFile);
     FileStatus actualNormalFileStatus = webHdfs.getFileStatus(normalDir);
-    Assert.assertEquals(expectedNormalFileStatus.isErasureCoded(),
+    assertEquals(expectedNormalFileStatus.isErasureCoded(),
         actualNormalFileStatus.isErasureCoded());
     ContractTestUtils.assertNotErasureCoded(dfs, normalFile);
-    assertTrue(
+    assertTrue(actualNormalFileStatus.toString().contains("isErasureCoded=false"),
         normalFile + " should have erasure coding unset in "
-            + "FileStatus#toString(): " + actualNormalFileStatus,
-        actualNormalFileStatus.toString().contains("isErasureCoded=false"));
+            + "FileStatus#toString(): " + actualNormalFileStatus);
   }
 
   /**
@@ -710,9 +714,9 @@ public class TestWebHDFS {
     // create snapshot without specifying name
     final Path spath = webHdfs.createSnapshot(foo, null);
 
-    Assert.assertTrue(webHdfs.exists(spath));
+    assertTrue(webHdfs.exists(spath));
     final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
-    Assert.assertTrue(webHdfs.exists(s1path));
+    assertTrue(webHdfs.exists(s1path));
   }
 
   /**
@@ -733,16 +737,16 @@ public class TestWebHDFS {
 
     webHdfs.createSnapshot(foo, "s1");
     final Path spath = webHdfs.createSnapshot(foo, null);
-    Assert.assertTrue(webHdfs.exists(spath));
+    assertTrue(webHdfs.exists(spath));
     final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
-    Assert.assertTrue(webHdfs.exists(s1path));
+    assertTrue(webHdfs.exists(s1path));
 
     // delete operation snapshot name as null
     try {
       webHdfs.deleteSnapshot(foo, null);
       fail("Expected IllegalArgumentException");
     } catch (RemoteException e) {
-      Assert.assertEquals("Required param snapshotname for "
+      assertEquals("Required param snapshotname for "
           + "op: DELETESNAPSHOT is null or empty", e.getLocalizedMessage());
     }
 
@@ -778,7 +782,7 @@ public class TestWebHDFS {
     dfs.allowSnapshot(foo);
     webHdfs.createSnapshot(foo, "s1");
     final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
-    Assert.assertTrue(webHdfs.exists(s1path));
+    assertTrue(webHdfs.exists(s1path));
 
     Path file3 = new Path(foo, "file3");
     DFSTestUtil.createFile(dfs, file3, 100, (short) 1, 0);
@@ -791,9 +795,9 @@ public class TestWebHDFS {
     SnapshotDiffReport diffReport =
         webHdfs.getSnapshotDiffReport(foo, "s1", "s2");
 
-    Assert.assertEquals("/foo", diffReport.getSnapshotRoot());
-    Assert.assertEquals("s1", diffReport.getFromSnapshot());
-    Assert.assertEquals("s2", diffReport.getLaterSnapshotName());
+    assertEquals("/foo", diffReport.getSnapshotRoot());
+    assertEquals("s1", diffReport.getFromSnapshot());
+    assertEquals("s2", diffReport.getLaterSnapshotName());
     DiffReportEntry entry0 =
         new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes(""));
     DiffReportEntry entry1 =
@@ -804,18 +808,18 @@ public class TestWebHDFS {
         DFSUtil.string2Bytes("file2"), DFSUtil.string2Bytes("file4"));
     DiffReportEntry entry4 =
         new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file3"));
-    Assert.assertTrue(diffReport.getDiffList().contains(entry0));
-    Assert.assertTrue(diffReport.getDiffList().contains(entry1));
-    Assert.assertTrue(diffReport.getDiffList().contains(entry2));
-    Assert.assertTrue(diffReport.getDiffList().contains(entry3));
-    Assert.assertTrue(diffReport.getDiffList().contains(entry4));
-    Assert.assertEquals(diffReport.getDiffList().size(), 5);
+    assertTrue(diffReport.getDiffList().contains(entry0));
+    assertTrue(diffReport.getDiffList().contains(entry1));
+    assertTrue(diffReport.getDiffList().contains(entry2));
+    assertTrue(diffReport.getDiffList().contains(entry3));
+    assertTrue(diffReport.getDiffList().contains(entry4));
+    assertEquals(diffReport.getDiffList().size(), 5);
 
     // Test with fromSnapshot and toSnapshot as null.
     diffReport = webHdfs.getSnapshotDiffReport(foo, null, "s2");
-    Assert.assertEquals(diffReport.getDiffList().size(), 0);
+    assertEquals(diffReport.getDiffList().size(), 0);
     diffReport = webHdfs.getSnapshotDiffReport(foo, "s1", null);
-    Assert.assertEquals(diffReport.getDiffList().size(), 5);
+    assertEquals(diffReport.getDiffList().size(), 5);
   }
 
   /**
@@ -835,7 +839,7 @@ public class TestWebHDFS {
     dfs.mkdirs(bar);
     SnapshottableDirectoryStatus[] statuses =
         webHdfs.getSnapshottableDirectoryList();
-    Assert.assertNull(statuses);
+    assertNull(statuses);
     dfs.allowSnapshot(foo);
     dfs.allowSnapshot(bar);
     Path file0 = new Path(foo, "file0");
@@ -847,37 +851,37 @@ public class TestWebHDFS {
         dfs.getSnapshottableDirListing();
 
     for (int i = 0; i < dfsStatuses.length; i++) {
-      Assert.assertEquals(statuses[i].getSnapshotNumber(),
+      assertEquals(statuses[i].getSnapshotNumber(),
           dfsStatuses[i].getSnapshotNumber());
-      Assert.assertEquals(statuses[i].getSnapshotQuota(),
+      assertEquals(statuses[i].getSnapshotQuota(),
           dfsStatuses[i].getSnapshotQuota());
-      Assert.assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
+      assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
           dfsStatuses[i].getParentFullPath()));
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
+      assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
           statuses[i].getDirStatus().getChildrenNum());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
+      assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
           statuses[i].getDirStatus().getModificationTime());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().isDir(),
+      assertEquals(dfsStatuses[i].getDirStatus().isDir(),
           statuses[i].getDirStatus().isDir());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
+      assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
           statuses[i].getDirStatus().getAccessTime());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
+      assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
           statuses[i].getDirStatus().getPermission());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
+      assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
           statuses[i].getDirStatus().getOwner());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
+      assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
           statuses[i].getDirStatus().getGroup());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getPath(),
+      assertEquals(dfsStatuses[i].getDirStatus().getPath(),
           statuses[i].getDirStatus().getPath());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
+      assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
           statuses[i].getDirStatus().getFileId());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
+      assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
           statuses[i].getDirStatus().hasAcl());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
+      assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
           statuses[i].getDirStatus().isEncrypted());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
+      assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
           statuses[i].getDirStatus().isErasureCoded());
-      Assert.assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
+      assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
           statuses[i].getDirStatus().isSnapshotEnabled());
     }
   }
@@ -901,37 +905,37 @@ public class TestWebHDFS {
       SnapshotStatus[] dfsStatuses = dfs.getSnapshotListing(foo);
 
       for (int i = 0; i < dfsStatuses.length; i++) {
-        Assert.assertEquals(statuses[i].getSnapshotID(),
+        assertEquals(statuses[i].getSnapshotID(),
             dfsStatuses[i].getSnapshotID());
-        Assert.assertEquals(statuses[i].isDeleted(),
+        assertEquals(statuses[i].isDeleted(),
             dfsStatuses[i].isDeleted());
-        Assert.assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
+        assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
             dfsStatuses[i].getParentFullPath()));
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
+        assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
             statuses[i].getDirStatus().getChildrenNum());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
+        assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
             statuses[i].getDirStatus().getModificationTime());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isDir(),
+        assertEquals(dfsStatuses[i].getDirStatus().isDir(),
             statuses[i].getDirStatus().isDir());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
+        assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
             statuses[i].getDirStatus().getAccessTime());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
+        assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
             statuses[i].getDirStatus().getPermission());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
+        assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
             statuses[i].getDirStatus().getOwner());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
+        assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
             statuses[i].getDirStatus().getGroup());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPath(),
+        assertEquals(dfsStatuses[i].getDirStatus().getPath(),
             statuses[i].getDirStatus().getPath());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
+        assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
             statuses[i].getDirStatus().getFileId());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
+        assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
             statuses[i].getDirStatus().hasAcl());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
+        assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
             statuses[i].getDirStatus().isEncrypted());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
+        assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
             statuses[i].getDirStatus().isErasureCoded());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
+        assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
             statuses[i].getDirStatus().isSnapshotEnabled());
       }
     } finally {
@@ -979,14 +983,14 @@ public class TestWebHDFS {
 
     webHdfs.createSnapshot(foo, "s1");
     final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
-    Assert.assertTrue(webHdfs.exists(s1path));
+    assertTrue(webHdfs.exists(s1path));
 
     // rename s1 to s2 with oldsnapshotName as null
     try {
       webHdfs.renameSnapshot(foo, null, "s2");
       fail("Expected IllegalArgumentException");
     } catch (RemoteException e) {
-      Assert.assertEquals("Required param oldsnapshotname for "
+      assertEquals("Required param oldsnapshotname for "
           + "op: RENAMESNAPSHOT is null or empty", e.getLocalizedMessage());
     }
 
@@ -994,7 +998,7 @@ public class TestWebHDFS {
     webHdfs.renameSnapshot(foo, "s1", "s2");
     assertFalse(webHdfs.exists(s1path));
     final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
-    Assert.assertTrue(webHdfs.exists(s2path));
+    assertTrue(webHdfs.exists(s2path));
 
     webHdfs.deleteSnapshot(foo, "s2");
     assertFalse(webHdfs.exists(s2path));
@@ -1035,7 +1039,7 @@ public class TestWebHDFS {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
     final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
         WebHdfsConstants.WEBHDFS_SCHEME);
-    Assert.assertNull(webHdfs.getDelegationToken(null));
+    assertNull(webHdfs.getDelegationToken(null));
   }
 
   @Test
@@ -1048,7 +1052,7 @@ public class TestWebHDFS {
       webHdfs.getDelegationToken(null);
       fail("No exception is thrown.");
     } catch (AccessControlException ace) {
-      Assert.assertTrue(ace.getMessage().startsWith(
+      assertTrue(ace.getMessage().startsWith(
           WebHdfsFileSystem.CANT_FALLBACK_TO_INSECURE_MSG));
     }
   }
@@ -1074,12 +1078,12 @@ public class TestWebHDFS {
                 new LengthParam((long) LENGTH)));
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
     conn.setInstanceFollowRedirects(true);
-    Assert.assertEquals(LENGTH, conn.getContentLength());
+    assertEquals(LENGTH, conn.getContentLength());
     byte[] subContents = new byte[LENGTH];
     byte[] realContents = new byte[LENGTH];
     System.arraycopy(CONTENTS, OFFSET, subContents, 0, LENGTH);
     IOUtils.readFully(conn.getInputStream(), realContents);
-    Assert.assertArrayEquals(subContents, realContents);
+    assertArrayEquals(subContents, realContents);
   }
 
   @Test
@@ -1093,8 +1097,7 @@ public class TestWebHDFS {
     dfs.mkdirs(path);
     dfs.setQuotaByStorageType(path, StorageType.DISK, 100000);
     ContentSummary contentSummary = webHdfs.getContentSummary(path);
-    Assert
-        .assertTrue((contentSummary.getTypeQuota(StorageType.DISK) == 100000));
+    assertTrue((contentSummary.getTypeQuota(StorageType.DISK) == 100000));
   }
 
   /**
@@ -1230,16 +1233,16 @@ public class TestWebHDFS {
       byte[] buf = new byte[1024];
       try {
         in.readFully(1020, buf, 0, 5);
-        Assert.fail("EOF expected");
+        fail("EOF expected");
       } catch (EOFException ignored) {}
 
       // mix pread with stateful read
       int length = in.read(buf, 0, 512);
       in.readFully(100, new byte[1024], 0, 100);
       int preadLen = in.read(200, new byte[1024], 0, 200);
-      Assert.assertTrue(preadLen > 0);
+      assertTrue(preadLen > 0);
       IOUtils.readFully(in, buf, length, 1024 - length);
-      Assert.assertArrayEquals(content, buf);
+      assertArrayEquals(content, buf);
     } finally {
       if (in != null) {
         in.close();
@@ -1247,7 +1250,8 @@ public class TestWebHDFS {
     }
   }
 
-  @Test(timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testGetHomeDirectory() throws Exception {
     Configuration conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).build();
@@ -1291,7 +1295,7 @@ public class TestWebHDFS {
     BlockLocation[] locations = fs.getFileBlockLocations(PATH, OFFSET, LENGTH);
     for (BlockLocation location : locations) {
       StorageType[] storageTypes = location.getStorageTypes();
-      Assert.assertTrue(storageTypes != null && storageTypes.length > 0
+      assertTrue(storageTypes != null && storageTypes.length > 0
           && storageTypes[0] == StorageType.DISK);
     }
   }
@@ -1399,19 +1403,19 @@ public class TestWebHDFS {
     for(int i=0; i<locations1.length; i++) {
       BlockLocation location1 = locations1[i];
       BlockLocation location2 = locations2[i];
-      Assert.assertEquals(location1.getLength(),
+      assertEquals(location1.getLength(),
           location2.getLength());
-      Assert.assertEquals(location1.getOffset(),
+      assertEquals(location1.getOffset(),
           location2.getOffset());
-      Assert.assertArrayEquals(location1.getCachedHosts(),
+      assertArrayEquals(location1.getCachedHosts(),
           location2.getCachedHosts());
-      Assert.assertArrayEquals(location1.getHosts(),
+      assertArrayEquals(location1.getHosts(),
           location2.getHosts());
-      Assert.assertArrayEquals(location1.getNames(),
+      assertArrayEquals(location1.getNames(),
           location2.getNames());
-      Assert.assertArrayEquals(location1.getTopologyPaths(),
+      assertArrayEquals(location1.getTopologyPaths(),
           location2.getTopologyPaths());
-      Assert.assertArrayEquals(location1.getStorageTypes(),
+      assertArrayEquals(location1.getStorageTypes(),
           location2.getStorageTypes());
     }
   }
@@ -1448,7 +1452,8 @@ public class TestWebHDFS {
     });
   }
 
-  @Test(timeout=90000)
+  @Test
+  @Timeout(value = 90)
   public void testWebHdfsReadRetries() throws Exception {
     // ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
     final Configuration conf = WebHdfsTestUtil.createConf();
@@ -1475,18 +1480,17 @@ public class TestWebHDFS {
 
     // get file status and check that it was written properly.
     final FileStatus s1 = fs.getFileStatus(file1);
-    assertEquals("Write failed for file " + file1, length, s1.getLen());
+    assertEquals(length, s1.getLen(), "Write failed for file " + file1);
 
     // Ensure file can be read through WebHdfsInputStream
     FSDataInputStream in = fs.open(file1);
-    assertTrue("Input stream is not an instance of class WebHdfsInputStream",
-        in.getWrappedStream() instanceof WebHdfsInputStream);
+    assertTrue(in.getWrappedStream() instanceof WebHdfsInputStream,
+        "Input stream is not an instance of class WebHdfsInputStream");
     int count = 0;
     for (; in.read() != -1; count++)
       ;
-    assertEquals("Read failed for file " + file1, s1.getLen(), count);
-    assertEquals("Sghould not be able to read beyond end of file", in.read(),
-        -1);
+    assertEquals(s1.getLen(), count, "Read failed for file " + file1);
+    assertEquals(in.read(), -1,
+        "Should not be able to read beyond end of file");
     in.close();
     try {
       in.read();
@@ -1575,9 +1579,9 @@ public class TestWebHDFS {
     } catch (Exception e) {
       assertTrue(e.getMessage().contains(msg));
     }
-    assertEquals(msg + ": Read should " + (shouldAttemptRetry ? "" : "not ")
-                + "have called shouldRetry. ",
-        attemptedRetry, shouldAttemptRetry);
+    assertEquals(attemptedRetry, shouldAttemptRetry,
+        msg + ": Read should " + (shouldAttemptRetry ? "" : "not ")
+            + "have called shouldRetry. ");
 
     verify(rr, times(numTimesTried)).getResponse((HttpURLConnection) any());
     webIn.close();
@@ -1592,21 +1596,20 @@ public class TestWebHDFS {
     String response =
         IOUtils.toString(conn.getInputStream(), StandardCharsets.UTF_8);
     LOG.info("Response was : " + response);
-    Assert.assertEquals(
-      "Response wasn't " + HttpURLConnection.HTTP_OK,
-      HttpURLConnection.HTTP_OK, conn.getResponseCode());
+    assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode(),
+        "Response wasn't " + HttpURLConnection.HTTP_OK);
 
     JSONObject responseJson = new JSONObject(response);
-    Assert.assertTrue("Response didn't give us a location. " + response,
-      responseJson.has("Location"));
+    assertTrue(responseJson.has("Location"),
+        "Response didn't give us a location. " + response);
 
     //Test that the DN allows CORS on Create
     if(TYPE.equals("CREATE")) {
       URL dnLocation = new URL(responseJson.getString("Location"));
       HttpURLConnection dnConn = (HttpURLConnection) dnLocation.openConnection();
       dnConn.setRequestMethod("OPTIONS");
-      Assert.assertEquals("Datanode url : " + dnLocation + " didn't allow "
-        + "CORS", HttpURLConnection.HTTP_OK, dnConn.getResponseCode());
+      assertEquals(HttpURLConnection.HTTP_OK, dnConn.getResponseCode(),
+          "Datanode url : " + dnLocation + " didn't allow " + "CORS");
     }
   }
 
@@ -1758,7 +1761,7 @@ public class TestWebHDFS {
         WebHdfsConstants.WEBHDFS_SCHEME);
 
     // test getAllStoragePolicies
-    Assert.assertTrue(Arrays.equals(dfs.getAllStoragePolicies().toArray(),
+    assertTrue(Arrays.equals(dfs.getAllStoragePolicies().toArray(),
         webHdfs.getAllStoragePolicies().toArray()));
 
     // test get/set/unset policies
@@ -1771,12 +1774,12 @@ public class TestWebHDFS {
     BlockStoragePolicySpi dfsPolicy = dfs.getStoragePolicy(path);
     // get policy from webhdfs
     BlockStoragePolicySpi webHdfsPolicy = webHdfs.getStoragePolicy(path);
-    Assert.assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME.toString(),
+    assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME.toString(),
         webHdfsPolicy.getName());
-    Assert.assertEquals(webHdfsPolicy, dfsPolicy);
+    assertEquals(webHdfsPolicy, dfsPolicy);
     // unset policy
     webHdfs.unsetStoragePolicy(path);
-    Assert.assertEquals(defaultdfsPolicy, webHdfs.getStoragePolicy(path));
+    assertEquals(defaultdfsPolicy, webHdfs.getStoragePolicy(path));
   }
 
   @Test
@@ -1793,7 +1796,7 @@ public class TestWebHDFS {
           HdfsConstants.COLD_STORAGE_POLICY_NAME);
       fail("Should throw exception, when storage policy disabled");
     } catch (IOException e) {
-      Assert.assertTrue(e.getMessage().contains(
+      assertTrue(e.getMessage().contains(
           "Failed to set storage policy since"));
     }
   }
@@ -1807,14 +1810,14 @@ public class TestWebHDFS {
       if (policy.getPolicy().getName().equals(ecpolicy)) {
         found = true;
         if (state.equals("disable")) {
-          Assert.assertTrue(policy.isDisabled());
+          assertTrue(policy.isDisabled());
         } else if (state.equals("enable")) {
-          Assert.assertTrue(policy.isEnabled());
+          assertTrue(policy.isEnabled());
         }
         break;
       }
     }
-    Assert.assertTrue(found);
+    assertTrue(found);
   }
 
   // Test For Enable/Disable EC Policy in DFS.
@@ -1963,36 +1966,28 @@ public class TestWebHDFS {
 
   private void compareFsServerDefaults(FsServerDefaults serverDefaults1,
       FsServerDefaults serverDefaults2) throws Exception {
-    Assert.assertEquals("Block size is different",
-        serverDefaults1.getBlockSize(),
-        serverDefaults2.getBlockSize());
-    Assert.assertEquals("Bytes per checksum are different",
-        serverDefaults1.getBytesPerChecksum(),
-        serverDefaults2.getBytesPerChecksum());
-    Assert.assertEquals("Write packet size is different",
-        serverDefaults1.getWritePacketSize(),
-        serverDefaults2.getWritePacketSize());
-    Assert.assertEquals("Default replication is different",
-        serverDefaults1.getReplication(),
-        serverDefaults2.getReplication());
-    Assert.assertEquals("File buffer size are different",
-        serverDefaults1.getFileBufferSize(),
-        serverDefaults2.getFileBufferSize());
-    Assert.assertEquals("Encrypt data transfer key is different",
-        serverDefaults1.getEncryptDataTransfer(),
-        serverDefaults2.getEncryptDataTransfer());
-    Assert.assertEquals("Trash interval is different",
-        serverDefaults1.getTrashInterval(),
-        serverDefaults2.getTrashInterval());
-    Assert.assertEquals("Checksum type is different",
-        serverDefaults1.getChecksumType(),
-        serverDefaults2.getChecksumType());
-    Assert.assertEquals("Key provider uri is different",
-        serverDefaults1.getKeyProviderUri(),
-        serverDefaults2.getKeyProviderUri());
-    Assert.assertEquals("Default storage policy is different",
-        serverDefaults1.getDefaultStoragePolicyId(),
-        serverDefaults2.getDefaultStoragePolicyId());
+    assertEquals(serverDefaults1.getBlockSize(), serverDefaults2.getBlockSize(),
+        "Block size is different");
+    assertEquals(serverDefaults1.getBytesPerChecksum(),
+        serverDefaults2.getBytesPerChecksum(), "Bytes per checksum are different");
+    assertEquals(serverDefaults1.getWritePacketSize(),
+        serverDefaults2.getWritePacketSize(), "Write packet size is different");
+    assertEquals(serverDefaults1.getReplication(),
+        serverDefaults2.getReplication(), "Default replication is different");
+    assertEquals(serverDefaults1.getFileBufferSize(),
+        serverDefaults2.getFileBufferSize(), "File buffer size is different");
+    assertEquals(serverDefaults1.getEncryptDataTransfer(),
+        serverDefaults2.getEncryptDataTransfer(),
+        "Encrypt data transfer key is different");
+    assertEquals(serverDefaults1.getTrashInterval(),
+        serverDefaults2.getTrashInterval(), "Trash interval is different");
+    assertEquals(serverDefaults1.getChecksumType(),
+        serverDefaults2.getChecksumType(), "Checksum type is different");
+    assertEquals(serverDefaults1.getKeyProviderUri(),
+        serverDefaults2.getKeyProviderUri(), "Key provider uri is different");
+    assertEquals(serverDefaults1.getDefaultStoragePolicyId(),
+        serverDefaults2.getDefaultStoragePolicyId(),
+        "Default storage policy is different");
   }
 
   /**
@@ -2012,7 +2007,7 @@ public class TestWebHDFS {
         .thenThrow(new UnsupportedOperationException());
     try {
       webfs.getServerDefaults();
-      Assert.fail("should have thrown UnSupportedOperationException.");
+      fail("should have thrown UnSupportedOperationException.");
     } catch (UnsupportedOperationException uoe) {
       // Expected exception.
     }
@@ -2046,7 +2041,7 @@ public class TestWebHDFS {
 
     // get file status and check that it was written properly.
     final FileStatus s1 = fs.getFileStatus(file1);
-    assertEquals("Write failed for file " + file1, length, s1.getLen());
+    assertEquals(length, s1.getLen(), "Write failed for file " + file1);
 
     FSDataInputStream in = fs.open(file1);
     in.read(); // Connection is made only when the first read() occurs.
@@ -2074,7 +2069,8 @@ public class TestWebHDFS {
   * Tests that the LISTSTATUS and GETFILESTATUS WebHDFS calls return the
    * ecPolicy for EC files.
    */
-  @Test(timeout=300000)
+  @Test
+  @Timeout(value = 300)
   public void testECPolicyInFileStatus() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
     final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies
@@ -2111,21 +2107,20 @@ public class TestWebHDFS {
     conn.setInstanceFollowRedirects(false);
     String listStatusResponse = IOUtils.toString(conn.getInputStream(),
         StandardCharsets.UTF_8);
-    Assert.assertEquals("Response wasn't " + HttpURLConnection.HTTP_OK,
-        HttpURLConnection.HTTP_OK, conn.getResponseCode());
+    assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode(),
+        "Response wasn't " + HttpURLConnection.HTTP_OK);
 
     // Verify that ecPolicy is set in the ListStatus response for ec file
     String ecpolicyForECfile = getECPolicyFromFileStatusJson(
         getFileStatusJson(listStatusResponse, ecFile.getName()));
-    assertEquals("EC policy for ecFile should match the set EC policy",
-        ecpolicyForECfile, ecPolicyName);
+    assertEquals(ecpolicyForECfile, ecPolicyName,
+        "EC policy for ecFile should match the set EC policy");
 
     // Verify that ecPolicy is not set in the ListStatus response for non-ec
     // file
     String ecPolicyForNonECfile = getECPolicyFromFileStatusJson(
         getFileStatusJson(listStatusResponse, nonEcFile.getName()));
-    assertEquals("EC policy for nonEcFile should be null (not set)",
-        ecPolicyForNonECfile, null);
+    assertEquals(ecPolicyForNonECfile, null,
+        "EC policy for nonEcFile should be null (not set)");
 
     // Query webhdfs REST API to get fileStatus for ecFile
     URL getFileStatusUrl = new URL("http", addr.getHostString(), addr.getPort(),
@@ -2137,15 +2132,15 @@ public class TestWebHDFS {
     conn.setInstanceFollowRedirects(false);
     String getFileStatusResponse = IOUtils.toString(conn.getInputStream(),
         StandardCharsets.UTF_8);
-    Assert.assertEquals("Response wasn't " + HttpURLConnection.HTTP_OK,
-        HttpURLConnection.HTTP_OK, conn.getResponseCode());
+    assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode(),
+        "Response wasn't " + HttpURLConnection.HTTP_OK);
 
     // Verify that ecPolicy is set in getFileStatus response for ecFile
     JSONObject fileStatusObject = new JSONObject(getFileStatusResponse)
         .getJSONObject("FileStatus");
     ecpolicyForECfile = getECPolicyFromFileStatusJson(fileStatusObject);
-    assertEquals("EC policy for ecFile should match the set EC policy",
-        ecpolicyForECfile, ecPolicyName);
+    assertEquals(ecpolicyForECfile, ecPolicyName,
+        "EC policy for ecFile should match the set EC policy");
   }
 
   @Test
@@ -2276,16 +2271,16 @@ public class TestWebHDFS {
       }
 
       FsStatus webHdfsFsStatus = webHdfs.getStatus(new Path("/"));
-      Assert.assertNotNull(webHdfsFsStatus);
+      assertNotNull(webHdfsFsStatus);
 
       FsStatus dfsFsStatus = dfs.getStatus(new Path("/"));
-      Assert.assertNotNull(dfsFsStatus);
+      assertNotNull(dfsFsStatus);
 
       //Validate used free and capacity are the same as DistributedFileSystem
-      Assert.assertEquals(webHdfsFsStatus.getUsed(), dfsFsStatus.getUsed());
-      Assert.assertEquals(webHdfsFsStatus.getRemaining(),
+      assertEquals(webHdfsFsStatus.getUsed(), dfsFsStatus.getUsed());
+      assertEquals(webHdfsFsStatus.getRemaining(),
           dfsFsStatus.getRemaining());
-      Assert.assertEquals(webHdfsFsStatus.getCapacity(),
+      assertEquals(webHdfsFsStatus.getCapacity(),
           dfsFsStatus.getCapacity());
     } finally {
       cluster.shutdown();
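
The timeout migrations in this file also change units: JUnit 4's @Test(timeout=...) takes milliseconds, while JUnit 5's @Timeout defaults to seconds, which is why @Test(timeout=300000) becomes @Timeout(value = 300). A short sketch of both spellings, with a hypothetical class name:

    import java.util.concurrent.TimeUnit;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    class TimeoutSketch {
      @Test
      @Timeout(value = 300)  // seconds; equivalent to JUnit 4 @Test(timeout = 300000)
      void fiveMinuteBudget() {
      }

      @Test
      @Timeout(value = 300_000, unit = TimeUnit.MILLISECONDS)  // explicit unit
      void sameBudgetInMillis() {
      }
    }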

+ 14 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java

@@ -19,6 +19,9 @@
 package org.apache.hadoop.hdfs.web;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
@@ -54,8 +57,8 @@ import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.eclipse.jetty.util.ajax.JSON;
 
 import javax.servlet.http.HttpServletResponse;
@@ -87,13 +90,13 @@ public class TestWebHDFSForHA {
       cluster.transitionToActive(0);
 
       final Path dir = new Path("/test");
-      Assert.assertTrue(fs.mkdirs(dir));
+      assertTrue(fs.mkdirs(dir));
 
       cluster.shutdownNameNode(0);
       cluster.transitionToActive(1);
 
       final Path dir2 = new Path("/test2");
-      Assert.assertTrue(fs.mkdirs(dir2));
+      assertTrue(fs.mkdirs(dir2));
     } finally {
       IOUtils.cleanupWithLogger(null, fs);
       if (cluster != null) {
@@ -174,7 +177,7 @@ public class TestWebHDFSForHA {
       } catch (IOException e) {
         // Mimic the UserProvider class logic (server side) by throwing
         // SecurityException here
-        Assert.assertTrue(e instanceof SecretManager.InvalidToken);
+        assertTrue(e instanceof SecretManager.InvalidToken);
         resp = eh.toResponse(new SecurityException(e));
       }
       // The Response (resp) below is what the server will send to client
@@ -197,7 +200,7 @@ public class TestWebHDFSForHA {
       Map<?, ?> m = (Map<?, ?>) JSON.parse(resp.getEntity().toString());
       RemoteException re = JsonUtilClient.toRemoteException(m);
       Exception unwrapped = re.unwrapRemoteException(StandbyException.class);
-      Assert.assertTrue(unwrapped instanceof StandbyException);
+      assertTrue(unwrapped instanceof StandbyException);
     } finally {
       IOUtils.cleanupWithLogger(null, fs);
       if (cluster != null) {
@@ -236,7 +239,7 @@ public class TestWebHDFSForHA {
       FSDataInputStream in = fs.open(p);
       byte[] buf = new byte[data.length];
       IOUtils.readFully(in, buf, 0, buf.length);
-      Assert.assertArrayEquals(data, buf);
+      assertArrayEquals(data, buf);
     } finally {
       IOUtils.cleanupWithLogger(null, fs);
       if (cluster != null) {
@@ -262,7 +265,7 @@ public class TestWebHDFSForHA {
       DFSTestUtil.setFakeHttpAddresses(conf, LOGICAL_NAME + "remote");
 
       fs = (WebHdfsFileSystem)FileSystem.get(WEBHDFS_URI, conf);
-      Assert.assertEquals(2, fs.getResolvedNNAddr().length);
+      assertEquals(2, fs.getResolvedNNAddr().length);
     } finally {
       IOUtils.cleanupWithLogger(null, fs);
       if (cluster != null) {
@@ -275,7 +278,8 @@ public class TestWebHDFSForHA {
    * Make sure the WebHdfsFileSystem will retry based on RetriableException when
    * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
    */
-  @Test (timeout=120000)
+  @Test
+  @Timeout(value = 120)
   public void testRetryWhileNNStartup() throws Exception {
     final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
     MiniDFSCluster cluster = null;
@@ -319,7 +323,7 @@ public class TestWebHDFSForHA {
         while (!resultMap.containsKey("mkdirs")) {
           this.wait();
         }
-        Assert.assertTrue(resultMap.get("mkdirs"));
+        assertTrue(resultMap.get("mkdirs"));
       }
     } finally {
       if (cluster != null) {
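
The instanceof checks above migrate 1:1 to assertTrue(x instanceof T), which on failure reports only "expected: true". JUnit 5.8+ also offers assertInstanceOf, which reports the actual runtime type and returns the narrowed reference; a sketch of that alternative, with a hypothetical helper and a generic exception type:

    import static org.junit.jupiter.api.Assertions.assertInstanceOf;

    class InstanceOfSketch {
      void check(Exception e) {
        // Fails with the actual class name in the message and returns the cast:
        IllegalStateException ise =
            assertInstanceOf(IllegalStateException.class, e);
      }
    }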

+ 56 - 33
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java

@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hdfs.web;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.BufferedReader;
 import java.io.IOException;
@@ -38,6 +38,9 @@ import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
 
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.EnumSource;
+import org.junit.jupiter.params.provider.MethodSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -48,14 +51,9 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
+import org.junit.jupiter.api.AfterEach;
 import org.junit.AssumptionViolatedException;
-import org.junit.Before;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameter;
-import org.junit.runners.Parameterized.Parameters;
-import org.junit.Test;
+import org.junit.jupiter.api.Timeout;
 
 /**
  * This test suite checks that WebHdfsFileSystem sets connection timeouts and
@@ -64,7 +62,6 @@ import org.junit.Test;
  * bogus server on the namenode HTTP port, which is rigged to not accept new
  * connections or to accept connections but not send responses.
  */
-@RunWith(Parameterized.class)
 public class TestWebHdfsTimeouts {
 
   private static final Logger LOG =
@@ -73,7 +70,6 @@ public class TestWebHdfsTimeouts {
   private static final int CLIENTS_TO_CONSUME_BACKLOG = 129;
   private static final int CONNECTION_BACKLOG = 1;
   private static final int SHORT_SOCKET_TIMEOUT = 200;
-  private static final int TEST_TIMEOUT = 100000;
 
   private List<SocketChannel> clients;
   private WebHdfsFileSystem fs;
@@ -97,7 +93,6 @@ public class TestWebHdfsTimeouts {
    * connection factory, and again with the timeouts set by
    * configuration options.
    */
-  @Parameters(name = "timeoutSource={0}")
   public static Collection<Object[]> data() {
     return Arrays.asList(new Object[][] {
       { TimeoutSource.ConnectionFactory },
@@ -105,11 +100,7 @@ public class TestWebHdfsTimeouts {
     });
   }
 
-  @Parameter
-  public TimeoutSource timeoutSource;
-
-  @Before
-  public void setUp() throws Exception {
+  public void setUp(TimeoutSource timeoutSource) throws Exception {
     Configuration conf = WebHdfsTestUtil.createConf();
     serverSocket = new ServerSocket(0, CONNECTION_BACKLOG);
     nnHttpAddress = new InetSocketAddress("localhost", serverSocket.getLocalPort());
@@ -130,7 +121,7 @@ public class TestWebHdfsTimeouts {
     failedToConsumeBacklog = false;
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     IOUtils.cleanupWithLogger(
         LOG, clients.toArray(new SocketChannel[clients.size()]));
@@ -150,8 +141,12 @@ public class TestWebHdfsTimeouts {
   /**
    * Expect connect timeout, because the connection backlog is consumed.
    */
-  @Test(timeout=TEST_TIMEOUT)
-  public void testConnectTimeout() throws Exception {
+  @MethodSource("data")
+  @ParameterizedTest
+  @EnumSource(TimeoutSource.class)
+  @Timeout(value = 100)
+  public void testConnectTimeout(TimeoutSource src) throws Exception {
+    setUp(src);
     consumeConnectionBacklog();
     try {
       fs.listFiles(new Path("/"), false);
@@ -165,8 +160,12 @@ public class TestWebHdfsTimeouts {
   /**
    * Expect read timeout, because the bogus server never sends a reply.
    */
-  @Test(timeout=TEST_TIMEOUT)
-  public void testReadTimeout() throws Exception {
+  @MethodSource("data")
+  @ParameterizedTest
+  @EnumSource(TimeoutSource.class)
+  @Timeout(value = 100)
+  public void testReadTimeout(TimeoutSource src) throws Exception {
+    setUp(src);
     try {
       fs.listFiles(new Path("/"), false);
       fail("expected timeout");
@@ -180,8 +179,12 @@ public class TestWebHdfsTimeouts {
    * Expect connect timeout on a URL that requires auth, because the connection
    * backlog is consumed.
    */
-  @Test(timeout=TEST_TIMEOUT)
-  public void testAuthUrlConnectTimeout() throws Exception {
+  @MethodSource("data")
+  @ParameterizedTest
+  @EnumSource(TimeoutSource.class)
+  @Timeout(value = 100)
+  public void testAuthUrlConnectTimeout(TimeoutSource src) throws Exception {
+    setUp(src);
     consumeConnectionBacklog();
     try {
       fs.getDelegationToken("renewer");
@@ -196,8 +199,12 @@ public class TestWebHdfsTimeouts {
    * Expect read timeout on a URL that requires auth, because the bogus server
    * never sends a reply.
    */
-  @Test(timeout=TEST_TIMEOUT)
-  public void testAuthUrlReadTimeout() throws Exception {
+  @MethodSource("data")
+  @ParameterizedTest
+  @EnumSource(TimeoutSource.class)
+  @Timeout(value = 100)
+  public void testAuthUrlReadTimeout(TimeoutSource src) throws Exception {
+    setUp(src);
     try {
       fs.getDelegationToken("renewer");
       fail("expected timeout");
@@ -211,8 +218,12 @@ public class TestWebHdfsTimeouts {
    * After a redirect, expect connect timeout accessing the redirect location,
    * because the connection backlog is consumed.
    */
-  @Test(timeout=TEST_TIMEOUT)
-  public void testRedirectConnectTimeout() throws Exception {
+  @MethodSource("data")
+  @ParameterizedTest
+  @EnumSource(TimeoutSource.class)
+  @Timeout(value = 100)
+  public void testRedirectConnectTimeout(TimeoutSource src) throws Exception {
+    setUp(src);
     startSingleTemporaryRedirectResponseThread(true);
     try {
       fs.getFileChecksum(new Path("/file"));
@@ -228,8 +239,12 @@ public class TestWebHdfsTimeouts {
    * After a redirect, expect read timeout accessing the redirect location,
    * because the bogus server never sends a reply.
    */
-  @Test(timeout=TEST_TIMEOUT)
-  public void testRedirectReadTimeout() throws Exception {
+  @MethodSource("data")
+  @ParameterizedTest
+  @EnumSource(TimeoutSource.class)
+  @Timeout(value = 100)
+  public void testRedirectReadTimeout(TimeoutSource src) throws Exception {
+    setUp(src);
     startSingleTemporaryRedirectResponseThread(false);
     try {
       fs.getFileChecksum(new Path("/file"));
@@ -244,8 +259,12 @@ public class TestWebHdfsTimeouts {
    * On the second step of two-step write, expect connect timeout accessing the
    * redirect location, because the connection backlog is consumed.
    */
-  @Test(timeout=TEST_TIMEOUT)
-  public void testTwoStepWriteConnectTimeout() throws Exception {
+  @MethodSource("data")
+  @ParameterizedTest
+  @EnumSource(TimeoutSource.class)
+  @Timeout(value = 100)
+  public void testTwoStepWriteConnectTimeout(TimeoutSource src) throws Exception {
+    setUp(src);
     startSingleTemporaryRedirectResponseThread(true);
     OutputStream os = null;
     try {
@@ -264,8 +283,12 @@ public class TestWebHdfsTimeouts {
    * On the second step of two-step write, expect read timeout accessing the
    * redirect location, because the bogus server never sends a reply.
    */
-  @Test(timeout=TEST_TIMEOUT)
-  public void testTwoStepWriteReadTimeout() throws Exception {
+  @MethodSource("data")
+  @ParameterizedTest
+  @EnumSource(TimeoutSource.class)
+  @Timeout(value = 100)
+  public void testTwoStepWriteReadTimeout(TimeoutSource src) throws Exception {
+    setUp(src);
     startSingleTemporaryRedirectResponseThread(false);
     OutputStream os = null;
     try {

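The rewrite above compensates for Jupiter's lack of a class-level Parameterized runner: the parameter can no longer be injected into a field and consumed by @Before, so each @ParameterizedTest receives the value as a method argument and invokes setUp(src) itself on its first line. A condensed sketch of the shape, with hypothetical names standing in for the real fixture:

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.MethodSource;

    class ParameterizedSetupSketch {
      enum Mode { FACTORY, CONFIGURATION }  // stands in for TimeoutSource

      private Mode mode;

      // Was @Parameters in JUnit 4; now a plain static @MethodSource factory.
      static Collection<Object[]> data() {
        return Arrays.asList(new Object[][] {{Mode.FACTORY}, {Mode.CONFIGURATION}});
      }

      // Was @Before; called by hand because Jupiter cannot pass an
      // invocation's parameter into a lifecycle method.
      void setUp(Mode m) {
        this.mode = m;
      }

      @AfterEach
      void tearDown() {
        mode = null;  // tear down per-invocation state, as @After used to
      }

      @ParameterizedTest(name = "mode={0}")
      @MethodSource("data")
      void runsOncePerMode(Mode m) {
        setUp(m);
        assertEquals(m, mode);
      }
    }

The factory must be static and may return a Collection<Object[]> or a Stream of Arguments; each element becomes one test invocation.
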
+ 76 - 63
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java

@@ -36,9 +36,14 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIP
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNotSame;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.Mockito.*;
 
 import java.io.File;
@@ -76,10 +81,10 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.security.token.Token;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 public class TestWebHdfsTokens {
   private static final String PREFIX = "hadoop.http.authentication.";
@@ -95,12 +100,12 @@ public class TestWebHdfsTokens {
   private static String keystoresDir;
   private static String sslConfDir;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() {
     conf = new Configuration();
   }
 
-  @AfterClass
+  @AfterAll
   public static void destroy() throws Exception {
     if (kdc != null) {
       kdc.stop();
@@ -133,8 +138,8 @@ public class TestWebHdfsTokens {
         UserGroupInformation.AuthenticationMethod.KERBEROS, secureConf);
     UserGroupInformation.setConfiguration(secureConf);
     KerberosName.resetDefaultRealm();
-    assertTrue("Expected secureConfiguration to enable security",
-        UserGroupInformation.isSecurityEnabled());
+    assertTrue(UserGroupInformation.isSecurityEnabled(),
+        "Expected secureConfiguration to enable security");
 
     keytabFile = new File(baseDir, username + ".keytab");
     String keytab = keytabFile.getAbsolutePath();
@@ -187,7 +192,8 @@ public class TestWebHdfsTokens {
     return fs;
   }
 
-  @Test(timeout = 5000)
+  @Test
+  @Timeout(value = 5)
   public void testTokenForNonTokenOp() throws IOException {
     initEnv();
     WebHdfsFileSystem fs = spyWebhdfsInSecureSetup();
@@ -208,19 +214,22 @@ public class TestWebHdfsTokens {
     verify(fs, never()).setDelegationToken(token);
   }
 
-  @Test(timeout = 5000)
+  @Test
+  @Timeout(value = 5)
   public void testNoTokenForGetToken() throws IOException {
     initEnv();
     checkNoTokenForOperation(GetOpParam.Op.GETDELEGATIONTOKEN);
   }
 
-  @Test(timeout = 5000)
+  @Test
+  @Timeout(value = 5)
   public void testNoTokenForRenewToken() throws IOException {
     initEnv();
     checkNoTokenForOperation(PutOpParam.Op.RENEWDELEGATIONTOKEN);
   }
 
-  @Test(timeout = 5000)
+  @Test
+  @Timeout(value = 5)
   public void testNoTokenForCancelToken() throws IOException {
     initEnv();
     checkNoTokenForOperation(PutOpParam.Op.CANCELDELEGATIONTOKEN);
@@ -238,7 +247,8 @@ public class TestWebHdfsTokens {
     verify(fs, never()).setDelegationToken(any());
   }
 
-  @Test(timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testGetOpRequireAuth() {
     for (HttpOpParam.Op op : GetOpParam.Op.values()) {
       boolean expect = (op == GetOpParam.Op.GETDELEGATIONTOKEN);
@@ -246,7 +256,8 @@ public class TestWebHdfsTokens {
     }
   }
 
-  @Test(timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testPutOpRequireAuth() {
     for (HttpOpParam.Op op : PutOpParam.Op.values()) {
       boolean expect = (op == PutOpParam.Op.RENEWDELEGATIONTOKEN || op == PutOpParam.Op.CANCELDELEGATIONTOKEN);
@@ -254,14 +265,16 @@ public class TestWebHdfsTokens {
     }
   }
 
-  @Test(timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testPostOpRequireAuth() {
     for (HttpOpParam.Op op : PostOpParam.Op.values()) {
       assertFalse(op.getRequireAuth());
     }
   }
 
-  @Test(timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testDeleteOpRequireAuth() {
     for (HttpOpParam.Op op : DeleteOpParam.Op.values()) {
       assertFalse(op.getRequireAuth());
@@ -339,7 +352,7 @@ public class TestWebHdfsTokens {
       };
       Whitebox.setInternalState(fs, "connectionFactory", factory);
       Token<?> token1 = fs.getDelegationToken();
-      Assert.assertEquals(new Text("bar"), token1.getKind());
+      assertEquals(new Text("bar"), token1.getKind());
 
       final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
       Token<DelegationTokenIdentifier> token2 =
@@ -352,8 +365,8 @@ public class TestWebHdfsTokens {
             }
           }.run();
 
-      Assert.assertEquals(new Text("bar"), token2.getKind());
-      Assert.assertEquals(new Text("foo"), token2.getService());
+      assertEquals(new Text("bar"), token2.getKind());
+      assertEquals(new Text("foo"), token2.getService());
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -376,14 +389,14 @@ public class TestWebHdfsTokens {
       });
 
     // verify token ops don't get a token
-    Assert.assertNull(fs.getRenewToken());
+    assertNull(fs.getRenewToken());
     Token<?> token = fs.getDelegationToken(null);
     fs.renewDelegationToken(token);
     fs.cancelDelegationToken(token);
     verify(fs, never()).getDelegationToken();
     verify(fs, never()).replaceExpiredDelegationToken();
     verify(fs, never()).setDelegationToken(any());
-    Assert.assertNull(fs.getRenewToken());
+    assertNull(fs.getRenewToken());
     reset(fs);
 
     // verify first non-token op gets a token
@@ -394,9 +407,9 @@ public class TestWebHdfsTokens {
     verify(fs, times(1)).getDelegationToken(any());
     verify(fs, times(1)).setDelegationToken(any());
     token = fs.getRenewToken();
-    Assert.assertNotNull(token);      
-    Assert.assertEquals(testUser, getTokenOwner(token));
-    Assert.assertEquals(fs.getTokenKind(), token.getKind());
+    assertNotNull(token);
+    assertEquals(testUser, getTokenOwner(token));
+    assertEquals(fs.getTokenKind(), token.getKind());
     reset(fs);
 
     // verify prior token is reused
@@ -406,9 +419,9 @@ public class TestWebHdfsTokens {
     verify(fs, never()).getDelegationToken(anyString());
     verify(fs, never()).setDelegationToken(any());
     Token<?> token2 = fs.getRenewToken();
-    Assert.assertNotNull(token2);
-    Assert.assertEquals(fs.getTokenKind(), token.getKind());
-    Assert.assertSame(token, token2);
+    assertNotNull(token2);
+    assertEquals(fs.getTokenKind(), token.getKind());
+    assertSame(token, token2);
     reset(fs);
 
     // verify renew of expired token fails w/o getting a new token
@@ -416,37 +429,37 @@ public class TestWebHdfsTokens {
     fs.cancelDelegationToken(token);
     try {
       fs.renewDelegationToken(token);
-      Assert.fail("should have failed");
+      fail("should have failed");
     } catch (InvalidToken it) {
     } catch (Exception ex) {
-      Assert.fail("wrong exception:"+ex);
+      fail("wrong exception:"+ex);
     }
     verify(fs, never()).getDelegationToken();
     verify(fs, never()).replaceExpiredDelegationToken();
     verify(fs, never()).getDelegationToken(anyString());
     verify(fs, never()).setDelegationToken(any());
     token2 = fs.getRenewToken();
-    Assert.assertNotNull(token2);
-    Assert.assertEquals(fs.getTokenKind(), token.getKind());
-    Assert.assertSame(token, token2);
+    assertNotNull(token2);
+    assertEquals(fs.getTokenKind(), token.getKind());
+    assertSame(token, token2);
     reset(fs);
 
     // verify cancel of expired token fails w/o getting a new token
     try {
       fs.cancelDelegationToken(token);
-      Assert.fail("should have failed");
+      fail("should have failed");
     } catch (InvalidToken it) {
     } catch (Exception ex) {
-      Assert.fail("wrong exception:"+ex);
+      fail("wrong exception:"+ex);
     }
     verify(fs, never()).getDelegationToken();
     verify(fs, never()).replaceExpiredDelegationToken();
     verify(fs, never()).getDelegationToken(anyString());
     verify(fs, never()).setDelegationToken(any());
     token2 = fs.getRenewToken();
-    Assert.assertNotNull(token2);
-    Assert.assertEquals(fs.getTokenKind(), token.getKind());
-    Assert.assertSame(token, token2);
+    assertNotNull(token2);
+    assertEquals(fs.getTokenKind(), token.getKind());
+    assertSame(token, token2);
     reset(fs);
 
     // verify an expired token is replaced with a new token
@@ -458,10 +471,10 @@ public class TestWebHdfsTokens {
     verify(fs, times(1)).getDelegationToken(null);
     verify(fs, times(1)).setDelegationToken(any());
     token2 = fs.getRenewToken();
-    Assert.assertNotNull(token2);
-    Assert.assertNotSame(token, token2);
-    Assert.assertEquals(fs.getTokenKind(), token.getKind());
-    Assert.assertEquals(testUser, getTokenOwner(token2));
+    assertNotNull(token2);
+    assertNotSame(token, token2);
+    assertEquals(fs.getTokenKind(), token.getKind());
+    assertEquals(testUser, getTokenOwner(token2));
     reset(fs);
 
     // verify with open because it's a little different in how it
@@ -475,10 +488,10 @@ public class TestWebHdfsTokens {
     verify(fs, times(1)).getDelegationToken(null);
     verify(fs, times(1)).setDelegationToken(any());
     token2 = fs.getRenewToken();
-    Assert.assertNotNull(token2);
-    Assert.assertNotSame(token, token2);
-    Assert.assertEquals(fs.getTokenKind(), token.getKind());
-    Assert.assertEquals(testUser, getTokenOwner(token2));
+    assertNotNull(token2);
+    assertNotSame(token, token2);
+    assertEquals(fs.getTokenKind(), token.getKind());
+    assertEquals(testUser, getTokenOwner(token2));
     reset(fs);
 
     // verify fs close cancels the token
@@ -498,16 +511,16 @@ public class TestWebHdfsTokens {
         return spy((WebHdfsFileSystem) FileSystem.newInstance(uri, clusterConf));
       }
     });
-    Assert.assertNull(fs.getRenewToken());
+    assertNull(fs.getRenewToken());
     fs.getFileStatus(new Path("/"));
     verify(fs, times(1)).getDelegationToken();
     verify(fs, never()).replaceExpiredDelegationToken();
     verify(fs, never()).getDelegationToken(anyString());
     verify(fs, times(1)).setDelegationToken(eq(token));
     token2 = fs.getRenewToken();
-    Assert.assertNotNull(token2);
-    Assert.assertEquals(fs.getTokenKind(), token.getKind());
-    Assert.assertSame(token, token2);
+    assertNotNull(token2);
+    assertEquals(fs.getTokenKind(), token.getKind());
+    assertSame(token, token2);
     reset(fs);
 
     // verify it reuses the prior ugi token
@@ -517,9 +530,9 @@ public class TestWebHdfsTokens {
     verify(fs, never()).getDelegationToken(anyString());
     verify(fs, never()).setDelegationToken(any());
     token2 = fs.getRenewToken();
-    Assert.assertNotNull(token2);
-    Assert.assertEquals(fs.getTokenKind(), token.getKind());
-    Assert.assertSame(token, token2);
+    assertNotNull(token2);
+    assertEquals(fs.getTokenKind(), token.getKind());
+    assertSame(token, token2);
     reset(fs);
 
     // verify an expired ugi token is not replaced with a new token if the
@@ -528,19 +541,19 @@ public class TestWebHdfsTokens {
     for (int i=0; i<2; i++) {
       try {
         fs.getFileStatus(new Path("/"));
-        Assert.fail("didn't fail");
+        fail("didn't fail");
       } catch (InvalidToken it) {
       } catch (Exception ex) {
-        Assert.fail("wrong exception:"+ex);
+        fail("wrong exception:"+ex);
       }
       verify(fs, times(1)).getDelegationToken();
       verify(fs, times(1)).replaceExpiredDelegationToken();
       verify(fs, never()).getDelegationToken(anyString());
       verify(fs, never()).setDelegationToken(any());
       token2 = fs.getRenewToken();
-      Assert.assertNotNull(token2);
-      Assert.assertEquals(fs.getTokenKind(), token.getKind());
-      Assert.assertSame(token, token2);
+      assertNotNull(token2);
+      assertEquals(fs.getTokenKind(), token.getKind());
+      assertSame(token, token2);
       reset(fs);
     }
     
@@ -555,9 +568,9 @@ public class TestWebHdfsTokens {
     verify(fs, never()).getDelegationToken(anyString());
     verify(fs, times(1)).setDelegationToken(eq(token));
     token2 = fs.getRenewToken();
-    Assert.assertNotNull(token2);
-    Assert.assertEquals(fs.getTokenKind(), token.getKind());
-    Assert.assertSame(token, token2);
+    assertNotNull(token2);
+    assertEquals(fs.getTokenKind(), token.getKind());
+    assertSame(token, token2);
     reset(fs);
 
     // verify fs close does NOT cancel the ugi token

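The class-lifecycle renames in this file are mechanical: @BeforeClass and @AfterClass become @BeforeAll and @AfterAll, and the methods must stay static unless the class opts into @TestInstance(Lifecycle.PER_CLASS). A minimal sketch with placeholder fixture state:

    import static org.junit.jupiter.api.Assertions.assertNotNull;

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.Test;

    class ClassLifecycleSketch {
      private static String sharedFixture;  // placeholder for conf, cluster, etc.

      @BeforeAll  // was @BeforeClass
      static void setUp() {
        sharedFixture = "created once for the whole class";
      }

      @AfterAll  // was @AfterClass
      static void destroy() {
        sharedFixture = null;
      }

      @Test
      void usesSharedFixture() {
        assertNotNull(sharedFixture);
      }
    }
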
+ 42 - 35
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java

@@ -19,9 +19,9 @@
 package org.apache.hadoop.hdfs.web;
 
 import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.Mockito.mock;
 
 import java.io.IOException;
@@ -57,20 +57,21 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 public class TestWebHdfsUrl {
   // NOTE: port is never used 
   final URI uri = URI.create(WebHdfsConstants.WEBHDFS_SCHEME + "://" + "127.0.0.1:0");
 
-  @Before
+  @BeforeEach
   public void resetUGI() {
     UserGroupInformation.setConfiguration(new Configuration());
   }
   
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testEncodedPathUrl() throws IOException, URISyntaxException{
     Configuration conf = new Configuration();
 
@@ -82,11 +83,12 @@ public class TestWebHdfsUrl {
     Path fsPath = new Path(pathName);
     URL encodedPathUrl = webhdfs.toUrl(PutOpParam.Op.CREATE, fsPath);
     // We should get back the original file path after cycling back and decoding
-    Assert.assertEquals(WebHdfsFileSystem.PATH_PREFIX + pathName,
+    assertEquals(WebHdfsFileSystem.PATH_PREFIX + pathName,
         encodedPathUrl.toURI().getPath());
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testSimpleAuthParamsInUrl() throws IOException {
     Configuration conf = new Configuration();
 
@@ -107,7 +109,8 @@ public class TestWebHdfsUrl {
         fileStatusUrl);
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testSimpleProxyAuthParamsInUrl() throws IOException {
     Configuration conf = new Configuration();
 
@@ -130,7 +133,8 @@ public class TestWebHdfsUrl {
         fileStatusUrl);
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testSecureAuthParamsInUrl() throws IOException {
     Configuration conf = new Configuration();
     // fake turning on security so api thinks it should use tokens
@@ -150,8 +154,8 @@ public class TestWebHdfsUrl {
 
     // send user
     URL getTokenUrl = webhdfs.toUrl(GetOpParam.Op.GETDELEGATIONTOKEN, fsPath);
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        getTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(getTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             GetOpParam.Op.GETDELEGATIONTOKEN.toQueryString(),
@@ -163,8 +167,8 @@ public class TestWebHdfsUrl {
     // send user
     URL renewTokenUrl = webhdfs.toUrl(PutOpParam.Op.RENEWDELEGATIONTOKEN,
         fsPath, new TokenArgumentParam(tokenString));
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        renewTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(renewTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             PutOpParam.Op.RENEWDELEGATIONTOKEN.toQueryString(),
@@ -177,8 +181,8 @@ public class TestWebHdfsUrl {
     // send token
     URL cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
         fsPath, new TokenArgumentParam(tokenString));
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        cancelTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(cancelTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
@@ -202,8 +206,8 @@ public class TestWebHdfsUrl {
     // send user
     cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
         fsPath, new TokenArgumentParam(tokenString));
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        cancelTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(cancelTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
@@ -222,7 +226,8 @@ public class TestWebHdfsUrl {
         fileStatusUrl);    
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testSecureProxyAuthParamsInUrl() throws IOException {
     Configuration conf = new Configuration();
     // fake turning on security so api thinks it should use tokens
@@ -244,8 +249,8 @@ public class TestWebHdfsUrl {
 
     // send real+effective
     URL getTokenUrl = webhdfs.toUrl(GetOpParam.Op.GETDELEGATIONTOKEN, fsPath);
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        getTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(getTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             GetOpParam.Op.GETDELEGATIONTOKEN.toQueryString(),
@@ -258,8 +263,8 @@ public class TestWebHdfsUrl {
     // send real+effective
     URL renewTokenUrl = webhdfs.toUrl(PutOpParam.Op.RENEWDELEGATIONTOKEN,
         fsPath, new TokenArgumentParam(tokenString));
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        renewTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(renewTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             PutOpParam.Op.RENEWDELEGATIONTOKEN.toQueryString(),
@@ -272,8 +277,8 @@ public class TestWebHdfsUrl {
     // send token
     URL cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
         fsPath, new TokenArgumentParam(tokenString));
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        cancelTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(cancelTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
@@ -298,8 +303,8 @@ public class TestWebHdfsUrl {
     // send real+effective
     cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
         fsPath, new TokenArgumentParam(tokenString));
-    assertTrue("secure webhdfs SHOULD NOT use user.name parameter",
-        cancelTokenUrl.toString().indexOf(userParam) == -1);
+    assertTrue(cancelTokenUrl.toString().indexOf(userParam) == -1,
+        "secure webhdfs SHOULD NOT use user.name parameter");
     checkQueryParams(
         new String[]{
             PutOpParam.Op.CANCELDELEGATIONTOKEN.toQueryString(),
@@ -319,7 +324,8 @@ public class TestWebHdfsUrl {
         fileStatusUrl);    
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testCheckAccessUrl() throws IOException {
     Configuration conf = new Configuration();
 
@@ -341,7 +347,8 @@ public class TestWebHdfsUrl {
         checkAccessUrl);
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testBatchedListingUrl() throws Exception {
     Configuration conf = new Configuration();
 
@@ -421,7 +428,7 @@ public class TestWebHdfsUrl {
 
       //get file status and check that it was written properly.
       final FileStatus s1 = fs.getFileStatus(file1);
-      assertEquals("Write failed for file " + file1, length, s1.getLen());
+      assertEquals(length, s1.getLen(), "Write failed for file " + file1);
 
       boolean found = false;
       RemoteIterator<LocatedFileStatus> statusRemoteIterator =
@@ -434,7 +441,7 @@ public class TestWebHdfsUrl {
           found = true;
         }
       }
-      assertFalse("Could not find file with special character", !found);
+      assertTrue(found, "Could not find file with special character");
     } finally {
       cluster.shutdown();
     }
@@ -472,7 +479,7 @@ public class TestWebHdfsUrl {
 
       //get file status and check that it was written properly.
       final FileStatus s1 = fs.getFileStatus(file1);
-      assertEquals("Write failed for file " + file1, length, s1.getLen());
+      assertEquals(length, s1.getLen(), "Write failed for file " + file1);
 
       boolean found = false;
       RemoteIterator<LocatedFileStatus> statusRemoteIterator =
@@ -485,7 +492,7 @@ public class TestWebHdfsUrl {
           found = true;
         }
       }
-      assertFalse("Could not find file with special character", !found);
+      assertTrue(found, "Could not find file with special character");
     } finally {
       cluster.shutdown();
     }

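Every messaged assertion in this file swaps its arguments because org.junit.Assert put the optional failure message first, whereas org.junit.jupiter.api.Assertions takes it last, optionally as a lazy Supplier<String>. A short sketch of the convention with illustrative values:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    class AssertionMessageSketch {
      void check(long expectedLen, long actualLen) {
        // JUnit 4: assertEquals("Write failed", expectedLen, actualLen);
        assertEquals(expectedLen, actualLen, "Write failed");

        // The message may also be built lazily, evaluated only on failure.
        assertTrue(actualLen >= 0, () -> "negative length: " + actualLen);
      }
    }
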
+ 8 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithAuthenticationFilter.java

@@ -38,10 +38,11 @@ import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterInitializer;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class TestWebHdfsWithAuthenticationFilter {
   private static boolean authorized = false;
@@ -83,7 +84,7 @@ public class TestWebHdfsWithAuthenticationFilter {
   private static MiniDFSCluster cluster;
   private static FileSystem fs;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws IOException {
     conf = new Configuration();
     conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
@@ -96,7 +97,7 @@ public class TestWebHdfsWithAuthenticationFilter {
     cluster.waitActive();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws IOException {
     if (fs != null) {
       fs.close();
@@ -112,7 +113,7 @@ public class TestWebHdfsWithAuthenticationFilter {
     authorized = false;
     try {
       fs.getFileStatus(new Path("/"));
-      Assert.fail("The filter fails to block the request");
+      fail("The filter fails to block the request");
     } catch (IOException e) {
     }
     authorized = true;

+ 12 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java

@@ -33,10 +33,11 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Test WebHDFS with multiple NameNodes
@@ -55,7 +56,7 @@ public class TestWebHdfsWithMultipleNameNodes {
   private static MiniDFSCluster cluster;
   private static WebHdfsFileSystem[] webhdfs;
 
-  @BeforeClass
+  @BeforeAll
   public static void setupTest() {
     setLogLevel();
     try {
@@ -84,7 +85,7 @@ public class TestWebHdfsWithMultipleNameNodes {
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdownCluster() {
     if (cluster != null) {
       cluster.shutdown();
@@ -126,14 +127,14 @@ public class TestWebHdfsWithMultipleNameNodes {
     for(int i = 0; i < webhdfs.length; i++) {
       //check file length
       final long expected = writeStrings[i].length();
-      Assert.assertEquals(expected, webhdfs[i].getFileStatus(p).getLen());
+      assertEquals(expected, webhdfs[i].getFileStatus(p).getLen());
     }
 
     //test read: check file content for each namenode
     for(int i = 0; i < webhdfs.length; i++) {
       final FSDataInputStream in = webhdfs[i].open(p);
       for(int c, j = 0; (c = in.read()) != -1; j++) {
-        Assert.assertEquals(writeStrings[i].charAt(j), c);
+        assertEquals(writeStrings[i].charAt(j), c);
       }
       in.close();
     }
@@ -148,7 +149,7 @@ public class TestWebHdfsWithMultipleNameNodes {
     for(int i = 0; i < webhdfs.length; i++) {
       //check file length
       final long expected = writeStrings[i].length() + appendStrings[i].length();
-      Assert.assertEquals(expected, webhdfs[i].getFileStatus(p).getLen());
+      assertEquals(expected, webhdfs[i].getFileStatus(p).getLen());
     }
 
     //test read: check file content for each namenode
@@ -159,8 +160,8 @@ public class TestWebHdfsWithMultipleNameNodes {
         b.append((char)c);
       }
       final int wlen = writeStrings[i].length();
-      Assert.assertEquals(writeStrings[i], b.substring(0, wlen));
-      Assert.assertEquals(appendStrings[i], b.substring(wlen));
+      assertEquals(writeStrings[i], b.substring(0, wlen));
+      assertEquals(appendStrings[i], b.substring(wlen));
       in.close();
     }
   }

+ 60 - 46
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithRestCsrfPreventionFilter.java

@@ -20,8 +20,9 @@ package org.apache.hadoop.hdfs.web;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_WEBHDFS_REST_CSRF_BROWSER_USERAGENTS_REGEX_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_WEBHDFS_REST_CSRF_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPSERVER_FILTER_HANDLERS;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -37,14 +38,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 
 /**
  * Tests use of the cross-site-request forgery (CSRF) prevention filter with
@@ -53,29 +49,25 @@ import org.junit.runners.Parameterized.Parameters;
  * WebHDFS client.  If the server is configured with CSRF prevention, but the
  * client is not, then protected operations are expected to fail.
  */
-@RunWith(Parameterized.class)
 public class TestWebHdfsWithRestCsrfPreventionFilter {
 
   private static final Path FILE = new Path("/file");
 
-  private final boolean nnRestCsrf;
-  private final boolean dnRestCsrf;
-  private final boolean clientRestCsrf;
+  private boolean nnRestCsrf;
+  private boolean dnRestCsrf;
+  private boolean clientRestCsrf;
 
   private MiniDFSCluster cluster;
   private FileSystem fs, webhdfs;
 
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  public TestWebHdfsWithRestCsrfPreventionFilter(boolean nnRestCsrf,
-      boolean dnRestCsrf, boolean clientRestCsrf) {
-    this.nnRestCsrf = nnRestCsrf;
-    this.dnRestCsrf = dnRestCsrf;
-    this.clientRestCsrf = clientRestCsrf;
+  public void initTestWebHdfsWithRestCsrfPreventionFilter(boolean pNnRestCsrf,
+      boolean pDnRestCsrf, boolean pClientRestCsrf) throws Exception {
+    this.nnRestCsrf = pNnRestCsrf;
+    this.dnRestCsrf = pDnRestCsrf;
+    this.clientRestCsrf = pClientRestCsrf;
+    before();
   }
 
-  @Parameters
   public static Iterable<Object[]> data() {
     return Arrays.asList(new Object[][] {
         {false, false, false},
@@ -88,7 +80,6 @@ public class TestWebHdfsWithRestCsrfPreventionFilter {
         {false, false, true}});
   }
 
-  @Before
   public void before() throws Exception {
     Configuration nnConf = new Configuration();
     nnConf.setBoolean(DFS_WEBHDFS_REST_CSRF_ENABLED_KEY, nnRestCsrf);
@@ -114,7 +105,7 @@ public class TestWebHdfsWithRestCsrfPreventionFilter {
         NetUtils.getHostPortString(addr)), clientConf);
   }
 
-  @After
+  @AfterEach
   public void after() {
     IOUtils.closeStream(webhdfs);
     IOUtils.closeStream(fs);
@@ -123,51 +114,74 @@ public class TestWebHdfsWithRestCsrfPreventionFilter {
     }
   }
 
-  @Test
-  public void testCreate() throws Exception {
+  @MethodSource("data")
+  @ParameterizedTest
+  public void testCreate(boolean pNnRestCsrf, boolean pDnRestCsrf, boolean pClientRestCsrf)
+      throws Exception {
+    initTestWebHdfsWithRestCsrfPreventionFilter(pNnRestCsrf, pDnRestCsrf, pClientRestCsrf);
     // create is an HTTP PUT that redirects from NameNode to DataNode, so we
     // expect CSRF prevention on either server to block an unconfigured client.
     if ((nnRestCsrf || dnRestCsrf) && !clientRestCsrf) {
-      expectException();
+      IOException ex = assertThrows(IOException.class, () -> {
+        assertTrue(webhdfs.createNewFile(FILE));
+      });
+      assertTrue(ex.getMessage().contains("Missing Required Header"));
+    } else {
+      assertTrue(webhdfs.createNewFile(FILE));
     }
-    assertTrue(webhdfs.createNewFile(FILE));
   }
 
-  @Test
-  public void testDelete() throws Exception {
+  @MethodSource("data")
+  @ParameterizedTest
+  public void testDelete(boolean pNnRestCsrf, boolean pDnRestCsrf, boolean pClientRestCsrf)
+      throws Exception {
+    initTestWebHdfsWithRestCsrfPreventionFilter(pNnRestCsrf, pDnRestCsrf, pClientRestCsrf);
     DFSTestUtil.createFile(fs, FILE, 1024, (short)1, 0L);
     // delete is an HTTP DELETE that executes solely within the NameNode as a
     // metadata operation, so we expect CSRF prevention configured on the
     // NameNode to block an unconfigured client.
     if (nnRestCsrf && !clientRestCsrf) {
-      expectException();
+      IOException ex = assertThrows(IOException.class, () -> {
+        assertTrue(webhdfs.delete(FILE, false));
+      });
+      assertTrue(ex.getMessage().contains("Missing Required Header"));
+    } else {
+      assertTrue(webhdfs.delete(FILE, false));
     }
-    assertTrue(webhdfs.delete(FILE, false));
   }
 
-  @Test
-  public void testGetFileStatus() throws Exception {
+  @MethodSource("data")
+  @ParameterizedTest
+  public void testGetFileStatus(boolean pNnRestCsrf, boolean pDnRestCsrf, boolean pClientRestCsrf)
+      throws Exception {
+    initTestWebHdfsWithRestCsrfPreventionFilter(pNnRestCsrf, pDnRestCsrf, pClientRestCsrf);
     // getFileStatus is an HTTP GET, not subject to CSRF prevention, so we
     // expect it to succeed always, regardless of CSRF configuration.
     assertNotNull(webhdfs.getFileStatus(new Path("/")));
   }
 
-  @Test
-  public void testTruncate() throws Exception {
+  @MethodSource("data")
+  @ParameterizedTest
+  public void testTruncate(boolean pNnRestCsrf, boolean pDnRestCsrf, boolean pClientRestCsrf)
+      throws Exception {
+    initTestWebHdfsWithRestCsrfPreventionFilter(pNnRestCsrf, pDnRestCsrf, pClientRestCsrf);
     DFSTestUtil.createFile(fs, FILE, 1024, (short)1, 0L);
     // truncate is an HTTP POST that executes solely within the NameNode as a
     // metadata operation, so we expect CSRF prevention configured on the
     // NameNode to block an unconfigured client.
     if (nnRestCsrf && !clientRestCsrf) {
-      expectException();
+      IOException ex = assertThrows(IOException.class, () -> {
+        assertTrue(
+            webhdfs.hasPathCapability(FILE, CommonPathCapabilities.FS_TRUNCATE),
+            "WebHdfs supports truncate");
+        assertTrue(webhdfs.truncate(FILE, 0L));
+      });
+      assertTrue(ex.getMessage().contains("Missing Required Header"));
+    } else {
+      assertTrue(
+          webhdfs.hasPathCapability(FILE, CommonPathCapabilities.FS_TRUNCATE),
+          "WebHdfs supports truncate");
+      assertTrue(webhdfs.truncate(FILE, 0L));
     }
-    assertTrue("WebHdfs supports truncate",
-        webhdfs.hasPathCapability(FILE, CommonPathCapabilities.FS_TRUNCATE));
-    assertTrue(webhdfs.truncate(FILE, 0L));
-  }
-
-  private void expectException() {
-    exception.expect(IOException.class);
-    exception.expectMessage("Missing Required Header");
   }
 }

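Replacing the ExpectedException rule (and, further down in TestParam, the @Test(expected = ...) attribute) with assertThrows moves the expectation next to the statement that should throw and hands back the exception for further checks. A hedged sketch of the rewrite, with the IOException simulated rather than produced by a real WebHDFS call:

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.io.IOException;

    class ExpectedExceptionSketch {
      // JUnit 4:
      //   @Rule public ExpectedException exception = ExpectedException.none();
      //   exception.expect(IOException.class);
      //   exception.expectMessage("Missing Required Header");
      //   webhdfs.createNewFile(FILE);  // rule verified type and message
      void rewritten() {
        IOException ex = assertThrows(IOException.class, () -> {
          throw new IOException("Missing Required Header in request");  // simulated
        });
        // Message expectations become ordinary assertions on the result.
        assertTrue(ex.getMessage().contains("Missing Required Header"));
      }
    }
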
+ 77 - 75
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java

@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.hdfs.web.resources;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -40,8 +43,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestParam {
   public static final Logger LOG = LoggerFactory.getLogger(TestParam.class);
@@ -51,13 +53,13 @@ public class TestParam {
   @Test
   public void testAccessTimeParam() {
     final AccessTimeParam p = new AccessTimeParam(AccessTimeParam.DEFAULT);
-    Assert.assertEquals(-1L, p.getValue().longValue());
+    assertEquals(-1L, p.getValue().longValue());
 
     new AccessTimeParam(-1L);
 
     try {
       new AccessTimeParam(-2L);
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -66,8 +68,8 @@ public class TestParam {
   @Test
   public void testBlockSizeParam() {
     final BlockSizeParam p = new BlockSizeParam(BlockSizeParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
-    Assert.assertEquals(
+    assertEquals(null, p.getValue());
+    assertEquals(
         conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
             DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT),
         p.getValue(conf));
@@ -76,7 +78,7 @@ public class TestParam {
 
     try {
       new BlockSizeParam(0L);
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -85,8 +87,8 @@ public class TestParam {
   @Test
   public void testBufferSizeParam() {
     final BufferSizeParam p = new BufferSizeParam(BufferSizeParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
-    Assert.assertEquals(
+    assertEquals(null, p.getValue());
+    assertEquals(
         conf.getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
             CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT),
         p.getValue(conf));
@@ -95,7 +97,7 @@ public class TestParam {
 
     try {
       new BufferSizeParam(0);
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -104,19 +106,19 @@ public class TestParam {
   @Test
   public void testDelegationParam() {
     final DelegationParam p = new DelegationParam(DelegationParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
+    assertEquals(null, p.getValue());
   }
 
   @Test
   public void testDestinationParam() {
     final DestinationParam p = new DestinationParam(DestinationParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
+    assertEquals(null, p.getValue());
 
     new DestinationParam("/abc");
 
     try {
       new DestinationParam("abc");
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -125,19 +127,19 @@ public class TestParam {
   @Test
   public void testGroupParam() {
     final GroupParam p = new GroupParam(GroupParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
+    assertEquals(null, p.getValue());
   }
 
   @Test
   public void testModificationTimeParam() {
     final ModificationTimeParam p = new ModificationTimeParam(ModificationTimeParam.DEFAULT);
-    Assert.assertEquals(-1L, p.getValue().longValue());
+    assertEquals(-1L, p.getValue().longValue());
 
     new ModificationTimeParam(-1L);
 
     try {
       new ModificationTimeParam(-2L);
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -146,13 +148,13 @@ public class TestParam {
   @Test
   public void testOverwriteParam() {
     final OverwriteParam p = new OverwriteParam(OverwriteParam.DEFAULT);
-    Assert.assertEquals(false, p.getValue());
+    assertEquals(false, p.getValue());
 
     new OverwriteParam("trUe");
 
     try {
       new OverwriteParam("abc");
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -161,20 +163,20 @@ public class TestParam {
   @Test
   public void testOwnerParam() {
     final OwnerParam p = new OwnerParam(OwnerParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
+    assertEquals(null, p.getValue());
   }
 
   @Test
   public void testPermissionParam() {
     final PermissionParam p = new PermissionParam(PermissionParam.DEFAULT);
-    Assert.assertEquals(new FsPermission((short)0755), p.getDirFsPermission());
-    Assert.assertEquals(new FsPermission((short)0644), p.getFileFsPermission());
+    assertEquals(new FsPermission((short)0755), p.getDirFsPermission());
+    assertEquals(new FsPermission((short)0644), p.getFileFsPermission());
 
     new PermissionParam("0");
 
     try {
       new PermissionParam("-1");
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -183,21 +185,21 @@ public class TestParam {
 
     try {
       new PermissionParam("2000");
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new PermissionParam("8");
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new PermissionParam("abc");
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -206,13 +208,13 @@ public class TestParam {
   @Test
   public void testRecursiveParam() {
     final RecursiveParam p = new RecursiveParam(RecursiveParam.DEFAULT);
-    Assert.assertEquals(false, p.getValue());
+    assertEquals(false, p.getValue());
 
     new RecursiveParam("falSe");
 
     try {
       new RecursiveParam("abc");
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -221,14 +223,14 @@ public class TestParam {
   @Test
   public void testRenewerParam() {
     final RenewerParam p = new RenewerParam(RenewerParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
+    assertEquals(null, p.getValue());
   }
 
   @Test
   public void testReplicationParam() {
     final ReplicationParam p = new ReplicationParam(ReplicationParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
-    Assert.assertEquals(
+    assertEquals(null, p.getValue());
+    assertEquals(
         (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
             DFSConfigKeys.DFS_REPLICATION_DEFAULT),
         p.getValue(conf));
@@ -237,7 +239,7 @@ public class TestParam {
 
     try {
       new ReplicationParam((short)0);
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -250,7 +252,7 @@ public class TestParam {
     Param<?, ?> equalParam = new RenewerParam("renewer=equal");
     final String expected = "&renewer=renewer%3Dequal&token=token%26ampersand";
     final String actual = Param.toSortedString(sep, equalParam, ampParam);
-    Assert.assertEquals(expected, actual);
+    assertEquals(expected, actual);
   }
 
   @Test
@@ -259,14 +261,18 @@ public class TestParam {
     assertNull(userParam.getValue());
   }
 
-  @Test(expected = IllegalArgumentException.class)
+  @Test
   public void userNameInvalidStart() {
-    new UserParam("1x");
+    assertThrows(IllegalArgumentException.class, () -> {
+      new UserParam("1x");
+    });
   }
 
-  @Test(expected = IllegalArgumentException.class)
+  @Test
   public void userNameInvalidDollarSign() {
-    new UserParam("1$x");
+    assertThrows(IllegalArgumentException.class, () -> {
+      new UserParam("1$x");
+    });
   }
 
   @Test
@@ -293,7 +299,7 @@ public class TestParam {
 
       final String expected = StringUtils.join(",", Arrays.asList(sub));
       final ConcatSourcesParam computed = new ConcatSourcesParam(paths);
-      Assert.assertEquals(expected, computed.getValue());
+      assertEquals(expected, computed.getValue());
     }
   }
 
@@ -319,13 +325,13 @@ public class TestParam {
     List<AclEntry> setAclList =
         AclEntry.parseAclSpec("user::rwx,group::r--,other::rwx,user:user1:rwx",
             true);
-    Assert.assertEquals(setAclList.toString(), p.getAclPermission(true)
+    assertEquals(setAclList.toString(), p.getAclPermission(true)
         .toString());
 
     new AclPermissionParam("user::rw-,group::rwx,other::rw-,user:user1:rwx");
     try {
       new AclPermissionParam("user::rw--,group::rwx-,other::rw-");
-      Assert.fail();
+      fail();
     } catch (IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -335,21 +341,21 @@ public class TestParam {
 
     try {
       new AclPermissionParam("user:r-,group:rwx,other:rw-");
-      Assert.fail();
+      fail();
     } catch (IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new AclPermissionParam("default:::r-,default:group::rwx,other::rw-");
-      Assert.fail();
+      fail();
     } catch (IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new AclPermissionParam("user:r-,group::rwx,other:rw-,mask:rw-,temp::rwx");
-      Assert.fail();
+      fail();
     } catch (IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -375,12 +381,12 @@ public class TestParam {
       String numericUserSpec = "user:110201:rwx";
       AclPermissionParam aclNumericUserParam =
           new AclPermissionParam(numericUserSpec);
-      Assert.assertEquals(numericUserSpec, aclNumericUserParam.getValue());
+      assertEquals(numericUserSpec, aclNumericUserParam.getValue());
 
       String oddGroupSpec = "group:foo@bar:rwx";
       AclPermissionParam aclGroupWithDomainParam =
           new AclPermissionParam(oddGroupSpec);
-      Assert.assertEquals(oddGroupSpec, aclGroupWithDomainParam.getValue());
+      assertEquals(oddGroupSpec, aclGroupWithDomainParam.getValue());
 
     } finally {
       // Revert back to the default rules for remainder of tests
@@ -392,22 +398,22 @@ public class TestParam {
   @Test
   public void testXAttrNameParam() {
     final XAttrNameParam p = new XAttrNameParam("user.a1");
-    Assert.assertEquals(p.getXAttrName(), "user.a1");
+    assertEquals(p.getXAttrName(), "user.a1");
   }
   
   @Test
   public void testXAttrValueParam() throws IOException {
     final XAttrValueParam p = new XAttrValueParam("0x313233");
-    Assert.assertArrayEquals(p.getXAttrValue(), 
+    assertArrayEquals(p.getXAttrValue(),
         XAttrCodec.decodeValue("0x313233"));
   }
   
   @Test
   public void testXAttrEncodingParam() {
     final XAttrEncodingParam p = new XAttrEncodingParam(XAttrCodec.BASE64);
-    Assert.assertEquals(p.getEncoding(), XAttrCodec.BASE64);
+    assertEquals(p.getEncoding(), XAttrCodec.BASE64);
     final XAttrEncodingParam p1 = new XAttrEncodingParam(p.getValueString());
-    Assert.assertEquals(p1.getEncoding(), XAttrCodec.BASE64);
+    assertEquals(p1.getEncoding(), XAttrCodec.BASE64);
   }
   
   @Test
@@ -415,9 +421,9 @@ public class TestParam {
     EnumSet<XAttrSetFlag> flag = EnumSet.of(
         XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE);
     final XAttrSetFlagParam p = new XAttrSetFlagParam(flag);
-    Assert.assertEquals(p.getFlag(), flag);
+    assertEquals(p.getFlag(), flag);
     final XAttrSetFlagParam p1 = new XAttrSetFlagParam(p.getValueString());
-    Assert.assertEquals(p1.getFlag(), flag);
+    assertEquals(p1.getFlag(), flag);
   }
   
   @Test
@@ -426,7 +432,7 @@ public class TestParam {
         Options.Rename.OVERWRITE, Options.Rename.NONE);
     final RenameOptionSetParam p1 = new RenameOptionSetParam(
         p.getValueString());
-    Assert.assertEquals(p1.getValue(), EnumSet.of(
+    assertEquals(p1.getValue(), EnumSet.of(
         Options.Rename.OVERWRITE, Options.Rename.NONE));
   }
 
@@ -434,8 +440,8 @@ public class TestParam {
   public void testSnapshotNameParam() {
     final OldSnapshotNameParam s1 = new OldSnapshotNameParam("s1");
     final SnapshotNameParam s2 = new SnapshotNameParam("s2");
-    Assert.assertEquals("s1", s1.getValue());
-    Assert.assertEquals("s2", s2.getValue());
+    assertEquals("s1", s1.getValue());
+    assertEquals("s2", s2.getValue());
   }
 
   @Test
@@ -451,42 +457,42 @@ public class TestParam {
 
     try {
       new FsActionParam("rw");
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new FsActionParam("qwx");
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new FsActionParam("qrwx");
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new FsActionParam("rwxx");
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new FsActionParam("xwr");
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
 
     try {
       new FsActionParam("r-w");
-      Assert.fail();
+      fail();
     } catch(IllegalArgumentException e) {
       LOG.info("EXPECTED: " + e);
     }
@@ -496,15 +502,15 @@ public class TestParam {
   public void testStartAfterParam() throws Exception {
     String s = "/helloWorld";
     StartAfterParam param = new StartAfterParam(s);
-    Assert.assertEquals(s, param.getValue());
+    assertEquals(s, param.getValue());
   }
 
   @Test
   public void testStoragePolicyParam() {
     StoragePolicyParam p = new StoragePolicyParam(StoragePolicyParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
+    assertEquals(null, p.getValue());
     p = new StoragePolicyParam("COLD");
-    Assert.assertEquals("COLD", p.getValue());
+    assertEquals("COLD", p.getValue());
   }
 
   @Test
@@ -537,17 +543,16 @@ public class TestParam {
   @Test
   public void testECPolicyParam() {
     ECPolicyParam p = new ECPolicyParam(ECPolicyParam.DEFAULT);
-    Assert.assertEquals(null, p.getValue());
+    assertEquals(null, p.getValue());
     p = new ECPolicyParam("RS-6-3-1024k");
-    Assert.assertEquals("RS-6-3-1024k", p.getValue());
+    assertEquals("RS-6-3-1024k", p.getValue());
   }
 
   @Test
   public void testHttpOpParams() {
     try {
       new PostOpParam("TEST");
-      Assert
-          .fail("Construct the PostOpParam with param value 'TEST' should be"
+      fail("Construct the PostOpParam with param value 'TEST' should be"
               + " failed.");
     } catch (IllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains(
@@ -555,8 +560,7 @@ public class TestParam {
     }
     try {
       new PutOpParam("TEST");
-      Assert
-          .fail("Construct the PutOpParam with param value 'TEST' should be"
+      fail("Construct the PutOpParam with param value 'TEST' should be"
               + " failed.");
     } catch (IllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains(
@@ -564,8 +568,7 @@ public class TestParam {
     }
     try {
       new DeleteOpParam("TEST");
-      Assert
-          .fail("Construct the DeleteOpParam with param value 'TEST' should be"
+      fail("Construct the DeleteOpParam with param value 'TEST' should be"
               + " failed.");
     } catch (IllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains(
@@ -573,8 +576,7 @@ public class TestParam {
     }
     try {
       new GetOpParam("TEST");
-      Assert
-          .fail("Construct the GetOpParam with param value 'TEST' should be"
+      fail("Construct the GetOpParam with param value 'TEST' should be"
               + " failed.");
     } catch (IllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains(