
HDFS-17720. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs-rbf Part2.

Closes #7531

Signed-off-by: Shilun Fan <slfan1989@apache.org>
Signed-off-by: Chris Nauroth <cnauroth@apache.org>
zhtttylz committed 1 month ago
commit 18f761785a
70 changed files with 867 additions and 918 deletions
  1. 6 6
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterAsyncRpcFairnessPolicyController.java
  2. 17 21
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java
  3. 5 5
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDFSRouter.java
  4. 11 13
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java
  5. 9 9
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableRouterQuota.java
  6. 2 2
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestFederationUtil.java
  7. 6 6
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestNoNamenodesAvailableLongTime.java
  8. 63 73
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestObserverWithRouter.java
  9. 6 6
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRenewLeaseWithSameINodeId.java
  10. 7 7
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java
  11. 11 11
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
  12. 70 82
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java
  13. 17 18
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminGenericRefresh.java
  14. 12 16
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAllResolver.java
  15. 47 50
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java
  16. 27 33
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java
  17. 2 2
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederatedState.java
  18. 13 11
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java
  19. 11 11
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java
  20. 9 9
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java
  21. 11 11
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFsck.java
  22. 9 9
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterHeartbeatService.java
  23. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterHttpServerXFrame.java
  24. 6 6
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMissingFolderMulti.java
  25. 11 11
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
  26. 50 36
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java
  27. 11 12
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefreshSecure.java
  28. 11 11
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableWithoutDefaultNS.java
  29. 7 7
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMultiRack.java
  30. 9 14
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java
  31. 12 16
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java
  32. 8 8
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeWebScheme.java
  33. 5 5
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNetworkTopologyServlet.java
  34. 17 24
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterPolicyProvider.java
  35. 11 11
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java
  36. 8 8
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java
  37. 11 14
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java
  38. 21 23
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java
  39. 7 7
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRefreshSuperUserGroupsConfiguration.java
  40. 8 8
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRetryCache.java
  41. 28 29
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
  42. 20 16
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java
  43. 9 9
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcSingleNS.java
  44. 6 6
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcStoragePolicySatisfier.java
  45. 16 16
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java
  46. 12 12
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterTrash.java
  47. 15 15
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterUserMappings.java
  48. 7 7
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java
  49. 6 10
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWithSecureStartup.java
  50. 5 5
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestSafeMode.java
  51. 9 9
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/RouterAsyncProtocolTestBase.java
  52. 2 2
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestAsyncRouterAdmin.java
  53. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncCacheAdmin.java
  54. 6 6
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncClientProtocol.java
  55. 13 13
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncErasureCoding.java
  56. 2 2
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncMountTable.java
  57. 5 5
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncNamenodeProtocol.java
  58. 14 14
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncQuota.java
  59. 6 6
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncRPCMultipleDestinationMountTableResolver.java
  60. 6 6
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncRpc.java
  61. 12 12
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncRpcClient.java
  62. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncRpcMultiDestination.java
  63. 7 6
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncRpcServer.java
  64. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncSnapshot.java
  65. 6 6
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncStoragePolicy.java
  66. 4 4
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncUserProtocol.java
  67. 2 2
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncWebHdfsMethods.java
  68. 6 8
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/TestAsyncUtil.java
  69. 43 43
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/security/token/TestSQLDelegationTokenSecretManagerImpl.java
  70. 0 1
      hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFile.java
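
All 70 files below receive the same mechanical JUnit 4 to JUnit 5 (Jupiter) conversion: test annotations and assertions move to the org.junit.jupiter.api packages, and the lifecycle annotations are renamed. As a quick orientation, here is a minimal sketch of the mapping; the class below is illustrative only and is not part of the patch.

    // Illustrative sketch only; this class is not part of the patch.
    // It maps the JUnit 4 imports and lifecycle annotations to the
    // JUnit 5 (Jupiter) replacements used throughout the files below.
    import org.junit.jupiter.api.AfterAll;    // was org.junit.AfterClass
    import org.junit.jupiter.api.AfterEach;   // was org.junit.After
    import org.junit.jupiter.api.BeforeAll;   // was org.junit.BeforeClass
    import org.junit.jupiter.api.BeforeEach;  // was org.junit.Before
    import org.junit.jupiter.api.Test;        // was org.junit.Test
    // Assertions move from org.junit.Assert to org.junit.jupiter.api.Assertions.

    public class ExampleMigratedTest {

      @BeforeAll    // JUnit 4: @BeforeClass
      public static void globalSetUp() {
      }

      @BeforeEach   // JUnit 4: @Before
      public void setup() {
      }

      @Test
      public void testSomething() {
      }

      @AfterEach    // JUnit 4: @After
      public void cleanup() {
      }

      @AfterAll     // JUnit 4: @AfterClass
      public static void tearDown() {
      }
    }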

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/fairness/TestRouterAsyncRpcFairnessPolicyController.java

@@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.server.federation.router.FederationUtil;
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.LoggerFactory;
 
 import java.util.concurrent.TimeUnit;
@@ -34,9 +34,9 @@ import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_ASYNC_RPC_MAX_ASYNC_CALL_PERMIT_DEFAULT;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FAIRNESS_ACQUIRE_TIMEOUT;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_MONITOR_NAMENODE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test functionality of {@link RouterAsyncRpcFairnessPolicyController).
@@ -116,8 +116,8 @@ public class TestRouterAsyncRpcFairnessPolicyController {
     }
     String infoMsg = String.format(
         RouterAsyncRpcFairnessPolicyController.INIT_MSG, permits);
-    assertTrue("Should contain info message: " + infoMsg,
-        logs.getOutput().contains(infoMsg));
+    assertTrue(logs.getOutput().contains(infoMsg), "Should contain info message: " +
+        infoMsg);
   }
 
   private RouterRpcFairnessPolicyController getFairnessPolicyController(
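
In the assertion API itself, JUnit 5 moves the optional failure message from the first argument to the last, which is what the hunk above changes for assertTrue. A minimal sketch of the same reordering; the method and variable names here are hypothetical, not taken from the patch.

    // Hypothetical sketch of the message-reordering pattern; not code from the patch.
    import static org.junit.jupiter.api.Assertions.assertTrue;

    public class MessageOrderExample {
      void check(String logOutput, String infoMsg) {
        // JUnit 4: assertTrue("Should contain info message: " + infoMsg,
        //     logOutput.contains(infoMsg));
        // JUnit 5: the failure message is the trailing parameter.
        assertTrue(logOutput.contains(infoMsg), "Should contain info message: " + infoMsg);
      }
    }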

+ 17 - 21
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestConnectionManager.java

@@ -27,11 +27,9 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.Rule;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.HashMap;
@@ -39,11 +37,12 @@ import java.util.Map;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 /**
  * Test functionalities of {@link ConnectionManager}, which manages a pool
@@ -62,7 +61,7 @@ public class TestConnectionManager {
   private static final String TEST_NN_ADDRESS = "nn1:8080";
   private static final String UNRESOLVED_TEST_NN_ADDRESS = "unknownhost:8080";
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = new Configuration();
     connManager = new ConnectionManager(conf);
@@ -71,10 +70,7 @@ public class TestConnectionManager {
     connManager.start();
   }
 
-  @Rule
-  public ExpectedException exceptionRule = ExpectedException.none();
-
-  @After
+  @AfterEach
   public void shutdown() {
     if (connManager != null) {
       connManager.close();
@@ -199,13 +195,13 @@ public class TestConnectionManager {
   @Test
   public void testGetConnectionWithException() throws Exception {
     String exceptionCause = "java.net.UnknownHostException: unknownhost";
-    exceptionRule.expect(IllegalArgumentException.class);
-    exceptionRule.expectMessage(exceptionCause);
 
-    // Create a bad connection pool pointing to unresolvable namenode address.
-    ConnectionPool badPool = new ConnectionPool(
-        conf, UNRESOLVED_TEST_NN_ADDRESS, TEST_USER1, 1, 10, 0.5f,
-        ClientProtocol.class, null);
+    assertThrows(IllegalArgumentException.class, () -> {
+      // Create a bad connection pool pointing to unresolvable namenode address.
+      ConnectionPool badPool = new ConnectionPool(
+          conf, UNRESOLVED_TEST_NN_ADDRESS, TEST_USER1, 1, 10, 0.5f,
+          ClientProtocol.class, null);
+    }, exceptionCause);
   }
 
   @Test
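
JUnit 5 drops the ExpectedException rule, so the hunk above replaces the @Rule with Assertions.assertThrows, which wraps the failing call in a lambda. assertThrows also returns the thrown exception, so its text can still be checked when needed. The sketch below is illustrative only; its names are not taken from the patch.

    // Illustrative sketch only; names below are hypothetical.
    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    public class AssertThrowsExample {
      void rejectsUnresolvableAddress() {
        // JUnit 4 used an ExpectedException @Rule:
        //   exceptionRule.expect(IllegalArgumentException.class);
        //   exceptionRule.expectMessage("unknownhost");
        // JUnit 5 wraps the failing code in assertThrows; the returned exception
        // can be inspected if the message needs to be verified.
        IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> {
          throw new IllegalArgumentException("java.net.UnknownHostException: unknownhost");
        });
        assertTrue(e.getMessage().contains("unknownhost"));
      }
    }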

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDFSRouter.java

@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HAServiceProtocol;
@@ -29,7 +29,7 @@ import org.apache.hadoop.tools.fedbalance.FedBalanceConfigs;
 
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createNamenodeReport;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class TestDFSRouter {
 
@@ -40,8 +40,8 @@ public class TestDFSRouter {
         configuration.get(FedBalanceConfigs.SCHEDULER_JOURNAL_URI);
     int workerThreads =
         configuration.getInt(FedBalanceConfigs.WORK_THREAD_NUM, -1);
-    Assert.assertEquals("hdfs://localhost:8020/tmp/procedure", journalUri);
-    Assert.assertEquals(10, workerThreads);
+    Assertions.assertEquals("hdfs://localhost:8020/tmp/procedure", journalUri);
+    Assertions.assertEquals(10, workerThreads);
   }
 
   @Test

+ 11 - 13
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableNameservices.java

@@ -19,8 +19,8 @@ package org.apache.hadoop.hdfs.server.federation.router;
 
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.simulateSlowNamenode;
 import static org.apache.hadoop.util.Time.monotonicNow;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.Iterator;
@@ -50,10 +50,10 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.DisableNameservic
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.codehaus.jettison.json.JSONObject;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test the behavior when disabling name services.
@@ -65,7 +65,7 @@ public class TestDisableNameservices {
   private static RouterClient routerAdminClient;
   private static ClientProtocol routerProtocol;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     // Build and start a federated cluster
     cluster = new StateStoreDFSCluster(false, 2);
@@ -131,7 +131,7 @@ public class TestDisableNameservices {
     nn1.getFileSystem().mkdirs(new Path("/dirns1/1"));
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.stopRouter(routerContext);
@@ -140,7 +140,7 @@ public class TestDisableNameservices {
     }
   }
 
-  @After
+  @AfterEach
   public void cleanup() throws IOException {
     Router router = routerContext.getRouter();
     StateStoreService stateStore = router.getStateStore();
@@ -161,8 +161,7 @@ public class TestDisableNameservices {
     long t0 = monotonicNow();
     routerProtocol.renewLease("client0", null);
     long t = monotonicNow() - t0;
-    assertTrue("It took too little: " + t + "ms",
-        t > TimeUnit.SECONDS.toMillis(1));
+    assertTrue(t > TimeUnit.SECONDS.toMillis(1), "It took too little: " + t + "ms");
     // Return the results from all subclusters even if slow
     FileSystem routerFs = routerContext.getFileSystem();
     FileStatus[] filesStatus = routerFs.listStatus(new Path("/"));
@@ -180,8 +179,7 @@ public class TestDisableNameservices {
     long t0 = monotonicNow();
     routerProtocol.renewLease("client0", null);
     long t = monotonicNow() - t0;
-    assertTrue("It took too long: " + t + "ms",
-        t < TimeUnit.SECONDS.toMillis(1));
+    assertTrue(t < TimeUnit.SECONDS.toMillis(1), "It took too long: " + t + "ms");
     // We should not report anything from ns0
     FileSystem routerFs = routerContext.getFileSystem();
 

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestDisableRouterQuota.java

@@ -24,13 +24,13 @@ import java.io.IOException;
 
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * Test the behavior when disabling the Router quota.
@@ -39,7 +39,7 @@ public class TestDisableRouterQuota {
 
   private static Router router;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     // Build and start a router
     router = new Router();
@@ -53,7 +53,7 @@ public class TestDisableRouterQuota {
     router.start();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws IOException {
     if (router != null) {
       router.stop();
@@ -61,7 +61,7 @@ public class TestDisableRouterQuota {
     }
   }
 
-  @Before
+  @BeforeEach
   public void checkDisableQuota() {
     assertFalse(router.isQuotaEnabled());
   }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestFederationUtil.java

@@ -24,9 +24,9 @@ import org.apache.hadoop.hdfs.server.federation.MockResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_FILE_RESOLVER_CLIENT_CLASS;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS;
 

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestNoNamenodesAvailableLongTime.java

@@ -36,8 +36,8 @@ import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServi
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 
 import java.io.IOException;
@@ -47,9 +47,9 @@ import java.util.List;
 import static org.apache.hadoop.ha.HAServiceProtocol.HAServiceState.ACTIVE;
 import static org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.DEFAULT_HEARTBEAT_INTERVAL_MS;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * When failover occurs, the router may record that the ns has no active namenode
@@ -68,7 +68,7 @@ public class TestNoNamenodesAvailableLongTime {
   private RouterContext routerContext;
   private FederationRPCMetrics rpcMetrics;
 
-  @After
+  @AfterEach
   public void cleanup() throws IOException {
     rpcMetrics = null;
     routerContext = null;

+ 63 - 73
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestObserverWithRouter.java

@@ -21,10 +21,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMENODES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_STATE_CONTEXT_ENABLED_KEY;
@@ -215,8 +215,8 @@ public class TestObserverWithRouter {
     List<? extends FederationNamenodeContext> namenodes = routerContext
         .getRouter().getNamenodeResolver()
         .getNamenodesForNameserviceId(cluster.getNameservices().get(0), true);
-    assertEquals("First namenode should be observer", namenodes.get(0).getState(),
-        FederationNamenodeServiceState.OBSERVER);
+    assertEquals(namenodes.get(0).getState(), FederationNamenodeServiceState.OBSERVER,
+        "First namenode should be observer");
     Path path = new Path("/testFile");
     // Send create call
     fileSystem.create(path).close();
@@ -227,12 +227,12 @@ public class TestObserverWithRouter {
     long rpcCountForActive = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getActiveProxyOps();
     // Create and complete calls should be sent to active
-    assertEquals("Two calls should be sent to active", 2, rpcCountForActive);
+    assertEquals(2, rpcCountForActive, "Two calls should be sent to active");
 
     long rpcCountForObserver = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getObserverProxyOps();
     // getBlockLocations should be sent to observer
-    assertEquals("One call should be sent to observer", 1, rpcCountForObserver);
+    assertEquals(1, rpcCountForObserver, "One call should be sent to observer");
   }
 
   @EnumSource(ConfigSetting.class)
@@ -247,8 +247,8 @@ public class TestObserverWithRouter {
     List<? extends FederationNamenodeContext> namenodes = routerContext
         .getRouter().getNamenodeResolver()
         .getNamenodesForNameserviceId(cluster.getNameservices().get(0), true);
-    assertEquals("First namenode should be observer", namenodes.get(0).getState(),
-        FederationNamenodeServiceState.OBSERVER);
+    assertEquals(namenodes.get(0).getState(), FederationNamenodeServiceState.OBSERVER,
+        "First namenode should be observer");
     Path path = new Path("/testFile");
     // Send Create call to active
     fileSystem.create(path).close();
@@ -259,11 +259,11 @@ public class TestObserverWithRouter {
     long rpcCountForActive = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getActiveProxyOps();
     // Create, complete and getBlockLocations calls should be sent to active
-    assertEquals("Three calls should be sent to active", 3, rpcCountForActive);
+    assertEquals(3, rpcCountForActive, "Three calls should be sent to active");
 
     long rpcCountForObserver = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getObserverProxyOps();
-    assertEquals("No call should be sent to observer", 0, rpcCountForObserver);
+    assertEquals(0, rpcCountForObserver, "No call should be sent to observer");
   }
 
   @EnumSource(ConfigSetting.class)
@@ -284,11 +284,11 @@ public class TestObserverWithRouter {
     long rpcCountForActive = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getActiveProxyOps();
     // Create, complete and read calls should be sent to active
-    assertEquals("Three calls should be sent to active", 3, rpcCountForActive);
+    assertEquals(3, rpcCountForActive, "Three calls should be sent to active");
 
     long rpcCountForObserver = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getObserverProxyOps();
-    assertEquals("Zero calls should be sent to observer", 0, rpcCountForObserver);
+    assertEquals(0, rpcCountForObserver, "Zero calls should be sent to observer");
   }
 
   @EnumSource(ConfigSetting.class)
@@ -301,9 +301,9 @@ public class TestObserverWithRouter {
 
     // Stop observer NN
     int nnIndex = stopObserver(1);
-    assertNotEquals("No observer found", 3, nnIndex);
+    assertNotEquals(3, nnIndex, "No observer found");
     nnIndex = stopObserver(1);
-    assertNotEquals("No observer found", 4, nnIndex);
+    assertNotEquals(4, nnIndex, "No observer found");
 
     // Send read request
     fileSystem.open(path).close();
@@ -311,13 +311,11 @@ public class TestObserverWithRouter {
     long rpcCountForActive = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getActiveProxyOps();
     // Create, complete and getBlockLocation calls should be sent to active
-    assertEquals("Three calls should be sent to active", 3,
-        rpcCountForActive);
+    assertEquals(3, rpcCountForActive, "Three calls should be sent to active");
 
     long rpcCountForObserver = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getObserverProxyOps();
-    assertEquals("No call should send to observer", 0,
-        rpcCountForObserver);
+    assertEquals(0, rpcCountForObserver, "No call should send to observer");
   }
 
   @EnumSource(ConfigSetting.class)
@@ -341,14 +339,13 @@ public class TestObserverWithRouter {
     long expectedObserverRpc = 1;
 
     // Create and complete calls should be sent to active
-    assertEquals("Two calls should be sent to active",
-        expectedActiveRpc, rpcCountForActive);
+    assertEquals(expectedActiveRpc, rpcCountForActive, "Two calls should be sent to active");
 
     long rpcCountForObserver = routerContext.getRouter()
         .getRpcServer().getRPCMetrics().getObserverProxyOps();
     // getBlockLocation call should send to observer
-    assertEquals("Read should be success with another observer",
-        expectedObserverRpc, rpcCountForObserver);
+    assertEquals(expectedObserverRpc, rpcCountForObserver,
+        "Read should be success with another observer");
 
     // Stop one observer NN
     stopObserver(1);
@@ -361,13 +358,11 @@ public class TestObserverWithRouter {
 
     // getBlockLocation call should be sent to active
     expectedActiveRpc += 1;
-    assertEquals("One call should be sent to active", expectedActiveRpc,
-        rpcCountForActive);
+    assertEquals(expectedActiveRpc, rpcCountForActive, "One call should be sent to active");
     expectedObserverRpc += 0;
     rpcCountForObserver = routerContext.getRouter()
         .getRpcServer().getRPCMetrics().getObserverProxyOps();
-    assertEquals("No call should send to observer",
-        expectedObserverRpc, rpcCountForObserver);
+    assertEquals(expectedObserverRpc, rpcCountForObserver, "No call should send to observer");
   }
 
   private int stopObserver(int num) {
@@ -478,8 +473,7 @@ public class TestObserverWithRouter {
 
     // Create, complete and getBlockLocations
     // calls should be sent to active.
-    assertEquals("Three calls should be send to active",
-        3, rpcCountForActive);
+    assertEquals(3, rpcCountForActive, "Three calls should be send to active");
 
 
     boolean hasUnavailable = false;
@@ -494,7 +488,7 @@ public class TestObserverWithRouter {
     }
     // After attempting to communicate with unavailable observer namenode,
     // its state is updated to unavailable.
-    assertTrue("There must be unavailable namenodes", hasUnavailable);
+    assertTrue(hasUnavailable, "There must be unavailable namenodes");
   }
 
   @EnumSource(ConfigSetting.class)
@@ -508,16 +502,14 @@ public class TestObserverWithRouter {
     long rpcCountForActive = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getActiveProxyOps();
     // Create and complete calls should be sent to active
-    assertEquals("Two calls should be sent to active", 2,
-        rpcCountForActive);
+    assertEquals(2, rpcCountForActive, "Two calls should be sent to active");
 
     // Send msync
     fileSystem.msync();
     rpcCountForActive = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getActiveProxyOps();
     // 2 msync calls should be sent. One to each active namenode in the two namespaces.
-    assertEquals("Four calls should be sent to active", 4,
-        rpcCountForActive);
+    assertEquals(4, rpcCountForActive, "Four calls should be sent to active");
   }
 
   @EnumSource(ConfigSetting.class)
@@ -527,8 +519,8 @@ public class TestObserverWithRouter {
     List<? extends FederationNamenodeContext> namenodes = routerContext
         .getRouter().getNamenodeResolver()
         .getNamenodesForNameserviceId(cluster.getNameservices().get(0), true);
-    assertEquals("First namenode should be observer", namenodes.get(0).getState(),
-        FederationNamenodeServiceState.OBSERVER);
+    assertEquals(namenodes.get(0).getState(), FederationNamenodeServiceState.OBSERVER,
+        "First namenode should be observer");
     Path path = new Path("/");
 
     long rpcCountForActive;
@@ -541,12 +533,12 @@ public class TestObserverWithRouter {
     rpcCountForActive = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getActiveProxyOps();
     // getListingCall sent to active.
-    assertEquals("Only one call should be sent to active", 1, rpcCountForActive);
+    assertEquals(1, rpcCountForActive, "Only one call should be sent to active");
 
     rpcCountForObserver = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getObserverProxyOps();
     // getList call should be sent to observer
-    assertEquals("No calls should be sent to observer", 0, rpcCountForObserver);
+    assertEquals(0, rpcCountForObserver, "No calls should be sent to observer");
   }
 
   @Test
@@ -555,8 +547,8 @@ public class TestObserverWithRouter {
     List<? extends FederationNamenodeContext> namenodes = routerContext
         .getRouter().getNamenodeResolver()
         .getNamenodesForNameserviceId(cluster.getNameservices().get(0), true);
-    assertEquals("First namenode should be observer", namenodes.get(0).getState(),
-        FederationNamenodeServiceState.OBSERVER);
+    assertEquals(namenodes.get(0).getState(), FederationNamenodeServiceState.OBSERVER,
+        "First namenode should be observer");
     Path path = new Path("/");
 
     long rpcCountForActive;
@@ -569,12 +561,12 @@ public class TestObserverWithRouter {
     rpcCountForActive = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getActiveProxyOps();
     // Two msync calls to the active namenodes.
-    assertEquals("Two calls should be sent to active", 2, rpcCountForActive);
+    assertEquals(2, rpcCountForActive, "Two calls should be sent to active");
 
     rpcCountForObserver = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getObserverProxyOps();
     // getList call should be sent to observer
-    assertEquals("One call should be sent to observer", 1, rpcCountForObserver);
+    assertEquals(1, rpcCountForObserver, "One call should be sent to observer");
   }
 
   @Test
@@ -695,7 +687,7 @@ public class TestObserverWithRouter {
 
     // Get object storing state of the namespace in the shared RouterStateIdContext
     LongAccumulator namespaceStateId  = routerStateIdContext.getNamespaceStateId("ns0");
-    assertEquals("Router's shared should have progressed.", 21, namespaceStateId.get());
+    assertEquals(21, namespaceStateId.get(), "Router's shared should have progressed.");
   }
 
   @EnumSource(ConfigSetting.class)
@@ -736,9 +728,9 @@ public class TestObserverWithRouter {
         .getRPCMetrics().getObserverProxyOps();
 
     // First list status goes to active
-    assertEquals("One call should be sent to active", 1, rpcCountForActive);
+    assertEquals(1, rpcCountForActive, "One call should be sent to active");
     // Last two listStatuses  go to observer.
-    assertEquals("Two calls should be sent to observer", 2, rpcCountForObserver);
+    assertEquals(2, rpcCountForObserver, "Two calls should be sent to observer");
 
     Assertions.assertSame(namespaceStateId1, namespaceStateId2,
         "The same object should be used in the shared RouterStateIdContext");
@@ -811,8 +803,8 @@ public class TestObserverWithRouter {
         10000,
         "Timeout: Namespace state was never considered stale.");
     FileStatus[] rootFolderAfterMkdir = fileSystem.listStatus(rootPath);
-    assertEquals("List-status should show newly created directories.",
-        initialLengthOfRootListing + 10, rootFolderAfterMkdir.length);
+    assertEquals(initialLengthOfRootListing + 10, rootFolderAfterMkdir.length,
+        "List-status should show newly created directories.");
   }
 
   @EnumSource(ConfigSetting.class)
@@ -828,8 +820,8 @@ public class TestObserverWithRouter {
     List<? extends FederationNamenodeContext> namenodes = routerContext
         .getRouter().getNamenodeResolver()
         .getNamenodesForNameserviceId(cluster.getNameservices().get(0), true);
-    assertEquals("First namenode should be observer", namenodes.get(0).getState(),
-        FederationNamenodeServiceState.OBSERVER);
+    assertEquals(namenodes.get(0).getState(), FederationNamenodeServiceState.OBSERVER,
+        "First namenode should be observer");
     Path path = new Path("/");
 
     long rpcCountForActive;
@@ -851,18 +843,18 @@ public class TestObserverWithRouter {
     switch (configSetting) {
     case USE_NAMENODE_PROXY_FLAG:
       // First read goes to active.
-      assertEquals("Calls sent to the active", 1, rpcCountForActive);
+      assertEquals(1, rpcCountForActive, "Calls sent to the active");
       // The rest of the reads are sent to the observer.
-      assertEquals("Reads sent to observer", numListings - 1, rpcCountForObserver);
+      assertEquals(numListings - 1, rpcCountForObserver, "Reads sent to observer");
       break;
     case USE_ROUTER_OBSERVER_READ_PROXY_PROVIDER:
     case USE_ROUTER_OBSERVER_READ_CONFIGURED_FAILOVER_PROXY_PROVIDER:
       // An msync is sent to each active namenode for each read.
       // Total msyncs will be (numListings * num_of_nameservices).
-      assertEquals("Msyncs sent to the active namenodes",
-          NUM_NAMESERVICES * numListings, rpcCountForActive);
+      assertEquals(NUM_NAMESERVICES * numListings, rpcCountForActive,
+          "Msyncs sent to the active namenodes");
       // All reads should be sent of the observer.
-      assertEquals("Reads sent to observer", numListings, rpcCountForObserver);
+      assertEquals(numListings, rpcCountForObserver, "Reads sent to observer");
       break;
     default:
       Assertions.fail("Unknown config setting: " + configSetting);
@@ -882,8 +874,8 @@ public class TestObserverWithRouter {
     List<? extends FederationNamenodeContext> namenodes = routerContext
         .getRouter().getNamenodeResolver()
         .getNamenodesForNameserviceId(cluster.getNameservices().get(0), true);
-    assertEquals("First namenode should be observer", namenodes.get(0).getState(),
-        FederationNamenodeServiceState.OBSERVER);
+    assertEquals(namenodes.get(0).getState(), FederationNamenodeServiceState.OBSERVER,
+        "First namenode should be observer");
     Path path = new Path("/");
 
     long rpcCountForActive;
@@ -904,18 +896,17 @@ public class TestObserverWithRouter {
     switch (configSetting) {
     case USE_NAMENODE_PROXY_FLAG:
       // First read goes to active.
-      assertEquals("Calls sent to the active", 1, rpcCountForActive);
+      assertEquals(1, rpcCountForActive, "Calls sent to the active");
       // The rest of the reads are sent to the observer.
-      assertEquals("Reads sent to observer", 2, rpcCountForObserver);
+      assertEquals(2, rpcCountForObserver, "Reads sent to observer");
       break;
     case USE_ROUTER_OBSERVER_READ_PROXY_PROVIDER:
     case USE_ROUTER_OBSERVER_READ_CONFIGURED_FAILOVER_PROXY_PROVIDER:
       // 4 msyncs expected. 2 for the first read, and 2 for the third read
       // after the auto-msync period has elapsed during the sleep.
-      assertEquals("Msyncs sent to the active namenodes",
-          4, rpcCountForActive);
+      assertEquals(4, rpcCountForActive, "Msyncs sent to the active namenodes");
       // All three reads should be sent of the observer.
-      assertEquals("Reads sent to observer", 3, rpcCountForObserver);
+      assertEquals(3, rpcCountForObserver, "Reads sent to observer");
       break;
     default:
       Assertions.fail("Unknown config setting: " + configSetting);
@@ -935,8 +926,8 @@ public class TestObserverWithRouter {
     List<? extends FederationNamenodeContext> namenodes = routerContext
         .getRouter().getNamenodeResolver()
         .getNamenodesForNameserviceId(cluster.getNameservices().get(0), true);
-    assertEquals("First namenode should be observer", namenodes.get(0).getState(),
-        FederationNamenodeServiceState.OBSERVER);
+    assertEquals(namenodes.get(0).getState(), FederationNamenodeServiceState.OBSERVER,
+        "First namenode should be observer");
     Path path = new Path("/");
 
     long rpcCountForActive;
@@ -957,19 +948,18 @@ public class TestObserverWithRouter {
     switch (configSetting) {
     case USE_NAMENODE_PROXY_FLAG:
       // First listing and mkdir go to the active.
-      assertEquals("Calls sent to the active namenodes", 2, rpcCountForActive);
+      assertEquals(2, rpcCountForActive, "Calls sent to the active namenodes");
       // Second listing goes to the observer.
-      assertEquals("Read sent to observer", 1, rpcCountForObserver);
+      assertEquals(1, rpcCountForObserver, "Read sent to observer");
       break;
     case USE_ROUTER_OBSERVER_READ_PROXY_PROVIDER:
     case USE_ROUTER_OBSERVER_READ_CONFIGURED_FAILOVER_PROXY_PROVIDER:
       // 5 calls to the active namenodes expected. 4 msync and a mkdir.
       // Each of the 2 reads results in an msync to 2 nameservices.
       // The mkdir also goes to the active.
-      assertEquals("Calls sent to the active namenodes",
-          5, rpcCountForActive);
+      assertEquals(5, rpcCountForActive, "Calls sent to the active namenodes");
       // Both reads should be sent of the observer.
-      assertEquals("Reads sent to observer", 2, rpcCountForObserver);
+      assertEquals(2, rpcCountForObserver, "Reads sent to observer");
       break;
     default:
       Assertions.fail("Unknown config setting: " + configSetting);
@@ -994,7 +984,7 @@ public class TestObserverWithRouter {
     long rpcCountForActive = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getActiveProxyOps();
     // There should only be one call to the namespace that has an observer.
-    assertEquals("Only one call to the namespace with an observer", 1, rpcCountForActive);
+    assertEquals(1, rpcCountForActive, "Only one call to the namespace with an observer");
   }
 
   @EnumSource(ConfigSetting.class)
@@ -1014,7 +1004,7 @@ public class TestObserverWithRouter {
     long rpcCountForActive = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getActiveProxyOps();
     // There should no calls to any namespace.
-    assertEquals("No calls to any namespace", 0, rpcCountForActive);
+    assertEquals(0, rpcCountForActive, "No calls to any namespace");
   }
 
   @EnumSource(ConfigSetting.class)
@@ -1043,12 +1033,12 @@ public class TestObserverWithRouter {
 
     long observerCount2 = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getObserverProxyOps();
-    assertEquals("There should no extra calls to the observer", observerCount1, observerCount2);
+    assertEquals(observerCount1, observerCount2, "There should no extra calls to the observer");
 
     fileSystem.open(path).close();
     long observerCount3 = routerContext.getRouter().getRpcServer()
         .getRPCMetrics().getObserverProxyOps();
-    assertTrue("Old filesystem will send calls to observer", observerCount3 > observerCount2);
+    assertTrue(observerCount3 > observerCount2, "Old filesystem will send calls to observer");
   }
 
   void restartActiveWithStateIDContextDisabled() throws Exception {

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRenewLeaseWithSameINodeId.java

@@ -25,13 +25,13 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.MockResolver;
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Testing DFSClient renewLease with same INodeId.
@@ -44,7 +44,7 @@ public class TestRenewLeaseWithSameINodeId {
   /** The first Router Context for this federated cluster. */
   private static MiniRouterDFSCluster.RouterContext routerContext;
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     cluster = new MiniRouterDFSCluster(false, 2);
     cluster.setNumDatanodesPerNameservice(3);
@@ -65,7 +65,7 @@ public class TestRenewLeaseWithSameINodeId {
     routerContext = cluster.getRouters().get(0);
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     cluster.shutdown();
   }

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouter.java

@@ -19,10 +19,10 @@ package org.apache.hadoop.hdfs.server.federation.router;
 
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.Mockito.mock;
 
 import java.io.IOException;
@@ -41,8 +41,8 @@ import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.service.Service.STATE;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * The safe mode for the {@link Router} controlled by
@@ -52,7 +52,7 @@ public class TestRouter {
 
   private static Configuration conf;
 
-  @BeforeClass
+  @BeforeAll
   public static void create() throws IOException {
     // Basic configuration without the state store
     conf = new Configuration();

+ 11 - 11
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java

@@ -19,10 +19,10 @@ package org.apache.hadoop.hdfs.server.federation.router;
 
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createNamenodeReport;
 import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.synchronizeRecords;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.lang.reflect.Field;
@@ -64,10 +64,10 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 /**
@@ -85,7 +85,7 @@ public class TestRouterAdmin {
   protected static StateStoreService stateStore;
   protected static RouterRpcClient mockRpcClient;
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     cluster = new StateStoreDFSCluster(false, 1);
     // Build and start a router with State Store + admin + RPC.
@@ -164,12 +164,12 @@ public class TestRouterAdmin {
     );
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     cluster.stopRouter(routerContext);
   }
 
-  @Before
+  @BeforeEach
   public void testSetup() throws Exception {
     assertTrue(
         synchronizeRecords(stateStore, mockMountTable, MountTable.class));

+ 70 - 82
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminCLI.java

@@ -18,10 +18,10 @@
 package org.apache.hadoop.hdfs.server.federation.router;
 
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createNamenodeReport;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -63,10 +63,10 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import java.util.function.Supplier;
@@ -90,7 +90,7 @@ public class TestRouterAdminCLI {
   private static final PrintStream OLD_OUT = System.out;
   private static final PrintStream OLD_ERR = System.err;
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     cluster = new StateStoreDFSCluster(false, 1,
         MultipleDestinationMountTableResolver.class);
@@ -145,14 +145,14 @@ public class TestRouterAdminCLI {
 
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownCluster() {
     cluster.stopRouter(routerContext);
     cluster.shutdown();
     cluster = null;
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     // set back system out/err
     System.setOut(OLD_OUT);
@@ -214,8 +214,8 @@ public class TestRouterAdminCLI {
     System.setOut(new PrintStream(out));
     assertEquals(0, ToolRunner.run(admin, argv));
     String response = out.toString();
-    assertTrue("The response should have " + src + ": " + response, response.contains(src));
-    assertTrue("The response should have " + dest + ": " + response, response.contains(dest));
+    assertTrue(response.contains(src), "The response should have " + src + ": " + response);
+    assertTrue(response.contains(dest), "The response should have " + dest + ": " + response);
   }
 
   @Test
@@ -347,21 +347,21 @@ public class TestRouterAdminCLI {
     argv = new String[] {"-ls", src};
     assertEquals(0, ToolRunner.run(admin, argv));
     String response = out.toString();
-    assertTrue("Wrong response: " + response, response.contains(src));
+    assertTrue(response.contains(src), "Wrong response: " + response);
 
     // Test with not-normalized src input
     argv = new String[] {"-ls", srcWithSlash};
     assertEquals(0, ToolRunner.run(admin, argv));
     response = out.toString();
-    assertTrue("Wrong response: " + response, response.contains(src));
+    assertTrue(response.contains(src), "Wrong response: " + response);
 
     // Test with wrong number of arguments
     argv = new String[] {"-ls", srcWithSlash, "check", "check2"};
     System.setErr(new PrintStream(err));
     ToolRunner.run(admin, argv);
     response = err.toString();
-    assertTrue("Wrong response: " + response,
-        response.contains("Too many arguments, Max=2 argument allowed"));
+    assertTrue(response.contains("Too many arguments, Max=2 argument allowed"),
+        "Wrong response: " + response);
 
     out.reset();
     GetMountTableEntriesRequest getRequest = GetMountTableEntriesRequest
@@ -374,11 +374,10 @@ public class TestRouterAdminCLI {
     argv = new String[] {"-ls"};
     assertEquals(0, ToolRunner.run(admin, argv));
     response = out.toString();
-    assertTrue("Wrong response: " + response, response.contains(src));
+    assertTrue(response.contains(src), "Wrong response: " + response);
     // verify if all the mount table are listed
     for (MountTable entry : getResponse.getEntries()) {
-      assertTrue("Wrong response: " + response,
-          response.contains(entry.getSourcePath()));
+      assertTrue(response.contains(entry.getSourcePath()), "Wrong response: " + response);
     }
   }
 
@@ -403,9 +402,8 @@ public class TestRouterAdminCLI {
     argv = new String[] {"-ls", "-d"};
     assertEquals(0, ToolRunner.run(admin, argv));
     response =  out.toString();
-    assertTrue("Wrong response: " + response, response.contains("Read-Only"));
-    assertTrue("Wrong response: " + response,
-        response.contains("Fault-Tolerant"));
+    assertTrue(response.contains("Read-Only"), "Wrong response: " + response);
+    assertTrue(response.contains("Fault-Tolerant"), "Wrong response: " + response);
   }
 
   @Test
@@ -581,8 +579,8 @@ public class TestRouterAdminCLI {
       // update mount table using normal user
       argv = new String[]{"-update", "/testpath3-1", "ns0", "/testdir3-2",
           "-owner", TEST_USER, "-group", TEST_USER, "-mode", "777"};
-      assertEquals("Normal user update mount table which created by " +
-          "superuser unexpected.", -1, ToolRunner.run(admin, argv));
+      assertEquals(-1, ToolRunner.run(admin, argv), "Normal user update mount table " +
+          "which created by superuser unexpected.");
     } finally {
       // set back login user
       UserGroupInformation.setLoginUser(superUser);
@@ -610,8 +608,7 @@ public class TestRouterAdminCLI {
 
       String[] argv = new String[]{"-add", "/testpath4-1", "ns0", "/testdir4-1",
           "-owner", testUserA, "-group", testGroup, "-mode", "775"};
-      assertEquals("Normal user can't add mount table unexpected.", 0,
-          ToolRunner.run(admin, argv));
+      assertEquals(0, ToolRunner.run(admin, argv), "Normal user can't add mount table unexpected.");
 
       stateStore.loadCache(MountTableStoreImpl.class, true);
       // update mount point with userb which is same group with owner.
@@ -620,8 +617,8 @@ public class TestRouterAdminCLI {
       UserGroupInformation.setLoginUser(userB);
       argv = new String[]{"-update", "/testpath4-1", "ns0", "/testdir4-2",
           "-owner", testUserA, "-group", testGroup, "-mode", "775"};
-      assertEquals("Another user in same group can't update mount table " +
-          "unexpected.", 0, ToolRunner.run(admin, argv));
+      assertEquals(0, ToolRunner.run(admin, argv), "Another user in same group can't update " +
+          "mount table unexpected.");
 
       stateStore.loadCache(MountTableStoreImpl.class, true);
       // update mount point with userc which is not same group with owner.
@@ -630,9 +627,9 @@ public class TestRouterAdminCLI {
       UserGroupInformation.setLoginUser(userC);
       argv = new String[]{"-update", "/testpath4-1", "ns0", "/testdir4-3",
           "-owner", testUserA, "-group", testGroup, "-mode", "775"};
-      assertEquals("Another user not in same group have no permission but " +
-              "update mount table successful unexpected.", -1,
-          ToolRunner.run(admin, argv));
+      assertEquals(-1, ToolRunner.run(admin, argv),
+          "Another user not in same group have no " +
+              "permission but update mount table successful unexpected.");
 
       stateStore.loadCache(MountTableStoreImpl.class, true);
       // add mount point with userd but immediate parent of mount point
@@ -643,8 +640,7 @@ public class TestRouterAdminCLI {
       argv = new String[]{"-add", "/testpath4-1/foo/bar", "ns0",
           "/testdir4-1/foo/bar", "-owner", testUserD, "-group", testGroup,
           "-mode", "775"};
-      assertEquals("Normal user can't add mount table unexpected.", 0,
-          ToolRunner.run(admin, argv));
+      assertEquals(0, ToolRunner.run(admin, argv), "Normal user can't add mount table unexpected.");
 
       // test remove mount point with userc.
       UserGroupInformation.setLoginUser(userC);
@@ -653,8 +649,8 @@ public class TestRouterAdminCLI {
 
       // test remove mount point with userb.
       UserGroupInformation.setLoginUser(userB);
-      assertEquals("Another user in same group can't remove mount table " +
-          "unexpected.", 0, ToolRunner.run(admin, argv));
+      assertEquals(0, ToolRunner.run(admin, argv), "Another user in same group can't " +
+          "remove mount table unexpected.");
     } finally {
       // set back login user
       UserGroupInformation.setLoginUser(superUser);
@@ -685,13 +681,13 @@ public class TestRouterAdminCLI {
       UserGroupInformation.setLoginUser(superUser);
       argv = new String[]{"-update", "/testpath5-1", "ns0", "/testdir5-2",
           "-owner", testUserA, "-group", testGroup, "-mode", "755"};
-      assertEquals("Super user can't update mount table unexpected.", 0,
-          ToolRunner.run(admin, argv));
+      assertEquals(0, ToolRunner.run(admin, argv),
+          "Super user can't update mount table unexpected.");
 
       // test remove mount point with super user.
       argv = new String[]{"-rm", "/testpath5-1"};
-      assertEquals("Super user can't remove mount table unexpected.", 0,
-          ToolRunner.run(admin, argv));
+      assertEquals(0, ToolRunner.run(admin, argv),
+          "Super user can't remove mount table unexpected.");
     } finally {
       // set back login user
       UserGroupInformation.setLoginUser(superUser);
@@ -817,20 +813,20 @@ public class TestRouterAdminCLI {
     System.setOut(new PrintStream(out));
     String[] argv = new String[] {"-add", src, nsId};
     assertEquals(-1, ToolRunner.run(admin, argv));
-    assertTrue("Wrong message: " + out, out.toString().contains(
+    assertTrue(out.toString().contains(
         "\t[-add <source> <nameservice1, nameservice2, ...> <destination> "
             + "[-readonly] [-faulttolerant] "
             + "[-order HASH|LOCAL|RANDOM|HASH_ALL|SPACE|LEADER_FOLLOWER] "
-            + "-owner <owner> -group <group> -mode <mode>]"));
+            + "-owner <owner> -group <group> -mode <mode>]"), "Wrong message: " + out);
     out.reset();
 
     argv = new String[] {"-update", src, nsId};
     assertEquals(-1, ToolRunner.run(admin, argv));
-    assertTrue("Wrong message: " + out, out.toString().contains(
+    assertTrue(out.toString().contains(
         "\t[-update <source> [<nameservice1, nameservice2, ...> <destination>] "
             + "[-readonly true|false] [-faulttolerant true|false] "
             + "[-order HASH|LOCAL|RANDOM|HASH_ALL|SPACE|LEADER_FOLLOWER] "
-            + "-owner <owner> -group <group> -mode <mode>]"));
+            + "-owner <owner> -group <group> -mode <mode>]"), "Wrong message: " + out);
     out.reset();
 
     argv = new String[] {"-rm"};
@@ -904,7 +900,7 @@ public class TestRouterAdminCLI {
         + "\t[-getDisabledNameservices]\n"
         + "\t[-refresh]\n"
         + "\t[-refreshRouterArgs <host:ipc_port> <key> [arg1..argn]]";
-    assertTrue("Wrong message: " + out, out.toString().contains(expected));
+    assertTrue(out.toString().contains(expected), "Wrong message: " + out);
     out.reset();
   }
 
@@ -1144,14 +1140,12 @@ public class TestRouterAdminCLI {
     out.reset();
     assertEquals(-1, ToolRunner.run(admin,
         new String[] {"-safemode", "get", "-random", "check" }));
-    assertTrue(err.toString(), err.toString()
-        .contains("safemode: Too many arguments, Max=1 argument allowed only"));
+    assertTrue(err.toString()
+        .contains("safemode: Too many arguments, Max=1 argument allowed only"), err.toString());
     err.reset();
 
-    assertEquals(-1,
-        ToolRunner.run(admin, new String[] {"-safemode", "check" }));
-    assertTrue(err.toString(),
-        err.toString().contains("safemode: Invalid argument: check"));
+    assertEquals(-1, ToolRunner.run(admin, new String[] {"-safemode", "check" }));
+    assertTrue(err.toString().contains("safemode: Invalid argument: check"), err.toString());
     err.reset();
   }
 
@@ -1170,8 +1164,7 @@ public class TestRouterAdminCLI {
     RBFMetrics metrics = router.getMetrics();
     String jsonString = metrics.getRouterStatus();
     String result = router.getNamenodeMetrics().getSafemode();
-    assertTrue("Wrong safe mode message: " + result,
-        result.startsWith("Safe mode is ON."));
+    assertTrue(result.startsWith("Safe mode is ON."), "Wrong safe mode message: " + result);
 
     // verify state using RBFMetrics
     assertEquals(RouterServiceState.SAFEMODE.toString(), jsonString);
@@ -1183,7 +1176,7 @@ public class TestRouterAdminCLI {
         ToolRunner.run(admin, new String[] {"-safemode", "leave" }));
     jsonString = metrics.getRouterStatus();
     result = router.getNamenodeMetrics().getSafemode();
-    assertEquals("Wrong safe mode message: " + result, "", result);
+    assertEquals("", result, "Wrong safe mode message: " + result);
 
     // verify state
     assertEquals(RouterServiceState.RUNNING.toString(), jsonString);
@@ -1273,8 +1266,7 @@ public class TestRouterAdminCLI {
     System.setOut(new PrintStream(out));
     assertEquals(0, ToolRunner.run(admin,
         new String[] {"-getDisabledNameservices"}));
-    assertTrue("ns0 should be reported: " + out,
-        out.toString().contains("ns0"));
+    assertTrue(out.toString().contains("ns0"), "ns0 should be reported: " + out);
 
     // Enable a name service and check if it's there
     assertEquals(0, ToolRunner.run(admin,
@@ -1284,22 +1276,20 @@ public class TestRouterAdminCLI {
     stateStore.loadCache(DisabledNameserviceStoreImpl.class, true);
     assertEquals(0, ToolRunner.run(admin,
         new String[] {"-getDisabledNameservices"}));
-    assertFalse("ns0 should not be reported: " + out,
-        out.toString().contains("ns0"));
+    assertFalse(out.toString().contains("ns0"), "ns0 should not be reported: " + out);
 
     // Wrong commands
     System.setErr(new PrintStream(err));
     assertEquals(-1, ToolRunner.run(admin,
         new String[] {"-nameservice", "enable"}));
     String msg = "Not enough parameters specificed for cmd -nameservice";
-    assertTrue("Got error: " + err.toString(),
-        err.toString().startsWith(msg));
+    assertTrue(err.toString().startsWith(msg), "Got error: " + err.toString());
 
     err.reset();
     assertEquals(-1, ToolRunner.run(admin,
         new String[] {"-nameservice", "wrong", "ns0"}));
-    assertTrue("Got error: " + err.toString(),
-        err.toString().startsWith("nameservice: Unknown command: wrong"));
+    assertTrue(err.toString().startsWith("nameservice: Unknown command: wrong"),
+        "Got error: " + err.toString());
 
     err.reset();
     ToolRunner.run(admin,
@@ -1357,8 +1347,8 @@ public class TestRouterAdminCLI {
     String[] argv = new String[] {"-update", src, nsId, dest};
     // Update shall fail if the mount entry doesn't exist.
     assertEquals(-1, ToolRunner.run(admin, argv));
-    assertTrue(err.toString(), err.toString()
-        .contains("update: /test-updateNonExistingMounttable doesn't exist."));
+    assertTrue(err.toString()
+        .contains("update: /test-updateNonExistingMounttable doesn't exist."), err.toString());
   }
 
   @Test
@@ -1510,30 +1500,28 @@ public class TestRouterAdminCLI {
     argv = new String[] {"-update", "/noMount", "-readonly", "false"};
     System.setErr(new PrintStream(err));
     assertEquals(-1, ToolRunner.run(admin, argv));
-    assertTrue(err.toString(),
-        err.toString().contains("update: /noMount doesn't exist."));
+    assertTrue(err.toString().contains("update: /noMount doesn't exist."), err.toString());
     err.reset();
 
     // Check update if no true/false value is passed for readonly.
     argv = new String[] {"-update", src, "-readonly", "check"};
     assertEquals(-1, ToolRunner.run(admin, argv));
-    assertTrue(err.toString(), err.toString().contains("update: "
-        + "Invalid argument: check. Please specify either true or false."));
+    assertTrue(err.toString().contains("update: "
+        + "Invalid argument: check. Please specify either true or false."), err.toString());
     err.reset();
 
     // Check update with missing value is passed for faulttolerant.
     argv = new String[] {"-update", src, "ns1", "/tmp", "-faulttolerant"};
     assertEquals(-1, ToolRunner.run(admin, argv));
-    assertTrue(err.toString(),
-        err.toString().contains("update: Unable to parse arguments:"
-            + " no value provided for -faulttolerant"));
+    assertTrue(err.toString().contains("update: Unable to parse arguments:"
+            + " no value provided for -faulttolerant"), err.toString());
     err.reset();
 
     // Check update with invalid order.
     argv = new String[] {"-update", src, "ns1", "/tmp", "-order", "Invalid"};
     assertEquals(-1, ToolRunner.run(admin, argv));
-    assertTrue(err.toString(), err.toString().contains(
-        "update: Unable to parse arguments: Cannot parse order: Invalid"));
+    assertTrue(err.toString().contains(
+        "update: Unable to parse arguments: Cannot parse order: Invalid"), err.toString());
     err.reset();
   }
 
@@ -1752,9 +1740,9 @@ public class TestRouterAdminCLI {
         counter.put(nsId, new AtomicInteger(1));
       }
     }
-    assertEquals("Wrong counter size: " + counter, 2, counter.size());
-    assertTrue(counter + " should contain ns0", counter.containsKey("ns0"));
-    assertTrue(counter + " should contain ns1", counter.containsKey("ns1"));
+    assertEquals(2, counter.size(), "Wrong counter size: " + counter);
+    assertTrue(counter.containsKey("ns0"), counter + " should contain ns0");
+    assertTrue(counter.containsKey("ns1"), counter + " should contain ns1");
 
     // Bad cases
     argv = new String[] {"-getDestination"};
@@ -1770,16 +1758,16 @@ public class TestRouterAdminCLI {
     String[] argv = new String[] {"-add", "/mntft", "ns01", "/tmp",
         "-faulttolerant"};
     assertEquals(-1, ToolRunner.run(admin, argv));
-    assertTrue(err.toString(), err.toString().contains(
-        "Invalid entry, fault tolerance requires multiple destinations"));
+    assertTrue(err.toString().contains(
+        "Invalid entry, fault tolerance requires multiple destinations"), err.toString());
     err.reset();
 
     System.setErr(new PrintStream(err));
     argv = new String[] {"-add", "/mntft", "ns0,ns1", "/tmp",
         "-order", "HASH", "-faulttolerant"};
     assertEquals(-1, ToolRunner.run(admin, argv));
-    assertTrue(err.toString(), err.toString().contains(
-        "Invalid entry, fault tolerance only supported for ALL order"));
+    assertTrue(err.toString().contains(
+        "Invalid entry, fault tolerance only supported for ALL order"), err.toString());
     err.reset();
 
     argv = new String[] {"-add", "/mntft", "ns0,ns1", "/tmp",
@@ -1946,8 +1934,8 @@ public class TestRouterAdminCLI {
     // mount points were already added
     assertNotEquals(0, ToolRunner.run(admin, argv));
 
-    assertTrue("The error message should return failed entries",
-        err.toString().contains("Cannot add mount points: [/testAddMultiMountPoints-01"));
+    assertTrue(err.toString().contains("Cannot add mount points: [/testAddMultiMountPoints-01"),
+        "The error message should return failed entries");
   }
 
   private void addMountTable(String src, String nsId, String dst)
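Across this file the only behavioral change is where the failure message goes: JUnit 4's org.junit.Assert takes the message as the first argument, while JUnit 5's org.junit.jupiter.api.Assertions takes it as an optional trailing argument (a String or a Supplier<String>). A minimal sketch of that shape, with illustrative values that are not taken from this patch:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    class AssertionOrderExample {
      @Test
      void messageMovesToTheEnd() {
        int exitCode = 0;
        String out = "/mnt -> ns0->/mnt";
        // JUnit 4 form was: assertEquals("unexpected exit code", 0, exitCode);
        assertEquals(0, exitCode, "unexpected exit code");
        // A Supplier defers building the message until the assertion actually fails,
        // which helps when the message concatenates large command output.
        assertTrue(out.contains("/mnt"), () -> "missing entry: " + out);
      }
    }

The patch itself keeps plain String messages; the Supplier variant is shown only as an available alternative.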

+ 17 - 18
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdminGenericRefresh.java

@@ -24,17 +24,17 @@ import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
 import org.apache.hadoop.ipc.RefreshHandler;
 import org.apache.hadoop.ipc.RefreshRegistry;
 import org.apache.hadoop.ipc.RefreshResponse;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import java.io.IOException;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Before all tests, a router is spun up.
@@ -49,7 +49,7 @@ public class TestRouterAdminGenericRefresh {
   private static RefreshHandler firstHandler;
   private static RefreshHandler secondHandler;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUpBeforeClass() throws Exception {
 
     // Build and start a router with admin + RPC
@@ -63,7 +63,7 @@ public class TestRouterAdminGenericRefresh {
     admin = new RouterAdmin(config);
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownBeforeClass() throws IOException {
     if (router != null) {
       router.stop();
@@ -71,7 +71,7 @@ public class TestRouterAdminGenericRefresh {
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     // Register Handlers, first one just sends an ok response
     firstHandler = Mockito.mock(RefreshHandler.class);
@@ -91,7 +91,7 @@ public class TestRouterAdminGenericRefresh {
     RefreshRegistry.defaultRegistry().register("secondHandler", secondHandler);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");
     RefreshRegistry.defaultRegistry().unregisterAll("secondHandler");
@@ -101,7 +101,7 @@ public class TestRouterAdminGenericRefresh {
   public void testInvalidCommand() throws Exception {
     String[] args = new String[]{"-refreshRouterArgs", "nn"};
     int exitCode = admin.run(args);
-    assertEquals("RouterAdmin should fail due to bad args", -1, exitCode);
+    assertEquals(-1, exitCode, "RouterAdmin should fail due to bad args");
   }
 
   @Test
@@ -109,8 +109,7 @@ public class TestRouterAdminGenericRefresh {
     String[] argv = new String[]{"-refreshRouterArgs", "localhost:" +
         router.getAdminServerAddress().getPort(), "unregisteredIdentity"};
     int exitCode = admin.run(argv);
-    assertEquals("RouterAdmin should fail due to no handler registered",
-        -1, exitCode);
+    assertEquals(-1, exitCode, "RouterAdmin should fail due to no handler registered");
   }
 
   @Test
@@ -118,7 +117,7 @@ public class TestRouterAdminGenericRefresh {
     String[] args = new String[]{"-refreshRouterArgs", "localhost:" +
         router.getAdminServerAddress().getPort(), "firstHandler"};
     int exitCode = admin.run(args);
-    assertEquals("RouterAdmin should succeed", 0, exitCode);
+    assertEquals(0, exitCode, "RouterAdmin should succeed");
 
     Mockito.verify(firstHandler).handleRefresh("firstHandler", new String[]{});
     // Second handler was never called
@@ -131,12 +130,12 @@ public class TestRouterAdminGenericRefresh {
     String[] args = new String[]{"-refreshRouterArgs", "localhost:" +
         router.getAdminServerAddress().getPort(), "secondHandler", "one"};
     int exitCode = admin.run(args);
-    assertEquals("RouterAdmin should return 2", 2, exitCode);
+    assertEquals(2, exitCode, "RouterAdmin should return 2");
 
     exitCode = admin.run(new String[]{"-refreshRouterArgs", "localhost:" +
         router.getAdminServerAddress().getPort(),
         "secondHandler", "one", "two"});
-    assertEquals("RouterAdmin should now return 3", 3, exitCode);
+    assertEquals(3, exitCode, "RouterAdmin should now return 3");
 
     Mockito.verify(secondHandler).handleRefresh(
         "secondHandler", new String[]{"one"});
@@ -152,7 +151,7 @@ public class TestRouterAdminGenericRefresh {
     String[] args = new String[]{"-refreshRouterArgs", "localhost:" +
         router.getAdminServerAddress().getPort(), "firstHandler"};
     int exitCode = admin.run(args);
-    assertEquals("RouterAdmin should return -1", -1, exitCode);
+    assertEquals(-1, exitCode, "RouterAdmin should return -1");
   }
 
   @Test
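The lifecycle annotations map one-to-one: @BeforeClass/@AfterClass become @BeforeAll/@AfterAll (still static under the default per-method test instance lifecycle), and @Before/@After become @BeforeEach/@AfterEach. A minimal sketch under those assumptions, with illustrative fixture names:

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    class LifecycleExample {
      @BeforeAll
      static void startSharedFixture() { /* runs once before all tests; must be static */ }

      @BeforeEach
      void registerHandlers() { /* runs before every @Test */ }

      @Test
      void refreshSucceeds() { }

      @AfterEach
      void unregisterHandlers() { /* runs after every @Test */ }

      @AfterAll
      static void stopSharedFixture() { /* runs once after all tests; must be static */ }
    }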

+ 12 - 16
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAllResolver.java

@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.net.URI;
@@ -48,9 +48,9 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntr
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests the use of the resolvers that write in all subclusters from the
@@ -81,7 +81,7 @@ public class TestRouterAllResolver {
   private static List<FileSystem> nsFss = new LinkedList<>();
 
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     // 2 nameservices with 1 namenode each (no HA needed for this test)
     cluster = new StateStoreDFSCluster(
@@ -121,7 +121,7 @@ public class TestRouterAllResolver {
     assertEquals(NUM_NAMESPACES, nsFss.size());
   }
 
-  @After
+  @AfterEach
   public void cleanup() {
     cluster.shutdown();
     cluster = null;
@@ -185,11 +185,9 @@ public class TestRouterAllResolver {
     String testFile = path + "/dir2/dir22/dir220/file-append.txt";
     createTestFile(routerFs, testFile);
     Path testFilePath = new Path(testFile);
-    assertTrue("Created file is too small",
-        routerFs.getFileStatus(testFilePath).getLen() > 50);
+    assertTrue(routerFs.getFileStatus(testFilePath).getLen() > 50, "Created file is too small");
     appendTestFile(routerFs, testFile);
-    assertTrue("Append file is too small",
-        routerFs.getFileStatus(testFilePath).getLen() > 110);
+    assertTrue(routerFs.getFileStatus(testFilePath).getLen() > 110, "Append file is too small");
     assertDirsEverywhere(path, 9);
     assertFilesDistributed(path, 15);
 
@@ -200,8 +198,7 @@ public class TestRouterAllResolver {
     routerFs.truncate(testTruncateFilePath, 10);
     TestFileTruncate.checkBlockRecovery(testTruncateFilePath,
         (DistributedFileSystem) routerFs);
-    assertEquals("Truncate file fails", 10,
-        routerFs.getFileStatus(testTruncateFilePath).getLen());
+    assertEquals(10, routerFs.getFileStatus(testTruncateFilePath).getLen(), "Truncate file fails");
     assertDirsEverywhere(path, 9);
     assertFilesDistributed(path, 16);
 
@@ -238,8 +235,7 @@ public class TestRouterAllResolver {
         Path checkPath = getRelativePath(dirPath);
         for (FileSystem nsFs : nsFss) {
           FileStatus fileStatus1 = nsFs.getFileStatus(checkPath);
-          assertTrue(file + " should be a directory",
-              fileStatus1.isDirectory());
+          assertTrue(fileStatus1.isDirectory(), file + " should be a directory");
         }
       }
     }
@@ -279,7 +275,7 @@ public class TestRouterAllResolver {
     assertEquals(numRouterFiles, sumNsFiles);
     if (expectedNumFiles > 0) {
       for (int numFiles : numNsFiles) {
-        assertTrue("Files not distributed: " + numNsFiles, numFiles > 0);
+        assertTrue(numFiles > 0, "Files not distributed: " + numNsFiles);
       }
     }
   }

+ 47 - 50
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java

@@ -24,9 +24,11 @@ import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.simul
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.transitionClusterNSToStandby;
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.transitionClusterNSToActive;
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.net.URI;
@@ -57,10 +59,8 @@ import org.apache.hadoop.test.GenericTestUtils;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -77,7 +77,7 @@ public class TestRouterClientRejectOverload {
 
   private StateStoreDFSCluster cluster;
 
-  @After
+  @AfterEach
   public void cleanup() {
     if (cluster != null) {
       cluster.shutdown();
@@ -85,9 +85,6 @@ public class TestRouterClientRejectOverload {
     }
   }
 
-  @Rule
-  public ExpectedException exceptionRule = ExpectedException.none();
-
   private void setupCluster(boolean overloadControl, boolean ha)
       throws Exception {
     // Build and start a federated cluster
@@ -174,8 +171,8 @@ public class TestRouterClientRejectOverload {
     long proxyOps0 = rpcMetrics0.getProxyOps() - iniProxyOps0;
     long proxyOps1 = rpcMetrics1.getProxyOps() - iniProxyOps1;
     assertEquals(2 * 10, proxyOps0 + proxyOps1);
-    assertTrue(proxyOps0 + " operations: not distributed", proxyOps0 >= 8);
-    assertTrue(proxyOps1 + " operations: not distributed", proxyOps1 >= 8);
+    assertTrue(proxyOps0 >= 8, proxyOps0 + " operations: not distributed");
+    assertTrue(proxyOps1 >= 8, proxyOps1 + " operations: not distributed");
   }
 
   private void testOverloaded(int expOverload) throws Exception {
@@ -221,7 +218,7 @@ public class TestRouterClientRejectOverload {
           routerProto.renewLease(clientName, null);
         } catch (RemoteException re) {
           IOException ioe = re.unwrapRemoteException();
-          assertTrue("Wrong exception: " + ioe, ioe instanceof StandbyException);
+          assertTrue(ioe instanceof StandbyException, "Wrong exception: " + ioe);
           assertExceptionContains("is overloaded", ioe);
           overloadException.incrementAndGet();
         } catch (IOException e) {
@@ -250,10 +247,8 @@ public class TestRouterClientRejectOverload {
     if (expOverloadMin == expOverloadMax) {
       assertEquals(expOverloadMin, num);
     } else {
-      assertTrue("Expected >=" + expOverloadMin + " but was " + num,
-          num >= expOverloadMin);
-      assertTrue("Expected <=" + expOverloadMax + " but was " + num,
-          num <= expOverloadMax);
+      assertTrue(num >= expOverloadMin, "Expected >=" + expOverloadMin + " but was " + num);
+      assertTrue(num <= expOverloadMax, "Expected <=" + expOverloadMax + " but was " + num);
     }
   }
 
@@ -301,7 +296,7 @@ public class TestRouterClientRejectOverload {
    * Client will success after some retries.
    */
   @Test
-  public void testNoNamenodesAvailable() throws Exception{
+  public void testNoNamenodesAvailable() throws Exception {
     setupCluster(false, true);
 
     transitionClusterNSToStandby(cluster);
@@ -322,44 +317,46 @@ public class TestRouterClientRejectOverload {
     FederationRPCMetrics rpcMetrics1 = cluster.getRouters().get(1)
         .getRouter().getRpcServer().getRPCMetrics();
 
-    // Original failures
-    long originalRouter0Failures = rpcMetrics0.getProxyOpNoNamenodes();
-    long originalRouter1Failures = rpcMetrics1.getProxyOpNoNamenodes();
-
     // GetFileInfo will throw Exception
     String exceptionMessage = "org.apache.hadoop.hdfs.server.federation."
         + "router.NoNamenodesAvailableException: No namenodes available "
         + "under nameservice ns0";
-    exceptionRule.expect(RemoteException.class);
-    exceptionRule.expectMessage(exceptionMessage);
-    routerClient.getFileInfo("/");
-
-    // Router 0 failures will increase
-    assertEquals(originalRouter0Failures + 4,
-        rpcMetrics0.getProxyOpNoNamenodes());
-    // Router 1 failures do not change
-    assertEquals(originalRouter1Failures,
-        rpcMetrics1.getProxyOpNoNamenodes());
-
-    // Make name services available
-    transitionClusterNSToActive(cluster, 0);
-    for (RouterContext routerContext : cluster.getRouters()) {
-      // Manually trigger the heartbeat
-      Collection<NamenodeHeartbeatService> heartbeatServices = routerContext
-          .getRouter().getNamenodeHeartbeatServices();
-      for (NamenodeHeartbeatService service : heartbeatServices) {
-        service.periodicInvoke();
+    RemoteException remoteException = assertThrows(RemoteException.class, () -> {
+
+      // Original failures
+      long originalRouter0Failures = rpcMetrics0.getProxyOpNoNamenodes();
+      long originalRouter1Failures = rpcMetrics1.getProxyOpNoNamenodes();
+
+      routerClient.getFileInfo("/");
+
+      // Router 0 failures will increase
+      assertEquals(originalRouter0Failures + 4,
+          rpcMetrics0.getProxyOpNoNamenodes());
+      // Router 1 failures do not change
+      assertEquals(originalRouter1Failures,
+          rpcMetrics1.getProxyOpNoNamenodes());
+
+      // Make name services available
+      transitionClusterNSToActive(cluster, 0);
+      for (RouterContext routerContext : cluster.getRouters()) {
+        // Manually trigger the heartbeat
+        Collection<NamenodeHeartbeatService> heartbeatServices = routerContext
+            .getRouter().getNamenodeHeartbeatServices();
+        for (NamenodeHeartbeatService service : heartbeatServices) {
+          service.periodicInvoke();
+        }
+        // Update service cache
+        routerContext.getRouter().getStateStore().refreshCaches(true);
       }
-      // Update service cache
-      routerContext.getRouter().getStateStore().refreshCaches(true);
-    }
 
-    originalRouter0Failures = rpcMetrics0.getProxyOpNoNamenodes();
+      originalRouter0Failures = rpcMetrics0.getProxyOpNoNamenodes();
 
-    // RPC call must be successful
-    routerClient.getFileInfo("/");
-    // Router 0 failures do not change
-    assertEquals(originalRouter0Failures, rpcMetrics0.getProxyOpNoNamenodes());
+      // RPC call must be successful
+      routerClient.getFileInfo("/");
+      // Router 0 failures do not change
+      assertEquals(originalRouter0Failures, rpcMetrics0.getProxyOpNoNamenodes());
+    });
+    assertThat(remoteException.getMessage()).contains(exceptionMessage);
   }
 
   /**

+ 27 - 33
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java

@@ -24,11 +24,11 @@ import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.getFi
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.refreshRoutersCaches;
 import static org.apache.hadoop.hdfs.server.federation.MockNamenode.registerSubclusters;
 import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -76,9 +76,9 @@ import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -106,7 +106,7 @@ public class TestRouterFaultTolerant {
   private ExecutorService service;
 
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     LOG.info("Start the Namenodes");
     Configuration nnConf = new HdfsConfiguration();
@@ -160,7 +160,7 @@ public class TestRouterFaultTolerant {
     service = Executors.newFixedThreadPool(10);
   }
 
-  @After
+  @AfterEach
   public void cleanup() throws Exception {
     LOG.info("Stopping the cluster");
     for (final MockNamenode nn : namenodes.values()) {
@@ -311,13 +311,13 @@ public class TestRouterFaultTolerant {
     int filesExpected = dirs0.length + results.getSuccess();
     tasks.add(getListSuccessTask(router1Fs, mountPoint, filesExpected));
     results = collectResults("List " + mountPoint, tasks);
-    assertEquals("Failed listing", 2, results.getSuccess());
+    assertEquals(2, results.getSuccess(), "Failed listing");
 
     tasks.add(getContentSummaryFailTask(router0Fs, mountPoint));
     tasks.add(getContentSummarySuccessTask(
         router1Fs, mountPoint, filesExpected));
     results = collectResults("Content summary "  + mountPoint, tasks);
-    assertEquals("Failed content summary", 2, results.getSuccess());
+    assertEquals(2, results.getSuccess(), "Failed content summary");
   }
 
   /**
@@ -346,15 +346,11 @@ public class TestRouterFaultTolerant {
 
     LOG.info("Check files results for {}: {}", dir0, results);
     if (faultTolerant) {
-      assertEquals("Not enough success in " + mountPoint,
-          NUM_FILES, results.getSuccess());
-      assertEquals("Nothing should fail in " + mountPoint, 0,
-          results.getFailure());
+      assertEquals(NUM_FILES, results.getSuccess(), "Not enough success in " + mountPoint);
+      assertEquals(0, results.getFailure(), "Nothing should fail in " + mountPoint);
     } else {
-      assertEquals("Nothing should succeed in " + mountPoint,
-          0, results.getSuccess());
-      assertEquals("Everything should fail in " + mountPoint,
-          NUM_FILES, results.getFailure());
+      assertEquals(0, results.getSuccess(), "Nothing should succeed in " + mountPoint);
+      assertEquals(NUM_FILES, results.getFailure(), "Everything should fail in " + mountPoint);
     }
 
     LOG.info("Check files listing for {}", dir0);
@@ -420,8 +416,7 @@ public class TestRouterFaultTolerant {
         // We don't write because we have no mock Datanodes
         os.close();
         FileStatus fileStatus = checkFs.getFileStatus(path);
-        assertTrue("File not created properly: " + fileStatus,
-            fileStatus.getLen() > 0);
+        assertTrue(fileStatus.getLen() > 0, "File not created properly: " + fileStatus);
         return true;
       } catch (RemoteException re) {
         return false;
@@ -474,7 +469,7 @@ public class TestRouterFaultTolerant {
       FileSystem fs, Path path, int expected) {
     return () -> {
       final FileStatus[] dirs = fs.listStatus(path);
-      assertEquals(toString(dirs), expected, dirs.length);
+      assertEquals(expected, dirs.length, toString(dirs));
       return true;
     };
   }
@@ -509,8 +504,7 @@ public class TestRouterFaultTolerant {
       FileSystem fs, Path path, int expected) {
     return () -> {
       ContentSummary summary = fs.getContentSummary(path);
-      assertEquals("Wrong summary for " + path,
-          expected, summary.getFileAndDirectoryCount());
+      assertEquals(expected, summary.getFileAndDirectoryCount(), "Wrong summary for " + path);
       return true;
     };
   }
@@ -590,9 +584,9 @@ public class TestRouterFaultTolerant {
    */
   private static void assertBothResults(String msg,
       int expected, TaskResults actual) {
-    assertEquals(msg, expected, actual.getTotal());
-    assertTrue("Expected some success for " + msg, actual.getSuccess() > 0);
-    assertTrue("Expected some failure for " + msg, actual.getFailure() > 0);
+    assertEquals(expected, actual.getTotal(), msg);
+    assertTrue(actual.getSuccess() > 0, "Expected some success for " + msg);
+    assertTrue(actual.getFailure() > 0, "Expected some failure for " + msg);
   }
 
   /**
@@ -641,7 +635,7 @@ public class TestRouterFaultTolerant {
 
     // We should be able to read existing files
     FSDataInputStream fsdis = fs.open(fileexisting);
-    assertNotNull("We should be able to read the file", fsdis);
+    assertNotNull(fsdis, "We should be able to read the file");
     // We shouldn't be able to read non-existing files
     LambdaTestUtils.intercept(FileNotFoundException.class,
         () -> fs.open(filenotexisting));
@@ -657,13 +651,13 @@ public class TestRouterFaultTolerant {
       try {
         FileStatus fileStatus = nnfs.getFileStatus(fileexisting);
         assertNotNull(fileStatus);
-        assertNull("The file cannot be in two subclusters", nsIdWithFile);
+        assertNull(nsIdWithFile, "The file cannot be in two subclusters");
         nsIdWithFile = nsId;
       } catch (FileNotFoundException fnfe) {
         LOG.debug("File not found in {}", nsId);
       }
     }
-    assertNotNull("The file has to be in one subcluster", nsIdWithFile);
+    assertNotNull(nsIdWithFile, "The file has to be in one subcluster");
 
     LOG.info("Stop {} to simulate an unavailable subcluster", nsIdWithFile);
     namenodes.get(nsIdWithFile).stop();
@@ -674,8 +668,8 @@ public class TestRouterFaultTolerant {
       fail("It should throw an unavailable cluster exception");
     } catch(RemoteException re) {
       IOException ioe = re.unwrapRemoteException();
-      assertTrue("Expected an unavailable exception for:" + ioe.getClass(),
-          RouterRpcClient.isUnavailableException(ioe));
+      assertTrue(RouterRpcClient.isUnavailableException(ioe),
+          "Expected an unavailable exception for:" + ioe.getClass());
     }
   }
 }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederatedState.java

@@ -28,9 +28,9 @@ import org.apache.hadoop.ipc.RpcConstants;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos;
 import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException;
 import org.apache.hadoop.util.ProtoUtil;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 
 public class TestRouterFederatedState {

+ 13 - 11
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRename.java

@@ -20,9 +20,9 @@ package org.apache.hadoop.hdfs.server.federation.router;
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createFile;
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists;
 import static org.apache.hadoop.test.GenericTestUtils.getMethodName;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.Mockito.verify;
 
 import java.io.IOException;
@@ -45,10 +45,11 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.GroupMappingServiceProvider;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.mockito.Mockito;
 
 /**
@@ -82,17 +83,17 @@ public class TestRouterFederationRename extends TestRouterFederationRenameBase {
   private FileSystem routerFS;
   private MiniRouterDFSCluster cluster;
 
-  @BeforeClass
+  @BeforeAll
   public static void before() throws Exception {
     globalSetUp();
   }
 
-  @AfterClass
+  @AfterAll
   public static void after() {
     tearDown();
   }
 
-  @Before
+  @BeforeEach
   public void testSetup() throws Exception {
     setup();
     router = getRouterContext();
@@ -302,7 +303,8 @@ public class TestRouterFederationRename extends TestRouterFederationRenameBase {
     getRouterFileSystem().delete(new Path(renamedDir), true);
   }
 
-  @Test(timeout = 20000)
+  @Test
+  @Timeout(value = 20)
   public void testCounter() throws Exception {
     final RouterRpcServer rpcServer = router.getRouter().getRpcServer();
     List<String> nss = cluster.getNameservices();
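@Test(timeout = 20000) has no direct attribute on JUnit 5's @Test; the limit moves to a separate @Timeout annotation whose default unit is seconds, so 20000 ms becomes @Timeout(value = 20). A minimal sketch, with illustrative method names:

    import java.util.concurrent.TimeUnit;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    class TimeoutExample {
      @Test
      @Timeout(value = 20)  // seconds by default
      void finishesWithinTwentySeconds() { }

      @Test
      @Timeout(value = 500, unit = TimeUnit.MILLISECONDS)  // explicit unit for finer granularity
      void finishesQuickly() { }
    }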

+ 11 - 11
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenameInKerberosEnv.java

@@ -36,11 +36,11 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ImpersonationProvider;
 import org.apache.hadoop.tools.fedbalance.DistCpProcedure;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.io.File;
 import java.io.IOException;
@@ -64,8 +64,8 @@ import static org.apache.hadoop.security.token.delegation.ZKDelegationTokenSecre
 import static org.apache.hadoop.test.GenericTestUtils.getMethodName;
 import static org.apache.hadoop.tools.fedbalance.FedBalanceConfigs.SCHEDULER_JOURNAL_URI;
 import static org.apache.hadoop.yarn.conf.YarnConfiguration.RM_PRINCIPAL;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Basic tests of router federation rename. Rename across namespaces.
@@ -94,7 +94,7 @@ public class TestRouterFederationRenameInKerberosEnv
   /** Random Router for this federated cluster. */
   private RouterContext router;
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     // init KDC
     File workDir = new File(System.getProperty("test.dir", "target"));
@@ -151,20 +151,20 @@ public class TestRouterFederationRenameInKerberosEnv
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void globalTearDown() {
     kdc.stop();
     DistCpProcedure.disableForTest();
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     super.tearDown();
     cluster.shutdown();
   }
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFederationRenamePermission.java

@@ -21,8 +21,8 @@ import static org.apache.hadoop.fs.permission.FsAction.ALL;
 import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists;
 import static org.apache.hadoop.test.GenericTestUtils.getMethodName;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 
 import java.io.IOException;
 import java.util.List;
@@ -42,10 +42,10 @@ import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.Lists;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test permission check of router federation rename.
@@ -66,17 +66,17 @@ public class TestRouterFederationRenamePermission
   private FileSystem routerFS;
   private MiniRouterDFSCluster cluster;
 
-  @BeforeClass
+  @BeforeAll
   public static void before() throws Exception {
     globalSetUp();
   }
 
-  @AfterClass
+  @AfterAll
   public static void after() {
     tearDown();
   }
 
-  @Before
+  @BeforeEach
   public void testSetup() throws Exception {
     setup();
     cluster = getCluster();

+ 11 - 11
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFsck.java

@@ -23,10 +23,10 @@ import java.nio.charset.StandardCharsets;
 import java.util.Collections;
 import java.util.List;
 
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -57,10 +57,10 @@ import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.http.impl.client.HttpClients;
 import org.apache.http.util.EntityUtils;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * End-to-end tests for fsck via DFSRouter.
@@ -77,7 +77,7 @@ public class TestRouterFsck {
   private static InetSocketAddress webAddress;
   private static List<MembershipState> memberships;
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     // Build and start a federated cluster
     cluster = new StateStoreDFSCluster(false, 2);
@@ -111,7 +111,7 @@ public class TestRouterFsck {
     Collections.sort(memberships);
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.stopRouter(routerContext);
@@ -120,7 +120,7 @@ public class TestRouterFsck {
     }
   }
 
-  @After
+  @AfterEach
   public void clearMountTable() throws IOException {
     RouterClient client = routerContext.getAdminClient();
     MountTableManager mountTableManager = client.getMountTableManager();

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterHeartbeatService.java

@@ -31,18 +31,18 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistra
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetRouterRegistrationResponse;
 import org.apache.hadoop.hdfs.server.federation.store.records.RouterState;
 import org.apache.hadoop.hdfs.server.federation.store.records.StateStoreVersion;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.waitStateStore;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test cases for router heartbeat service.
@@ -53,7 +53,7 @@ public class TestRouterHeartbeatService {
   private TestingServer testingServer;
   private CuratorFramework curatorFramework;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     router = new Router();
     router.setRouterId(routerId);
@@ -130,7 +130,7 @@ public class TestRouterHeartbeatService {
     assertNotNull(version);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (curatorFramework != null) {
       curatorFramework.close();

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterHttpServerXFrame.java

@@ -24,8 +24,8 @@ import java.net.URI;
 import java.net.URL;
 
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -57,8 +57,8 @@ public class TestRouterHttpServerXFrame {
       conn.connect();
 
       String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
-      Assert.assertNotNull("X-FRAME-OPTIONS is absent in the header", xfoHeader);
-      Assert.assertTrue(xfoHeader.endsWith(SAMEORIGIN.toString()));
+      Assertions.assertNotNull(xfoHeader, "X-FRAME-OPTIONS is absent in the header");
+      Assertions.assertTrue(xfoHeader.endsWith(SAMEORIGIN.toString()));
     } finally {
       router.stop();
       router.close();

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMissingFolderMulti.java

@@ -22,7 +22,7 @@ import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.creat
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.getFileSystem;
 import static org.apache.hadoop.hdfs.server.federation.MockNamenode.registerSubclusters;
 import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.FileNotFoundException;
 import java.util.HashMap;
@@ -44,9 +44,9 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResol
 import org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -69,7 +69,7 @@ public class TestRouterMissingFolderMulti {
   private Router router;
 
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     LOG.info("Start the Namenodes");
     Configuration nnConf = new HdfsConfiguration();
@@ -111,7 +111,7 @@ public class TestRouterMissingFolderMulti {
     registerSubclusters(router, namenodes.values());
   }
 
-  @After
+  @AfterEach
   public void cleanup() throws Exception {
     LOG.info("Stopping the cluster");
     for (final MockNamenode nn : namenodes.values()) {

+ 11 - 11
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java

@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -60,10 +60,10 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test a router end-to-end including the MountTable.
@@ -82,7 +82,7 @@ public class TestRouterMountTable {
   protected static FileSystem nnFs1;
   protected static FileSystem routerFs;
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     startTime = Time.now();
 
@@ -111,7 +111,7 @@ public class TestRouterMountTable {
     mountTable = (MountTableResolver) router.getSubclusterResolver();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.stopRouter(routerContext);
@@ -120,7 +120,7 @@ public class TestRouterMountTable {
     }
   }
 
-  @After
+  @AfterEach
   public void clearMountTable() throws IOException {
     RouterClient client = routerContext.getAdminClient();
     MountTableManager mountTableManager = client.getMountTableManager();

+ 50 - 36
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefresh.java

@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -54,29 +54,28 @@ import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 
 /**
  * This test class verifies that mount table cache is updated on all the routers
  * when MountTableRefreshService is enabled and there is a change in mount table
  * entries.
  */
-@RunWith(Parameterized.class)
 public class TestRouterMountTableCacheRefresh {
   private static TestingServer curatorTestingServer;
   private static MiniRouterDFSCluster cluster;
   private static RouterContext routerContext;
   private static MountTableManager mountTableManager;
 
-  @Parameterized.Parameters
   public static Collection<Object> data() {
     return Arrays.asList(new Object[] {true, false});
   }
 
-  public TestRouterMountTableCacheRefresh(boolean useIpForHeartbeats) throws Exception {
+  public void initTestRouterMountTableCacheRefresh(boolean pUseIpForHeartbeats)
+      throws Exception {
     // Initialize only once per parameter
     if (curatorTestingServer != null) {
       return;
@@ -93,7 +92,7 @@ public class TestRouterMountTableCacheRefresh {
         FileSubclusterResolver.class);
     conf.set(RBFConfigKeys.FEDERATION_STORE_ZK_ADDRESS, connectString);
     conf.setBoolean(RBFConfigKeys.DFS_ROUTER_STORE_ENABLE, true);
-    conf.setBoolean(RBFConfigKeys.DFS_ROUTER_HEARTBEAT_WITH_IP_ENABLE, useIpForHeartbeats);
+    conf.setBoolean(RBFConfigKeys.DFS_ROUTER_HEARTBEAT_WITH_IP_ENABLE, pUseIpForHeartbeats);
     cluster.addRouterOverrides(conf);
     cluster.startCluster();
     cluster.startRouters();
@@ -107,8 +106,8 @@ public class TestRouterMountTableCacheRefresh {
         numNameservices, 60000);
   }
 
-  @Parameterized.AfterParam
-  public static void destroy() {
+  @AfterEach
+  public void destroy() {
     try {
       if (curatorTestingServer != null) {
         curatorTestingServer.close();
@@ -121,7 +120,7 @@ public class TestRouterMountTableCacheRefresh {
     }
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     clearEntries();
   }
@@ -140,10 +139,12 @@ public class TestRouterMountTableCacheRefresh {
    * addMountTableEntry API should internally update the cache on all the
    * routers.
    */
-  @Test
-  public void testMountTableEntriesCacheUpdatedAfterAddAPICall()
-      throws IOException {
+  @MethodSource("data")
+  @ParameterizedTest
+  public void testMountTableEntriesCacheUpdatedAfterAddAPICall(boolean pUseIpForHeartbeats)
+      throws Exception {
 
+    initTestRouterMountTableCacheRefresh(pUseIpForHeartbeats);
     // Existing mount table size
     int existingEntriesCount = getNumMountTableEntries();
     String srcPath = "/addPath";
@@ -168,9 +169,11 @@ public class TestRouterMountTableCacheRefresh {
    * removeMountTableEntry API should internally update the cache on all the
    * routers.
    */
-  @Test
-  public void testMountTableEntriesCacheUpdatedAfterRemoveAPICall()
-      throws IOException {
+  @MethodSource("data")
+  @ParameterizedTest
+  public void testMountTableEntriesCacheUpdatedAfterRemoveAPICall(boolean pUseIpForHeartbeats)
+      throws Exception {
+    initTestRouterMountTableCacheRefresh(pUseIpForHeartbeats);
     // add
     String srcPath = "/removePathSrc";
     MountTable newEntry = MountTable.newInstance(srcPath,
@@ -194,9 +197,11 @@ public class TestRouterMountTableCacheRefresh {
    * updateMountTableEntry API should internally update the cache on all the
    * routers.
    */
-  @Test
-  public void testMountTableEntriesCacheUpdatedAfterUpdateAPICall()
-      throws IOException {
+  @MethodSource("data")
+  @ParameterizedTest
+  public void testMountTableEntriesCacheUpdatedAfterUpdateAPICall(boolean pUseIpForHeartbeats)
+      throws Exception {
+    initTestRouterMountTableCacheRefresh(pUseIpForHeartbeats);
     // add
     String srcPath = "/updatePathSrc";
     MountTable newEntry = MountTable.newInstance(srcPath,
@@ -216,8 +221,7 @@ public class TestRouterMountTableCacheRefresh {
             UpdateMountTableEntryRequest.newInstance(upateEntry));
     assertTrue(updateMountTableEntry.getStatus());
     MountTable updatedMountTable = getMountTableEntry(srcPath);
-    assertNotNull("Updated mount table entrty cannot be null",
-        updatedMountTable);
+    assertNotNull(updatedMountTable, "Updated mount table entry cannot be null");
     assertEquals(1, updatedMountTable.getDestinations().size());
     assertEquals(key,
         updatedMountTable.getDestinations().get(0).getNameserviceId());
@@ -229,9 +233,11 @@ public class TestRouterMountTableCacheRefresh {
    * successful on other available router. The router which is not running
    * should be ignored.
    */
-  @Test
-  public void testCachedRouterClientBehaviourAfterRouterStoped()
-      throws IOException {
+  @MethodSource("data")
+  @ParameterizedTest
+  public void testCachedRouterClientBehaviourAfterRouterStoped(boolean pUseIpForHeartbeats)
+      throws Exception {
+    initTestRouterMountTableCacheRefresh(pUseIpForHeartbeats);
     String srcPath = "/addPathClientCache";
     MountTable newEntry = MountTable.newInstance(srcPath,
         Collections.singletonMap("ns0", "/addPathClientCacheDest"), Time.now(),
@@ -282,8 +288,10 @@ public class TestRouterMountTableCacheRefresh {
     return result;
   }
 
-  @Test
-  public void testRefreshMountTableEntriesAPI() throws IOException {
+  @MethodSource("data")
+  @ParameterizedTest
+  public void testRefreshMountTableEntriesAPI(boolean pUseIpForHeartbeats) throws Exception {
+    initTestRouterMountTableCacheRefresh(pUseIpForHeartbeats);
     RefreshMountTableEntriesRequest request =
         RefreshMountTableEntriesRequest.newInstance();
     RefreshMountTableEntriesResponse refreshMountTableEntriesRes =
@@ -296,8 +304,12 @@ public class TestRouterMountTableCacheRefresh {
    * Verify cache update timeouts when any of the router takes more time than
    * the configured timeout period.
    */
-  @Test(timeout = 10000)
-  public void testMountTableEntriesCacheUpdateTimeout() throws IOException {
+  @MethodSource("data")
+  @ParameterizedTest
+  @Timeout(value = 100)
+  public void testMountTableEntriesCacheUpdateTimeout(boolean pUseIpForHeartbeats)
+      throws Exception {
+    initTestRouterMountTableCacheRefresh(pUseIpForHeartbeats);
     // Resources will be closed when router is closed
     @SuppressWarnings("resource")
     MountTableRefresherService mountTableRefresherService =
@@ -332,8 +344,10 @@ public class TestRouterMountTableCacheRefresh {
    * Verify Cached RouterClient connections are removed from cache and closed
    * when their max live time is elapsed.
    */
-  @Test
-  public void testRouterClientConnectionExpiration() throws Exception {
+  @MethodSource("data")
+  @ParameterizedTest
+  public void testRouterClientConnectionExpiration(boolean pUseIpForHeartbeats) throws Exception {
+    initTestRouterMountTableCacheRefresh(pUseIpForHeartbeats);
     final AtomicInteger createCounter = new AtomicInteger();
     final AtomicInteger removeCounter = new AtomicInteger();
     // Resources will be closed when router is closed
@@ -362,7 +376,7 @@ public class TestRouterMountTableCacheRefresh {
     mountTableRefresherService.init(config);
     // Do refresh to created RouterClient
     mountTableRefresherService.refresh();
-    assertNotEquals("No RouterClient is created.", 0, createCounter.get());
+    assertNotEquals(0, createCounter.get(), "No RouterClient is created.");
     /*
      * Wait for clients to expire. Let's wait triple the cache eviction period.
      * After cache eviction period all created client must be removed and

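The hunks above follow the usual JUnit 5 replacement for the JUnit 4 Parameterized runner: there is no runner-level constructor and no @Parameterized.AfterParam, so each @ParameterizedTest method receives the parameter, hands it to an init helper, and per-parameter cleanup moves to @AfterEach. A minimal sketch of that shape, with hypothetical class and method names:

import java.util.Arrays;
import java.util.List;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

public class ParameterizedRefreshExample {

  private boolean useIpForHeartbeats;

  // Factory referenced by name from @MethodSource; one test invocation per value.
  public static List<Boolean> data() {
    return Arrays.asList(Boolean.TRUE, Boolean.FALSE);
  }

  // Replaces the JUnit 4 constructor; called explicitly at the top of each test.
  private void initParameterizedRefreshExample(boolean pUseIpForHeartbeats) {
    this.useIpForHeartbeats = pUseIpForHeartbeats;
  }

  @ParameterizedTest
  @MethodSource("data")
  public void testCacheIsRefreshed(boolean pUseIpForHeartbeats) throws Exception {
    initParameterizedRefreshExample(pUseIpForHeartbeats);
    // test body operating on the initialized state
  }

  // Runs after every invocation, taking over from @Parameterized.AfterParam.
  @AfterEach
  public void destroy() {
    // per-parameter cleanup
  }
}
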
+ 11 - 12
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableCacheRefreshSecure.java

@@ -18,9 +18,9 @@
 package org.apache.hadoop.hdfs.server.federation.router;
 
 import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -49,10 +49,10 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableE
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -71,7 +71,7 @@ public class TestRouterMountTableCacheRefreshSecure {
   private static RouterContext routerContext;
   private static MountTableManager mountTableManager;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     curatorTestingServer = new TestingServer();
     curatorTestingServer.start();
@@ -101,7 +101,7 @@ public class TestRouterMountTableCacheRefreshSecure {
         numNameservices, 60000);
   }
 
-  @AfterClass
+  @AfterAll
   public static void destory() {
     try {
       curatorTestingServer.close();
@@ -111,7 +111,7 @@ public class TestRouterMountTableCacheRefreshSecure {
     }
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     clearEntries();
   }
@@ -234,8 +234,7 @@ public class TestRouterMountTableCacheRefreshSecure {
             UpdateMountTableEntryRequest.newInstance(upateEntry));
     assertTrue(updateMountTableEntry.getStatus());
     MountTable updatedMountTable = getMountTableEntry(srcPath);
-    assertNotNull("Updated mount table entrty cannot be null",
-        updatedMountTable);
+    assertNotNull(updatedMountTable, "Updated mount table entry cannot be null");
 
     // When update entry is done, all the routers must have updated its mount
     // table entry

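A recurring mechanical change throughout these files is the assertion-argument order: org.junit.Assert placed the optional failure message first, while org.junit.jupiter.api.Assertions places it last. A small self-contained sketch with illustrative values only:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;

import org.junit.jupiter.api.Test;

public class AssertionMessageOrderExample {

  @Test
  public void messageMovesToTheLastArgument() {
    String nameservice = "ns0";
    // JUnit 4: assertNotNull("nameservice cannot be null", nameservice);
    // JUnit 5: the same check, with the message as the trailing parameter.
    assertNotNull(nameservice, "nameservice cannot be null");
    assertEquals("ns0", nameservice, "unexpected nameservice id");
  }
}
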
+ 11 - 11
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTableWithoutDefaultNS.java

@@ -38,20 +38,20 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntr
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 /**
  * Test a router end-to-end including the MountTable without default nameservice.
@@ -64,7 +64,7 @@ public class TestRouterMountTableWithoutDefaultNS {
   private static FileSystem nnFs0;
   private static FileSystem nnFs1;
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     // Build and start a federated cluster
     cluster = new StateStoreDFSCluster(false, 2);
@@ -89,7 +89,7 @@ public class TestRouterMountTableWithoutDefaultNS {
     mountTable = (MountTableResolver) router.getSubclusterResolver();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.stopRouter(routerContext);
@@ -98,7 +98,7 @@ public class TestRouterMountTableWithoutDefaultNS {
     }
   }
 
-  @After
+  @AfterEach
   public void clearMountTable() throws IOException {
     RouterClient client = routerContext.getAdminClient();
     MountTableManager mountTableManager = client.getMountTableManager();

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMultiRack.java

@@ -25,14 +25,14 @@ import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterConte
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test class with clusters having multiple racks.
@@ -46,7 +46,7 @@ public class TestRouterMultiRack {
   private static DistributedFileSystem nnFs0;
   private static DistributedFileSystem nnFs1;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
 
     // Build and start a federated cluster
@@ -76,7 +76,7 @@ public class TestRouterMultiRack {
     nnFs1 = (DistributedFileSystem) nnContext1.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.stopRouter(routerContext);

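The lifecycle changes in this file, and in most of the others, are a one-to-one annotation mapping. A compact sketch of the correspondence under a hypothetical class name:

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

public class LifecycleMappingExample {

  @BeforeAll   // was org.junit.BeforeClass
  public static void setUp() {
    // start the shared mini cluster once for the class
  }

  @AfterAll    // was org.junit.AfterClass
  public static void tearDown() {
    // stop the shared mini cluster
  }

  @BeforeEach  // was org.junit.Before
  public void prepare() {
    // per-test setup
  }

  @AfterEach   // was org.junit.After
  public void cleanup() {
    // per-test cleanup
  }

  @Test        // now org.junit.jupiter.api.Test
  public void testSomething() {
  }
}
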
+ 9 - 14
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeHeartbeat.java

@@ -25,10 +25,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMENODES;
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMESERVICES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -49,11 +49,9 @@ import org.apache.hadoop.net.MockDomainNameResolver;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.util.Shell;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test the service that heartbeats the state of the namenodes to the State
@@ -65,10 +63,7 @@ public class TestRouterNamenodeHeartbeat {
   private static ActiveNamenodeResolver namenodeResolver;
   private static List<NamenodeHeartbeatService> services;
 
-  @Rule
-  public TestName name = new TestName();
-
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
 
     cluster = new MiniRouterDFSCluster(true, 2);
@@ -96,7 +91,7 @@ public class TestRouterNamenodeHeartbeat {
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws IOException {
     cluster.shutdown();
     for (NamenodeHeartbeatService service: services) {

+ 12 - 16
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeMonitoring.java

@@ -21,9 +21,9 @@ import static java.util.Arrays.asList;
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.getFileSystem;
 import static org.apache.hadoop.hdfs.server.federation.MockNamenode.registerSubclusters;
 import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -58,9 +58,9 @@ import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -87,7 +87,7 @@ public class TestRouterNamenodeMonitoring {
   private long initializedTime;
 
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     LOG.info("Initialize the Mock Namenodes to monitor");
     for (String nsId : nsIds) {
@@ -106,7 +106,7 @@ public class TestRouterNamenodeMonitoring {
     initializedTime = Time.now();
   }
 
-  @After
+  @AfterEach
   public void cleanup() throws Exception {
     for (Map<String, MockNamenode> nnNS : nns.values()) {
       for (MockNamenode nn : nnNS.values()) {
@@ -215,12 +215,10 @@ public class TestRouterNamenodeMonitoring {
           "nn0".equals(nnInfo.getNamenodeId())) {
         // The modified date won't be updated in ns0.nn0
         // since it isn't monitored by the Router.
-        assertTrue(nnInfo + " shouldn't be updated: " + diff,
-            modTime < initializedTime);
+        assertTrue(modTime < initializedTime, nnInfo + " shouldn't be updated: " + diff);
       } else {
         // other namnodes should be updated as expected
-        assertTrue(nnInfo + " should be updated: " + diff,
-            modTime > initializedTime);
+        assertTrue(modTime > initializedTime, nnInfo + " should be updated: " + diff);
       }
     }
   }
@@ -278,10 +276,8 @@ public class TestRouterNamenodeMonitoring {
       sb.append(report.getNamenodeId());
       actualSet.add(sb.toString());
     }
-    assertTrue(expected + " does not contain all " + actualSet,
-        expected.containsAll(actualSet));
-    assertTrue(actualSet + " does not contain all " + expected,
-        actualSet.containsAll(expected));
+    assertTrue(expected.containsAll(actualSet), expected + " does not contain all " + actualSet);
+    assertTrue(actualSet.containsAll(expected), actualSet + " does not contain all " + expected);
   }
 
   @Test

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNamenodeWebScheme.java

@@ -29,9 +29,9 @@ import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResol
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -47,7 +47,7 @@ import java.util.Set;
 
 import static java.util.Arrays.asList;
 import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Test the scheme of Http address of Namenodes displayed in Router.
@@ -65,7 +65,7 @@ public class TestRouterNamenodeWebScheme {
   /** Nameservices in the federated cluster. */
   private List<String> nsIds = asList("ns0", "ns1");
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     LOG.info("Initialize the Mock Namenodes to monitor");
     for (String nsId : nsIds) {
@@ -82,7 +82,7 @@ public class TestRouterNamenodeWebScheme {
     }
   }
 
-  @After
+  @AfterEach
   public void cleanup() throws Exception {
     for (Map<String, MockNamenode> nnNS : nns.values()) {
       for (MockNamenode nn : nnNS.values()) {
@@ -198,8 +198,8 @@ public class TestRouterNamenodeWebScheme {
       namespaceInfo.addAll(nnReports);
     }
     for (FederationNamenodeContext nnInfo : namespaceInfo) {
-      assertEquals("Unexpected scheme for Policy: " + httpPolicy.name(),
-          nnInfo.getWebScheme(), expectedScheme);
+      assertEquals(nnInfo.getWebScheme(), expectedScheme, "Unexpected scheme for Policy: " +
+          httpPolicy.name());
     }
   }
 }

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterNetworkTopologyServlet.java

@@ -24,8 +24,8 @@ import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.io.ByteArrayOutputStream;
 import java.net.HttpURLConnection;
@@ -34,15 +34,15 @@ import java.util.Iterator;
 import java.util.Map;
 
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_HTTP_ENABLE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestRouterNetworkTopologyServlet {
 
   private static StateStoreDFSCluster clusterWithDatanodes;
   private static StateStoreDFSCluster clusterNoDatanodes;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     // Builder configuration
     Configuration routerConf =

+ 17 - 24
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterPolicyProvider.java

@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.federation.router;
 
 import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.util.Arrays;
 import java.util.HashSet;
@@ -32,13 +32,10 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.util.Sets;
 
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameters;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.TestInfo;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -50,19 +47,15 @@ import org.slf4j.LoggerFactory;
  * RouterPolicyProvider. This is a parameterized test repeated for multiple HDFS
  * RPC server classes.
  */
-@RunWith(Parameterized.class)
 public class TestRouterPolicyProvider {
   private static final Logger LOG = LoggerFactory.getLogger(
       TestRouterPolicyProvider.class);
 
   private static Set<Class<?>> policyProviderProtocols;
 
-  @Rule
-  public TestName testName = new TestName();
+  private Class<?> rpcServerClass;
 
-  private final Class<?> rpcServerClass;
-
-  @BeforeClass
+  @BeforeAll
   public static void initialize() {
     Service[] services = new RouterPolicyProvider().getServices();
     policyProviderProtocols = new HashSet<>(services.length);
@@ -71,19 +64,20 @@ public class TestRouterPolicyProvider {
     }
   }
 
-  public TestRouterPolicyProvider(Class<?> rpcServerClass) {
-    this.rpcServerClass = rpcServerClass;
+  public void initTestRouterPolicyProvider(Class<?> pRpcServerClass) {
+    this.rpcServerClass = pRpcServerClass;
   }
 
-  @Parameters(name = "protocolsForServer-{0}")
   public static List<Class<?>[]> data() {
     return Arrays.asList(new Class<?>[][] {{RouterRpcServer.class},
         {NameNodeRpcServer.class}, {DataNode.class},
         {RouterAdminServer.class}});
   }
 
-  @Test
-  public void testPolicyProviderForServer() {
+  @MethodSource("data")
+  @ParameterizedTest
+  public void testPolicyProviderForServer(Class<?> pRpcServerClass, TestInfo testInfo) {
+    initTestRouterPolicyProvider(pRpcServerClass);
     List<?> ifaces = ClassUtils.getAllInterfaces(rpcServerClass);
     Set<Class<?>> serverProtocols = new HashSet<>(ifaces.size());
     for (Object obj : ifaces) {
@@ -93,15 +87,14 @@ public class TestRouterPolicyProvider {
       }
     }
     LOG.info("Running test {} for RPC server {}.  Found server protocols {} "
-        + "and policy provider protocols {}.", testName.getMethodName(),
+        + "and policy provider protocols {}.", testInfo.getDisplayName(),
         rpcServerClass.getName(), serverProtocols, policyProviderProtocols);
-    assertFalse("Expected to find at least one protocol in server.",
-        serverProtocols.isEmpty());
+    assertFalse(serverProtocols.isEmpty(), "Expected to find at least one protocol in server.");
     final Set<Class<?>> differenceSet = Sets.difference(serverProtocols,
         policyProviderProtocols);
-    assertTrue(String.format(
+    assertTrue(differenceSet.isEmpty(), String.format(
         "Following protocols for server %s are not defined in " + "%s: %s",
         rpcServerClass.getName(), RouterPolicyProvider.class.getName(), Arrays
-            .toString(differenceSet.toArray())), differenceSet.isEmpty());
+            .toString(differenceSet.toArray())));
   }
 }

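JUnit 5 drops @Rule support, so the TestName rule previously used to log the running test is replaced by a TestInfo parameter that the framework resolves, as in the hunk above. A minimal sketch with a hypothetical class name:

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TestInfoExample {

  private static final Logger LOG = LoggerFactory.getLogger(TestInfoExample.class);

  // JUnit 4: @Rule public TestName testName = new TestName();
  // JUnit 5: declare a TestInfo parameter and the framework injects it.
  @Test
  public void testLogsItsOwnName(TestInfo testInfo) {
    LOG.info("Running test {}.", testInfo.getDisplayName());
  }
}
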
+ 11 - 11
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuota.java

@@ -18,12 +18,12 @@
 package org.apache.hadoop.hdfs.server.federation.router;
 
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -72,9 +72,9 @@ import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests quota behaviors in Router-based Federation.
@@ -88,7 +88,7 @@ public class TestRouterQuota {
 
   private static final int BLOCK_SIZE = 512;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
 
     // Build and start a federated cluster
@@ -119,7 +119,7 @@ public class TestRouterQuota {
     resolver = (MountTableResolver) router.getSubclusterResolver();
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.stopRouter(routerContext);

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterQuotaManager.java

@@ -17,16 +17,16 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.util.Set;
 
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests for class {@link RouterQuotaManager}.
@@ -34,12 +34,12 @@ import org.junit.Test;
 public class TestRouterQuotaManager {
   private static RouterQuotaManager manager;
 
-  @Before
+  @BeforeEach
   public void setup() {
     manager = new RouterQuotaManager();
   }
 
-  @After
+  @AfterEach
   public void cleanup() {
     manager.clear();
   }

+ 11 - 14
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCClientRetries.java

@@ -20,10 +20,10 @@ package org.apache.hadoop.hdfs.server.federation.router;
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.simulateSlowNamenode;
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.apache.hadoop.test.GenericTestUtils.waitFor;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.util.List;
@@ -48,15 +48,15 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.RemoteException;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Timeout;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test retry behavior of the Router RPC Client.
  */
+@Timeout(100000)
 public class TestRouterRPCClientRetries {
 
   private static StateStoreDFSCluster cluster;
@@ -65,10 +65,7 @@ public class TestRouterRPCClientRetries {
   private static MembershipNamenodeResolver resolver;
   private static ClientProtocol routerProtocol;
 
-  @Rule
-  public final Timeout testTimeout = new Timeout(100000L, TimeUnit.MILLISECONDS);
-
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     // Build and start a federated cluster
     cluster = new StateStoreDFSCluster(false, 2);
@@ -104,7 +101,7 @@ public class TestRouterRPCClientRetries {
     routerProtocol = routerContext.getClient().getNamenode();
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.stopRouter(routerContext);

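The class-level @Timeout above stands in for the JUnit 4 Timeout rule. One detail worth keeping in mind when reading these hunks: org.junit.jupiter.api.Timeout defaults to seconds, whereas the JUnit 4 rule and @Test(timeout = ...) were expressed in milliseconds, so the unit can be stated explicitly when the original budget should be preserved. A hedged sketch of the mapping, not the code in this patch:

import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

// Applies to every test in the class, replacing the JUnit 4 @Rule Timeout field.
// The unit is spelled out to make a 100-second budget unambiguous.
@Timeout(value = 100, unit = TimeUnit.SECONDS)
public class TimeoutMappingExample {

  @Test
  public void finishesWithinTheClassBudget() {
  }

  // A tighter per-method budget can still override the class-level value.
  @Test
  @Timeout(value = 10, unit = TimeUnit.SECONDS)
  public void finishesWithinTenSeconds() {
  }
}
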
+ 21 - 23
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRPCMultipleDestinationMountTableResolver.java

@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.federation.router;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -67,10 +67,10 @@ import org.apache.hadoop.hdfs.tools.federation.RouterAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests router rpc with multiple destination mount table resolver.
@@ -88,7 +88,7 @@ public class TestRouterRPCMultipleDestinationMountTableResolver {
   protected static DistributedFileSystem routerFs;
   protected static RouterRpcServer rpcServer;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
 
     // Build and start a federated cluster
@@ -119,7 +119,7 @@ public class TestRouterRPCMultipleDestinationMountTableResolver {
     rpcServer =routerContext.getRouter().getRpcServer();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.stopRouter(routerContext);
@@ -150,7 +150,7 @@ public class TestRouterRPCMultipleDestinationMountTableResolver {
         1024L);
   }
 
-  @After
+  @AfterEach
   public void resetTestEnvironment() throws IOException {
     RouterClient client = routerContext.getAdminClient();
     MountTableManager mountTableManager = client.getMountTableManager();
@@ -298,8 +298,7 @@ public class TestRouterRPCMultipleDestinationMountTableResolver {
         nnFs0, name, value);
     boolean checkedDir2 = verifyDirectoryLevelInvocations(dirAll, nameSpaceDir,
         nnFs1, name, value);
-    assertTrue("The file didn't existed in either of the subclusters.",
-        checkedDir1 || checkedDir2);
+    assertTrue(checkedDir1 || checkedDir2, "The file didn't exist in either of the subclusters.");
     routerFs.unsetStoragePolicy(mountDir);
     routerFs.removeXAttr(mountDir, name);
     routerFs.unsetErasureCodingPolicy(mountDir);
@@ -308,8 +307,7 @@ public class TestRouterRPCMultipleDestinationMountTableResolver {
         verifyDirectoryLevelUnsetInvocations(dirAll, nnFs0, nameSpaceDir);
     checkedDir2 =
         verifyDirectoryLevelUnsetInvocations(dirAll, nnFs1, nameSpaceDir);
-    assertTrue("The file didn't existed in either of the subclusters.",
-        checkedDir1 || checkedDir2);
+    assertTrue(checkedDir1 || checkedDir2, "The file didn't exist in either of the subclusters.");
 
     // Check invocation for a file.
     routerFs.setOwner(mountFile, "testuser", "testgroup");
@@ -857,7 +855,7 @@ public class TestRouterRPCMultipleDestinationMountTableResolver {
     final Path pathRouterFile = new Path("/mount", pathFile);
     final Path pathLocalFile = new Path("/tmp", pathFile);
     FileStatus fileStatus = routerFs.getFileStatus(pathRouterFile);
-    assertTrue(fileStatus + " should be a file", fileStatus.isFile());
+    assertTrue(fileStatus.isFile(), fileStatus + " should be a file");
     GetDestinationResponse respFile = mountTableManager.getDestination(
         GetDestinationRequest.newInstance(pathRouterFile));
     if (expectFileLocation != null) {
@@ -886,7 +884,7 @@ public class TestRouterRPCMultipleDestinationMountTableResolver {
     final Path pathRouterNestedDir = new Path("/mount", pathNestedDir);
     final Path pathLocalNestedDir = new Path("/tmp", pathNestedDir);
     FileStatus dirStatus = routerFs.getFileStatus(pathRouterNestedDir);
-    assertTrue(dirStatus + " should be a directory", dirStatus.isDirectory());
+    assertTrue(dirStatus.isDirectory(), dirStatus + " should be a directory");
     GetDestinationResponse respDir = mountTableManager.getDestination(
         GetDestinationRequest.newInstance(pathRouterNestedDir));
     assertEqualsCollection(expectDirLocation, respDir.getDestinations());
@@ -905,15 +903,15 @@ public class TestRouterRPCMultipleDestinationMountTableResolver {
     for (String nsId : NS_IDS) {
       final FileSystem fs = getFileSystem(nsId);
       if (expectedLocations.contains(nsId)) {
-        assertTrue(path + " should exist in " + nsId, fs.exists(path));
+        assertTrue(fs.exists(path), path + " should exist in " + nsId);
         final FileStatus status = fs.getFileStatus(path);
         if (isDir) {
-          assertTrue(path + " should be a directory", status.isDirectory());
+          assertTrue(status.isDirectory(), path + " should be a directory");
         } else {
-          assertTrue(path + " should be a file", status.isFile());
+          assertTrue(status.isFile(), path + " should be a file");
         }
       } else {
-        assertFalse(path + " should not exist in " + nsId, fs.exists(path));
+        assertFalse(fs.exists(path), path + " should not exist in " + nsId);
       }
     }
   }

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRefreshSuperUserGroupsConfiguration.java

@@ -31,9 +31,9 @@ import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -47,7 +47,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.LinkedHashSet;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -67,7 +67,7 @@ public class TestRouterRefreshSuperUserGroupsConfiguration {
   private static final String LOOPBACK_ADDRESS = "127.0.0.1";
 
   private String tempResource = null;
-  @Before
+  @BeforeEach
   public void setUpCluster() throws Exception {
     Configuration conf = new RouterConfigBuilder()
         .rpc()
@@ -78,7 +78,7 @@ public class TestRouterRefreshSuperUserGroupsConfiguration {
     cluster.startRouters();
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -165,7 +165,7 @@ public class TestRouterRefreshSuperUserGroupsConfiguration {
     int clientRes =
         routerAdmin.run(new String[]{"-refreshSuperUserGroupsConfiguration"});
 
-    assertEquals("CLI command was not successful", 0, clientRes);
+    assertEquals(0, clientRes, "CLI command was not successful");
     ProxyUsers.authorize(ugi, LOOPBACK_ADDRESS);
   }
 

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRetryCache.java

@@ -28,24 +28,24 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.retry.RetryInvocationHandler;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_IP_PROXY_USERS;
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMENODES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestRouterRetryCache {
   /** Federated HDFS cluster. */
   private MiniRouterDFSCluster cluster;
 
-  @Before
+  @BeforeEach
   public  void setup() throws Exception {
     UserGroupInformation routerUser = UserGroupInformation.getLoginUser();
     Configuration conf = new Configuration();
@@ -81,7 +81,7 @@ public class TestRouterRetryCache {
     cluster.waitActiveNamespaces();
   }
 
-  @After
+  @AfterEach
   public void teardown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();

+ 28 - 29
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java

@@ -30,14 +30,14 @@ import static org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.TEST
 import static org.apache.hadoop.ipc.CallerContext.PROXY_USER_PORT;
 import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
 import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.lang.reflect.Method;
@@ -140,11 +140,11 @@ import org.apache.hadoop.test.LambdaTestUtils;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -213,7 +213,7 @@ public class TestRouterRpc {
   private String nnFile;
 
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     // Start routers with only an RPC service
     Configuration routerConf = new RouterConfigBuilder()
@@ -273,18 +273,18 @@ public class TestRouterRpc {
         .getDatanodeManager().setHeartbeatExpireInterval(3000);
   }
 
-  @After
+  @AfterEach
   public void cleanup() {
     // clear client context
     CallerContext.setCurrent(null);
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     cluster.shutdown();
   }
 
-  @Before
+  @BeforeEach
   public void testSetup() throws Exception {
 
     // Create mock locations
@@ -797,9 +797,8 @@ public class TestRouterRpc {
 
     // Verify the root listing has the item via the router
     FileStatus[] files = routerFS.listStatus(new Path("/"));
-    assertEquals(Arrays.toString(files) + " should be " +
-        Arrays.toString(filesInitial) + " + " + dirPath,
-        filesInitial.length + 1, files.length);
+    assertEquals(filesInitial.length + 1, files.length, Arrays.toString(files) + " should be " +
+        Arrays.toString(filesInitial) + " + " + dirPath);
     assertTrue(verifyFileExists(routerFS, dirPath));
 
     // Verify the directory is present in only 1 Namenode
@@ -1716,8 +1715,8 @@ public class TestRouterRpc {
       assertEquals(1, stats.getCorruptBlocks());
     }
     ReplicatedBlockStats routerStat = routerProtocol.getReplicatedBlockStats();
-    assertEquals("There should be 1 corrupt blocks for each NN",
-        cluster.getNameservices().size(), routerStat.getCorruptBlocks());
+    assertEquals(cluster.getNameservices().size(), routerStat.getCorruptBlocks(),
+        "There should be 1 corrupt blocks for each NN");
   }
 
   @Test
@@ -2183,15 +2182,15 @@ public class TestRouterRpc {
     // Login user, which is used as the router's user, is different from the realUser.
     assertNotEquals(loginUser.getUserName(), realUser.getUserName());
     // Login user is used in the audit log's ugi field.
-    assertTrue("The login user is the proxyUser in the UGI field",
-         logOutput.contains(String.format("ugi=%s (auth:PROXY) via %s (auth:SIMPLE)",
+    assertTrue(
+        logOutput.contains(String.format("ugi=%s (auth:PROXY) via %s (auth:SIMPLE)",
              proxyUser.getUserName(),
-             loginUser.getUserName())));
+             loginUser.getUserName())), "The login user is the proxyUser in the UGI field");
     // Real user is added to the caller context.
-    assertTrue("The audit log should contain the real user.",
-        logOutput.contains(String.format("realUser:%s", realUser.getUserName())));
-    assertTrue("The audit log should contain the proxyuser port.",
-        logOutput.contains(PROXY_USER_PORT));
+    assertTrue(logOutput.contains(String.format("realUser:%s", realUser.getUserName())),
+        "The audit log should contain the real user.");
+    assertTrue(logOutput.contains(PROXY_USER_PORT),
+        "The audit log should contain the proxyuser port.");
   }
 
   @Test

+ 20 - 16
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcMultiDestination.java

@@ -19,11 +19,11 @@ package org.apache.hadoop.hdfs.server.federation.router;
 
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createFile;
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyFileExists;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
@@ -76,15 +76,21 @@ import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.MethodOrderer;
+import org.junit.jupiter.api.Order;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestMethodOrder;
 import org.slf4j.event.Level;
 
 /**
  * The RPC interface of the {@link getRouter()} implemented by
  * {@link RouterRpcServer}.
  */
+@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
 public class TestRouterRpcMultiDestination extends TestRouterRpc {
 
+  @BeforeEach
   @Override
   public void testSetup() throws Exception {
 
@@ -190,9 +196,8 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
     }
 
     // Verify the total number of results found/matched
-    assertEquals(
-        requiredPaths + " doesn't match " + Arrays.toString(partialListing),
-        requiredPaths.size(), partialListing.length);
+    assertEquals(requiredPaths.size(), partialListing.length,
+        requiredPaths + " doesn't match " + Arrays.toString(partialListing));
   }
 
   /**
@@ -412,11 +417,11 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
   }
 
   @Test
+  @Order(1)
   public void testSubclusterDown() throws Exception {
     final int totalFiles = 6;
 
     List<RouterContext> routers = getCluster().getRouters();
-
     // Test the behavior when everything is fine
     FileSystem fs = getRouterFileSystem();
     FileStatus[] files = fs.listStatus(new Path("/"));
@@ -447,8 +452,7 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
     // router1 should report partial results
     RouterContext router1 = routers.get(1);
     files = router1.getFileSystem().listStatus(new Path("/"));
-    assertTrue("Found " + files.length + " items, we should have less",
-        files.length < totalFiles);
+    assertTrue(files.length < totalFiles, "Found " + files.length + " items, we should have less");
 
 
     // Restore the HA context and the Router
@@ -483,8 +487,8 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
       if (line.contains(auditFlag)) {
         // assert origin caller context exist in audit log
         String callerContext = line.substring(line.indexOf("callerContext="));
-        assertTrue(String.format("%s doesn't contain 'clientContext'", callerContext),
-            callerContext.contains("clientContext"));
+        assertTrue(callerContext.contains("clientContext"),
+            String.format("%s doesn't contain 'clientContext'", callerContext));
         // assert client ip info exist in caller context
         checkCallerContextContainsClientIp(clientIpInfos, callerContext);
       }
@@ -505,8 +509,8 @@ public class TestRouterRpcMultiDestination extends TestRouterRpc {
       if (callerContext.contains(curClientIpInfo)) {
         clientIpInfo = curClientIpInfo;
         // assert client ip info appears only once in caller context
-        assertEquals(String.format("%s contains %s more than once", callerContext, clientIpInfo),
-            callerContext.indexOf(clientIpInfo), callerContext.lastIndexOf(clientIpInfo));
+        assertEquals(callerContext.indexOf(clientIpInfo), callerContext.lastIndexOf(clientIpInfo),
+            String.format("%s contains %s more than once", callerContext, clientIpInfo));
         break;
       }
     }

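The @TestMethodOrder and @Order annotations added above opt the class into annotation-based ordering and give testSubclusterDown an explicit position, and the overridden per-test setup now carries its own @BeforeEach. A small sketch of the ordering mechanism with hypothetical test names:

import org.junit.jupiter.api.MethodOrderer;
import org.junit.jupiter.api.Order;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestMethodOrder;

// Methods carrying @Order run in ascending order of their value.
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
public class OrderedTestsExample {

  @Test
  @Order(1)
  public void runsFirst() {
  }

  @Test
  @Order(2)
  public void runsSecond() {
  }
}
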
+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcSingleNS.java

@@ -25,13 +25,13 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.net.URISyntaxException;
@@ -104,7 +104,7 @@ public class TestRouterRpcSingleNS {
    */
   private String nnFile;
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     cluster = new MiniRouterDFSCluster(false, 1);
     cluster.setNumDatanodesPerNameservice(2);
@@ -126,12 +126,12 @@ public class TestRouterRpcSingleNS {
     cluster.waitNamenodeRegistration();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     cluster.shutdown();
   }
 
-  @Before
+  @BeforeEach
   public void testSetup() throws Exception {
 
     // Create mock locations

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpcStoragePolicySatisfier.java

@@ -34,13 +34,13 @@ import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.namenode.sps.Context;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.util.concurrent.TimeUnit;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Test StoragePolicySatisfy through router rpc calls.
@@ -58,7 +58,7 @@ public class TestRouterRpcStoragePolicySatisfier {
   /** Filesystem interface to the Namenode. */
   private static FileSystem nnFS;
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     cluster = new MiniRouterDFSCluster(false, 1);
     // Set storage types for the cluster
@@ -114,7 +114,7 @@ public class TestRouterRpcStoragePolicySatisfier {
     externalSps.start(HdfsConstants.StoragePolicySatisfierMode.EXTERNAL);
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     cluster.shutdown();
   }

+ 16 - 16
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterSafemode.java

@@ -23,10 +23,10 @@ import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_SAFEMODE_EXTENSION;
 import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.deleteStateStore;
 import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -44,11 +44,11 @@ import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test the safe mode for the {@link Router} controlled by
@@ -59,7 +59,7 @@ public class TestRouterSafemode {
   private Router router;
   private static Configuration conf;
 
-  @BeforeClass
+  @BeforeAll
   public static void create() throws IOException {
     // Wipe state store
     deleteStateStore();
@@ -95,18 +95,18 @@ public class TestRouterSafemode {
         .build();
   }
 
-  @AfterClass
+  @AfterAll
   public static void destroy() {
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException, URISyntaxException {
     router = new Router();
     router.init(conf);
     router.start();
   }
 
-  @After
+  @AfterEach
   public void cleanup() throws IOException {
     if (router != null) {
       router.stop();
@@ -203,7 +203,7 @@ public class TestRouterSafemode {
     } catch (StandbyException sme) {
       exception = true;
     }
-    assertTrue("We should have thrown a safe mode exception", exception);
+    assertTrue(exception, "We should have thrown a safe mode exception");
   }
 
   @Test
@@ -278,8 +278,8 @@ public class TestRouterSafemode {
       fail("We should have thrown a safe mode exception");
     } catch (StandbyException e) {
       String msg = e.getMessage();
-      assertTrue("Wrong message: " + msg,
-          msg.endsWith("is in safe mode and cannot handle READ requests"));
+      assertTrue(msg.endsWith("is in safe mode and cannot handle READ requests"),
+          "Wrong message: " + msg);
     }
   }
 }

+ 12 - 12
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterTrash.java

@@ -34,10 +34,10 @@ import org.apache.hadoop.hdfs.server.federation.store.protocol.*;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -46,10 +46,10 @@ import java.net.URISyntaxException;
 import java.util.Collections;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 /**
  * This is a test through the Router move data to the Trash.
@@ -78,7 +78,7 @@ public class TestRouterTrash {
   private static final String TRASH_ROOT = "/user/" + TEST_USER + "/.Trash";
   private static final String CURRENT = "/Current";
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     // Build and start a federated cluster
     cluster = new StateStoreDFSCluster(false, 2);
@@ -107,7 +107,7 @@ public class TestRouterTrash {
     mountTable = (MountTableResolver) router.getSubclusterResolver();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.stopRouter(routerContext);
@@ -116,7 +116,7 @@ public class TestRouterTrash {
     }
   }
 
-  @After
+  @AfterEach
   public void clearMountTable() throws IOException {
     RouterClient client = routerContext.getAdminClient();
     MountTableManager mountTableManager = client.getMountTableManager();
@@ -131,7 +131,7 @@ public class TestRouterTrash {
     }
   }
 
-  @After
+  @AfterEach
   public void clearFile() throws IOException {
     FileStatus[] fileStatuses = nnFs.listStatus(new Path("/"));
     for (FileStatus file : fileStatuses) {

+ 15 - 15
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterUserMappings.java

@@ -39,9 +39,9 @@ import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -63,11 +63,11 @@ import java.util.List;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -127,7 +127,7 @@ public class TestRouterUserMappings {
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() {
     conf = new Configuration(false);
     conf.setClass("hadoop.security.group.mapping",
@@ -295,8 +295,8 @@ public class TestRouterUserMappings {
     PrintStream oldOut = System.out;
     System.setOut(new PrintStream(out));
     new GetGroups(config).run(new String[]{username});
-    assertTrue("Wrong output: " + out,
-        out.toString().startsWith(username + " : " + username));
+    assertTrue(out.toString().startsWith(username + " : " + username),
+        "Wrong output: " + out);
     out.reset();
     System.setOut(oldOut);
   }
@@ -332,7 +332,7 @@ public class TestRouterUserMappings {
     List<String> g2 = groups.getGroups(user);
     LOG.info("Group 2 :{}", g2);
     for(int i = 0; i < g2.size(); i++) {
-      assertEquals("Should be same group ", g1.get(i), g2.get(i));
+      assertEquals(g1.get(i), g2.get(i), "Should be same group ");
     }
 
     // set fs.defaultFS point to router(s).
@@ -346,8 +346,8 @@ public class TestRouterUserMappings {
     List<String> g3 = groups.getGroups(user);
     LOG.info("Group 3:{}", g3);
     for(int i = 0; i < g3.size(); i++) {
-      assertNotEquals("Should be different group: "
-          + g1.get(i) + " and " + g3.get(i), g1.get(i), g3.get(i));
+      assertNotEquals(g1.get(i), g3.get(i), "Should be different group: "
+          + g1.get(i) + " and " + g3.get(i));
     }
 
     // Test timeout
@@ -397,7 +397,7 @@ public class TestRouterUserMappings {
     return tmp;
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (router != null) {
       router.shutDown();

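Besides the annotation swap, the hunks above move the optional failure message from the first argument of org.junit.Assert methods to the last argument of org.junit.jupiter.api.Assertions. A minimal sketch of that reordering, with illustrative values:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

// Illustrative values; not code from the patched tests.
public class AssertionMessageOrderExample {

  void checkOutput(String username, String out) {
    // JUnit 4: assertTrue("Wrong output: " + out, out.startsWith(username));
    // JUnit 5: the condition comes first and the message last.
    assertTrue(out.startsWith(username + " : "), "Wrong output: " + out);
    assertEquals(username, username, "Should be same user");
  }
}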
+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWebHdfsMethods.java

@@ -19,8 +19,8 @@
 package org.apache.hadoop.hdfs.server.federation.router;
 
 import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createMountTableEntry;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -38,9 +38,9 @@ import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.resolver.order.DestinationOrder;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -56,7 +56,7 @@ public class TestRouterWebHdfsMethods {
   protected static RouterContext router;
   protected static String httpUri;
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     cluster = new StateStoreDFSCluster(false, 2);
     Configuration conf = new RouterConfigBuilder()
@@ -74,7 +74,7 @@ public class TestRouterWebHdfsMethods {
     httpUri = "http://"+router.getHttpAddress();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.shutdown();

+ 6 - 10
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterWithSecureStartup.java

@@ -20,14 +20,13 @@ package org.apache.hadoop.hdfs.server.federation.router;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.router.web.RouterWebHDFSContract;
-import org.junit.Rule;
-import org.junit.Test;
-import static org.junit.Assert.assertNotNull;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.Test;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 import java.io.IOException;
 
 import static org.apache.hadoop.fs.contract.router.SecurityConfUtil.initSecurity;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_KEYTAB_FILE_KEY;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 
 
 /**
@@ -38,9 +37,6 @@ public class TestRouterWithSecureStartup {
   private static final String HTTP_KERBEROS_PRINCIPAL_CONF_KEY =
       "hadoop.http.authentication.kerberos.principal";
 
-  @Rule
-  public ExpectedException exceptionRule = ExpectedException.none();
-
   /*
    * hadoop.http.authentication.kerberos.principal has default value, so if we
    * don't config the spnego principal, cluster will still start normally
@@ -71,8 +67,8 @@ public class TestRouterWithSecureStartup {
       throws Exception {
     Configuration conf = initSecurity();
     conf.unset(configToTest);
-    exceptionRule.expect(IOException.class);
-    exceptionRule.expectMessage(message);
-    RouterWebHDFSContract.createCluster(conf);
+    assertThrows(IOException.class, () -> {
+      RouterWebHDFSContract.createCluster(conf);
+    }, message);
   }
 }

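The hunk above replaces the JUnit 4 ExpectedException rule with assertThrows. Note that the third argument of assertThrows is the message reported when no exception is thrown, not a check on the exception text as expectMessage was; if that check is still wanted, one option (an illustrative sketch, not code from this patch) is to assert on the exception returned by assertThrows:

import java.io.IOException;

import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

// Illustrative sketch only; the helper below simulates a failing startup and
// is not part of RouterWebHDFSContract.
public class AssertThrowsExample {

  void expectStartupFailure(String expectedText) {
    IOException e = assertThrows(IOException.class, () -> {
      throw new IOException("simulated startup failure: " + expectedText);
    });
    // JUnit 4's expectMessage() matched the exception text; the explicit
    // JUnit 5 equivalent is to assert on the returned exception.
    assertTrue(e.getMessage().contains(expectedText));
  }
}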
+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestSafeMode.java

@@ -25,9 +25,9 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.RouterContext;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test the SafeMode.
@@ -37,7 +37,7 @@ public class TestSafeMode {
   /** Federated HDFS cluster. */
   private MiniRouterDFSCluster cluster;
 
-  @Before
+  @BeforeEach
   public  void setup() throws Exception {
     cluster = new MiniRouterDFSCluster(true, 2);
 
@@ -64,7 +64,7 @@ public class TestSafeMode {
     cluster.waitActiveNamespaces();
   }
 
-  @After
+  @AfterEach
   public void teardown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/RouterAsyncProtocolTestBase.java

@@ -27,10 +27,10 @@ import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
 import org.apache.hadoop.ipc.CallerContext;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
 import org.mockito.Mockito;
 
 import java.io.IOException;
@@ -40,7 +40,7 @@ import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMEN
 import static org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.DEFAULT_HEARTBEAT_INTERVAL_MS;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_ASYNC_RPC_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_ASYNC_RPC_RESPONDER_COUNT_KEY;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
 * Used to test the functionality of async router rpc.
@@ -58,7 +58,7 @@ public class RouterAsyncProtocolTestBase {
   private RouterRpcServer routerAsyncRpcServer;
   protected static final String TEST_DIR_PATH = "/testdir";
 
-  @BeforeClass
+  @BeforeAll
   public static void setUpCluster() throws Exception {
     cluster = new MiniRouterDFSCluster(true, 1, 2,
         DEFAULT_HEARTBEAT_INTERVAL_MS, 1000);
@@ -96,14 +96,14 @@ public class RouterAsyncProtocolTestBase {
     ns0 = cluster.getNameservices().get(0);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdownCluster() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     router = cluster.getRandomRouter();
     routerFs = router.getFileSystem();
@@ -124,7 +124,7 @@ public class RouterAsyncProtocolTestBase {
     routerFs.mkdirs(new Path(TEST_DIR_PATH), permission);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     // clear client context
     CallerContext.setCurrent(null);

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestAsyncRouterAdmin.java

@@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
 import org.apache.hadoop.hdfs.server.federation.router.TestRouterAdmin;
 import org.apache.hadoop.hdfs.server.federation.router.async.utils.AsyncUtil;
 import org.apache.hadoop.util.Lists;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.BeforeAll;
 import org.mockito.Mockito;
 
 import java.io.IOException;
@@ -43,7 +43,7 @@ import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_
 
 public class TestAsyncRouterAdmin extends TestRouterAdmin {
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     cluster = new StateStoreDFSCluster(false, 1);
     // Build and start a router with State Store + admin + RPC.

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncCacheAdmin.java

@@ -26,13 +26,13 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.EnumSet;
 import static org.apache.hadoop.hdfs.server.federation.router.async.utils.AsyncUtil.syncReturn;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Used to test the functionality of {@link RouterAsyncCacheAdmin}.
@@ -40,7 +40,7 @@ import static org.junit.Assert.assertEquals;
 public class TestRouterAsyncCacheAdmin extends RouterAsyncProtocolTestBase {
   private RouterAsyncCacheAdmin asyncCacheAdmin;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     asyncCacheAdmin = new RouterAsyncCacheAdmin(getRouterAsyncRpcServer());
     FSDataOutputStream fsDataOutputStream = getRouterFs().create(

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncClientProtocol.java

@@ -30,8 +30,8 @@ import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
 import org.apache.hadoop.hdfs.server.federation.router.RouterClientProtocol;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.util.Lists;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -46,9 +46,9 @@ import static org.apache.hadoop.fs.permission.FsAction.READ;
 import static org.apache.hadoop.fs.permission.FsAction.READ_WRITE;
 import static org.apache.hadoop.hdfs.server.federation.router.async.utils.AsyncUtil.syncReturn;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Used to test the functionality of {@link RouterAsyncClientProtocol}.
@@ -58,7 +58,7 @@ public class TestRouterAsyncClientProtocol extends RouterAsyncProtocolTestBase {
   private RouterClientProtocol clientProtocol;
   private final String testPath = TEST_DIR_PATH + "/test";
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     asyncClientProtocol = new RouterAsyncClientProtocol(getRouterConf(), getRouterAsyncRpcServer());
     clientProtocol = new RouterClientProtocol(getRouterConf(), getRouterRpcServer());

+ 13 - 13
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncErasureCoding.java

@@ -36,11 +36,11 @@ import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.ipc.CallerContext;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import java.io.IOException;
@@ -52,10 +52,10 @@ import static org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.DEFA
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_ASYNC_RPC_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_ASYNC_RPC_RESPONDER_COUNT_KEY;
 import static org.apache.hadoop.hdfs.server.federation.router.async.utils.AsyncUtil.syncReturn;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestRouterAsyncErasureCoding {
   private static Configuration routerConf;
@@ -71,7 +71,7 @@ public class TestRouterAsyncErasureCoding {
 
   private final String testfilePath = "/testdir/testAsyncErasureCoding.file";
 
-  @BeforeClass
+  @BeforeAll
   public static void setUpCluster() throws Exception {
     cluster = new MiniRouterDFSCluster(true, 1, 2,
         DEFAULT_HEARTBEAT_INTERVAL_MS, 1000);
@@ -110,14 +110,14 @@ public class TestRouterAsyncErasureCoding {
     ns0 = cluster.getNameservices().get(0);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdownCluster() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     router = cluster.getRandomRouter();
     routerFs = router.getFileSystem();
@@ -142,7 +142,7 @@ public class TestRouterAsyncErasureCoding {
     fsDataOutputStream.close();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     // clear client context
     CallerContext.setCurrent(null);

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncMountTable.java

@@ -25,7 +25,7 @@ import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.router.Router;
 import org.apache.hadoop.hdfs.server.federation.router.TestRouterMountTable;
 import org.apache.hadoop.util.Time;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.BeforeAll;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -35,7 +35,7 @@ import org.slf4j.LoggerFactory;
 public class TestRouterAsyncMountTable extends TestRouterMountTable {
   public static final Logger LOG = LoggerFactory.getLogger(TestRouterAsyncMountTable.class);
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     startTime = Time.now();
 

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncNamenodeProtocol.java

@@ -24,12 +24,12 @@ import org.apache.hadoop.hdfs.server.federation.router.RouterNamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.hdfs.server.federation.router.async.utils.AsyncUtil.syncReturn;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 /**
  * Used to test the functionality of {@link RouterAsyncNamenodeProtocol}.
@@ -39,7 +39,7 @@ public class TestRouterAsyncNamenodeProtocol extends RouterAsyncProtocolTestBase
   private RouterAsyncNamenodeProtocol asyncNamenodeProtocol;
   private RouterNamenodeProtocol namenodeProtocol;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     asyncNamenodeProtocol = new RouterAsyncNamenodeProtocol(getRouterAsyncRpcServer());
     namenodeProtocol = new RouterNamenodeProtocol(getRouterRpcServer());

+ 14 - 14
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncQuota.java

@@ -30,12 +30,12 @@ import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
 import org.apache.hadoop.ipc.CallerContext;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import java.io.IOException;
@@ -47,7 +47,7 @@ import static org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.DEFA
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_ASYNC_RPC_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_ASYNC_RPC_RESPONDER_COUNT_KEY;
 import static org.apache.hadoop.hdfs.server.federation.router.async.utils.AsyncUtil.syncReturn;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestRouterAsyncQuota {
   private static Configuration routerConf;
@@ -63,7 +63,7 @@ public class TestRouterAsyncQuota {
 
   private final String testfilePath = "/testdir/testAsyncQuota.file";
 
-  @BeforeClass
+  @BeforeAll
   public static void setUpCluster() throws Exception {
     cluster = new MiniRouterDFSCluster(true, 1, 2,
         DEFAULT_HEARTBEAT_INTERVAL_MS, 1000);
@@ -104,14 +104,14 @@ public class TestRouterAsyncQuota {
     ns0 = cluster.getNameservices().get(0);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdownCluster() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     router = cluster.getRandomRouter();
     routerFs = router.getFileSystem();
@@ -136,7 +136,7 @@ public class TestRouterAsyncQuota {
     fsDataOutputStream.close();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     // clear client context
     CallerContext.setCurrent(null);
@@ -152,9 +152,9 @@ public class TestRouterAsyncQuota {
     asyncQuota.getQuotaUsage("/testdir");
     QuotaUsage quotaUsage = syncReturn(QuotaUsage.class);
     // 3-replication.
-    Assert.assertEquals(3 * 1024, quotaUsage.getSpaceConsumed());
+    Assertions.assertEquals(3 * 1024, quotaUsage.getSpaceConsumed());
     // We have one directory and one file.
-    Assert.assertEquals(2, quotaUsage.getFileAndDirectoryCount());
+    Assertions.assertEquals(2, quotaUsage.getFileAndDirectoryCount());
   }
 
   @Test
@@ -163,6 +163,6 @@ public class TestRouterAsyncQuota {
     syncReturn(void.class);
     asyncQuota.getQuotaUsage("/testdir");
     QuotaUsage quotaUsage = syncReturn(QuotaUsage.class);
-    Assert.assertEquals(8096, quotaUsage.getTypeQuota(StorageType.DISK));
+    Assertions.assertEquals(8096, quotaUsage.getTypeQuota(StorageType.DISK));
   }
 }

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncRPCMultipleDestinationMountTableResolver.java

@@ -37,8 +37,8 @@ import org.apache.hadoop.hdfs.server.federation.router.RouterQuotaUsage;
 import org.apache.hadoop.hdfs.server.federation.router.TestRouterRPCMultipleDestinationMountTableResolver;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -48,9 +48,9 @@ import java.util.Map;
 
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_ASYNC_RPC_ENABLE_KEY;
 import static org.apache.hadoop.hdfs.server.federation.router.async.utils.AsyncUtil.syncReturn;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Tests router async rpc with multiple destination mount table resolver.
@@ -61,7 +61,7 @@ public class TestRouterAsyncRPCMultipleDestinationMountTableResolver extends
   public static final Logger LOG =
       LoggerFactory.getLogger(TestRouterAsyncRPCMultipleDestinationMountTableResolver.class);
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
 
     // Build and start a federated cluster.

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncRpc.java

@@ -25,16 +25,16 @@ import org.apache.hadoop.hdfs.server.federation.fairness.RouterRpcFairnessPolicy
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.router.TestRouterRpc;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_ASYNC_RPC_ENABLE_KEY;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FAIRNESS_POLICY_CONTROLLER_CLASS;
 import static org.apache.hadoop.hdfs.server.federation.router.async.utils.AsyncUtil.syncReturn;
-import static org.junit.Assert.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 
 /**
  * Testing the asynchronous RPC functionality of the router.
@@ -43,7 +43,7 @@ public class TestRouterAsyncRpc extends TestRouterRpc {
   private static MiniRouterDFSCluster cluster;
   private MiniRouterDFSCluster.RouterContext rndRouter;
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     // Start routers with only an RPC service.
     Configuration routerConf = new RouterConfigBuilder()
@@ -62,7 +62,7 @@ public class TestRouterAsyncRpc extends TestRouterRpc {
     setUp(routerConf);
   }
 
-  @Before
+  @BeforeEach
   public void testSetup() throws Exception {
     super.testSetup();
     cluster = super.getCluster();

+ 12 - 12
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncRpcClient.java

@@ -42,11 +42,11 @@ import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -60,9 +60,9 @@ import static org.apache.hadoop.hdfs.server.federation.MiniRouterDFSCluster.DEFA
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_ASYNC_RPC_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_ASYNC_RPC_RESPONDER_COUNT_KEY;
 import static org.apache.hadoop.hdfs.server.federation.router.async.utils.AsyncUtil.syncReturn;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Used to test the functionality of {@link RouterAsyncRpcClient}.
@@ -86,7 +86,7 @@ public class TestRouterAsyncRpcClient {
    * Start a cluster using a router service that includes 2 namespaces,
    * 6 namenodes and 6 datanodes.
    */
-  @BeforeClass
+  @BeforeAll
   public static void setUpCluster() throws Exception {
     cluster = new MiniRouterDFSCluster(true, 2, 3,
         DEFAULT_HEARTBEAT_INTERVAL_MS, 1000);
@@ -127,7 +127,7 @@ public class TestRouterAsyncRpcClient {
     ns1 = cluster.getNameservices().get(1);
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdownCluster() {
     if (cluster != null) {
       cluster.shutdown();
@@ -137,7 +137,7 @@ public class TestRouterAsyncRpcClient {
   /**
    * Initialize the mount table, create a RouterAsyncRpcClient object, and create test file.
    */
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     // Create mock locations
     installMockLocations();
@@ -161,7 +161,7 @@ public class TestRouterAsyncRpcClient {
     fsDataOutputStream.close();
   }
 
-  @After
+  @AfterEach
   public void down() throws IOException {
     // clear client context
     CallerContext.setCurrent(null);

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncRpcMultiDestination.java

@@ -25,22 +25,22 @@ import org.apache.hadoop.hdfs.server.federation.fairness.RouterRpcFairnessPolicy
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.router.TestRouterRpcMultiDestination;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_ASYNC_RPC_ENABLE_KEY;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.DFS_ROUTER_FAIRNESS_POLICY_CONTROLLER_CLASS;
 import static org.apache.hadoop.hdfs.server.federation.router.async.utils.AsyncUtil.syncReturn;
-import static org.junit.Assert.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 
 /**
  * Testing the asynchronous RPC functionality of the router with multiple mounts.
  */
 public class TestRouterAsyncRpcMultiDestination extends TestRouterRpcMultiDestination {
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     // Start routers with only an RPC service
     Configuration routerConf = new RouterConfigBuilder()

+ 7 - 6
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncRpcServer.java

@@ -24,16 +24,17 @@ import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
 import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod;
 import org.apache.hadoop.hdfs.server.federation.router.RouterRpcServer;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 
 import static org.apache.hadoop.hdfs.server.federation.router.async.utils.AsyncUtil.syncReturn;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 
 /**
  * Used to test the async functionality of {@link RouterRpcServer}.
@@ -41,7 +42,7 @@ import static org.junit.Assert.assertNotNull;
 public class TestRouterAsyncRpcServer extends RouterAsyncProtocolTestBase {
   private RouterRpcServer asyncRouterRpcServer;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     asyncRouterRpcServer = getRouterAsyncRpcServer();
   }
@@ -91,6 +92,6 @@ public class TestRouterAsyncRpcServer extends RouterAsyncProtocolTestBase {
 
     asyncRouterRpcServer.getSlowDatanodeReportAsync(true, 0);
     DatanodeInfo[] slowDatanodeReport2 = syncReturn(DatanodeInfo[].class);
-    assertEquals(slowDatanodeReport1, slowDatanodeReport2);
+    assertArrayEquals(slowDatanodeReport1, slowDatanodeReport2);
   }
 }

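The last hunk above also corrects an assertion: Assertions.assertEquals(Object, Object) compares the two DatanodeInfo[] references, whereas assertArrayEquals compares their elements. A small illustrative sketch of the difference:

import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;

// Illustrative values standing in for the two DatanodeInfo[] reports.
public class ArrayAssertionExample {

  void compareReports() {
    String[] first = {"dn1", "dn2"};
    String[] second = {"dn1", "dn2"};
    assertArrayEquals(first, second);   // passes: element-by-element comparison
    assertNotEquals(first, second);     // distinct array objects are not equals()
  }
}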
+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncSnapshot.java

@@ -26,13 +26,13 @@ import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.SnapshotStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import java.io.IOException;
 
 import static org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType.MODIFY;
 import static org.apache.hadoop.hdfs.server.federation.router.async.utils.AsyncUtil.syncReturn;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Used to test the functionality of {@link RouterAsyncSnapshot}.
@@ -42,7 +42,7 @@ public class TestRouterAsyncSnapshot extends RouterAsyncProtocolTestBase {
   private FileSystem routerFs;
   private RouterAsyncSnapshot asyncSnapshot;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     routerFs = getRouterFs();
     asyncSnapshot = new RouterAsyncSnapshot(getRouterAsyncRpcServer());

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncStoragePolicy.java

@@ -20,14 +20,14 @@ package org.apache.hadoop.hdfs.server.federation.router.async;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import java.io.IOException;
 
 import static org.apache.hadoop.hdfs.server.federation.router.async.utils.AsyncUtil.syncReturn;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
 
 /**
  * Used to test the functionality of {@link RouterAsyncStoragePolicy}.
@@ -36,7 +36,7 @@ public class TestRouterAsyncStoragePolicy extends RouterAsyncProtocolTestBase {
   private final String testfilePath = "/testdir/testAsyncStoragePolicy.file";
   private RouterAsyncStoragePolicy asyncStoragePolicy;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     asyncStoragePolicy = new RouterAsyncStoragePolicy(getRouterAsyncRpcServer());
     FSDataOutputStream fsDataOutputStream = getRouterFs().create(

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncUserProtocol.java

@@ -18,11 +18,11 @@
 package org.apache.hadoop.hdfs.server.federation.router.async;
 
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.hdfs.server.federation.router.async.utils.AsyncUtil.syncReturn;
-import static org.junit.Assert.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
 
 /**
  * Used to test the functionality of {@link RouterAsyncUserProtocol}.
@@ -31,7 +31,7 @@ public class TestRouterAsyncUserProtocol extends RouterAsyncProtocolTestBase {
 
   private RouterAsyncUserProtocol asyncUserProtocol;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     asyncUserProtocol = new RouterAsyncUserProtocol(getRouterAsyncRpcServer());
   }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/TestRouterAsyncWebHdfsMethods.java

@@ -22,7 +22,7 @@ import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
 import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
 import org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys;
 import org.apache.hadoop.hdfs.server.federation.router.TestRouterWebHdfsMethods;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.BeforeAll;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -33,7 +33,7 @@ public class TestRouterAsyncWebHdfsMethods extends TestRouterWebHdfsMethods {
   public static final Logger LOG =
       LoggerFactory.getLogger(TestRouterAsyncWebHdfsMethods.class);
 
-  @BeforeClass
+  @BeforeAll
   public static void globalSetUp() throws Exception {
     cluster = new StateStoreDFSCluster(false, 2);
     Configuration conf = new RouterConfigBuilder()

+ 6 - 8
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/async/utils/TestAsyncUtil.java

@@ -19,8 +19,7 @@ package org.apache.hadoop.hdfs.server.federation.router.async.utils;
 
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.params.ParameterizedTest;
 import org.junit.jupiter.params.provider.EnumSource;
@@ -31,10 +30,10 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.concurrent.Callable;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * The TestAsyncUtil class provides a suite of test cases for the
@@ -82,7 +81,6 @@ public class TestAsyncUtil {
     ASYNC
   }
 
-  @Before
   public void setUp(ExecutionMode mode) {
     if (mode.equals(ExecutionMode.ASYNC)) {
       baseClass = new AsyncClass(TIME_CONSUMING);
@@ -92,7 +90,7 @@ public class TestAsyncUtil {
     }
   }
 
-  @After
+  @AfterEach
   public void after() {
     baseClass = null;
     enableAsync = false;

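In TestAsyncUtil the @Before annotation is dropped rather than converted, because a JUnit 5 lifecycle method cannot take a test-specific argument such as ExecutionMode; the class already uses @ParameterizedTest with @EnumSource (see the imports above), so each test presumably receives the mode and invokes setUp itself. An illustrative sketch of that pattern (class, enum, and fixture are placeholders):

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

import static org.junit.jupiter.api.Assertions.assertNotNull;

// Illustrative sketch of the parameterized pattern implied by the imports in
// TestAsyncUtil; not code from the patch.
public class ParameterizedSetupExample {

  enum ExecutionMode { SYNC, ASYNC }

  private Object fixture;

  // Not a lifecycle method: @BeforeEach cannot receive a per-test argument,
  // so each parameterized test calls setUp(mode) itself.
  void setUp(ExecutionMode mode) {
    fixture = (mode == ExecutionMode.ASYNC) ? new StringBuilder() : new Object();
  }

  @ParameterizedTest
  @EnumSource(ExecutionMode.class)
  void runsOncePerMode(ExecutionMode mode) {
    setUp(mode);
    assertNotNull(fixture);
  }

  @AfterEach
  void after() {
    fixture = null;
  }
}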
+ 43 - 43
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/security/token/TestSQLDelegationTokenSecretManagerImpl.java

@@ -43,12 +43,12 @@ import org.apache.hadoop.security.token.delegation.web.DelegationTokenManager;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 
 public class TestSQLDelegationTokenSecretManagerImpl {
@@ -58,17 +58,17 @@ public class TestSQLDelegationTokenSecretManagerImpl {
   private static final int TOKEN_EXPIRATION_SCAN_SECONDS = 1;
   private static Configuration conf;
 
-  @Before
+  @BeforeEach
   public void init() throws SQLException {
     createTestDBTables();
   }
 
-  @After
+  @AfterEach
   public void cleanup() throws SQLException {
     dropTestDBTables();
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void initDatabase() throws SQLException {
     DriverManager.getConnection(CONNECTION_URL + ";create=true");
 
@@ -82,7 +82,7 @@ public class TestSQLDelegationTokenSecretManagerImpl {
     conf.setInt(DelegationTokenManager.REMOVAL_SCAN_INTERVAL, TOKEN_EXPIRATION_SCAN_SECONDS);
   }
 
-  @AfterClass
+  @AfterAll
   public static void cleanupDatabase() {
     try {
       DriverManager.getConnection(CONNECTION_URL + ";drop=true");
@@ -226,10 +226,10 @@ public class TestSQLDelegationTokenSecretManagerImpl {
         }
       }, 100, 6000);
 
-      Assert.assertTrue("Renewed token must not be cleaned up",
-          isTokenInSQL(secretManager, tokenId1));
-      Assert.assertTrue("Token with future expiration must not be cleaned up",
-          isTokenInSQL(secretManager, tokenId3));
+      Assertions.assertTrue(isTokenInSQL(secretManager, tokenId1),
+          "Renewed token must not be cleaned up");
+      Assertions.assertTrue(isTokenInSQL(secretManager, tokenId3),
+          "Token with future expiration must not be cleaned up");
     } finally {
       stopTokenManager(tokenManager);
     }
@@ -262,7 +262,7 @@ public class TestSQLDelegationTokenSecretManagerImpl {
       TestDelegationTokenSecretManager secretManager, AbstractDelegationTokenIdentifier tokenId,
       boolean expectedInSQL) throws SQLException {
     secretManager.removeExpiredStoredToken(tokenId);
-    Assert.assertEquals(expectedInSQL, isTokenInSQL(secretManager, tokenId));
+    Assertions.assertEquals(expectedInSQL, isTokenInSQL(secretManager, tokenId));
   }
 
   private boolean isTokenInSQL(TestDelegationTokenSecretManager secretManager,
@@ -291,36 +291,36 @@ public class TestSQLDelegationTokenSecretManagerImpl {
         sequenceNums.addAll(sequenceNums3);
       }
 
-      Assert.assertEquals("Verify that all tokens were created with unique sequence numbers",
-          tokensPerManager * 3, sequenceNums.size());
-      Assert.assertEquals("Verify that tokenManager1 generated unique sequence numbers",
-          tokensPerManager, sequenceNums1.size());
-      Assert.assertEquals("Verify that tokenManager2 generated unique sequence number",
-          tokensPerManager, sequenceNums2.size());
-      Assert.assertEquals("Verify that tokenManager3 generated unique sequence numbers",
-          tokensPerManager, sequenceNums3.size());
+      Assertions.assertEquals(tokensPerManager * 3, sequenceNums.size(),
+          "Verify that all tokens were created with unique sequence numbers");
+      Assertions.assertEquals(tokensPerManager, sequenceNums1.size(),
+          "Verify that tokenManager1 generated unique sequence numbers");
+      Assertions.assertEquals(tokensPerManager, sequenceNums2.size(),
+          "Verify that tokenManager2 generated unique sequence number");
+      Assertions.assertEquals(tokensPerManager, sequenceNums3.size(),
+          "Verify that tokenManager3 generated unique sequence numbers");
 
       // Validate sequence number batches allocated in order to each token manager
       int batchSize = SQLDelegationTokenSecretManagerImpl.DEFAULT_SEQ_NUM_BATCH_SIZE;
       for (int seqNum = 1; seqNum < tokensPerManager;) {
         // First batch allocated tokenManager1
         for (int i = 0; i < batchSize; i++, seqNum++) {
-          Assert.assertTrue(sequenceNums1.contains(seqNum));
+          Assertions.assertTrue(sequenceNums1.contains(seqNum));
         }
         // Second batch allocated tokenManager2
         for (int i = 0; i < batchSize; i++, seqNum++) {
-          Assert.assertTrue(sequenceNums2.contains(seqNum));
+          Assertions.assertTrue(sequenceNums2.contains(seqNum));
         }
         // Third batch allocated tokenManager3
         for (int i = 0; i < batchSize; i++, seqNum++) {
-          Assert.assertTrue(sequenceNums3.contains(seqNum));
+          Assertions.assertTrue(sequenceNums3.contains(seqNum));
         }
       }
 
       SQLDelegationTokenSecretManagerImpl secretManager =
           (SQLDelegationTokenSecretManagerImpl) tokenManager1.getDelegationTokenSecretManager();
-      Assert.assertEquals("Verify that the counter is set to the highest sequence number",
-          tokensPerManager * 3, secretManager.getDelegationTokenSeqNum());
+      Assertions.assertEquals(tokensPerManager * 3, secretManager.getDelegationTokenSeqNum(),
+          "Verify that the counter is set to the highest sequence number");
     } finally {
       stopTokenManager(tokenManager1);
       stopTokenManager(tokenManager2);
@@ -343,13 +343,13 @@ public class TestSQLDelegationTokenSecretManagerImpl {
       // Allocate sequence numbers before they are rolled over
       for (int seqNum = Integer.MAX_VALUE - tokenBatch; seqNum < Integer.MAX_VALUE; seqNum++) {
         allocateSequenceNum(tokenManager, sequenceNums);
-        Assert.assertTrue(sequenceNums.contains(seqNum + 1));
+        Assertions.assertTrue(sequenceNums.contains(seqNum + 1));
       }
 
       // Allocate sequence numbers after they are rolled over
       for (int seqNum = 0; seqNum < tokenBatch; seqNum++) {
         allocateSequenceNum(tokenManager, sequenceNums);
-        Assert.assertTrue(sequenceNums.contains(seqNum + 1));
+        Assertions.assertTrue(sequenceNums.contains(seqNum + 1));
       }
     } finally {
       stopTokenManager(tokenManager);
@@ -381,7 +381,7 @@ public class TestSQLDelegationTokenSecretManagerImpl {
         ((TestDelegationTokenSecretManager) secretManager2).lockKeyRoll();
         int keyId2 = secretManager2.getCurrentKeyId();
 
-        Assert.assertNotEquals("Each secret manager has its own key", keyId1, keyId2);
+        Assertions.assertNotEquals(keyId1, keyId2, "Each secret manager has its own key");
 
         // Validate that latest key2 is assigned to tokenManager2 tokens
         Token<? extends AbstractDelegationTokenIdentifier> token2 =
@@ -416,7 +416,7 @@ public class TestSQLDelegationTokenSecretManagerImpl {
 
     // Verifying property is correctly set in datasource
     HikariDataSourceConnectionFactory factory2 = new HikariDataSourceConnectionFactory(hikariConf);
-    Assert.assertEquals(factory2.getDataSource().getMaximumPoolSize(),
+    Assertions.assertEquals(factory2.getDataSource().getMaximumPoolSize(),
         defaultMaximumPoolSize + 1);
     factory2.shutdown();
   }
@@ -434,7 +434,7 @@ public class TestSQLDelegationTokenSecretManagerImpl {
       // Reset counter and expect a single request when inserting a token
       TestRetryHandler.resetExecutionAttemptCounter();
       tokenManager.createToken(UserGroupInformation.getCurrentUser(), "foo");
-      Assert.assertEquals(1, TestRetryHandler.getExecutionAttempts());
+      Assertions.assertEquals(1, TestRetryHandler.getExecutionAttempts());
 
       // Breaking database connections to cause retries
       secretManager.setReadOnly(true);
@@ -442,7 +442,7 @@ public class TestSQLDelegationTokenSecretManagerImpl {
       // Reset counter and expect a multiple retries when failing to insert a token
       TestRetryHandler.resetExecutionAttemptCounter();
       tokenManager.createToken(UserGroupInformation.getCurrentUser(), "foo");
-      Assert.assertEquals(TEST_MAX_RETRIES + 1, TestRetryHandler.getExecutionAttempts());
+      Assertions.assertEquals(TEST_MAX_RETRIES + 1, TestRetryHandler.getExecutionAttempts());
     } finally {
       // Fix database connections
       secretManager.setReadOnly(false);
@@ -466,8 +466,8 @@ public class TestSQLDelegationTokenSecretManagerImpl {
     Token<? extends AbstractDelegationTokenIdentifier> token =
         tokenManager.createToken(UserGroupInformation.getCurrentUser(), "foo");
     AbstractDelegationTokenIdentifier tokenIdentifier = token.decodeIdentifier();
-    Assert.assertFalse("Verify sequence number is unique",
-        sequenceNums.contains(tokenIdentifier.getSequenceNumber()));
+    Assertions.assertFalse(sequenceNums.contains(tokenIdentifier.getSequenceNumber()),
+        "Verify sequence number is unique");
 
     sequenceNums.add(tokenIdentifier.getSequenceNumber());
   }
@@ -484,29 +484,29 @@ public class TestSQLDelegationTokenSecretManagerImpl {
 
     byte[] tokenInfo1 = secretManager.selectTokenInfo(tokenIdentifier.getSequenceNumber(),
         tokenIdentifier.getBytes());
-    Assert.assertNotNull("Verify token exists in database", tokenInfo1);
+    Assertions.assertNotNull(tokenInfo1, "Verify token exists in database");
 
     // Renew token using token manager
     tokenManager.renewToken(token, "foo");
 
     byte[] tokenInfo2 = secretManager.selectTokenInfo(tokenIdentifier.getSequenceNumber(),
         tokenIdentifier.getBytes());
-    Assert.assertNotNull("Verify token exists in database", tokenInfo2);
-    Assert.assertFalse("Verify token has been updated in database",
-        Arrays.equals(tokenInfo1, tokenInfo2));
+    Assertions.assertNotNull(tokenInfo2, "Verify token exists in database");
+    Assertions.assertFalse(Arrays.equals(tokenInfo1, tokenInfo2),
+        "Verify token has been updated in database");
 
     // Cancel token using token manager
     tokenManager.cancelToken(token, "foo");
     byte[] tokenInfo3 = secretManager.selectTokenInfo(tokenIdentifier.getSequenceNumber(),
         tokenIdentifier.getBytes());
-    Assert.assertNull("Verify token was removed from database", tokenInfo3);
+    Assertions.assertNull(tokenInfo3, "Verify token was removed from database");
   }
 
   private void validateKeyId(Token<? extends AbstractDelegationTokenIdentifier> token,
       int expectedKeyiD) throws IOException {
     AbstractDelegationTokenIdentifier tokenIdentifier = token.decodeIdentifier();
-    Assert.assertEquals("Verify that keyId is assigned to token",
-        tokenIdentifier.getMasterKeyId(), expectedKeyiD);
+    Assertions.assertEquals(tokenIdentifier.getMasterKeyId(), expectedKeyiD,
+        "Verify that keyId is assigned to token");
   }
 
   private static Connection getTestDBConnection() {

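This file keeps qualified assertion calls, so the migration here is from org.junit.Assert.<method>(message, ...) to org.junit.jupiter.api.Assertions.<method>(..., message). A short illustrative sketch:

import java.util.Arrays;

import org.junit.jupiter.api.Assertions;

// Illustrative sketch; the byte arrays stand in for serialized token info.
public class QualifiedAssertionsExample {

  void verifyTokenUpdated(byte[] before, byte[] after) {
    Assertions.assertNotNull(before, "Verify token exists in database");
    Assertions.assertFalse(Arrays.equals(before, after),
        "Verify token has been updated in database");
  }
}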
+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreFile.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.federation.store.driver;
 import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration;
 import static org.apache.hadoop.hdfs.server.federation.router.RBFConfigKeys.FEDERATION_STORE_FILE_ASYNC_THREADS;
 
-import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;