
HDFS-12431. [JDK17] Upgrade JUnit from 4 to 5 in hadoop-hdfs Part3. (#7626)

zhtttylz 1 week ago
Parent
Commit
5770647dc7
49 changed files with 996 additions and 888 deletions
  1. + 20 - 0    hadoop-hdfs-project/hadoop-hdfs/pom.xml
  2. + 25 - 19   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java
  3. + 10 - 7    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java
  4. + 2 - 2     hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java
  5. + 55 - 54   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java
  6. + 4 - 2     hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderIoProvider.java
  7. + 65 - 52   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java
  8. + 17 - 15   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalLegacy.java
  9. + 15 - 11   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java
  10. + 9 - 7    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderRemote.java
  11. + 5 - 5    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestClientBlockVerification.java
  12. + 12 - 15  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java
  13. + 9 - 9    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopologyPerformance.java
  14. + 6 - 6    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestAnnotations.java
  15. + 10 - 12  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java
  16. + 36 - 32  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
  17. + 5 - 3    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java
  18. + 4 - 2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java
  19. + 5 - 5    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java
  20. + 8 - 8    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestBlackListBasedTrustedChannelResolver.java
  21. + 7 - 5    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestCustomizedCallbackHandler.java
  22. + 28 - 30  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
  23. + 11 - 15  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java
  24. + 50 - 53  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
  25. + 10 - 11  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java
  26. + 6 - 4    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestMiniJournalCluster.java
  27. + 16 - 10  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
  28. + 15 - 18  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java
  29. + 6 - 5    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java
  30. + 10 - 5   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java
  31. + 7 - 10   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java
  32. + 13 - 8   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java
  33. + 13 - 10  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java
  34. + 2 - 3    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestSegmentRecoveryComparator.java
  35. + 3 - 3    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java
  36. + 41 - 32  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java
  37. + 8 - 7    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeHttpServerXFrame.java
  38. + 9 - 9    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
  39. + 27 - 25  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java
  40. + 19 - 14  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java
  41. + 89 - 88  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
  42. + 17 - 13  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
  43. + 18 - 20  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
  44. + 13 - 11  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestUpdateDataNodeCurrentKey.java
  45. + 1 - 1    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithSaslDataTransfer.java
  46. + 42 - 44  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
  47. + 34 - 36  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java
  48. + 110 - 90 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
  49. + 49 - 42  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java

+ 20 - 0
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -241,6 +241,26 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-annotations</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-api</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-engine</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.jupiter</groupId>
+      <artifactId>junit-jupiter-params</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.junit.platform</groupId>
+      <artifactId>junit-platform-launcher</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>
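
Note: junit-jupiter-api and junit-jupiter-params are the artifacts the test sources compile against, while junit-jupiter-engine and junit-platform-launcher are only needed at test runtime by Surefire. As a rough sanity check of the new classpath, a minimal Jupiter test might look like the sketch below; the class name and values are hypothetical and not part of this change.

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;

// Hypothetical smoke test: only shows that junit-jupiter-api and
// junit-jupiter-params resolve as test-scoped dependencies.
public class JupiterSmokeTest {

  @Test
  public void plainJupiterTest() {
    assertEquals(4, 2 + 2);
  }

  @ParameterizedTest
  @ValueSource(strings = {"authentication", "integrity", "privacy"})
  public void parameterizedJupiterTest(String qop) {
    assertEquals(qop, qop.trim());
  }
}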

+ 25 - 19
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java

@@ -34,23 +34,22 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.security.TestPermission;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
+import org.junit.jupiter.api.AfterEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.MethodSource;
 
 
 /**
  * This tests enabling NN sending the established QOP back to client,
  * in encrypted message, using block access token key.
  */
-@RunWith(Parameterized.class)
 public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase {
   public static final Logger LOG = LoggerFactory.getLogger(TestPermission.class);
 
@@ -61,7 +60,6 @@ public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase {
   private String configKey;
   private String qopValue;
 
-  @Parameterized.Parameters
   public static Collection<Object[]> qopSettings() {
     // if configured with privacy, the negotiated QOP should auth-conf
     // similarly for the other two
@@ -72,12 +70,13 @@ public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase {
     });
   }
 
-  public TestBlockTokenWrappingQOP(String configKey, String qopValue) {
-    this.configKey = configKey;
-    this.qopValue = qopValue;
+  public void initTestBlockTokenWrappingQOP(String pconfigKey, String pqopValue)
+      throws Exception {
+    this.configKey = pconfigKey;
+    this.qopValue = pqopValue;
+    setup();
   }
 
-  @Before
   public void setup() throws Exception {
     conf = createSecureConfig(this.configKey);
     conf.set(DFS_NAMENODE_RPC_ADDRESS_AUXILIARY_KEY, "12000");
@@ -109,15 +108,17 @@ public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase {
     dfs = (DistributedFileSystem) FileSystem.get(uriAuxiliary, conf);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Test
-  public void testAddBlockWrappingQOP() throws Exception {
+  @MethodSource("qopSettings")
+  @ParameterizedTest
+  public void testAddBlockWrappingQOP(String pconfigKey, String pqopValue) throws Exception {
+    initTestBlockTokenWrappingQOP(pconfigKey, pqopValue);
     final String src = "/testAddBlockWrappingQOP";
     final Path path = new Path(src);
 
@@ -132,8 +133,10 @@ public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase {
     assertEquals(this.qopValue, new String(secret));
   }
 
-  @Test
-  public void testAppendWrappingQOP() throws Exception {
+  @MethodSource("qopSettings")
+  @ParameterizedTest
+  public void testAppendWrappingQOP(String pconfigKey, String pqopValue) throws Exception {
+    initTestBlockTokenWrappingQOP(pconfigKey, pqopValue);
     final String src = "/testAppendWrappingQOP";
     final Path path = new Path(src);
 
@@ -155,8 +158,11 @@ public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase {
     assertEquals(this.qopValue, new String(secret));
   }
 
-  @Test
-  public void testGetBlockLocationWrappingQOP() throws Exception {
+  @MethodSource("qopSettings")
+  @ParameterizedTest
+  public void testGetBlockLocationWrappingQOP(String pconfigKey, String pqopValue)
+      throws Exception {
+    initTestBlockTokenWrappingQOP(pconfigKey, pqopValue);
     final String src = "/testGetBlockLocationWrappingQOP";
     final Path path = new Path(src);
 

+ 10 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java

@@ -32,8 +32,8 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.security.token.Token;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_DERIVED_QOP_KEY;
@@ -41,7 +41,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDR
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SEND_QOP_ENABLED;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_AUXILIARY_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_OVERWRITE_DOWNSTREAM_NEW_QOP_KEY;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 /**
@@ -58,7 +61,7 @@ public class TestMultipleNNPortQOP extends SaslDataTransferTestCase {
 
   private static HdfsConfiguration clusterConf;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     clusterConf = createSecureConfig(
         "authentication,integrity,privacy");
@@ -259,7 +262,7 @@ public class TestMultipleNNPortQOP extends SaslDataTransferTestCase {
       // datanodes become equal to auth.
       // Note that it is not necessarily the case for all datanodes,
       // since a datanode may be always at the last position in pipelines.
-      assertTrue("At least two qops should be auth", count >= 2);
+      assertTrue(count >= 2, "At least two qops should be auth");
 
       clientConf.set(HADOOP_RPC_PROTECTION, "integrity");
       FileSystem fsIntegrity = FileSystem.get(uriIntegrityPort, clientConf);
@@ -268,7 +271,7 @@ public class TestMultipleNNPortQOP extends SaslDataTransferTestCase {
           .map(dn -> dn.getSaslClient().getTargetQOP())
           .filter("auth"::equals)
           .count();
-      assertTrue("At least two qops should be auth", count >= 2);
+      assertTrue(count >= 2, "At least two qops should be auth");
 
       clientConf.set(HADOOP_RPC_PROTECTION, "authentication");
       FileSystem fsAuth = FileSystem.get(uriAuthPort, clientConf);
@@ -277,7 +280,7 @@ public class TestMultipleNNPortQOP extends SaslDataTransferTestCase {
           .map(dn -> dn.getSaslServer().getNegotiatedQOP())
           .filter("auth"::equals)
           .count();
-      assertEquals("All qops should be auth", 3, count);
+      assertEquals(3, count, "All qops should be auth");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
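
The assertion changes in this file are purely argument order: JUnit 4 puts the failure message first, Jupiter puts it last. A minimal, hypothetical illustration of the two message-bearing forms used here:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

public class AssertionMessageOrderSketch {
  public static void main(String[] args) {
    long count = 3;
    // JUnit 4: assertTrue("At least two qops should be auth", count >= 2);
    assertTrue(count >= 2, "At least two qops should be auth");
    // JUnit 4: assertEquals("All qops should be auth", 3, count);
    assertEquals(3, count, "All qops should be auth");
  }
}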

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java

@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.hdfs.client.impl;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.DataOutputStream;
 import java.io.IOException;

+ 55 - 54
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderFactory.java

@@ -24,7 +24,13 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_DISABLE_INTERVAL_SECOND_DEFAULT;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_DISABLE_INTERVAL_SECOND_KEY;
-import static org.hamcrest.CoreMatchers.equalTo;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -61,37 +67,28 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.hamcrest.CoreMatchers;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+@Timeout(180)
 public class TestBlockReaderFactory {
   static final Logger LOG =
       LoggerFactory.getLogger(TestBlockReaderFactory.class);
 
-  @Rule
-  public final Timeout globalTimeout = new Timeout(180000);
 
-  @Rule
-  public ExpectedException thrown = ExpectedException.none();
-
-  @Before
+  @BeforeEach
   public void init() {
     DomainSocket.disableBindPathValidation();
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    assumeTrue(DomainSocket.getLoadingFailureReason() == null);
   }
 
-  @After
+  @AfterEach
   public void cleanup() {
     DFSInputStream.tcpReadsDisabledForTesting = false;
     BlockReaderFactory.createShortCircuitReplicaInfoCallback = null;
@@ -117,7 +114,8 @@ public class TestBlockReaderFactory {
    * and short-circuit access fails, we should still be able to pass
    * data traffic over the UNIX domain socket.  Test this.
    */
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testFallbackFromShortCircuitToUnixDomainTraffic()
       throws Exception {
     DFSInputStream.tcpReadsDisabledForTesting = true;
@@ -145,7 +143,7 @@ public class TestBlockReaderFactory {
     byte contents[] = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
     byte expected[] = DFSTestUtil.
         calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
-    Assert.assertTrue(Arrays.equals(contents, expected));
+    assertTrue(Arrays.equals(contents, expected));
     cluster.shutdown();
     sockDir.close();
   }
@@ -154,7 +152,8 @@ public class TestBlockReaderFactory {
    * Test the case where address passed to DomainSocketFactory#getPathInfo is
    * unresolved. In such a case an exception should be thrown.
    */
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testGetPathInfoWithUnresolvedHost() throws Exception {
     TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
 
@@ -171,10 +170,11 @@ public class TestBlockReaderFactory {
     InetSocketAddress targetAddr =
         InetSocketAddress.createUnresolved("random", 32456);
 
-    thrown.expect(IOException.class);
-    thrown.expectMessage("Unresolved host: " + targetAddr);
-    domainSocketFactory.getPathInfo(targetAddr, shortCircuitConf);
-    sockDir.close();
+    IOException exception = assertThrows(IOException.class, () -> {
+      domainSocketFactory.getPathInfo(targetAddr, shortCircuitConf);
+      sockDir.close();
+    });
+    assertTrue(exception.getMessage().contains("Unresolved host: " + targetAddr));
   }
 
   /**
@@ -185,7 +185,8 @@ public class TestBlockReaderFactory {
    * createShortCircuitReplicaInfo.  This one replica should be shared
    * by all threads.
    */
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testMultipleWaitersOnShortCircuitCache()
       throws Exception {
     final CountDownLatch latch = new CountDownLatch(1);
@@ -198,7 +199,7 @@ public class TestBlockReaderFactory {
         public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
           Uninterruptibles.awaitUninterruptibly(latch);
           if (!creationIsBlocked.compareAndSet(true, false)) {
-            Assert.fail("there were multiple calls to "
+            fail("there were multiple calls to "
                 + "createShortCircuitReplicaInfo.  Only one was expected.");
           }
           return null;
@@ -222,10 +223,10 @@ public class TestBlockReaderFactory {
       public void run() {
         try {
           byte contents[] = DFSTestUtil.readFileBuffer(dfs, new Path(TEST_FILE));
-          Assert.assertFalse(creationIsBlocked.get());
+          assertFalse(creationIsBlocked.get());
           byte expected[] = DFSTestUtil.
               calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
-          Assert.assertTrue(Arrays.equals(contents, expected));
+          assertTrue(Arrays.equals(contents, expected));
         } catch (Throwable e) {
           LOG.error("readerRunnable error", e);
           testFailed.set(true);
@@ -244,7 +245,7 @@ public class TestBlockReaderFactory {
     }
     cluster.shutdown();
     sockDir.close();
-    Assert.assertFalse(testFailed.get());
+    assertFalse(testFailed.get());
   }
 
   /**
@@ -303,11 +304,11 @@ public class TestBlockReaderFactory {
           try {
             blockReader = BlockReaderTestUtil.getBlockReader(
                 cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
-            Assert.fail("expected getBlockReader to fail the first time.");
+            fail("expected getBlockReader to fail the first time.");
           } catch (Throwable t) {
-            Assert.assertTrue("expected to see 'TCP reads were disabled " +
-                "for testing' in exception " + t, t.getMessage().contains(
-                "TCP reads were disabled for testing"));
+            assertTrue(t.getMessage().contains("TCP reads were disabled for testing"),
+                "expected to see 'TCP reads were disabled "
+                    + "for testing' in exception " + t);
           } finally {
             if (blockReader != null) blockReader.close(); // keep findbugs happy
           }
@@ -344,7 +345,7 @@ public class TestBlockReaderFactory {
     }
     cluster.shutdown();
     sockDir.close();
-    Assert.assertFalse(testFailed.get());
+    assertFalse(testFailed.get());
   }
 
   /**
@@ -388,7 +389,7 @@ public class TestBlockReaderFactory {
             calculateFileContentsFromSeed(seed, testFileLen);
 
         try (FSDataInputStream in = dfs.open(testFile)) {
-          Assert.assertEquals(0,
+          assertEquals(0,
               dfs.getClient().getClientContext().getShortCircuitCache(0)
                   .getReplicaInfoMapSize());
 
@@ -402,9 +403,8 @@ public class TestBlockReaderFactory {
               .setMaxTotalSize(0);
           LOG.info("Unbuffering");
           in.unbuffer();
-          Assert.assertEquals(0,
-              dfs.getClient().getClientContext().getShortCircuitCache(0)
-                  .getReplicaInfoMapSize());
+          assertEquals(0, dfs.getClient().getClientContext().getShortCircuitCache(0)
+              .getReplicaInfoMapSize());
 
           DFSTestUtil.appendFile(dfs, testFile, "append more data");
 
@@ -430,8 +430,8 @@ public class TestBlockReaderFactory {
   private void validateReadResult(final DistributedFileSystem dfs,
       final byte[] expected, final byte[] actual,
       final int expectedScrRepMapSize) {
-    Assert.assertThat(expected, CoreMatchers.is(actual));
-    Assert.assertEquals(expectedScrRepMapSize,
+    assertThat(expected).isEqualTo(actual);
+    assertEquals(expectedScrRepMapSize,
         dfs.getClient().getClientContext().getShortCircuitCache(0)
             .getReplicaInfoMapSize());
   }
@@ -465,7 +465,7 @@ public class TestBlockReaderFactory {
     byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
     byte expected[] = DFSTestUtil.
         calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
-    Assert.assertTrue(Arrays.equals(contents, expected));
+    assertTrue(Arrays.equals(contents, expected));
     final ShortCircuitCache cache =
         fs.getClient().getClientContext().getShortCircuitCache(0);
     final DatanodeInfo datanode = new DatanodeInfoBuilder()
@@ -475,11 +475,11 @@ public class TestBlockReaderFactory {
       @Override
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
           throws IOException {
-        Assert.assertEquals(1,  info.size());
+        assertEquals(1,  info.size());
         PerDatanodeVisitorInfo vinfo = info.get(datanode);
-        Assert.assertTrue(vinfo.disabled);
-        Assert.assertEquals(0, vinfo.full.size());
-        Assert.assertEquals(0, vinfo.notFull.size());
+        assertTrue(vinfo.disabled);
+        assertEquals(0, vinfo.full.size());
+        assertEquals(0, vinfo.notFull.size());
       }
     });
     cluster.shutdown();
@@ -514,10 +514,10 @@ public class TestBlockReaderFactory {
     byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
     byte expected[] = DFSTestUtil.
         calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
-    Assert.assertTrue(Arrays.equals(contents, expected));
+    assertTrue(Arrays.equals(contents, expected));
     final ShortCircuitCache cache =
         fs.getClient().getClientContext().getShortCircuitCache(0);
-    Assert.assertEquals(null, cache.getDfsClientShmManager());
+    assertEquals(null, cache.getDfsClientShmManager());
     cluster.shutdown();
     sockDir.close();
   }
@@ -546,11 +546,11 @@ public class TestBlockReaderFactory {
     byte contents[] = DFSTestUtil.readFileBuffer(fs, new Path(TEST_FILE));
     byte expected[] = DFSTestUtil.
         calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
-    Assert.assertTrue(Arrays.equals(contents, expected));
+    assertTrue(Arrays.equals(contents, expected));
     final ShortCircuitCache cache =
         fs.getClient().getClientContext().getShortCircuitCache(0);
     cache.close();
-    Assert.assertTrue(cache.getDfsClientShmManager().
+    assertTrue(cache.getDfsClientShmManager().
         getDomainSocketWatcher().isClosed());
     cluster.shutdown();
     sockDir.close();
@@ -573,7 +573,8 @@ public class TestBlockReaderFactory {
    * are reading from the same replica and an InterruptedException is delivered
    * to one of them.
    */
-  @Test(timeout=120000)
+  @Test
+  @Timeout(value = 120)
   public void testPurgingClosedReplicas() throws Exception {
     BlockReaderTestUtil.enableBlockReaderFactoryTracing();
     final AtomicInteger replicasCreated = new AtomicInteger(0);
@@ -649,7 +650,7 @@ public class TestBlockReaderFactory {
       thread.interrupt();
       sem.release();
     }
-    Assert.assertFalse(testFailed.get());
+    assertFalse(testFailed.get());
 
     // We should be able to read from the file without
     // getting a ClosedChannelException.
@@ -663,10 +664,10 @@ public class TestBlockReaderFactory {
     }
     byte expected[] = DFSTestUtil.
         calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
-    Assert.assertTrue(Arrays.equals(buf, expected));
+    assertTrue(Arrays.equals(buf, expected));
 
     // Another ShortCircuitReplica object should have been created.
-    Assert.assertEquals(2, replicasCreated.get());
+    assertEquals(2, replicasCreated.get());
 
     dfs.close();
     cluster.shutdown();
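
Three JUnit 4 idioms disappear from this file: the ExpectedException rule (replaced by an explicit assertThrows around the failing call), the Timeout rule and @Test(timeout=...) attribute (replaced by @Timeout at class and method level), and Assert.assertThat with Hamcrest matchers (replaced by AssertJ's assertThat). The following is a condensed, hypothetical sketch of all three swaps, not code from this commit.

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

// Class-level @Timeout(180) replaces: @Rule public final Timeout globalTimeout = new Timeout(180000);
@Timeout(180)
public class RuleMigrationSketch {

  @Test
  @Timeout(value = 60)  // replaces: @Test(timeout=60000)
  public void expectedExceptionBecomesAssertThrows() {
    // Replaces: thrown.expect(IOException.class); thrown.expectMessage("Unresolved host: ...");
    IOException e = assertThrows(IOException.class, () -> {
      throw new IOException("Unresolved host: random:32456");
    });
    assertTrue(e.getMessage().contains("Unresolved host"));
  }

  @Test
  public void hamcrestAssertThatBecomesAssertJ() {
    byte[] expected = {1, 2, 3};
    byte[] actual = {1, 2, 3};
    // Replaces: Assert.assertThat(expected, CoreMatchers.is(actual));
    assertThat(expected).isEqualTo(actual);
  }
}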

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderIoProvider.java

@@ -22,7 +22,8 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.metrics.BlockReaderIoProvider;
 import org.apache.hadoop.hdfs.client.impl.metrics.BlockReaderLocalMetrics;
 import org.apache.hadoop.util.FakeTimer;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -45,7 +46,8 @@ public class TestBlockReaderIoProvider {
 
   private static final FakeTimer TIMER = new FakeTimer();
 
-  @Test(timeout = 300_000)
+  @Test
+  @Timeout(value = 300)
   public void testSlowShortCircuitReadsIsRecorded() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.setInt(HdfsClientConfigKeys.Read.ShortCircuit
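
Note the unit change in this hunk: @Test(timeout = 300_000) is milliseconds, while Jupiter's @Timeout(value = 300) defaults to seconds, so the two budgets are equivalent. When a different unit is clearer, it can be stated explicitly, as in this illustrative snippet (class and method names are hypothetical):

import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

public class TimeoutUnitSketch {

  @Test
  @Timeout(value = 300)  // seconds by default; same budget as timeout = 300_000 ms
  public void defaultsToSeconds() {
  }

  @Test
  @Timeout(value = 300_000, unit = TimeUnit.MILLISECONDS)  // explicit unit
  public void explicitMilliseconds() {
  }
}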

+ 65 - 52
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocal.java

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.client.impl;
 
-import static org.hamcrest.CoreMatchers.equalTo;
 
 import java.io.EOFException;
 import java.io.File;
@@ -58,22 +57,27 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.util.Time;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 public class TestBlockReaderLocal {
   private static TemporarySocketDirectory sockDir;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() {
     sockDir = new TemporarySocketDirectory();
     DomainSocket.disableBindPathValidation();
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws IOException {
     sockDir.close();
   }
@@ -82,7 +86,7 @@ public class TestBlockReaderLocal {
       int off2, int len) {
     for (int i = 0; i < len; i++) {
       if (buf1[off1 + i] != buf2[off2 + i]) {
-        Assert.fail("arrays differ at byte " +  i + ". " +
+        fail("arrays differ at byte " +  i + ". " +
           "The first array has " + (int)buf1[off1 + i] +
           ", but the second array has " + (int)buf2[off2 + i]);
       }
@@ -138,7 +142,7 @@ public class TestBlockReaderLocal {
   public void runBlockReaderLocalTest(BlockReaderLocalTest test,
       boolean checksum, long readahead, int shortCircuitCachesNum)
           throws IOException {
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    assumeTrue(DomainSocket.getLoadingFailureReason() == null);
     MiniDFSCluster cluster = null;
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
@@ -170,10 +174,10 @@ public class TestBlockReaderLocal {
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       fsIn = fs.open(TEST_PATH);
@@ -221,8 +225,8 @@ public class TestBlockReaderLocal {
         metaIn = null;
         test.doTest(blockReaderLocal, original, i * blockSize);
         // BlockReaderLocal should not alter the file position.
-        Assert.assertEquals(0, streams[0].getChannel().position());
-        Assert.assertEquals(0, streams[1].getChannel().position());
+        assertEquals(0, streams[0].getChannel().position());
+        assertEquals(0, streams[1].getChannel().position());
       }
       cluster.shutdown();
       cluster = null;
@@ -269,7 +273,7 @@ public class TestBlockReaderLocal {
       reader.readFully(buf, 1537, 514);
       assertArrayRegionsEqual(original, 1537, buf, 1537, 514);
       // Readahead is always at least the size of one chunk in this test.
-      Assert.assertTrue(reader.getMaxReadaheadLength() >=
+      assertTrue(reader.getMaxReadaheadLength() >=
           BlockReaderLocalTest.BYTES_PER_CHECKSUM);
     }
   }
@@ -489,7 +493,7 @@ public class TestBlockReaderLocal {
       if (usingChecksums) {
         try {
           reader.readFully(buf, 0, 10);
-          Assert.fail("did not detect corruption");
+          fail("did not detect corruption");
         } catch (IOException e) {
           // expected
         }
@@ -539,11 +543,11 @@ public class TestBlockReaderLocal {
         reader.readFully(buf, 816, 900);
         if (usingChecksums) {
           // We should detect the corruption when using a checksum file.
-          Assert.fail("did not detect corruption");
+          fail("did not detect corruption");
         }
       } catch (ChecksumException e) {
         if (!usingChecksums) {
-          Assert.fail("didn't expect to get ChecksumException: not " +
+          fail("didn't expect to get ChecksumException: not " +
               "using checksums.");
         }
       }
@@ -640,7 +644,7 @@ public class TestBlockReaderLocal {
     @Override
     public void doTest(BlockReaderLocal reader, byte original[])
         throws IOException {
-      Assert.assertTrue(!reader.getVerifyChecksum());
+      assertTrue(!reader.getVerifyChecksum());
       ByteBuffer buf = ByteBuffer.wrap(new byte[TEST_LENGTH]);
       reader.skip(1);
       readFully(reader, buf, 1, 9);
@@ -663,15 +667,15 @@ public class TestBlockReaderLocal {
     public void doTest(BlockReaderLocal reader, byte original[])
         throws IOException {
       byte emptyArr[] = new byte[0];
-      Assert.assertEquals(0, reader.read(emptyArr, 0, 0));
+      assertEquals(0, reader.read(emptyArr, 0, 0));
       ByteBuffer emptyBuf = ByteBuffer.wrap(emptyArr);
-      Assert.assertEquals(0, reader.read(emptyBuf));
+      assertEquals(0, reader.read(emptyBuf));
       reader.skip(1);
-      Assert.assertEquals(0, reader.read(emptyArr, 0, 0));
-      Assert.assertEquals(0, reader.read(emptyBuf));
+      assertEquals(0, reader.read(emptyArr, 0, 0));
+      assertEquals(0, reader.read(emptyBuf));
       reader.skip(BlockReaderLocalTest.TEST_LENGTH - 1);
-      Assert.assertEquals(-1, reader.read(emptyArr, 0, 0));
-      Assert.assertEquals(-1, reader.read(emptyBuf));
+      assertEquals(-1, reader.read(emptyArr, 0, 0));
+      assertEquals(-1, reader.read(emptyBuf));
     }
   }
 
@@ -732,18 +736,20 @@ public class TestBlockReaderLocal {
   }
 
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void TestStatisticsForShortCircuitLocalRead() throws Exception {
     testStatistics(true);
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void TestStatisticsForLocalRead() throws Exception {
     testStatistics(false);
   }
 
   private void testStatistics(boolean isShortCircuit) throws Exception {
-    Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
+    assumeTrue(DomainSocket.getLoadingFailureReason() == null);
     HdfsConfiguration conf = new HdfsConfiguration();
     TemporarySocketDirectory sockDir = null;
     if (isShortCircuit) {
@@ -773,25 +779,25 @@ public class TestBlockReaderLocal {
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       fsIn = fs.open(TEST_PATH);
       IOUtils.readFully(fsIn, original, 0,
           BlockReaderLocalTest.TEST_LENGTH);
       HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
-      Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
+      assertEquals(BlockReaderLocalTest.TEST_LENGTH,
           dfsIn.getReadStatistics().getTotalBytesRead());
-      Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
+      assertEquals(BlockReaderLocalTest.TEST_LENGTH,
           dfsIn.getReadStatistics().getTotalLocalBytesRead());
       if (isShortCircuit) {
-        Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH,
+        assertEquals(BlockReaderLocalTest.TEST_LENGTH,
             dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
       } else {
-        Assert.assertEquals(0,
+        assertEquals(0,
             dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
       }
       fsIn.close();
@@ -805,7 +811,8 @@ public class TestBlockReaderLocal {
     }
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testStatisticsForErasureCodingRead() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
 
@@ -838,9 +845,9 @@ public class TestBlockReaderLocal {
         IOUtils.readFully(in, buf, 0, length);
 
         ReadStatistics stats = in.getReadStatistics();
-        Assert.assertEquals(BlockType.CONTIGUOUS, stats.getBlockType());
-        Assert.assertEquals(length, stats.getTotalBytesRead());
-        Assert.assertEquals(length, stats.getTotalLocalBytesRead());
+        assertEquals(BlockType.CONTIGUOUS, stats.getBlockType());
+        assertEquals(length, stats.getTotalBytesRead());
+        assertEquals(length, stats.getTotalLocalBytesRead());
       }
 
       Path ecFile = new Path(ecDir, "file2");
@@ -855,10 +862,10 @@ public class TestBlockReaderLocal {
         IOUtils.readFully(in, buf, 0, length);
 
         ReadStatistics stats = in.getReadStatistics();
-        Assert.assertEquals(BlockType.STRIPED, stats.getBlockType());
-        Assert.assertEquals(length, stats.getTotalLocalBytesRead());
-        Assert.assertEquals(length, stats.getTotalBytesRead());
-        Assert.assertTrue(stats.getTotalEcDecodingTimeMillis() > 0);
+        assertEquals(BlockType.STRIPED, stats.getBlockType());
+        assertEquals(length, stats.getTotalLocalBytesRead());
+        assertEquals(length, stats.getTotalBytesRead());
+        assertTrue(stats.getTotalEcDecodingTimeMillis() > 0);
       }
     }
   }
@@ -878,7 +885,7 @@ public class TestBlockReaderLocal {
       reader.readFully(buf, 1537, 514);
       assertArrayRegionsEqual(original, 1537 + shift, buf, 1537, 514);
       // Readahead is always at least the size of one chunk in this test.
-      Assert.assertTrue(reader.getMaxReadaheadLength() >=
+      assertTrue(reader.getMaxReadaheadLength() >=
               BlockReaderLocalTest.BYTES_PER_CHECKSUM);
     }
   }
@@ -920,20 +927,26 @@ public class TestBlockReaderLocal {
             false, 0, 5);
   }
 
-  @Test(expected = IllegalArgumentException.class)
+  @Test
   public void testBlockReaderShortCircutCachesOutOfRangeBelow()
-          throws IOException {
-    runBlockReaderLocalTest(new TestBlockReaderFiveShortCircutCachesReads(),
-            true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT,
-            0);
+      throws IOException {
+    assumeTrue(DomainSocket.getLoadingFailureReason() == null);
+    assertThrows(IllegalArgumentException.class, () -> {
+      runBlockReaderLocalTest(new TestBlockReaderFiveShortCircutCachesReads(),
+          true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT,
+          0);
+    });
   }
 
-  @Test(expected = IllegalArgumentException.class)
+  @Test
   public void testBlockReaderShortCircutCachesOutOfRangeAbove()
           throws IOException {
-    runBlockReaderLocalTest(new TestBlockReaderFiveShortCircutCachesReads(),
-            true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT,
-            555);
+    assumeTrue(DomainSocket.getLoadingFailureReason() == null);
+    assertThrows(IllegalArgumentException.class, () -> {
+      runBlockReaderLocalTest(new TestBlockReaderFiveShortCircutCachesReads(),
+          true, HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT,
+          555);
+    });
   }
 
 }
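
Two more recurring swaps appear above: @Test(expected = ...) becomes an explicit assertThrows around just the code expected to fail, and Assume.assumeThat(..., equalTo(null)) becomes Assumptions.assumeTrue(... == null), evaluated before the throwing call rather than inside the lambda so an aborted assumption is not mistaken for the expected exception. A hedged, hypothetical sketch of the pattern:

import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assumptions.assumeTrue;

import org.junit.jupiter.api.Test;

public class ExpectedAndAssumeSketch {

  // Stand-in for DomainSocket.getLoadingFailureReason(); null means the precondition holds.
  private String loadingFailureReason() {
    return null;
  }

  @Test
  public void expectedAttributeBecomesAssertThrows() {
    // Replaces: Assume.assumeThat(reason, equalTo(null));
    assumeTrue(loadingFailureReason() == null);
    // Replaces: @Test(expected = IllegalArgumentException.class)
    assertThrows(IllegalArgumentException.class, () -> {
      throw new IllegalArgumentException("shortCircuitCachesNum out of range");
    });
  }
}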

+ 17 - 15
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalLegacy.java

@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.hdfs.client.impl;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -49,13 +51,12 @@ import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 public class TestBlockReaderLocalLegacy {
-  @BeforeClass
+  @BeforeAll
   public static void setupCluster() throws IOException {
     DFSInputStream.tcpReadsDisabledForTesting = true;
     DomainSocket.disableBindPathValidation();
@@ -107,7 +108,7 @@ public class TestBlockReaderLocalLegacy {
 
     ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);
     int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
-    assertEquals("All replicas not corrupted", REPL_FACTOR, blockFilesCorrupted);
+    assertEquals(REPL_FACTOR, blockFilesCorrupted, "All replicas not corrupted");
 
     FSDataInputStream dis = cluster.getFileSystem().open(path);
     ByteBuffer buf = ByteBuffer.allocateDirect((int)FILE_LENGTH);
@@ -142,7 +143,7 @@ public class TestBlockReaderLocalLegacy {
   public void testBothOldAndNewShortCircuitConfigured() throws Exception {
     final short REPL_FACTOR = 1;
     final int FILE_LENGTH = 512;
-    Assume.assumeTrue(null == DomainSocket.getLoadingFailureReason());
+    assumeTrue(null == DomainSocket.getLoadingFailureReason());
     TemporarySocketDirectory socketDir = new TemporarySocketDirectory();
     HdfsConfiguration conf = getConfiguration(socketDir);
     MiniDFSCluster cluster =
@@ -164,12 +165,13 @@ public class TestBlockReaderLocalLegacy {
     byte buf[] = new byte[FILE_LENGTH];
     IOUtils.readFully(fis, buf, 0, FILE_LENGTH);
     fis.close();
-    Assert.assertArrayEquals(orig, buf);
+    assertArrayEquals(orig, buf);
     Arrays.equals(orig, buf);
     cluster.shutdown();
   }
 
-  @Test(timeout=20000)
+  @Test
+  @Timeout(value = 20)
   public void testBlockReaderLocalLegacyWithAppend() throws Exception {
     final short REPL_FACTOR = 1;
     final HdfsConfiguration conf = getConfiguration(null);
@@ -203,7 +205,7 @@ public class TestBlockReaderLocalLegacy {
 
       // test getBlockLocalPathInfo
       final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(blk, token);
-      Assert.assertEquals(originalGS, info.getBlock().getGenerationStamp());
+      assertEquals(originalGS, info.getBlock().getGenerationStamp());
     }
 
     { // append one byte
@@ -217,13 +219,13 @@ public class TestBlockReaderLocalLegacy {
       final LocatedBlock lb = cluster.getNameNode().getRpcServer()
           .getBlockLocations(path.toString(), 0, 1).get(0);
       final long newGS = lb.getBlock().getGenerationStamp();
-      Assert.assertTrue(newGS > originalGS);
+      assertTrue(newGS > originalGS);
 
       // getBlockLocalPathInfo using the original block.
-      Assert.assertEquals(originalGS, originalBlock.getGenerationStamp());
+      assertEquals(originalGS, originalBlock.getGenerationStamp());
       final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(
           originalBlock, token);
-      Assert.assertEquals(newGS, info.getBlock().getGenerationStamp());
+      assertEquals(newGS, info.getBlock().getGenerationStamp());
     }
     cluster.shutdown();
   }

+ 15 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderLocalMetrics.java

@@ -29,8 +29,9 @@ import org.apache.hadoop.test.GenericTestUtils;
 import static org.apache.hadoop.test.MetricsAsserts.getDoubleGauge;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import org.apache.hadoop.util.FakeTimer;
-import static org.junit.Assert.assertTrue;
-import org.junit.Test;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyLong;
 import org.mockito.Mockito;
@@ -67,7 +68,8 @@ public class TestBlockReaderLocalMetrics {
     clientConf = new DfsClientConf(conf);
   }
 
-  @Test(timeout = 300_000)
+  @Test
+  @Timeout(value = 300)
   public void testSlowShortCircuitReadsStatsRecorded() throws IOException,
       InterruptedException, TimeoutException {
 
@@ -107,11 +109,12 @@ public class TestBlockReaderLocalMetrics {
         SHORT_CIRCUIT_READ_METRIC_REGISTERED_NAME);
     double averageLatency = getDoubleGauge(
         SHORT_CIRCUIT_LOCAL_READS_METRIC_VALUE_FULL_NAME, rb);
-    assertTrue("Average Latency of Short Circuit Reads lower than expected",
-        averageLatency >= SLOW_READ_DELAY);
+    assertTrue(averageLatency >= SLOW_READ_DELAY,
+        "Average Latency of Short Circuit Reads lower than expected");
   }
 
-  @Test(timeout = 300_000)
+  @Test
+  @Timeout(value = 300)
   public void testMutlipleBlockReaderIoProviderStats() throws IOException,
       InterruptedException, TimeoutException {
 
@@ -165,11 +168,12 @@ public class TestBlockReaderLocalMetrics {
     double averageLatency = getDoubleGauge(
         SHORT_CIRCUIT_LOCAL_READS_METRIC_VALUE_FULL_NAME, rb);
 
-    assertTrue("Average Latency of Short Circuit Reads lower than expected",
-        averageLatency >= SLOW_READ_DELAY*2);
+    assertTrue(averageLatency >= SLOW_READ_DELAY*2,
+        "Average Latency of Short Circuit Reads lower than expected");
   }
 
-  @Test(timeout = 300_000)
+  @Test
+  @Timeout(value = 300)
   public void testSlowShortCircuitReadsAverageLatencyValue() throws IOException,
       InterruptedException, TimeoutException {
 
@@ -220,7 +224,7 @@ public class TestBlockReaderLocalMetrics {
     double averageLatency = getDoubleGauge(
         SHORT_CIRCUIT_LOCAL_READS_METRIC_VALUE_FULL_NAME, rb);
 
-    assertTrue("Average Latency of Short Circuit Reads lower than expected",
-        averageLatency >= expectedAvgLatency);
+    assertTrue(averageLatency >= expectedAvgLatency,
+        "Average Latency of Short Circuit Reads lower than expected");
   }
 }

+ 9 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestBlockReaderRemote.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.client.impl;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import java.util.Random;
@@ -28,9 +28,10 @@ import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 /**
  * This tests BlockReaderRemote.
@@ -57,7 +58,7 @@ public class TestBlockReaderRemote {
     return util.getBlockReader(block, 0, blockData.length);
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     util = new BlockReaderTestUtil(1, new HdfsConfiguration());
     blockData = getBlockData();
@@ -70,12 +71,13 @@ public class TestBlockReaderRemote {
     reader = getBlockReader(blk);
   }
 
-  @After
+  @AfterEach
   public void shutdown() throws Exception {
     util.shutdown();
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testSkip() throws IOException {
     Random random = new Random();
     byte [] buf = new byte[1];

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/TestClientBlockVerification.java

@@ -29,9 +29,9 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.slf4j.event.Level;
 
 public class TestClientBlockVerification {
@@ -44,7 +44,7 @@ public class TestClientBlockVerification {
   static {
     GenericTestUtils.setLogLevel(BlockReaderRemote.LOG, Level.TRACE);
   }
-  @BeforeClass
+  @BeforeAll
   public static void setupCluster() throws Exception {
     final int REPLICATION_FACTOR = 1;
     util = new BlockReaderTestUtil(REPLICATION_FACTOR);
@@ -118,7 +118,7 @@ public class TestClientBlockVerification {
   }
 
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws Exception {
     util.shutdown();
   }
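
Class-level lifecycle methods map one-to-one: @BeforeClass becomes @BeforeAll and @AfterClass becomes @AfterAll, and under Jupiter's default per-method test-instance lifecycle they must still be static. Illustrative sketch (hypothetical class, not from this commit):

import static org.junit.jupiter.api.Assertions.assertNotNull;

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

public class LifecycleMigrationSketch {
  private static String sharedResource;

  @BeforeAll  // replaces @BeforeClass; still static
  public static void setupCluster() {
    sharedResource = "mini-cluster";
  }

  @AfterAll  // replaces @AfterClass
  public static void teardownCluster() {
    sharedResource = null;
  }

  @Test
  public void usesSharedResource() {
    assertNotNull(sharedResource);
  }
}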

+ 12 - 15
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java

@@ -26,10 +26,9 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.net.Node;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -39,16 +38,17 @@ import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Set;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 /**
  * This class tests the correctness of storage type info stored in
  * DFSNetworkTopology.
  */
+@Timeout(30)
 public class TestDFSNetworkTopology {
   private static final Logger LOG =
       LoggerFactory.getLogger(TestDFSNetworkTopology.class);
@@ -56,10 +56,7 @@ public class TestDFSNetworkTopology {
       DFSNetworkTopology.getInstance(new Configuration());
   private DatanodeDescriptor[] dataNodes;
 
-  @Rule
-  public Timeout testTimeout = new Timeout(30000);
-
-  @Before
+  @BeforeEach
   public void setupDatanodes() {
     final String[] racks = {
         "/l1/d1/r1", "/l1/d1/r1", "/l1/d1/r2", "/l1/d1/r2", "/l1/d1/r2",
@@ -635,7 +632,7 @@ public class TestDFSNetworkTopology {
     excluded.add(dns[1]);
     Node n = dfsCluster.chooseRandomWithStorageType("/default",
         "/default/rack1", excluded, StorageType.DISK);
-    assertNull("No node should have been selected.", n);
+    assertNull(n, "No node should have been selected.");
   }
 
   /**
@@ -665,7 +662,7 @@ public class TestDFSNetworkTopology {
     Node n = dfsCluster.chooseRandomWithStorageType(
         "/default/rack1/0.0.0.0:" + DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
         null, excluded, StorageType.DISK);
-    assertNull("No node should have been selected.", n);
+    assertNull(n, "No node should have been selected.");
   }
 
   @Test
@@ -684,6 +681,6 @@ public class TestDFSNetworkTopology {
     excluded.add(dns[1]);
     Node n = dfsCluster.chooseRandomWithStorageType("/default/rack1",
         null, excluded, StorageType.DISK);
-    assertNotNull("/default/rack1/host1 should be selected.", n);
+    assertNotNull(n, "/default/rack1/host1 should be selected.");
   }
 }
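
Here the per-class 30-second Timeout rule becomes a class-level @Timeout(30); in Jupiter the annotation applies to every test and lifecycle method in the class instead of being a rule object. A short sketch under that assumption (hypothetical class):

import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

// Seconds by default; replaces: @Rule public Timeout testTimeout = new Timeout(30000);
@Timeout(30)
public class ClassLevelTimeoutSketch {
  private long setupNanos;

  @BeforeEach
  public void setup() {
    setupNanos = System.nanoTime();
  }

  @Test
  public void eachTestInheritsTheThirtySecondBudget() {
    assertTrue(System.nanoTime() >= setupNanos);
  }
}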

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopologyPerformance.java

@@ -24,10 +24,10 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -37,8 +37,8 @@ import java.util.HashSet;
 import java.util.Random;
 import java.util.Set;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Performance test of the new DFSNetworkTopology chooseRandom.
@@ -49,7 +49,7 @@ import static org.junit.Assert.assertTrue;
  * tests without something reading the value. So disabled the tests to for now,
  * anyone interested in looking at the numbers can enable them.
  */
-@Ignore
+@Disabled
 public class TestDFSNetworkTopologyPerformance {
   public static final Logger LOG =
       LoggerFactory.getLogger(TestDFSNetworkTopologyPerformance.class);
@@ -83,7 +83,7 @@ public class TestDFSNetworkTopologyPerformance {
   private long localEnd;
 
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     racks = new String[NODE_NUM];
     hosts = new String[NODE_NUM];
@@ -95,7 +95,7 @@ public class TestDFSNetworkTopologyPerformance {
     }
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     cluster = NetworkTopology.getInstance(new Configuration());
     dfscluster = DFSNetworkTopology.getInstance(new Configuration());
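
@Ignore maps directly to @Disabled; applied at class level, as here, it skips every test in the class, and an optional reason string can be recorded. Minimal illustration (hypothetical class; the reason paraphrases the comment in this test):

import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

@Disabled("performance numbers are meant to be read manually; enable locally when needed")
public class DisabledMigrationSketch {

  @Test
  public void skippedWhileTheClassIsDisabled() {
    // never runs while the class-level @Disabled is present
  }
}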

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestAnnotations.java

@@ -22,8 +22,9 @@ import java.lang.reflect.Method;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.retry.AtMostOnce;
 import org.apache.hadoop.io.retry.Idempotent;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Tests to make sure all the protocol class public methods have
@@ -34,10 +35,9 @@ public class TestAnnotations {
   public void checkAnnotations() {
     Method[] methods = NamenodeProtocols.class.getMethods();
     for (Method m : methods) {
-      Assert.assertTrue(
-          "Idempotent or AtMostOnce annotation is not present " + m,
-          m.isAnnotationPresent(Idempotent.class)
-              || m.isAnnotationPresent(AtMostOnce.class));
+      assertTrue(m.isAnnotationPresent(Idempotent.class)
+              || m.isAnnotationPresent(AtMostOnce.class),
+          "Idempotent or AtMostOnce annotation is not present " + m);
     }
   }
 }

+ 10 - 12
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java

@@ -18,11 +18,11 @@
 
 package org.apache.hadoop.hdfs.protocol;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
@@ -53,7 +53,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo.Capability;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -177,12 +177,10 @@ public class TestBlockListAsLongs {
       assertNotNull(replica);
       Replica expected = reportReplicas.remove(replica.getBlockId());
       assertNotNull(expected);
-      assertEquals("wrong bytes",
-          expected.getNumBytes(), replica.getNumBytes());
-      assertEquals("wrong genstamp",
-          expected.getGenerationStamp(), replica.getGenerationStamp());
-      assertEquals("wrong replica state",
-          expected.getState(), replica.getState());
+      assertEquals(expected.getNumBytes(), replica.getNumBytes(), "wrong bytes");
+      assertEquals(expected.getGenerationStamp(), replica.getGenerationStamp(),
+          "wrong genstamp");
+      assertEquals(expected.getState(), replica.getState(), "wrong replica state");
     }
     assertTrue(reportReplicas.isEmpty());
   }

+ 36 - 32
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java

@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.mockito.Mockito.*;
 
 import java.util.ArrayList;
@@ -34,7 +35,7 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion.FeatureInfo;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.LayoutFeature;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test for {@link LayoutVersion}
@@ -132,19 +133,19 @@ public class TestLayoutVersion {
         NameNodeLayoutVersion.Feature.SNAPSHOT_MODIFICATION_TIME,
         NameNodeLayoutVersion.Feature.NVDIMM_SUPPORT);
     for (LayoutFeature f : compatibleFeatures) {
-      assertEquals(String.format("Expected minimum compatible layout version " +
-          "%d for feature %s.", baseLV, f), baseLV,
-          f.getInfo().getMinimumCompatibleLayoutVersion());
+      assertEquals(baseLV, f.getInfo().getMinimumCompatibleLayoutVersion(),
+          String.format("Expected minimum compatible layout version "
+              + "%d for feature %s.", baseLV, f));
     }
     List<LayoutFeature> features = new ArrayList<>();
     features.addAll(EnumSet.allOf(LayoutVersion.Feature.class));
     features.addAll(EnumSet.allOf(NameNodeLayoutVersion.Feature.class));
     for (LayoutFeature f : features) {
       if (!compatibleFeatures.contains(f)) {
-        assertEquals(String.format("Expected feature %s to have minimum " +
-            "compatible layout version set to itself.", f),
-            f.getInfo().getLayoutVersion(),
-            f.getInfo().getMinimumCompatibleLayoutVersion());
+        assertEquals(f.getInfo().getLayoutVersion(),
+            f.getInfo().getMinimumCompatibleLayoutVersion(),
+            String.format("Expected feature %s to have minimum "
+                + "compatible layout version set to itself.", f));
       }
     }
   }
@@ -161,10 +162,10 @@ public class TestLayoutVersion {
     LayoutFeature prevF = null;
     for (LayoutFeature f : EnumSet.allOf(NameNodeLayoutVersion.Feature.class)) {
       if (prevF != null) {
-        assertTrue(String.format("Features %s and %s not listed in order of " +
-            "minimum compatible layout version.", prevF, f),
-            f.getInfo().getMinimumCompatibleLayoutVersion() <=
-            prevF.getInfo().getMinimumCompatibleLayoutVersion());
+        assertTrue(f.getInfo().getMinimumCompatibleLayoutVersion() <=
+                prevF.getInfo().getMinimumCompatibleLayoutVersion(),
+            String.format("Features %s and %s not listed in order of " +
+                "minimum compatible layout version.", prevF, f));
       } else {
         prevF = f;
       }
@@ -175,17 +176,19 @@ public class TestLayoutVersion {
    * Tests that attempting to add a new NameNode feature out of order with
    * respect to minimum compatible layout version will fail fast.
    */
-  @Test(expected=AssertionError.class)
+  @Test
   public void testNameNodeFeatureMinimumCompatibleLayoutVersionOutOfOrder() {
-    FeatureInfo ancestorF = LayoutVersion.Feature.RESERVED_REL2_4_0.getInfo();
-    LayoutFeature f = mock(LayoutFeature.class);
-    when(f.getInfo()).thenReturn(new FeatureInfo(
-        ancestorF.getLayoutVersion() - 1, ancestorF.getLayoutVersion(),
-        ancestorF.getMinimumCompatibleLayoutVersion() + 1, "Invalid feature.",
-        false));
-    Map<Integer, SortedSet<LayoutFeature>> features = new HashMap<>();
-    LayoutVersion.updateMap(features, LayoutVersion.Feature.values());
-    LayoutVersion.updateMap(features, new LayoutFeature[] { f });
+    assertThrows(AssertionError.class, () -> {
+      FeatureInfo ancestorF = LayoutVersion.Feature.RESERVED_REL2_4_0.getInfo();
+      LayoutFeature f = mock(LayoutFeature.class);
+      when(f.getInfo()).thenReturn(new FeatureInfo(
+          ancestorF.getLayoutVersion() - 1, ancestorF.getLayoutVersion(),
+          ancestorF.getMinimumCompatibleLayoutVersion() + 1, "Invalid feature.",
+          false));
+      Map<Integer, SortedSet<LayoutFeature>> features = new HashMap<>();
+      LayoutVersion.updateMap(features, LayoutVersion.Feature.values());
+      LayoutVersion.updateMap(features, new LayoutFeature[]{f});
+    });
   }
 
   /**
@@ -201,10 +204,11 @@ public class TestLayoutVersion {
         .getLayoutVersion();
     int actualMinCompatLV = LayoutVersion.getMinimumCompatibleLayoutVersion(
         NameNodeLayoutVersion.Feature.values());
-    assertEquals("The minimum compatible layout version has changed.  " +
-        "Downgrade to prior versions is no longer possible.  Please either " +
-        "restore compatibility, or if the incompatibility is intentional, " +
-        "then update this assertion.", expectedMinCompatLV, actualMinCompatLV);
+    assertEquals(expectedMinCompatLV, actualMinCompatLV,
+        "The minimum compatible layout version has changed.  " +
+            "Downgrade to prior versions is no longer possible.  Please either " +
+            "restore compatibility, or if the incompatibility is intentional, " +
+            "then update this assertion.");
   }
 
   /**
@@ -218,9 +222,9 @@ public class TestLayoutVersion {
     SortedSet<LayoutFeature> ancestorSet = NameNodeLayoutVersion.getFeatures(ancestorLV);
     assertNotNull(ancestorSet);
     for (LayoutFeature  feature : ancestorSet) {
-      assertTrue("LV " + lv + " does nto support " + feature
-          + " supported by the ancestor LV " + info.getAncestorLayoutVersion(),
-          NameNodeLayoutVersion.supports(feature, lv));
+      assertTrue(NameNodeLayoutVersion.supports(feature, lv),
+          "LV " + lv + " does nto support " + feature
+              + " supported by the ancestor LV " + info.getAncestorLayoutVersion());
     }
   }
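Editor's note: TestLayoutVersion above also shows how this commit handles expected exceptions — JUnit 4's @Test(expected = AssertionError.class) has no JUnit 5 counterpart, so the test body is wrapped in Assertions.assertThrows. A minimal sketch of that pattern, using an invented helper method rather than the real LayoutVersion API:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertThrows;

    import org.junit.jupiter.api.Test;

    public class ExpectedExceptionSketch {
      // Hypothetical method used only for illustration.
      private static int parsePositive(String s) {
        int v = Integer.parseInt(s);
        if (v <= 0) {
          throw new IllegalArgumentException("not positive: " + v);
        }
        return v;
      }

      // JUnit 4: @Test(expected = IllegalArgumentException.class)
      @Test
      public void rejectsNonPositiveValues() {
        IllegalArgumentException e = assertThrows(IllegalArgumentException.class,
            () -> parsePositive("-5"));
        // assertThrows returns the exception, so its message can be checked too.
        assertEquals("not positive: -5", e.getMessage());
      }
    }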
   

+ 5 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLocatedBlock.java

@@ -21,15 +21,17 @@ package org.apache.hadoop.hdfs.protocol;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class TestLocatedBlock {
   public static final Logger LOG =
       LoggerFactory.getLogger(TestLocatedBlock.class);
 
-  @Test(timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testAddCachedLocWhenEmpty() {
     DatanodeInfo[] ds = DatanodeInfo.EMPTY_ARRAY;
     ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
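Editor's note: the TestLocatedBlock change above is the timeout migration used throughout the commit — @Test(timeout = 10000), which takes milliseconds, becomes a separate @Timeout(value = 10) annotation whose default unit is seconds; a class-level @Timeout (as added to TestSaslDataTransfer further down) applies to every test method in the class. A short sketch with made-up test bodies:

    import java.util.concurrent.TimeUnit;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    public class TimeoutSketch {
      // JUnit 4: @Test(timeout = 10000)  -- value in milliseconds
      @Test
      @Timeout(value = 10)               // JUnit 5 default unit is seconds
      public void finishesWithinTenSeconds() {
        // illustrative body only
      }

      // The unit can also be given explicitly.
      @Test
      @Timeout(value = 500, unit = TimeUnit.MILLISECONDS)
      public void finishesWithinHalfASecond() {
      }
    }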

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java

@@ -25,12 +25,14 @@ import java.nio.ByteBuffer;
 
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
 
 public class TestPacketReceiver {
 

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java

@@ -30,7 +30,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIP
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.util.Properties;
@@ -44,8 +44,8 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 public abstract class SaslDataTransferTestCase {
 
@@ -75,7 +75,7 @@ public abstract class SaslDataTransferTestCase {
     return hdfsKeytab;
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void initKdc() throws Exception {
     baseDir = GenericTestUtils
         .getTestDir(SaslDataTransferTestCase.class.getSimpleName());
@@ -100,7 +100,7 @@ public abstract class SaslDataTransferTestCase {
     spnegoPrincipal = "HTTP/localhost@" + kdc.getRealm();
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdownKdc() throws Exception {
     if (kdc != null) {
       kdc.stop();

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestBlackListBasedTrustedChannelResolver.java

@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.File;
 import java.io.IOException;
@@ -29,9 +29,9 @@ import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlackListBasedTrustedChannelResolver;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test class for  {@link BlackListBasedTrustedChannelResolver}.
@@ -43,7 +43,7 @@ public class TestBlackListBasedTrustedChannelResolver {
   private final static String BLACK_LISTED = "127.0.0.1\n216.58.216.174\n";
   private BlackListBasedTrustedChannelResolver resolver;
 
-  @Before
+  @BeforeEach
   public void setup() {
     blacklistFile = new File(GenericTestUtils.getTestDir(), FILE_NAME);
     resolver
@@ -55,7 +55,7 @@ public class TestBlackListBasedTrustedChannelResolver {
     }
   }
 
-  @After
+  @AfterEach
   public void cleanUp() {
     FileUtils.deleteQuietly(blacklistFile);
   }

+ 7 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestCustomizedCallbackHandler.java

@@ -22,8 +22,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferServer.
 import org.apache.hadoop.security.CustomizedCallbackHandler;
 import org.apache.hadoop.security.SaslRpcServer;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -34,6 +33,9 @@ import java.util.List;
 import java.util.concurrent.atomic.AtomicReference;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SASL_CUSTOMIZEDCALLBACKHANDLER_CLASS_KEY;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
 
 /** For testing {@link CustomizedCallbackHandler}. */
 public class TestCustomizedCallbackHandler {
@@ -54,10 +56,10 @@ public class TestCustomizedCallbackHandler {
   /** Assert if the callbacks in {@link #LAST_CALLBACKS} are the same as the expected callbacks. */
   static void assertCallbacks(Callback[] expected) {
     final List<Callback> computed = LAST_CALLBACKS.getAndSet(null);
-    Assert.assertNotNull(computed);
-    Assert.assertEquals(expected.length, computed.size());
+    assertNotNull(computed);
+    assertEquals(expected.length, computed.size());
     for (int i = 0; i < expected.length; i++) {
-      Assert.assertSame(expected[i], computed.get(i));
+      assertSame(expected[i], computed.get(i));
     }
   }
 

+ 28 - 30
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java

@@ -20,9 +20,12 @@ package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -56,14 +59,12 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.mockito.Mockito;
 
+@Timeout(300)
 public class TestSaslDataTransfer extends SaslDataTransferTestCase {
 
   private static final int BLOCK_SIZE = 4096;
@@ -73,13 +74,7 @@ public class TestSaslDataTransfer extends SaslDataTransferTestCase {
   private MiniDFSCluster cluster;
   private FileSystem fs;
 
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
-  @Rule
-  public Timeout timeout = new Timeout(300_000);
-
-  @After
+  @AfterEach
   public void shutdown() {
     IOUtils.cleanupWithLogger(null, fs);
     if (cluster != null) {
@@ -124,9 +119,11 @@ public class TestSaslDataTransfer extends SaslDataTransferTestCase {
     startCluster(clusterConf);
     HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
     clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
-    exception.expect(IOException.class);
-    exception.expectMessage("could only be written to 0");
-    doTest(clientConf);
+    IOException exception = assertThrows(IOException.class, () -> {
+      doTest(clientConf);
+    });
+    assertTrue(exception.getMessage().contains("could only be written to 0"));
+
   }
 
   @Test
@@ -143,7 +140,7 @@ public class TestSaslDataTransfer extends SaslDataTransferTestCase {
         LoggerFactory.getLogger(DataNode.class));
     try {
       doTest(clientConf);
-      Assert.fail("Should fail if SASL data transfer protection is not " +
+      fail("Should fail if SASL data transfer protection is not " +
           "configured or not supported in client");
     } catch (IOException e) {
       GenericTestUtils.assertMatches(e.getMessage(), 
@@ -160,9 +157,9 @@ public class TestSaslDataTransfer extends SaslDataTransferTestCase {
   @Test
   public void testDataNodeAbortsIfNoSasl() throws Exception {
     HdfsConfiguration clusterConf = createSecureConfig("");
-    exception.expect(RuntimeException.class);
-    exception.expectMessage("Cannot start secure DataNode");
-    startCluster(clusterConf);
+    assertThrows(RuntimeException.class, () -> {
+      startCluster(clusterConf);
+    });
   }
 
   @Test
@@ -170,9 +167,9 @@ public class TestSaslDataTransfer extends SaslDataTransferTestCase {
     HdfsConfiguration clusterConf = createSecureConfig("authentication");
     clusterConf.set(DFS_HTTP_POLICY_KEY,
         HttpConfig.Policy.HTTP_AND_HTTPS.name());
-    exception.expect(RuntimeException.class);
-    exception.expectMessage("Cannot start secure DataNode");
-    startCluster(clusterConf);
+    assertThrows(RuntimeException.class, () -> {
+      startCluster(clusterConf);
+    });
   }
 
   @Test
@@ -226,7 +223,8 @@ public class TestSaslDataTransfer extends SaslDataTransferTestCase {
   /**
    * Verifies that peerFromSocketAndKey honors socket read timeouts.
    */
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void TestPeerFromSocketAndKeyReadTimeout() throws Exception {
     HdfsConfiguration conf = createSecureConfig(
         "authentication,integrity,privacy");
@@ -253,7 +251,7 @@ public class TestSaslDataTransfer extends SaslDataTransferTestCase {
       Peer peer = DFSUtilClient.peerFromSocketAndKey(saslClient, socket,
           dataEncKeyFactory, new Token(), fakeDatanodeId, 1);
       peer.close();
-      Assert.fail("Expected DFSClient#peerFromSocketAndKey to time out.");
+      fail("Expected DFSClient#peerFromSocketAndKey to time out.");
     } catch (SocketTimeoutException e) {
       GenericTestUtils.assertExceptionContains("Read timed out", e);
     } finally {
@@ -304,7 +302,7 @@ public class TestSaslDataTransfer extends SaslDataTransferTestCase {
       saslClient.socketSend(socket, null, null, dataEncryptionKeyFactory,
           null, null);
 
-      Assert.fail("Expected IOException from "
+      fail("Expected IOException from "
           + "SaslDataTransferClient#checkTrustAndSend");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("Encryption enabled", e);
@@ -353,7 +351,7 @@ public class TestSaslDataTransfer extends SaslDataTransferTestCase {
       saslClient.socketSend(socket, null, null, dataEncryptionKeyFactory,
           null, null);
 
-      Assert.fail("Expected IOException from "
+      fail("Expected IOException from "
           + "SaslDataTransferClient#checkTrustAndSend");
     } catch (IOException e) {
       GenericTestUtils.assertExceptionContains("Encryption enabled", e);
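Editor's note: TestSaslDataTransfer above retires two JUnit 4 rules at once — the ExpectedException rule becomes assertThrows with the message checked on the returned exception, and the Timeout rule becomes a class-level @Timeout annotation. A compact sketch of the combined pattern; the method and message below are invented stand-ins for the real test fixtures.

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.io.IOException;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    @Timeout(300)  // was: @Rule public Timeout timeout = new Timeout(300_000);
    public class RuleMigrationSketch {

      // Hypothetical operation used only for illustration.
      private void writeBlocks(String protection) throws IOException {
        throw new IOException("could only be written to 0 datanodes");
      }

      @Test
      public void failsWithoutServerSideSasl() {
        // JUnit 4:
        //   exception.expect(IOException.class);
        //   exception.expectMessage("could only be written to 0");
        IOException e = assertThrows(IOException.class,
            () -> writeBlocks("authentication"));
        assertTrue(e.getMessage().contains("could only be written to 0"));
      }
    }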

+ 11 - 15
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransferExpiredBlockToken.java

@@ -17,12 +17,11 @@
  */
 package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.Random;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -37,12 +36,12 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Retry;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
 
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
+@Timeout(60)
 public class TestSaslDataTransferExpiredBlockToken extends SaslDataTransferTestCase {
   private static final int BLOCK_SIZE = 4096;
   private static final int FILE_SIZE = 2 * BLOCK_SIZE;
@@ -51,10 +50,7 @@ public class TestSaslDataTransferExpiredBlockToken extends SaslDataTransferTestC
   private final byte[] rawData = new byte[FILE_SIZE];
   private MiniDFSCluster cluster;
 
-  @Rule
-  public Timeout timeout = new Timeout(60, TimeUnit.SECONDS);
-
-  @Before
+  @BeforeEach
   public void before() throws Exception {
     Random r = new Random();
     r.nextBytes(rawData);
@@ -73,7 +69,7 @@ public class TestSaslDataTransferExpiredBlockToken extends SaslDataTransferTestC
         1000L);
   }
 
-  @After
+  @AfterEach
   public void shutdown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -99,7 +95,7 @@ public class TestSaslDataTransferExpiredBlockToken extends SaslDataTransferTestC
     } catch (IOException e) {
       return false;
     }
-    assertEquals("Cannot read file.", toRead.length, totalRead);
+    assertEquals(toRead.length, totalRead, "Cannot read file.");
     return checkFile(toRead);
   }
 
@@ -107,7 +103,7 @@ public class TestSaslDataTransferExpiredBlockToken extends SaslDataTransferTestC
   private boolean checkFile2(FSDataInputStream in) {
     byte[] toRead = new byte[FILE_SIZE];
     try {
-      assertEquals("Cannot read file", toRead.length, in.read(0, toRead, 0, toRead.length));
+      assertEquals(toRead.length, in.read(0, toRead, 0, toRead.length), "Cannot read file");
     } catch (IOException e) {
       return false;
     }

+ 50 - 53
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java

@@ -24,12 +24,15 @@ import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.protocol.OutlierMetrics;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 
-import static org.hamcrest.CoreMatchers.is;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -114,8 +117,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Lists;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
@@ -204,9 +206,9 @@ public class TestPBHelper {
   }
 
   void compare(DatanodeStorage dns1, DatanodeStorage dns2) {
-    assertThat(dns2.getStorageID(), is(dns1.getStorageID()));
-    assertThat(dns2.getState(), is(dns1.getState()));
-    assertThat(dns2.getStorageType(), is(dns1.getStorageType()));
+    assertThat(dns2.getStorageID()).isEqualTo(dns1.getStorageID());
+    assertThat(dns2.getState()).isEqualTo(dns1.getState());
+    assertThat(dns2.getStorageType()).isEqualTo(dns1.getStorageType());
   }
 
   @Test
@@ -723,7 +725,7 @@ public class TestPBHelper {
     AclEntry[] actual = Lists.newArrayList(
         PBHelperClient.convertAclEntry(PBHelperClient.convertAclEntryProto(Lists
             .newArrayList(e1, e2, e3)))).toArray(new AclEntry[0]);
-    Assert.assertArrayEquals(expected, actual);
+    assertArrayEquals(expected, actual);
   }
 
   @Test
@@ -733,7 +735,7 @@ public class TestPBHelper {
         .setType(AclEntryType.OTHER).build();
     AclStatus s = new AclStatus.Builder().owner("foo").group("bar").addEntry(e)
         .build();
-    Assert.assertEquals(s, PBHelperClient.convert(PBHelperClient.convert(s)));
+    assertEquals(s, PBHelperClient.convert(PBHelperClient.convert(s)));
   }
   
   @Test
@@ -818,17 +820,14 @@ public class TestPBHelper {
             "peer3", outlierMetrics3));
     SlowPeerReports slowPeersConverted1 = PBHelper.convertSlowPeerInfo(
         PBHelper.convertSlowPeerInfo(slowPeers));
-    assertTrue(
-        "Expected map:" + slowPeers + ", got map:" +
-            slowPeersConverted1.getSlowPeers(),
-        slowPeersConverted1.equals(slowPeers));
+    assertTrue(slowPeersConverted1.equals(slowPeers),
+        "Expected map:" + slowPeers + ", got map:" + slowPeersConverted1.getSlowPeers());
 
     // Test with an empty map.
     SlowPeerReports slowPeersConverted2 = PBHelper.convertSlowPeerInfo(
         PBHelper.convertSlowPeerInfo(SlowPeerReports.EMPTY_REPORT));
-    assertTrue(
-        "Expected empty map:" + ", got map:" + slowPeersConverted2,
-        slowPeersConverted2.equals(SlowPeerReports.EMPTY_REPORT));
+    assertTrue(slowPeersConverted2.equals(SlowPeerReports.EMPTY_REPORT),
+        "Expected empty map:" + ", got map:" + slowPeersConverted2);
   }
 
   @Test
@@ -844,17 +843,14 @@ public class TestPBHelper {
                 SlowDiskReports.DiskOp.WRITE, 1.3)));
     SlowDiskReports slowDisksConverted1 = PBHelper.convertSlowDiskInfo(
         PBHelper.convertSlowDiskInfo(slowDisks));
-    assertTrue(
-        "Expected map:" + slowDisks + ", got map:" +
-            slowDisksConverted1.getSlowDisks(),
-        slowDisksConverted1.equals(slowDisks));
+    assertTrue(slowDisksConverted1.equals(slowDisks),
+        "Expected map:" + slowDisks + ", got map:" + slowDisksConverted1.getSlowDisks());
 
     // Test with an empty map
     SlowDiskReports slowDisksConverted2 = PBHelper.convertSlowDiskInfo(
         PBHelper.convertSlowDiskInfo(SlowDiskReports.EMPTY_REPORT));
-    assertTrue(
-        "Expected empty map:" + ", got map:" + slowDisksConverted2,
-        slowDisksConverted2.equals(SlowDiskReports.EMPTY_REPORT));
+    assertTrue(slowDisksConverted2.equals(SlowDiskReports.EMPTY_REPORT),
+        "Expected empty map:" + ", got map:" + slowDisksConverted2);
   }
 
   private void assertBlockECRecoveryInfoEquals(
@@ -927,12 +923,11 @@ public class TestPBHelper {
         DataChecksum.Type.valueOf(DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT).id));
     HdfsProtos.FsServerDefaultsProto proto = b.build();
 
-    assertFalse("KeyProvider uri is not supported",
-        proto.hasKeyProviderUri());
+    assertFalse(proto.hasKeyProviderUri(), "KeyProvider uri is not supported");
     FsServerDefaults fsServerDefaults = PBHelperClient.convert(proto);
-    Assert.assertNotNull("FsServerDefaults is null", fsServerDefaults);
-    Assert.assertNull("KeyProviderUri should be null",
-        fsServerDefaults.getKeyProviderUri());
+    assertNotNull(fsServerDefaults, "FsServerDefaults is null");
+    assertNull(fsServerDefaults.getKeyProviderUri(),
+        "KeyProviderUri should be null");
   }
 
   @Test
@@ -945,14 +940,14 @@ public class TestPBHelper {
       HdfsProtos.AddErasureCodingPolicyResponseProto proto = PBHelperClient
           .convertAddErasureCodingPolicyResponse(response);
       // Optional fields should not be set.
-      assertFalse("Unnecessary field is set.", proto.hasErrorMsg());
+      assertFalse(proto.hasErrorMsg(), "Unnecessary field is set.");
       // Convert proto back to an object and check for equality.
       AddErasureCodingPolicyResponse convertedResponse = PBHelperClient
           .convertAddErasureCodingPolicyResponse(proto);
-      assertEquals("Converted policy not equal", response.getPolicy(),
-          convertedResponse.getPolicy());
-      assertEquals("Converted policy not equal", response.isSucceed(),
-          convertedResponse.isSucceed());
+      assertEquals(response.getPolicy(), convertedResponse.getPolicy(),
+          "Converted policy not equal");
+      assertEquals(response.isSucceed(), convertedResponse.isSucceed(),
+          "Converted policy not equal");
     }
 
     ErasureCodingPolicy policy = SystemErasureCodingPolicies
@@ -964,10 +959,10 @@ public class TestPBHelper {
     // Convert proto back to an object and check for equality.
     AddErasureCodingPolicyResponse convertedResponse = PBHelperClient
         .convertAddErasureCodingPolicyResponse(proto);
-    assertEquals("Converted policy not equal", response.getPolicy(),
-        convertedResponse.getPolicy());
-    assertEquals("Converted policy not equal", response.getErrorMsg(),
-        convertedResponse.getErrorMsg());
+    assertEquals(response.getPolicy(), convertedResponse.getPolicy(),
+        "Converted policy not equal");
+    assertEquals(response.getErrorMsg(), convertedResponse.getErrorMsg(),
+        "Converted policy not equal");
   }
 
   @Test
@@ -978,13 +973,13 @@ public class TestPBHelper {
       HdfsProtos.ErasureCodingPolicyProto proto = PBHelperClient
           .convertErasureCodingPolicy(policy);
       // Optional fields should not be set.
-      assertFalse("Unnecessary field is set.", proto.hasName());
-      assertFalse("Unnecessary field is set.", proto.hasSchema());
-      assertFalse("Unnecessary field is set.", proto.hasCellSize());
+      assertFalse(proto.hasName(), "Unnecessary field is set.");
+      assertFalse(proto.hasSchema(), "Unnecessary field is set.");
+      assertFalse(proto.hasCellSize(), "Unnecessary field is set.");
       // Convert proto back to an object and check for equality.
       ErasureCodingPolicy convertedPolicy = PBHelperClient
           .convertErasureCodingPolicy(proto);
-      assertEquals("Converted policy not equal", policy, convertedPolicy);
+      assertEquals(policy, convertedPolicy, "Converted policy not equal");
     }
     // Check conversion of a non-built-in policy.
     ECSchema newSchema = new ECSchema("testcodec", 3, 2);
@@ -993,20 +988,22 @@ public class TestPBHelper {
     HdfsProtos.ErasureCodingPolicyProto proto = PBHelperClient
         .convertErasureCodingPolicy(newPolicy);
     // Optional fields should be set.
-    assertTrue("Optional field not set", proto.hasName());
-    assertTrue("Optional field not set", proto.hasSchema());
-    assertTrue("Optional field not set", proto.hasCellSize());
+    assertTrue(proto.hasName(), "Optional field not set");
+    assertTrue(proto.hasSchema(), "Optional field not set");
+    assertTrue(proto.hasCellSize(), "Optional field not set");
     ErasureCodingPolicy convertedPolicy = PBHelperClient
         .convertErasureCodingPolicy(proto);
     // Converted policy should be equal.
-    assertEquals("Converted policy not equal", newPolicy, convertedPolicy);
+    assertEquals(newPolicy, convertedPolicy, "Converted policy not equal");
   }
 
-  @Test(expected = UninitializedMessageException.class)
+  @Test
   public void testErasureCodingPolicyMissingId() throws Exception {
-    HdfsProtos.ErasureCodingPolicyProto.Builder builder =
-        HdfsProtos.ErasureCodingPolicyProto.newBuilder();
-    PBHelperClient.convertErasureCodingPolicy(builder.build());
+    assertThrows(UninitializedMessageException.class, () -> {
+      HdfsProtos.ErasureCodingPolicyProto.Builder builder =
+          HdfsProtos.ErasureCodingPolicyProto.newBuilder();
+      PBHelperClient.convertErasureCodingPolicy(builder.build());
+    });
   }
 
   @Test
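Editor's note: TestPBHelper above also drops Hamcrest's assertThat(actual, is(expected)), which JUnit 5's Assertions does not provide, in favour of AssertJ's fluent assertThat. A minimal sketch of that swap, with values invented for illustration:

    import static org.assertj.core.api.Assertions.assertThat;

    import org.junit.jupiter.api.Test;

    public class AssertJSketch {
      @Test
      public void fluentEqualityCheck() {
        String storageId = "DS-1234";
        // JUnit 4 + Hamcrest: assertThat(storageId, is("DS-1234"));
        assertThat(storageId).isEqualTo("DS-1234");
        assertThat(storageId).startsWith("DS-");
      }
    }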

+ 10 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/QJMTestUtil.java

@@ -17,10 +17,10 @@
  */
 package org.apache.hadoop.hdfs.qjournal;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.Closeable;
 import java.io.File;
@@ -128,9 +128,8 @@ public abstract class QJMTestUtil {
       
       FSEditLogOp op = stream.readOp();
       while (op == null) {
-        assertTrue("Expected to find txid " + expected + ", " +
-            "but no more streams available to read from",
-            iter.hasNext());
+        assertTrue(iter.hasNext(), "Expected to find txid "
+            + expected + ", " + "but no more streams available to read from");
         stream = iter.next();
         op = stream.readOp();
       }
@@ -140,8 +139,8 @@ public abstract class QJMTestUtil {
     }
     
     assertNull(stream.readOp());
-    assertFalse("Expected no more txns after " + lastTxnId +
-        " but more streams are available", iter.hasNext());
+    assertFalse(iter.hasNext(), "Expected no more txns after " + lastTxnId +
+        " but more streams are available");
   }
   
 
@@ -154,8 +153,8 @@ public abstract class QJMTestUtil {
         count++;
       }
     }
-    assertTrue("File " + fname + " should exist in a quorum of dirs",
-        count >= cluster.getQuorumSize());
+    assertTrue(count >= cluster.getQuorumSize(), "File "
+        + fname + " should exist in a quorum of dirs");
   }
 
   public static long recoverAndReturnLastTxn(QuorumJournalManager qjm)

+ 6 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestMiniJournalCluster.java

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.qjournal;
 
-import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -31,10 +30,13 @@ import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+
 public class TestMiniJournalCluster {
 
   private static final Logger LOG = LoggerFactory.getLogger(TestMiniJournalCluster.class);
@@ -103,8 +105,8 @@ public class TestMiniJournalCluster {
     LOG.info("Free socket ports: {}", httpAndRpcPorts);
 
     for (Integer httpAndRpcPort : httpAndRpcPorts) {
-      assertNotEquals("None of the acquired socket port should not be zero", 0,
-          httpAndRpcPort.intValue());
+      assertNotEquals(0, httpAndRpcPort.intValue(),
+          "None of the acquired socket port should not be zero");
     }
 
     final int[] httpPorts = new int[3];

+ 16 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.qjournal;
 
-import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -33,9 +32,13 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ExitUtil;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 
 public class TestNNWithQJM {
@@ -44,18 +47,18 @@ public class TestNNWithQJM {
   private final Path TEST_PATH = new Path("/test-dir");
   private final Path TEST_PATH_2 = new Path("/test-dir-2");
 
-  @Before
+  @BeforeEach
   public void resetSystemExit() {
     ExitUtil.resetFirstExitException();
   }
   
-  @Before
+  @BeforeEach
   public void startJNs() throws Exception {
     mjc = new MiniJournalCluster.Builder(conf).build();
     mjc.waitActive();
   }
   
-  @After
+  @AfterEach
   public void stopJNs() throws Exception {
     if (mjc != null) {
       mjc.shutdown();
@@ -63,7 +66,8 @@ public class TestNNWithQJM {
     }
   }
   
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testLogAndRestart() throws IOException {
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image");
@@ -93,7 +97,8 @@ public class TestNNWithQJM {
     }
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testNewNamenodeTakesOverWriter() throws Exception {
     File nn1Dir = new File(
         MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image-nn1");
@@ -167,7 +172,8 @@ public class TestNNWithQJM {
     }
   }
 
-  @Test (timeout = 30000)
+  @Test
+  @Timeout(value = 30)
   public void testMismatchedNNIsRejected() throws Exception {
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image");

+ 15 - 18
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestSecureNNWithQJM.java

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.qjournal;
 
-import static org.junit.Assert.*;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
@@ -38,6 +37,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -58,14 +58,14 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+
+@Timeout(180)
 public class TestSecureNNWithQJM {
 
   private static final Path TEST_PATH = new Path("/test-dir");
@@ -83,10 +83,7 @@ public class TestSecureNNWithQJM {
   private FileSystem fs;
   private MiniJournalCluster mjc;
 
-  @Rule
-  public Timeout timeout = new Timeout(180000);
-
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     baseDir =
         GenericTestUtils.getTestDir(TestSecureNNWithQJM.class.getSimpleName());
@@ -101,8 +98,8 @@ public class TestSecureNNWithQJM {
     SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,
       baseConf);
     UserGroupInformation.setConfiguration(baseConf);
-    assertTrue("Expected configuration to enable security",
-      UserGroupInformation.isSecurityEnabled());
+    assertTrue(UserGroupInformation.isSecurityEnabled(),
+        "Expected configuration to enable security");
 
     String userName = UserGroupInformation.getLoginUser().getShortUserName();
     File keytabFile = new File(baseDir, userName + ".keytab");
@@ -147,7 +144,7 @@ public class TestSecureNNWithQJM {
         KeyStoreTestUtil.getServerSSLConfigFileName());
   }
 
-  @AfterClass
+  @AfterAll
   public static void destroy() throws Exception {
     if (kdc != null) {
       kdc.stop();
@@ -157,12 +154,12 @@ public class TestSecureNNWithQJM {
     UserGroupInformation.reset();
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = new HdfsConfiguration(baseConf);
   }
 
-  @After
+  @AfterEach
   public void shutdown() throws IOException {
     IOUtils.cleanupWithLogger(null, fs);
     if (cluster != null) {

+ 6 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestEpochsAreUnique.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.qjournal.client;
 
-import static org.junit.Assert.*;
-
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
@@ -31,7 +29,7 @@ import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
 import org.apache.hadoop.hdfs.qjournal.client.AsyncLogger;
 import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -39,6 +37,9 @@ import org.mockito.stubbing.Answer;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 
 public class TestEpochsAreUnique {
   private static final Logger LOG =
@@ -96,8 +97,8 @@ public class TestEpochsAreUnique {
           }
         }
         LOG.info("Created epoch " + newEpoch);
-        assertTrue("New epoch " + newEpoch + " should be greater than previous " +
-            prevEpoch, newEpoch > prevEpoch);
+        assertTrue(newEpoch > prevEpoch, "New epoch " + newEpoch
+            + " should be greater than previous " + prevEpoch);
         prevEpoch = newEpoch;
       }
     } finally {

+ 10 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestIPCLoggerChannel.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.qjournal.client;
 
-import static org.junit.Assert.*;
-
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.concurrent.ExecutionException;
@@ -37,12 +35,19 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import java.util.function.Supplier;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+
 public class TestIPCLoggerChannel {
   private static final Logger LOG = LoggerFactory.getLogger(
       TestIPCLoggerChannel.class);
@@ -62,7 +67,7 @@ public class TestIPCLoggerChannel {
   private static final int LIMIT_QUEUE_SIZE_BYTES =
       LIMIT_QUEUE_SIZE_MB * 1024 * 1024;
   
-  @Before
+  @BeforeEach
   public void setupMock() {
     conf.setInt(DFSConfigKeys.DFS_QJOURNAL_QUEUE_SIZE_LIMIT_KEY,
         LIMIT_QUEUE_SIZE_MB);

+ 7 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java

@@ -22,6 +22,7 @@ import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.JID;
 import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.writeSegment;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -52,9 +53,7 @@ import org.apache.hadoop.hdfs.util.Holder;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -82,7 +81,7 @@ public class TestQJMWithFaults {
   static {
     // Don't retry connections - it just slows down the tests.
     conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
-    
+
     // Make tests run faster by avoiding fsync()
     EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
   }
@@ -128,9 +127,6 @@ public class TestQJMWithFaults {
     return ret;
   }
 
-  @Rule
-  public ExpectedException expectedException = ExpectedException.none();
-
   /**
    * Sets up two of the nodes to each drop a single RPC, at all
    * possible combinations of RPCs. This may result in the
@@ -196,9 +192,10 @@ public class TestQJMWithFaults {
    */
   @Test
   public void testUnresolvableHostName() throws Exception {
-    expectedException.expect(UnknownHostException.class);
-    new QuorumJournalManager(conf,
-        new URI("qjournal://" + "bogus.invalid:12345" + "/" + JID), FAKE_NSINFO);
+    assertThrows(UnknownHostException.class, () -> {
+      new QuorumJournalManager(conf,
+          new URI("qjournal://" + "bogus.invalid:12345" + "/" + JID), FAKE_NSINFO);
+    });
   }
 
   /**

+ 13 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumCall.java

@@ -17,21 +17,24 @@
  */
 package org.apache.hadoop.hdfs.qjournal.client;
 
-import static org.junit.Assert.*;
-
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.hadoop.util.FakeTimer;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
+
 public class TestQuorumCall {
-  @Test(timeout=10000)
+  @Test
+  @Timeout(value = 10)
   public void testQuorums() throws Exception {
     Map<String, SettableFuture<String>> futures = ImmutableMap.of(
         "f1", SettableFuture.<String>create(),
@@ -66,14 +69,15 @@ public class TestQuorumCall {
       // expected
     }
   }
-  @Test(timeout=10000)
+  @Test
+  @Timeout(value = 10)
   public void testQuorumFailsWithoutResponse() throws Exception {
     Map<String, SettableFuture<String>> futures = ImmutableMap.of(
         "f1", SettableFuture.<String>create());
 
     QuorumCall<String, String> q = QuorumCall.create(futures);
-    assertEquals("The number of quorum calls for which a response has been"
-            + " received should be 0", 0, q.countResponses());
+    assertEquals(0, q.countResponses(), "The number of quorum calls for which a response has been"
+            + " received should be 0");
 
     try {
       q.waitFor(0, 1, 100, 10, "test");
@@ -83,7 +87,8 @@ public class TestQuorumCall {
     }
   }
 
-  @Test(timeout=10000)
+  @Test
+  @Timeout(value = 10)
   public void testQuorumSucceedsWithLongPause() throws Exception {
     final Map<String, SettableFuture<String>> futures = ImmutableMap.of(
         "f1", SettableFuture.<String>create());

+ 13 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManagerUnit.java

@@ -17,8 +17,10 @@
  */
 package org.apache.hadoop.hdfs.qjournal.client;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.ArgumentMatchers.anyLong;
@@ -33,7 +35,6 @@ import java.util.List;
 
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.Lists;
-import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -53,8 +54,8 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 import org.mockito.stubbing.Stubber;
 
@@ -84,7 +85,7 @@ public class TestQuorumJournalManagerUnit {
   private List<AsyncLogger> spyLoggers;
   private QuorumJournalManager qjm;
   
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     spyLoggers = ImmutableList.of(
         mockLogger(),
@@ -184,7 +185,7 @@ public class TestQuorumJournalManagerUnit {
     QuorumOutputStream os = (QuorumOutputStream) qjm.startLogSegment(1,
         NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     String report = os.generateReport();
-    Assert.assertFalse("Report should be plain text", report.contains("<"));
+    assertFalse(report.contains("<"), "Report should be plain text");
   }
 
   @Test
@@ -216,10 +217,12 @@ public class TestQuorumJournalManagerUnit {
     stm.flush();
   }
 
-  @Test(expected = IllegalArgumentException.class)
+  @Test
   public void testSetOutputBufferCapacityTooLarge() throws Exception {
-    qjm.setOutputBufferCapacity(
-        CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT + 1);
+    assertThrows(IllegalArgumentException.class, () -> {
+      qjm.setOutputBufferCapacity(
+          CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT + 1);
+    });
   }
 
   // Regression test for HDFS-13977

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestSegmentRecoveryComparator.java

@@ -17,19 +17,18 @@
  */
 package org.apache.hadoop.hdfs.qjournal.client;
 
-import static org.junit.Assert.*;
-
 import java.util.Map;
 import java.util.Map.Entry;
 
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.mockito.Mockito;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
 
 import static org.apache.hadoop.hdfs.qjournal.client.SegmentRecoveryComparator.INSTANCE;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class TestSegmentRecoveryComparator {
   

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestGetJournalEditServlet.java

@@ -21,8 +21,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 import javax.servlet.ServletConfig;
 import javax.servlet.ServletException;
@@ -39,7 +39,7 @@ public class TestGetJournalEditServlet {
 
   private final static GetJournalEditServlet SERVLET = new GetJournalEditServlet();
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws ServletException {
     // Configure Hadoop
     CONF.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");

+ 41 - 32
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java

@@ -17,11 +17,12 @@
  */
 package org.apache.hadoop.hdfs.qjournal.server;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes;
 import java.io.ByteArrayOutputStream;
@@ -50,11 +51,10 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.mockito.Mockito;
 
 public class TestJournal {
@@ -75,7 +75,7 @@ public class TestJournal {
   private Journal journal;
 
   
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     FileUtil.fullyDelete(TEST_LOG_DIR);
     conf = new Configuration();
@@ -86,13 +86,13 @@ public class TestJournal {
     journal.format(FAKE_NSINFO, false);
   }
   
-  @After
+  @AfterEach
   public void verifyNoStorageErrors() throws Exception{
     Mockito.verify(mockErrorReporter, Mockito.never())
       .reportErrorOnFile(Mockito.<File>any());
   }
   
-  @After
+  @AfterEach
   public void cleanup() {
     IOUtils.closeStream(journal);
   }
@@ -115,15 +115,15 @@ public class TestJournal {
     // verify the in-progress editlog segment
     SegmentStateProto segmentState = journal.getSegmentInfo(1);
     assertTrue(segmentState.getIsInProgress());
-    Assert.assertEquals(numTxns, segmentState.getEndTxId());
-    Assert.assertEquals(1, segmentState.getStartTxId());
+    assertEquals(numTxns, segmentState.getEndTxId());
+    assertEquals(1, segmentState.getStartTxId());
     
     // finalize the segment and verify it again
     journal.finalizeLogSegment(makeRI(3), 1, numTxns);
     segmentState = journal.getSegmentInfo(1);
     assertFalse(segmentState.getIsInProgress());
-    Assert.assertEquals(numTxns, segmentState.getEndTxId());
-    Assert.assertEquals(1, segmentState.getStartTxId());
+    assertEquals(numTxns, segmentState.getEndTxId());
+    assertEquals(1, segmentState.getStartTxId());
   }
 
   /**
@@ -168,7 +168,8 @@ public class TestJournal {
     assertTrue(movedTo.exists());
   }
 
-  @Test (timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testEpochHandling() throws Exception {
     assertEquals(0, journal.getLastPromisedEpoch());
     NewEpochResponseProto newEpoch =
@@ -202,7 +203,8 @@ public class TestJournal {
     }
   }
   
-  @Test (timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testMaintainCommittedTxId() throws Exception {
     journal.newEpoch(FAKE_NSINFO, 1);
     journal.startLogSegment(makeRI(1), 1,
@@ -218,7 +220,8 @@ public class TestJournal {
     assertEquals(3, journal.getCommittedTxnId());
   }
   
-  @Test (timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testRestartJournal() throws Exception {
     journal.newEpoch(FAKE_NSINFO, 1);
     journal.startLogSegment(makeRI(1), 1,
@@ -244,7 +247,8 @@ public class TestJournal {
     assertEquals(1, newEpoch.getLastSegmentTxId());
   }
   
-  @Test (timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testFormatResetsCachedValues() throws Exception {
     journal.newEpoch(FAKE_NSINFO, 12345L);
     journal.startLogSegment(new RequestInfo(JID, null, 12345L, 1L, 0L), 1L,
@@ -271,7 +275,8 @@ public class TestJournal {
    * before any transactions are written, that the next newEpoch() call
    * returns the prior segment txid as its most recent segment.
    */
-  @Test (timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testNewEpochAtBeginningOfSegment() throws Exception {
     journal.newEpoch(FAKE_NSINFO, 1);
     journal.startLogSegment(makeRI(1), 1,
@@ -285,9 +290,10 @@ public class TestJournal {
     assertEquals(1, resp.getLastSegmentTxId());
   }
   
-  @Test (timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testJournalLocking() throws Exception {
-    Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
+    assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
     StorageDirectory sd = journal.getStorage().getStorageDir(0);
     File lockFile = new File(sd.getRoot(), Storage.STORAGE_FILE_LOCK);
     
@@ -318,7 +324,8 @@ public class TestJournal {
    * Test finalizing a segment after some batch of edits were missed.
    * This should fail, since we validate the log before finalization.
    */
-  @Test (timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testFinalizeWhenEditsAreMissed() throws Exception {
     journal.newEpoch(FAKE_NSINFO, 1);
     journal.startLogSegment(makeRI(1), 1,
@@ -354,7 +361,8 @@ public class TestJournal {
    * Ensure that finalizing a segment which doesn't exist throws the
    * appropriate exception.
    */
-  @Test (timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testFinalizeMissingSegment() throws Exception {
     journal.newEpoch(FAKE_NSINFO, 1);
     try {
@@ -375,7 +383,8 @@ public class TestJournal {
    * Eventually, the connection comes back, and the NN tries to start a new
    * segment at a higher txid. This should abort the old one and succeed.
    */
-  @Test (timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testAbortOldSegmentIfFinalizeIsMissed() throws Exception {
     journal.newEpoch(FAKE_NSINFO, 1);
     
@@ -406,7 +415,8 @@ public class TestJournal {
    * Test behavior of startLogSegment() when a segment with the
    * same transaction ID already exists.
    */
-  @Test (timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testStartLogSegmentWhenAlreadyExists() throws Exception {
     journal.newEpoch(FAKE_NSINFO, 1);
     
@@ -459,7 +469,8 @@ public class TestJournal {
     return new RequestInfo(JID, null, 1, serial, 0);
   }
   
-  @Test (timeout = 10000)
+  @Test
+  @Timeout(value = 10)
   public void testNamespaceVerification() throws Exception {
     journal.newEpoch(FAKE_NSINFO, 1);
 
@@ -517,9 +528,7 @@ public class TestJournal {
     EditLogFileOutputStream.writeHeader(layoutVersion,
         new DataOutputStream(headerBytes));
     assertEquals(expectedTxnCount, result.getTxnCount());
-    assertArrayEquals(
-        Bytes.concat(
-            headerBytes.toByteArray(),
+    assertArrayEquals(Bytes.concat(headerBytes.toByteArray(),
             QJMTestUtil.createTxnData(startTxn, expectedTxnCount)),
         result.getEditLog().toByteArray());
   }

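A note on the timeout conversion seen throughout TestJournal above: JUnit 4's @Test(timeout = 10000) becomes @Test plus @Timeout(value = 10). The unit changes along the way, since the JUnit 4 attribute is milliseconds while @Timeout defaults to seconds, so 10000 maps to 10. A minimal sketch of the pattern, with an illustrative class name and sleep durations that are not taken from the Hadoop sources:

import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

// Sketch of the @Test(timeout = ...) -> @Timeout migration.
// JUnit 4 timeouts are milliseconds; @Timeout defaults to seconds,
// so @Test(timeout = 10000) corresponds to @Timeout(value = 10).
class TimeoutMigrationSketch {

  @Test
  @Timeout(value = 10)                    // same 10-second budget as timeout = 10000
  void finishesWellWithinTheBudget() throws InterruptedException {
    Thread.sleep(100);                    // trivial work standing in for a journal operation
  }

  @Test
  @Timeout(value = 500, unit = TimeUnit.MILLISECONDS)  // explicit unit for sub-second budgets
  void subSecondBudget() {
    // passes immediately
  }
}
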
+ 8 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeHttpServerXFrame.java

@@ -21,15 +21,16 @@ import java.io.IOException;
 import java.net.HttpURLConnection;
 import java.net.URL;
 
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
 import org.apache.hadoop.http.HttpServer2;
 
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 /**
  * Test that X-Frame-Options works correctly with JournalNodeHttpServer.
  */
@@ -45,8 +46,8 @@ public class TestJournalNodeHttpServerXFrame {
     cluster = createCluster(xFrameEnabled);
     HttpURLConnection conn = getConn(cluster);
     String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
-    Assert.assertTrue("X-FRAME-OPTIONS is absent in the header", xfoHeader != null);
-    Assert.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption.SAMEORIGIN.toString()));
+    assertTrue(xfoHeader != null, "X-FRAME-OPTIONS is absent in the header");
+    assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption.SAMEORIGIN.toString()));
   }
 
   @Test
@@ -56,10 +57,10 @@ public class TestJournalNodeHttpServerXFrame {
     HttpURLConnection conn = getConn(cluster);
     String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
     System.out.println(xfoHeader);
-    Assert.assertTrue("unexpected X-FRAME-OPTION in header", xfoHeader == null);
+    assertTrue(xfoHeader == null, "unexpected X-FRAME-OPTION in header");
   }
 
-  @After
+  @AfterEach
   public void cleanup() throws IOException {
     if (cluster != null) {
       cluster.shutdown();

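The X-Frame-Options hunks above also show the assertion-message move: JUnit 4's Assert puts the message first, JUnit 5's Assertions puts it last. A small sketch of that signature change, using an illustrative header value rather than a live HttpURLConnection:

import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;

import org.junit.jupiter.api.Test;

// JUnit 4: Assert.assertTrue(String message, boolean condition)
// JUnit 5: Assertions.assertTrue(boolean condition, String message)
class AssertionMessageOrderSketch {

  @Test
  void messageIsTheLastArgument() {
    String xfoHeader = "SAMEORIGIN";       // stand-in for conn.getHeaderField("X-FRAME-OPTIONS")
    assertTrue(xfoHeader != null, "X-FRAME-OPTIONS is absent in the header");

    String absentHeader = null;
    assertNull(absentHeader, "unexpected X-FRAME-OPTION in header");
  }
}

assertNull reads slightly more directly than assertTrue(x == null, ...), though the hunk above deliberately keeps the original shape of the check.
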
+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java

@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.hdfs.qjournal.server;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -32,9 +32,9 @@ import javax.management.ObjectName;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.eclipse.jetty.util.ajax.JSON;
 
 /**
@@ -49,7 +49,7 @@ public class TestJournalNodeMXBean {
   private MiniJournalCluster jCluster;
   private JournalNode jn;
   
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     // start 1 journal node
     jCluster = new MiniJournalCluster.Builder(new Configuration()).format(true)
@@ -58,7 +58,7 @@ public class TestJournalNodeMXBean {
     jn = jCluster.getJournalNode(0);
   }
   
-  @After
+  @AfterEach
   public void cleanup() throws IOException {
     if (jCluster != null) {
       jCluster.shutdown();
@@ -105,7 +105,7 @@ public class TestJournalNodeMXBean {
     assertEquals(jn.getClusterIds().size(), clusterId.length);
     assertEquals("mycluster", clusterId[0]);
     long startTime = (long) mbs.getAttribute(mxbeanName, "JNStartedTimeInMillis");
-    assertTrue("JournalNode start time should not be 0", startTime > 0);
+    assertTrue(startTime > 0, "JournalNode start time should not be 0");
     assertEquals(jn.getJNStartedTimeInMillis(), startTime);
     String version = (String) mbs.getAttribute(mxbeanName, "Version");
     assertEquals(jn.getVersion(), version);

+ 27 - 25
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java

@@ -23,15 +23,13 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTP_BIND_HOST_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_RPC_BIND_HOST_KEY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertThat;
-import static org.hamcrest.core.Is.is;
-import static org.hamcrest.core.IsNot.not;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 
 import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -44,7 +42,8 @@ import java.io.IOException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.assertj.core.api.Assertions.assertThat;
 
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 
@@ -67,12 +66,12 @@ public class TestJournalNodeRespectsBindHostKeys {
   private MiniJournalCluster jCluster;
   private JournalNode jn;
 
-  @Before
+  @BeforeEach
   public void setUp() {
     conf = new HdfsConfiguration();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (jCluster != null) {
       jCluster.shutdown();
@@ -86,7 +85,8 @@ public class TestJournalNodeRespectsBindHostKeys {
         toString();
   }
 
-  @Test (timeout=300000)
+  @Test
+  @Timeout(value = 300)
   public void testRpcBindHostKey() throws IOException {
     LOG.info("Testing without " + DFS_JOURNALNODE_RPC_BIND_HOST_KEY);
 
@@ -95,8 +95,8 @@ public class TestJournalNodeRespectsBindHostKeys {
         .numJournalNodes(NUM_JN).build();
     jn = jCluster.getJournalNode(0);
     String address = getRpcServerAddress(jn);
-    assertThat("Bind address not expected to be wildcard by default.",
-        address, not("/" + WILDCARD_ADDRESS));
+    assertThat(address).as("Bind address not expected to be wildcard by default.")
+        .isNotEqualTo("/" + WILDCARD_ADDRESS);
 
     LOG.info("Testing with " + DFS_JOURNALNODE_RPC_BIND_HOST_KEY);
 
@@ -108,11 +108,12 @@ public class TestJournalNodeRespectsBindHostKeys {
         .numJournalNodes(NUM_JN).build();
     jn = jCluster.getJournalNode(0);
     address = getRpcServerAddress(jn);
-    assertThat("Bind address " + address + " is not wildcard.",
-        address, is("/" + WILDCARD_ADDRESS));
+    assertThat(address).as("Bind address " + address + " is not wildcard.")
+        .isEqualTo("/" + WILDCARD_ADDRESS);
   }
 
-  @Test(timeout=300000)
+  @Test
+  @Timeout(value = 300)
   public void testHttpBindHostKey() throws IOException {
     LOG.info("Testing without " + DFS_JOURNALNODE_HTTP_BIND_HOST_KEY);
 
@@ -122,8 +123,8 @@ public class TestJournalNodeRespectsBindHostKeys {
         .numJournalNodes(NUM_JN).build();
     jn = jCluster.getJournalNode(0);
     String address = jn.getHttpAddress().toString();
-    assertFalse("HTTP Bind address not expected to be wildcard by default.",
-        address.startsWith(WILDCARD_ADDRESS));
+    assertFalse(address.startsWith(WILDCARD_ADDRESS),
+        "HTTP Bind address not expected to be wildcard by default.");
 
     LOG.info("Testing with " + DFS_JOURNALNODE_HTTP_BIND_HOST_KEY);
 
@@ -136,8 +137,8 @@ public class TestJournalNodeRespectsBindHostKeys {
         .numJournalNodes(NUM_JN).build();
     jn = jCluster.getJournalNode(0);
     address = jn.getHttpAddress().toString();
-    assertTrue("HTTP Bind address " + address + " is not wildcard.",
-        address.startsWith(WILDCARD_ADDRESS));
+    assertTrue(address.startsWith(WILDCARD_ADDRESS),
+        "HTTP Bind address " + address + " is not wildcard.");
   }
 
   private static final String BASEDIR = System.getProperty("test.build.dir",
@@ -166,7 +167,8 @@ public class TestJournalNodeRespectsBindHostKeys {
    * pick a different host/port combination.
    * @throws Exception
    */
-  @Test (timeout=300000)
+  @Test
+  @Timeout(value = 300)
   public void testHttpsBindHostKey() throws Exception {
     LOG.info("Testing behavior without " + DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY);
 
@@ -180,8 +182,8 @@ public class TestJournalNodeRespectsBindHostKeys {
         .numJournalNodes(NUM_JN).build();
     jn = jCluster.getJournalNode(0);
     String address = jn.getHttpsAddress().toString();
-    assertFalse("HTTP Bind address not expected to be wildcard by default.",
-        address.startsWith(WILDCARD_ADDRESS));
+    assertFalse(address.startsWith(WILDCARD_ADDRESS),
+        "HTTP Bind address not expected to be wildcard by default.");
 
     LOG.info("Testing behavior with " + DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY);
 
@@ -194,7 +196,7 @@ public class TestJournalNodeRespectsBindHostKeys {
         .numJournalNodes(NUM_JN).build();
     jn = jCluster.getJournalNode(0);
     address = jn.getHttpsAddress().toString();
-    assertTrue("HTTP Bind address " + address + " is not wildcard.",
-        address.startsWith(WILDCARD_ADDRESS));
+    assertTrue(address.startsWith(WILDCARD_ADDRESS),
+        "HTTP Bind address " + address + " is not wildcard.");
   }
 }

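In the bind-host test above, Hamcrest's three-argument assertThat(reason, actual, matcher), which JUnit 5's Assertions no longer provides, is replaced with AssertJ's fluent form. A sketch under the assumption that assertj-core is on the test classpath; the address values are illustrative:

import static org.assertj.core.api.Assertions.assertThat;

import org.junit.jupiter.api.Test;

// Hamcrest (JUnit 4 era): assertThat("reason", address, is("/0.0.0.0"));
// AssertJ after migration: assertThat(address).as("reason").isEqualTo("/0.0.0.0");
class AssertJMigrationSketch {

  private static final String WILDCARD_ADDRESS = "0.0.0.0";

  @Test
  void bindAddressChecks() {
    String address = "/0.0.0.0";           // stand-in for getRpcServerAddress(jn)
    assertThat(address)
        .as("Bind address " + address + " is not wildcard.")
        .isEqualTo("/" + WILDCARD_ADDRESS);

    String defaultAddress = "/127.0.0.1";  // illustrative non-wildcard default
    assertThat(defaultAddress)
        .as("Bind address not expected to be wildcard by default.")
        .isNotEqualTo("/" + WILDCARD_ADDRESS);
  }
}
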
+ 19 - 14
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournaledEditsCache.java

@@ -32,16 +32,17 @@ import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.createGabageTxns;
 import static org.apache.hadoop.hdfs.qjournal.QJMTestUtil.createTxnData;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 
 /**
@@ -56,7 +57,7 @@ public class TestJournaledEditsCache {
       PathUtils.getTestDir(TestJournaledEditsCache.class, false);
   private JournaledEditsCache cache;
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY,
@@ -65,7 +66,7 @@ public class TestJournaledEditsCache {
     TEST_DIR.mkdirs();
   }
 
-  @After
+  @AfterEach
   public void cleanup() throws Exception {
     FileUtils.deleteQuietly(TEST_DIR);
   }
@@ -210,15 +211,19 @@ public class TestJournaledEditsCache {
     assertTxnCountAndContents(10, 10, 15);
   }
 
-  @Test(expected = JournaledEditsCache.CacheMissException.class)
+  @Test
   public void testReadUninitializedCache() throws Exception {
-    cache.retrieveEdits(1, 10, new ArrayList<>());
+    assertThrows(JournaledEditsCache.CacheMissException.class, () -> {
+      cache.retrieveEdits(1, 10, new ArrayList<>());
+    });
   }
 
-  @Test(expected = JournaledEditsCache.CacheMissException.class)
+  @Test
   public void testCacheMalformedInput() throws Exception {
-    storeEdits(1, 1);
-    cache.retrieveEdits(-1, 10, new ArrayList<>());
+    assertThrows(JournaledEditsCache.CacheMissException.class, () -> {
+      storeEdits(1, 1);
+      cache.retrieveEdits(-1, 10, new ArrayList<>());
+    });
   }
 
   @Test

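The cache tests above replace @Test(expected = CacheMissException.class) with an explicit assertThrows. Because the JUnit 4 attribute covered the whole method body, the direct translation wraps the entire body in the lambda, as the hunk does; when only one call is expected to throw, scoping the lambda to that call gives a sharper failure and lets the returned exception be inspected. A sketch with hypothetical Cache and CacheMissException types standing in for JournaledEditsCache:

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import org.junit.jupiter.api.Test;

// Sketch of the @Test(expected = ...) -> assertThrows migration.
// Cache and CacheMissException are hypothetical stand-ins for
// JournaledEditsCache and its CacheMissException.
class ExpectedExceptionMigrationSketch {

  static class CacheMissException extends RuntimeException {
    CacheMissException(String msg) {
      super(msg);
    }
  }

  static class Cache {
    void retrieveEdits(long fromTxId) {
      throw new CacheMissException("no edits cached at txid " + fromTxId);
    }
  }

  @Test
  void readUninitializedCache() {
    Cache cache = new Cache();
    // Scope the lambda to the single call expected to throw, so an
    // unexpected failure elsewhere is reported separately.
    CacheMissException e =
        assertThrows(CacheMissException.class, () -> cache.retrieveEdits(1));
    assertEquals("no edits cached at txid 1", e.getMessage());
  }
}
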
+ 89 - 88
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java

@@ -19,12 +19,15 @@
 package org.apache.hadoop.hdfs.security;
 
 
-
 import static org.apache.hadoop.security.authentication.util.KerberosName.setRules;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNotSame;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
@@ -62,10 +65,9 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestDelegationToken {
   private MiniDFSCluster cluster;
@@ -73,8 +75,8 @@ public class TestDelegationToken {
   private Configuration config;
   private static final Logger LOG =
       LoggerFactory.getLogger(TestDelegationToken.class);
-  
-  @Before
+
+  @BeforeEach
   public void setUp() throws Exception {
     config = new HdfsConfiguration();
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
@@ -89,9 +91,9 @@ public class TestDelegationToken {
         cluster.getNamesystem());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
-    if(cluster!=null) {
+    if (cluster != null) {
       cluster.shutdown();
       cluster = null;
     }
@@ -103,60 +105,60 @@ public class TestDelegationToken {
         owner), new Text(renewer), null);
     return new Token<DelegationTokenIdentifier>(dtId, dtSecretManager);
   }
-  
+
   @Test
   public void testDelegationTokenSecretManager() throws Exception {
     Token<DelegationTokenIdentifier> token = generateDelegationToken(
         "SomeUser", "JobTracker");
     // Fake renewer should not be able to renew
     try {
-  	  dtSecretManager.renewToken(token, "FakeRenewer");
-  	  Assert.fail("should have failed");
+      dtSecretManager.renewToken(token, "FakeRenewer");
+      fail("should have failed");
     } catch (AccessControlException ace) {
       // PASS
     }
-	  dtSecretManager.renewToken(token, "JobTracker");
+    dtSecretManager.renewToken(token, "JobTracker");
     DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
     byte[] tokenId = token.getIdentifier();
     identifier.readFields(new DataInputStream(
-             new ByteArrayInputStream(tokenId)));
-    Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
+        new ByteArrayInputStream(tokenId)));
+    assertTrue(null != dtSecretManager.retrievePassword(identifier));
     LOG.info("Sleep to expire the token");
-	  Thread.sleep(6000);
-	  //Token should be expired
-	  try {
-	    dtSecretManager.retrievePassword(identifier);
-	    //Should not come here
-	    Assert.fail("Token should have expired");
-	  } catch (InvalidToken e) {
-	    //Success
-	  }
-	  dtSecretManager.renewToken(token, "JobTracker");
-	  LOG.info("Sleep beyond the max lifetime");
-	  Thread.sleep(5000);
-	  try {
-  	  dtSecretManager.renewToken(token, "JobTracker");
-  	  Assert.fail("should have been expired");
-	  } catch (InvalidToken it) {
-	    // PASS
-	  }
+    Thread.sleep(6000);
+    //Token should be expired
+    try {
+      dtSecretManager.retrievePassword(identifier);
+      //Should not come here
+      fail("Token should have expired");
+    } catch (InvalidToken e) {
+      //Success
+    }
+    dtSecretManager.renewToken(token, "JobTracker");
+    LOG.info("Sleep beyond the max lifetime");
+    Thread.sleep(5000);
+    try {
+      dtSecretManager.renewToken(token, "JobTracker");
+      fail("should have been expired");
+    } catch (InvalidToken it) {
+      // PASS
+    }
   }
-  
-  @Test 
+
+  @Test
   public void testCancelDelegationToken() throws Exception {
     Token<DelegationTokenIdentifier> token = generateDelegationToken(
         "SomeUser", "JobTracker");
     //Fake renewer should not be able to renew
     try {
       dtSecretManager.cancelToken(token, "FakeCanceller");
-      Assert.fail("should have failed");
+      fail("should have failed");
     } catch (AccessControlException ace) {
       // PASS
     }
     dtSecretManager.cancelToken(token, "JobTracker");
     try {
       dtSecretManager.renewToken(token, "JobTracker");
-      Assert.fail("should have failed");
+      fail("should have failed");
     } catch (InvalidToken it) {
       // PASS
     }
@@ -188,15 +190,15 @@ public class TestDelegationToken {
     DistributedFileSystem dfs = cluster.getFileSystem();
     Credentials creds = new Credentials();
     final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
-    Assert.assertEquals(1, tokens.length);
-    Assert.assertEquals(1, creds.numberOfTokens());
+    assertEquals(1, tokens.length);
+    assertEquals(1, creds.numberOfTokens());
     checkTokenIdentifier(ugi, tokens[0]);
 
     final Token<?> tokens2[] = dfs.addDelegationTokens("JobTracker", creds);
-    Assert.assertEquals(0, tokens2.length); // already have token
-    Assert.assertEquals(1, creds.numberOfTokens());
+    assertEquals(0, tokens2.length); // already have token
+    assertEquals(1, creds.numberOfTokens());
   }
-  
+
   @Test
   public void testDelegationTokenWebHdfsApi() throws Exception {
     GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.TRACE);
@@ -207,21 +209,21 @@ public class TestDelegationToken {
         "JobTracker", new String[]{"user"});
     final WebHdfsFileSystem webhdfs = ugi.doAs(
         new PrivilegedExceptionAction<WebHdfsFileSystem>() {
-      @Override
-      public WebHdfsFileSystem run() throws Exception {
-        return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
-      }
-    });
+          @Override
+          public WebHdfsFileSystem run() throws Exception {
+            return (WebHdfsFileSystem) FileSystem.get(new URI(uri), config);
+          }
+        });
 
     { //test addDelegationTokens(..)
       Credentials creds = new Credentials();
       final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
-      Assert.assertEquals(1, tokens.length);
-      Assert.assertEquals(1, creds.numberOfTokens());
-      Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
+      assertEquals(1, tokens.length);
+      assertEquals(1, creds.numberOfTokens());
+      assertSame(tokens[0], creds.getAllTokens().iterator().next());
       checkTokenIdentifier(ugi, tokens[0]);
       final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
-      Assert.assertEquals(0, tokens2.length);
+      assertEquals(0, tokens2.length);
     }
   }
 
@@ -230,9 +232,8 @@ public class TestDelegationToken {
     final DistributedFileSystem dfs = cluster.getFileSystem();
     final Credentials creds = new Credentials();
     final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
-    Assert.assertEquals(1, tokens.length);
-    @SuppressWarnings("unchecked")
-    final Token<DelegationTokenIdentifier> token =
+    assertEquals(1, tokens.length);
+    @SuppressWarnings("unchecked") final Token<DelegationTokenIdentifier> token =
         (Token<DelegationTokenIdentifier>) tokens[0];
     final UserGroupInformation longUgi = UserGroupInformation
         .createRemoteUser("JobTracker/foo.com@FOO.COM");
@@ -244,7 +245,7 @@ public class TestDelegationToken {
         try {
           token.renew(config);
         } catch (Exception e) {
-          Assert.fail("Could not renew delegation token for user "+longUgi);
+          fail("Could not renew delegation token for user " + longUgi);
         }
         return null;
       }
@@ -262,7 +263,7 @@ public class TestDelegationToken {
         try {
           token.cancel(config);
         } catch (Exception e) {
-          Assert.fail("Could not cancel delegation token for user "+longUgi);
+          fail("Could not cancel delegation token for user " + longUgi);
         }
         return null;
       }
@@ -273,7 +274,7 @@ public class TestDelegationToken {
   public void testDelegationTokenUgi() throws Exception {
     final DistributedFileSystem dfs = cluster.getFileSystem();
     Token<?>[] tokens = dfs.addDelegationTokens("renewer", null);
-    Assert.assertEquals(1, tokens.length);
+    assertEquals(1, tokens.length);
     Token<?> token1 = tokens[0];
     DelegationTokenIdentifier ident =
         (DelegationTokenIdentifier) token1.decodeIdentifier();
@@ -281,21 +282,21 @@ public class TestDelegationToken {
 
     // get 2 new instances (clones) of the identifier, query their ugi
     // twice each, all ugi instances should be equivalent
-    for (int i=0; i<2; i++) {
+    for (int i = 0; i < 2; i++) {
       DelegationTokenIdentifier identClone =
-          (DelegationTokenIdentifier)token1.decodeIdentifier();
-      Assert.assertEquals(ident, identClone);
-      Assert.assertNotSame(ident, identClone);
-      Assert.assertSame(expectedUgi, identClone.getUser());
-      Assert.assertSame(expectedUgi, identClone.getUser());
+          (DelegationTokenIdentifier) token1.decodeIdentifier();
+      assertEquals(ident, identClone);
+      assertNotSame(ident, identClone);
+      assertSame(expectedUgi, identClone.getUser());
+      assertSame(expectedUgi, identClone.getUser());
     }
 
     // a new token must decode to a different ugi instance than the first token
     tokens = dfs.addDelegationTokens("renewer", null);
-    Assert.assertEquals(1, tokens.length);
+    assertEquals(1, tokens.length);
     Token<?> token2 = tokens[0];
-    Assert.assertNotEquals(token1, token2);
-    Assert.assertNotSame(expectedUgi, token2.decodeIdentifier().getUser());
+    assertNotEquals(token1, token2);
+    assertNotSame(expectedUgi, token2.decodeIdentifier().getUser());
   }
 
   /**
@@ -309,10 +310,10 @@ public class TestDelegationToken {
     cluster.startDataNodes(config, 1, true, StartupOption.REGULAR, null);
     FileSystem fs = cluster.getFileSystem();
     for (int i = 0; i < 5; i++) {
-      DFSTestUtil.createFile(fs, new Path("/test-" + i), 100, (short)1, 1L);
+      DFSTestUtil.createFile(fs, new Path("/test-" + i), 100, (short) 1, 1L);
     }
     cluster.getConfiguration(0).setInt(
-        DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY, 500); 
+        DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY, 500);
     cluster.getConfiguration(0).setInt(
         DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 30000);
     cluster.setWaitSafeMode(false);
@@ -320,19 +321,18 @@ public class TestDelegationToken {
     NameNode nn = cluster.getNameNode();
     assertTrue(nn.isInSafeMode());
     DelegationTokenSecretManager sm =
-      NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
-    assertFalse("Secret manager should not run in safe mode", sm.isRunning());
-    
+        NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
+    assertFalse(sm.isRunning(), "Secret manager should not run in safe mode");
+
     NameNodeAdapter.leaveSafeMode(nn);
-    assertTrue("Secret manager should start when safe mode is exited",
-        sm.isRunning());
-    
+    assertTrue(sm.isRunning(), "Secret manager should start when safe mode is exited");
+
     LOG.info("========= entering safemode again");
-    
+
     NameNodeAdapter.enterSafeMode(nn, false);
-    assertFalse("Secret manager should stop again when safe mode " +
-        "is manually entered", sm.isRunning());
-    
+    assertFalse(sm.isRunning(), "Secret manager should stop again when safe mode " +
+        "is manually entered");
+
     // Set the cluster to leave safemode quickly on its own.
     cluster.getConfiguration(0).setInt(
         DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
@@ -344,11 +344,11 @@ public class TestDelegationToken {
     assertFalse(nn.isInSafeMode());
     assertTrue(sm.isRunning());
   }
-  
+
   @SuppressWarnings("unchecked")
   private void checkTokenIdentifier(UserGroupInformation ugi, final Token<?> token)
       throws Exception {
-    Assert.assertNotNull(token);
+    assertNotNull(token);
     // should be able to use token.decodeIdentifier() but webhdfs isn't
     // registered with the service loader for token decoding
     DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
@@ -359,9 +359,9 @@ public class TestDelegationToken {
     } finally {
       in.close();
     }
-    Assert.assertNotNull(identifier);
+    assertNotNull(identifier);
     LOG.info("A valid token should have non-null password, and should be renewed successfully");
-    Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
+    assertTrue(null != dtSecretManager.retrievePassword(identifier));
     dtSecretManager.renewToken((Token<DelegationTokenIdentifier>) token, "JobTracker");
     ugi.doAs(
         new PrivilegedExceptionAction<Object>() {
@@ -378,8 +378,8 @@ public class TestDelegationToken {
   public void testDelegationTokenIdentifierToString() throws Exception {
     DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
         "SomeUser"), new Text("JobTracker"), null);
-    Assert.assertEquals("HDFS_DELEGATION_TOKEN token 0" +
-        " for SomeUser with renewer JobTracker",
+    assertEquals("HDFS_DELEGATION_TOKEN token 0" +
+            " for SomeUser with renewer JobTracker",
         dtId.toStringStable());
   }
 
@@ -402,6 +402,7 @@ public class TestDelegationToken {
       public DelegationTokenIdentifier createIdentifier() {
         return null;
       }
+
       public void logExpireTokens(Collection<DelegationTokenIdentifier> expiredTokens)
           throws IOException {
         super.logExpireTokens(expiredTokens);

+ 17 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java

@@ -50,10 +50,12 @@ import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class TestDelegationTokenForProxyUser {
   private static MiniDFSCluster cluster;
@@ -96,7 +98,7 @@ public class TestDelegationTokenForProxyUser {
         builder.toString());
   }
   
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     config = new HdfsConfiguration();
     config.setLong(
@@ -118,14 +120,15 @@ public class TestDelegationTokenForProxyUser {
         GROUP_NAMES);
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     if(cluster!=null) {
       cluster.shutdown();
     }
   }
  
-  @Test(timeout=20000)
+  @Test
+  @Timeout(value = 20)
   public void testDelegationTokenWithRealUser() throws IOException {
     try {
       Token<?>[] tokens = proxyUgi
@@ -139,15 +142,16 @@ public class TestDelegationTokenForProxyUser {
       byte[] tokenId = tokens[0].getIdentifier();
       identifier.readFields(new DataInputStream(new ByteArrayInputStream(
           tokenId)));
-      Assert.assertEquals(identifier.getUser().getUserName(), PROXY_USER);
-      Assert.assertEquals(identifier.getUser().getRealUser().getUserName(),
+      assertEquals(identifier.getUser().getUserName(), PROXY_USER);
+      assertEquals(identifier.getUser().getRealUser().getUserName(),
           REAL_USER);
     } catch (InterruptedException e) {
       //Do Nothing
     }
   }
   
-  @Test(timeout=5000)
+  @Test
+  @Timeout(value = 5)
   public void testWebHdfsDoAs() throws Exception {
     WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()");
     WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName());
@@ -161,7 +165,7 @@ public class TestDelegationTokenForProxyUser {
     {
       Path responsePath = webhdfs.getHomeDirectory();
       WebHdfsTestUtil.LOG.info("responsePath=" + responsePath);
-      Assert.assertEquals(webhdfs.getUri() + "/user/" + PROXY_USER, responsePath.toString());
+      assertEquals(webhdfs.getUri() + "/user/" + PROXY_USER, responsePath.toString());
     }
 
     final Path f = new Path("/testWebHdfsDoAs/a.txt");
@@ -172,7 +176,7 @@ public class TestDelegationTokenForProxyUser {
   
       final FileStatus status = webhdfs.getFileStatus(f);
       WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
-      Assert.assertEquals(PROXY_USER, status.getOwner());
+      assertEquals(PROXY_USER, status.getOwner());
     }
 
     {
@@ -183,7 +187,7 @@ public class TestDelegationTokenForProxyUser {
       final FileStatus status = webhdfs.getFileStatus(f);
       WebHdfsTestUtil.LOG.info("status.getOwner()=" + status.getOwner());
       WebHdfsTestUtil.LOG.info("status.getLen()  =" + status.getLen());
-      Assert.assertEquals(PROXY_USER, status.getOwner());
+      assertEquals(PROXY_USER, status.getOwner());
     }
   }
 }

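The proxy-user test above moves the class-level fixture from @BeforeClass/@AfterClass to @BeforeAll/@AfterAll. As in JUnit 4, these methods must be static under the default per-method test lifecycle. A minimal sketch; the StringBuilder fixture is only a placeholder for an expensive shared resource such as a MiniDFSCluster:

import static org.junit.jupiter.api.Assertions.assertNotNull;

import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

// @BeforeClass/@AfterClass become @BeforeAll/@AfterAll; both variants
// require static methods unless the class opts into a per-class lifecycle.
class ClassLifecycleSketch {

  // Placeholder for an expensive shared fixture.
  private static StringBuilder cluster;

  @BeforeAll
  static void setUp() {
    cluster = new StringBuilder("started");
  }

  @AfterAll
  static void tearDown() {
    if (cluster != null) {
      cluster = null;                      // mirrors cluster.shutdown()
    }
  }

  @Test
  void fixtureIsAvailable() {
    assertNotNull(cluster);
  }
}
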
+ 18 - 20
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java

@@ -19,11 +19,12 @@
 package org.apache.hadoop.hdfs.security.token.block;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
@@ -87,10 +88,8 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -125,7 +124,7 @@ public class TestBlockToken {
   final ExtendedBlock block2 = new ExtendedBlock("10", 10L);
   final ExtendedBlock block3 = new ExtendedBlock("-10", -108L);
 
-  @Before
+  @BeforeEach
   public void disableKerberos() {
     Configuration conf = new Configuration();
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
@@ -152,12 +151,12 @@ public class TestBlockToken {
           (GetReplicaVisibleLengthRequestProto) args[1];
       Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
           .getTokenIdentifiers();
-      assertEquals("Only one BlockTokenIdentifier expected", 1, tokenIds.size());
+      assertEquals(1, tokenIds.size(), "Only one BlockTokenIdentifier expected");
       long result = 0;
       for (TokenIdentifier tokenId : tokenIds) {
         BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
         LOG.info("Got: " + id.toString());
-        assertTrue("Received BlockTokenIdentifier is wrong", ident.equals(id));
+        assertTrue(ident.equals(id), "Received BlockTokenIdentifier is wrong");
         sm.checkAccess(id, null, PBHelperClient.convert(req.getBlock()),
             BlockTokenIdentifier.AccessMode.WRITE,
             new StorageType[]{StorageType.DEFAULT}, null);
@@ -376,7 +375,7 @@ public class TestBlockToken {
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
 
-    Assume.assumeTrue(FD_DIR.exists());
+    assumeTrue(FD_DIR.exists());
     BlockTokenSecretManager sm = new BlockTokenSecretManager(
         blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
         enableProtobuf);
@@ -534,7 +533,7 @@ public class TestBlockToken {
       }
       Token<BlockTokenIdentifier> token = locatedBlocks.getLastLocatedBlock()
           .getBlockToken();
-      Assert.assertEquals(BlockTokenIdentifier.KIND_NAME, token.getKind());
+      assertEquals(BlockTokenIdentifier.KIND_NAME, token.getKind());
       out.close();
     } finally {
       cluster.shutdown();
@@ -866,10 +865,9 @@ public class TestBlockToken {
       int rangeStart = nnIdx * interval;
       for(int i = 0; i < interval * 3; i++) {
         int serialNo = sm.getSerialNoForTesting();
-        assertTrue(
-            "serialNo " + serialNo + " is not in the designated range: [" +
-                rangeStart + ", " + (rangeStart + interval) + ")",
-                serialNo >= rangeStart && serialNo < (rangeStart + interval));
+        assertTrue(serialNo >= rangeStart && serialNo < (rangeStart + interval),
+            "serialNo " + serialNo + " is not in the designated range: [" + rangeStart
+                + ", " + (rangeStart + interval) + ")");
         sm.updateKeys();
       }
     }
@@ -960,8 +958,8 @@ public class TestBlockToken {
       in.read(readData);
       // DFSInputStream#refetchLocations() minimum wait for 1sec to refetch
       // complete located blocks.
-      assertTrue("Should not wait for refetch complete located blocks",
-          1000L > (System.currentTimeMillis() - startTime));
+      assertTrue(1000L > (System.currentTimeMillis() - startTime),
+          "Should not wait for refetch complete located blocks");
     }
   }
 }

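TestJournal and TestBlockToken both swap Assume.assumeTrue for the statically imported Assumptions.assumeTrue. The effect is the same in spirit: a failed assumption aborts the test and reports it as skipped rather than failed. A sketch with an illustrative path that mirrors FD_DIR in TestBlockToken:

import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assumptions.assumeTrue;

import java.io.File;

import org.junit.jupiter.api.Test;

// Assume.assumeTrue(...) (JUnit 4) -> Assumptions.assumeTrue(...) (JUnit 5).
// A failed assumption aborts the test and marks it skipped.
class AssumptionMigrationSketch {

  @Test
  void onlyRunsWhereProcFdExists() {
    File fdDir = new File("/proc/self/fd");  // illustrative platform-specific path
    assumeTrue(fdDir.exists());              // skips on platforms without /proc
    assertTrue(fdDir.isDirectory());
  }
}
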
+ 13 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestUpdateDataNodeCurrentKey.java

@@ -24,19 +24,21 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import java.io.IOException;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
 public class TestUpdateDataNodeCurrentKey {
   private static final short REPLICATION = (short)1;
   private MiniDFSCluster cluster = null;
   private Configuration config;
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     config = new Configuration();
     config.setInt(
@@ -58,7 +60,7 @@ public class TestUpdateDataNodeCurrentKey {
     cluster.waitActive();
   }
 
-  @After
+  @AfterEach
   public void shutDownCluster() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -72,7 +74,7 @@ public class TestUpdateDataNodeCurrentKey {
     final DataNode dataNode = cluster.getDataNodes().get(0);
     BlockKey currentKey = dataNode.getBlockPoolTokenSecretManager().
         get(bpid).getCurrentKey();
-    Assert.assertTrue(currentKey != null);
+    assertTrue(currentKey != null);
   }
 
   @Test
@@ -88,7 +90,7 @@ public class TestUpdateDataNodeCurrentKey {
     final DataNode dataNode = cluster.getDataNodes().get(0);
     BlockKey currentKey = dataNode.getBlockPoolTokenSecretManager().
         get(bpid).getCurrentKey();
-    Assert.assertEquals(annCurrentKey, currentKey);
+    assertEquals(annCurrentKey, currentKey);
   }
 
   @Test
@@ -102,7 +104,7 @@ public class TestUpdateDataNodeCurrentKey {
 
     final DatanodeInfo[] dataNodeInfos = cluster.getNameNodeRpc(0).
         getDatanodeReport(HdfsConstants.DatanodeReportType.LIVE);
-    Assert.assertEquals(2, dataNodeInfos.length);
+    assertEquals(2, dataNodeInfos.length);
 
     //Simulate nameNode restart
     cluster.restartNameNode(1, true);
@@ -116,7 +118,7 @@ public class TestUpdateDataNodeCurrentKey {
         get(bpid).getCurrentKey();
     BlockKey dn2CurrentKey = newDataNode.getBlockPoolTokenSecretManager().
         get(bpid).getCurrentKey();
-    Assert.assertEquals(dnCurrentKey, dn2CurrentKey);
-    Assert.assertEquals(currentKey, dn2CurrentKey);
+    assertEquals(dnCurrentKey, dn2CurrentKey);
+    assertEquals(currentKey, dn2CurrentKey);
   }
 }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithSaslDataTransfer.java

@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestBalancerWithSaslDataTransfer extends SaslDataTransferTestCase {
 

+ 42 - 44
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java

@@ -43,13 +43,13 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCas
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 import org.eclipse.jetty.util.ajax.JSON;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 /**
  * Class for testing {@link DataNodeMXBean} implementation
  */
@@ -65,7 +65,7 @@ public class TestDataNodeMXBean extends SaslDataTransferTestCase {
 
     try {
       List<DataNode> datanodes = cluster.getDataNodes();
-      Assert.assertEquals(datanodes.size(), 1);
+      assertEquals(datanodes.size(), 1);
       DataNode datanode = datanodes.get(0);
 
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 
@@ -73,54 +73,54 @@ public class TestDataNodeMXBean extends SaslDataTransferTestCase {
           "Hadoop:service=DataNode,name=DataNodeInfo");
       // get attribute "ClusterId"
       String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
-      Assert.assertEquals(datanode.getClusterId(), clusterId);
+      assertEquals(datanode.getClusterId(), clusterId);
       // get attribute "Version"
       String version = (String)mbs.getAttribute(mxbeanName, "Version");
-      Assert.assertEquals(datanode.getVersion(),version);
+      assertEquals(datanode.getVersion(), version);
       // get attribute "DNStartedTimeInMillis"
       long startTime = (long) mbs.getAttribute(mxbeanName, "DNStartedTimeInMillis");
-      Assert.assertTrue("Datanode start time should not be 0", startTime > 0);
-      Assert.assertEquals(datanode.getDNStartedTimeInMillis(), startTime);
+      assertTrue(startTime > 0, "Datanode start time should not be 0");
+      assertEquals(datanode.getDNStartedTimeInMillis(), startTime);
       // get attribute "SotfwareVersion"
       String softwareVersion =
           (String)mbs.getAttribute(mxbeanName, "SoftwareVersion");
-      Assert.assertEquals(datanode.getSoftwareVersion(),softwareVersion);
-      Assert.assertEquals(version, softwareVersion
+      assertEquals(datanode.getSoftwareVersion(), softwareVersion);
+      assertEquals(version, softwareVersion
           + ", r" + datanode.getRevision());
       // get attribute "RpcPort"
       String rpcPort = (String)mbs.getAttribute(mxbeanName, "RpcPort");
-      Assert.assertEquals(datanode.getRpcPort(),rpcPort);
+      assertEquals(datanode.getRpcPort(), rpcPort);
       // get attribute "HttpPort"
       String httpPort = (String)mbs.getAttribute(mxbeanName, "HttpPort");
-      Assert.assertNotNull(httpPort);
-      Assert.assertEquals(datanode.getHttpPort(),httpPort);
+      assertNotNull(httpPort);
+      assertEquals(datanode.getHttpPort(), httpPort);
       // get attribute "NamenodeAddresses"
       String namenodeAddresses = (String)mbs.getAttribute(mxbeanName, 
           "NamenodeAddresses");
-      Assert.assertEquals(datanode.getNamenodeAddresses(),namenodeAddresses);
+      assertEquals(datanode.getNamenodeAddresses(), namenodeAddresses);
       // get attribute "getDatanodeHostname"
       String datanodeHostname = (String)mbs.getAttribute(mxbeanName,
           "DatanodeHostname");
-      Assert.assertEquals(datanode.getDatanodeHostname(),datanodeHostname);
+      assertEquals(datanode.getDatanodeHostname(), datanodeHostname);
       // get attribute "getVolumeInfo"
       String volumeInfo = (String)mbs.getAttribute(mxbeanName, "VolumeInfo");
-      Assert.assertEquals(replaceDigits(datanode.getVolumeInfo()),
+      assertEquals(replaceDigits(datanode.getVolumeInfo()),
           replaceDigits(volumeInfo));
       // Ensure mxbean's XceiverCount is same as the DataNode's
       // live value.
       int xceiverCount = (Integer)mbs.getAttribute(mxbeanName,
           "XceiverCount");
-      Assert.assertEquals(datanode.getXceiverCount(), xceiverCount);
+      assertEquals(datanode.getXceiverCount(), xceiverCount);
       // Ensure mxbean's XmitsInProgress is same as the DataNode's
       // live value.
       int xmitsInProgress =
           (Integer) mbs.getAttribute(mxbeanName, "XmitsInProgress");
-      Assert.assertEquals(datanode.getXmitsInProgress(), xmitsInProgress);
+      assertEquals(datanode.getXmitsInProgress(), xmitsInProgress);
       String bpActorInfo = (String)mbs.getAttribute(mxbeanName,
           "BPServiceActorInfo");
-      Assert.assertEquals(datanode.getBPServiceActorInfo(), bpActorInfo);
+      assertEquals(datanode.getBPServiceActorInfo(), bpActorInfo);
       String slowDisks = (String)mbs.getAttribute(mxbeanName, "SlowDisks");
-      Assert.assertEquals(datanode.getSlowDisks(), slowDisks);
+      assertEquals(datanode.getSlowDisks(), slowDisks);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -137,7 +137,7 @@ public class TestDataNodeMXBean extends SaslDataTransferTestCase {
     try (MiniDFSCluster cluster =
                  new MiniDFSCluster.Builder(simpleConf).build()) {
       List<DataNode> datanodes = cluster.getDataNodes();
-      Assert.assertEquals(datanodes.size(), 1);
+      assertEquals(datanodes.size(), 1);
       DataNode datanode = datanodes.get(0);
 
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
@@ -146,15 +146,15 @@ public class TestDataNodeMXBean extends SaslDataTransferTestCase {
 
       boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
               "SecurityEnabled");
-      Assert.assertFalse(securityEnabled);
-      Assert.assertEquals(datanode.isSecurityEnabled(), securityEnabled);
+      assertFalse(securityEnabled);
+      assertEquals(datanode.isSecurityEnabled(), securityEnabled);
     }
 
     // get attribute "SecurityEnabled" with secure configuration
     try (MiniDFSCluster cluster =
                  new MiniDFSCluster.Builder(secureConf).build()) {
       List<DataNode> datanodes = cluster.getDataNodes();
-      Assert.assertEquals(datanodes.size(), 1);
+      assertEquals(datanodes.size(), 1);
       DataNode datanode = datanodes.get(0);
 
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
@@ -163,8 +163,8 @@ public class TestDataNodeMXBean extends SaslDataTransferTestCase {
 
       boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
               "SecurityEnabled");
-      Assert.assertTrue(securityEnabled);
-      Assert.assertEquals(datanode.isSecurityEnabled(), securityEnabled);
+      assertTrue(securityEnabled);
+      assertEquals(datanode.isSecurityEnabled(), securityEnabled);
     }
 
     // setting back the authentication method
@@ -193,7 +193,7 @@ public class TestDataNodeMXBean extends SaslDataTransferTestCase {
           "Hadoop:service=DataNode,name=DataNodeInfo");
       String bpActorInfo = (String)mbs.getAttribute(mxbeanName,
           "BPServiceActorInfo");
-      Assert.assertEquals(dn.getBPServiceActorInfo(), bpActorInfo);
+      assertEquals(dn.getBPServiceActorInfo(), bpActorInfo);
       LOG.info("bpActorInfo is " + bpActorInfo);
       TypeReference<ArrayList<Map<String, String>>> typeRef
           = new TypeReference<ArrayList<Map<String, String>>>() {};
@@ -208,12 +208,10 @@ public class TestDataNodeMXBean extends SaslDataTransferTestCase {
           Integer.valueOf(bpActorInfoList.get(0).get("maxBlockReportSize"));
       LOG.info("maxDataLength is " + maxDataLength);
       LOG.info("maxBlockReportSize is " + maxBlockReportSize);
-      assertTrue("maxBlockReportSize should be greater than zero",
-          maxBlockReportSize > 0);
-      assertEquals("maxDataLength should be exactly "
-          + "the same value of ipc.maximum.data.length",
-          confMaxDataLength,
-          maxDataLength);
+      assertTrue(maxBlockReportSize > 0,
+          "maxBlockReportSize should be greater than zero");
+      assertEquals(confMaxDataLength, maxDataLength, "maxDataLength should be exactly "
+          + "the same value of ipc.maximum.data.length");
     }
   }
 
@@ -234,10 +232,10 @@ public class TestDataNodeMXBean extends SaslDataTransferTestCase {
         DFSTestUtil.createFile(fs, new Path("/tmp.txt" + i), 1024, (short) 1,
                 1L);
       }
-      assertEquals("Before restart DN", 5, getTotalNumBlocks(mbs, mxbeanName));
+      assertEquals(5, getTotalNumBlocks(mbs, mxbeanName), "Before restart DN");
       cluster.restartDataNode(0);
       cluster.waitActive();
-      assertEquals("After restart DN", 5, getTotalNumBlocks(mbs, mxbeanName));
+      assertEquals(5, getTotalNumBlocks(mbs, mxbeanName), "After restart DN");
       fs.delete(new Path("/tmp.txt1"), true);
       // The total numBlocks should be updated after one file is deleted
       GenericTestUtils.waitFor(new Supplier<Boolean>() {
@@ -281,7 +279,7 @@ public class TestDataNodeMXBean extends SaslDataTransferTestCase {
 
     try {
       List<DataNode> datanodes = cluster.getDataNodes();
-      Assert.assertEquals(datanodes.size(), 1);
+      assertEquals(datanodes.size(), 1);
       DataNode datanode = datanodes.get(0);
       String slowDiskPath = "test/data1/slowVolume";
       datanode.getDiskMetrics().addSlowDiskForTesting(slowDiskPath, null);
@@ -291,8 +289,8 @@ public class TestDataNodeMXBean extends SaslDataTransferTestCase {
           "Hadoop:service=DataNode,name=DataNodeInfo");
 
       String slowDisks = (String)mbs.getAttribute(mxbeanName, "SlowDisks");
-      Assert.assertEquals(datanode.getSlowDisks(), slowDisks);
-      Assert.assertTrue(slowDisks.contains(slowDiskPath));
+      assertEquals(datanode.getSlowDisks(), slowDisks);
+      assertTrue(slowDisks.contains(slowDiskPath));
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }
@@ -364,10 +362,10 @@ public class TestDataNodeMXBean extends SaslDataTransferTestCase {
     long lastHeartbeatSent2 =
         Long.parseLong(bpServiceActorInfo2.get(lastHeartbeat));
 
-    Assert.assertTrue(lastHeartbeat + " for first bp service actor is higher than 5s",
-        lastHeartbeatSent1 < 5L);
-    Assert.assertTrue(lastHeartbeat + " for second bp service actor is higher than 5s",
-        lastHeartbeatSent2 < 5L);
+    assertTrue(lastHeartbeatSent1 < 5L, lastHeartbeat
+        + " for first bp service actor is higher than 5s");
+    assertTrue(lastHeartbeatSent2 < 5L, lastHeartbeat
+        + " for second bp service actor is higher than 5s");
   }
 
 }

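A small aside on the MXBean assertions above: Assertions.assertEquals takes (expected, actual) with the optional message last, and the failure text reads "expected: <...> but was: <...>", so keeping the literal in the first position keeps that message accurate. A one-method sketch; the list is a stand-in for cluster.getDataNodes():

import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.List;

import org.junit.jupiter.api.Test;

// assertEquals(expected, actual, message): putting the literal first keeps
// the generated "expected ... but was ..." failure message truthful.
class AssertEqualsOrderSketch {

  @Test
  void expectedValueComesFirst() {
    List<String> datanodes = List.of("dn0");  // stand-in for cluster.getDataNodes()
    assertEquals(1, datanodes.size(), "exactly one datanode should be running");
  }
}
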
+ 34 - 36
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecureNameNode.java

@@ -17,9 +17,11 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.security.PrivilegedExceptionAction;
@@ -34,19 +36,13 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
+import org.junit.jupiter.api.Test;
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 
 public class TestSecureNameNode extends SaslDataTransferTestCase {
   final static private int NUM_OF_DATANODES = 0;
 
-  @Rule
-  public ExpectedException exception = ExpectedException.none();
-
 
   @Test
   public void testName() throws Exception {
@@ -78,14 +74,15 @@ public class TestSecureNameNode extends SaslDataTransferTestCase {
         }
       });
       Path p = new Path("/mydir");
-      exception.expect(IOException.class);
-      fs.mkdirs(p);
-
-      Path tmp = new Path("/tmp/alpha");
-      fs.mkdirs(tmp);
-      assertNotNull(fs.listStatus(tmp));
-      assertEquals(AuthenticationMethod.KERBEROS,
-          ugi.getAuthenticationMethod());
+      assertThrows(IOException.class, () -> {
+        fs.mkdirs(p);
+
+        Path tmp = new Path("/tmp/alpha");
+        fs.mkdirs(tmp);
+        assertNotNull(fs.listStatus(tmp));
+        assertEquals(AuthenticationMethod.KERBEROS,
+            ugi.getAuthenticationMethod());
+      });
     } finally {
       if (cluster != null) {
         cluster.shutdown();
@@ -103,21 +100,22 @@ public class TestSecureNameNode extends SaslDataTransferTestCase {
    */
   @Test
   public void testKerberosHdfsBlockTokenInconsistencyNNStartup() throws Exception {
-    MiniDFSCluster dfsCluster = null;
-    HdfsConfiguration conf = createSecureConfig(
-        "authentication,privacy");
-    try {
-      conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, false);
-      exception.expect(IOException.class);
-      exception.expectMessage("Security is enabled but block access tokens");
-      dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
-      dfsCluster.waitActive();
-    } finally {
-      if (dfsCluster != null) {
-        dfsCluster.shutdown();
+    IOException exception = assertThrows(IOException.class, () -> {
+      MiniDFSCluster dfsCluster = null;
+      HdfsConfiguration conf = createSecureConfig(
+          "authentication,privacy");
+      try {
+        conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, false);
+        dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+        dfsCluster.waitActive();
+      } finally {
+        if (dfsCluster != null) {
+          dfsCluster.shutdown();
+        }
       }
-    }
-    return;
+      return;
+    });
+    assertTrue(exception.getMessage().contains("Security is enabled but block access tokens"));
   }
 
   /**
@@ -145,8 +143,8 @@ public class TestSecureNameNode extends SaslDataTransferTestCase {
 
       boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
               "SecurityEnabled");
-      Assert.assertFalse(securityEnabled);
-      Assert.assertEquals(namenode.isSecurityEnabled(), securityEnabled);
+      assertFalse(securityEnabled);
+      assertEquals(namenode.isSecurityEnabled(), securityEnabled);
     }
 
     // get attribute "SecurityEnabled" with secure configuration
@@ -161,8 +159,8 @@ public class TestSecureNameNode extends SaslDataTransferTestCase {
 
       boolean securityEnabled = (boolean) mbs.getAttribute(mxbeanName,
               "SecurityEnabled");
-      Assert.assertTrue(securityEnabled);
-      Assert.assertEquals(namenode.isSecurityEnabled(), securityEnabled);
+      assertTrue(securityEnabled);
+      assertEquals(namenode.isSecurityEnabled(), securityEnabled);
     }
   }
 

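TestSecureNameNode above drops the ExpectedException rule, since JUnit Jupiter does not run JUnit 4 rules, and instead captures the exception with assertThrows and checks its message. A sketch of the same shape, with a hypothetical startCluster helper standing in for building a MiniDFSCluster with block access tokens disabled:

import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.io.IOException;

import org.junit.jupiter.api.Test;

// ExpectedException.expect(...)/expectMessage(...) has no Jupiter equivalent;
// assertThrows returns the exception so the message can be checked directly.
class ExpectedExceptionRuleSketch {

  // Hypothetical stand-in for starting a cluster with an invalid configuration.
  private void startCluster(boolean blockTokensEnabled) throws IOException {
    if (!blockTokensEnabled) {
      throw new IOException("Security is enabled but block access tokens are disabled");
    }
  }

  @Test
  void startupFailsWithoutBlockTokens() {
    IOException e = assertThrows(IOException.class, () -> startCluster(false));
    assertTrue(e.getMessage().contains("Security is enabled but block access tokens"));
  }
}
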
+ 110 - 90
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java

@@ -21,7 +21,13 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
-import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import java.io.DataOutputStream;
 import java.io.File;
@@ -83,9 +89,8 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -166,14 +171,16 @@ public class TestShortCircuitCache {
     }
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testCreateAndDestroy() throws Exception {
     ShortCircuitCache cache =
         new ShortCircuitCache(10, 1, 10, 1, 1, 10000, 0);
     cache.close();
   }
 
-  @Test(timeout=5000)
+  @Test
+  @Timeout(value = 5)
   public void testInvalidConfiguration() throws Exception {
     LambdaTestUtils.intercept(IllegalArgumentException.class,
         "maxTotalSize must be greater than zero.",
@@ -189,7 +196,8 @@ public class TestShortCircuitCache {
         () -> new ShortCircuitCache(10, 1, 10, -1, 1, 10000, 0));
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testAddAndRetrieve() throws Exception {
     final ShortCircuitCache cache =
         new ShortCircuitCache(10, 10000000, 10, 10000000, 1, 10000, 0);
@@ -200,16 +208,16 @@ public class TestShortCircuitCache {
     Preconditions.checkNotNull(replicaInfo1.getReplica());
     Preconditions.checkState(replicaInfo1.getInvalidTokenException() == null);
     pair.compareWith(replicaInfo1.getReplica().getDataStream(),
-                     replicaInfo1.getReplica().getMetaStream());
+        replicaInfo1.getReplica().getMetaStream());
     ShortCircuitReplicaInfo replicaInfo2 =
-      cache.fetchOrCreate(new ExtendedBlockId(123, "test_bp1"),
-          new ShortCircuitReplicaCreator() {
-        @Override
-        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
-          Assert.fail("expected to use existing entry.");
-          return null;
-        }
-      });
+        cache.fetchOrCreate(new ExtendedBlockId(123, "test_bp1"),
+            new ShortCircuitReplicaCreator() {
+              @Override
+              public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
+                fail("expected to use existing entry.");
+                return null;
+              }
+            });
     Preconditions.checkNotNull(replicaInfo2.getReplica());
     Preconditions.checkState(replicaInfo2.getInvalidTokenException() == null);
     Preconditions.checkState(replicaInfo1 == replicaInfo2);
@@ -222,14 +230,14 @@ public class TestShortCircuitCache {
     // around for a while (we have configured the expiry period to be really,
     // really long here)
     ShortCircuitReplicaInfo replicaInfo3 =
-      cache.fetchOrCreate(
-          new ExtendedBlockId(123, "test_bp1"), new ShortCircuitReplicaCreator() {
-        @Override
-        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
-          Assert.fail("expected to use existing entry.");
-          return null;
-        }
-      });
+        cache.fetchOrCreate(
+            new ExtendedBlockId(123, "test_bp1"), new ShortCircuitReplicaCreator() {
+              @Override
+              public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
+                fail("expected to use existing entry.");
+                return null;
+              }
+            });
     Preconditions.checkNotNull(replicaInfo3.getReplica());
     Preconditions.checkState(replicaInfo3.getInvalidTokenException() == null);
     replicaInfo3.getReplica().unref();
@@ -238,7 +246,8 @@ public class TestShortCircuitCache {
     cache.close();
   }
 
-  @Test(timeout=100000)
+  @Test
+  @Timeout(value = 100)
   public void testExpiry() throws Exception {
     final ShortCircuitCache cache =
         new ShortCircuitCache(2, 1, 1, 10000000, 1, 10000000, 0);
@@ -272,7 +281,8 @@ public class TestShortCircuitCache {
   }
   
   
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testEviction() throws Exception {
     final ShortCircuitCache cache =
         new ShortCircuitCache(2, 10000000, 1, 10000000, 1, 10000, 0);
@@ -305,13 +315,13 @@ public class TestShortCircuitCache {
       final Integer iVal = i;
       replicaInfos[i] = cache.fetchOrCreate(
           new ExtendedBlockId(i, "test_bp1"),
-            new ShortCircuitReplicaCreator() {
-        @Override
-        public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
-          Assert.fail("expected to use existing entry for " + iVal);
-          return null;
-        }
-      });
+          new ShortCircuitReplicaCreator() {
+            @Override
+            public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
+              fail("expected to use existing entry for " + iVal);
+              return null;
+            }
+          });
       Preconditions.checkNotNull(replicaInfos[i].getReplica());
       Preconditions.checkState(replicaInfos[i].getInvalidTokenException() == null);
       pairs[i].compareWith(replicaInfos[i].getReplica().getDataStream(),
@@ -329,7 +339,7 @@ public class TestShortCircuitCache {
         }
       });
     Preconditions.checkState(replicaInfos[0].getReplica() == null);
-    Assert.assertTrue(calledCreate.isTrue());
+    assertTrue(calledCreate.isTrue());
     // Clean up
     for (int i = 1; i < pairs.length; i++) {
       replicaInfos[i].getReplica().unref();
@@ -340,7 +350,8 @@ public class TestShortCircuitCache {
     cache.close();
   }
   
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testTimeBasedStaleness() throws Exception {
     // Set up the cache with a short staleness time.
     final ShortCircuitCache cache =
@@ -400,13 +411,13 @@ public class TestShortCircuitCache {
     // Make sure that second replica did not go stale.
     ShortCircuitReplicaInfo info = cache.fetchOrCreate(
         new ExtendedBlockId(1, "test_bp1"), new ShortCircuitReplicaCreator() {
-      @Override
-      public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
-        Assert.fail("second replica went stale, despite 1 " +
-            "hour staleness time.");
-        return null;
-      }
-    });
+          @Override
+          public ShortCircuitReplicaInfo createShortCircuitReplicaInfo() {
+            fail("second replica went stale, despite 1 " +
+                "hour staleness time.");
+            return null;
+          }
+        });
     info.getReplica().unref();
 
     // Clean up
@@ -429,7 +440,7 @@ public class TestShortCircuitCache {
     conf.setBoolean(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
     DFSInputStream.tcpReadsDisabledForTesting = true;
     DomainSocket.disableBindPathValidation();
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    assumeTrue(DomainSocket.getLoadingFailureReason() == null);
     return conf;
   }
   
@@ -440,7 +451,8 @@ public class TestShortCircuitCache {
     return new DomainPeer(sock);
   }
   
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testAllocShm() throws Exception {
     BlockReaderTestUtil.enableShortCircuitShmTracing();
     TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
@@ -456,7 +468,7 @@ public class TestShortCircuitCache {
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
           throws IOException {
         // The ClientShmManager starts off empty
-        Assert.assertEquals(0,  info.size());
+        assertEquals(0,  info.size());
       }
     });
     DomainPeer peer = getDomainPeerToDn(conf);
@@ -468,18 +480,18 @@ public class TestShortCircuitCache {
     // Allocating the first shm slot requires using up a peer.
     Slot slot = cache.allocShmSlot(datanode, peer, usedPeer,
                     blockId, "testAllocShm_client");
-    Assert.assertNotNull(slot);
-    Assert.assertTrue(usedPeer.booleanValue());
+    assertNotNull(slot);
+    assertTrue(usedPeer.booleanValue());
     cache.getDfsClientShmManager().visit(new Visitor() {
       @Override
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
           throws IOException {
         // The ClientShmManager starts off empty
-        Assert.assertEquals(1,  info.size());
+        assertEquals(1,  info.size());
         PerDatanodeVisitorInfo vinfo = info.get(datanode);
-        Assert.assertFalse(vinfo.disabled);
-        Assert.assertEquals(0, vinfo.full.size());
-        Assert.assertEquals(1, vinfo.notFull.size());
+        assertFalse(vinfo.disabled);
+        assertEquals(0, vinfo.full.size());
+        assertEquals(1, vinfo.notFull.size());
       }
     });
     cache.scheduleSlotReleaser(slot);
@@ -510,7 +522,8 @@ public class TestShortCircuitCache {
     sockDir.close();
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testShmBasedStaleness() throws Exception {
     BlockReaderTestUtil.enableShortCircuitShmTracing();
     TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
@@ -530,7 +543,7 @@ public class TestShortCircuitCache {
     int first = fis.read();
     final ExtendedBlock block =
         DFSTestUtil.getFirstBlock(fs, new Path(TEST_FILE));
-    Assert.assertTrue(first != -1);
+    assertTrue(first != -1);
     cache.accept(new CacheVisitor() {
       @Override
       public void visit(int numOutstandingMmaps,
@@ -540,8 +553,8 @@ public class TestShortCircuitCache {
           LinkedMap evictableMmapped) {
         ShortCircuitReplica replica = replicas.get(
             ExtendedBlockId.fromExtendedBlock(block));
-        Assert.assertNotNull(replica);
-        Assert.assertTrue(replica.getSlot().isValid());
+        assertNotNull(replica);
+        assertTrue(replica.getSlot().isValid());
       }
     });
     // Stop the Namenode.  This will close the socket keeping the client's
@@ -556,8 +569,8 @@ public class TestShortCircuitCache {
           LinkedMap evictableMmapped) {
         ShortCircuitReplica replica = replicas.get(
             ExtendedBlockId.fromExtendedBlock(block));
-        Assert.assertNotNull(replica);
-        Assert.assertFalse(replica.getSlot().isValid());
+        assertNotNull(replica);
+        assertFalse(replica.getSlot().isValid());
       }
     });
     cluster.shutdown();
@@ -569,7 +582,8 @@ public class TestShortCircuitCache {
    * The DataNode will notify the DFSClient that the replica is stale via the
    * ShortCircuitShm.
    */
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testUnlinkingReplicasInFileDescriptorCache() throws Exception {
     BlockReaderTestUtil.enableShortCircuitShmTracing();
     TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
@@ -590,7 +604,7 @@ public class TestShortCircuitCache {
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
           throws IOException {
         // The ClientShmManager starts off empty.
-        Assert.assertEquals(0,  info.size());
+        assertEquals(0,  info.size());
       }
     });
     final Path TEST_PATH = new Path("/test_file");
@@ -601,7 +615,7 @@ public class TestShortCircuitCache {
     byte contents[] = DFSTestUtil.readFileBuffer(fs, TEST_PATH);
     byte expected[] = DFSTestUtil.
         calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
-    Assert.assertTrue(Arrays.equals(contents, expected));
+    assertTrue(Arrays.equals(contents, expected));
     // Loading this file brought the ShortCircuitReplica into our local
     // replica cache.
     final DatanodeInfo datanode = new DatanodeInfoBuilder()
@@ -611,12 +625,12 @@ public class TestShortCircuitCache {
       @Override
       public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
           throws IOException {
-        Assert.assertTrue(info.get(datanode).full.isEmpty());
-        Assert.assertFalse(info.get(datanode).disabled);
-        Assert.assertEquals(1, info.get(datanode).notFull.values().size());
+        assertTrue(info.get(datanode).full.isEmpty());
+        assertFalse(info.get(datanode).disabled);
+        assertEquals(1, info.get(datanode).notFull.values().size());
         DfsClientShm shm =
             info.get(datanode).notFull.values().iterator().next();
-        Assert.assertFalse(shm.isDisconnected());
+        assertFalse(shm.isDisconnected());
       }
     });
     // Remove the file whose blocks we just read.
@@ -633,9 +647,9 @@ public class TestShortCircuitCache {
             @Override
             public void visit(HashMap<DatanodeInfo,
                   PerDatanodeVisitorInfo> info) throws IOException {
-              Assert.assertTrue(info.get(datanode).full.isEmpty());
-              Assert.assertFalse(info.get(datanode).disabled);
-              Assert.assertEquals(1,
+              assertTrue(info.get(datanode).full.isEmpty());
+              assertFalse(info.get(datanode).disabled);
+              assertEquals(1,
                   info.get(datanode).notFull.values().size());
               DfsClientShm shm = info.get(datanode).notFull.values().
                   iterator().next();
@@ -687,7 +701,8 @@ public class TestShortCircuitCache {
   }
 
   // Regression test for HDFS-7915
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testDataXceiverCleansUpSlotsOnFailure() throws Exception {
     BlockReaderTestUtil.enableShortCircuitShmTracing();
     TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
@@ -729,7 +744,8 @@ public class TestShortCircuitCache {
   }
 
   // Regression test for HADOOP-11802
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testDataXceiverHandlesRequestShortCircuitShmFailure()
       throws Exception {
     BlockReaderTestUtil.enableShortCircuitShmTracing();
@@ -762,7 +778,7 @@ public class TestShortCircuitCache {
       // The shared memory segment allocation will fail because of the failure
       // injector.
       DFSTestUtil.readFileBuffer(fs, TEST_PATH1);
-      Assert.fail("expected readFileBuffer to fail, but it succeeded.");
+      fail("expected readFileBuffer to fail, but it succeeded.");
     } catch (Throwable t) {
       GenericTestUtils.assertExceptionContains("TCP reads were disabled for " +
           "testing, but we failed to do a non-TCP read.", t);
@@ -796,7 +812,8 @@ public class TestShortCircuitCache {
   }
 
   // Regression test for HDFS-8070
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testPreReceiptVerificationDfsClientCanDoScr() throws Exception {
     BlockReaderTestUtil.enableShortCircuitShmTracing();
     TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
@@ -915,7 +932,7 @@ public class TestShortCircuitCache {
       DatanodeInfo[] nodes = blk.getLocations();
 
       try {
-        Assert.assertNull(new BlockReaderFactory(new DfsClientConf(conf))
+        assertNull(new BlockReaderFactory(new DfsClientConf(conf))
             .setInetSocketAddress(NetUtils.createSocketAddr(nodes[0]
                 .getXferAddr()))
             .setClientCacheContext(clientContext)
@@ -924,13 +941,14 @@ public class TestShortCircuitCache {
             .setBlockToken(new Token())
             .createShortCircuitReplicaInfo());
       } catch (NullPointerException ex) {
-        Assert.fail("Should not throw NPE when the native library is unable " +
+        fail("Should not throw NPE when the native library is unable " +
             "to create new files!");
       }
     }
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testDomainSocketClosedByDN() throws Exception {
     TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
     Configuration conf =
@@ -966,16 +984,17 @@ public class TestShortCircuitCache {
       Thread.sleep(2000);
       cache.scheduleSlotReleaser(slot2);
       Thread.sleep(2000);
-      Assert.assertEquals(0,
+      assertEquals(0,
           cluster.getDataNodes().get(0).getShortCircuitRegistry().getShmNum());
-      Assert.assertEquals(0, cache.getDfsClientShmManager().getShmNum());
+      assertEquals(0, cache.getDfsClientShmManager().getShmNum());
     } finally {
       cluster.shutdown();
     }
   }
 
   // Regression test for HDFS-16535
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testDomainSocketClosedByMultipleDNs() throws Exception {
     TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
     String testName = "testDomainSocketClosedByMultipleDNs";
@@ -1025,33 +1044,34 @@ public class TestShortCircuitCache {
       dn1.getShortCircuitRegistry()
           .registerSlot(blockId1, slot3.getSlotId(), false);
 
-      Assert.assertEquals(2, cache.getDfsClientShmManager().getShmNum());
-      Assert.assertEquals(1, dn0.getShortCircuitRegistry().getShmNum());
-      Assert.assertEquals(1, dn1.getShortCircuitRegistry().getShmNum());
+      assertEquals(2, cache.getDfsClientShmManager().getShmNum());
+      assertEquals(1, dn0.getShortCircuitRegistry().getShmNum());
+      assertEquals(1, dn1.getShortCircuitRegistry().getShmNum());
 
       // Release the slot of DataNode-1 first.
       cache.scheduleSlotReleaser(slot3);
       Thread.sleep(2000);
-      Assert.assertEquals(1, cache.getDfsClientShmManager().getShmNum());
+      assertEquals(1, cache.getDfsClientShmManager().getShmNum());
 
       // Release the slots of DataNode-0.
       cache.scheduleSlotReleaser(slot1);
       Thread.sleep(2000);
-      Assert.assertEquals("0 ShmNum means the shm of DataNode-0 is shutdown" +
-              " due to slot release failures.",
-          1, cache.getDfsClientShmManager().getShmNum());
+      assertEquals(1, cache.getDfsClientShmManager().getShmNum(),
+          "0 ShmNum means the shm of DataNode-0 is shutdown"
+              + " due to slot release failures.");
       cache.scheduleSlotReleaser(slot2);
       Thread.sleep(2000);
 
-      Assert.assertEquals(0, dn0.getShortCircuitRegistry().getShmNum());
-      Assert.assertEquals(0, dn1.getShortCircuitRegistry().getShmNum());
-      Assert.assertEquals(0, cache.getDfsClientShmManager().getShmNum());
+      assertEquals(0, dn0.getShortCircuitRegistry().getShmNum());
+      assertEquals(0, dn1.getShortCircuitRegistry().getShmNum());
+      assertEquals(0, cache.getDfsClientShmManager().getShmNum());
     } finally {
       cluster.shutdown();
     }
   }
 
-  @Test(timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testDNRestart() throws Exception {
     TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
     Configuration conf = createShortCircuitConf("testDNRestart", sockDir);
@@ -1089,9 +1109,9 @@ public class TestShortCircuitCache {
       }
       cache.scheduleSlotReleaser(slot2);
       Thread.sleep(2000);
-      Assert.assertEquals(0,
+      assertEquals(0,
           cluster.getDataNodes().get(0).getShortCircuitRegistry().getShmNum());
-      Assert.assertEquals(0, cache.getDfsClientShmManager().getShmNum());
+      assertEquals(0, cache.getDfsClientShmManager().getShmNum());
     } finally {
       cluster.shutdown();
     }
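
The TestShortCircuitCache changes above show the other conversions that recur throughout this patch: @Test(timeout = 60000), given in milliseconds, becomes @Test plus @Timeout(value = 60), which defaults to seconds; Assume.assumeThat(x, equalTo(null)) becomes assumeTrue(x == null); and the assertion message moves from the first argument in JUnit 4 to the last in JUnit 5. A minimal sketch of those patterns follows; the names are illustrative and not taken from the patch.

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assumptions.assumeTrue;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

// Illustrative sketch of the timeout, assumption and message-ordering conversions;
// not code from the patch above.
class JupiterConversionSketch {

  // JUnit 4: @Test(timeout = 60000)   -- milliseconds
  @Test
  @Timeout(value = 60)                 // seconds unless a TimeUnit is supplied
  void sharedMemorySegmentCount() {
    // JUnit 4: Assume.assumeThat(loadingFailureReason(), equalTo(null));
    assumeTrue(loadingFailureReason() == null);

    // JUnit 4: Assert.assertEquals("message", expected, actual);
    // JUnit 5: the message is the last parameter.
    assertEquals(1, shmCount(), "expected exactly one shared-memory segment");
  }

  private String loadingFailureReason() {
    return null; // stands in for a check such as DomainSocket.getLoadingFailureReason()
  }

  private int shmCount() {
    return 1;
  }
}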

+ 49 - 42
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java

@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.hdfs.shortcircuit;
 
-import static org.hamcrest.CoreMatchers.equalTo;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 import java.io.EOFException;
 import java.io.File;
@@ -61,12 +62,11 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 
 /**
  * Test for short circuit read functionality using {@link BlockReaderLocal}.
@@ -78,20 +78,20 @@ import org.junit.Test;
 public class TestShortCircuitLocalRead {
   private static TemporarySocketDirectory sockDir;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() {
     sockDir = new TemporarySocketDirectory();
     DomainSocket.disableBindPathValidation();
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws IOException {
     sockDir.close();
   }
 
-  @Before
+  @BeforeEach
   public void before() {
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    assumeTrue(DomainSocket.getLoadingFailureReason() == null);
   }
 
   static final long seed = 0xDEADBEEFL;
@@ -115,7 +115,7 @@ public class TestShortCircuitLocalRead {
       int len, String message) {
     for (int idx = 0; idx < len; idx++) {
       if (expected[from + idx] != actual[idx]) {
-        Assert.fail(message + " byte " + (from + idx) + " differs. expected " +
+        fail(message + " byte " + (from + idx) + " differs. expected " +
             expected[from + idx] + " actual " + actual[idx] +
             "\nexpected: " +
             StringUtils.byteToHexString(expected, from, from + len) +
@@ -275,8 +275,7 @@ public class TestShortCircuitLocalRead {
     try {
       // check that / exists
       Path path = new Path("/");
-      assertTrue("/ should be a directory",
-          fs.getFileStatus(path).isDirectory());
+      assertTrue(fs.getFileStatus(path).isDirectory(), "/ should be a directory");
 
       byte[] fileData = AppendTestUtil.randomBytes(seed, size);
       Path file1 = fs.makeQualified(new Path("filelocal.dat"));
@@ -295,17 +294,20 @@ public class TestShortCircuitLocalRead {
     }
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testFileLocalReadNoChecksum() throws Exception {
     doTestShortCircuitRead(true, 3*blockSize+100, 0);
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testFileLocalReadChecksum() throws Exception {
     doTestShortCircuitRead(false, 3*blockSize+100, 0);
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testSmallFileLocalRead() throws Exception {
     doTestShortCircuitRead(false, 13, 0);
     doTestShortCircuitRead(false, 13, 5);
@@ -313,7 +315,8 @@ public class TestShortCircuitLocalRead {
     doTestShortCircuitRead(true, 13, 5);
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testLocalReadLegacy() throws Exception {
     doTestShortCircuitReadLegacy(true, 13, 0, getCurrentUser(),
         getCurrentUser(), false);
@@ -324,19 +327,22 @@ public class TestShortCircuitLocalRead {
    * to use short circuit. The test ensures reader falls back to non
    * shortcircuit reads when shortcircuit is disallowed.
    */
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testLocalReadFallback() throws Exception {
     doTestShortCircuitReadLegacy(
         true, 13, 0, getCurrentUser(), "notallowed", true);
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testReadFromAnOffset() throws Exception {
     doTestShortCircuitRead(false, 3*blockSize+100, 777);
     doTestShortCircuitRead(true, 3*blockSize+100, 777);
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testLongFile() throws Exception {
     doTestShortCircuitRead(false, 10*blockSize+100, 777);
     doTestShortCircuitRead(true, 10*blockSize+100, 777);
@@ -353,7 +359,8 @@ public class TestShortCircuitLocalRead {
     });
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
     final Configuration conf = new Configuration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
@@ -374,11 +381,11 @@ public class TestShortCircuitLocalRead {
               dnInfo, conf, 60000, false);
       try {
         proxy.getBlockLocalPathInfo(blk, token);
-        Assert.fail("The call should have failed as this user "
+        fail("The call should have failed as this user "
             + " is not configured in "
             + DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
       } catch (IOException ex) {
-        Assert.assertTrue(ex.getMessage().contains(
+        assertTrue(ex.getMessage().contains(
             "not configured in "
             + DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
       }
@@ -388,7 +395,8 @@ public class TestShortCircuitLocalRead {
     }
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testSkipWithVerifyChecksum() throws IOException {
     int size = blockSize;
     Configuration conf = new Configuration();
@@ -405,8 +413,7 @@ public class TestShortCircuitLocalRead {
     try {
       // check that / exists
       Path path = new Path("/");
-      assertTrue("/ should be a directory",
-          fs.getFileStatus(path).isDirectory());
+      assertTrue(fs.getFileStatus(path).isDirectory(), "/ should be a directory");
 
       byte[] fileData = AppendTestUtil.randomBytes(seed, size*3);
       // create a new file in home directory. Do not close it.
@@ -433,7 +440,8 @@ public class TestShortCircuitLocalRead {
     }
   }
 
-  @Test(timeout=120000)
+  @Test
+  @Timeout(value = 120)
   public void testHandleTruncatedBlockFile() throws IOException {
     MiniDFSCluster cluster = null;
     HdfsConfiguration conf = new HdfsConfiguration();
@@ -467,10 +475,10 @@ public class TestShortCircuitLocalRead {
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
@@ -489,7 +497,7 @@ public class TestShortCircuitLocalRead {
         byte buf[] = new byte[100];
         fsIn.seek(2000);
         fsIn.readFully(buf, 0, buf.length);
-        Assert.fail("shouldn't be able to read from corrupt 0-length " +
+        fail("shouldn't be able to read from corrupt 0-length " +
             "block file.");
       } catch (IOException e) {
         DFSClient.LOG.error("caught exception ", e);
@@ -585,7 +593,8 @@ public class TestShortCircuitLocalRead {
     fs.delete(file1, false);
   }
 
-  @Test(timeout=60000)
+  @Test
+  @Timeout(value = 60)
   public void testReadWithRemoteBlockReader2()
       throws IOException, InterruptedException {
     doTestShortCircuitReadWithRemoteBlockReader2(3 * blockSize + 100,
@@ -610,8 +619,7 @@ public class TestShortCircuitLocalRead {
     // check that / exists
     Path path = new Path("/");
     URI uri = cluster.getURI();
-    assertTrue(
-        "/ should be a directory", fs.getFileStatus(path).isDirectory());
+    assertTrue(fs.getFileStatus(path).isDirectory(), "/ should be a directory");
 
     byte[] fileData = AppendTestUtil.randomBytes(seed, size);
     Path file1 = new Path("filelocal.dat");
@@ -620,16 +628,15 @@ public class TestShortCircuitLocalRead {
     stm.write(fileData);
     stm.close();
     try {
-      checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, 
+      checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser,
           conf, shortCircuitFails);
       //BlockReaderRemote2 have unsupported method read(ByteBuffer bf)
-      assertFalse(
-          "BlockReaderRemote2 unsupported method read(ByteBuffer bf) error",
-          checkUnsupportedMethod(fs, file1, fileData, readOffset));
-    } catch(IOException e) {
+      assertFalse(checkUnsupportedMethod(fs, file1, fileData, readOffset),
+          "BlockReaderRemote2 unsupported method read(ByteBuffer bf) error");
+    } catch (IOException e) {
       throw new IOException(
           "doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
-    } catch(InterruptedException inEx) {
+    } catch (InterruptedException inEx) {
       throw inEx;
     } finally {
       fs.close();