
Test PR: Ran rewrite plugin

Akira Ajisaka · 3 years ago
commit a8b5d1d648
100 changed files, with 1,489 additions and 1,504 deletions
  1. +71 -6  hadoop-hdfs-project/hadoop-hdfs/pom.xml
  2. +13 -19  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
  3. +12 -15  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
  4. +5 -5  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
  5. +2 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLIWithPosixAclInheritance.java
  6. +8 -8  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
  7. +8 -8  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
  8. +8 -8  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java
  9. +5 -5  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java
  10. +8 -8  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
  11. +8 -8  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
  12. +68 -69  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java
  13. +8 -8  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
  14. +8 -8  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java
  15. +22 -21  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java
  16. +54 -50  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
  17. +23 -22  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
  18. +2 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java
  19. +10 -10  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
  20. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java
  21. +6 -6  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
  22. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java
  23. +3 -3  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileContext.java
  24. +8 -9  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileSystem.java
  25. +7 -7  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
  26. +10 -12  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
  27. +9 -9  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java
  28. +5 -5  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/HDFSContract.java
  29. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java
  30. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java
  31. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java
  32. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java
  33. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java
  34. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java
  35. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java
  36. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java
  37. +7 -5  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java
  38. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java
  39. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java
  40. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java
  41. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java
  42. +4 -5  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java
  43. +3 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
  44. +8 -11  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
  45. +7 -7  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java
  46. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java
  47. +2 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeWithMountTableConfigInHDFS.java
  48. +6 -6  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
  49. +27 -30  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
  50. +35 -39  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java
  51. +12 -18  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkMergeSlash.java
  52. +9 -13  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkRegex.java
  53. +8 -10  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java
  54. +34 -34  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java
  55. +7 -11  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java
  56. +9 -9  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java
  57. +7 -11  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java
  58. +6 -6  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
  59. +6 -8  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
  60. +13 -13  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
  61. +10 -10  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
  62. +10 -13  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java
  63. +8 -11  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java
  64. +7 -11  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java
  65. +7 -7  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java
  66. +8 -10  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
  67. +48 -51  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
  68. +5 -5  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
  69. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailure.java
  70. +8 -8  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java
  71. +22 -22  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java
  72. +9 -9  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
  73. +164 -165  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java
  74. +6 -6  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java
  75. +10 -11  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
  76. +7 -7  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestApplyingStoragePolicy.java
  77. +5 -5  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
  78. +17 -19  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java
  79. +3 -3  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
  80. +67 -67  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
  81. +7 -6  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java
  82. +12 -12  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
  83. +6 -9  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java
  84. +31 -30  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
  85. +14 -14  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
  86. +2 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java
  87. +9 -9  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
  88. +11 -13  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
  89. +5 -5  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
  90. +8 -8  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
  91. +17 -20  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
  92. +26 -29  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
  93. +8 -10  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java
  94. +5 -5  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
  95. +168 -168  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java
  96. +11 -14  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStreamKerberized.java
  97. +7 -10  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
  98. +30 -34  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStreamBlockLocations.java
  99. +10 -10  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java
  100. +19 -22  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java

+ 71 - 6
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -61,11 +61,27 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>zookeeper</artifactId>
       <type>test-jar</type>
       <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.junit.vintage</groupId>
+          <artifactId>junit-vintage-engine</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>io.dropwizard.metrics</groupId>
       <artifactId>metrics-core</artifactId>
       <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.xerial.snappy</groupId>
@@ -154,12 +170,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
         </exclusion>
       </exclusions>
     </dependency>
-
-    <dependency>
-      <groupId>junit</groupId>
-      <artifactId>junit</artifactId>
-      <scope>test</scope>
-    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-minikdc</artifactId>
@@ -169,11 +179,23 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.mockito</groupId>
       <artifactId>mockito-core</artifactId>
       <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-log4j12</artifactId>
       <scope>provided</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>io.netty</groupId>
@@ -184,6 +206,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>io.netty</groupId>
       <artifactId>netty-all</artifactId>
       <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.junit.vintage</groupId>
+          <artifactId>junit-vintage-engine</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -209,11 +241,27 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>com.fasterxml.jackson.core</groupId>
       <artifactId>jackson-databind</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.curator</groupId>
       <artifactId>curator-test</artifactId>
       <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.junit.vintage</groupId>
+          <artifactId>junit-vintage-engine</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>junit</groupId>
+          <artifactId>junit</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
         <groupId>org.assertj</groupId>
@@ -447,6 +495,23 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
           </filesets>
         </configuration>
       </plugin>
+      <plugin>
+        <groupId>org.openrewrite.maven</groupId>
+        <artifactId>rewrite-maven-plugin</artifactId>
+        <version>4.9.0</version>
+        <configuration>
+          <activeRecipes>
+            <recipe>org.openrewrite.java.testing.junit5.JUnit5BestPractices</recipe>
+          </activeRecipes>
+        </configuration>
+        <dependencies>
+          <dependency>
+            <groupId>org.openrewrite.recipe</groupId>
+            <artifactId>rewrite-testing-frameworks</artifactId>
+            <version>1.7.1</version>
+          </dependency>
+        </dependencies>
+      </plugin>
     </plugins>
   </build>
 

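For context, the pom change above is the standard OpenRewrite wiring: rewrite-maven-plugin activates the JUnit5BestPractices recipe from rewrite-testing-frameworks, which rewrites JUnit 4 imports, annotations, and assertion argument order to JUnit Jupiter across the module. With this configuration the recipe is typically applied with mvn rewrite:run (or previewed with mvn rewrite:dryRun). Below is a minimal sketch of the shape of the transformation, on a hypothetical test class rather than one from this PR; the pre-migration form is shown in comments.

    // Hypothetical example of the JUnit 4 -> Jupiter rewrite applied
    // throughout this PR; the "before" shape is shown in the comments.
    import static org.junit.jupiter.api.Assertions.assertEquals; // was: org.junit.Assert.assertEquals

    import org.junit.jupiter.api.BeforeEach; // was: org.junit.Before
    import org.junit.jupiter.api.Test;       // was: org.junit.Test

    class RecipeExampleTest {
      private int exitCode;

      @BeforeEach // was: @Before
      void setUp() {
        exitCode = -1;
      }

      @Test
      void failsWithBadArgs() {
        // was: assertEquals("DFSAdmin should fail due to bad args", -1, exitCode);
        // Jupiter moves the failure message from the first argument to the last.
        assertEquals(-1, exitCode, "DFSAdmin should fail due to bad args");
      }
    }
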
+ 13 - 19
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java

@@ -18,10 +18,8 @@
 
 package org.apache.hadoop;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -31,11 +29,7 @@ import org.apache.hadoop.ipc.RefreshHandler;
 
 import org.apache.hadoop.ipc.RefreshRegistry;
 import org.apache.hadoop.ipc.RefreshResponse;
-import org.junit.Test;
-import org.junit.Before;
-import org.junit.After;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
+import org.junit.jupiter.api.*;
 import org.mockito.Mockito;
 
 /**
@@ -51,7 +45,7 @@ public class TestGenericRefresh {
   private static RefreshHandler firstHandler;
   private static RefreshHandler secondHandler;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUpBeforeClass() throws Exception {
     config = new Configuration();
     config.set("hadoop.security.authorization", "true");
@@ -61,14 +55,14 @@ public class TestGenericRefresh {
     cluster.waitActive();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownBeforeClass() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     // Register Handlers, first one just sends an ok response
     firstHandler = Mockito.mock(RefreshHandler.class);
@@ -85,7 +79,7 @@ public class TestGenericRefresh {
     RefreshRegistry.defaultRegistry().register("secondHandler", secondHandler);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");
     RefreshRegistry.defaultRegistry().unregisterAll("secondHandler");
@@ -96,7 +90,7 @@ public class TestGenericRefresh {
     DFSAdmin admin = new DFSAdmin(config);
     String [] args = new String[]{"-refresh", "nn"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should fail due to bad args", -1, exitCode);
+      assertEquals(-1, exitCode, "DFSAdmin should fail due to bad args");
   }
 
   @Test
@@ -105,7 +99,7 @@ public class TestGenericRefresh {
     String [] args = new String[]{"-refresh", "localhost:" + 
         cluster.getNameNodePort(), "unregisteredIdentity"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
+      assertEquals(-1, exitCode, "DFSAdmin should fail due to no handler registered");
   }
 
   @Test
@@ -114,7 +108,7 @@ public class TestGenericRefresh {
     String[] args = new String[]{"-refresh",
         "localhost:" + cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should succeed", 0, exitCode);
+      assertEquals(0, exitCode, "DFSAdmin should succeed");
 
     Mockito.verify(firstHandler).handleRefresh("firstHandler", new String[]{});
     // Second handler was never called
@@ -128,11 +122,11 @@ public class TestGenericRefresh {
     String[] args = new String[]{"-refresh", "localhost:" +
         cluster.getNameNodePort(), "secondHandler", "one"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should return 2", 2, exitCode);
+      assertEquals(2, exitCode, "DFSAdmin should return 2");
 
     exitCode = admin.run(new String[]{"-refresh", "localhost:" +
         cluster.getNameNodePort(), "secondHandler", "one", "two"});
-    assertEquals("DFSAdmin should now return 3", 3, exitCode);
+      assertEquals(3, exitCode, "DFSAdmin should now return 3");
 
     Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"});
     Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one", "two"});
@@ -147,7 +141,7 @@ public class TestGenericRefresh {
     String[] args = new String[]{"-refresh", "localhost:" +
         cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should return -1", -1, exitCode);
+      assertEquals(-1, exitCode, "DFSAdmin should return -1");
   }
 
   @Test

+ 12 - 15
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java

@@ -18,10 +18,7 @@
 
 package org.apache.hadoop;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.net.BindException;
@@ -40,8 +37,8 @@ import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.ipc.FairCallQueue;
 import org.apache.hadoop.metrics2.MetricsException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 public class TestRefreshCallQueue {
   private MiniDFSCluster cluster;
@@ -77,7 +74,7 @@ public class TestRefreshCallQueue {
     }
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -115,9 +112,9 @@ public class TestRefreshCallQueue {
     mockQueuePuts = 0;
     setUp(MockCallQueue.class);
 
-    assertTrue("Mock queue should have been constructed",
-        mockQueueConstructions > 0);
-    assertTrue("Puts are routed through MockQueue", canPutInMockQueue());
+      assertTrue(
+              mockQueueConstructions > 0, "Mock queue should have been constructed");
+      assertTrue(canPutInMockQueue(), "Puts are routed through MockQueue");
     int lastMockQueueConstructions = mockQueueConstructions;
 
     // Replace queue with the queue specified in core-site.xml, which would be
@@ -125,13 +122,13 @@ public class TestRefreshCallQueue {
     DFSAdmin admin = new DFSAdmin(config);
     String [] args = new String[]{"-refreshCallQueue"};
     int exitCode = admin.run(args);
-    assertEquals("DFSAdmin should return 0", 0, exitCode);
+      assertEquals(0, exitCode, "DFSAdmin should return 0");
 
-    assertEquals("Mock queue should have no additional constructions",
-        lastMockQueueConstructions, mockQueueConstructions);
+      assertEquals(
+              lastMockQueueConstructions, mockQueueConstructions, "Mock queue should have no additional constructions");
     try {
-      assertFalse("Puts are routed through LBQ instead of MockQueue",
-          canPutInMockQueue());
+        assertFalse(
+                canPutInMockQueue(), "Puts are routed through LBQ instead of MockQueue");
     } catch (IOException ioe) {
       fail("Could not put into queue at all");
     }

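One idiom the recipe leaves untouched is the trailing try/fail block above, where the catch clause turns an unexpected IOException into a test failure. Jupiter can express that intent directly with assertDoesNotThrow. The sketch below is illustrative only and uses a hypothetical stand-in for canPutInMockQueue(), not the real helper from TestRefreshCallQueue.

    // Sketch: expressing the try/fail idiom from the hunk above with
    // Jupiter's assertDoesNotThrow. The queue check is a stand-in.
    import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
    import static org.junit.jupiter.api.Assertions.assertFalse;

    import java.io.IOException;
    import org.junit.jupiter.api.Test;

    class AssertDoesNotThrowExampleTest {
      // Hypothetical stand-in for TestRefreshCallQueue#canPutInMockQueue().
      private boolean canPutInMockQueue() throws IOException {
        return false; // pretend puts now route through the default queue
      }

      @Test
      void refreshedQueueNoLongerRoutesThroughMock() {
        // assertDoesNotThrow fails with the given message if the call throws,
        // and otherwise hands back the value for further assertions.
        boolean routed = assertDoesNotThrow(this::canPutInMockQueue,
            "Could not put into queue at all");
        assertFalse(routed, "Puts are routed through LBQ instead of MockQueue");
      }
    }
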
+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java

@@ -22,9 +22,9 @@ import org.apache.hadoop.cli.util.CommandExecutor.Result;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestAclCLI extends CLITestHelperDFS {
   private MiniDFSCluster cluster = null;
@@ -38,7 +38,7 @@ public class TestAclCLI extends CLITestHelperDFS {
         DFSConfigKeys.DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY, false);
   }
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -49,7 +49,7 @@ public class TestAclCLI extends CLITestHelperDFS {
     username = System.getProperty("user.name");
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     super.tearDown();

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLIWithPosixAclInheritance.java

@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.cli;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY;
+import org.junit.jupiter.api.Test;
 
-import org.junit.Test;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_POSIX_ACL_INHERITANCE_ENABLED_KEY;
 
 /**
  * Test ACL CLI with POSIX ACL inheritance enabled.

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java

@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.cli;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -37,9 +37,9 @@ import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.tools.CacheAdmin;
 import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.xml.sax.SAXException;
 
 public class TestCacheAdminCLI extends CLITestHelper {
@@ -51,7 +51,7 @@ public class TestCacheAdminCLI extends CLITestHelper {
   protected FileSystem fs = null;
   protected String namenode = null;
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -68,11 +68,11 @@ public class TestCacheAdminCLI extends CLITestHelper {
     username = System.getProperty("user.name");
 
     fs = dfsCluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(),
-               fs instanceof DistributedFileSystem);
+      assertTrue(
+              fs instanceof DistributedFileSystem, "Not a HDFS: " + fs.getUri());
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java

@@ -23,7 +23,7 @@ import java.io.IOException;
 import java.security.NoSuchAlgorithmException;
 import java.util.UUID;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.cli.util.CLICommand;
 import org.apache.hadoop.cli.util.CLICommandCryptoAdmin;
@@ -45,9 +45,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.tools.CryptoAdmin;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.xml.sax.SAXException;
 
 public class TestCryptoAdminCLI extends CLITestHelperDFS {
@@ -56,7 +56,7 @@ public class TestCryptoAdminCLI extends CLITestHelperDFS {
   protected String namenode = null;
   private static File tmpDir;
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -78,11 +78,11 @@ public class TestCryptoAdminCLI extends CLITestHelperDFS {
     username = System.getProperty("user.name");
 
     fs = dfsCluster.getFileSystem();
-    assertTrue("Not an HDFS: " + fs.getUri(),
-        fs instanceof DistributedFileSystem);
+      assertTrue(
+              fs instanceof DistributedFileSystem, "Not an HDFS: " + fs.getUri());
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java

@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.cli;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.cli.util.CLICommand;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
@@ -27,16 +27,16 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestDeleteCLI extends CLITestHelperDFS {
   protected MiniDFSCluster dfsCluster = null;
   protected FileSystem fs = null;
   protected String namenode = null;
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -49,11 +49,11 @@ public class TestDeleteCLI extends CLITestHelperDFS {
     namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
 
     fs = dfsCluster.getFileSystem();
-    assertTrue("Not an HDFS: " + fs.getUri(),
-        fs instanceof DistributedFileSystem);
+      assertTrue(
+              fs instanceof DistributedFileSystem, "Not an HDFS: " + fs.getUri());
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestErasureCodingCLI.java

@@ -24,10 +24,10 @@ import org.apache.hadoop.cli.util.CommandExecutor.Result;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.xml.sax.SAXException;
 
@@ -40,7 +40,7 @@ public class TestErasureCodingCLI extends CLITestHelper {
   @Rule
   public Timeout globalTimeout = new Timeout(300000);
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -62,7 +62,7 @@ public class TestErasureCodingCLI extends CLITestHelper {
     return "testErasureCodingConf.xml";
   }
 
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {

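Note that the hunk above keeps @Rule and org.junit.rules.Timeout on their JUnit 4 imports, so TestErasureCodingCLI remains a mixed JUnit 4/5 class after the rewrite. If one were finishing the migration by hand, Jupiter's @Timeout would presumably be the native replacement; the class below is a sketch under that assumption, not part of this PR's diff.

    // Sketch only: a Jupiter-native stand-in for the 300,000 ms
    // globalTimeout rule that the diff above leaves in place.
    import java.util.concurrent.TimeUnit;

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    // Applied at class level, @Timeout bounds every test method in the
    // class, much like the old JUnit 4 global Timeout rule.
    @Timeout(value = 300, unit = TimeUnit.SECONDS)
    class TimeoutExampleTest {
      @Test
      void finishesWellWithinTheBound() throws InterruptedException {
        Thread.sleep(10); // trivially inside the five-minute limit
      }
    }
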
+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java

@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.cli;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.cli.util.CLICommand;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
@@ -28,9 +28,9 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestHDFSCLI extends CLITestHelperDFS {
 
@@ -38,7 +38,7 @@ public class TestHDFSCLI extends CLITestHelperDFS {
   protected FileSystem fs = null;
   protected String namenode = null;
   
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -63,8 +63,8 @@ public class TestHDFSCLI extends CLITestHelperDFS {
     username = System.getProperty("user.name");
 
     fs = dfsCluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(),
-               fs instanceof DistributedFileSystem);
+      assertTrue(
+              fs instanceof DistributedFileSystem, "Not a HDFS: " + fs.getUri());
   }
 
   @Override
@@ -72,7 +72,7 @@ public class TestHDFSCLI extends CLITestHelperDFS {
     return "testHDFSConf.xml";
   }
   
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java

@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.cli;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import org.apache.hadoop.cli.util.CLICommand;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
@@ -28,16 +28,16 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestXAttrCLI  extends CLITestHelperDFS {
   protected MiniDFSCluster dfsCluster = null;
   protected FileSystem fs = null;
   protected String namenode = null;
   
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws Exception {
     super.setUp();
@@ -53,8 +53,8 @@ public class TestXAttrCLI  extends CLITestHelperDFS {
     username = System.getProperty("user.name");
 
     fs = dfsCluster.getFileSystem();
-    assertTrue("Not a HDFS: "+fs.getUri(), 
-        fs instanceof DistributedFileSystem);
+      assertTrue(
+              fs instanceof DistributedFileSystem, "Not a HDFS: " + fs.getUri());
   }
 
   @Override
@@ -62,7 +62,7 @@ public class TestXAttrCLI  extends CLITestHelperDFS {
     return "testXAttrConf.xml";
   }
   
-  @After
+  @AfterEach
   @Override
   public void tearDown() throws Exception {
     if (fs != null) {

+ 68 - 69
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java

@@ -65,12 +65,11 @@ import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.BeforeClass;
 import org.junit.Test;
-
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeAll;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import java.util.function.Supplier;
 
@@ -85,7 +84,7 @@ public class TestEnhancedByteBufferAccess {
 
   static private CacheManipulator prevCacheManipulator;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() {
     sockDir = new TemporarySocketDirectory();
     DomainSocket.disableBindPathValidation();
@@ -99,7 +98,7 @@ public class TestEnhancedByteBufferAccess {
     });
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardown() {
     // Restore the original CacheManipulator
     NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
@@ -116,8 +115,8 @@ public class TestEnhancedByteBufferAccess {
       (int) NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
   
   public static HdfsConfiguration initZeroCopyTest() {
-    Assume.assumeTrue(NativeIO.isAvailable());
-    Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
+    Assumptions.assumeTrue(NativeIO.isAvailable());
+    Assumptions.assumeTrue(SystemUtils.IS_OS_UNIX);
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
@@ -152,10 +151,10 @@ public class TestEnhancedByteBufferAccess {
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        Assertions.fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        Assertions.fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       fsIn = fs.open(TEST_PATH);
@@ -165,13 +164,13 @@ public class TestEnhancedByteBufferAccess {
       fsIn = fs.open(TEST_PATH);
       ByteBuffer result = fsIn.read(null, BLOCK_SIZE,
           EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(BLOCK_SIZE, result.remaining());
+      Assertions.assertEquals(BLOCK_SIZE, result.remaining());
       HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
-      Assert.assertEquals(BLOCK_SIZE,
+      Assertions.assertEquals(BLOCK_SIZE,
           dfsIn.getReadStatistics().getTotalBytesRead());
-      Assert.assertEquals(BLOCK_SIZE,
+      Assertions.assertEquals(BLOCK_SIZE,
           dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
-      Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
+      Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
           byteBufferToArray(result));
       fsIn.releaseBuffer(result);
     } finally {
@@ -198,10 +197,10 @@ public class TestEnhancedByteBufferAccess {
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        Assertions.fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        Assertions.fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       fsIn = fs.open(TEST_PATH);
@@ -214,20 +213,20 @@ public class TestEnhancedByteBufferAccess {
       HdfsDataInputStream dfsIn = (HdfsDataInputStream)fsIn;
       ByteBuffer result =
         dfsIn.read(null, 2 * BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(BLOCK_SIZE, result.remaining());
-      Assert.assertEquals(BLOCK_SIZE,
+      Assertions.assertEquals(BLOCK_SIZE, result.remaining());
+      Assertions.assertEquals(BLOCK_SIZE,
           dfsIn.getReadStatistics().getTotalBytesRead());
-      Assert.assertEquals(BLOCK_SIZE,
+      Assertions.assertEquals(BLOCK_SIZE,
           dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
-      Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
+      Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
           byteBufferToArray(result));
       dfsIn.releaseBuffer(result);
       
       // Try to read (1 + ${BLOCK_SIZE}), but only get ${BLOCK_SIZE} because of the block size.
       result = 
           dfsIn.read(null, 1 + BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(BLOCK_SIZE, result.remaining());
-      Assert.assertArrayEquals(Arrays.copyOfRange(original, BLOCK_SIZE, 2 * BLOCK_SIZE),
+      Assertions.assertEquals(BLOCK_SIZE, result.remaining());
+      Assertions.assertArrayEquals(Arrays.copyOfRange(original, BLOCK_SIZE, 2 * BLOCK_SIZE),
           byteBufferToArray(result));
       dfsIn.releaseBuffer(result);
     } finally {
@@ -255,10 +254,10 @@ public class TestEnhancedByteBufferAccess {
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        Assertions.fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        Assertions.fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       fsIn = fs.open(TEST_PATH);
@@ -270,17 +269,17 @@ public class TestEnhancedByteBufferAccess {
       ByteBuffer result;
       try {
         result = dfsIn.read(null, BLOCK_SIZE + 1, EnumSet.noneOf(ReadOption.class));
-        Assert.fail("expected UnsupportedOperationException");
+        Assertions.fail("expected UnsupportedOperationException");
       } catch (UnsupportedOperationException e) {
         // expected
       }
       result = dfsIn.read(null, BLOCK_SIZE, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(BLOCK_SIZE, result.remaining());
-      Assert.assertEquals(BLOCK_SIZE,
+      Assertions.assertEquals(BLOCK_SIZE, result.remaining());
+      Assertions.assertEquals(BLOCK_SIZE,
           dfsIn.getReadStatistics().getTotalBytesRead());
-      Assert.assertEquals(BLOCK_SIZE,
+      Assertions.assertEquals(BLOCK_SIZE,
           dfsIn.getReadStatistics().getTotalZeroCopyBytesRead());
-      Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
+      Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0, BLOCK_SIZE),
           byteBufferToArray(result));
     } finally {
       if (fsIn != null) fsIn.close();
@@ -311,16 +310,16 @@ public class TestEnhancedByteBufferAccess {
         LinkedMap evictable,
         LinkedMap evictableMmapped) {
       if (expectedNumOutstandingMmaps >= 0) {
-        Assert.assertEquals(expectedNumOutstandingMmaps, numOutstandingMmaps);
+        Assertions.assertEquals(expectedNumOutstandingMmaps, numOutstandingMmaps);
       }
       if (expectedNumReplicas >= 0) {
-        Assert.assertEquals(expectedNumReplicas, replicas.size());
+        Assertions.assertEquals(expectedNumReplicas, replicas.size());
       }
       if (expectedNumEvictable >= 0) {
-        Assert.assertEquals(expectedNumEvictable, evictable.size());
+        Assertions.assertEquals(expectedNumEvictable, evictable.size());
       }
       if (expectedNumMmapedEvictable >= 0) {
-        Assert.assertEquals(expectedNumMmapedEvictable, evictableMmapped.size());
+        Assertions.assertEquals(expectedNumMmapedEvictable, evictableMmapped.size());
       }
     }
   }
@@ -346,10 +345,10 @@ public class TestEnhancedByteBufferAccess {
     try {
       DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
     } catch (InterruptedException e) {
-      Assert.fail("unexpected InterruptedException during " +
+      Assertions.fail("unexpected InterruptedException during " +
           "waitReplication: " + e);
     } catch (TimeoutException e) {
-      Assert.fail("unexpected TimeoutException during " +
+      Assertions.fail("unexpected TimeoutException during " +
           "waitReplication: " + e);
     }
     fsIn = fs.open(TEST_PATH);
@@ -378,10 +377,10 @@ public class TestEnhancedByteBufferAccess {
           LinkedMap evictableMmapped) {
         ShortCircuitReplica replica = replicas.get(
             new ExtendedBlockId(firstBlock.getBlockId(), firstBlock.getBlockPoolId()));
-        Assert.assertNotNull(replica);
-        Assert.assertTrue(replica.hasMmap());
+        Assertions.assertNotNull(replica);
+        Assertions.assertTrue(replica.hasMmap());
         // The replica should not yet be evictable, since we have it open.
-        Assert.assertNull(replica.getEvictableTimeNs());
+        Assertions.assertNull(replica.getEvictableTimeNs());
       }
     });
 
@@ -449,10 +448,10 @@ public class TestEnhancedByteBufferAccess {
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        Assertions.fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        Assertions.fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       fsIn = fs.open(TEST_PATH);
@@ -493,22 +492,22 @@ public class TestEnhancedByteBufferAccess {
             stream instanceof ByteBufferReadable);
 
     ByteBuffer result = ByteBufferUtil.fallbackRead(stream, bufferPool, 10);
-    Assert.assertEquals(10, result.remaining());
-    Assert.assertArrayEquals(Arrays.copyOfRange(original, 0, 10),
+    Assertions.assertEquals(10, result.remaining());
+    Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0, 10),
         byteBufferToArray(result));
 
     result = ByteBufferUtil.fallbackRead(stream, bufferPool, 5000);
-    Assert.assertEquals(5000, result.remaining());
-    Assert.assertArrayEquals(Arrays.copyOfRange(original, 10, 5010),
+    Assertions.assertEquals(5000, result.remaining());
+    Assertions.assertArrayEquals(Arrays.copyOfRange(original, 10, 5010),
         byteBufferToArray(result));
 
     result = ByteBufferUtil.fallbackRead(stream, bufferPool, 9999999);
-    Assert.assertEquals(11375, result.remaining());
-    Assert.assertArrayEquals(Arrays.copyOfRange(original, 5010, 16385),
+    Assertions.assertEquals(11375, result.remaining());
+    Assertions.assertArrayEquals(Arrays.copyOfRange(original, 5010, 16385),
         byteBufferToArray(result));
 
     result = ByteBufferUtil.fallbackRead(stream, bufferPool, 10);
-    Assert.assertNull(result);
+    Assertions.assertNull(result);
   }
 
   /**
@@ -533,10 +532,10 @@ public class TestEnhancedByteBufferAccess {
       try {
         DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
       } catch (InterruptedException e) {
-        Assert.fail("unexpected InterruptedException during " +
+        Assertions.fail("unexpected InterruptedException during " +
             "waitReplication: " + e);
       } catch (TimeoutException e) {
-        Assert.fail("unexpected TimeoutException during " +
+        Assertions.fail("unexpected TimeoutException during " +
             "waitReplication: " + e);
       }
       fsIn = fs.open(TEST_PATH);
@@ -618,7 +617,7 @@ public class TestEnhancedByteBufferAccess {
     try {
       result = fsIn.read(null, TEST_FILE_LENGTH / 2,
           EnumSet.noneOf(ReadOption.class));
-      Assert.fail("expected UnsupportedOperationException");
+      Assertions.fail("expected UnsupportedOperationException");
     } catch (UnsupportedOperationException e) {
       // expected
     }
@@ -637,9 +636,9 @@ public class TestEnhancedByteBufferAccess {
       result = fsIn.read(null, TEST_FILE_LENGTH,
           EnumSet.noneOf(ReadOption.class));
     } catch (UnsupportedOperationException e) {
-      Assert.fail("expected to be able to read cached file via zero-copy");
+      Assertions.fail("expected to be able to read cached file via zero-copy");
     }
-    Assert.assertArrayEquals(Arrays.copyOfRange(original, 0,
+    Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0,
         BLOCK_SIZE), byteBufferToArray(result));
     // Test that files opened after the cache operation has finished
     // still get the benefits of zero-copy (regression test for HDFS-6086)
@@ -648,9 +647,9 @@ public class TestEnhancedByteBufferAccess {
       result2 = fsIn2.read(null, TEST_FILE_LENGTH,
           EnumSet.noneOf(ReadOption.class));
     } catch (UnsupportedOperationException e) {
-      Assert.fail("expected to be able to read cached file via zero-copy");
+      Assertions.fail("expected to be able to read cached file via zero-copy");
     }
-    Assert.assertArrayEquals(Arrays.copyOfRange(original, 0,
+    Assertions.assertArrayEquals(Arrays.copyOfRange(original, 0,
         BLOCK_SIZE), byteBufferToArray(result2));
     fsIn2.releaseBuffer(result2);
     fsIn2.close();
@@ -688,10 +687,10 @@ public class TestEnhancedByteBufferAccess {
               Map<ExtendedBlockId, InvalidToken> failedLoads,
               LinkedMap evictable,
               LinkedMap evictableMmapped) {
-            Assert.assertEquals(expectedOutstandingMmaps, numOutstandingMmaps);
+            Assertions.assertEquals(expectedOutstandingMmaps, numOutstandingMmaps);
             ShortCircuitReplica replica =
                 replicas.get(ExtendedBlockId.fromExtendedBlock(block));
-            Assert.assertNotNull(replica);
+            Assertions.assertNotNull(replica);
             Slot slot = replica.getSlot();
             if ((expectedIsAnchorable != slot.isAnchorable()) ||
                 (expectedIsAnchored != slot.isAnchored())) {
@@ -734,7 +733,7 @@ public class TestEnhancedByteBufferAccess {
       fsIn = fs.open(TEST_PATH);
       try {
         fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-        Assert.fail("expected zero-copy read to fail when client mmaps " +
+        Assertions.fail("expected zero-copy read to fail when client mmaps " +
             "were disabled.");
       } catch (UnsupportedOperationException e) {
       }
@@ -764,7 +763,7 @@ public class TestEnhancedByteBufferAccess {
       // Test EOF behavior
       IOUtils.skipFully(fsIn, TEST_FILE_LENGTH - 1);
       buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(null, buf);
+      Assertions.assertEquals(null, buf);
     } finally {
       if (fsIn != null) fsIn.close();
       if (fs != null) fs.close();
@@ -774,7 +773,7 @@ public class TestEnhancedByteBufferAccess {
   
   @Test
   public void test2GBMmapLimit() throws Exception {
-    Assume.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles());
+    Assumptions.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles());
     HdfsConfiguration conf = initZeroCopyTest();
     final long TEST_FILE_LENGTH = 2469605888L;
     conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
@@ -795,20 +794,20 @@ public class TestEnhancedByteBufferAccess {
       
       fsIn = fs.open(TEST_PATH);
       buf1 = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(1, buf1.remaining());
+      Assertions.assertEquals(1, buf1.remaining());
       fsIn.releaseBuffer(buf1);
       buf1 = null;
       fsIn.seek(2147483640L);
       buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(7, buf1.remaining());
-      Assert.assertEquals(Integer.MAX_VALUE, buf1.limit());
+      Assertions.assertEquals(7, buf1.remaining());
+      Assertions.assertEquals(Integer.MAX_VALUE, buf1.limit());
       fsIn.releaseBuffer(buf1);
       buf1 = null;
-      Assert.assertEquals(2147483647L, fsIn.getPos());
+      Assertions.assertEquals(2147483647L, fsIn.getPos());
       try {
         buf1 = fsIn.read(null, 1024,
             EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-        Assert.fail("expected UnsupportedOperationException");
+        Assertions.fail("expected UnsupportedOperationException");
       } catch (UnsupportedOperationException e) {
         // expected; can't read past 2GB boundary.
       }
@@ -825,13 +824,13 @@ public class TestEnhancedByteBufferAccess {
       fsIn2 = fs.open(TEST_PATH2);
       fsIn2.seek(2147483640L);
       buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(8, buf2.remaining());
-      Assert.assertEquals(2147483648L, fsIn2.getPos());
+      Assertions.assertEquals(8, buf2.remaining());
+      Assertions.assertEquals(2147483648L, fsIn2.getPos());
       fsIn2.releaseBuffer(buf2);
       buf2 = null;
       buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
-      Assert.assertEquals(1024, buf2.remaining());
-      Assert.assertEquals(2147484672L, fsIn2.getPos());
+      Assertions.assertEquals(1024, buf2.remaining());
+      Assertions.assertEquals(2147484672L, fsIn2.getPos());
       fsIn2.releaseBuffer(buf2);
       buf2 = null;
     } finally {

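The hunk above shows another half-migrated state: org.junit.Test is kept while Assert and Assume are swapped for Jupiter's Assertions and Assumptions. That compiles, since Jupiter's assertion and assumption methods are plain static methods callable from any code, but the tests are presumably still discovered by a JUnit 4 runner (e.g. the vintage engine), because the @Test annotation itself is unchanged. A minimal illustration of this mixed state, on a hypothetical class:

    // Hypothetical class mirroring the state left in
    // TestEnhancedByteBufferAccess: a JUnit 4 @Test calling JUnit 5
    // assertions. The assertion works from any caller; test discovery
    // still goes through whichever engine understands org.junit.Test.
    // (Jupiter assumptions are less interchangeable: they throw
    // TestAbortedException, which a JUnit 4 runner may report as a
    // failure rather than a skip.)
    import org.junit.Test;                   // JUnit 4 annotation (unchanged)
    import org.junit.jupiter.api.Assertions; // JUnit 5 assertions (new)

    public class MixedEngineExampleTest {
      @Test
      public void jupiterAssertionsFromAJUnit4Test() {
        Assertions.assertEquals(4, 2 + 2, "arithmetic still works");
      }
    }
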
+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java

@@ -27,10 +27,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestFcHdfsCreateMkdir extends
                     FileContextCreateMkdirBaseTest {
@@ -44,7 +44,7 @@ public class TestFcHdfsCreateMkdir extends
   }
 
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining()
                                     throws IOException, LoginException, URISyntaxException  {
     Configuration conf = new HdfsConfiguration();
@@ -56,7 +56,7 @@ public class TestFcHdfsCreateMkdir extends
   }
 
       
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -64,13 +64,13 @@ public class TestFcHdfsCreateMkdir extends
   }
   
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     super.setUp();
   }
   
   @Override
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     super.tearDown();
   }

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java

@@ -27,10 +27,10 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestFcHdfsPermission extends FileContextPermissionBase {
   
@@ -51,7 +51,7 @@ public class TestFcHdfsPermission extends FileContextPermissionBase {
     return fc;
   }
   
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining()
                                     throws IOException, LoginException, URISyntaxException  {
     Configuration conf = new HdfsConfiguration();
@@ -63,7 +63,7 @@ public class TestFcHdfsPermission extends FileContextPermissionBase {
   }
 
       
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -71,13 +71,13 @@ public class TestFcHdfsPermission extends FileContextPermissionBase {
   }
   
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     super.setUp();
   }
   
   @Override
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     super.tearDown();
   }

+ 22 - 21
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSetUMask.java

@@ -30,12 +30,13 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
-import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestFcHdfsSetUMask {
   
@@ -78,7 +79,7 @@ public class TestFcHdfsSetUMask {
   private static final FsPermission WIDE_OPEN_TEST_UMASK = FsPermission
       .createImmutable((short) (0777 ^ 0777));
   
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining()
         throws IOException, LoginException, URISyntaxException  {
     Configuration conf = new HdfsConfiguration();
@@ -91,20 +92,20 @@ public class TestFcHdfsSetUMask {
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
   }
 
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fc.setUMask(WIDE_OPEN_TEST_UMASK);
     fc.mkdir(fileContextTestHelper.getTestRootPath(fc), FileContext.DEFAULT_PERM, true);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     fc.delete(fileContextTestHelper.getTestRootPath(fc), true);
   }
@@ -194,8 +195,8 @@ public class TestFcHdfsSetUMask {
     fc.setUMask(umask);
     fc.mkdir(f, FileContext.DEFAULT_PERM, true);
     Assert.assertTrue(isDir(fc, f));
-    Assert.assertEquals("permissions on directory are wrong",  
-        expectedPerms, fc.getFileStatus(f).getPermission());
+      Assertions.assertEquals(
+              expectedPerms, fc.getFileStatus(f).getPermission(), "permissions on directory are wrong");
   }
   
   public void testMkdirRecursiveWithNonExistingDir(FsPermission umask,
@@ -205,11 +206,11 @@ public class TestFcHdfsSetUMask {
     fc.setUMask(umask);
     fc.mkdir(f, FileContext.DEFAULT_PERM, true);
     Assert.assertTrue(isDir(fc, f));
-    Assert.assertEquals("permissions on directory are wrong",  
-        expectedPerms, fc.getFileStatus(f).getPermission());
+    Assertions.assertEquals(expectedPerms,
+        fc.getFileStatus(f).getPermission(), "permissions on directory are wrong");
     Path fParent = fileContextTestHelper.getTestRootPath(fc, "NonExistant2");
-    Assert.assertEquals("permissions on parent directory are wrong",  
-        expectedParentPerms, fc.getFileStatus(fParent).getPermission());
+    Assertions.assertEquals(expectedParentPerms,
+        fc.getFileStatus(fParent).getPermission(), "permissions on parent directory are wrong");
   }
 
 
@@ -219,8 +220,8 @@ public class TestFcHdfsSetUMask {
     fc.setUMask(umask);
     createFile(fc, f);
     Assert.assertTrue(isFile(fc, f));
-    Assert.assertEquals("permissions on file are wrong",  
-        expectedPerms , fc.getFileStatus(f).getPermission());
+    Assertions.assertEquals(expectedPerms,
+        fc.getFileStatus(f).getPermission(), "permissions on file are wrong");
   }
   
   
@@ -233,10 +234,10 @@ public class TestFcHdfsSetUMask {
     fc.setUMask(umask);
     createFile(fc, f);
     Assert.assertTrue(isFile(fc, f));
-    Assert.assertEquals("permissions on file are wrong",  
-        expectedFilePerms, fc.getFileStatus(f).getPermission());
-    Assert.assertEquals("permissions on parent directory are wrong",  
-        expectedDirPerms, fc.getFileStatus(fParent).getPermission());
+    Assertions.assertEquals(expectedFilePerms,
+        fc.getFileStatus(f).getPermission(), "permissions on file are wrong");
+    Assertions.assertEquals(expectedDirPerms,
+        fc.getFileStatus(fParent).getPermission(), "permissions on parent directory are wrong");
   }
  
 }

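The assertion edits above all follow one signature change: JUnit 4's Assert takes the failure message as the first argument, while Jupiter's Assertions takes it last (or as a Supplier). A minimal sketch of the before/after, with illustrative values:

    import org.junit.jupiter.api.Assertions;
    import org.junit.jupiter.api.Test;

    class MessageOrderSketch {   // illustrative only, not code from this PR
      @Test
      void messageMovesToTheEnd() {
        short expected = 0755, actual = 0755;
        // JUnit 4 was: Assert.assertEquals("permissions on directory are wrong", expected, actual);
        Assertions.assertEquals(expected, actual, "permissions on directory are wrong");
        // A Supplier variant builds the message only if the assertion fails:
        Assertions.assertEquals(expected, actual, () -> "wrong permissions: " + actual);
      }
    }
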
+ 54 - 50
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java

@@ -18,7 +18,7 @@
 package org.apache.hadoop.fs;
 
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -26,6 +26,7 @@ import java.util.UUID;
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.thirdparty.com.google.common.collect.Ordering;
+import org.junit.jupiter.api.AfterAll;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -34,7 +35,10 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.*;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
 
 public class TestGlobPaths {
 
@@ -65,7 +69,7 @@ public class TestGlobPaths {
   static private String USER_DIR;
   private final Path[] path = new Path[NUM_OF_PATHS];
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     dfsCluster = new MiniDFSCluster.Builder(conf).build();
@@ -81,7 +85,7 @@ public class TestGlobPaths {
     USER_DIR = fs.getHomeDirectory().toUri().getPath().toString();
   }
   
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws Exception {
     if(dfsCluster!=null) {
       dfsCluster.shutdown();
@@ -102,8 +106,8 @@ public class TestGlobPaths {
     fs.createNewFile(fNormal);
     fs.createNewFile(fWithCR);
     statuses = fs.globStatus(new Path(d1, "f1*"));
-    assertEquals("Expected both normal and CR-carrying files in result: ",
-        2, statuses.length);
+    assertEquals(2, statuses.length,
+        "Expected both normal and CR-carrying files in result");
     cleanupDFS();
   }
 
@@ -892,14 +896,14 @@ public class TestGlobPaths {
       // Test simple glob
       FileStatus[] statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/*"),
           new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals(USER_DIR + "/alpha/beta", statuses[0].getPath()
+      Assertions.assertEquals(1, statuses.length);
+      Assertions.assertEquals(USER_DIR + "/alpha/beta", statuses[0].getPath()
           .toUri().getPath());
       // Test glob through symlink
       statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLink/*"),
           new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals(USER_DIR + "/alphaLink/beta", statuses[0].getPath()
+      Assertions.assertEquals(1, statuses.length);
+      Assertions.assertEquals(USER_DIR + "/alphaLink/beta", statuses[0].getPath()
           .toUri().getPath());
       // If the terminal path component in a globbed path is a symlink,
       // we don't dereference that link.
@@ -907,20 +911,20 @@ public class TestGlobPaths {
           + "/alphaLink/betaLink"), false);
       statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/betaLi*"),
           new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals(USER_DIR + "/alpha/betaLink", statuses[0].getPath()
+      Assertions.assertEquals(1, statuses.length);
+      Assertions.assertEquals(USER_DIR + "/alpha/betaLink", statuses[0].getPath()
           .toUri().getPath());
       // todo: test symlink-to-symlink-to-dir, etc.
     }
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testGlobWithSymlinksOnFS() throws Exception {
     testOnFileSystem(new TestGlobWithSymlinks(false));
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testGlobWithSymlinksOnFC() throws Exception {
     testOnFileContext(new TestGlobWithSymlinks(true));
@@ -951,20 +955,20 @@ public class TestGlobPaths {
       // Test glob through symlink to a symlink to a directory
       FileStatus statuses[] = wrap.globStatus(new Path(USER_DIR
           + "/alphaLinkLink"), new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals(USER_DIR + "/alphaLinkLink", statuses[0].getPath()
+      Assertions.assertEquals(1, statuses.length);
+      Assertions.assertEquals(USER_DIR + "/alphaLinkLink", statuses[0].getPath()
           .toUri().getPath());
       statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLinkLink/*"),
           new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals(USER_DIR + "/alphaLinkLink/beta", statuses[0]
+      Assertions.assertEquals(1, statuses.length);
+      Assertions.assertEquals(USER_DIR + "/alphaLinkLink/beta", statuses[0]
           .getPath().toUri().getPath());
       // Test glob of dangling symlink (theta does not actually exist)
       wrap.createSymlink(new Path(USER_DIR + "theta"), new Path(USER_DIR
           + "/alpha/kappa"), false);
       statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/kappa/kappa"),
           new AcceptAllPathFilter());
-      Assert.assertNull(statuses);
+      Assertions.assertNull(statuses);
       // Test glob of symlinks
       wrap.createFile(USER_DIR + "/alpha/beta/gamma");
       wrap.createSymlink(new Path(USER_DIR + "gamma"), new Path(USER_DIR
@@ -975,8 +979,8 @@ public class TestGlobPaths {
           USER_DIR + "/alpha/beta/gammaLinkLinkLink"), false);
       statuses = wrap.globStatus(new Path(USER_DIR
           + "/alpha/*/gammaLinkLinkLink"), new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals(USER_DIR + "/alpha/beta/gammaLinkLinkLink",
+      Assertions.assertEquals(1, statuses.length);
+      Assertions.assertEquals(USER_DIR + "/alpha/beta/gammaLinkLinkLink",
           statuses[0].getPath().toUri().getPath());
       statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/beta/*"),
           new AcceptAllPathFilter());
@@ -992,17 +996,17 @@ public class TestGlobPaths {
       statuses = wrap.globStatus(
           new Path(USER_DIR + "/tweedledee/unobtainium"),
           new AcceptAllPathFilter());
-      Assert.assertNull(statuses);
+      Assertions.assertNull(statuses);
     }
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testGlobWithSymlinksToSymlinksOnFS() throws Exception {
     testOnFileSystem(new TestGlobWithSymlinksToSymlinks(false));
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testGlobWithSymlinksToSymlinksOnFC() throws Exception {
     testOnFileContext(new TestGlobWithSymlinksToSymlinks(true));
@@ -1032,11 +1036,11 @@ public class TestGlobPaths {
       // PathFilter
       FileStatus statuses[] = wrap.globStatus(
           new Path(USER_DIR + "/alpha/beta"), new AcceptPathsEndingInZ());
-      Assert.assertNull(statuses);
+      Assertions.assertNull(statuses);
       statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLinkz/betaz"),
           new AcceptPathsEndingInZ());
-      Assert.assertEquals(1, statuses.length);
-      Assert.assertEquals(USER_DIR + "/alphaLinkz/betaz", statuses[0].getPath()
+      Assertions.assertEquals(1, statuses.length);
+      Assertions.assertEquals(USER_DIR + "/alphaLinkz/betaz", statuses[0].getPath()
           .toUri().getPath());
       statuses = wrap.globStatus(new Path(USER_DIR + "/*/*"),
           new AcceptPathsEndingInZ());
@@ -1050,13 +1054,13 @@ public class TestGlobPaths {
     }
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testGlobSymlinksWithCustomPathFilterOnFS() throws Exception {
     testOnFileSystem(new TestGlobSymlinksWithCustomPathFilter(false));
   }
 
-  @Ignore
+  @Disabled
   @Test
   public void testGlobSymlinksWithCustomPathFilterOnFC() throws Exception {
     testOnFileContext(new TestGlobSymlinksWithCustomPathFilter(true));
@@ -1078,22 +1082,22 @@ public class TestGlobPaths {
           + "/alphaLink"), false);
       FileStatus statuses[] = wrap.globStatus(
           new Path(USER_DIR + "/alphaLink"), new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
+      Assertions.assertEquals(1, statuses.length);
       Path path = statuses[0].getPath();
-      Assert.assertEquals(USER_DIR + "/alpha", path.toUri().getPath());
-      Assert.assertEquals("hdfs", path.toUri().getScheme());
+      Assertions.assertEquals(USER_DIR + "/alpha", path.toUri().getPath());
+      Assertions.assertEquals("hdfs", path.toUri().getScheme());
 
       // FileContext can list a file:/// URI.
       // Since everyone should have the root directory, we list that.
       statuses = fc.util().globStatus(new Path("file:///"),
           new AcceptAllPathFilter());
-      Assert.assertEquals(1, statuses.length);
+      Assertions.assertEquals(1, statuses.length);
       Path filePath = statuses[0].getPath();
-      Assert.assertEquals("file", filePath.toUri().getScheme());
-      Assert.assertEquals("/", filePath.toUri().getPath());
+      Assertions.assertEquals("file", filePath.toUri().getScheme());
+      Assertions.assertEquals("/", filePath.toUri().getPath());
 
       // The FileSystem should have scheme 'hdfs'
-      Assert.assertEquals("hdfs", fs.getScheme());
+      Assertions.assertEquals("hdfs", fs.getScheme());
     }
   }
 
@@ -1176,7 +1180,7 @@ public class TestGlobPaths {
       try {
         wrap.globStatus(new Path("/no*/*"),
             new AcceptAllPathFilter());
-        Assert.fail("expected to get an AccessControlException when " +
+        Assertions.fail("expected to get an AccessControlException when " +
             "globbing through a directory we don't have permissions " +
             "to list.");
       } catch (AccessControlException ioe) {
@@ -1240,8 +1244,8 @@ public class TestGlobPaths {
       privWrap.setOwner(new Path("/"), newOwner, null);
       FileStatus[] status = 
           wrap.globStatus(rootPath, new AcceptAllPathFilter());
-      Assert.assertEquals(1, status.length);
-      Assert.assertEquals(newOwner, status[0].getOwner());
+      Assertions.assertEquals(1, status.length);
+      Assertions.assertEquals(newOwner, status[0].getOwner());
       privWrap.setOwner(new Path("/"), oldRootStatus.getOwner(), null);
     }
   }
@@ -1273,8 +1277,8 @@ public class TestGlobPaths {
         FileStatus[] statuses =
             wrap.globStatus(new Path("/filed*/alpha"),
                   new AcceptAllPathFilter());
-        Assert.assertEquals(1, statuses.length);
-        Assert.assertEquals("/filed_away/alpha", statuses[0].getPath()
+        Assertions.assertEquals(1, statuses.length);
+        Assertions.assertEquals("/filed_away/alpha", statuses[0].getPath()
             .toUri().getPath());
         privWrap.mkdir(new Path("/filed_away/alphabet"),
             new FsPermission((short)0777), true);
@@ -1282,8 +1286,8 @@ public class TestGlobPaths {
             new FsPermission((short)0777), true);
         statuses = wrap.globStatus(new Path("/filed*/alph*/*b*"),
                   new AcceptAllPathFilter());
-        Assert.assertEquals(1, statuses.length);
-        Assert.assertEquals("/filed_away/alphabet/abc", statuses[0].getPath()
+        Assertions.assertEquals(1, statuses.length);
+        Assertions.assertEquals("/filed_away/alphabet/abc", statuses[0].getPath()
             .toUri().getPath());
       } finally {
         privWrap.delete(new Path("/filed"), true);
@@ -1308,12 +1312,12 @@ public class TestGlobPaths {
     FileSystem fs = FileSystem.getLocal(conf);
     String localTmp = System.getProperty("java.io.tmpdir");
     Path base = new Path(new Path(localTmp), UUID.randomUUID().toString());
-    Assert.assertTrue(fs.mkdirs(base));
-    Assert.assertTrue(fs.mkdirs(new Path(base, "e")));
-    Assert.assertTrue(fs.mkdirs(new Path(base, "c")));
-    Assert.assertTrue(fs.mkdirs(new Path(base, "a")));
-    Assert.assertTrue(fs.mkdirs(new Path(base, "d")));
-    Assert.assertTrue(fs.mkdirs(new Path(base, "b")));
+    Assertions.assertTrue(fs.mkdirs(base));
+    Assertions.assertTrue(fs.mkdirs(new Path(base, "e")));
+    Assertions.assertTrue(fs.mkdirs(new Path(base, "c")));
+    Assertions.assertTrue(fs.mkdirs(new Path(base, "a")));
+    Assertions.assertTrue(fs.mkdirs(new Path(base, "d")));
+    Assertions.assertTrue(fs.mkdirs(new Path(base, "b")));
     fs.deleteOnExit(base);
     FileStatus[] status = fs.globStatus(new Path(base, "*"));
     ArrayList list = new ArrayList();
@@ -1321,7 +1325,7 @@ public class TestGlobPaths {
         list.add(f.getPath().toString());
     }
     boolean sorted = Ordering.natural().isOrdered(list);
-    Assert.assertTrue(sorted);
+    Assertions.assertTrue(sorted);
   }
 }
 

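The four symlink suites above remain skipped after the migration; Jupiter renames JUnit 4's @Ignore to @Disabled. Both accept an optional reason string, which is worth adding when skipping, as in this illustrative sketch:

    import org.junit.jupiter.api.Disabled;
    import org.junit.jupiter.api.Test;

    class DisabledSketch {   // illustrative only
      @Disabled("glob-through-symlink semantics under discussion")
      @Test
      void testGlobWithSymlinks() {
        // Never executed; the reason string shows up in the skipped-test report.
      }
    }
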
+ 23 - 22
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java

@@ -19,7 +19,7 @@
 package org.apache.hadoop.fs;
 
 import static org.apache.hadoop.fs.FileContextTestHelper.exists;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.net.URI;
@@ -36,12 +36,13 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
-import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 public class TestHDFSFileContextMainOperations extends
     FileContextMainOperationsBaseTest {
@@ -54,7 +55,7 @@ public class TestHDFSFileContextMainOperations extends
     return new FileContextTestHelper("/tmp/TestHDFSFileContextMainOperations");
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
@@ -80,7 +81,7 @@ public class TestHDFSFileContextMainOperations extends
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
   }
       
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -89,13 +90,13 @@ public class TestHDFSFileContextMainOperations extends
   }
   
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     super.setUp();
   }
   
   @Override
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     super.tearDown();
   }
@@ -134,16 +135,16 @@ public class TestHDFSFileContextMainOperations extends
 
     boolean isReady = fc.truncate(file, newLength);
 
-    Assert.assertTrue("Recovery is not expected.", isReady);
+    Assertions.assertTrue(isReady, "Recovery is not expected.");
 
     FileStatus fileStatus = fc.getFileStatus(file);
-    Assert.assertEquals(fileStatus.getLen(), newLength);
+    Assertions.assertEquals(fileStatus.getLen(), newLength);
     AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
 
     ContentSummary cs = fs.getContentSummary(dir);
-    Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
-        newLength * repl);
-    Assert.assertTrue(fs.delete(dir, true));
+    Assertions.assertEquals(cs.getSpaceConsumed(),
+        newLength * repl, "Bad disk space usage");
+    Assertions.assertTrue(fs.delete(dir, true));
   }
 
   @Test
@@ -280,8 +281,8 @@ public class TestHDFSFileContextMainOperations extends
     fs = cluster.getFileSystem();
     src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
     dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
-    Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
-    Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
+    Assertions.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
+    Assertions.assertTrue(fs.exists(dst1));    // ensure rename dst exists
   }
   
   /**
@@ -309,8 +310,8 @@ public class TestHDFSFileContextMainOperations extends
     fs = cluster.getFileSystem();
     src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
     dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
-    Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
-    Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
+    Assertions.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
+    Assertions.assertTrue(fs.exists(dst1));    // ensure rename dst exists
   }
 
   @Test
@@ -323,8 +324,8 @@ public class TestHDFSFileContextMainOperations extends
     };
 
     for (String invalidName: invalidNames) {
-      Assert.assertFalse(invalidName + " is not valid",
-        fc.getDefaultFileSystem().isValidName(invalidName));
+      Assertions.assertFalse(
+          fc.getDefaultFileSystem().isValidName(invalidName), invalidName + " is not valid");
     }
   }
 
@@ -332,9 +333,9 @@ public class TestHDFSFileContextMainOperations extends
       boolean exception) throws Exception {
     DistributedFileSystem fs = cluster.getFileSystem();
     try {
-      Assert.assertEquals(renameSucceeds, fs.rename(src, dst));
+      Assertions.assertEquals(renameSucceeds, fs.rename(src, dst));
     } catch (Exception ex) {
-      Assert.assertTrue(exception);
+      Assertions.assertTrue(exception);
     }
     Assert.assertEquals(renameSucceeds, !exists(fc, src));
     Assert.assertEquals(renameSucceeds, exists(fc, dst));

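One trap in this file: the isValidName hunk swaps the arguments into message-last order, which only exists on Jupiter's Assertions. JUnit 4's Assert has no assertFalse(boolean, String) overload, so keeping the old class with the new argument order would not compile. A sketch of the two valid forms (names illustrative):

    import org.junit.Assert;                  // JUnit 4
    import org.junit.jupiter.api.Assertions;  // JUnit 5

    class OverloadTrapSketch {   // illustrative only
      void check(boolean valid, String name) {
        // JUnit 4: message first.
        Assert.assertFalse(name + " is not valid", valid);
        // Would NOT compile -- no such overload on org.junit.Assert:
        // Assert.assertFalse(valid, name + " is not valid");
        // JUnit 5: message last.
        Assertions.assertFalse(valid, name + " is not valid");
      }
    }
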
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHdfsNativeCodeLoader.java

@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.fs;
 
-import org.junit.Test;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.junit.jupiter.api.Test;
 
 public class TestHdfsNativeCodeLoader {
   static final Logger LOG =

+ 10 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java

@@ -19,14 +19,14 @@
 package org.apache.hadoop.fs;
 
 import java.io.File;
-import static org.junit.Assert.fail;
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Set;
 
+import static org.junit.jupiter.api.Assertions.fail;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -39,10 +39,10 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests whether FileContext can resolve an hdfs path that has a symlink to
@@ -53,7 +53,7 @@ public class TestResolveHdfsSymlink {
   private static final FileContextTestHelper helper = new FileContextTestHelper();
   private static MiniDFSCluster cluster = null;
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(
@@ -63,7 +63,7 @@ public class TestResolveHdfsSymlink {
 
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -100,11 +100,11 @@ public class TestResolveHdfsSymlink {
 
     Set<AbstractFileSystem> afsList = fcHdfs
         .resolveAbstractFileSystems(alphaHdfsPathViaLink);
-    Assert.assertEquals(2, afsList.size());
+    Assertions.assertEquals(2, afsList.size());
     for (AbstractFileSystem afs : afsList) {
       if ((!afs.equals(fcHdfs.getDefaultFileSystem()))
           && (!afs.equals(fcLocal.getDefaultFileSystem()))) {
-        Assert.fail("Failed to resolve AFS correctly");
+        Assertions.fail("Failed to resolve AFS correctly");
       }
     }
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSWebHdfsFileContextMainOperations.java

@@ -25,8 +25,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import javax.security.auth.login.LoginException;
 import java.io.File;
@@ -57,7 +57,7 @@ public class TestSWebHdfsFileContextMainOperations
   protected static final byte[] data = getFileData(numBlocks,
       getDefaultBlockSize());
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning()
       throws IOException, LoginException, URISyntaxException {
 
@@ -104,7 +104,7 @@ public class TestSWebHdfsFileContextMainOperations
     return webhdfsUrl;
   }
 
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java

@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.fs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.net.URI;
@@ -38,9 +38,9 @@ import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 import org.slf4j.event.Level;
 
 /**
@@ -84,7 +84,7 @@ abstract public class TestSymlinkHdfs extends SymlinkBaseTest {
     return e;
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void beforeClassSetup() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.set(FsPermission.UMASK_LABEL, "000");
@@ -94,7 +94,7 @@ abstract public class TestSymlinkHdfs extends SymlinkBaseTest {
     dfs = cluster.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void afterClassTeardown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileContext.java

@@ -17,18 +17,18 @@
  */
 package org.apache.hadoop.fs;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
 
 public class TestSymlinkHdfsFileContext extends TestSymlinkHdfs {
 
   private static FileContext fc;
 
-  @BeforeClass
+  @BeforeAll
   public static void testSetup() throws Exception {
     fc = FileContext.getFileContext(cluster.getURI(0));
     wrapper = new FileContextTestWrapper(fc, "/tmp/TestSymlinkHdfsFileContext");

+ 8 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsFileSystem.java

@@ -17,28 +17,27 @@
  */
 package org.apache.hadoop.fs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 
-import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
 
 public class TestSymlinkHdfsFileSystem extends TestSymlinkHdfs {
 
-  @BeforeClass
+  @BeforeAll
   public static void testSetup() throws Exception {
     wrapper = new FileSystemTestWrapper(dfs, "/tmp/TestSymlinkHdfsFileSystem");
   }
 
   @Override
-  @Ignore("FileSystem adds missing authority in absolute URIs")
+  @Disabled("FileSystem adds missing authority in absolute URIs")
   @Test(timeout=10000)
   public void testCreateWithPartQualPathFails() throws IOException {}
 
-  @Ignore("FileSystem#create creates parent directories," +
+  @Disabled("FileSystem#create creates parent directories," +
       " so dangling links to directories are created")
   @Override
   @Test(timeout=10000)
@@ -56,7 +55,7 @@ public class TestSymlinkHdfsFileSystem extends TestSymlinkHdfs {
     wrapper.createSymlink(file, link, false);
     // Attempt recoverLease through a symlink
     boolean closed = dfs.recoverLease(link);
-    assertTrue("Expected recoverLease to return true", closed);
+      assertTrue(closed, "Expected recoverLease to return true");
   }
 
   @Test(timeout=10000)
@@ -69,7 +68,7 @@ public class TestSymlinkHdfsFileSystem extends TestSymlinkHdfs {
     wrapper.createSymlink(file, link, false);
     // Attempt recoverLease through a symlink
     boolean closed = dfs.isFileClosed(link);
-    assertTrue("Expected isFileClosed to return true", closed);
+      assertTrue(closed, "Expected isFileClosed to return true");
   }
 
   @Test(timeout=10000)

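Note that this file keeps org.junit.Test for its @Test(timeout=10000) methods while importing Jupiter's @Disabled. The two frameworks do not see each other's annotations: the Jupiter engine will not discover JUnit 4 @Test methods, and the vintage engine does not honor Jupiter's @Disabled, so the mix is fragile. The Jupiter-native timeout, as an illustrative sketch:

    import java.util.concurrent.TimeUnit;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    class TimeoutSketch {   // illustrative only
      @Test
      @Timeout(value = 10, unit = TimeUnit.SECONDS)   // replaces JUnit 4's @Test(timeout = 10000)
      void recoverLeaseThroughSymlink() {
        // test body
      }
    }
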
+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java

@@ -26,9 +26,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.PeerCache;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.Assert;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 import org.mockito.Mockito;
 
@@ -72,19 +72,19 @@ public class TestUnbuffer {
       // Read a byte.  This will trigger the creation of a block reader.
       stream.seek(2);
       int b = stream.read();
-      Assert.assertTrue(-1 != b);
+      Assertions.assertTrue(-1 != b);
 
       // The Peer cache should start off empty.
       PeerCache cache = dfs.getClient().getClientContext().getPeerCache();
-      Assert.assertEquals(0, cache.size());
+      Assertions.assertEquals(0, cache.size());
 
       // Unbuffer should clear the block reader and return the socket to the
       // cache.
       stream.unbuffer();
       stream.seek(2);
-      Assert.assertEquals(1, cache.size());
+      Assertions.assertEquals(1, cache.size());
       int b2 = stream.read();
-      Assert.assertEquals(b, b2);
+      Assertions.assertEquals(b, b2);
     } finally {
       if (stream != null) {
         IOUtils.cleanupWithLogger(null, stream);
@@ -117,7 +117,7 @@ public class TestUnbuffer {
       for (int i = 0; i < NUM_OPENS; i++) {
         streams[i] = dfs.open(TEST_PATH);
         LOG.info("opening file " + i + "...");
-        Assert.assertTrue(-1 != streams[i].read());
+        Assertions.assertTrue(-1 != streams[i].read());
         streams[i].unbuffer();
       }
     } finally {

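TestUnbuffer still imports org.junit.Rule and org.junit.rules.ExpectedException alongside the new Jupiter @Test. Jupiter ignores JUnit 4 rules, so an ExpectedException rule would silently stop verifying anything; the replacement idiom is Assertions.assertThrows. A sketch, assuming the rule was used for exception checks (names illustrative):

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    class AssertThrowsSketch {   // illustrative only
      @Test
      void rejectsUnsupportedOperation() {
        UnsupportedOperationException e = assertThrows(
            UnsupportedOperationException.class,
            () -> { throw new UnsupportedOperationException("unbuffer not supported"); });
        assertTrue(e.getMessage().contains("not supported"));
      }
    }
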
+ 10 - 12
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java

@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -34,8 +32,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.PathUtils;
 
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test of the URL stream handler.
@@ -48,7 +46,7 @@ public class TestUrlStreamHandler {
   private static final FsUrlStreamHandlerFactory HANDLER_FACTORY
       = new FsUrlStreamHandlerFactory();
 
-  @BeforeClass
+  @BeforeAll
   public static void setupHandler() {
 
     // Setup our own factory
@@ -166,20 +164,20 @@ public class TestUrlStreamHandler {
 
   @Test
   public void testHttpDefaultHandler() throws Throwable {
-    assertNull("Handler for HTTP is the Hadoop one",
-        HANDLER_FACTORY.createURLStreamHandler("http"));
+    assertNull(HANDLER_FACTORY.createURLStreamHandler("http"),
+        "Handler for HTTP is the Hadoop one");
   }
 
   @Test
   public void testHttpsDefaultHandler() throws Throwable {
-    assertNull("Handler for HTTPS is the Hadoop one",
-        HANDLER_FACTORY.createURLStreamHandler("https"));
+    assertNull(HANDLER_FACTORY.createURLStreamHandler("https"),
+        "Handler for HTTPS is the Hadoop one");
   }
 
   @Test
   public void testUnknownProtocol() throws Throwable {
-    assertNull("Unknown protocols are not handled",
-        HANDLER_FACTORY.createURLStreamHandler("gopher"));
+    assertNull(HANDLER_FACTORY.createURLStreamHandler("gopher"),
+        "Unknown protocols are not handled");
   }
 
 }

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestWebHdfsFileContextMainOperations.java

@@ -24,10 +24,10 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 import javax.security.auth.login.LoginException;
 import java.io.IOException;
@@ -38,8 +38,8 @@ import java.util.EnumSet;
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
 import static org.apache.hadoop.fs.FileContextTestHelper.getDefaultBlockSize;
 import static org.apache.hadoop.fs.FileContextTestHelper.getFileData;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Test of FileContext apis on Webhdfs.
@@ -71,7 +71,7 @@ public class TestWebHdfsFileContextMainOperations
     return webhdfsUrl;
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning()
       throws IOException, LoginException, URISyntaxException {
 
@@ -85,7 +85,7 @@ public class TestWebHdfsFileContextMainOperations
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     URI webhdfsUrlReal = getWebhdfsUrl();
     Path testBuildData = new Path(
@@ -153,7 +153,7 @@ public class TestWebHdfsFileContextMainOperations
     assertArrayEquals(data, bb);
   }
 
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/HDFSContract.java

@@ -27,7 +27,7 @@ import org.apache.hadoop.fs.contract.ContractOptions;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.Assert;
+import org.junit.jupiter.api.Assertions;
 
 import java.io.IOException;
 
@@ -73,14 +73,14 @@ public class HDFSContract extends AbstractFSContract {
   @Override
   public void init() throws IOException {
     super.init();
-    Assert.assertTrue("contract options not loaded",
-                      isSupported(ContractOptions.IS_CASE_SENSITIVE, false));
+    Assertions.assertTrue(
+        isSupported(ContractOptions.IS_CASE_SENSITIVE, false), "contract options not loaded");
   }
 
   @Override
   public FileSystem getTestFileSystem() throws IOException {
-    //assumes cluster is not null
-    Assert.assertNotNull("cluster not created", cluster);
+    // assumes cluster is not null
+    Assertions.assertNotNull(cluster, "cluster not created");
     return cluster.getFileSystem();
   }
 

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractAppend.java

@@ -17,19 +17,19 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractAppendTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractAppend extends AbstractContractAppendTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractConcat.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractConcatTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,14 +31,14 @@ import java.io.IOException;
  */
 public class TestHDFSContractConcat extends AbstractContractConcatTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
     // perform a simple operation on the cluster to verify it is up
     HDFSContract.getCluster().getFileSystem().getDefaultBlockSize();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractCreate.java

@@ -21,19 +21,19 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractCreateTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractCreate extends AbstractContractCreateTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractDelete.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractDeleteTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestHDFSContractDelete extends AbstractContractDeleteTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractGetFileStatus.java

@@ -21,20 +21,20 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractGetFileStatus extends
     AbstractContractGetFileStatusTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMkdir.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMkdirTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestHDFSContractMkdir extends AbstractContractMkdirTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractMultipartUploader.java

@@ -19,14 +19,14 @@ package org.apache.hadoop.fs.contract.hdfs;
 
 import java.io.IOException;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.BeforeAll;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractMultipartUploaderTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
+import org.junit.jupiter.api.AfterAll;
 
 /**
  * Test MultipartUploader tests on HDFS.
@@ -37,12 +37,12 @@ public class TestHDFSContractMultipartUploader extends
   protected static final Logger LOG =
       LoggerFactory.getLogger(TestHDFSContractMultipartUploader.class);
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractOpen.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractOpenTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestHDFSContractOpen extends AbstractContractOpenTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 7 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractPathHandle.java

@@ -21,11 +21,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.contract.AbstractContractPathHandleTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 /**
  * Verify HDFS compliance with {@link org.apache.hadoop.fs.PathHandle}
  * semantics.
@@ -35,15 +37,15 @@ public class TestHDFSContractPathHandle
 
   public TestHDFSContractPathHandle(String testname, Options.HandleOpt[] opts,
       boolean serialized) {
     super(testname, opts, serialized);
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

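This class is JUnit 4-parameterized through its (testname, opts, serialized) constructor, which Jupiter does not support directly; constructor-injected parameters are the usual stumbling block for automated rewrites. The Jupiter idiom is @ParameterizedTest with a source method, sketched here with entirely hypothetical names and values:

    import java.util.stream.Stream;

    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.Arguments;
    import org.junit.jupiter.params.provider.MethodSource;

    import static org.junit.jupiter.api.Assertions.assertNotNull;

    class PathHandleSketch {   // illustrative only
      static Stream<Arguments> handleOptions() {
        return Stream.of(
            Arguments.of("exact", true),
            Arguments.of("content", false));
      }

      @ParameterizedTest(name = "{0}")
      @MethodSource("handleOptions")
      void resolvesHandle(String testname, boolean serialized) {
        // Parameters arrive per invocation instead of through the constructor.
        assertNotNull(testname);
      }
    }
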
+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRename.java

@@ -21,19 +21,19 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRenameTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractRename extends AbstractContractRenameTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractRootDirectory.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -32,12 +32,12 @@ import java.io.IOException;
 public class TestHDFSContractRootDirectory extends
     AbstractContractRootDirectoryTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSeek.java

@@ -21,8 +21,8 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
@@ -31,12 +31,12 @@ import java.io.IOException;
  */
 public class TestHDFSContractSeek extends AbstractContractSeekTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractSetTimes.java

@@ -21,19 +21,19 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractSetTimesTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractSetTimes extends AbstractContractSetTimesTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 4 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/contract/hdfs/TestHDFSContractUnbuffer.java

@@ -21,20 +21,19 @@ package org.apache.hadoop.fs.contract.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
 
 import java.io.IOException;
 
 public class TestHDFSContractUnbuffer extends AbstractContractUnbufferTest {
 
-  @BeforeClass
+  @BeforeAll
   public static void createCluster() throws IOException {
     HDFSContract.createCluster();
   }
 
-  @AfterClass
+  @AfterAll
   public static void teardownCluster() throws IOException {
     HDFSContract.destroyCluster();
   }

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.fs.loadGenerator;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.BufferedReader;
 import java.io.File;
@@ -33,7 +33,8 @@ import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+
 /**
  * This class tests if a balancer schedules tasks correctly.
  */

+ 8 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java

@@ -22,10 +22,7 @@ import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
 import static org.apache.hadoop.fs.permission.AclEntryType.USER;
 import static org.apache.hadoop.fs.permission.FsAction.ALL;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -45,10 +42,10 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -66,7 +63,7 @@ public class TestStickyBit {
   private static FileSystem hdfsAsUser1;
   private static FileSystem hdfsAsUser2;
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws Exception {
     conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
@@ -85,7 +82,7 @@ public class TestStickyBit {
     assertTrue(hdfsAsUser2 instanceof DistributedFileSystem);
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     if (hdfs != null) {
       for (FileStatus stat: hdfs.listStatus(new Path("/"))) {
@@ -94,7 +91,7 @@ public class TestStickyBit {
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws Exception {
     IOUtils.cleanupWithLogger(null, hdfs, hdfsAsUser1, hdfsAsUser2);
     if (cluster != null) {

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestHdfsTextCommand.java

@@ -18,8 +18,8 @@
 
 package org.apache.hadoop.fs.shell;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.io.InputStream;
@@ -33,9 +33,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 
 /**
@@ -48,7 +48,7 @@ public class TestHdfsTextCommand {
   private static MiniDFSCluster cluster;
   private static FileSystem fs;
   
-  @Before
+  @BeforeEach
     public void setUp() throws IOException{
     Configuration conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).build();
@@ -56,7 +56,7 @@ public class TestHdfsTextCommand {
     fs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
     public void tearDown() throws IOException{
     if(fs != null){
       fs.close();

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestNNStartupWhenViewFSOverloadSchemeEnabled.java

@@ -24,9 +24,9 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.junit.After;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
 
 /**
  * Tests that the NN startup is successful with ViewFSOverloadScheme.
@@ -37,7 +37,7 @@ public class TestNNStartupWhenViewFSOverloadSchemeEnabled {
   private static final String HDFS_SCHEME = "hdfs";
   private static final Configuration CONF = new Configuration();
 
-  @BeforeClass
+  @BeforeAll
   public static void setUp() {
     CONF.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     CONF.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
@@ -78,7 +78,7 @@ public class TestNNStartupWhenViewFSOverloadSchemeEnabled {
     cluster.waitActive();
   }
 
-  @After
+  @AfterEach
   public void shutdownCluster() {
     if (cluster != null) {
       cluster.shutdown();

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFSOverloadSchemeWithMountTableConfigInHDFS.java

@@ -25,7 +25,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.junit.Before;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Tests ViewFileSystemOverloadScheme with configured mount links.
@@ -35,7 +35,7 @@ public class TestViewFSOverloadSchemeWithMountTableConfigInHDFS
   private Path oldVersionMountTablePath;
   private Path newVersionMountTablePath;
 
-  @Before
+  @BeforeEach
   @Override
   public void setUp() throws IOException {
     super.setUp();

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java

@@ -29,9 +29,9 @@ import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Make sure that ViewFileSystem works when the root of an FS is mounted to a
@@ -48,7 +48,7 @@ public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest {
     return new FileSystemTestHelper("/tmp/TestViewFileSystemAtHdfsRoot");
   }
   
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
     SupportsBlocks = true;
@@ -63,7 +63,7 @@ public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest {
     fHdfs = cluster.getFileSystem();
   }
       
-  @AfterClass
+  @AfterAll
   public static void clusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -71,7 +71,7 @@ public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest {
   }
 
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fHdfs;
     super.setUp();

+ 27 - 30
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java

@@ -56,12 +56,9 @@ import org.apache.hadoop.test.GenericTestUtils;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import static org.junit.Assert.*;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.*;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -85,7 +82,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
     return new FileSystemTestHelper("/tmp/TestViewFileSystemHdfs");
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
 
@@ -129,7 +126,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
   }
 
       
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -137,7 +134,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
   }
 
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     // create the test root on local_fs
     fsTarget = fHdfs;
@@ -147,7 +144,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
   }
 
   @Override
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     super.tearDown();
   }
@@ -253,15 +250,15 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
         viewFs.getFileChecksum(mountDataFilePath);
     FileChecksum fileChecksumViaTargetFs =
         fsTarget.getFileChecksum(fsTargetFilePath);
-    assertTrue("File checksum not matching!",
-        fileChecksumViaViewFs.equals(fileChecksumViaTargetFs));
+    assertTrue(fileChecksumViaViewFs.equals(fileChecksumViaTargetFs),
+        "File checksum not matching!");
 
     fileChecksumViaViewFs =
         viewFs.getFileChecksum(mountDataFilePath, fileLength / 2);
     fileChecksumViaTargetFs =
         fsTarget.getFileChecksum(fsTargetFilePath, fileLength / 2);
-    assertTrue("File checksum not matching!",
-        fileChecksumViaViewFs.equals(fileChecksumViaTargetFs));
+    assertTrue(fileChecksumViaViewFs.equals(fileChecksumViaTargetFs),
+        "File checksum not matching!");
   }
 
   //Rename should fail on across different fileSystems
@@ -276,7 +273,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
     fsView.create(filePath);
     try {
       fsView.rename(filePath, hdfFilepath);
-      ContractTestUtils.fail("Should thrown IOE on Renames across filesytems");
+      Assertions.fail("Should thrown IOE on Renames across filesytems");
     } catch (IOException e) {
       GenericTestUtils
           .assertExceptionContains("Renames across Mount points not supported",
@@ -325,7 +322,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
     // 1. test mkdirs
     final Path testDir = new Path("testdir1/sub1/sub3");
     final Path testDir_tmp = new Path("testdir1/sub1/sub3_temp");
-    assertTrue(testDir + ": Failed to create!", nfly.mkdirs(testDir));
+    assertTrue(nfly.mkdirs(testDir), testDir + ": Failed to create!");
 
     // Test renames
     assertTrue(nfly.rename(testDir, testDir_tmp));
@@ -333,7 +330,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
 
     for (final URI testUri : testUris) {
       final FileSystem fs = FileSystem.get(testUri, testConf);
-      assertTrue(testDir + " should exist!", fs.exists(testDir));
+      assertTrue(fs.exists(testDir), testDir + " should exist!");
     }
 
     // 2. test write
@@ -349,7 +346,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
       final FileSystem fs = FileSystem.get(testUri, testConf);
       final FSDataInputStream fsdis = fs.open(testFile);
       try {
-        assertEquals("Wrong file content", testString, fsdis.readUTF());
+        assertEquals(testString, fsdis.readUTF(), "Wrong file content");
       } finally {
         fsdis.close();
       }
@@ -364,7 +361,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
       FSDataInputStream fsDis = null;
       try {
         fsDis = nfly.open(testFile);
-        assertEquals("Wrong file content", testString, fsDis.readUTF());
+        assertEquals(testString, fsDis.readUTF(), "Wrong file content");
       } finally {
         IOUtils.cleanupWithLogger(LOG, fsDis);
         cluster.restartNameNode(i);
@@ -378,7 +375,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
     FSDataInputStream fsDis = null;
     try {
       fsDis = nfly.open(testFile);
-      assertEquals("Wrong file content", testString, fsDis.readUTF());
+      assertEquals(testString, fsDis.readUTF(), "Wrong file content");
       assertTrue(fs1.exists(testFile));
     } finally {
       IOUtils.cleanupWithLogger(LOG, fsDis);
@@ -393,18 +390,18 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
       for (final URI testUri : testUris) {
         final FileSystem fs = FileSystem.get(testUri, conf);
         fs.setTimes(testFile, 1L, 1L);
-        assertEquals(testUri + "Set mtime failed!", 1L,
-            fs.getFileStatus(testFile).getModificationTime());
-        assertEquals("nfly file status wrong", expectedMtime,
-            nfly.getFileStatus(testFile).getModificationTime());
+        assertEquals(1L, fs.getFileStatus(testFile).getModificationTime(),
+            testUri + ": Set mtime failed!");
+        assertEquals(expectedMtime,
+            nfly.getFileStatus(testFile).getModificationTime(), "nfly file status wrong");
         FSDataInputStream fsDis2 = null;
         try {
           fsDis2 = nfly.open(testFile);
-          assertEquals("Wrong file content", testString, fsDis2.readUTF());
-          // repair is done, now trying via normal fs
-          //
-          assertEquals("Repair most recent failed!", expectedMtime,
-              fs.getFileStatus(testFile).getModificationTime());
+          assertEquals(testString, fsDis2.readUTF(), "Wrong file content");
+          // repair is done, now trying via normal fs
+          assertEquals(expectedMtime,
+              fs.getFileStatus(testFile).getModificationTime(), "Repair most recent failed!");
         } finally {
           IOUtils.cleanupWithLogger(LOG, fsDis2);
         }
@@ -476,7 +473,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
     FileSystem otherfs = map.get("user1");
     otherfs.mkdirs(user1Path);
     String owner = otherfs.getFileStatus(user1Path).getOwner();
-    assertEquals("The owner did not match ", owner, userUgi.getShortUserName());
+    assertEquals(owner, userUgi.getShortUserName(), "The owner did not match");
     otherfs.delete(user1Path, false);
   }
 }

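The assertion rewrites in the file above show the second recurring pattern in this PR: org.junit.Assert puts the failure message in the first parameter, while org.junit.jupiter.api.Assertions moves it to the last. A minimal before/after sketch (the values are hypothetical, not from the tests above):

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;

public class MessageOrderSketch {
  public static void main(String[] args) {
    String expected = "content";
    String actual = "content";
    // JUnit 4: assertEquals("Wrong file content", expected, actual);
    assertEquals(expected, actual, "Wrong file content");  // message is now last
    // JUnit 4: assertTrue("should exist", actual != null);
    assertTrue(actual != null, "should exist");            // message is now last
  }
}
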
+ 35 - 39
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkFallback.java

@@ -18,11 +18,7 @@
 package org.apache.hadoop.fs.viewfs;
 
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -45,11 +41,11 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -79,7 +75,7 @@ public class TestViewFileSystemLinkFallback extends ViewFileSystemBaseTest {
     return new FileSystemTestHelper(TEST_BASE_PATH);
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException,
       LoginException, URISyntaxException {
     SupportsBlocks = true;
@@ -100,7 +96,7 @@ public class TestViewFileSystemLinkFallback extends ViewFileSystemBaseTest {
         Constants.CONFIG_VIEWFS_DEFAULT_MOUNT_TABLE, "/", null, null);
   }
 
-  @AfterClass
+  @AfterAll
   public static void clusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -108,7 +104,7 @@ public class TestViewFileSystemLinkFallback extends ViewFileSystemBaseTest {
   }
 
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fsDefault;
     super.setUp();
@@ -179,10 +175,10 @@ public class TestViewFileSystemLinkFallback extends ViewFileSystemBaseTest {
     FileStatus baseFileRelStat = vfs.getFileStatus(new Path(viewFsUri.toString()
         + testBaseFileRelative.toUri().toString()));
     LOG.info("BaseFileRelStat: " + baseFileRelStat);
-    Assert.assertEquals("Unexpected file length for " + testBaseFile,
-        1, baseFileStat.getLen());
-    Assert.assertEquals("Unexpected file length for " + testBaseFileRelative,
-        baseFileStat.getLen(), baseFileRelStat.getLen());
+    Assertions.assertEquals(1, baseFileStat.getLen(),
+        "Unexpected file length for " + testBaseFile);
+    Assertions.assertEquals(baseFileStat.getLen(), baseFileRelStat.getLen(),
+        "Unexpected file length for " + testBaseFileRelative);
     FileStatus level2FileStat = vfs.getFileStatus(new Path(viewFsUri.toString()
         + testLevel2File.toUri().toString()));
     LOG.info("Level2FileStat: " + level2FileStat);
@@ -228,8 +224,8 @@ public class TestViewFileSystemLinkFallback extends ViewFileSystemBaseTest {
     FileStatus baseFileStat = vfs.getFileStatus(
         new Path(viewFsUri.toString() + testBaseFile.toUri().toString()));
     LOG.info("BaseFileStat: " + baseFileStat);
-    Assert.assertEquals("Unexpected file length for " + testBaseFile,
-        0, baseFileStat.getLen());
+    Assertions.assertEquals(0, baseFileStat.getLen(),
+        "Unexpected file length for " + testBaseFile);
     FileStatus level2FileStat = vfs.getFileStatus(new Path(viewFsUri.toString()
         + testLevel2File.toUri().toString()));
     LOG.info("Level2FileStat: " + level2FileStat);
@@ -240,8 +236,8 @@ public class TestViewFileSystemLinkFallback extends ViewFileSystemBaseTest {
 
     FileStatus level2FileStatAfterWrite = vfs.getFileStatus(
         new Path(viewFsUri.toString() + testLevel2File.toUri().toString()));
-    Assert.assertTrue("Unexpected file length for " + testLevel2File,
-        level2FileStatAfterWrite.getLen() > level2FileStat.getLen());
+    Assertions.assertTrue(
+        level2FileStatAfterWrite.getLen() > level2FileStat.getLen(),
+        "Unexpected file length for " + testLevel2File);
 
     vfs.close();
   }
@@ -265,8 +261,8 @@ public class TestViewFileSystemLinkFallback extends ViewFileSystemBaseTest {
       FileSystem.get(viewFsUri, conf);
       fail("Shouldn't allow linkMergeSlash to take extra mount points!");
     } catch (IOException e) {
-      assertTrue("Unexpected error: " + e.getMessage(),
-          e.getMessage().contains(expectedErrorMsg));
+      assertTrue(e.getMessage().contains(expectedErrorMsg),
+          "Unexpected error: " + e.getMessage());
     }
   }
 
@@ -299,13 +295,13 @@ public class TestViewFileSystemLinkFallback extends ViewFileSystemBaseTest {
         afterFallback.add(stat.getPath());
       }
       afterFallback.removeAll(beforeFallback);
-      assertTrue("Listing didn't include fallback link",
-          afterFallback.size() == 1);
+      assertTrue(afterFallback.size() == 1,
+          "Listing didn't include fallback link");
       Path[] fallbackArray = new Path[afterFallback.size()];
       afterFallback.toArray(fallbackArray);
       Path expected = new Path(viewFsUri.toString(), "dir1");
-      assertEquals("Path did not match",
-          expected, fallbackArray[0]);
+      assertEquals(expected, fallbackArray[0], "Path did not match");
 
       // Create a directory using the returned fallback path and verify
       Path childDir = new Path(fallbackArray[0], "child");
@@ -349,13 +345,13 @@ public class TestViewFileSystemLinkFallback extends ViewFileSystemBaseTest {
         afterFallback.add(stat.getPath());
       }
       afterFallback.removeAll(beforeFallback);
-      assertEquals("The same directory name in fallback link should be shaded",
-          1, afterFallback.size());
+      assertEquals(1, afterFallback.size(),
+          "The same directory name in fallback link should be shaded");
       Path[] fallbackArray = new Path[afterFallback.size()];
       // Only user1 should be listed as fallback link
       Path expected = new Path(viewFsDefaultClusterUri.toString(), "user1");
-      assertEquals("Path did not match",
-          expected, afterFallback.toArray(fallbackArray)[0]);
+      assertEquals(expected, afterFallback.toArray(fallbackArray)[0],
+          "Path did not match");
 
       // Create a directory using the returned fallback path and verify
       Path childDir = new Path(fallbackArray[0], "child");
@@ -430,8 +426,8 @@ public class TestViewFileSystemLinkFallback extends ViewFileSystemBaseTest {
       }
       //viewfs://default/user1/hive/warehouse
       afterFallback.removeAll(beforeFallback);
-      assertEquals("The same directory name in fallback link should be shaded",
-          1, afterFallback.size());
+      assertEquals(1, afterFallback.size(),
+          "The same directory name in fallback link should be shaded");
     }
   }
 
@@ -502,8 +498,8 @@ public class TestViewFileSystemLinkFallback extends ViewFileSystemBaseTest {
         }
       }
       afterFallback.removeAll(beforeFallback);
-      assertEquals("Just to make sure paths are same.", 0,
-          afterFallback.size());
+      assertEquals(0, afterFallback.size(),
+          "Just to make sure paths are the same.");
     }
   }
 
@@ -559,14 +555,14 @@ public class TestViewFileSystemLinkFallback extends ViewFileSystemBaseTest {
           assertEquals(FsPermission.valueOf("-rwxr--rw-"),
               stat.getPermission());
         } else {
-          assertEquals("Path is: " + stat.getPath(),
-              FsPermission.valueOf("-rwxr--r--"), stat.getPermission());
+          assertEquals(FsPermission.valueOf("-rwxr--r--"), stat.getPermission(),
+              "Path is: " + stat.getPath());
         }
       }
       afterFallback.removeAll(beforeFallback);
       assertEquals(1, afterFallback.size());
-      assertEquals("/user2 dir from fallback should be listed.", "user2",
-          afterFallback.iterator().next().getName());
+        assertEquals("user2",
+                afterFallback.iterator().next().getName(), "/user2 dir from fallback should be listed.");
     }
   }
 
@@ -908,7 +904,7 @@ public class TestViewFileSystemLinkFallback extends ViewFileSystemBaseTest {
       assertFalse(fsTarget.exists(Path.mergePaths(fallbackTarget, vfsTestDir)));
       try {
         vfs.create(vfsTestDir);
-        Assert.fail("Should fail to create file as this is an internal dir.");
+        Assertions.fail("Should fail to create file as this is an internal dir.");
       } catch (NotInMountpointException e){
         // This tree is part of internal tree. The above exception will be
         // thrown from getDefaultReplication, getDefaultBlockSize APIs which was

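Note that the file above keeps JUnit 4's org.junit.Test import while its lifecycle annotations and assertions move to Jupiter; the same mix appears in several files below. That combination only runs correctly while the vintage engine is on the classpath and picks the class up as a JUnit 4 test, in which case the Jupiter @BeforeEach/@BeforeAll methods are silently ignored; the Jupiter engine, conversely, does not discover org.junit.Test methods at all. A minimal sketch of the fully migrated form, assuming the intent is a pure Jupiter run (the wildcard org.junit.jupiter.api.* imports added elsewhere in this section would likewise normally be expanded under checkstyle rules such as Hadoop's):

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;  // must also come from Jupiter, not org.junit

public class FullyMigratedSketch {
  private boolean ready;

  @BeforeEach
  public void setUp() {
    // Only runs when the Jupiter engine discovers the @Test method below.
    ready = true;
  }

  @Test
  public void testReady() {
    Assertions.assertTrue(ready, "setUp() should have run");
  }
}
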
+ 12 - 18
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkMergeSlash.java

@@ -34,17 +34,11 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import javax.security.auth.login.LoginException;
 
@@ -74,7 +68,7 @@ public class TestViewFileSystemLinkMergeSlash extends ViewFileSystemBaseTest {
     return new FileSystemTestHelper(TEST_TEMP_PATH);
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException,
       LoginException, URISyntaxException {
     SupportsBlocks = true;
@@ -93,7 +87,7 @@ public class TestViewFileSystemLinkMergeSlash extends ViewFileSystemBaseTest {
     fsDefault = FS_HDFS[FS_INDEX_DEFAULT];
   }
 
-  @AfterClass
+  @AfterAll
   public static void clusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -101,7 +95,7 @@ public class TestViewFileSystemLinkMergeSlash extends ViewFileSystemBaseTest {
   }
 
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fsDefault;
     super.setUp();
@@ -191,9 +185,9 @@ public class TestViewFileSystemLinkMergeSlash extends ViewFileSystemBaseTest {
       fail("Shouldn't allow both merge slash link and regular link on same "
           + "mount table.");
     } catch (IOException e) {
-      assertTrue("Unexpected error message: " + e.getMessage(),
-          e.getMessage().contains(expectedErrorMsg1) || e.getMessage()
-              .contains(expectedErrorMsg2));
+      assertTrue(e.getMessage().contains(expectedErrorMsg1)
+              || e.getMessage().contains(expectedErrorMsg2),
+          "Unexpected error message: " + e.getMessage());
     }
   }
 
@@ -226,9 +220,9 @@ public class TestViewFileSystemLinkMergeSlash extends ViewFileSystemBaseTest {
         LINK_MERGE_SLASH_CLUSTER_1_NAME, "/", null, null);
     FileSystem fs = FileSystem.get(viewFsUri, conf);
     FileSystem[] childFs = fs.getChildFileSystems();
-    Assert.assertEquals("Unexpected number of child filesystems!",
-        1, childFs.length);
-    Assert.assertEquals("Unexpected child filesystem!",
-        DistributedFileSystem.class, childFs[0].getClass());
+    Assertions.assertEquals(1, childFs.length,
+        "Unexpected number of child filesystems!");
+    Assertions.assertEquals(DistributedFileSystem.class, childFs[0].getClass(),
+        "Unexpected child filesystem!");
   }
 }

+ 9 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLinkRegex.java

@@ -36,16 +36,12 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.fs.viewfs.RegexMountPoint.INTERCEPTOR_INTERNAL_SEP;
-import static org.junit.Assert.assertSame;
+import static org.junit.jupiter.api.Assertions.assertSame;
 
 /**
  * Test linkRegex node type for view file system.
@@ -73,7 +69,7 @@ public class TestViewFileSystemLinkRegex extends ViewFileSystemBaseTest {
     return new FileSystemTestHelper(TEST_BASE_PATH);
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException {
     SupportsBlocks = true;
     clusterConfig = ViewFileSystemTestSetup.createConfig();
@@ -91,7 +87,7 @@ public class TestViewFileSystemLinkRegex extends ViewFileSystemBaseTest {
     fsDefault = FS_HDFS[FS_INDEX_DEFAULT];
   }
 
-  @AfterClass
+  @AfterAll
   public static void clusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -99,7 +95,7 @@ public class TestViewFileSystemLinkRegex extends ViewFileSystemBaseTest {
   }
 
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fsDefault;
     super.setUp();
@@ -157,7 +153,7 @@ public class TestViewFileSystemLinkRegex extends ViewFileSystemBaseTest {
   private void createDirWithChildren(
       FileSystem fileSystem, Path dir, List<Path> childrenFiles)
       throws IOException {
-    Assert.assertTrue(fileSystem.mkdirs(dir));
+    Assertions.assertTrue(fileSystem.mkdirs(dir));
     int index = 0;
     for (Path childFile : childrenFiles) {
       createFile(fileSystem, childFile, index, true);
@@ -224,11 +220,11 @@ public class TestViewFileSystemLinkRegex extends ViewFileSystemBaseTest {
     URI viewFsUri = new URI(
         FsConstants.VIEWFS_SCHEME, CLUSTER_NAME, "/", null, null);
     try (FileSystem vfs = FileSystem.get(viewFsUri, config)) {
-      Assert.assertEquals(expectedResolveResult.toString(),
+      Assertions.assertEquals(expectedResolveResult.toString(),
           vfs.resolvePath(dirPathBeforeMountPoint).toString());
-      Assert.assertTrue(
+      Assertions.assertTrue(
           vfs.getFileStatus(dirPathBeforeMountPoint).isDirectory());
-      Assert.assertEquals(
+      Assertions.assertEquals(
           childrenFilesCnt, vfs.listStatus(dirPathBeforeMountPoint).length);
 
       // Test Inner cache, the resolved result's filesystem should be the same.

+ 8 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeHdfsFileSystemContract.java

@@ -19,8 +19,6 @@ package org.apache.hadoop.fs.viewfs;
 
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME;
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT;
-import static org.junit.Assume.assumeTrue;
-
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
@@ -39,11 +37,11 @@ import org.apache.hadoop.hdfs.TestHDFSFileSystemContract;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
 
 /**
  * Tests ViewFileSystemOverloadScheme with file system contract tests.
@@ -55,7 +53,7 @@ public class TestViewFileSystemOverloadSchemeHdfsFileSystemContract
   private static String defaultWorkingDirectory;
   private static Configuration conf = new HdfsConfiguration();
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws IOException {
     final File basedir = GenericTestUtils.getRandomizedTestDir();
     conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY,
@@ -67,7 +65,7 @@ public class TestViewFileSystemOverloadSchemeHdfsFileSystemContract
         "/user/" + UserGroupInformation.getCurrentUser().getShortUserName();
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     conf.set(String.format("fs.%s.impl", "hdfs"),
         ViewFileSystemOverloadScheme.class.getName());
@@ -89,7 +87,7 @@ public class TestViewFileSystemOverloadSchemeHdfsFileSystemContract
     fs = FileSystem.get(conf);
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDownAfter() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -129,7 +127,7 @@ public class TestViewFileSystemOverloadSchemeHdfsFileSystemContract
   }
 
   @Override
-  @Ignore // This test same as above in this case.
+  @Disabled // This test is the same as the one above in this case.
   public void testLSRootDir() throws Throwable {
   }
 }

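The file above also shows the skip-annotation mapping: JUnit 4's @Ignore becomes Jupiter's @Disabled, and org.junit.Assume gives way to org.junit.jupiter.api.Assumptions (imported in the DFSTestUtil hunk near the end of this section). A minimal sketch, with hypothetical reason strings and condition:

import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

public class SkipSketch {
  @Disabled("covered by another test")  // JUnit 4: @Ignore("covered by another test")
  @Test
  public void skippedEntirely() { }

  @Test
  public void skippedConditionally() {
    // JUnit 4: Assume.assumeTrue(condition); as with assertions,
    // the optional message moves to the last parameter in Jupiter.
    Assumptions.assumeTrue(System.getenv("RUN_SLOW_TESTS") != null,
        "skipping unless RUN_SLOW_TESTS is set");
  }
}
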
+ 34 - 34
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeWithHdfsScheme.java

@@ -39,16 +39,16 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME;
 import static org.apache.hadoop.fs.viewfs.Constants.CONFIG_VIEWFS_IGNORE_PORT_IN_MOUNT_TABLE_NAME_DEFAULT;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 
 /**
@@ -67,7 +67,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
   private static final String HDFS_USER_FOLDER = "/HDFSUser";
   private static final String LOCAL_FOLDER = "/local";
 
-  @BeforeClass
+  @BeforeAll
   public static void init() throws IOException {
     cluster =
         new MiniDFSCluster.Builder(new Configuration()).numDataNodes(2).build();
@@ -77,7 +77,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
   /**
    * Sets up the configurations and starts the MiniDFSCluster.
    */
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     Configuration config = getNewConf();
     config.setInt(
@@ -91,10 +91,10 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
         URI.create(config.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY));
     localTargetDir = new File(TEST_ROOT_DIR, "/root/");
     localTargetDir.mkdirs();
-    Assert.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
+    Assertions.assertEquals(HDFS_SCHEME, defaultFSURI.getScheme()); // hdfs scheme.
   }
 
-  @After
+  @AfterEach
   public void cleanUp() throws IOException {
     if (cluster != null) {
       FileSystem fs = new DistributedFileSystem();
@@ -102,7 +102,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
       try {
         FileStatus[] statuses = fs.listStatus(new Path("/"));
         for (FileStatus st : statuses) {
-          Assert.assertTrue(fs.delete(st.getPath(), true));
+          Assertions.assertTrue(fs.delete(st.getPath(), true));
         }
       } finally {
         fs.close();
@@ -111,7 +111,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws IOException {
     if (cluster != null) {
       FileSystem.closeAll();
@@ -154,7 +154,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
 
     try (FileSystem fs
         =  FileSystem.get(conf)) {
-      Assert.assertEquals(2, fs.getChildFileSystems().length);
+      Assertions.assertEquals(2, fs.getChildFileSystems().length);
       fs.createNewFile(hdfsFile); // /HDFSUser/testfile
       fs.mkdirs(localDir); // /local/test
     }
@@ -162,20 +162,20 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
     // Initialize HDFS and test files exist in ls or not
     try (DistributedFileSystem dfs = new DistributedFileSystem()) {
       dfs.initialize(defaultFSURI, conf);
-      Assert.assertTrue(dfs.exists(
+      Assertions.assertTrue(dfs.exists(
           new Path(Path.getPathWithoutSchemeAndAuthority(hdfsTargetPath),
               hdfsFile.getName()))); // should be in hdfs.
-      Assert.assertFalse(dfs.exists(
+      Assertions.assertFalse(dfs.exists(
           new Path(Path.getPathWithoutSchemeAndAuthority(localTragetPath),
               localDir.getName()))); // should not be in local fs.
     }
 
     try (RawLocalFileSystem lfs = new RawLocalFileSystem()) {
       lfs.initialize(localTragetPath.toUri(), conf);
-      Assert.assertFalse(lfs.exists(
+      Assertions.assertFalse(lfs.exists(
           new Path(Path.getPathWithoutSchemeAndAuthority(hdfsTargetPath),
               hdfsFile.getName()))); // should not be in hdfs.
-      Assert.assertTrue(lfs.exists(
+      Assertions.assertTrue(lfs.exists(
           new Path(Path.getPathWithoutSchemeAndAuthority(localTragetPath),
               localDir.getName()))); // should be in local fs.
     }
@@ -210,7 +210,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
       });
     } else {
       try (FileSystem fs = FileSystem.get(conf)) {
-        Assert.assertEquals("hdfs", fs.getScheme());
+        Assertions.assertEquals("hdfs", fs.getScheme());
       }
     }
   }
@@ -241,14 +241,14 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
     try (FileSystem fs = FileSystem.get(conf)) {
       fs.mkdirs(hdfsTargetPath);
       FileStatus[] ls = fs.listStatus(new Path("/"));
-      Assert.assertEquals(2, ls.length);
+      Assertions.assertEquals(2, ls.length);
       String lsPath1 =
           Path.getPathWithoutSchemeAndAuthority(ls[0].getPath()).toString();
       String lsPath2 =
           Path.getPathWithoutSchemeAndAuthority(ls[1].getPath()).toString();
-      Assert.assertTrue(
+      Assertions.assertTrue(
           HDFS_USER_FOLDER.equals(lsPath1) || LOCAL_FOLDER.equals(lsPath1));
-      Assert.assertTrue(
+      Assertions.assertTrue(
           HDFS_USER_FOLDER.equals(lsPath2) || LOCAL_FOLDER.equals(lsPath2));
     }
   }
@@ -270,7 +270,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
 
     try (FileSystem fs = FileSystem.get(conf)) {
       fs.listStatus(new Path("/nonMount"));
-      Assert.fail("It should fail as no mount link with /nonMount");
+      Assertions.fail("It should fail as no mount link with /nonMount");
     }
   }
 
@@ -349,8 +349,8 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
     try (FileSystem fs = FileSystem.get(conf)) {
       fs.createNewFile(new Path("/nonMount/myfile"));
       FileStatus[] ls = fs.listStatus(new Path("/nonMount"));
-      Assert.assertEquals(1, ls.length);
-      Assert.assertEquals(
+      Assertions.assertEquals(1, ls.length);
+      Assertions.assertEquals(
           Path.getPathWithoutSchemeAndAuthority(ls[0].getPath()).getName(),
           "myfile");
     }
@@ -376,7 +376,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
             localTargetDir.toURI().toString()}, conf);
     try (FileSystem fs = FileSystem.get(conf)) {
       if (fallbackExist) {
-        Assert.assertTrue(fs.createNewFile(new Path("/newFileOnRoot")));
+        Assertions.assertTrue(fs.createNewFile(new Path("/newFileOnRoot")));
       } else {
         LambdaTestUtils.intercept(NotInMountpointException.class, () -> {
           fs.createNewFile(new Path("/newFileOnRoot"));
@@ -422,7 +422,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
 
     try (FileSystem fs = FileSystem.get(conf)) {
       fs.createNewFile(new Path("/onRootWhenFallBack"));
-      Assert.fail("OverloadScheme target fs should be valid.");
+      Assertions.fail("OverloadScheme target fs should be valid.");
     }
   }
 
@@ -446,7 +446,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
     try (FileSystem fs = FileSystem.get(conf)) {
       Path testFile = new Path(HDFS_USER_FOLDER + "/testFile");
       fs.createNewFile(testFile);
-      Assert.assertTrue(fs.exists(testFile));
+      Assertions.assertTrue(fs.exists(testFile));
     }
   }
 
@@ -470,13 +470,13 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
 
     // 1. Only 1 hdfs child file system should be there with cache.
     try (FileSystem vfs = FileSystem.get(conf)) {
-      Assert.assertEquals(1, vfs.getChildFileSystems().length);
+      Assertions.assertEquals(1, vfs.getChildFileSystems().length);
     }
 
     // 2. Two hdfs file systems should be there if no cache.
     conf.setBoolean(Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE, false);
     try (FileSystem vfs = FileSystem.get(conf)) {
-      Assert.assertEquals(isFallBackExist(conf) ? 3 : 2,
+      Assertions.assertEquals(isFallBackExist(conf) ? 3 : 2,
           vfs.getChildFileSystems().length);
     }
   }
@@ -509,7 +509,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
     conf.setBoolean(Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE, false);
     // Two hdfs file systems should be there if no cache.
     try (FileSystem vfs = FileSystem.get(conf)) {
-      Assert.assertEquals(isFallBackExist(conf) ? 3 : 2,
+      Assertions.assertEquals(isFallBackExist(conf) ? 3 : 2,
           vfs.getChildFileSystems().length);
     }
   }
@@ -537,7 +537,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
     // cache should work.
     conf.setBoolean(Constants.CONFIG_VIEWFS_ENABLE_INNER_CACHE, false);
     try (FileSystem vfs = FileSystem.get(conf)) {
-      Assert.assertEquals(isFallBackExist(conf) ? 2 : 1,
+      Assertions.assertEquals(isFallBackExist(conf) ? 2 : 1,
           vfs.getChildFileSystems().length);
     }
   }
@@ -561,7 +561,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
 
     final Path testDir = new Path("/nflyroot/testdir1/sub1/sub3");
     final Path testDirTmp = new Path("/nflyroot/testdir1/sub1/sub3_temp");
-    assertTrue(testDir + ": Failed to create!", nfly.mkdirs(testDir));
+    assertTrue(nfly.mkdirs(testDir), testDir + ": Failed to create!");
 
     // Test renames
     assertTrue(nfly.rename(testDir, testDirTmp));
@@ -570,7 +570,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
     final URI[] testUris = new URI[] {uri1, uri2 };
     for (final URI testUri : testUris) {
       final FileSystem fs = FileSystem.get(testUri, conf);
-      assertTrue(testDir + " should exist!", fs.exists(testDir));
+      assertTrue(fs.exists(testDir), testDir + " should exist!");
     }
   }
 
@@ -688,7 +688,7 @@ public class TestViewFileSystemOverloadSchemeWithHdfsScheme {
   private void readString(final FileSystem nfly, final Path testFile,
       final String testString, final URI testUri) throws IOException {
     try (FSDataInputStream fsDis = nfly.open(testFile)) {
-      assertEquals("Wrong file content", testString, fsDis.readUTF());
+      assertEquals(testString, fsDis.readUTF(), "Wrong file content");
     }
   }
 

+ 7 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithAcls.java

@@ -28,11 +28,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
 
 import java.io.IOException;
 import java.util.List;
@@ -42,8 +38,8 @@ import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
 import static org.apache.hadoop.fs.permission.AclEntryType.*;
 import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Verify ACL through ViewFileSystem functionality.
@@ -61,7 +57,7 @@ public class TestViewFileSystemWithAcls {
   private FileSystemTestHelper fileSystemTestHelper =
       new FileSystemTestHelper("/tmp/TestViewFileSystemWithAcls");
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException {
     clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(clusterConf)
@@ -74,14 +70,14 @@ public class TestViewFileSystemWithAcls {
     fHdfs2 = cluster.getFileSystem(1);
   }
 
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fHdfs;
     fsTarget2 = fHdfs2;
@@ -105,7 +101,7 @@ public class TestViewFileSystemWithAcls {
     ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
     fsTarget2.delete(fileSystemTestHelper.getTestRootPath(fsTarget2), true);

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithTruncate.java

@@ -30,13 +30,13 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Verify truncate through ViewFileSystem functionality.
@@ -53,7 +53,7 @@ public class TestViewFileSystemWithTruncate {
   private FileSystemTestHelper fileSystemTestHelper =
       new FileSystemTestHelper("/tmp/TestViewFileSystemWithXAttrs");
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException {
     cluster = new MiniDFSCluster.Builder(clusterConf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
@@ -63,14 +63,14 @@ public class TestViewFileSystemWithTruncate {
     fHdfs = cluster.getFileSystem(0);
   }
 
-  @AfterClass
+  @AfterAll
   public static void clusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fHdfs;
     targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
@@ -89,7 +89,7 @@ public class TestViewFileSystemWithTruncate {
         .addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
   }

+ 7 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java

@@ -24,16 +24,12 @@ import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
 
 import java.io.IOException;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Verify XAttrs through ViewFileSystem functionality.
@@ -57,7 +53,7 @@ public class TestViewFileSystemWithXAttrs {
   protected static final String name2 = "user.a2";
   protected static final byte[] value2 = {0x37, 0x38, 0x39};
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException {
     cluster = new MiniDFSCluster.Builder(clusterConf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
@@ -69,14 +65,14 @@ public class TestViewFileSystemWithXAttrs {
     fHdfs2 = cluster.getFileSystem(1);
   }
 
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fHdfs;
     fsTarget2 = fHdfs2;
@@ -102,7 +98,7 @@ public class TestViewFileSystemWithXAttrs {
         targetTestRoot2.toUri());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
     fsTarget2.delete(fileSystemTestHelper.getTestRootPath(fsTarget2), true);

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java

@@ -30,9 +30,9 @@ import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Make sure that ViewFs works when the root of an FS is mounted to a ViewFs
@@ -49,7 +49,7 @@ public class TestViewFsAtHdfsRoot extends ViewFsBaseTest {
     return new FileContextTestHelper("/tmp/TestViewFsAtHdfsRoot");
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
     SupportsBlocks = true;
@@ -62,7 +62,7 @@ public class TestViewFsAtHdfsRoot extends ViewFsBaseTest {
   }
 
       
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -70,7 +70,7 @@ public class TestViewFsAtHdfsRoot extends ViewFsBaseTest {
   }
 
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     // create the test root on local_fs
     fcTarget = fc;

+ 6 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java

@@ -28,9 +28,7 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRIT
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.net.URI;
@@ -49,9 +47,9 @@ import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * Tests for viewfs implementation of default fs level values.
@@ -74,7 +72,7 @@ public class TestViewFsDefaultValue {
   // Use NotInMountpoint path to trigger the exception
   private static Path notInMountpointPath;
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
 
@@ -218,7 +216,7 @@ public class TestViewFsDefaultValue {
     assertTrue(qu.getSpaceConsumed() > 0);
   }
 
-  @AfterClass
+  @AfterAll
   public static void cleanup() throws IOException {
     fHdfs.delete(new Path(testFileName), true);
     fHdfs.delete(notInMountpointPath, true);

+ 13 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java

@@ -23,8 +23,8 @@ package org.apache.hadoop.fs.viewfs;
  * Since viewfs has overlayed ViewFsFileStatus, we ran into
  * serialization problems. THis test is test the fix.
  */
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 
 import java.io.IOException;
 import java.net.URI;
@@ -43,9 +43,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 public class TestViewFsFileStatusHdfs {
   
@@ -59,7 +59,7 @@ public class TestViewFsFileStatusHdfs {
   private static FileSystem fHdfs;
   private static FileSystem vfs;
   
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
@@ -108,15 +108,15 @@ public class TestViewFsFileStatusHdfs {
     // Get checksum of different file in HDFS
     FileChecksum otherHdfsFileCheckSum = fHdfs.getFileChecksum(
       new Path(someFile+"other"));
-    // Checksums of the same file (got through HDFS and ViewFS should be same)
-    assertEquals("HDFS and ViewFS checksums were not the same", viewFSCheckSum,
-      hdfsCheckSum);
-    // Checksum of different files should be different.
-    assertFalse("Some other HDFS file which should not have had the same " +
-      "checksum as viewFS did!", viewFSCheckSum.equals(otherHdfsFileCheckSum));
+    // Checksums of the same file (obtained through HDFS and ViewFS) should be the same.
+    assertEquals(viewFSCheckSum, hdfsCheckSum,
+        "HDFS and ViewFS checksums were not the same");
+    // Checksums of different files should be different.
+    assertFalse(viewFSCheckSum.equals(otherHdfsFileCheckSum),
+        "Some other HDFS file should not have had the same checksum as viewFS!");
   }
 
-  @AfterClass
+  @AfterAll
   public static void cleanup() throws IOException {
     fHdfs.delete(new Path(testfilename), true);
     fHdfs.delete(new Path(someFile), true);

+ 10 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java

@@ -36,13 +36,13 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.fail;
 
 public class TestViewFsHdfs extends ViewFsBaseTest {
 
@@ -56,7 +56,7 @@ public class TestViewFsHdfs extends ViewFsBaseTest {
   }
 
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
     SupportsBlocks = true;
@@ -72,7 +72,7 @@ public class TestViewFsHdfs extends ViewFsBaseTest {
   }
 
       
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
@@ -80,7 +80,7 @@ public class TestViewFsHdfs extends ViewFsBaseTest {
   }
 
   @Override
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     // create the test root on local_fs
     fcTarget = fc;
@@ -160,7 +160,7 @@ public class TestViewFsHdfs extends ViewFsBaseTest {
     FileContext otherfs = map.get("user1");
     otherfs.mkdir(user1Path, FileContext.DEFAULT_PERM, false);
     String owner = otherfs.getFileStatus(user1Path).getOwner();
-    assertEquals("The owner did not match ", owner, userUgi.getShortUserName());
+    assertEquals(owner, userUgi.getShortUserName(), "The owner did not match");
     otherfs.delete(user1Path, false);
   }
  

+ 10 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsLinkFallback.java

@@ -18,10 +18,7 @@
 package org.apache.hadoop.fs.viewfs;
 
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -46,11 +43,11 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * Test for viewfs with LinkFallback mount table entries.
@@ -62,7 +59,7 @@ public class TestViewFsLinkFallback {
   private static URI viewFsDefaultClusterUri;
   private Path targetTestRoot;
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning()
       throws IOException, URISyntaxException {
     int nameSpacesCount = 3;
@@ -88,14 +85,14 @@ public class TestViewFsLinkFallback {
 
   }
 
-  @AfterClass
+  @AfterAll
   public static void clusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fsTarget = fsDefault;
     initializeTargetTestRoot();
@@ -295,7 +292,7 @@ public class TestViewFsLinkFallback {
       // attempt to create in fallback.
       vfs.mkdir(nextLevelToInternalDir, FsPermission.getDirDefault(),
           false);
-      Assert.fail("It should throw IOE when fallback fs not available.");
+      Assertions.fail("It should throw IOE when fallback fs not available.");
     } catch (IOException e) {
       cluster.restartNameNodes();
       // should succeed when fallback fs is back to normal.
@@ -570,7 +567,7 @@ public class TestViewFsLinkFallback {
     fs.rename(src, dst, Options.Rename.OVERWRITE);
     LambdaTestUtils
         .intercept(FileNotFoundException.class, () -> fs.getFileStatus(src));
-    Assert.assertNotNull(fs.getFileStatus(dst));
+    Assertions.assertNotNull(fs.getFileStatus(dst));
   }
 
 }

+ 8 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithAcls.java

@@ -29,11 +29,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.util.Lists;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
+
 import java.util.List;
 
 import java.io.IOException;
@@ -44,8 +41,8 @@ import static org.apache.hadoop.fs.permission.AclEntryType.*;
 import static org.apache.hadoop.fs.permission.FsAction.*;
 import static org.apache.hadoop.fs.permission.FsAction.NONE;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Verify ACL through ViewFs functionality.
@@ -61,7 +58,7 @@ public class TestViewFsWithAcls {
   private FileContextTestHelper fileContextTestHelper =
       new FileContextTestHelper("/tmp/TestViewFsWithAcls");
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException {
     clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     cluster = new MiniDFSCluster.Builder(clusterConf)
@@ -74,14 +71,14 @@ public class TestViewFsWithAcls {
     fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
   }
 
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fcTarget = fc;
     fcTarget2 = fc2;
@@ -105,7 +102,7 @@ public class TestViewFsWithAcls {
     ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
     fcTarget2.delete(fileContextTestHelper.getTestRootPath(fcTarget2), true);

+ 7 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java

@@ -25,16 +25,12 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.*;
 
 import java.io.IOException;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 /**
  * Verify XAttrs through ViewFs functionality.
@@ -56,7 +52,7 @@ public class TestViewFsWithXAttrs {
   protected static final String name2 = "user.a2";
   protected static final byte[] value2 = {0x37, 0x38, 0x39};
 
-  @BeforeClass
+  @BeforeAll
   public static void clusterSetupAtBeginning() throws IOException {
     cluster = new MiniDFSCluster.Builder(clusterConf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
@@ -68,14 +64,14 @@ public class TestViewFsWithXAttrs {
     fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
   }
 
-  @AfterClass
+  @AfterAll
   public static void ClusterShutdownAtEnd() throws Exception {
     if (cluster != null) {
       cluster.shutdown();
     }
   }
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     fcTarget = fc;
     fcTarget2 = fc2;
@@ -99,7 +95,7 @@ public class TestViewFsWithXAttrs {
     ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
     fcTarget2.delete(fileContextTestHelper.getTestRootPath(fcTarget2), true);

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AdminStatesBaseTest.java

@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -29,6 +29,7 @@ import java.util.Map;
 import java.util.Random;
 
 import org.apache.hadoop.util.Lists;
+import org.junit.jupiter.api.AfterEach;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -46,8 +47,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.util.HostsFileWriter;
-import org.junit.After;
-import org.junit.Before;
+import org.junit.jupiter.api.BeforeEach;
 
 /**
  * This class provide utilities for testing of the admin operations of nodes.
@@ -81,7 +81,7 @@ public class AdminStatesBaseTest {
     return cluster;
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     // Set up the hosts/exclude files.
     hostsFileWriter = new HostsFileWriter();
@@ -108,7 +108,7 @@ public class AdminStatesBaseTest {
 
   }
 
-  @After
+  @AfterEach
   public void teardown() throws IOException {
     hostsFileWriter.cleanup();
     shutdownCluster();
@@ -381,7 +381,7 @@ public class AdminStatesBaseTest {
   protected static void validateCluster(DFSClient client, int numDNs)
       throws IOException {
     DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
-    assertEquals("Number of Datanodes ", numDNs, info.length);
+    assertEquals(numDNs, info.length, "Number of Datanodes");
   }
 
   /** Start a MiniDFSCluster.

+ 8 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java

@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -97,9 +95,9 @@ public class AppendTestUtil {
     }
     
     LOG.info("partition=" + Arrays.toString(p));
-    assertTrue("i=0", p[0] > 0 && p[0] < n);
+    assertTrue(p[0] > 0 && p[0] < n, "i=0");
     for(int i = 1; i < p.length; i++) {
-      assertTrue("i=" + i, p[i] > p[i - 1] && p[i] < n);
+      assertTrue(p[i] > p[i - 1] && p[i] < n, "i=" + i);
     }
     return p;
   }
@@ -217,8 +215,8 @@ public class AppendTestUtil {
       boolean checkFileStatus) throws IOException {
     if (checkFileStatus) {
       final FileStatus status = fs.getFileStatus(name);
-      assertEquals("len=" + len + " but status.getLen()=" + status.getLen(),
-          len, status.getLen());
+      assertEquals(len, status.getLen(),
+          "len=" + len + " but status.getLen()=" + status.getLen());
     }
 
     FSDataInputStream stm = fs.open(name);
@@ -231,9 +229,9 @@ public class AppendTestUtil {
   private static void checkData(final byte[] actual, int from,
                                 final byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
-      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
-                   expected[from+idx]+" actual "+actual[idx],
-                   expected[from+idx], actual[idx]);
+      assertEquals(expected[from + idx], actual[idx],
+          message + " byte " + (from + idx) + " differs. expected "
+          + expected[from + idx] + " actual " + actual[idx]);
       actual[idx] = 0;
     }
   }

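Because checkData builds its failure message on every loop iteration even when the bytes match, JUnit 5's Supplier<String> overload could defer that concatenation to the failure path. A possible variant (sketch only, not part of this patch):

    import static org.junit.jupiter.api.Assertions.assertEquals;

    class LazyMessageSketch {
      static void checkData(byte[] actual, int from, byte[] expected, String message) {
        for (int idx = 0; idx < actual.length; idx++) {
          final int i = idx;
          // The lambda runs only when the assertion fails.
          assertEquals(expected[from + i], actual[i],
              () -> message + " byte " + (from + i) + " differs. expected "
                  + expected[from + i] + " actual " + actual[i]);
        }
      }
    }
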
+ 48 - 51
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -25,10 +25,7 @@ import static org.apache.hadoop.fs.CreateFlag.OVERWRITE;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.BufferedOutputStream;
 import java.io.BufferedReader;
@@ -190,8 +187,8 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.VersionInfo;
-import org.junit.Assert;
-import org.junit.Assume;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Assumptions;
 import org.apache.hadoop.util.ToolRunner;
 
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
@@ -1672,15 +1669,15 @@ public class DFSTestUtil {
   }
 
   public static void checkComponentsEquals(byte[][] expected, byte[][] actual) {
-    assertEquals("expected: " + DFSUtil.byteArray2PathString(expected)
-        + ", actual: " + DFSUtil.byteArray2PathString(actual), expected.length,
-        actual.length);
+    assertEquals(expected.length, actual.length,
+        "expected: " + DFSUtil.byteArray2PathString(expected)
+        + ", actual: " + DFSUtil.byteArray2PathString(actual));
     int i = 0;
     for (byte[] e : expected) {
       byte[] actualComponent = actual[i++];
-      assertTrue("expected: " + DFSUtil.bytes2String(e) + ", actual: "
-          + DFSUtil.bytes2String(actualComponent),
-          Arrays.equals(e, actualComponent));
+      assertTrue(Arrays.equals(e, actualComponent),
+          "expected: " + DFSUtil.bytes2String(e) + ", actual: "
+          + DFSUtil.bytes2String(actualComponent));
     }
   }
 
@@ -1699,7 +1696,7 @@ public class DFSTestUtil {
       this.sockDir = new TemporarySocketDirectory();
       DomainSocket.disableBindPathValidation();
       formerTcpReadsDisabled = DFSInputStream.tcpReadsDisabledForTesting;
-      Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
+      Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
     }
     
     public Configuration newConfiguration() {
@@ -1737,7 +1734,7 @@ public class DFSTestUtil {
     try (FSDataInputStream in1 = fs.open(p1);
          FSDataInputStream in2 = fs.open(p2)) {
       for (int i = 0; i < len; i++) {
-        assertEquals("Mismatch at byte " + i, in1.read(), in2.read());
+        assertEquals(in1.read(), in2.read(), "Mismatch at byte " + i);
       }
     }
   }
@@ -1813,32 +1810,32 @@ public class DFSTestUtil {
         client.getReplicatedBlockStats();
     ECBlockGroupStats ecBlockGroupStats = client.getECBlockGroupStats();
 
-    assertEquals("Under replicated stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
-        aggregatedStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
-    assertEquals("Low redundancy stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
-        replicatedBlockStats.getLowRedundancyBlocks() +
-            ecBlockGroupStats.getLowRedundancyBlockGroups());
-    assertEquals("Corrupt blocks stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX],
-        replicatedBlockStats.getCorruptBlocks() +
-            ecBlockGroupStats.getCorruptBlockGroups());
-    assertEquals("Missing blocks stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX],
-        replicatedBlockStats.getMissingReplicaBlocks() +
-            ecBlockGroupStats.getMissingBlockGroups());
-    assertEquals("Missing blocks with replication factor one not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX],
-        replicatedBlockStats.getMissingReplicationOneBlocks());
-    assertEquals("Bytes in future blocks stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX],
-        replicatedBlockStats.getBytesInFutureBlocks() +
-            ecBlockGroupStats.getBytesInFutureBlockGroups());
-    assertEquals("Pending deletion blocks stats not matching!",
-        aggregatedStats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX],
-        replicatedBlockStats.getPendingDeletionBlocks() +
-            ecBlockGroupStats.getPendingDeletionBlocks());
+    assertEquals(
+        aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
+        aggregatedStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX],
+        "Under replicated stats not matching!");
+    assertEquals(
+        aggregatedStats[ClientProtocol.GET_STATS_LOW_REDUNDANCY_IDX],
+        replicatedBlockStats.getLowRedundancyBlocks() +
+            ecBlockGroupStats.getLowRedundancyBlockGroups(),
+        "Low redundancy stats not matching!");
+    assertEquals(
+        aggregatedStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX],
+        replicatedBlockStats.getCorruptBlocks() +
+            ecBlockGroupStats.getCorruptBlockGroups(),
+        "Corrupt blocks stats not matching!");
+    assertEquals(
+        aggregatedStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX],
+        replicatedBlockStats.getMissingReplicaBlocks() +
+            ecBlockGroupStats.getMissingBlockGroups(),
+        "Missing blocks stats not matching!");
+    assertEquals(
+        aggregatedStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX],
+        replicatedBlockStats.getMissingReplicationOneBlocks(),
+        "Missing blocks with replication factor one not matching!");
+    assertEquals(
+        aggregatedStats[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX],
+        replicatedBlockStats.getBytesInFutureBlocks() +
+            ecBlockGroupStats.getBytesInFutureBlockGroups(),
+        "Bytes in future blocks stats not matching!");
+    assertEquals(
+        aggregatedStats[ClientProtocol.GET_STATS_PENDING_DELETION_BLOCKS_IDX],
+        replicatedBlockStats.getPendingDeletionBlocks() +
+            ecBlockGroupStats.getPendingDeletionBlocks(),
+        "Pending deletion blocks stats not matching!");
   }
 
   /**
@@ -1884,8 +1881,8 @@ public class DFSTestUtil {
       ExtendedBlock blk) {
     BlockManager bm0 = nn.getNamesystem().getBlockManager();
     BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
-    assertTrue("Block " + blk + " should be under construction, " +
-        "got: " + storedBlock, !storedBlock.isComplete());
+    assertTrue(!storedBlock.isComplete(),
+        "Block " + blk + " should be under construction, " +
+        "got: " + storedBlock);
     // We expect that the replica with the most recent heart beat will be
     // the one to be in charge of the synchronization / recovery protocol.
     final DatanodeStorageInfo[] storages = storedBlock
@@ -1933,8 +1930,8 @@ public class DFSTestUtil {
     }
     assertEquals(retcode, ret);
     if (contain != null) {
-      assertTrue("The real output is: " + output + ".\n It should contain: "
-          + contain, output.contains(contain));
+      assertTrue(output.contains(contain),
+          "The real output is: " + output + ".\n It should contain: " + contain);
     }
   }
 
@@ -2338,23 +2335,23 @@ public class DFSTestUtil {
 
   public static void verifyDelete(FsShell shell, FileSystem fs, Path path,
       Path trashPath, boolean shouldExistInTrash) throws Exception {
-    assertTrue(path + " file does not exist", fs.exists(path));
+    assertTrue(fs.exists(path), path + " file does not exist");
 
     // Verify that trashPath has a path component named ".Trash"
     Path checkTrash = trashPath;
     while (!checkTrash.isRoot() && !checkTrash.getName().equals(".Trash")) {
       checkTrash = checkTrash.getParent();
     }
-    assertEquals("No .Trash component found in trash path " + trashPath,
-        ".Trash", checkTrash.getName());
+    assertEquals(".Trash", checkTrash.getName(),
+        "No .Trash component found in trash path " + trashPath);
 
     String[] argv = new String[]{"-rm", "-r", path.toString()};
     int res = ToolRunner.run(shell, argv);
-    assertEquals("rm failed", 0, res);
+    assertEquals(0, res, "rm failed");
     if (shouldExistInTrash) {
-      assertTrue("File not in trash : " + trashPath, fs.exists(trashPath));
+      assertTrue(fs.exists(trashPath), "File not in trash : " + trashPath);
     } else {
-      assertFalse("File in trash : " + trashPath, fs.exists(trashPath));
+      assertFalse(fs.exists(trashPath), "File in trash : " + trashPath);
     }
   }
 
@@ -2563,7 +2560,7 @@ public class DFSTestUtil {
       Path filePath, int namenodeCount, boolean createMoverPath)
           throws IOException {
     final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf);
-    Assert.assertEquals(namenodeCount, namenodes.size());
+    Assertions.assertEquals(namenodeCount, namenodes.size());
     NameNodeConnector.checkOtherInstanceRunning(createMoverPath);
     while (true) {
       try {

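The fully qualified calls in DFSTestUtil rely on the class-level mapping: org.junit.Assert becomes org.junit.jupiter.api.Assertions and org.junit.Assume becomes org.junit.jupiter.api.Assumptions; a failed assumption still aborts (skips) the test rather than failing it. Sketch with hypothetical arguments:

    import org.junit.jupiter.api.Assertions;
    import org.junit.jupiter.api.Assumptions;

    class ClassMappingSketch {
      void guardedCheck(Object nativeLib, int expected, int actual) {
        // Skips the test when the precondition is not met.
        Assumptions.assumeTrue(nativeLib != null);
        Assertions.assertEquals(expected, actual);
      }
    }
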
+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java

@@ -26,9 +26,9 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /** This is a comprehensive append test that tries
  * all combinations of file length and number of appended bytes
@@ -59,7 +59,7 @@ public class FileAppendTest4 {
     conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE);
   }
   
-  @BeforeClass
+  @BeforeAll
   public static void startUp () throws IOException {
     conf = new HdfsConfiguration();
     init(conf);
@@ -67,7 +67,7 @@ public class FileAppendTest4 {
     fs = cluster.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.shutdown();

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ParameterizedTestDFSStripedOutputStreamWithFailure.java

@@ -27,7 +27,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 
-import static org.junit.Assume.assumeTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
 
 /**
  * Test striped file write operation with data node failures with parameterized
@@ -62,9 +62,9 @@ public class ParameterizedTestDFSStripedOutputStreamWithFailure extends
     }
     final int i = base;
     final Integer length = getLength(i);
-    assumeTrue("Skip test " + i + " since length=null.", length != null);
-    assumeTrue("Test " + i + ", length=" + length
-        + ", is not chosen to run.", RANDOM.nextInt(16) != 0);
+    assumeTrue(length != null, "Skip test " + i + " since length=null.");
+    assumeTrue(RANDOM.nextInt(16) != 0, "Test " + i + ", length=" + length
+        + ", is not chosen to run.");
     System.out.println("Run test " + i + ", length=" + length);
     runTest(length);
   }

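Assumptions get the same argument flip as assertions: the condition comes first and the message last. Sketch with hypothetical values:

    import static org.junit.jupiter.api.Assumptions.assumeTrue;

    class AssumptionOrderSketch {
      void maybeRun(Integer length, int i) {
        // JUnit 4 was: assumeTrue("Skip test " + i + " since length=null.", length != null);
        assumeTrue(length != null, "Skip test " + i + " since length=null.");
      }
    }
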
+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ReadStripedFileWithDecodingHelper.java

@@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
+import org.junit.jupiter.api.Assertions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -200,11 +200,11 @@ abstract public class ReadStripedFileWithDecodingHelper {
         + ", parityBlkDelNum = " + parityBlkDelNum
         + ", deleteBlockFile? " + deleteBlockFile);
     int recoverBlkNum = dataBlkDelNum + parityBlkDelNum;
-    Assert.assertTrue("dataBlkDelNum and parityBlkDelNum should be positive",
-        dataBlkDelNum >= 0 && parityBlkDelNum >= 0);
-    Assert.assertTrue("The sum of dataBlkDelNum and parityBlkDelNum " +
-        "should be between 1 ~ " + NUM_PARITY_UNITS, recoverBlkNum <=
-        NUM_PARITY_UNITS);
+    Assertions.assertTrue(dataBlkDelNum >= 0 && parityBlkDelNum >= 0,
+        "dataBlkDelNum and parityBlkDelNum should be positive");
+    Assertions.assertTrue(recoverBlkNum <= NUM_PARITY_UNITS,
+        "The sum of dataBlkDelNum and parityBlkDelNum " +
+        "should be between 1 ~ " + NUM_PARITY_UNITS);
 
     // write a file with the length of writeLen
     Path srcPath = new Path(src);
@@ -232,10 +232,10 @@ abstract public class ReadStripedFileWithDecodingHelper {
 
     int[] delDataBlkIndices = StripedFileTestUtil.randomArray(0, NUM_DATA_UNITS,
         dataBlkDelNum);
-    Assert.assertNotNull(delDataBlkIndices);
+    Assertions.assertNotNull(delDataBlkIndices);
     int[] delParityBlkIndices = StripedFileTestUtil.randomArray(NUM_DATA_UNITS,
         NUM_DATA_UNITS + NUM_PARITY_UNITS, parityBlkDelNum);
-    Assert.assertNotNull(delParityBlkIndices);
+    Assertions.assertNotNull(delParityBlkIndices);
 
     int[] delBlkIndices = new int[recoverBlkNum];
     System.arraycopy(delDataBlkIndices, 0,

+ 22 - 22
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/StripedFileTestUtil.java

@@ -37,7 +37,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
-import org.junit.Assert;
+import org.junit.jupiter.api.Assertions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -55,7 +55,7 @@ import java.util.Set;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 public class StripedFileTestUtil {
   public static final Logger LOG =
@@ -77,7 +77,7 @@ public class StripedFileTestUtil {
   static void verifyLength(FileSystem fs, Path srcPath, int fileLength)
       throws IOException {
     FileStatus status = fs.getFileStatus(srcPath);
-    assertEquals("File length should be the same", fileLength, status.getLen());
+    assertEquals(fileLength, status.getLen(), "File length should be the same");
   }
 
   static void verifyPread(DistributedFileSystem fs, Path srcPath,
@@ -109,9 +109,9 @@ public class StripedFileTestUtil {
           offset += target;
         }
         for (int i = 0; i < fileLength - startOffset; i++) {
-          assertEquals("Byte at " + (startOffset + i) + " is different, "
-              + "the startOffset is " + startOffset, expected[startOffset + i],
-              result[i]);
+          assertEquals(expected[startOffset + i], result[i],
+              "Byte at " + (startOffset + i) + " is different, "
+              + "the startOffset is " + startOffset);
         }
       }
     }
@@ -127,8 +127,8 @@ public class StripedFileTestUtil {
         System.arraycopy(buf, 0, result, readLen, ret);
         readLen += ret;
       }
-      assertEquals("The length of file should be the same to write size", fileLength, readLen);
-      Assert.assertArrayEquals(expected, result);
+      assertEquals(fileLength, readLen,
+          "The length of file should be the same to write size");
+      Assertions.assertArrayEquals(expected, result);
     }
   }
 
@@ -144,8 +144,8 @@ public class StripedFileTestUtil {
         result.put(buf);
         buf.clear();
       }
-      assertEquals("The length of file should be the same to write size", fileLength, readLen);
-      Assert.assertArrayEquals(expected, result.array());
+        assertEquals(fileLength, readLen, "The length of file should be the same to write size");
+      Assertions.assertArrayEquals(expected, result.array());
     }
   }
 
@@ -185,14 +185,14 @@ public class StripedFileTestUtil {
       if (!(in.getWrappedStream() instanceof WebHdfsInputStream)) {
         try {
           in.seek(-1);
-          Assert.fail("Should be failed if seek to negative offset");
+          Assertions.fail("Should be failed if seek to negative offset");
         } catch (EOFException e) {
           // expected
         }
 
         try {
           in.seek(fileLength + 1);
-          Assert.fail("Should be failed if seek after EOF");
+          Assertions.fail("Should be failed if seek after EOF");
         } catch (EOFException e) {
           // expected
         }
@@ -206,8 +206,8 @@ public class StripedFileTestUtil {
     byte[] buf = new byte[writeBytes - pos];
     IOUtils.readFully(fsdis, buf, 0, buf.length);
     for (int i = 0; i < buf.length; i++) {
-      assertEquals("Byte at " + i + " should be the same",
-          StripedFileTestUtil.getByte(pos + i), buf[i]);
+      assertEquals(StripedFileTestUtil.getByte(pos + i), buf[i],
+          "Byte at " + i + " should be the same");
     }
   }
 
@@ -225,7 +225,7 @@ public class StripedFileTestUtil {
       final DatanodeInfo[] datanodes = streamer.getNodes();
       if (datanodes != null) {
         assertEquals(1, datanodes.length);
-        Assert.assertNotNull(datanodes[0]);
+        Assertions.assertNotNull(datanodes[0]);
         return datanodes[0];
       }
       try {
@@ -377,13 +377,13 @@ public class StripedFileTestUtil {
     final int parityBlkNum = ecPolicy.getNumParityUnits();
     int index = 0;
     for (LocatedBlock firstBlock : lbs.getLocatedBlocks()) {
-      Assert.assertTrue(firstBlock instanceof LocatedStripedBlock);
+      Assertions.assertTrue(firstBlock instanceof LocatedStripedBlock);
 
       final long gs = firstBlock.getBlock().getGenerationStamp();
       final long oldGS = oldGSList != null ? oldGSList.get(index++) : -1L;
       final String s = "gs=" + gs + ", oldGS=" + oldGS;
       LOG.info(s);
-      Assert.assertTrue(s, gs >= oldGS);
+      Assertions.assertTrue(gs >= oldGS, s);
 
       LocatedBlock[] blocks = StripedBlockUtil.parseStripedBlockGroup(
           (LocatedStripedBlock) firstBlock, cellSize,
@@ -456,7 +456,7 @@ public class StripedFileTestUtil {
         for (int posInBlk = 0; posInBlk < actual.length; posInBlk++) {
           final long posInFile = StripedBlockUtil.offsetInBlkToOffsetInBG(
               cellSize, dataBlkNum, posInBlk, i) + groupPosInFile;
-          Assert.assertTrue(posInFile < length);
+          Assertions.assertTrue(posInFile < length);
           final byte expected = getByte(posInFile);
 
           if (killed) {
@@ -466,7 +466,7 @@ public class StripedFileTestUtil {
               String s = "expected=" + expected + " but actual=" + actual[posInBlk]
                   + ", posInFile=" + posInFile + ", posInBlk=" + posInBlk
                   + ". group=" + group + ", i=" + i;
-              Assert.fail(s);
+              Assertions.fail(s);
             }
           }
         }
@@ -507,12 +507,12 @@ public class StripedFileTestUtil {
     try {
       encoder.encode(dataBytes, expectedParityBytes);
     } catch (IOException e) {
-      Assert.fail("Unexpected IOException: " + e.getMessage());
+      Assertions.fail("Unexpected IOException: " + e.getMessage());
     }
     for (int i = 0; i < parityBytes.length; i++) {
       if (checkSet.contains(i + dataBytes.length)){
-        Assert.assertArrayEquals("i=" + i, expectedParityBytes[i],
-            parityBytes[i]);
+        Assertions.assertArrayEquals(expectedParityBytes[i],
+            parityBytes[i], "i=" + i);
       }
     }
   }

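The seek checks above keep the JUnit 4 try/fail/catch idiom, which still works under JUnit 5. assertThrows would express the same intent more directly; a possible follow-up (sketch only, not part of this patch):

    import static org.junit.jupiter.api.Assertions.assertThrows;

    import java.io.EOFException;
    import org.apache.hadoop.fs.FSDataInputStream;

    class SeekSketch {
      void verifySeekBoundsRejected(FSDataInputStream in, int fileLength) {
        // Fails the test unless the lambda throws EOFException.
        assertThrows(EOFException.class, () -> in.seek(-1));
        assertThrows(EOFException.class, () -> in.seek(fileLength + 1));
      }
    }
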
+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 
@@ -30,10 +30,10 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Test abandoning blocks, which clients do on pipeline creation failure.
@@ -48,14 +48,14 @@ public class TestAbandonBlock {
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
 
-  @Before
+  @BeforeEach
   public void setUp() throws Exception {
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
     fs = cluster.getFileSystem();
     cluster.waitActive();
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (fs != null) {
       fs.close();
@@ -100,8 +100,8 @@ public class TestAbandonBlock {
     cluster.restartNameNode();
     blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
         Integer.MAX_VALUE);
-    Assert.assertEquals("Blocks " + b + " has not been abandoned.",
-        orginalNumBlocks, blocks.locatedBlockCount() + 1);
+    Assertions.assertEquals(orginalNumBlocks, blocks.locatedBlockCount() + 1,
+        "Blocks " + b + " has not been abandoned.");
   }
 
   @Test

+ 164 - 165
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAclsEndToEnd.java

@@ -43,12 +43,11 @@ import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.junit.Assert;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests the ACLs system through the full code path.  It overlaps
@@ -89,7 +88,7 @@ public class TestAclsEndToEnd {
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
 
-  @BeforeClass
+  @BeforeAll
   public static void captureUser() throws IOException {
     realUgi = UserGroupInformation.getCurrentUser();
     realUser = System.getProperty("user.name");
@@ -174,7 +173,7 @@ public class TestAclsEndToEnd {
 
       kmsDir = new File(fsHelper.getTestRootDir()).getAbsoluteFile();
 
-      Assert.assertTrue(kmsDir.mkdirs());
+      Assertions.assertTrue(kmsDir.mkdirs());
     }
 
     writeConf(kmsDir, conf);
@@ -411,66 +410,66 @@ public class TestAclsEndToEnd {
     try {
       setup(conf);
 
-      // Create a test key
-      assertTrue("Exception during creation of key " + KEY1 + " by "
-          + keyadminUgi.getUserName(), createKey(keyadminUgi, KEY1, conf));
+      // Create a test key
+      assertTrue(createKey(keyadminUgi, KEY1, conf),
+          "Exception during creation of key " + KEY1 + " by "
+          + keyadminUgi.getUserName());
 
-      // Fail to create a test key
-      assertFalse("Allowed creation of key " + KEY2 + " by "
-          + hdfsUgi.getUserName(), createKey(hdfsUgi, KEY2, conf));
-      assertFalse("Allowed creation of key " + KEY2 + " by "
-          + userUgi.getUserName(), createKey(userUgi, KEY2, conf));
+      // Fail to create a test key
+      assertFalse(createKey(hdfsUgi, KEY2, conf),
+          "Allowed creation of key " + KEY2 + " by "
+          + hdfsUgi.getUserName());
+      assertFalse(createKey(userUgi, KEY2, conf),
+          "Allowed creation of key " + KEY2 + " by "
+          + userUgi.getUserName());
 
       // Create a directory and chown it to the normal user.
       fs.mkdirs(ZONE1);
       fs.setOwner(ZONE1, userUgi.getUserName(),
           userUgi.getPrimaryGroupName());
 
-      // Create an EZ
-      assertTrue("Exception during creation of EZ " + ZONE1 + " by "
-          + hdfsUgi.getUserName() + " using key " + KEY1,
-            createEncryptionZone(hdfsUgi, KEY1, ZONE1));
-
-      // Fail to create an EZ
-      assertFalse("Allowed creation of EZ " + ZONE2 + " by "
-          + keyadminUgi.getUserName() + " using key " + KEY1,
-            createEncryptionZone(keyadminUgi, KEY1, ZONE2));
-      assertFalse("Allowed creation of EZ " + ZONE2 + " by "
-          + userUgi.getUserName() + " using key " + KEY1,
-            createEncryptionZone(userUgi, KEY1, ZONE2));
-
-      // Create a file in the zone
-      assertTrue("Exception during creation of file " + FILE1 + " by "
-          + userUgi.getUserName(), createFile(userUgi, FILE1, TEXT));
-
-      // Fail to create a file in the zone
-      assertFalse("Allowed creation of file " + FILE1A + " by "
-          + hdfsUgi.getUserName(), createFile(hdfsUgi, FILE1A, TEXT));
-      assertFalse("Allowed creation of file " + FILE1A + " by "
-          + keyadminUgi.getUserName(), createFile(keyadminUgi, FILE1A, TEXT));
-
-      // Read a file in the zone
-      assertTrue("Exception while reading file " + FILE1 + " by "
-          + userUgi.getUserName(), compareFile(userUgi, FILE1, TEXT));
-
-      // Fail to read a file in the zone
-      assertFalse("Allowed reading of file " + FILE1 + " by "
-          + hdfsUgi.getUserName(), compareFile(hdfsUgi, FILE1, TEXT));
-      assertFalse("Allowed reading of file " + FILE1 + " by "
-          + keyadminUgi.getUserName(), compareFile(keyadminUgi, FILE1, TEXT));
+      // Create an EZ
+      assertTrue(createEncryptionZone(hdfsUgi, KEY1, ZONE1),
+          "Exception during creation of EZ " + ZONE1 + " by "
+          + hdfsUgi.getUserName() + " using key " + KEY1);
+
+      // Fail to create an EZ
+      assertFalse(createEncryptionZone(keyadminUgi, KEY1, ZONE2),
+          "Allowed creation of EZ " + ZONE2 + " by "
+          + keyadminUgi.getUserName() + " using key " + KEY1);
+      assertFalse(createEncryptionZone(userUgi, KEY1, ZONE2),
+          "Allowed creation of EZ " + ZONE2 + " by "
+          + userUgi.getUserName() + " using key " + KEY1);
+
+      // Create a file in the zone
+      assertTrue(createFile(userUgi, FILE1, TEXT),
+          "Exception during creation of file " + FILE1 + " by "
+          + userUgi.getUserName());
+
+      // Fail to create a file in the zone
+      assertFalse(createFile(hdfsUgi, FILE1A, TEXT),
+          "Allowed creation of file " + FILE1A + " by "
+          + hdfsUgi.getUserName());
+      assertFalse(createFile(keyadminUgi, FILE1A, TEXT),
+          "Allowed creation of file " + FILE1A + " by "
+          + keyadminUgi.getUserName());
+
+      // Read a file in the zone
+      assertTrue(compareFile(userUgi, FILE1, TEXT),
+          "Exception while reading file " + FILE1 + " by "
+          + userUgi.getUserName());
+
+      // Fail to read a file in the zone
+      assertFalse(compareFile(hdfsUgi, FILE1, TEXT),
+          "Allowed reading of file " + FILE1 + " by "
+          + hdfsUgi.getUserName());
+      assertFalse(compareFile(keyadminUgi, FILE1, TEXT),
+          "Allowed reading of file " + FILE1 + " by "
+          + keyadminUgi.getUserName());
 
       // Remove the zone
       fs.delete(ZONE1, true);
 
-      // Fail to remove the key
-      assertFalse("Allowed deletion of file " + FILE1 + " by "
-          + hdfsUgi.getUserName(), deleteKey(hdfsUgi, KEY1));
-      assertFalse("Allowed deletion of file " + FILE1 + " by "
-          + userUgi.getUserName(), deleteKey(userUgi, KEY1));
+      // Fail to remove the key
+      assertFalse(deleteKey(hdfsUgi, KEY1),
+          "Allowed deletion of file " + FILE1 + " by "
+          + hdfsUgi.getUserName());
+      assertFalse(deleteKey(userUgi, KEY1),
+          "Allowed deletion of file " + FILE1 + " by "
+          + userUgi.getUserName());
 
-      // Remove
-      assertTrue("Exception during deletion of file " + FILE1 + " by "
-          + keyadminUgi.getUserName(), deleteKey(keyadminUgi, KEY1));
+      // Remove
+      assertTrue(deleteKey(keyadminUgi, KEY1),
+          "Exception during deletion of file " + FILE1 + " by "
+          + keyadminUgi.getUserName());
     } finally {
       fs.delete(ZONE1, true);
       fs.delete(ZONE2, true);
@@ -495,8 +494,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf);
 
-      assertTrue("Exception during key creation with correct config"
-          + " using whitelist key ACLs", createKey(realUgi, KEY1, conf));
+      assertTrue(createKey(realUgi, KEY1, conf),
+          "Exception during key creation with correct config"
+          + " using whitelist key ACLs");
     } finally {
       teardown();
     }
@@ -512,8 +511,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf);
 
-      assertTrue("Exception during key creation with correct config"
-          + " using default key ACLs", createKey(realUgi, KEY2, conf));
+      assertTrue(createKey(realUgi, KEY2, conf),
+          "Exception during key creation with correct config"
+          + " using default key ACLs");
     } finally {
       teardown();
     }
@@ -531,8 +530,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf);
 
-      assertFalse("Allowed key creation with blacklist for CREATE",
-          createKey(realUgi, KEY3, conf));
+      assertFalse(createKey(realUgi, KEY3, conf),
+          "Allowed key creation with blacklist for CREATE");
     } finally {
       teardown();
     }
@@ -547,8 +546,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf);
 
-      assertFalse("Allowed key creation without CREATE KMS ACL",
-          createKey(realUgi, KEY3, conf));
+      assertFalse(createKey(realUgi, KEY3, conf),
+          "Allowed key creation without CREATE KMS ACL");
     } finally {
       teardown();
     }
@@ -562,8 +561,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf);
 
-      assertFalse("Allowed key creation without MANAGMENT key ACL",
-          createKey(realUgi, KEY3, conf));
+      assertFalse(createKey(realUgi, KEY3, conf),
+          "Allowed key creation without MANAGMENT key ACL");
     } finally {
       teardown();
     }
@@ -581,8 +580,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf);
 
-      assertFalse("Allowed key creation when default key ACL should have been"
-          + " overridden by key ACL", createKey(realUgi, KEY3, conf));
+        assertFalse(createKey(realUgi, KEY3, conf), "Allowed key creation when default key ACL should have been"
+                + " overridden by key ACL");
     } finally {
       teardown();
     }
@@ -596,8 +595,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf);
 
-      assertTrue("Exception during key creation with default KMS ACLs",
-          createKey(realUgi, KEY3, conf));
+      assertTrue(createKey(realUgi, KEY3, conf),
+          "Exception during key creation with default KMS ACLs");
     } finally {
       teardown();
     }
@@ -620,8 +619,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf);
 
-      assertTrue("Exception during key creation",
-          createKey(realUgi, KEY1, conf));
+      assertTrue(createKey(realUgi, KEY1, conf),
+          "Exception during key creation");
     } finally {
       teardown();
     }
@@ -647,8 +646,8 @@ public class TestAclsEndToEnd {
 
       fs.mkdirs(ZONE1);
 
-      assertTrue("Exception during zone creation with correct config using"
-          + " whitelist key ACLs", createEncryptionZone(realUgi, KEY1, ZONE1));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE1),
+          "Exception during zone creation with correct config using"
+          + " whitelist key ACLs");
     } finally {
       fs.delete(ZONE1, true);
       teardown();
@@ -671,8 +670,8 @@ public class TestAclsEndToEnd {
 
       fs.mkdirs(ZONE2);
 
-      assertTrue("Exception during zone creation with correct config using"
-          + " default key ACLs", createEncryptionZone(realUgi, KEY1, ZONE2));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE2),
+          "Exception during zone creation with correct config using"
+          + " default key ACLs");
     } finally {
       fs.delete(ZONE2, true);
       teardown();
@@ -697,9 +696,9 @@ public class TestAclsEndToEnd {
 
       fs.mkdirs(ZONE3);
 
-      assertFalse("Allowed creation of zone when default key ACLs should have"
-          + " been overridden by key ACL",
-            createEncryptionZone(realUgi, KEY1, ZONE3));
+      assertFalse(createEncryptionZone(realUgi, KEY1, ZONE3),
+          "Allowed creation of zone when default key ACLs should have"
+          + " been overridden by key ACL");
     } finally {
       fs.delete(ZONE3, true);
       teardown();
@@ -724,8 +723,8 @@ public class TestAclsEndToEnd {
 
       fs.mkdirs(ZONE3);
 
-      assertFalse("Allowed zone creation of zone with blacklisted GET_METADATA",
-          createEncryptionZone(realUgi, KEY1, ZONE3));
+      assertFalse(createEncryptionZone(realUgi, KEY1, ZONE3),
+          "Allowed zone creation of zone with blacklisted GET_METADATA");
     } finally {
       fs.delete(ZONE3, true);
       teardown();
@@ -750,8 +749,8 @@ public class TestAclsEndToEnd {
 
       fs.mkdirs(ZONE3);
 
-      assertFalse("Allowed zone creation of zone with blacklisted GENERATE_EEK",
-          createEncryptionZone(realUgi, KEY1, ZONE3));
+      assertFalse(createEncryptionZone(realUgi, KEY1, ZONE3),
+          "Allowed zone creation of zone with blacklisted GENERATE_EEK");
     } finally {
       fs.delete(ZONE3, true);
       teardown();
@@ -771,8 +770,8 @@ public class TestAclsEndToEnd {
 
       fs.mkdirs(ZONE3);
 
-      assertTrue("Exception during zone creation with default KMS ACLs",
-          createEncryptionZone(realUgi, KEY1, ZONE3));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE3),
+          "Exception during zone creation with default KMS ACLs");
     } finally {
       fs.delete(ZONE3, true);
       teardown();
@@ -794,8 +793,8 @@ public class TestAclsEndToEnd {
 
       fs.mkdirs(ZONE4);
 
-      assertFalse("Allowed zone creation without GET_METADATA KMS ACL",
-          createEncryptionZone(realUgi, KEY1, ZONE4));
+      assertFalse(createEncryptionZone(realUgi, KEY1, ZONE4),
+          "Allowed zone creation without GET_METADATA KMS ACL");
     } finally {
       fs.delete(ZONE4, true);
       teardown();
@@ -817,8 +816,8 @@ public class TestAclsEndToEnd {
 
       fs.mkdirs(ZONE4);
 
-      assertFalse("Allowed zone creation without GENERATE_EEK KMS ACL",
-          createEncryptionZone(realUgi, KEY1, ZONE4));
+      assertFalse(createEncryptionZone(realUgi, KEY1, ZONE4),
+          "Allowed zone creation without GENERATE_EEK KMS ACL");
     } finally {
       fs.delete(ZONE4, true);
       teardown();
@@ -839,8 +838,8 @@ public class TestAclsEndToEnd {
 
       fs.mkdirs(ZONE4);
 
-      assertFalse("Allowed zone creation without READ ACL",
-          createEncryptionZone(realUgi, KEY1, ZONE4));
+      assertFalse(createEncryptionZone(realUgi, KEY1, ZONE4),
+          "Allowed zone creation without READ ACL");
     } finally {
       fs.delete(ZONE4, true);
       teardown();
@@ -861,8 +860,8 @@ public class TestAclsEndToEnd {
 
       fs.mkdirs(ZONE4);
 
-      assertFalse("Allowed zone creation without GENERATE_EEK ACL",
-          createEncryptionZone(realUgi, KEY1, ZONE4));
+      assertFalse(createEncryptionZone(realUgi, KEY1, ZONE4),
+          "Allowed zone creation without GENERATE_EEK ACL");
     } finally {
       fs.delete(ZONE4, true);
       teardown();
@@ -896,20 +895,20 @@ public class TestAclsEndToEnd {
     try {
       setup(conf);
 
-      assertTrue("Exception during key creation",
-          createKey(realUgi, KEY1, conf));
+      assertTrue(createKey(realUgi, KEY1, conf),
+          "Exception during key creation");
       fs.mkdirs(ZONE1);
-      assertTrue("Exception during zone creation",
-          createEncryptionZone(realUgi, KEY1, ZONE1));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE1),
+          "Exception during zone creation");
       fs.mkdirs(ZONE2);
-      assertTrue("Exception during zone creation",
-          createEncryptionZone(realUgi, KEY1, ZONE2));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE2),
+          "Exception during zone creation");
       fs.mkdirs(ZONE3);
-      assertTrue("Exception during zone creation",
-          createEncryptionZone(realUgi, KEY1, ZONE3));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE3),
+          "Exception during zone creation");
       fs.mkdirs(ZONE4);
-      assertTrue("Exception during zone creation",
-          createEncryptionZone(realUgi, KEY1, ZONE4));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE4),
+          "Exception during zone creation");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
       fs.delete(ZONE2, true);
@@ -941,8 +940,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertTrue("Exception during file creation with correct config"
-          + " using whitelist ACL", createFile(realUgi, FILE1, TEXT));
+      assertTrue(createFile(realUgi, FILE1, TEXT),
+          "Exception during file creation with correct config"
+          + " using whitelist ACL");
     } finally {
       fs.delete(ZONE1, true);
       teardown();
@@ -963,8 +962,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertTrue("Exception during file creation with correct config"
-          + " using whitelist ACL", createFile(realUgi, FILE2, TEXT));
+      assertTrue(createFile(realUgi, FILE2, TEXT),
+          "Exception during file creation with correct config"
+          + " using whitelist ACL");
     } finally {
       fs.delete(ZONE2, true);
       teardown();
@@ -987,8 +986,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file creation when default key ACLs should have been"
-          + " overridden by key ACL", createFile(realUgi, FILE3, TEXT));
+      assertFalse(createFile(realUgi, FILE3, TEXT),
+          "Allowed file creation when default key ACLs should have been"
+          + " overridden by key ACL");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1014,8 +1013,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file creation with blacklist for GENERATE_EEK",
-          createFile(realUgi, FILE3, TEXT));
+      assertFalse(createFile(realUgi, FILE3, TEXT),
+          "Allowed file creation with blacklist for GENERATE_EEK");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1041,8 +1040,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file creation with blacklist for DECRYPT_EEK",
-          createFile(realUgi, FILE3, TEXT));
+      assertFalse(createFile(realUgi, FILE3, TEXT),
+          "Allowed file creation with blacklist for DECRYPT_EEK");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1062,8 +1061,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertTrue("Exception during file creation with default KMS ACLs",
-          createFile(realUgi, FILE3, TEXT));
+      assertTrue(createFile(realUgi, FILE3, TEXT),
+          "Exception during file creation with default KMS ACLs");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1086,8 +1085,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file creation without GENERATE_EEK KMS ACL",
-          createFile(realUgi, FILE4, TEXT));
+      assertFalse(createFile(realUgi, FILE4, TEXT),
+          "Allowed file creation without GENERATE_EEK KMS ACL");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1110,8 +1109,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file creation without DECRYPT_EEK KMS ACL",
-          createFile(realUgi, FILE3, TEXT));
+      assertFalse(createFile(realUgi, FILE3, TEXT),
+          "Allowed file creation without DECRYPT_EEK KMS ACL");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1133,8 +1132,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file creation without GENERATE_EEK key ACL",
-          createFile(realUgi, FILE3, TEXT));
+      assertFalse(createFile(realUgi, FILE3, TEXT),
+          "Allowed file creation without GENERATE_EEK key ACL");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1156,8 +1155,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file creation without DECRYPT_EEK key ACL",
-          createFile(realUgi, FILE3, TEXT));
+      assertFalse(createFile(realUgi, FILE3, TEXT),
+          "Allowed file creation without DECRYPT_EEK key ACL");
     } catch (Exception ex) {
       fs.delete(ZONE3, true);
 
@@ -1198,13 +1197,13 @@ public class TestAclsEndToEnd {
     try {
       setup(conf);
 
-      assertTrue("Exception during key creation",
-          createKey(realUgi, KEY1, conf));
+      assertTrue(createKey(realUgi, KEY1, conf),
+          "Exception during key creation");
       fs.mkdirs(ZONE1);
-      assertTrue("Exception during zone creation",
-          createEncryptionZone(realUgi, KEY1, ZONE1));
-      assertTrue("Exception during file creation",
-              createFile(realUgi, FILE1, TEXT));
+      assertTrue(createEncryptionZone(realUgi, KEY1, ZONE1),
+          "Exception during zone creation");
+      assertTrue(createFile(realUgi, FILE1, TEXT),
+          "Exception during file creation");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1229,8 +1228,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertTrue("Exception while reading file with correct config with"
-          + " whitelist ACLs", compareFile(realUgi, FILE1, TEXT));
+      assertTrue(compareFile(realUgi, FILE1, TEXT),
+          "Exception while reading file with correct config with"
+          + " whitelist ACLs");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1250,8 +1249,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertTrue("Exception while reading file with correct config"
-          + " with default ACLs", compareFile(realUgi, FILE1, TEXT));
+      assertTrue(compareFile(realUgi, FILE1, TEXT),
+          "Exception while reading file with correct config"
+          + " with default ACLs");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1273,8 +1272,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file read when default key ACLs should have been"
-          + " overridden by key ACL", compareFile(realUgi, FILE1, TEXT));
+      assertFalse(compareFile(realUgi, FILE1, TEXT),
+          "Allowed file read when default key ACLs should have been"
+          + " overridden by key ACL");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1296,8 +1295,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file read with blacklist for DECRYPT_EEK",
-          compareFile(realUgi, FILE1, TEXT));
+      assertFalse(compareFile(realUgi, FILE1, TEXT),
+          "Allowed file read with blacklist for DECRYPT_EEK");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1315,8 +1314,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertTrue("Exception while reading file with default KMS ACLs",
-          compareFile(realUgi, FILE1, TEXT));
+      assertTrue(compareFile(realUgi, FILE1, TEXT),
+          "Exception while reading file with default KMS ACLs");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1335,8 +1334,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file read without DECRYPT_EEK KMS ACL",
-          compareFile(realUgi, FILE1, TEXT));
+      assertFalse(compareFile(realUgi, FILE1, TEXT),
+          "Allowed file read without DECRYPT_EEK KMS ACL");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1351,8 +1350,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false, false);
 
-      assertFalse("Allowed file read without DECRYPT_EEK key ACL",
-          compareFile(realUgi, FILE1, TEXT));
+      assertFalse(compareFile(realUgi, FILE1, TEXT),
+          "Allowed file read without DECRYPT_EEK key ACL");
     } catch (Throwable ex) {
       fs.delete(ZONE1, true);
 
@@ -1379,12 +1378,12 @@ public class TestAclsEndToEnd {
     try {
       setup(conf);
 
-      assertTrue("Exception during key creation",
-          createKey(realUgi, KEY1, conf));
-      assertTrue("Exception during key creation",
-          createKey(realUgi, KEY2, conf));
-      assertTrue("Exception during key creation",
-          createKey(realUgi, KEY3, conf));
+      assertTrue(createKey(realUgi, KEY1, conf),
+          "Exception during key creation");
+      assertTrue(createKey(realUgi, KEY2, conf),
+          "Exception during key creation");
+      assertTrue(createKey(realUgi, KEY3, conf),
+          "Exception during key creation");
     } finally {
       teardown();
     }
@@ -1405,8 +1404,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false);
 
-      assertTrue("Exception during key deletion with correct config"
-          + " using whitelist key ACLs", deleteKey(realUgi, KEY1));
+      assertTrue(deleteKey(realUgi, KEY1),
+          "Exception during key deletion with correct config"
+          + " using whitelist key ACLs");
     } finally {
       teardown();
     }
@@ -1422,8 +1421,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false);
 
-      assertTrue("Exception during key deletion with correct config"
-          + " using default key ACLs", deleteKey(realUgi, KEY2));
+      assertTrue(deleteKey(realUgi, KEY2),
+          "Exception during key deletion with correct config"
+          + " using default key ACLs");
     } finally {
       teardown();
     }
@@ -1441,8 +1440,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false);
 
-      assertFalse("Allowed key deletion with blacklist for DELETE",
-          deleteKey(realUgi, KEY3));
+      assertFalse(deleteKey(realUgi, KEY3),
+          "Allowed key deletion with blacklist for DELETE");
     } finally {
       teardown();
     }
@@ -1457,8 +1456,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false);
 
-      assertFalse("Allowed key deletion without DELETE KMS ACL",
-          deleteKey(realUgi, KEY3));
+      assertFalse(deleteKey(realUgi, KEY3),
+          "Allowed key deletion without DELETE KMS ACL");
     } finally {
       teardown();
     }
@@ -1473,8 +1472,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false);
 
-      assertFalse("Allowed key deletion without MANAGMENT key ACL",
-          deleteKey(realUgi, KEY3));
+      assertFalse(deleteKey(realUgi, KEY3),
+          "Allowed key deletion without MANAGMENT key ACL");
     } finally {
       teardown();
     }
@@ -1492,8 +1491,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false);
 
-      assertFalse("Allowed key deletion when default key ACL should have been"
-          + " overridden by key ACL", deleteKey(realUgi, KEY3));
+      assertFalse(deleteKey(realUgi, KEY3),
+          "Allowed key deletion when default key ACL should have been"
+          + " overridden by key ACL");
     } finally {
       teardown();
     }
@@ -1507,8 +1506,8 @@ public class TestAclsEndToEnd {
     try {
       setup(conf, false);
 
-      assertTrue("Exception during key deletion with default KMS ACLs",
-          deleteKey(realUgi, KEY3));
+      assertTrue(deleteKey(realUgi, KEY3),
+          "Exception during key deletion with default KMS ACLs");
     } finally {
       teardown();
     }
@@ -1596,8 +1595,8 @@ public class TestAclsEndToEnd {
         FSDataInputStream din =  cluster.getFileSystem().open(file);
         BufferedReader in = new BufferedReader(new InputStreamReader(din));
 
-        assertEquals("The text read does not match the text written",
-            text, in.readLine());
+        assertEquals(text, in.readLine(),
+            "The text read does not match the text written");
       }
     });
   }

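Each TestAclsEndToEnd scenario runs several related allow/deny checks and stops at the first failure. JUnit 5's assertAll would report every failed check in one run; an optional refinement (sketch with hypothetical booleans):

    import static org.junit.jupiter.api.Assertions.assertAll;
    import static org.junit.jupiter.api.Assertions.assertFalse;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    class GroupedAclChecksSketch {
      void checkKeyCreationAcls(boolean keyadminCreated, boolean hdfsCreated,
          boolean userCreated) {
        // All three executables run; failures are reported together.
        assertAll("key creation ACLs",
            () -> assertTrue(keyadminCreated, "keyadmin could not create the key"),
            () -> assertFalse(hdfsCreated, "hdfs must not create the key"),
            () -> assertFalse(userCreated, "user must not create the key"));
      }
    }
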
+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java

@@ -26,10 +26,10 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Ignore;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
 
 /**
  * Test cases for trying to append to a file with a different
@@ -44,7 +44,7 @@ public class TestAppendDifferentChecksum {
   private static FileSystem fs; 
   
 
-  @BeforeClass
+  @BeforeAll
   public static void setupCluster() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
@@ -55,7 +55,7 @@ public class TestAppendDifferentChecksum {
     fs = cluster.getFileSystem();
   }
   
-  @AfterClass
+  @AfterAll
   public static void teardown() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -68,7 +68,7 @@ public class TestAppendDifferentChecksum {
    * difficulties in doing so.
    */
   @Test
-  @Ignore("this is not implemented! See HDFS-2130")
+  @Disabled("this is not implemented! See HDFS-2130")
   public void testSwitchChunkSize() throws IOException {
     FileSystem fsWithSmallChunk = createFsWithChecksum("CRC32", 512);
     FileSystem fsWithBigChunk = createFsWithChecksum("CRC32", 1024);

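@Ignore maps to @Disabled, and the reason string is preserved in the report. The main migration hazard this file illustrates is annotation mixing: a class combining org.junit.Test with Jupiter lifecycle annotations runs correctly under neither engine, so the whole class has to move together. Consistent form (sketch):

    import org.junit.jupiter.api.Disabled;
    import org.junit.jupiter.api.Test;

    class DisabledSketch {
      @Test
      @Disabled("this is not implemented! See HDFS-2130")
      void testSwitchChunkSize() {
        // The Jupiter engine reports this test as skipped, with the reason above.
      }
    }
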
+ 10 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java

@@ -45,11 +45,10 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite.ReplaceData
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
-
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.slf4j.event.Level;
 
@@ -78,7 +77,7 @@ public class TestAppendSnapshotTruncate {
   static MiniDFSCluster cluster;
   static DistributedFileSystem dfs;
 
-  @BeforeClass
+  @BeforeAll
   public static void startUp() throws IOException {
     conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
@@ -95,7 +94,7 @@ public class TestAppendSnapshotTruncate {
     dfs = cluster.getFileSystem();
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() throws IOException {
     if(dfs != null) {
       dfs.close();
@@ -167,7 +166,7 @@ public class TestAppendSnapshotTruncate {
       {
         //copy all local files to a sub dir to simulate snapshot. 
         final File subDir = new File(localDir, snapshot);
-        Assert.assertFalse(subDir.exists());
+        Assertions.assertFalse(subDir.exists());
         subDir.mkdir();
 
         for(File f : localDir.listFiles(FILE_ONLY)) {
@@ -185,12 +184,12 @@ public class TestAppendSnapshotTruncate {
           .append(snapshot);
 
       final File subDir = new File(localDir, snapshot);
-      Assert.assertTrue(subDir.exists());
+      Assertions.assertTrue(subDir.exists());
       
       final File[] localFiles = subDir.listFiles(FILE_ONLY);
       final Path p = snapshotPaths.get(snapshot);
       final FileStatus[] statuses = dfs.listStatus(p);
-      Assert.assertEquals(localFiles.length, statuses.length);
+      Assertions.assertEquals(localFiles.length, statuses.length);
       b.append(p).append(" vs ").append(subDir).append(", ")
        .append(statuses.length).append(" entries");
       
@@ -374,8 +373,8 @@ public class TestAppendSnapshotTruncate {
 
     static int checkLength(Path file, File localFile) throws IOException {
       final long length = dfs.getFileStatus(file).getLen();
-      Assert.assertEquals(localFile.length(), length);
-      Assert.assertTrue(length <= Integer.MAX_VALUE);
+      Assertions.assertEquals(localFile.length(), length);
+      Assertions.assertTrue(length <= Integer.MAX_VALUE);
       return (int)length;
     }
     

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestApplyingStoragePolicy.java

@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -27,9 +27,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 public class TestApplyingStoragePolicy {
   private static final short REPL = 1;
@@ -39,7 +39,7 @@ public class TestApplyingStoragePolicy {
   private static MiniDFSCluster cluster;
   private static DistributedFileSystem fs;
 
-  @Before
+  @BeforeEach
   public void clusterSetUp() throws IOException {
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).build();
@@ -47,7 +47,7 @@ public class TestApplyingStoragePolicy {
     fs = cluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void clusterShutdown() throws IOException{
     if(fs != null) {
       fs.close();

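The @Before/@After conversion above is the per-method counterpart of the class-level change. A minimal sketch of why @BeforeEach/@AfterEach suit a fixture that each test mutates (the Resource type here is hypothetical, standing in for the cluster/file-system pair):

    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    class PerTestLifecycleSketch {
      private Resource resource;          // hypothetical mutable fixture

      @BeforeEach
      void setUp() {
        resource = new Resource();        // every test starts from fresh state
      }

      @AfterEach
      void cleanUp() {
        if (resource != null) {
          resource.close();
        }
      }

      @Test
      void isolatedCase() {
        // cannot observe state leaked from another test
      }

      static class Resource implements AutoCloseable {
        @Override
        public void close() {
          // no-op for the sketch
        }
      }
    }
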
+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java

@@ -17,8 +17,8 @@
 */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.ByteArrayOutputStream;
 import java.io.PrintStream;
@@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * This test ensures that the balancer bandwidth is dynamically adjusted
@@ -131,11 +131,11 @@ public class TestBalancerBandwidth {
     try {
       System.setOut(outStream);
       int exitCode = admin.run(args);
-      assertEquals("DFSAdmin should return 0", 0, exitCode);
+      assertEquals(0, exitCode, "DFSAdmin should return 0");
       String bandwidthOutMsg = "Balancer bandwidth is " + expectedBandwidth
           + " bytes per second.";
       String strOut = new String(outContent.toByteArray(), UTF8);
-      assertTrue("Wrong balancer bandwidth!", strOut.contains(bandwidthOutMsg));
+      assertTrue(strOut.contains(bandwidthOutMsg), "Wrong balancer bandwidth!");
     } finally {
       System.setOut(initialStdOut);
     }

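The reordering in this hunk is the core mechanical change of the migration: JUnit 4's Assert takes the failure message as the first argument, Jupiter's Assertions takes it as the last. A minimal sketch (the exitCode value is illustrative):

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import org.junit.jupiter.api.Test;

    class MessagePositionSketch {
      @Test
      void messageIsLastInJupiter() {
        int exitCode = 0;  // stand-in for a real admin.run(args) result
        // JUnit 4: assertEquals("DFSAdmin should return 0", 0, exitCode);
        // Jupiter: the message moves to the trailing parameter.
        assertEquals(0, exitCode, "DFSAdmin should return 0");
      }
    }
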
+ 17 - 19
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBatchedListDirectories.java

@@ -33,10 +33,10 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Lists;
 import org.hamcrest.core.StringContains;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.ExpectedException;
 
 import java.io.FileNotFoundException;
@@ -46,9 +46,7 @@ import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * Tests for the batched listing API.
@@ -85,15 +83,15 @@ public class TestBatchedListDirectories {
   private static void assertSubDirEquals(int i, int j, Path p) {
     assertTrue(p.toString().startsWith("hdfs://"));
     Path expected = getSubDirName(i, j);
-    assertEquals("Unexpected subdir name",
-        expected.toString(), p.toUri().getPath());
+    assertEquals(expected.toString(), p.toUri().getPath(),
+        "Unexpected subdir name");
   }
 
   private static void assertFileEquals(int i, int j, int k, Path p) {
     assertTrue(p.toString().startsWith("hdfs://"));
     Path expected = getFileName(i, j, k);
-    assertEquals("Unexpected file name",
-        expected.toString(), p.toUri().getPath());
+    assertEquals(expected.toString(), p.toUri().getPath(),
+        "Unexpected file name");
   }
 
   private static void loadData() throws Exception {
@@ -119,7 +117,7 @@ public class TestBatchedListDirectories {
     dfs.setPermission(INACCESSIBLE_DIR_PATH, new FsPermission(0000));
   }
 
-  @BeforeClass
+  @BeforeAll
   public static void beforeClass() throws Exception {
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 7);
@@ -132,7 +130,7 @@ public class TestBatchedListDirectories {
     loadData();
   }
 
-  @AfterClass
+  @AfterAll
   public static void afterClass() {
     if (cluster != null) {
       cluster.shutdown();
@@ -233,8 +231,8 @@ public class TestBatchedListDirectories {
     dfs.setWorkingDirectory(new Path("/dir0"));
     List<Path> paths = Lists.newArrayList(new Path("."));
     List<FileStatus> statuses = getStatuses(paths);
-    assertEquals("Wrong number of items",
-        SECOND_LEVEL_DIRS, statuses.size());
+    assertEquals(SECOND_LEVEL_DIRS, statuses.size(),
+        "Wrong number of items");
     for (int i = 0; i < SECOND_LEVEL_DIRS; i++) {
       FileStatus stat = statuses.get(i);
       assertSubDirEquals(0, i, stat.getPath());
@@ -246,8 +244,8 @@ public class TestBatchedListDirectories {
     dfs.setWorkingDirectory(new Path("/dir0"));
     List<Path> paths = Lists.newArrayList(new Path("subdir0"));
     List<FileStatus> statuses = getStatuses(paths);
-    assertEquals("Wrong number of items",
-        FILES_PER_DIR, statuses.size());
+    assertEquals(FILES_PER_DIR, statuses.size(),
+        "Wrong number of items");
     for (int i = 0; i < FILES_PER_DIR; i++) {
       FileStatus stat = statuses.get(i);
       assertFileEquals(0, 0, i, stat.getPath());
@@ -256,9 +254,9 @@ public class TestBatchedListDirectories {
 
   @Test
   public void testDFSHasCapability() throws Throwable {
-    assertTrue("FS does not declare PathCapability support",
-        dfs.hasPathCapability(new Path("/"),
-            CommonPathCapabilities.FS_EXPERIMENTAL_BATCH_LISTING));
+    assertTrue(dfs.hasPathCapability(new Path("/"),
+        CommonPathCapabilities.FS_EXPERIMENTAL_BATCH_LISTING),
+        "FS does not declare PathCapability support");
   }
 
   private void listFilesInternal(int numFiles) throws Exception {

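This file still imports org.junit.Rule and ExpectedException, which are JUnit 4 constructs; a JUnit 4 @Rule does not run under the Jupiter @Test imported here. The Jupiter replacement is assertThrows, which also hands back the exception for message checks. A sketch with a hypothetical exception, not this file's actual call:

    import static org.junit.jupiter.api.Assertions.assertThrows;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    class AssertThrowsSketch {
      @Test
      void rejectedCallSurfacesException() {
        // Replaces: thrown.expect(...); thrown.expectMessage(...); call();
        Exception e = assertThrows(Exception.class, () -> {
          throw new Exception("Permission denied: /dir2");  // hypothetical
        });
        assertTrue(e.getMessage().contains("Permission denied"));
      }
    }
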
+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 
@@ -31,7 +31,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestBlockMissingException {
   final static Logger LOG =
@@ -116,6 +116,6 @@ public class TestBlockMissingException {
       gotException = true;
     }
     stm.close();
-    assertTrue("Expected BlockMissingException ", gotException);
+    assertTrue(gotException, "Expected BlockMissingException");
   }
 }

+ 67 - 67
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -47,9 +48,8 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Sets;
-import org.junit.Assert;
-import static org.junit.Assert.fail;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 /** Test {@link BlockStoragePolicy} */
 public class TestBlockStoragePolicy {
@@ -160,10 +160,10 @@ public class TestBlockStoragePolicy {
       final BlockStoragePolicy policy = POLICY_SUITE.getPolicy(i); 
       if (policy != null) {
         final String s = policy.toString();
-        Assert.assertEquals(expectedPolicyStrings.get(i), s);
+        Assertions.assertEquals(expectedPolicyStrings.get(i), s);
       }
     }
-    Assert.assertEquals(POLICY_SUITE.getPolicy(HOT), POLICY_SUITE.getDefaultPolicy());
+    Assertions.assertEquals(POLICY_SUITE.getPolicy(HOT), POLICY_SUITE.getDefaultPolicy());
     
     // check Cold policy
     final BlockStoragePolicy cold = POLICY_SUITE.getPolicy(COLD);
@@ -260,11 +260,11 @@ public class TestBlockStoragePolicy {
 
   static void assertStorageType(List<StorageType> computed, short replication,
       StorageType... answers) {
-    Assert.assertEquals(replication, computed.size());
+    Assertions.assertEquals(replication, computed.size());
     final StorageType last = answers[answers.length - 1];
     for(int i = 0; i < computed.size(); i++) {
       final StorageType expected = i < answers.length? answers[i]: last;
-      Assert.assertEquals(expected, computed.get(i));
+      Assertions.assertEquals(expected, computed.get(i));
     }
   }
 
@@ -272,27 +272,27 @@ public class TestBlockStoragePolicy {
       StorageType noneExpected, StorageType archiveExpected,
       StorageType diskExpected, StorageType ssdExpected,
       StorageType disk_archiveExpected, StorageType nvdimmExpected) {
-    Assert.assertEquals(noneExpected, policy.getCreationFallback(none));
-    Assert.assertEquals(archiveExpected, policy.getCreationFallback(archive));
-    Assert.assertEquals(diskExpected, policy.getCreationFallback(disk));
-    Assert.assertEquals(ssdExpected, policy.getCreationFallback(ssd));
-    Assert.assertEquals(nvdimmExpected, policy.getCreationFallback(nvdimm));
-    Assert.assertEquals(disk_archiveExpected,
+    Assertions.assertEquals(noneExpected, policy.getCreationFallback(none));
+    Assertions.assertEquals(archiveExpected, policy.getCreationFallback(archive));
+    Assertions.assertEquals(diskExpected, policy.getCreationFallback(disk));
+    Assertions.assertEquals(ssdExpected, policy.getCreationFallback(ssd));
+    Assertions.assertEquals(nvdimmExpected, policy.getCreationFallback(nvdimm));
+    Assertions.assertEquals(disk_archiveExpected,
         policy.getCreationFallback(disk_archive));
-    Assert.assertEquals(null, policy.getCreationFallback(all));
+    Assertions.assertEquals(null, policy.getCreationFallback(all));
   }
 
   static void assertReplicationFallback(BlockStoragePolicy policy,
       StorageType noneExpected, StorageType archiveExpected,
       StorageType diskExpected, StorageType ssdExpected,
       StorageType nvdimmExpected) {
-    Assert.assertEquals(noneExpected, policy.getReplicationFallback(none));
-    Assert
+    Assertions.assertEquals(noneExpected, policy.getReplicationFallback(none));
+    Assertions
         .assertEquals(archiveExpected, policy.getReplicationFallback(archive));
-    Assert.assertEquals(diskExpected, policy.getReplicationFallback(disk));
-    Assert.assertEquals(ssdExpected, policy.getReplicationFallback(ssd));
-    Assert.assertEquals(nvdimmExpected, policy.getReplicationFallback(nvdimm));
-    Assert.assertEquals(null, policy.getReplicationFallback(all));
+    Assertions.assertEquals(diskExpected, policy.getReplicationFallback(disk));
+    Assertions.assertEquals(ssdExpected, policy.getReplicationFallback(ssd));
+    Assertions.assertEquals(nvdimmExpected, policy.getReplicationFallback(nvdimm));
+    Assertions.assertEquals(null, policy.getReplicationFallback(all));
   }
 
   private static interface CheckChooseStorageTypes {
@@ -879,7 +879,7 @@ public class TestBlockStoragePolicy {
   static void assertStorageTypes(StorageType[] computed, StorageType... expected) {
     Arrays.sort(expected);
     Arrays.sort(computed);
-    Assert.assertArrayEquals(expected, computed);
+    Assertions.assertArrayEquals(expected, computed);
   }
 
   @Test
@@ -924,9 +924,9 @@ public class TestBlockStoragePolicy {
   }
 
   private void checkDirectoryListing(HdfsFileStatus[] stats, byte... policies) {
-    Assert.assertEquals(stats.length, policies.length);
+    Assertions.assertEquals(stats.length, policies.length);
     for (int i = 0; i < stats.length; i++) {
-      Assert.assertEquals(stats[i].getStoragePolicy(), policies[i]);
+      Assertions.assertEquals(stats[i].getStoragePolicy(), policies[i]);
     }
   }
 
@@ -949,7 +949,7 @@ public class TestBlockStoragePolicy {
       final String invalidPolicyName = "INVALID-POLICY";
       try {
         fs.setStoragePolicy(fooFile, invalidPolicyName);
-        Assert.fail("Should throw a HadoopIllegalArgumentException");
+        Assertions.fail("Should throw a HadoopIllegalArgumentException");
       } catch (RemoteException e) {
         GenericTestUtils.assertExceptionContains(invalidPolicyName, e);
       }
@@ -967,14 +967,14 @@ public class TestBlockStoragePolicy {
       final Path invalidPath = new Path("/invalidPath");
       try {
         fs.setStoragePolicy(invalidPath, HdfsConstants.WARM_STORAGE_POLICY_NAME);
-        Assert.fail("Should throw a FileNotFoundException");
+        Assertions.fail("Should throw a FileNotFoundException");
       } catch (FileNotFoundException e) {
         GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
       }
 
       try {
         fs.getStoragePolicy(invalidPath);
-        Assert.fail("Should throw a FileNotFoundException");
+        Assertions.fail("Should throw a FileNotFoundException");
       } catch (FileNotFoundException e) {
         GenericTestUtils.assertExceptionContains(invalidPath.toString(), e);
       }
@@ -982,15 +982,15 @@ public class TestBlockStoragePolicy {
       fs.setStoragePolicy(fooFile, HdfsConstants.COLD_STORAGE_POLICY_NAME);
       fs.setStoragePolicy(barDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
       fs.setStoragePolicy(barFile2, HdfsConstants.HOT_STORAGE_POLICY_NAME);
-      Assert.assertEquals("File storage policy should be COLD",
-          HdfsConstants.COLD_STORAGE_POLICY_NAME,
-          fs.getStoragePolicy(fooFile).getName());
-      Assert.assertEquals("File storage policy should be WARM",
-          HdfsConstants.WARM_STORAGE_POLICY_NAME,
-          fs.getStoragePolicy(barDir).getName());
-      Assert.assertEquals("File storage policy should be HOT",
-          HdfsConstants.HOT_STORAGE_POLICY_NAME,
-          fs.getStoragePolicy(barFile2).getName());
+      Assertions.assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME,
+          fs.getStoragePolicy(fooFile).getName(),
+          "File storage policy should be COLD");
+      Assertions.assertEquals(HdfsConstants.WARM_STORAGE_POLICY_NAME,
+          fs.getStoragePolicy(barDir).getName(),
+          "File storage policy should be WARM");
+      Assertions.assertEquals(HdfsConstants.HOT_STORAGE_POLICY_NAME,
+          fs.getStoragePolicy(barFile2).getName(),
+          "File storage policy should be HOT");
 
       dirList = fs.getClient().listPaths(dir.toString(),
           HdfsFileStatus.EMPTY_NAME).getPartialListing();
@@ -1040,8 +1040,8 @@ public class TestBlockStoragePolicy {
           HdfsConstants.COLD_STORAGE_POLICY_NAME);
       String policyName = client.getStoragePolicy("/testGetStoragePolicy/foo")
           .getName();
-      Assert.assertEquals("File storage policy should be COLD",
-          HdfsConstants.COLD_STORAGE_POLICY_NAME, policyName);
+      Assertions.assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME,
+          policyName, "File storage policy should be COLD");
     } finally {
       cluster.shutdown();
     }
@@ -1140,14 +1140,14 @@ public class TestBlockStoragePolicy {
     List<StorageType> typeList = Lists.newArrayList();
     Collections.addAll(typeList, types);
     LocatedBlocks lbs = status.getLocatedBlocks();
-    Assert.assertEquals(blockNum, lbs.getLocatedBlocks().size());
+    Assertions.assertEquals(blockNum, lbs.getLocatedBlocks().size());
     for (LocatedBlock lb : lbs.getLocatedBlocks()) {
-      Assert.assertEquals(replicaNum, lb.getStorageTypes().length);
+      Assertions.assertEquals(replicaNum, lb.getStorageTypes().length);
       for (StorageType type : lb.getStorageTypes()) {
-        Assert.assertTrue(typeList.remove(type));
+        Assertions.assertTrue(typeList.remove(type));
       }
     }
-    Assert.assertTrue(typeList.isEmpty());
+    Assertions.assertTrue(typeList.isEmpty());
   }
 
   private void testChangeFileRep(String policyName, byte policyId,
@@ -1285,12 +1285,12 @@ public class TestBlockStoragePolicy {
         dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
         new HashSet<Node>(), 0, policy1, null);
     System.out.println(Arrays.asList(targets));
-    Assert.assertEquals(3, targets.length);
+    Assertions.assertEquals(3, targets.length);
     targets = replicator.chooseTarget("/foo", 3,
         dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
         new HashSet<Node>(), 0, policy2, null);
     System.out.println(Arrays.asList(targets));
-    Assert.assertEquals(3, targets.length);
+    Assertions.assertEquals(3, targets.length);
   }
 
   @Test
@@ -1332,9 +1332,9 @@ public class TestBlockStoragePolicy {
         dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
         new HashSet<Node>(), 0, policy, null);
     System.out.println(policy.getName() + ": " + Arrays.asList(targets));
-    Assert.assertEquals(2, targets.length);
-    Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
-    Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
+    Assertions.assertEquals(2, targets.length);
+    Assertions.assertEquals(StorageType.SSD, targets[0].getStorageType());
+    Assertions.assertEquals(StorageType.DISK, targets[1].getStorageType());
   }
 
   @Test
@@ -1360,17 +1360,17 @@ public class TestBlockStoragePolicy {
       // 4. Set Dir policy
       fs.setStoragePolicy(dir, "HOT");
       HdfsFileStatus status = fs.getClient().getFileInfo(file);
-      // 5. get file policy, it should be parent policy.
-      Assert
-          .assertTrue("File storage policy should be HOT",
-              status.getStoragePolicy() == HOT);
+      // 5. get file policy, it should be parent policy.
+      Assertions.assertTrue(
+          status.getStoragePolicy() == HOT,
+          "File storage policy should be HOT");
       // 6. restart NameNode for reloading edits logs.
       cluster.restartNameNode(true);
       // 7. get file policy, it should be parent policy.
       status = fs.getClient().getFileInfo(file);
-      Assert
-          .assertTrue("File storage policy should be HOT",
-              status.getStoragePolicy() == HOT);
+      Assertions.assertTrue(
+          status.getStoragePolicy() == HOT,
+          "File storage policy should be HOT");
 
     } finally {
       cluster.shutdown();
@@ -1408,8 +1408,8 @@ public class TestBlockStoragePolicy {
       }
 
       // Ensure that we got the same set of policies in both cases.
-      Assert.assertTrue(Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
-      Assert.assertTrue(Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
+      Assertions.assertTrue(Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
+      Assertions.assertTrue(Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
     } finally {
       cluster.shutdown();
     }
@@ -1428,21 +1428,21 @@ public class TestBlockStoragePolicy {
 
     {
       final Iterator<StorageType> i = map.keySet().iterator();
-      Assert.assertEquals(StorageType.RAM_DISK, i.next());
-      Assert.assertEquals(StorageType.SSD, i.next());
-      Assert.assertEquals(StorageType.DISK, i.next());
-      Assert.assertEquals(StorageType.ARCHIVE, i.next());
-      Assert.assertEquals(StorageType.NVDIMM, i.next());
+      Assertions.assertEquals(StorageType.RAM_DISK, i.next());
+      Assertions.assertEquals(StorageType.SSD, i.next());
+      Assertions.assertEquals(StorageType.DISK, i.next());
+      Assertions.assertEquals(StorageType.ARCHIVE, i.next());
+      Assertions.assertEquals(StorageType.NVDIMM, i.next());
     }
 
     {
       final Iterator<Map.Entry<StorageType, Integer>> i
           = map.entrySet().iterator();
-      Assert.assertEquals(StorageType.RAM_DISK, i.next().getKey());
-      Assert.assertEquals(StorageType.SSD, i.next().getKey());
-      Assert.assertEquals(StorageType.DISK, i.next().getKey());
-      Assert.assertEquals(StorageType.ARCHIVE, i.next().getKey());
-      Assert.assertEquals(StorageType.NVDIMM, i.next().getKey());
+      Assertions.assertEquals(StorageType.RAM_DISK, i.next().getKey());
+      Assertions.assertEquals(StorageType.SSD, i.next().getKey());
+      Assertions.assertEquals(StorageType.DISK, i.next().getKey());
+      Assertions.assertEquals(StorageType.ARCHIVE, i.next().getKey());
+      Assertions.assertEquals(StorageType.NVDIMM, i.next().getKey());
     }
   }
 
@@ -1600,7 +1600,7 @@ public class TestBlockStoragePolicy {
   public void testCreateDefaultPoliciesFromConf() {
     BlockStoragePolicySuite suite =
         BlockStoragePolicySuite.createDefaultSuite();
-    Assert.assertEquals(HdfsConstants.StoragePolicy.HOT.value(),
+    Assertions.assertEquals(HdfsConstants.StoragePolicy.HOT.value(),
         suite.getDefaultPolicy().getId());
 
     Configuration newConf = new Configuration();
@@ -1608,7 +1608,7 @@ public class TestBlockStoragePolicy {
         HdfsConstants.StoragePolicy.ONE_SSD);
     BlockStoragePolicySuite suiteConf =
         BlockStoragePolicySuite.createDefaultSuite(newConf);
-    Assert.assertEquals(HdfsConstants.StoragePolicy.ONE_SSD.value(),
+    Assertions.assertEquals(HdfsConstants.StoragePolicy.ONE_SSD.value(),
         suiteConf.getDefaultPolicy().getId());
   }
 
@@ -1627,7 +1627,7 @@ public class TestBlockStoragePolicy {
       DFSTestUtil.createFile(newfs, fooFile, 0, REPLICATION, 0L);
 
       String policy = newfs.getStoragePolicy(fooFile).getName();
-      Assert.assertEquals(HdfsConstants.StoragePolicy.WARM.name(), policy);
+      Assertions.assertEquals(HdfsConstants.StoragePolicy.WARM.name(), policy);
     } finally {
       cluster.shutdown();
     }

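Several converted lines in this file read Assertions.assertEquals(null, ...). That is a faithful one-to-one conversion, but Jupiter has a dedicated assertion for it; a minimal sketch:

    import org.junit.jupiter.api.Assertions;
    import org.junit.jupiter.api.Test;

    class NullAssertionSketch {
      @Test
      void fallbackIsAbsent() {
        Object fallback = null;  // illustrative value
        // One-to-one conversion: Assertions.assertEquals(null, fallback);
        // assertNull states the intent directly:
        Assertions.assertNull(fallback);
      }
    }
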
+ 7 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java

@@ -36,14 +36,15 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.security.TestPermission;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 
 /**
@@ -77,7 +78,7 @@ public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase {
     this.qopValue = qopValue;
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws Exception {
     conf = createSecureConfig(this.configKey);
     conf.set(DFS_NAMENODE_RPC_ADDRESS_AUXILIARY_KEY, "12000");
@@ -109,7 +110,7 @@ public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase {
     dfs = (DistributedFileSystem) FileSystem.get(uriAuxiliary, conf);
   }
 
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     if (cluster != null) {
       cluster.shutdown();

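This file keeps @RunWith(Parameterized.class) while switching @Test to Jupiter, but a JUnit 4 runner does not apply to Jupiter tests. The Jupiter route (assuming the junit-jupiter-params artifact is on the test classpath) replaces the runner and constructor injection with @ParameterizedTest; a sketch with illustrative values:

    import static org.junit.jupiter.api.Assertions.assertNotNull;

    import java.util.stream.Stream;
    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.MethodSource;

    class ParameterizedSketch {
      // Replaces the @Parameterized.Parameters factory method.
      static Stream<String> qopValues() {
        return Stream.of("authentication", "integrity", "privacy");
      }

      // Replaces @RunWith(Parameterized.class) plus constructor injection;
      // the method runs once per value from the source.
      @ParameterizedTest
      @MethodSource("qopValues")
      void runsOncePerValue(String qop) {
        assertNotNull(qop);
      }
    }
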
+ 12 - 12
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -35,8 +35,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests DatanodeDescriptor.getBlocksScheduled() at the
@@ -47,7 +47,7 @@ public class TestBlocksScheduledCounter {
   MiniDFSCluster cluster = null;
   FileSystem fs = null;
 
-  @After
+  @AfterEach
   public void tearDown() throws IOException {
     if (fs != null) {
       fs.close();
@@ -104,8 +104,8 @@ public class TestBlocksScheduledCounter {
     ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
     datanodeManager.fetchDatanodes(dnList, dnList, false);
     for (DatanodeDescriptor descriptor : dnList) {
-      assertEquals("Blocks scheduled should be 0 for " + descriptor.getName(),
-          0, descriptor.getBlocksScheduled());
+      assertEquals(0, descriptor.getBlocksScheduled(),
+          "Blocks scheduled should be 0 for " + descriptor.getName());
     }
 
     cluster.getDataNodes().get(0).shutdown();
@@ -120,21 +120,21 @@ public class TestBlocksScheduledCounter {
 
     DatanodeDescriptor abandonedDn = datanodeManager.getDatanode(cluster
         .getDataNodes().get(0).getDatanodeId());
-    assertEquals("for the abandoned dn scheduled counts should be 0", 0,
-        abandonedDn.getBlocksScheduled());
+      assertEquals(0, abandonedDn.getBlocksScheduled(),
+          "for the abandoned dn scheduled counts should be 0");
 
     for (DatanodeDescriptor descriptor : dnList) {
       if (descriptor.equals(abandonedDn)) {
         continue;
       }
-      assertEquals("Blocks scheduled should be 1 for " + descriptor.getName(),
-          1, descriptor.getBlocksScheduled());
+      assertEquals(1, descriptor.getBlocksScheduled(),
+          "Blocks scheduled should be 1 for " + descriptor.getName());
     }
     // close the file and the counter should go to zero.
     out.close();
     for (DatanodeDescriptor descriptor : dnList) {
-      assertEquals("Blocks scheduled should be 0 for " + descriptor.getName(),
-          0, descriptor.getBlocksScheduled());
+      assertEquals(0, descriptor.getBlocksScheduled(),
+          "Blocks scheduled should be 0 for " + descriptor.getName());
     }
   }
 

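The converted assertions above build their failure messages eagerly ("... for " + descriptor.getName()) on every loop iteration, even when the assertion passes. Jupiter also accepts a Supplier&lt;String&gt;, which defers that work to the failure path; a minimal sketch (names illustrative):

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import org.junit.jupiter.api.Test;

    class LazyMessageSketch {
      @Test
      void messageBuiltOnlyOnFailure() {
        int scheduled = 0;      // stand-in for descriptor.getBlocksScheduled()
        String name = "dn-0";   // stand-in for descriptor.getName()
        // The concatenation inside the lambda only runs if the check fails.
        assertEquals(0, scheduled,
            () -> "Blocks scheduled should be 0 for " + name);
      }
    }
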
+ 6 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteBufferPread.java

@@ -28,14 +28,11 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
 
 /**
  * This class tests the DFS positional read functionality on a single node
@@ -55,7 +52,7 @@ public class TestByteBufferPread {
   private static final int BLOCK_SIZE = 4096;
   private static final int FILE_SIZE = 12 * BLOCK_SIZE;
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws IOException {
     // Setup the cluster with a small block size so we can create small files
     // that span multiple blocks
@@ -278,7 +275,7 @@ public class TestByteBufferPread {
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void shutdown() throws IOException {
     try {
       fs.delete(testFile, false);

+ 31 - 30
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java

@@ -17,9 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -31,6 +30,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import java.util.function.Supplier;
 
+import static org.junit.jupiter.api.Assertions.assertFalse;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -54,8 +55,8 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -84,9 +85,9 @@ public class TestClientProtocolForPipelineRecovery {
       // test getNewStampAndToken on a finalized block
       try {
         namenode.updateBlockForPipeline(firstBlock, "");
-        Assert.fail("Can not get a new GS from a finalized block");
+        Assertions.fail("Can not get a new GS from a finalized block");
       } catch (IOException e) {
-        Assert.assertTrue(e.getMessage().contains(
+        Assertions.assertTrue(e.getMessage().contains(
             "not " + BlockUCState.UNDER_CONSTRUCTION));
       }
       
@@ -96,9 +97,9 @@ public class TestClientProtocolForPipelineRecovery {
         ExtendedBlock newBlock = new ExtendedBlock(firstBlock.getBlockPoolId(),
             newBlockId, 0, firstBlock.getGenerationStamp());
         namenode.updateBlockForPipeline(newBlock, "");
-        Assert.fail("Cannot get a new GS from a non-existent block");
+        Assertions.fail("Cannot get a new GS from a non-existent block");
       } catch (IOException e) {
-        Assert.assertTrue(e.getMessage().contains("does not exist"));
+        Assertions.assertTrue(e.getMessage().contains("does not exist"));
       }
 
       
@@ -122,17 +123,17 @@ public class TestClientProtocolForPipelineRecovery {
         DFSClient dfs = ((DistributedFileSystem)fileSys).dfs;
         try {
           namenode.updateBlockForPipeline(firstBlock, "test" + dfs.clientName);
-          Assert.fail("Cannot get a new GS for a non lease holder");
+          Assertions.fail("Cannot get a new GS for a non lease holder");
         } catch (LeaseExpiredException e) {
-          Assert.assertTrue(e.getMessage().startsWith("Lease mismatch"));
+          Assertions.assertTrue(e.getMessage().startsWith("Lease mismatch"));
         }
 
         // test null lease holder
         try {
           namenode.updateBlockForPipeline(firstBlock, null);
-          Assert.fail("Cannot get a new GS for a null lease holder");
+          Assertions.fail("Cannot get a new GS for a null lease holder");
         } catch (LeaseExpiredException e) {
-          Assert.assertTrue(e.getMessage().startsWith("Lease mismatch"));
+          Assertions.assertTrue(e.getMessage().startsWith("Lease mismatch"));
         }
 
         // test getNewStampAndToken on a rbw block
@@ -177,7 +178,7 @@ public class TestClientProtocolForPipelineRecovery {
         // Test will fail with BlockMissingException if NN does not update the
         // replica state based on the latest report.
       } catch (org.apache.hadoop.hdfs.BlockMissingException bme) {
-        Assert.fail("Block is missing because the file was closed with"
+        Assertions.fail("Block is missing because the file was closed with"
             + " corrupt replicas.");
       }
     } finally {
@@ -239,7 +240,7 @@ public class TestClientProtocolForPipelineRecovery {
           contains = true;
         }
       }
-      Assert.assertTrue(contains);
+      Assertions.assertTrue(contains);
     } finally {
       DataNodeFaultInjector.set(oldDnInjector);
       if (cluster != null) {
@@ -322,7 +323,7 @@ public class TestClientProtocolForPipelineRecovery {
       final String dnAddr = dn.getDatanodeId().getIpcAddr(false);
       // issue shutdown to the datanode.
       final String[] args1 = {"-shutdownDatanode", dnAddr, "upgrade" };
-      Assert.assertEquals(0, dfsadmin.run(args1));
+      Assertions.assertEquals(0, dfsadmin.run(args1));
       // Wait long enough to receive an OOB ack before closing the file.
       GenericTestUtils.waitForThreadTermination(
           "Async datanode shutdown thread", 100, 10000);
@@ -358,23 +359,23 @@ public class TestClientProtocolForPipelineRecovery {
       // get nodes in the pipeline
       DFSOutputStream dfsOut = (DFSOutputStream)out.getWrappedStream();
       DatanodeInfo[] nodes = dfsOut.getPipeline();
-      Assert.assertEquals(2, nodes.length);
+      Assertions.assertEquals(2, nodes.length);
       String dnAddr = nodes[1].getIpcAddr(false);
 
       // evict the writer from the second datanode and wait until
       // the pipeline is rebuilt.
       DFSAdmin dfsadmin = new DFSAdmin(conf);
       final String[] args1 = {"-evictWriters", dnAddr };
-      Assert.assertEquals(0, dfsadmin.run(args1));
+      Assertions.assertEquals(0, dfsadmin.run(args1));
       out.write(0x31);
       out.hflush();
 
       // get the new pipline and check the node is not in there.
       nodes = dfsOut.getPipeline();
       try {
-        Assert.assertTrue(nodes.length > 0 );
+        Assertions.assertTrue(nodes.length > 0 );
         for (int i = 0; i < nodes.length; i++) {
-          Assert.assertFalse(dnAddr.equals(nodes[i].getIpcAddr(false)));
+          Assertions.assertFalse(dnAddr.equals(nodes[i].getIpcAddr(false)));
         }
       } finally {
         out.close();
@@ -410,7 +411,7 @@ public class TestClientProtocolForPipelineRecovery {
       final String dnAddr1 = dn.getDatanodeId().getIpcAddr(false);
       // issue shutdown to the datanode.
       final String[] args1 = {"-shutdownDatanode", dnAddr1, "upgrade" };
-      Assert.assertEquals(0, dfsadmin.run(args1));
+      Assertions.assertEquals(0, dfsadmin.run(args1));
       GenericTestUtils.waitForThreadTermination(
           "Async datanode shutdown thread", 100, 10000);
       // This should succeed without restarting the node. The restart will
@@ -427,7 +428,7 @@ public class TestClientProtocolForPipelineRecovery {
       final String dnAddr2 = dn.getDatanodeId().getIpcAddr(false);
       // issue shutdown to the datanode.
       final String[] args2 = {"-shutdownDatanode", dnAddr2, "upgrade" };
-      Assert.assertEquals(0, dfsadmin.run(args2));
+      Assertions.assertEquals(0, dfsadmin.run(args2));
       GenericTestUtils.waitForThreadTermination(
           "Async datanode shutdown thread", 100, 10000);
       try {
@@ -480,8 +481,8 @@ public class TestClientProtocolForPipelineRecovery {
           return out.getBlock().getGenerationStamp() > oldGs;
         }
       }, 100, 10000);
-      Assert.assertEquals("The pipeline recovery count shouldn't increase",
-          0, out.getStreamer().getPipelineRecoveryCount());
+      Assertions.assertEquals(0, out.getStreamer().getPipelineRecoveryCount(),
+          "The pipeline recovery count shouldn't increase");
       out.write(1);
       out.close();
       // Ensure that subsequent closes are idempotent and do not throw errors
@@ -539,7 +540,7 @@ public class TestClientProtocolForPipelineRecovery {
       Thread.sleep(1000);
       DatanodeInfo[] pipeline = out.getPipeline();
       for (DatanodeInfo node : pipeline) {
-        assertFalse("Write should be going on", failed.get());
+        assertFalse(failed.get(), "Write should be going on");
         ArrayList<DataNode> dataNodes = cluster.getDataNodes();
         int indexToShutdown = 0;
         for (int i = 0; i < dataNodes.size(); i++) {
@@ -564,15 +565,15 @@ public class TestClientProtocolForPipelineRecovery {
             return out.getBlock().getGenerationStamp() > oldGs;
           }
         }, 100, 10000);
-        Assert.assertEquals("The pipeline recovery count shouldn't increase", 0,
-            out.getStreamer().getPipelineRecoveryCount());
+        Assertions.assertEquals(0, out.getStreamer().getPipelineRecoveryCount(),
+            "The pipeline recovery count shouldn't increase");
       }
-      assertFalse("Write should be going on", failed.get());
+      assertFalse(failed.get(), "Write should be going on");
       running.set(false);
       t.join();
       out.write("testagain".getBytes());
-      assertTrue("There should be atleast 2 nodes in pipeline still", out
-          .getPipeline().length >= 2);
+      assertTrue(out.getPipeline().length >= 2,
+          "There should be at least 2 nodes in pipeline still");
       out.close();
     } finally {
       DFSClientFaultInjector.set(old);
@@ -723,7 +724,7 @@ public class TestClientProtocolForPipelineRecovery {
         o.hflush();
       }
 
-      assertTrue("Expected a failure in the pipeline", failed.get());
+      assertTrue(failed.get(), "Expected a failure in the pipeline");
       DatanodeInfo[] newNodes = dfsO.getStreamer().getNodes();
       o.close();
       // Trigger block report to NN

+ 14 - 14
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java

@@ -41,10 +41,10 @@ import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck;
 import org.apache.hadoop.hdfs.tools.DFSck;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.ToolRunner;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * Class is used to test client reporting corrupted block replica to name node.
@@ -67,7 +67,7 @@ public class TestClientReportBadBlock {
 
   Random rand = new Random();
 
-  @Before
+  @BeforeEach
   public void startUpCluster() throws IOException {
     // disable block scanner
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); 
@@ -80,7 +80,7 @@ public class TestClientReportBadBlock {
     buffersize = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
   }
 
-  @After
+  @AfterEach
   public void shutDownCluster() throws IOException {
     if (dfs != null) {
       dfs.close();
@@ -211,7 +211,7 @@ public class TestClientReportBadBlock {
     // Locate the file blocks by asking name node
     final LocatedBlocks locatedblocks = dfs.dfs.getNamenode()
         .getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
-    Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
+    Assertions.assertEquals(repl, locatedblocks.get(0).getLocations().length);
     // The file only has one block
     LocatedBlock lblock = locatedblocks.get(0);
     DatanodeInfo[] datanodeinfos = lblock.getLocations();
@@ -236,7 +236,7 @@ public class TestClientReportBadBlock {
     final LocatedBlocks locatedBlocks = dfs.dfs.getNamenode()
         .getBlockLocations(filePath.toUri().getPath(), 0, Long.MAX_VALUE);
     final LocatedBlock firstLocatedBlock = locatedBlocks.get(0);
-    Assert.assertEquals(isCorrupted, firstLocatedBlock.isCorrupt());
+    Assertions.assertEquals(isCorrupted, firstLocatedBlock.isCorrupt());
   }
 
   /**
@@ -250,7 +250,7 @@ public class TestClientReportBadBlock {
         filePath.toUri().getPath(), 0, Long.MAX_VALUE);
     // we expect only the first block of the file is used for this test
     LocatedBlock firstLocatedBlock = lBlocks.get(0);
-    Assert.assertEquals(expectedReplicas,
+    Assertions.assertEquals(expectedReplicas,
         firstLocatedBlock.getLocations().length);
   }
 
@@ -300,23 +300,23 @@ public class TestClientReportBadBlock {
     // Make sure filesystem is in healthy state
     String outStr = runFsck(conf, 0, true, "/");
     LOG.info(outStr);
-    Assert.assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    Assertions.assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
     if (!expected.equals("")) {
-      Assert.assertTrue(outStr.contains(expected));
+      Assertions.assertTrue(outStr.contains(expected));
     }
   }
 
   private static void verifyFsckBlockCorrupted() throws Exception {
     String outStr = runFsck(conf, 1, true, "/");
     LOG.info(outStr);
-    Assert.assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+    Assertions.assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
   }
   
   private static void testFsckListCorruptFilesBlocks(Path filePath, int errorCode) throws Exception{
     String outStr = runFsck(conf, errorCode, true, filePath.toString(), "-list-corruptfileblocks");
     LOG.info("fsck -list-corruptfileblocks out: " + outStr);
     if (errorCode != 0) {
-      Assert.assertTrue(outStr.contains("CORRUPT blocks"));
+      Assertions.assertTrue(outStr.contains("CORRUPT blocks"));
     }
   }
 
@@ -326,7 +326,7 @@ public class TestClientReportBadBlock {
     PrintStream out = new PrintStream(bStream, true);
     int errCode = ToolRunner.run(new DFSck(conf, out), path);
     if (checkErrorCode)
-      Assert.assertEquals(expectedErrCode, errCode);
+      Assertions.assertEquals(expectedErrCode, errCode);
     return bStream.toString();
   }
 }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClose.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -26,7 +26,7 @@ import java.nio.channels.ClosedChannelException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 public class TestClose {
 

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -27,8 +27,8 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests the client connection caching in a single node
@@ -52,7 +52,7 @@ public class TestConnCache {
                      int length,
                      byte[] authenticData)
       throws IOException {
-    Assert.assertTrue("Test buffer too small", buffer.length >= offset + length);
+    Assertions.assertTrue(buffer.length >= offset + length, "Test buffer too small");
 
     if (pos >= 0)
       in.seek(pos);
@@ -62,7 +62,7 @@ public class TestConnCache {
 
     while (length > 0) {
       int cnt = in.read(buffer, offset, length);
-      Assert.assertTrue("Error in read", cnt > 0);
+      Assertions.assertTrue(cnt > 0, "Error in read");
       offset += cnt;
       length -= cnt;
     }
@@ -71,9 +71,9 @@ public class TestConnCache {
     for (int i = 0; i < length; ++i) {
       byte actual = buffer[i];
       byte expect = authenticData[(int)pos + i];
-      assertEquals("Read data mismatch at file offset " + (pos + i) +
-                   ". Expects " + expect + "; got " + actual,
-                   actual, expect);
+      assertEquals(actual, expect,
+          "Read data mismatch at file offset " + (pos + i) +
+          ". Expects " + expect + "; got " + actual);
     }
   }
 
@@ -116,7 +116,7 @@ public class TestConnCache {
 
     in.close();
     client.close();
-    Assert.assertEquals(1,
+    Assertions.assertEquals(1,
         ClientContext.getFromConf(configuration).getPeerCache().size());
   }
 }

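One caveat in this hunk: the converted call keeps the pre-existing assertEquals(actual, expect, ...) order, and Jupiter, like JUnit 4, defines the signature as (expected, actual). The pass/fail outcome is unchanged, but a failure report would show the two values swapped. A minimal sketch of the convention (values illustrative):

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import org.junit.jupiter.api.Test;

    class ArgumentOrderSketch {
      @Test
      void expectedComesFirst() {
        byte expect = 7;   // what the data should contain
        byte actual = 7;   // what was read back
        // Keeping (expected, actual) makes failure messages truthful.
        assertEquals(expect, actual, "Read data mismatch");
      }
    }
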
+ 11 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java

@@ -18,9 +18,7 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.util.List;
@@ -36,8 +34,8 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.io.IOUtils;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -75,7 +73,7 @@ public class TestCrcCorruption {
 
   private DFSClientFaultInjector faultInjector;
 
-  @Before
+  @BeforeEach
   public void setUp() throws IOException {
     faultInjector = Mockito.mock(DFSClientFaultInjector.class);
     DFSClientFaultInjector.set(faultInjector);
@@ -174,7 +172,7 @@ public class TestCrcCorruption {
       final String bpid = cluster.getNamesystem().getBlockPoolId();
       List<ReplicaInfo> replicas =
           dn.getFSDataset().getFinalizedBlocks(bpid);
-      assertTrue("Replicas do not exist", !replicas.isEmpty());
+      assertTrue(!replicas.isEmpty(), "Replicas do not exist");
 
       for (int idx = 0; idx < replicas.size(); idx++) {
         ReplicaInfo replica = replicas.get(idx);
@@ -192,12 +190,12 @@ public class TestCrcCorruption {
         }
       }
 
-      //
-      // Only one replica is possibly corrupted. The other replica should still
-      // be good. Verify.
-      //
-      assertTrue("Corrupted replicas not handled properly.",
-                 util.checkFiles(fs, "/srcdat"));
+      //
+      // Only one replica is possibly corrupted. The other replica should still
+      // be good. Verify.
+      //
+      assertTrue(util.checkFiles(fs, "/srcdat"),
+          "Corrupted replicas not handled properly.");
       LOG.info("All File still have a valid replica");
 
       //
@@ -287,7 +285,7 @@ public class TestCrcCorruption {
 
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
       int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
-      assertEquals("All replicas not corrupted", replFactor, blockFilesCorrupted);
+      assertEquals(replFactor, blockFilesCorrupted, "All replicas not corrupted");
 
       try {
         IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(), conf,

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java

@@ -27,8 +27,8 @@ package org.apache.hadoop.hdfs;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -37,7 +37,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 
 public class TestDFSAddressConfig {
@@ -67,7 +67,7 @@ public class TestDFSAddressConfig {
      *------------------------------------------------------------------------*/
     for (int i = 0; i < dns.size(); i++) {
       DataNodeProperties dnp = cluster.stopDataNode(i);
-      assertNotNull("Should have been able to stop simulated datanode", dnp);
+      assertNotNull(dnp, "Should have been able to stop simulated datanode");
     }
 
     conf.unset(DFS_DATANODE_ADDRESS_KEY);
@@ -92,7 +92,7 @@ public class TestDFSAddressConfig {
      *------------------------------------------------------------------------*/
     for (int i = 0; i < dns.size(); i++) {
       DataNodeProperties dnp = cluster.stopDataNode(i);
-      assertNotNull("Should have been able to stop simulated datanode", dnp);
+      assertNotNull(dnp, "Should have been able to stop simulated datanode");
     }
 
     conf.set(DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -29,10 +29,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.util.ThreadUtil;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 
 
 /**
@@ -44,13 +44,13 @@ public class TestDFSClientExcludedNodes {
   private MiniDFSCluster cluster;
   private Configuration conf;
 
-  @Before
+  @BeforeEach
   public void setUp() {
     cluster = null;
     conf = new HdfsConfiguration();
   }
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (cluster != null) {
       cluster.shutdown();
@@ -122,8 +122,8 @@ public class TestDFSClientExcludedNodes {
 
     // Bring back the older DNs, since they are gonna be forgiven only
     // afterwards of this previous block write.
-    Assert.assertEquals(true, cluster.restartDataNode(one, true));
-    Assert.assertEquals(true, cluster.restartDataNode(two, true));
+    Assertions.assertEquals(true, cluster.restartDataNode(one, true));
+    Assertions.assertEquals(true, cluster.restartDataNode(two, true));
     cluster.waitActive();
 
     // Sleep for 5s, to let the excluded nodes be expired

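Two converted patterns in the hunks nearby, assertTrue(!replicas.isEmpty(), ...) and Assertions.assertEquals(true, ...), work but read better as the dedicated boolean assertions; a minimal sketch (values illustrative):

    import static org.junit.jupiter.api.Assertions.assertFalse;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.util.Arrays;
    import java.util.List;
    import org.junit.jupiter.api.Test;

    class BooleanIdiomSketch {
      @Test
      void preferDirectBooleanAssertions() {
        List<String> replicas = Arrays.asList("replica-0");
        boolean restarted = true;
        // Instead of assertTrue(!replicas.isEmpty(), "Replicas do not exist"):
        assertFalse(replicas.isEmpty(), "Replicas do not exist");
        // Instead of Assertions.assertEquals(true, restarted):
        assertTrue(restarted);
      }
    }
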
+ 17 - 20
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java

@@ -17,10 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.IOException;
 import java.lang.reflect.Field;
@@ -55,10 +52,10 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.After;
-import org.junit.Assume;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assumptions;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.ArgumentMatcher;
 import org.mockito.Mockito;
 
@@ -74,7 +71,7 @@ public class TestDFSClientFailover {
   private final Configuration conf = new Configuration();
   private MiniDFSCluster cluster;
   
-  @Before
+  @BeforeEach
   public void setUpCluster() throws IOException {
     cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(MiniDFSNNTopology.simpleHATopology())
@@ -83,7 +80,7 @@ public class TestDFSClientFailover {
     cluster.waitActive();
   }
   
-  @After
+  @AfterEach
   public void tearDownCluster() throws IOException {
     if (cluster != null) {
       cluster.shutdown();
@@ -91,7 +88,7 @@ public class TestDFSClientFailover {
     }
   }
 
-  @After
+  @AfterEach
   public void clearConfig() {
     SecurityUtil.setTokenServiceUseIp(true);
   }
@@ -217,9 +214,9 @@ public class TestDFSClientFailover {
       fail("Successfully got proxy provider for misconfigured FS");
     } catch (IOException ioe) {
       LOG.info("got expected exception", ioe);
-      assertTrue("expected exception did not contain helpful message",
-          StringUtils.stringifyException(ioe).contains(
-          "Could not find any configured addresses for URI " + uri));
+      assertTrue(StringUtils.stringifyException(ioe).contains(
+          "Could not find any configured addresses for URI " + uri),
+          "expected exception did not contain helpful message");
     }
   }
 
@@ -233,7 +230,7 @@ public class TestDFSClientFailover {
     try {
       Field f = InetAddress.class.getDeclaredField("nameServices");
       f.setAccessible(true);
-      Assume.assumeNotNull(f);
+      Assumptions.assumeTrue(f != null);
       @SuppressWarnings("unchecked")
       List<NameService> nsList = (List<NameService>) f.get(null);
 
@@ -248,7 +245,7 @@ public class TestDFSClientFailover {
       LOG.info("Unable to spy on DNS. Skipping test.", t);
       // In case the JDK we're testing on doesn't work like Sun's, just
       // skip the test.
-      Assume.assumeNoException(t);
+      Assumptions.assumeTrue(false, "Unable to spy on DNS, skipping test");
       throw new RuntimeException(t);
     }
   }
@@ -377,9 +374,9 @@ public class TestDFSClientFailover {
     // not to use IP address for token service
     SecurityUtil.setTokenServiceUseIp(false);
 
-    // Logical URI should be used.
-    assertTrue("Legacy proxy providers should use logical URI.",
-        HAUtil.useLogicalUri(config, p.toUri()));
+    // Logical URI should be used.
+    assertTrue(HAUtil.useLogicalUri(config, p.toUri()),
+        "Legacy proxy providers should use logical URI.");
   }
 
   /**
@@ -394,8 +391,8 @@ public class TestDFSClientFailover {
         nnUri.getHost(),
         IPFailoverProxyProvider.class.getName());
 
-    assertFalse("IPFailoverProxyProvider should not use logical URI.",
-        HAUtil.useLogicalUri(config, nnUri));
+    assertFalse(HAUtil.useLogicalUri(config, nnUri),
+        "IPFailoverProxyProvider should not use logical URI.");
   }
 
 }

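The hunk above uses assumeTrue because org.junit.jupiter.api.Assumptions has no assumeNotNull or assumeNoException; those are JUnit 4 Assume methods, and a one-to-one rename would not compile. A sketch of the Jupiter idiom in isolation (probe() is hypothetical):

    import static org.junit.jupiter.api.Assumptions.assumeTrue;

    import org.junit.jupiter.api.Test;

    class AssumptionsSketch {
      @Test
      void skipWhenEnvironmentUnsuitable() {
        Object f = probe();  // hypothetical reflective lookup
        // JUnit 4: Assume.assumeNotNull(f);
        assumeTrue(f != null, "reflection probe unavailable, skipping");
        try {
          // ... body that may fail on JDKs without the internal API ...
        } catch (RuntimeException t) {
          // JUnit 4: Assume.assumeNoException(t); has no Jupiter equivalent.
          // Abort via a failing assumption instead:
          assumeTrue(false, "Unable to spy on DNS, skipping test: " + t);
        }
      }

      private static Object probe() {
        return new Object();
      }
    }
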
+ 26 - 29
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -18,10 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.*;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyLong;
@@ -90,9 +87,9 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.junit.Assert;
-import org.junit.Before;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
 import org.mockito.Mockito;
 import org.mockito.internal.stubbing.answers.ThrowsException;
 import org.mockito.invocation.InvocationOnMock;
@@ -160,7 +157,7 @@ public class TestDFSClientRetries {
     }
   }
   
-  @Before
+  @BeforeEach
   public void setupConf(){
     conf = new HdfsConfiguration();
   }
@@ -285,8 +282,8 @@ public class TestDFSClientRetries {
     try {
       os.close();
     } catch (Exception e) {
-      assertTrue("Retries are not being stopped correctly: " + e.getMessage(),
-           e.getMessage().equals(exceptionMsg));
+      assertTrue(e.getMessage().equals(exceptionMsg),
+          "Retries are not being stopped correctly: " + e.getMessage());
     }
   }
 
@@ -632,7 +629,7 @@ public class TestDFSClientRetries {
     timestamp = Time.now();
     pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
     timestamp2 = Time.now();
-    assertTrue("Something wrong! Test 2 got Exception with maxmum retries!", pass);
+    assertTrue(pass, "Something wrong! Test 2 got Exception with maximum retries!");
     LOG.info("Test 2 succeeded! Time spent: "  + (timestamp2-timestamp)/1000.0 + " sec.");
     
     //
@@ -657,7 +654,7 @@ public class TestDFSClientRetries {
     timestamp = Time.now();
     pass = busyTest(xcievers, threads, fileLen, timeWin, retries);
     timestamp2 = Time.now();
-    assertTrue("Something wrong! Test 4 got Exception with maxmum retries!", pass);
+    assertTrue(pass, "Something wrong! Test 4 got Exception with maximum retries!");
     LOG.info("Test 4 succeeded! Time spent: "  + (timestamp2-timestamp)/1000.0 + " sec.");
   }
 
@@ -692,10 +689,10 @@ public class TestDFSClientRetries {
                                          bufferSize,
                                          replicationFactor,
                                          blockSize);
-      
-      // verify that file exists in FS namespace
-      assertTrue(file1 + " should be a file", 
-                  fs.getFileStatus(file1).isFile());
+
+      // verify that file exists in FS namespace
+      assertTrue(fs.getFileStatus(file1).isFile(),
+          file1 + " should be a file");
       System.out.println("Path : \"" + file1 + "\"");
       LOG.info("Path : \"" + file1 + "\"");
 
@@ -706,10 +703,10 @@ public class TestDFSClientRetries {
 
       // verify that file size has changed to the full size
       long len = fs.getFileStatus(file1).getLen();
-      
-      assertTrue(file1 + " should be of size " + fileLen +
-                 " but found to be of size " + len, 
-                  len == fileLen);
+
+      assertTrue(len == fileLen,
+          file1 + " should be of size " + fileLen +
+          " but found to be of size " + len);
       
      // read back and check data integrity
       byte[] read_buf = new byte[fileLen];
@@ -809,11 +806,11 @@ public class TestDFSClientRetries {
         in.close();
         fs.close();
 
-        assertTrue("hashed keys are not the same size",
-                   hash_sha.length == expected_sha.length);
+        assertTrue(hash_sha.length == expected_sha.length,
+            "hashed keys are not the same size");
 
-        assertTrue("hashed keys are not equal",
-                   Arrays.equals(hash_sha, expected_sha));
+        assertTrue(Arrays.equals(hash_sha, expected_sha),
+            "hashed keys are not equal");
         
         counter.inc(); // count this thread as successful
         
@@ -928,8 +925,8 @@ public class TestDFSClientRetries {
 
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, path);
       int blockFilesCorrupted = cluster.corruptBlockOnDataNodes(block);
-      assertEquals("All replicas not corrupted", REPL_FACTOR,
-          blockFilesCorrupted);
+      assertEquals(REPL_FACTOR, blockFilesCorrupted,
+          "All replicas not corrupted");
 
       InetSocketAddress nnAddr =
         new InetSocketAddress("localhost", cluster.getNameNodePort());
@@ -1107,13 +1104,13 @@ public class TestDFSClientRetries {
         final FSDataInputStream in = fs.open(file4);
         int count = 0;
         for(int r; (r = in.read()) != -1; count++) {
-          Assert.assertEquals(String.format("count=%d", count),
-              bytes[count % bytes.length], (byte)r);
+          Assertions.assertEquals(bytes[count % bytes.length], (byte) r,
+              String.format("count=%d", count));
         }
         if (!isWebHDFS) {
-          Assert.assertEquals(5 * bytes.length, count);
+          Assertions.assertEquals(5 * bytes.length, count);
         } else {
-          Assert.assertEquals(2 * bytes.length, count);
+          Assertions.assertEquals(2 * bytes.length, count);
         }
         in.close();
       }

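Review note: with Object arguments, both the JUnit 4 form assertEquals(message, expected, actual)
and the Jupiter form assertEquals(expected, actual, message) compile, so a mechanical reorder that
goes wrong can silently treat the message string as the expected value. The hunk above is the
correct translation; a sketch of both forms for comparison:

    // JUnit 4: message, expected, actual
    Assert.assertEquals("All replicas not corrupted", REPL_FACTOR, blockFilesCorrupted);

    // JUnit 5: expected, actual, message
    Assertions.assertEquals(REPL_FACTOR, blockFilesCorrupted, "All replicas not corrupted");
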
+ 8 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientSocketSize.java

@@ -20,9 +20,7 @@ package org.apache.hadoop.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
-
-import org.junit.Test;
-
+import org.junit.jupiter.api.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -31,7 +29,7 @@ import java.io.IOException;
 import java.net.Socket;
 
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 public class TestDFSClientSocketSize {
   private static final Logger LOG = LoggerFactory.getLogger(
@@ -49,8 +47,8 @@ public class TestDFSClientSocketSize {
     final int sendBufferSize = getSendBufferSize(new Configuration());
     LOG.info("If not specified, the auto tuned send buffer size is: {}",
         sendBufferSize);
-    assertTrue("Send buffer size should be non-negative value which is " +
-        "determined by system (kernel).", sendBufferSize > 0);
+    assertTrue(sendBufferSize > 0,
+        "Send buffer size should be a positive value determined by the system (kernel).");
   }
 
   /**
@@ -69,8 +67,8 @@ public class TestDFSClientSocketSize {
 
     LOG.info("Large buf size is {}, small is {}",
         sendBufferSize1, sendBufferSize2);
-    assertTrue("Larger specified send buffer should have effect",
-        sendBufferSize1 > sendBufferSize2);
+    assertTrue(sendBufferSize1 > sendBufferSize2,
+        "Larger specified send buffer should have effect");
   }
 
   /**
@@ -83,8 +81,8 @@ public class TestDFSClientSocketSize {
     conf.setInt(DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY, 0);
     final int sendBufferSize = getSendBufferSize(conf);
     LOG.info("The auto tuned send buffer size is: {}", sendBufferSize);
-    assertTrue("Send buffer size should be non-negative value which is " +
-        "determined by system (kernel).", sendBufferSize > 0);
+    assertTrue(sendBufferSize > 0,
+        "Send buffer size should be a positive value determined by the system (kernel).");
   }
 
   private int getSendBufferSize(Configuration conf) throws IOException {

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java

@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
 
 import java.io.File;
 import java.util.Collections;
@@ -33,8 +33,8 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
-import org.junit.After;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
 
 /**
  * This test ensures the appropriate response from the system when 
@@ -184,7 +184,7 @@ public class TestDFSFinalize {
     } // end numDir loop
   }
  
-  @After
+  @AfterEach
   public void tearDown() throws Exception {
     LOG.info("Shutting down MiniDFSCluster");
     if (cluster != null) {

+ 168 - 168
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStream.java

@@ -35,8 +35,8 @@ import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.util.ExitUtil;
-import org.junit.Assert;
 import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -60,8 +60,8 @@ public class TestDFSInotifyEventInputStream {
   }
 
   private static long checkTxid(EventBatch batch, long prevTxid){
-    Assert.assertTrue("Previous txid " + prevTxid + " was not less than " +
-        "new txid " + batch.getTxid(), prevTxid < batch.getTxid());
+    Assertions.assertTrue(prevTxid < batch.getTxid(),
+        "Previous txid " + prevTxid + " was not less than " +
+        "new txid " + batch.getTxid());
     return batch.getTxid();
   }
 
@@ -73,7 +73,7 @@ public class TestDFSInotifyEventInputStream {
    */
   @Test
   public void testOpcodeCount() {
-    Assert.assertEquals(54, FSEditLogOpCodes.values().length);
+    Assertions.assertEquals(54, FSEditLogOpCodes.values().length);
   }
 
 
@@ -146,287 +146,287 @@ public class TestDFSInotifyEventInputStream {
 
       // RenameOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       long txid = batch.getTxid();
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
       Event.RenameEvent re = (Event.RenameEvent) batch.getEvents()[0];
-      Assert.assertEquals("/file4", re.getDstPath());
-      Assert.assertEquals("/file", re.getSrcPath());
-      Assert.assertTrue(re.getTimestamp() > 0);
+      Assertions.assertEquals("/file4", re.getDstPath());
+      Assertions.assertEquals("/file", re.getSrcPath());
+      Assertions.assertTrue(re.getTimestamp() > 0);
       LOG.info(re.toString());
-      Assert.assertTrue(re.toString().startsWith("RenameEvent [srcPath="));
+      Assertions.assertTrue(re.toString().startsWith("RenameEvent [srcPath="));
 
       long eventsBehind = eis.getTxidsBehindEstimate();
 
       // RenameOldOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
       Event.RenameEvent re2 = (Event.RenameEvent) batch.getEvents()[0];
-      Assert.assertTrue(re2.getDstPath().equals("/file2"));
-      Assert.assertTrue(re2.getSrcPath().equals("/file4"));
-      Assert.assertTrue(re2.getTimestamp() > 0);
+      Assertions.assertTrue(re2.getDstPath().equals("/file2"));
+      Assertions.assertTrue(re2.getSrcPath().equals("/file4"));
+      Assertions.assertTrue(re2.getTimestamp() > 0);
       LOG.info(re2.toString());
 
       // AddOp with overwrite
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
       Event.CreateEvent ce = (Event.CreateEvent) batch.getEvents()[0];
-      Assert.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
-      Assert.assertTrue(ce.getPath().equals("/file2"));
-      Assert.assertTrue(ce.getCtime() > 0);
-      Assert.assertTrue(ce.getReplication() > 0);
-      Assert.assertTrue(ce.getSymlinkTarget() == null);
-      Assert.assertTrue(ce.getOverwrite());
-      Assert.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
-      Assert.assertTrue(ce.isErasureCoded().isPresent());
-      Assert.assertFalse(ce.isErasureCoded().get());
+      Assertions.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
+      Assertions.assertTrue(ce.getPath().equals("/file2"));
+      Assertions.assertTrue(ce.getCtime() > 0);
+      Assertions.assertTrue(ce.getReplication() > 0);
+      Assertions.assertTrue(ce.getSymlinkTarget() == null);
+      Assertions.assertTrue(ce.getOverwrite());
+      Assertions.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
+      Assertions.assertTrue(ce.isErasureCoded().isPresent());
+      Assertions.assertFalse(ce.isErasureCoded().get());
       LOG.info(ce.toString());
-      Assert.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
+      Assertions.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
 
       // CloseOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
       Event.CloseEvent ce2 = (Event.CloseEvent) batch.getEvents()[0];
-      Assert.assertTrue(ce2.getPath().equals("/file2"));
-      Assert.assertTrue(ce2.getFileSize() > 0);
-      Assert.assertTrue(ce2.getTimestamp() > 0);
+      Assertions.assertTrue(ce2.getPath().equals("/file2"));
+      Assertions.assertTrue(ce2.getFileSize() > 0);
+      Assertions.assertTrue(ce2.getTimestamp() > 0);
       LOG.info(ce2.toString());
-      Assert.assertTrue(ce2.toString().startsWith("CloseEvent [path="));
+      Assertions.assertTrue(ce2.toString().startsWith("CloseEvent [path="));
 
       // AppendOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
       Event.AppendEvent append2 = (Event.AppendEvent)batch.getEvents()[0];
-      Assert.assertEquals("/file2", append2.getPath());
-      Assert.assertFalse(append2.toNewBlock());
+      Assertions.assertEquals("/file2", append2.getPath());
+      Assertions.assertFalse(append2.toNewBlock());
       LOG.info(append2.toString());
-      Assert.assertTrue(append2.toString().startsWith("AppendEvent [path="));
+      Assertions.assertTrue(append2.toString().startsWith("AppendEvent [path="));
 
       // CloseOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
-      Assert.assertTrue(((Event.CloseEvent) batch.getEvents()[0]).getPath().equals("/file2"));
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CLOSE);
+      Assertions.assertTrue(((Event.CloseEvent) batch.getEvents()[0]).getPath().equals("/file2"));
 
       // TimesOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue.getPath().equals("/file2"));
-      Assert.assertTrue(mue.getMetadataType() ==
+      Assertions.assertTrue(mue.getPath().equals("/file2"));
+      Assertions.assertTrue(mue.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.TIMES);
       LOG.info(mue.toString());
-      Assert.assertTrue(mue.toString().startsWith("MetadataUpdateEvent [path="));
+      Assertions.assertTrue(mue.toString().startsWith("MetadataUpdateEvent [path="));
 
       // SetReplicationOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue2 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue2.getPath().equals("/file2"));
-      Assert.assertTrue(mue2.getMetadataType() ==
+      Assertions.assertTrue(mue2.getPath().equals("/file2"));
+      Assertions.assertTrue(mue2.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.REPLICATION);
-      Assert.assertTrue(mue2.getReplication() == 1);
+      Assertions.assertTrue(mue2.getReplication() == 1);
       LOG.info(mue2.toString());
 
       // ConcatDeleteOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(3, batch.getEvents().length);
+      Assertions.assertEquals(3, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
-      Assert.assertTrue(((Event.AppendEvent) batch.getEvents()[0]).getPath().equals("/file2"));
-      Assert.assertTrue(batch.getEvents()[1].getEventType() == Event.EventType.UNLINK);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.APPEND);
+      Assertions.assertTrue(((Event.AppendEvent) batch.getEvents()[0]).getPath().equals("/file2"));
+      Assertions.assertTrue(batch.getEvents()[1].getEventType() == Event.EventType.UNLINK);
       Event.UnlinkEvent ue2 = (Event.UnlinkEvent) batch.getEvents()[1];
-      Assert.assertTrue(ue2.getPath().equals("/file3"));
-      Assert.assertTrue(ue2.getTimestamp() > 0);
+      Assertions.assertTrue(ue2.getPath().equals("/file3"));
+      Assertions.assertTrue(ue2.getTimestamp() > 0);
       LOG.info(ue2.toString());
-      Assert.assertTrue(ue2.toString().startsWith("UnlinkEvent [path="));
-      Assert.assertTrue(batch.getEvents()[2].getEventType() == Event.EventType.CLOSE);
+      Assertions.assertTrue(ue2.toString().startsWith("UnlinkEvent [path="));
+      Assertions.assertTrue(batch.getEvents()[2].getEventType() == Event.EventType.CLOSE);
       Event.CloseEvent ce3 = (Event.CloseEvent) batch.getEvents()[2];
-      Assert.assertTrue(ce3.getPath().equals("/file2"));
-      Assert.assertTrue(ce3.getTimestamp() > 0);
+      Assertions.assertTrue(ce3.getPath().equals("/file2"));
+      Assertions.assertTrue(ce3.getTimestamp() > 0);
 
       // DeleteOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.UNLINK);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.UNLINK);
       Event.UnlinkEvent ue = (Event.UnlinkEvent) batch.getEvents()[0];
-      Assert.assertTrue(ue.getPath().equals("/file2"));
-      Assert.assertTrue(ue.getTimestamp() > 0);
+      Assertions.assertTrue(ue.getPath().equals("/file2"));
+      Assertions.assertTrue(ue.getTimestamp() > 0);
       LOG.info(ue.toString());
 
       // MkdirOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
       Event.CreateEvent ce4 = (Event.CreateEvent) batch.getEvents()[0];
-      Assert.assertTrue(ce4.getiNodeType() ==
+      Assertions.assertTrue(ce4.getiNodeType() ==
           Event.CreateEvent.INodeType.DIRECTORY);
-      Assert.assertTrue(ce4.getPath().equals("/dir"));
-      Assert.assertTrue(ce4.getCtime() > 0);
-      Assert.assertTrue(ce4.getReplication() == 0);
-      Assert.assertTrue(ce4.getSymlinkTarget() == null);
+      Assertions.assertTrue(ce4.getPath().equals("/dir"));
+      Assertions.assertTrue(ce4.getCtime() > 0);
+      Assertions.assertTrue(ce4.getReplication() == 0);
+      Assertions.assertTrue(ce4.getSymlinkTarget() == null);
       LOG.info(ce4.toString());
 
       // SetPermissionsOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue3 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue3.getPath().equals("/dir"));
-      Assert.assertTrue(mue3.getMetadataType() ==
+      Assertions.assertTrue(mue3.getPath().equals("/dir"));
+      Assertions.assertTrue(mue3.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.PERMS);
-      Assert.assertTrue(mue3.getPerms().toString().contains("rw-rw-rw-"));
+      Assertions.assertTrue(mue3.getPerms().toString().contains("rw-rw-rw-"));
       LOG.info(mue3.toString());
 
       // SetOwnerOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue4 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue4.getPath().equals("/dir"));
-      Assert.assertTrue(mue4.getMetadataType() ==
+      Assertions.assertTrue(mue4.getPath().equals("/dir"));
+      Assertions.assertTrue(mue4.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.OWNER);
-      Assert.assertTrue(mue4.getOwnerName().equals("username"));
-      Assert.assertTrue(mue4.getGroupName().equals("groupname"));
+      Assertions.assertTrue(mue4.getOwnerName().equals("username"));
+      Assertions.assertTrue(mue4.getGroupName().equals("groupname"));
       LOG.info(mue4.toString());
 
       // SymlinkOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
       Event.CreateEvent ce5 = (Event.CreateEvent) batch.getEvents()[0];
-      Assert.assertTrue(ce5.getiNodeType() ==
+      Assertions.assertTrue(ce5.getiNodeType() ==
           Event.CreateEvent.INodeType.SYMLINK);
-      Assert.assertTrue(ce5.getPath().equals("/dir2"));
-      Assert.assertTrue(ce5.getCtime() > 0);
-      Assert.assertTrue(ce5.getReplication() == 0);
-      Assert.assertTrue(ce5.getSymlinkTarget().equals("/dir"));
+      Assertions.assertTrue(ce5.getPath().equals("/dir2"));
+      Assertions.assertTrue(ce5.getCtime() > 0);
+      Assertions.assertTrue(ce5.getReplication() == 0);
+      Assertions.assertTrue(ce5.getSymlinkTarget().equals("/dir"));
       LOG.info(ce5.toString());
 
       // SetXAttrOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue5 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue5.getPath().equals("/file5"));
-      Assert.assertTrue(mue5.getMetadataType() ==
+      Assertions.assertTrue(mue5.getPath().equals("/file5"));
+      Assertions.assertTrue(mue5.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.XATTRS);
-      Assert.assertTrue(mue5.getxAttrs().size() == 1);
-      Assert.assertTrue(mue5.getxAttrs().get(0).getName().contains("field"));
-      Assert.assertTrue(!mue5.isxAttrsRemoved());
+      Assertions.assertTrue(mue5.getxAttrs().size() == 1);
+      Assertions.assertTrue(mue5.getxAttrs().get(0).getName().contains("field"));
+      Assertions.assertTrue(!mue5.isxAttrsRemoved());
       LOG.info(mue5.toString());
 
       // RemoveXAttrOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue6 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue6.getPath().equals("/file5"));
-      Assert.assertTrue(mue6.getMetadataType() ==
+      Assertions.assertTrue(mue6.getPath().equals("/file5"));
+      Assertions.assertTrue(mue6.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.XATTRS);
-      Assert.assertTrue(mue6.getxAttrs().size() == 1);
-      Assert.assertTrue(mue6.getxAttrs().get(0).getName().contains("field"));
-      Assert.assertTrue(mue6.isxAttrsRemoved());
+      Assertions.assertTrue(mue6.getxAttrs().size() == 1);
+      Assertions.assertTrue(mue6.getxAttrs().get(0).getName().contains("field"));
+      Assertions.assertTrue(mue6.isxAttrsRemoved());
       LOG.info(mue6.toString());
 
       // SetAclOp (1)
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue7 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue7.getPath().equals("/file5"));
-      Assert.assertTrue(mue7.getMetadataType() ==
+      Assertions.assertTrue(mue7.getPath().equals("/file5"));
+      Assertions.assertTrue(mue7.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.ACLS);
-      Assert.assertTrue(mue7.getAcls().contains(
+      Assertions.assertTrue(mue7.getAcls().contains(
           AclEntry.parseAclEntry("user::rwx", true)));
       LOG.info(mue7.toString());
 
       // SetAclOp (2)
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.METADATA);
       Event.MetadataUpdateEvent mue8 = (Event.MetadataUpdateEvent) batch.getEvents()[0];
-      Assert.assertTrue(mue8.getPath().equals("/file5"));
-      Assert.assertTrue(mue8.getMetadataType() ==
+      Assertions.assertTrue(mue8.getPath().equals("/file5"));
+      Assertions.assertTrue(mue8.getMetadataType() ==
           Event.MetadataUpdateEvent.MetadataType.ACLS);
-      Assert.assertTrue(mue8.getAcls() == null);
+      Assertions.assertTrue(mue8.getAcls() == null);
       LOG.info(mue8.toString());
 
       // RenameOp (2)
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.RENAME);
       Event.RenameEvent re3 = (Event.RenameEvent) batch.getEvents()[0];
-      Assert.assertTrue(re3.getDstPath().equals("/dir/file5"));
-      Assert.assertTrue(re3.getSrcPath().equals("/file5"));
-      Assert.assertTrue(re3.getTimestamp() > 0);
+      Assertions.assertTrue(re3.getDstPath().equals("/dir/file5"));
+      Assertions.assertTrue(re3.getSrcPath().equals("/file5"));
+      Assertions.assertTrue(re3.getTimestamp() > 0);
       LOG.info(re3.toString());
 
       // TruncateOp
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert
+      Assertions
           .assertTrue(batch.getEvents()[0].getEventType() ==
           Event.EventType.TRUNCATE);
       Event.TruncateEvent et = ((Event.TruncateEvent) batch.getEvents()[0]);
-      Assert.assertTrue(et.getPath().equals("/truncate_file"));
-      Assert.assertTrue(et.getFileSize() == BLOCK_SIZE);
-      Assert.assertTrue(et.getTimestamp() > 0);
+      Assertions.assertTrue(et.getPath().equals("/truncate_file"));
+      Assertions.assertTrue(et.getFileSize() == BLOCK_SIZE);
+      Assertions.assertTrue(et.getTimestamp() > 0);
       LOG.info(et.toString());
-      Assert.assertTrue(et.toString().startsWith("TruncateEvent [path="));
+      Assertions.assertTrue(et.toString().startsWith("TruncateEvent [path="));
 
       // CreateEvent without overwrite
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType()
+      Assertions.assertTrue(batch.getEvents()[0].getEventType()
               == Event.EventType.CREATE);
       ce = (Event.CreateEvent) batch.getEvents()[0];
-      Assert.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
-      Assert.assertTrue(ce.getPath().equals("/file_ec_test1"));
-      Assert.assertTrue(ce.getCtime() > 0);
-      Assert.assertTrue(ce.getReplication() > 0);
-      Assert.assertTrue(ce.getSymlinkTarget() == null);
-      Assert.assertFalse(ce.getOverwrite());
-      Assert.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
-      Assert.assertTrue(ce.isErasureCoded().isPresent());
-      Assert.assertFalse(ce.isErasureCoded().get());
+      Assertions.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
+      Assertions.assertTrue(ce.getPath().equals("/file_ec_test1"));
+      Assertions.assertTrue(ce.getCtime() > 0);
+      Assertions.assertTrue(ce.getReplication() > 0);
+      Assertions.assertTrue(ce.getSymlinkTarget() == null);
+      Assertions.assertFalse(ce.getOverwrite());
+      Assertions.assertEquals(BLOCK_SIZE, ce.getDefaultBlockSize());
+      Assertions.assertTrue(ce.isErasureCoded().isPresent());
+      Assertions.assertFalse(ce.isErasureCoded().get());
       LOG.info(ce.toString());
-      Assert.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
+      Assertions.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
 
       // Returns null when there are no further events
-      Assert.assertTrue(eis.poll() == null);
+      Assertions.assertTrue(eis.poll() == null);
 
       // make sure the estimate hasn't changed since the above assertion
       // tells us that we are fully caught up to the current namesystem state
       // and we should not have been behind at all when eventsBehind was set
       // either, since there were few enough events that they should have all
       // been read to the client during the first poll() call
-      Assert.assertTrue(eis.getTxidsBehindEstimate() == eventsBehind);
+      Assertions.assertTrue(eis.getTxidsBehindEstimate() == eventsBehind);
 
     } finally {
       cluster.shutdown();
@@ -470,41 +470,41 @@ public class TestDFSInotifyEventInputStream {
       EventBatch batch = null;
 
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       long txid = batch.getTxid();
       long eventsBehind = eis.getTxidsBehindEstimate();
-      Assert.assertTrue(batch.getEvents()[0].getEventType()
+      Assertions.assertTrue(batch.getEvents()[0].getEventType()
               == Event.EventType.CREATE);
       Event.CreateEvent ce = (Event.CreateEvent) batch.getEvents()[0];
-      Assert.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
-      Assert.assertTrue(ce.getPath().equals("/ecdir/file_ec_test2"));
-      Assert.assertTrue(ce.getCtime() > 0);
-      Assert.assertEquals(1, ce.getReplication());
-      Assert.assertTrue(ce.getSymlinkTarget() == null);
-      Assert.assertTrue(ce.getOverwrite());
-      Assert.assertEquals(ecPolicy.getCellSize(), ce.getDefaultBlockSize());
-      Assert.assertTrue(ce.isErasureCoded().isPresent());
-      Assert.assertTrue(ce.isErasureCoded().get());
+      Assertions.assertTrue(ce.getiNodeType() == Event.CreateEvent.INodeType.FILE);
+      Assertions.assertTrue(ce.getPath().equals("/ecdir/file_ec_test2"));
+      Assertions.assertTrue(ce.getCtime() > 0);
+      Assertions.assertEquals(1, ce.getReplication());
+      Assertions.assertTrue(ce.getSymlinkTarget() == null);
+      Assertions.assertTrue(ce.getOverwrite());
+      Assertions.assertEquals(ecPolicy.getCellSize(), ce.getDefaultBlockSize());
+      Assertions.assertTrue(ce.isErasureCoded().isPresent());
+      Assertions.assertTrue(ce.isErasureCoded().get());
       LOG.info(ce.toString());
-      Assert.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
+      Assertions.assertTrue(ce.toString().startsWith("CreateEvent [INodeType="));
 
       batch = waitForNextEvents(eis);
-      Assert.assertEquals(1, batch.getEvents().length);
+      Assertions.assertEquals(1, batch.getEvents().length);
       txid = checkTxid(batch, txid);
-      Assert.assertTrue(batch.getEvents()[0].getEventType()
+      Assertions.assertTrue(batch.getEvents()[0].getEventType()
               == Event.EventType.CLOSE);
-      Assert.assertTrue(((Event.CloseEvent) batch.getEvents()[0]).getPath()
+      Assertions.assertTrue(((Event.CloseEvent) batch.getEvents()[0]).getPath()
               .equals("/ecdir/file_ec_test2"));
 
       // Returns null when there are no further events
-      Assert.assertTrue(eis.poll() == null);
+      Assertions.assertTrue(eis.poll() == null);
 
       // make sure the estimate hasn't changed since the above assertion
       // tells us that we are fully caught up to the current namesystem state
       // and we should not have been behind at all when eventsBehind was set
       // either, since there were few enough events that they should have all
       // been read to the client during the first poll() call
-      Assert.assertTrue(eis.getTxidsBehindEstimate() == eventsBehind);
+      Assertions.assertTrue(eis.getTxidsBehindEstimate() == eventsBehind);
     } finally {
       cluster.shutdown();
     }
@@ -532,12 +532,12 @@ public class TestDFSInotifyEventInputStream {
       // active
       for (int i = 0; i < 10; i++) {
         batch = waitForNextEvents(eis);
-        Assert.assertEquals(1, batch.getEvents().length);
-        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
-        Assert.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
+        Assertions.assertEquals(1, batch.getEvents().length);
+        Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+        Assertions.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
             i));
       }
-      Assert.assertTrue(eis.poll() == null);
+      Assertions.assertTrue(eis.poll() == null);
     } finally {
       cluster.shutdown();
     }
@@ -571,12 +571,12 @@ public class TestDFSInotifyEventInputStream {
       EventBatch batch = null;
       for (int i = 0; i < 10; i++) {
         batch = waitForNextEvents(eis);
-        Assert.assertEquals(1, batch.getEvents().length);
-        Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
-        Assert.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
+        Assertions.assertEquals(1, batch.getEvents().length);
+        Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+        Assertions.assertTrue(((Event.CreateEvent) batch.getEvents()[0]).getPath().equals("/dir" +
             i));
       }
-      Assert.assertTrue(eis.poll() == null);
+      Assertions.assertTrue(eis.poll() == null);
     } finally {
       try {
         cluster.shutdown();
@@ -615,10 +615,10 @@ public class TestDFSInotifyEventInputStream {
       // a very generous wait period -- the edit will definitely have been
       // processed by the time this is up
       EventBatch batch = eis.poll(5, TimeUnit.SECONDS);
-      Assert.assertNotNull(batch);
-      Assert.assertEquals(1, batch.getEvents().length);
-      Assert.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
-      Assert.assertEquals("/dir", ((Event.CreateEvent) batch.getEvents()[0]).getPath());
+      Assertions.assertNotNull(batch);
+      Assertions.assertEquals(1, batch.getEvents().length);
+      Assertions.assertTrue(batch.getEvents()[0].getEventType() == Event.EventType.CREATE);
+      Assertions.assertEquals("/dir", ((Event.CreateEvent) batch.getEvents()[0]).getPath());
     } finally {
       cluster.shutdown();
     }

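Review note: the plugin translates the assertion class one-for-one, so value comparisons written as
Assertions.assertTrue(x == y) survive the migration unchanged (e.g. et.getFileSize() == BLOCK_SIZE
above). A possible follow-up, not part of this PR, is to use assertEquals so a failure reports both
values instead of a bare AssertionError:

    // failure reads "expected: <X> but was: <Y>" rather than an opaque assertTrue failure
    Assertions.assertEquals(BLOCK_SIZE, et.getFileSize());
    Assertions.assertEquals("/truncate_file", et.getPath());
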
+ 11 - 14
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInotifyEventInputStreamKerberized.java

@@ -33,10 +33,10 @@ import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
-import org.junit.After;
-import org.junit.Before;
 import org.junit.Rule;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -64,10 +64,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIP
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 /**
  * Class for Kerberized test cases for {@link DFSInotifyEventInputStream}.
@@ -131,7 +128,7 @@ public class TestDFSInotifyEventInputStreamKerberized {
           while ((batch = eis.poll()) != null) {
             LOG.info("txid: " + batch.getTxid());
           }
-          assertNull("poll should not return anything", eis.poll());
+          assertNull(eis.poll(), "poll should not return anything");
 
           Thread.sleep(6000);
           LOG.info("Slept 6 seconds to make sure the TGT has expired.");
@@ -143,16 +140,16 @@ public class TestDFSInotifyEventInputStreamKerberized {
 
           // verify we can poll after a tgt expiration interval
           batch = eis.poll();
-          assertNotNull("poll should return something", batch);
+          assertNotNull(batch, "poll should return something");
           assertEquals(1, batch.getEvents().length);
-          assertNull("poll should not return anything", eis.poll());
+          assertNull(eis.poll(), "poll should not return anything");
           return null;
         }
       }
     });
   }
 
-  @Before
+  @BeforeEach
   public void initKerberizedCluster() throws Exception {
     baseDir = new File(System.getProperty("test.build.dir", "target/test-dir"),
         TestDFSInotifyEventInputStreamKerberized.class.getSimpleName());
@@ -169,8 +166,8 @@ public class TestDFSInotifyEventInputStreamKerberized {
     SecurityUtil.setAuthenticationMethod(
         UserGroupInformation.AuthenticationMethod.KERBEROS, baseConf);
     UserGroupInformation.setConfiguration(baseConf);
-    assertTrue("Expected configuration to enable security",
-        UserGroupInformation.isSecurityEnabled());
+    assertTrue(UserGroupInformation.isSecurityEnabled(),
+        "Expected configuration to enable security");
 
     final String userName = "hdfs";
     nnKeytabFile = new File(baseDir, userName + ".keytab");
@@ -218,7 +215,7 @@ public class TestDFSInotifyEventInputStreamKerberized {
         KeyStoreTestUtil.getServerSSLConfigFileName());
   }
 
-  @After
+  @AfterEach
   public void shutdownCluster() throws Exception {
     if (cluster != null) {
       cluster.shutdown();

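Review note: this file keeps org.junit.Rule and org.junit.rules.Timeout while its @Test moves to
org.junit.jupiter.api.Test; JUnit 4 rules are not applied by the Jupiter engine, so the timeout
would silently stop working. A sketch of the Jupiter-native equivalent, assuming JUnit 5.5+ and a
hypothetical test class used only for illustration:

    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.Timeout;

    public class ExampleTimeoutTest {   // hypothetical class, not part of this PR
      @Test
      @Timeout(180)                     // seconds by default; replaces the JUnit 4 Timeout rule
      public void testWithTimeout() throws Exception {
        // test body
      }
    }
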
+ 7 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java

@@ -18,10 +18,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_READ_USE_CACHE_PRIORITY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -47,8 +44,8 @@ import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Retry;
 
-import org.junit.Assume;
 import org.junit.Test;
+import org.junit.jupiter.api.Assumptions;
 
 public class TestDFSInputStream {
   private void testSkipInner(MiniDFSCluster cluster) throws IOException {
@@ -108,7 +105,7 @@ public class TestDFSInputStream {
 
   @Test(timeout=60000)
   public void testSkipWithLocalBlockReader() throws IOException {
-    Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
+    Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
     TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
     DomainSocket.disableBindPathValidation();
     Configuration conf = new Configuration();
@@ -218,10 +215,10 @@ public class TestDFSInputStream {
       final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
       cluster.getNameNode().getNamesystem().getBlockManager()
           .getDatanodeManager().fetchDatanodes(live, null, false);
-      assertTrue("DN start should be success and live dn should be 2",
-          live.size() == 2);
-      assertTrue("File size should be " + chunkSize,
-          fs.getFileStatus(file).getLen() == chunkSize);
+      assertTrue(live.size() == 2,
+          "DN start should succeed and live dn count should be 2");
+      assertTrue(fs.getFileStatus(file).getLen() == chunkSize,
+          "File size should be " + chunkSize);
     } finally {
       cluster.shutdown();
     }

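Review note: org.junit.jupiter.api.Assumptions has no Hamcrest-style assumeThat(actual, matcher)
overload, which is why the import swap above also requires the boolean form used in the hunk. A
minimal sketch, with the optional skip message that Jupiter supports:

    // skips the test (rather than failing it) when native DomainSocket support is missing
    Assumptions.assumeTrue(DomainSocket.getLoadingFailureReason() == null,
        "requires native DomainSocket support");
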
+ 30 - 34
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStreamBlockLocations.java

@@ -19,11 +19,7 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -41,10 +37,10 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.util.Time;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
@@ -84,7 +80,7 @@ public class TestDFSInputStreamBlockLocations {
     enableBlkExpiration = enableExpiration;
   }
 
-  @Before
+  @BeforeEach
   public void setup() throws IOException {
     conf = new HdfsConfiguration();
     conf.setBoolean(
@@ -119,7 +115,7 @@ public class TestDFSInputStreamBlockLocations {
     fs = dfsCluster.getFileSystem();
   }
 
-  @After
+  @AfterEach
   public void teardown() throws IOException {
     if (dfsClient != null) {
       dfsClient.close();
@@ -172,21 +168,21 @@ public class TestDFSInputStreamBlockLocations {
       DatanodeInfo[] firstBlkDNInfos = firstLocatedBlk.getLocations();
       while (fin.getPos() < firstBlockMark) {
         bytesRead = fin.read(readBuffer);
-        Assert.assertTrue("Unexpected number of read bytes",
-            chunkReadSize >= bytesRead);
+        Assertions.assertTrue(chunkReadSize >= bytesRead,
+            "Unexpected number of read bytes");
         if (currDNInfo == null) {
           currDNInfo = fin.getCurrentDatanode();
-          assertNotNull("current FIS datanode is null", currDNInfo);
+          assertNotNull(currDNInfo, "current FIS datanode is null");
           continue;
         }
         prevDNInfo = currDNInfo;
         currDNInfo = fin.getCurrentDatanode();
-        assertEquals("the DFSInput stream does not read from same node",
-            prevDNInfo, currDNInfo);
+        assertEquals(prevDNInfo, currDNInfo,
+            "the DFSInput stream does not read from same node");
       }
 
-      assertEquals("InputStream exceeds expected position",
-          firstBlockMark, fin.getPos());
+      assertEquals(firstBlockMark, fin.getPos(),
+          "InputStream exceeds expected position");
       // get the second block locations
       LocatedBlock secondLocatedBlk =
           fin.locatedBlocks.getLocatedBlocks().get(1);
@@ -216,23 +212,23 @@ public class TestDFSInputStreamBlockLocations {
       }
       while (fin.getPos() < secondBlockMark) {
         bytesRead = fin.read(readBuffer);
-        assertTrue("dead node used to read at position: " + fin.getPos(),
-            fin.deadNodesContain(deadNodeInfo));
-        Assert.assertTrue("Unexpected number of read bytes",
-            chunkReadSize >= bytesRead);
+        assertTrue(fin.deadNodesContain(deadNodeInfo),
+            "dead node used to read at position: " + fin.getPos());
+        Assertions.assertTrue(chunkReadSize >= bytesRead,
+            "Unexpected number of read bytes");
         prevDNInfo = currDNInfo;
         currDNInfo = fin.getCurrentDatanode();
         assertNotEquals(deadNodeInfo, currDNInfo);
         if (firstIteration) {
-          // currDNInfo has to be different unless first block locs is different
-          assertFalse("FSInputStream should pick a different DN",
-              firstBlkDNInfos[0].equals(deadNodeInfo)
-                  && prevDNInfo.equals(currDNInfo));
+          // currDNInfo has to be different unless first block locs is different
+          assertFalse(firstBlkDNInfos[0].equals(deadNodeInfo)
+                  && prevDNInfo.equals(currDNInfo),
+              "FSInputStream should pick a different DN");
           firstIteration = false;
         }
       }
-      assertEquals("InputStream exceeds expected position",
-          secondBlockMark, fin.getPos());
+      assertEquals(secondBlockMark, fin.getPos(),
+          "InputStream exceeds expected position");
       // restart the dead node with the same port
       assertTrue(dfsCluster.restartDataNode(stoppedDNProps, true));
       dfsCluster.waitActive();
@@ -244,13 +240,13 @@ public class TestDFSInputStreamBlockLocations {
       while (fin.getPos() < thirdBlockMark) {
         bytesRead = fin.read(readBuffer);
         if (this.enableBlkExpiration) {
-          assertEquals("node is removed from deadNodes after 1st iteration",
-              firstIteration, fin.deadNodesContain(deadNodeInfo));
+          assertEquals(firstIteration, fin.deadNodesContain(deadNodeInfo),
+              "node is removed from deadNodes after 1st iteration");
         } else {
           assertTrue(fin.deadNodesContain(deadNodeInfo));
         }
-        Assert.assertTrue("Unexpected number of read bytes",
-            chunkReadSize >= bytesRead);
+        Assertions.assertTrue(chunkReadSize >= bytesRead,
+            "Unexpected number of read bytes");
         prevDNInfo = currDNInfo;
         currDNInfo = fin.getCurrentDatanode();
         if (!this.enableBlkExpiration) {
@@ -266,8 +262,8 @@ public class TestDFSInputStreamBlockLocations {
           }
         }
       }
-      assertEquals("InputStream exceeds expected position",
-          thirdBlockMark, fin.getPos());
+      assertEquals(thirdBlockMark, fin.getPos(),
+          "InputStream exceeds expected position");
     } finally {
       if (fout != null) {
         fout.close();

+ 10 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSMkdirs.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.*;
+import static org.junit.jupiter.api.Assertions.*;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -30,7 +30,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.util.Time;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
 
 /**
  * This class tests that the DFS command mkdirs only creates valid
@@ -106,10 +106,10 @@ public class TestDFSMkdirs {
       } catch (IOException e) {
         expectedException = e;
       }
-      assertTrue("Create a directory when parent dir exists as file using"
-          + " mkdir() should throw ParentNotDirectoryException ",
-          expectedException != null
-              && expectedException instanceof ParentNotDirectoryException);
+      assertTrue(expectedException instanceof ParentNotDirectoryException,
+          "Create a directory when parent dir exists as file using"
+              + " mkdir() should throw ParentNotDirectoryException");
      // Create a dir in a non-existent directory, should fail
       expectedException = null;
       try {
@@ -118,10 +118,10 @@ public class TestDFSMkdirs {
       } catch (IOException e) {
         expectedException = e;
       }
-      assertTrue("Create a directory in a non-exist parent dir using"
-          + " mkdir() should throw FileNotFoundException ",
-          expectedException != null
-              && expectedException instanceof FileNotFoundException);
+      assertTrue(expectedException instanceof FileNotFoundException,
+          "Create a directory in a non-existent parent dir using"
+              + " mkdir() should throw FileNotFoundException");
     } finally {
       dfs.close();
       cluster.shutdown();

+ 19 - 22
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java

@@ -57,22 +57,19 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.test.Whitebox;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
 
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Write.RECOVER_LEASE_ON_CLOSE_EXCEPTION_KEY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyLong;
 import org.mockito.Mockito;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
-import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.doThrow;
@@ -85,7 +82,7 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRIT
 public class TestDFSOutputStream {
   static MiniDFSCluster cluster;
 
-  @BeforeClass
+  @BeforeAll
   public static void setup() throws IOException {
     Configuration conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
@@ -107,7 +104,7 @@ public class TestDFSOutputStream {
     LastExceptionInStreamer ex = (LastExceptionInStreamer) Whitebox
         .getInternalState(streamer, "lastException");
     Throwable thrown = (Throwable) Whitebox.getInternalState(ex, "thrown");
-    Assert.assertNull(thrown);
+    Assertions.assertNull(thrown);
 
     dos.close();
 
@@ -119,7 +116,7 @@ public class TestDFSOutputStream {
       assertEquals(e, dummy);
     }
     thrown = (Throwable) Whitebox.getInternalState(ex, "thrown");
-    Assert.assertNull(thrown);
+    Assertions.assertNull(thrown);
     dos.close();
   }
 
@@ -145,10 +142,10 @@ public class TestDFSOutputStream {
     Field field = dos.getClass().getDeclaredField("packetSize");
     field.setAccessible(true);
 
-    Assert.assertTrue((Integer) field.get(dos) + 33 < packetSize);
+    Assertions.assertTrue((Integer) field.get(dos) + 33 < packetSize);
     // If PKT_MAX_HEADER_LEN is 257, actual packet size come to over 64KB
     // without a fix on HDFS-7308.
-    Assert.assertTrue((Integer) field.get(dos) + 257 < packetSize);
+    Assertions.assertTrue((Integer) field.get(dos) + 257 < packetSize);
   }
 
   /**
@@ -246,21 +243,21 @@ public class TestDFSOutputStream {
       final Field writePacketSizeField = dos.getClass()
           .getDeclaredField("writePacketSize");
       writePacketSizeField.setAccessible(true);
-      Assert.assertEquals(writePacketSizeField.getInt(dos),
+      Assertions.assertEquals(writePacketSizeField.getInt(dos),
           finalWritePacketSize);
 
       /* get and verify chunksPerPacket */
       final Field chunksPerPacketField = dos.getClass()
           .getDeclaredField("chunksPerPacket");
       chunksPerPacketField.setAccessible(true);
-      Assert.assertEquals(chunksPerPacketField.getInt(dos),
+      Assertions.assertEquals(chunksPerPacketField.getInt(dos),
           (finalWritePacketSize - packateMaxHeaderLength) / chunkSize);
 
       /* get and verify packetSize */
       final Field packetSizeField = dos.getClass()
           .getDeclaredField("packetSize");
       packetSizeField.setAccessible(true);
-      Assert.assertEquals(packetSizeField.getInt(dos),
+      Assertions.assertEquals(packetSizeField.getInt(dos),
           chunksPerPacketField.getInt(dos) * chunkSize);
     } finally {
       if (dfsCluster != null) {
@@ -297,7 +294,7 @@ public class TestDFSOutputStream {
     DFSPacket packet = mock(DFSPacket.class);
     dataQueue.add(packet);
     stream.run();
-    Assert.assertTrue(congestedNodes.isEmpty());
+    Assertions.assertTrue(congestedNodes.isEmpty());
   }
 
   @Test
@@ -359,11 +356,11 @@ public class TestDFSOutputStream {
   public void testStreamFlush() throws Exception {
     FileSystem fs = cluster.getFileSystem();
     FSDataOutputStream os = fs.create(new Path("/normal-file"));
-    // Verify output stream supports hsync() and hflush().
-    assertTrue("DFSOutputStream should support hflush()!",
-        os.hasCapability(StreamCapability.HFLUSH.getValue()));
-    assertTrue("DFSOutputStream should support hsync()!",
-        os.hasCapability(StreamCapability.HSYNC.getValue()));
+    // Verify output stream supports hsync() and hflush().
+    assertTrue(os.hasCapability(StreamCapability.HFLUSH.getValue()),
+        "DFSOutputStream should support hflush()!");
+    assertTrue(os.hasCapability(StreamCapability.HSYNC.getValue()),
+        "DFSOutputStream should support hsync()!");
     byte[] bytes = new byte[1024];
     InputStream is = new ByteArrayInputStream(bytes);
     IOUtils.copyBytes(is, os, bytes.length);
@@ -422,7 +419,7 @@ public class TestDFSOutputStream {
     }
   }
 
-  @AfterClass
+  @AfterAll
   public static void tearDown() {
     if (cluster != null) {
       cluster.shutdown();

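Review note: the lifecycle mapping applied across these files is @Before -> @BeforeEach,
@After -> @AfterEach, @BeforeClass -> @BeforeAll and @AfterClass -> @AfterAll. Several files
(e.g. TestDFSClientRetries, TestDFSOutputStream) still import org.junit.Test next to the new
Jupiter annotations; the vintage engine that runs those JUnit 4 tests ignores Jupiter lifecycle
annotations, so @Test should be migrated in the same pass. A hypothetical skeleton of a fully
migrated class, for illustration only:

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.Test;

    public class ExampleMigratedTest {  // hypothetical class, not part of this PR
      @BeforeAll                        // was @BeforeClass; still must be static
      public static void setup() { }

      @Test                             // org.junit.jupiter.api.Test; timeout moves to @Timeout
      public void testSomething() { }

      @AfterAll                         // was @AfterClass
      public static void tearDown() { }
    }
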
Some files were not shown because too many files changed in this diff