@@ -30,12 +30,14 @@ import static org.apache.hadoop.hdfs.TestDistributedFileSystem.checkStatistics;
 import static org.apache.hadoop.hdfs.TestDistributedFileSystem.getOpStatistics;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 
 import java.io.EOFException;
 import java.io.File;
@@ -141,9 +143,9 @@ import org.slf4j.event.Level;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
 import org.mockito.Mockito;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
@@ -168,7 +170,7 @@ public class TestWebHDFS {
 
   private static MiniDFSCluster cluster = null;
 
-  @After
+  @AfterEach
   public void tearDown() {
     if (null != cluster) {
       cluster.shutdown();
@@ -212,7 +214,8 @@ public class TestWebHDFS {
     }
   }
 
-  @Test(timeout=300000)
+  @Test
+  @Timeout(value = 300)
   public void testLargeFile() throws Exception {
     largeFileTest(200L << 20); //200MB file length
   }
@@ -229,7 +232,7 @@ public class TestWebHDFS {
     final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
         WebHdfsConstants.WEBHDFS_SCHEME);
     final Path dir = new Path("/test/largeFile");
-    Assert.assertTrue(fs.mkdirs(dir));
+    assertTrue(fs.mkdirs(dir));
 
     final byte[] data = new byte[1 << 20];
     RANDOM.nextBytes(data);
@@ -255,7 +258,7 @@ public class TestWebHDFS {
     }
     t.end(fileLength);
 
-    Assert.assertEquals(fileLength, fs.getFileStatus(p).getLen());
+    assertEquals(fileLength, fs.getFileStatus(p).getLen());
 
     final long smallOffset = RANDOM.nextInt(1 << 20) + (1 << 20);
     final long largeOffset = fileLength - smallOffset;
@@ -273,7 +276,7 @@ public class TestWebHDFS {
       int j = (int)(offset % actual.length);
       for(int i = 0; i < n; i++) {
         if (expected[j] != actual[i]) {
-          Assert.fail("expected[" + j + "]=" + expected[j]
+          fail("expected[" + j + "]=" + expected[j]
               + " != actual[" + i + "]=" + actual[i]
               + ", offset=" + offset + ", remaining=" + remaining + ", n=" + n);
         }
@@ -331,14 +334,16 @@ public class TestWebHDFS {
   }
 
   /** Test client retry with namenode restarting. */
-  @Test(timeout=300000)
+  @Test
+  @Timeout(value = 300)
   public void testNamenodeRestart() throws Exception {
     GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG, Level.TRACE);
     final Configuration conf = WebHdfsTestUtil.createConf();
     TestDFSClientRetries.namenodeRestartTest(conf, true);
   }
 
-  @Test(timeout=300000)
+  @Test
+  @Timeout(value = 300)
   public void testLargeDirectory() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
     final int listLimit = 2;
@@ -367,12 +372,12 @@ public class TestWebHDFS {
       FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
           WebHdfsConstants.WEBHDFS_SCHEME);
       Path d = new Path("/my-dir");
-      Assert.assertTrue(fs.mkdirs(d));
+      assertTrue(fs.mkdirs(d));
       // Iterator should have no items when dir is empty
       RemoteIterator<FileStatus> it = fs.listStatusIterator(d);
       assertFalse(it.hasNext());
       Path p = new Path(d, "file-" + 0);
-      Assert.assertTrue(fs.createNewFile(p));
+      assertTrue(fs.createNewFile(p));
       // Iterator should have an item when dir is not empty
       it = fs.listStatusIterator(d);
       assertTrue(it.hasNext());
@@ -380,11 +385,11 @@ public class TestWebHDFS {
       assertFalse(it.hasNext());
       for (int i = 1; i < listLimit * 3; i++) {
         p = new Path(d, "file-" + i);
-        Assert.assertTrue(fs.createNewFile(p));
+        assertTrue(fs.createNewFile(p));
       }
       // Check the FileStatus[] listing
       FileStatus[] statuses = fs.listStatus(d);
-      Assert.assertEquals(listLimit * 3, statuses.length);
+      assertEquals(listLimit * 3, statuses.length);
       // Check the iterator-based listing
       GenericTestUtils.setLogLevel(WebHdfsFileSystem.LOG, Level.TRACE);
       GenericTestUtils.setLogLevel(NamenodeWebHdfsMethods.LOG,
@@ -393,18 +398,18 @@ public class TestWebHDFS {
       int count = 0;
       while (it.hasNext()) {
         FileStatus stat = it.next();
-        assertEquals("FileStatuses not equal", statuses[count], stat);
+        assertEquals(statuses[count], stat, "FileStatuses not equal");
         count++;
       }
-      assertEquals("Different # of statuses!", statuses.length, count);
+      assertEquals(statuses.length, count, "Different # of statuses!");
       // Do some more basic iterator tests
       it = fs.listStatusIterator(d);
       // Try advancing the iterator without calling hasNext()
       for (int i = 0; i < statuses.length; i++) {
         FileStatus stat = it.next();
-        assertEquals("FileStatuses not equal", statuses[i], stat);
+        assertEquals(statuses[i], stat, "FileStatuses not equal");
       }
-      assertFalse("No more items expected", it.hasNext());
+      assertFalse(it.hasNext(), "No more items expected");
       // Try doing next when out of items
       try {
         it.next();
@@ -464,7 +469,8 @@ public class TestWebHDFS {
     }
   }
 
-  @Test(timeout=300000)
+  @Test
+  @Timeout(value = 300)
   public void testCustomizedUserAndGroupNames() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
@@ -492,7 +498,7 @@ public class TestWebHDFS {
         FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
             WebHdfsConstants.WEBHDFS_SCHEME);
         Path d = new Path("/my-dir");
-        Assert.assertTrue(fs.mkdirs(d));
+        assertTrue(fs.mkdirs(d));
         // Test also specifying a default ACL with a numeric username
         // and another of a groupname with '@'
         fs.modifyAclEntries(d, ImmutableList.of(new AclEntry.Builder()
@@ -509,7 +515,8 @@ public class TestWebHDFS {
    * Test for catching "no datanode" IOException, when to create a file
    * but datanode is not running for some reason.
    */
-  @Test(timeout=300000)
+  @Test
+  @Timeout(value = 300)
   public void testCreateWithNoDN() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
     try {
@@ -519,7 +526,7 @@ public class TestWebHDFS {
       FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
           WebHdfsConstants.WEBHDFS_SCHEME);
       fs.create(new Path("/testnodatanode"));
-      Assert.fail("No exception was thrown");
+      fail("No exception was thrown");
     } catch (IOException ex) {
       GenericTestUtils.assertExceptionContains("Failed to find datanode", ex);
     }
@@ -580,7 +587,7 @@ public class TestWebHDFS {
       assertTrue(webHdfs.getFileStatus(bar).isSnapshotEnabled());
       webHdfs.createSnapshot(bar, "s1");
       final Path s1path = SnapshotTestHelper.getSnapshotRoot(bar, "s1");
-      Assert.assertTrue(webHdfs.exists(s1path));
+      assertTrue(webHdfs.exists(s1path));
       SnapshottableDirectoryStatus[] snapshottableDirs =
           dfs.getSnapshottableDirListing();
       assertEquals(1, snapshottableDirs.length);
@@ -616,7 +623,8 @@ public class TestWebHDFS {
     }
   }
 
-  @Test (timeout = 60000)
+  @Test
+  @Timeout(value = 60)
   public void testWebHdfsErasureCodingFiles() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
@@ -641,43 +649,39 @@ public class TestWebHDFS {
 
     FileStatus expectedECDirStatus = dfs.getFileStatus(ecDir);
     FileStatus actualECDirStatus = webHdfs.getFileStatus(ecDir);
-    Assert.assertEquals(expectedECDirStatus.isErasureCoded(),
+    assertEquals(expectedECDirStatus.isErasureCoded(),
         actualECDirStatus.isErasureCoded());
     ContractTestUtils.assertErasureCoded(dfs, ecDir);
-    assertTrue(
+    assertTrue(actualECDirStatus.toString().contains("isErasureCoded=true"),
         ecDir + " should have erasure coding set in "
-            + "FileStatus#toString(): " + actualECDirStatus,
-        actualECDirStatus.toString().contains("isErasureCoded=true"));
+            + "FileStatus#toString(): " + actualECDirStatus);
 
     FileStatus expectedECFileStatus = dfs.getFileStatus(ecFile);
     FileStatus actualECFileStatus = webHdfs.getFileStatus(ecFile);
-    Assert.assertEquals(expectedECFileStatus.isErasureCoded(),
+    assertEquals(expectedECFileStatus.isErasureCoded(),
        actualECFileStatus.isErasureCoded());
     ContractTestUtils.assertErasureCoded(dfs, ecFile);
-    assertTrue(
+    assertTrue(actualECFileStatus.toString().contains("isErasureCoded=true"),
        ecFile + " should have erasure coding set in "
-            + "FileStatus#toString(): " + actualECFileStatus,
-        actualECFileStatus.toString().contains("isErasureCoded=true"));
+            + "FileStatus#toString(): " + actualECFileStatus);
 
     FileStatus expectedNormalDirStatus = dfs.getFileStatus(normalDir);
     FileStatus actualNormalDirStatus = webHdfs.getFileStatus(normalDir);
-    Assert.assertEquals(expectedNormalDirStatus.isErasureCoded(),
+    assertEquals(expectedNormalDirStatus.isErasureCoded(),
        actualNormalDirStatus.isErasureCoded());
     ContractTestUtils.assertNotErasureCoded(dfs, normalDir);
-    assertTrue(
+    assertTrue(actualNormalDirStatus.toString().contains("isErasureCoded=false"),
        normalDir + " should have erasure coding unset in "
-            + "FileStatus#toString(): " + actualNormalDirStatus,
-        actualNormalDirStatus.toString().contains("isErasureCoded=false"));
+            + "FileStatus#toString(): " + actualNormalDirStatus);
 
     FileStatus expectedNormalFileStatus = dfs.getFileStatus(normalFile);
     FileStatus actualNormalFileStatus = webHdfs.getFileStatus(normalDir);
-    Assert.assertEquals(expectedNormalFileStatus.isErasureCoded(),
+    assertEquals(expectedNormalFileStatus.isErasureCoded(),
        actualNormalFileStatus.isErasureCoded());
     ContractTestUtils.assertNotErasureCoded(dfs, normalFile);
-    assertTrue(
+    assertTrue(actualNormalFileStatus.toString().contains("isErasureCoded=false"),
        normalFile + " should have erasure coding unset in "
-            + "FileStatus#toString(): " + actualNormalFileStatus,
-        actualNormalFileStatus.toString().contains("isErasureCoded=false"));
+            + "FileStatus#toString(): " + actualNormalFileStatus);
   }
 
   /**
@@ -710,9 +714,9 @@ public class TestWebHDFS {
       // create snapshot without specifying name
      final Path spath = webHdfs.createSnapshot(foo, null);
 
-      Assert.assertTrue(webHdfs.exists(spath));
+      assertTrue(webHdfs.exists(spath));
      final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
-      Assert.assertTrue(webHdfs.exists(s1path));
+      assertTrue(webHdfs.exists(s1path));
   }
 
   /**
@@ -733,16 +737,16 @@ public class TestWebHDFS {
 
      webHdfs.createSnapshot(foo, "s1");
      final Path spath = webHdfs.createSnapshot(foo, null);
-      Assert.assertTrue(webHdfs.exists(spath));
+      assertTrue(webHdfs.exists(spath));
      final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
-      Assert.assertTrue(webHdfs.exists(s1path));
+      assertTrue(webHdfs.exists(s1path));
 
      // delete operation snapshot name as null
      try {
        webHdfs.deleteSnapshot(foo, null);
        fail("Expected IllegalArgumentException");
      } catch (RemoteException e) {
-        Assert.assertEquals("Required param snapshotname for "
+        assertEquals("Required param snapshotname for "
            + "op: DELETESNAPSHOT is null or empty", e.getLocalizedMessage());
      }
@@ -778,7 +782,7 @@ public class TestWebHDFS {
      dfs.allowSnapshot(foo);
      webHdfs.createSnapshot(foo, "s1");
      final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
-      Assert.assertTrue(webHdfs.exists(s1path));
+      assertTrue(webHdfs.exists(s1path));
 
      Path file3 = new Path(foo, "file3");
      DFSTestUtil.createFile(dfs, file3, 100, (short) 1, 0);
@@ -791,9 +795,9 @@ public class TestWebHDFS {
      SnapshotDiffReport diffReport =
          webHdfs.getSnapshotDiffReport(foo, "s1", "s2");
 
-      Assert.assertEquals("/foo", diffReport.getSnapshotRoot());
-      Assert.assertEquals("s1", diffReport.getFromSnapshot());
-      Assert.assertEquals("s2", diffReport.getLaterSnapshotName());
+      assertEquals("/foo", diffReport.getSnapshotRoot());
+      assertEquals("s1", diffReport.getFromSnapshot());
+      assertEquals("s2", diffReport.getLaterSnapshotName());
      DiffReportEntry entry0 =
          new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes(""));
      DiffReportEntry entry1 =
@@ -804,18 +808,18 @@ public class TestWebHDFS {
          DFSUtil.string2Bytes("file2"), DFSUtil.string2Bytes("file4"));
      DiffReportEntry entry4 =
          new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file3"));
-      Assert.assertTrue(diffReport.getDiffList().contains(entry0));
-      Assert.assertTrue(diffReport.getDiffList().contains(entry1));
-      Assert.assertTrue(diffReport.getDiffList().contains(entry2));
-      Assert.assertTrue(diffReport.getDiffList().contains(entry3));
-      Assert.assertTrue(diffReport.getDiffList().contains(entry4));
-      Assert.assertEquals(diffReport.getDiffList().size(), 5);
+      assertTrue(diffReport.getDiffList().contains(entry0));
+      assertTrue(diffReport.getDiffList().contains(entry1));
+      assertTrue(diffReport.getDiffList().contains(entry2));
+      assertTrue(diffReport.getDiffList().contains(entry3));
+      assertTrue(diffReport.getDiffList().contains(entry4));
+      assertEquals(diffReport.getDiffList().size(), 5);
 
      // Test with fromSnapshot and toSnapshot as null.
      diffReport = webHdfs.getSnapshotDiffReport(foo, null, "s2");
-      Assert.assertEquals(diffReport.getDiffList().size(), 0);
+      assertEquals(diffReport.getDiffList().size(), 0);
      diffReport = webHdfs.getSnapshotDiffReport(foo, "s1", null);
-      Assert.assertEquals(diffReport.getDiffList().size(), 5);
+      assertEquals(diffReport.getDiffList().size(), 5);
   }
 
   /**
@@ -835,7 +839,7 @@ public class TestWebHDFS {
      dfs.mkdirs(bar);
      SnapshottableDirectoryStatus[] statuses =
          webHdfs.getSnapshottableDirectoryList();
-      Assert.assertNull(statuses);
+      assertNull(statuses);
      dfs.allowSnapshot(foo);
      dfs.allowSnapshot(bar);
      Path file0 = new Path(foo, "file0");
@@ -847,37 +851,37 @@ public class TestWebHDFS {
          dfs.getSnapshottableDirListing();
 
      for (int i = 0; i < dfsStatuses.length; i++) {
-        Assert.assertEquals(statuses[i].getSnapshotNumber(),
+        assertEquals(statuses[i].getSnapshotNumber(),
            dfsStatuses[i].getSnapshotNumber());
-        Assert.assertEquals(statuses[i].getSnapshotQuota(),
+        assertEquals(statuses[i].getSnapshotQuota(),
            dfsStatuses[i].getSnapshotQuota());
-        Assert.assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
+        assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
            dfsStatuses[i].getParentFullPath()));
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
+        assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
            statuses[i].getDirStatus().getChildrenNum());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
+        assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
            statuses[i].getDirStatus().getModificationTime());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isDir(),
+        assertEquals(dfsStatuses[i].getDirStatus().isDir(),
            statuses[i].getDirStatus().isDir());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
+        assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
            statuses[i].getDirStatus().getAccessTime());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
+        assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
            statuses[i].getDirStatus().getPermission());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
+        assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
            statuses[i].getDirStatus().getOwner());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
+        assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
            statuses[i].getDirStatus().getGroup());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPath(),
+        assertEquals(dfsStatuses[i].getDirStatus().getPath(),
            statuses[i].getDirStatus().getPath());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
+        assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
            statuses[i].getDirStatus().getFileId());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
+        assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
            statuses[i].getDirStatus().hasAcl());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
+        assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
            statuses[i].getDirStatus().isEncrypted());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
+        assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
            statuses[i].getDirStatus().isErasureCoded());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
+        assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
            statuses[i].getDirStatus().isSnapshotEnabled());
      }
    }
@@ -901,37 +905,37 @@ public class TestWebHDFS {
      SnapshotStatus[] dfsStatuses = dfs.getSnapshotListing(foo);
 
      for (int i = 0; i < dfsStatuses.length; i++) {
-        Assert.assertEquals(statuses[i].getSnapshotID(),
+        assertEquals(statuses[i].getSnapshotID(),
            dfsStatuses[i].getSnapshotID());
-        Assert.assertEquals(statuses[i].isDeleted(),
+        assertEquals(statuses[i].isDeleted(),
            dfsStatuses[i].isDeleted());
-        Assert.assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
+        assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
            dfsStatuses[i].getParentFullPath()));
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
+        assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
            statuses[i].getDirStatus().getChildrenNum());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
+        assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
            statuses[i].getDirStatus().getModificationTime());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isDir(),
+        assertEquals(dfsStatuses[i].getDirStatus().isDir(),
            statuses[i].getDirStatus().isDir());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
+        assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
            statuses[i].getDirStatus().getAccessTime());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
+        assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
            statuses[i].getDirStatus().getPermission());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
+        assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
            statuses[i].getDirStatus().getOwner());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
+        assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
            statuses[i].getDirStatus().getGroup());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPath(),
+        assertEquals(dfsStatuses[i].getDirStatus().getPath(),
            statuses[i].getDirStatus().getPath());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
+        assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
            statuses[i].getDirStatus().getFileId());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
+        assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
            statuses[i].getDirStatus().hasAcl());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
+        assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
            statuses[i].getDirStatus().isEncrypted());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
+        assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
            statuses[i].getDirStatus().isErasureCoded());
-        Assert.assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
+        assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
            statuses[i].getDirStatus().isSnapshotEnabled());
      }
    } finally {
@@ -979,14 +983,14 @@ public class TestWebHDFS {
 
      webHdfs.createSnapshot(foo, "s1");
      final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
-      Assert.assertTrue(webHdfs.exists(s1path));
+      assertTrue(webHdfs.exists(s1path));
 
      // rename s1 to s2 with oldsnapshotName as null
      try {
        webHdfs.renameSnapshot(foo, null, "s2");
        fail("Expected IllegalArgumentException");
      } catch (RemoteException e) {
-        Assert.assertEquals("Required param oldsnapshotname for "
+        assertEquals("Required param oldsnapshotname for "
            + "op: RENAMESNAPSHOT is null or empty", e.getLocalizedMessage());
      }
 
@@ -994,7 +998,7 @@ public class TestWebHDFS {
      webHdfs.renameSnapshot(foo, "s1", "s2");
      assertFalse(webHdfs.exists(s1path));
      final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
-      Assert.assertTrue(webHdfs.exists(s2path));
+      assertTrue(webHdfs.exists(s2path));
 
      webHdfs.deleteSnapshot(foo, "s2");
      assertFalse(webHdfs.exists(s2path));
@@ -1035,7 +1039,7 @@ public class TestWebHDFS {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsConstants.WEBHDFS_SCHEME);
-    Assert.assertNull(webHdfs.getDelegationToken(null));
+    assertNull(webHdfs.getDelegationToken(null));
  }
 
  @Test
@@ -1048,7 +1052,7 @@ public class TestWebHDFS {
      webHdfs.getDelegationToken(null);
      fail("No exception is thrown.");
    } catch (AccessControlException ace) {
-      Assert.assertTrue(ace.getMessage().startsWith(
+      assertTrue(ace.getMessage().startsWith(
          WebHdfsFileSystem.CANT_FALLBACK_TO_INSECURE_MSG));
    }
  }
@@ -1074,12 +1078,12 @@ public class TestWebHDFS {
        new LengthParam((long) LENGTH)));
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setInstanceFollowRedirects(true);
-    Assert.assertEquals(LENGTH, conn.getContentLength());
+    assertEquals(LENGTH, conn.getContentLength());
    byte[] subContents = new byte[LENGTH];
    byte[] realContents = new byte[LENGTH];
    System.arraycopy(CONTENTS, OFFSET, subContents, 0, LENGTH);
    IOUtils.readFully(conn.getInputStream(), realContents);
-    Assert.assertArrayEquals(subContents, realContents);
+    assertArrayEquals(subContents, realContents);
  }
 
  @Test
@@ -1093,8 +1097,7 @@ public class TestWebHDFS {
    dfs.mkdirs(path);
    dfs.setQuotaByStorageType(path, StorageType.DISK, 100000);
    ContentSummary contentSummary = webHdfs.getContentSummary(path);
-    Assert
-        .assertTrue((contentSummary.getTypeQuota(StorageType.DISK) == 100000));
+    assertTrue((contentSummary.getTypeQuota(StorageType.DISK) == 100000));
  }
 
  /**
@@ -1230,16 +1233,16 @@ public class TestWebHDFS {
      byte[] buf = new byte[1024];
      try {
        in.readFully(1020, buf, 0, 5);
-        Assert.fail("EOF expected");
+        fail("EOF expected");
      } catch (EOFException ignored) {}
 
      // mix pread with stateful read
      int length = in.read(buf, 0, 512);
      in.readFully(100, new byte[1024], 0, 100);
      int preadLen = in.read(200, new byte[1024], 0, 200);
-      Assert.assertTrue(preadLen > 0);
+      assertTrue(preadLen > 0);
      IOUtils.readFully(in, buf, length, 1024 - length);
-      Assert.assertArrayEquals(content, buf);
+      assertArrayEquals(content, buf);
    } finally {
      if (in != null) {
        in.close();
@@ -1247,7 +1250,8 @@ public class TestWebHDFS {
      }
    }
 
-  @Test(timeout = 30000)
+  @Test
+  @Timeout(value = 30)
  public void testGetHomeDirectory() throws Exception {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).build();
@@ -1291,7 +1295,7 @@ public class TestWebHDFS {
    BlockLocation[] locations = fs.getFileBlockLocations(PATH, OFFSET, LENGTH);
    for (BlockLocation location : locations) {
      StorageType[] storageTypes = location.getStorageTypes();
-      Assert.assertTrue(storageTypes != null && storageTypes.length > 0
+      assertTrue(storageTypes != null && storageTypes.length > 0
          && storageTypes[0] == StorageType.DISK);
    }
  }
@@ -1399,19 +1403,19 @@ public class TestWebHDFS {
    for(int i=0; i<locations1.length; i++) {
      BlockLocation location1 = locations1[i];
      BlockLocation location2 = locations2[i];
-      Assert.assertEquals(location1.getLength(),
+      assertEquals(location1.getLength(),
          location2.getLength());
-      Assert.assertEquals(location1.getOffset(),
+      assertEquals(location1.getOffset(),
          location2.getOffset());
-      Assert.assertArrayEquals(location1.getCachedHosts(),
+      assertArrayEquals(location1.getCachedHosts(),
          location2.getCachedHosts());
-      Assert.assertArrayEquals(location1.getHosts(),
+      assertArrayEquals(location1.getHosts(),
          location2.getHosts());
-      Assert.assertArrayEquals(location1.getNames(),
+      assertArrayEquals(location1.getNames(),
          location2.getNames());
-      Assert.assertArrayEquals(location1.getTopologyPaths(),
+      assertArrayEquals(location1.getTopologyPaths(),
          location2.getTopologyPaths());
-      Assert.assertArrayEquals(location1.getStorageTypes(),
+      assertArrayEquals(location1.getStorageTypes(),
          location2.getStorageTypes());
    }
  }
@@ -1448,7 +1452,8 @@ public class TestWebHDFS {
    });
  }
 
-  @Test(timeout=90000)
+  @Test
+  @Timeout(value = 90)
  public void testWebHdfsReadRetries() throws Exception {
    // ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
    final Configuration conf = WebHdfsTestUtil.createConf();
@@ -1475,18 +1480,17 @@ public class TestWebHDFS {
 
    // get file status and check that it was written properly.
    final FileStatus s1 = fs.getFileStatus(file1);
-    assertEquals("Write failed for file " + file1, length, s1.getLen());
+    assertEquals(length, s1.getLen(), "Write failed for file " + file1);
 
    // Ensure file can be read through WebHdfsInputStream
    FSDataInputStream in = fs.open(file1);
-    assertTrue("Input stream is not an instance of class WebHdfsInputStream",
-        in.getWrappedStream() instanceof WebHdfsInputStream);
+    assertTrue(in.getWrappedStream() instanceof WebHdfsInputStream,
+        "Input stream is not an instance of class WebHdfsInputStream");
    int count = 0;
    for (; in.read() != -1; count++)
      ;
-    assertEquals("Read failed for file " + file1, s1.getLen(), count);
-    assertEquals("Sghould not be able to read beyond end of file", in.read(),
-        -1);
+    assertEquals(s1.getLen(), count, "Read failed for file " + file1);
+    assertEquals(in.read(), -1, "Should not be able to read beyond end of file");
    in.close();
    try {
      in.read();
@@ -1575,9 +1579,9 @@ public class TestWebHDFS {
    } catch (Exception e) {
      assertTrue(e.getMessage().contains(msg));
    }
-    assertEquals(msg + ": Read should " + (shouldAttemptRetry ? "" : "not ")
-        + "have called shouldRetry. ",
-        attemptedRetry, shouldAttemptRetry);
+    assertEquals(attemptedRetry, shouldAttemptRetry,
+        msg + ": Read should " + (shouldAttemptRetry ? "" : "not ")
+        + "have called shouldRetry. ");
 
    verify(rr, times(numTimesTried)).getResponse((HttpURLConnection) any());
    webIn.close();
@@ -1592,21 +1596,20 @@ public class TestWebHDFS {
 
    String response =
        IOUtils.toString(conn.getInputStream(), StandardCharsets.UTF_8);
    LOG.info("Response was : " + response);
-    Assert.assertEquals(
-        "Response wasn't " + HttpURLConnection.HTTP_OK,
-        HttpURLConnection.HTTP_OK, conn.getResponseCode());
+    assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode(),
+        "Response wasn't " + HttpURLConnection.HTTP_OK);
 
    JSONObject responseJson = new JSONObject(response);
-    Assert.assertTrue("Response didn't give us a location. " + response,
-        responseJson.has("Location"));
+    assertTrue(responseJson.has("Location"),
+        "Response didn't give us a location. " + response);
 
    //Test that the DN allows CORS on Create
    if(TYPE.equals("CREATE")) {
      URL dnLocation = new URL(responseJson.getString("Location"));
      HttpURLConnection dnConn = (HttpURLConnection) dnLocation.openConnection();
      dnConn.setRequestMethod("OPTIONS");
-      Assert.assertEquals("Datanode url : " + dnLocation + " didn't allow "
-          + "CORS", HttpURLConnection.HTTP_OK, dnConn.getResponseCode());
+      assertEquals(HttpURLConnection.HTTP_OK, dnConn.getResponseCode(),
+          "Datanode url : " + dnLocation + " didn't allow " + "CORS");
    }
  }
@@ -1758,7 +1761,7 @@ public class TestWebHDFS {
        WebHdfsConstants.WEBHDFS_SCHEME);
 
    // test getAllStoragePolicies
-    Assert.assertTrue(Arrays.equals(dfs.getAllStoragePolicies().toArray(),
+    assertTrue(Arrays.equals(dfs.getAllStoragePolicies().toArray(),
        webHdfs.getAllStoragePolicies().toArray()));
 
    // test get/set/unset policies
@@ -1771,12 +1774,12 @@ public class TestWebHDFS {
    BlockStoragePolicySpi dfsPolicy = dfs.getStoragePolicy(path);
    // get policy from webhdfs
    BlockStoragePolicySpi webHdfsPolicy = webHdfs.getStoragePolicy(path);
-    Assert.assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME.toString(),
+    assertEquals(HdfsConstants.COLD_STORAGE_POLICY_NAME.toString(),
        webHdfsPolicy.getName());
-    Assert.assertEquals(webHdfsPolicy, dfsPolicy);
+    assertEquals(webHdfsPolicy, dfsPolicy);
    // unset policy
    webHdfs.unsetStoragePolicy(path);
-    Assert.assertEquals(defaultdfsPolicy, webHdfs.getStoragePolicy(path));
+    assertEquals(defaultdfsPolicy, webHdfs.getStoragePolicy(path));
  }
 
  @Test
@@ -1793,7 +1796,7 @@ public class TestWebHDFS {
          HdfsConstants.COLD_STORAGE_POLICY_NAME);
      fail("Should throw exception, when storage policy disabled");
    } catch (IOException e) {
-      Assert.assertTrue(e.getMessage().contains(
+      assertTrue(e.getMessage().contains(
          "Failed to set storage policy since"));
    }
  }
@@ -1807,14 +1810,14 @@ public class TestWebHDFS {
      if (policy.getPolicy().getName().equals(ecpolicy)) {
        found = true;
        if (state.equals("disable")) {
-          Assert.assertTrue(policy.isDisabled());
+          assertTrue(policy.isDisabled());
        } else if (state.equals("enable")) {
-          Assert.assertTrue(policy.isEnabled());
+          assertTrue(policy.isEnabled());
        }
        break;
      }
    }
-    Assert.assertTrue(found);
+    assertTrue(found);
  }
 
  // Test For Enable/Disable EC Policy in DFS.
@@ -1963,36 +1966,28 @@ public class TestWebHDFS {
 
  private void compareFsServerDefaults(FsServerDefaults serverDefaults1,
      FsServerDefaults serverDefaults2) throws Exception {
-    Assert.assertEquals("Block size is different",
-        serverDefaults1.getBlockSize(),
-        serverDefaults2.getBlockSize());
-    Assert.assertEquals("Bytes per checksum are different",
-        serverDefaults1.getBytesPerChecksum(),
-        serverDefaults2.getBytesPerChecksum());
-    Assert.assertEquals("Write packet size is different",
-        serverDefaults1.getWritePacketSize(),
-        serverDefaults2.getWritePacketSize());
-    Assert.assertEquals("Default replication is different",
-        serverDefaults1.getReplication(),
-        serverDefaults2.getReplication());
-    Assert.assertEquals("File buffer size are different",
-        serverDefaults1.getFileBufferSize(),
-        serverDefaults2.getFileBufferSize());
-    Assert.assertEquals("Encrypt data transfer key is different",
-        serverDefaults1.getEncryptDataTransfer(),
-        serverDefaults2.getEncryptDataTransfer());
-    Assert.assertEquals("Trash interval is different",
-        serverDefaults1.getTrashInterval(),
-        serverDefaults2.getTrashInterval());
-    Assert.assertEquals("Checksum type is different",
-        serverDefaults1.getChecksumType(),
-        serverDefaults2.getChecksumType());
-    Assert.assertEquals("Key provider uri is different",
-        serverDefaults1.getKeyProviderUri(),
-        serverDefaults2.getKeyProviderUri());
-    Assert.assertEquals("Default storage policy is different",
-        serverDefaults1.getDefaultStoragePolicyId(),
-        serverDefaults2.getDefaultStoragePolicyId());
+    assertEquals(serverDefaults1.getBlockSize(), serverDefaults2.getBlockSize(),
+        "Block size is different");
+    assertEquals(serverDefaults1.getBytesPerChecksum(),
+        serverDefaults2.getBytesPerChecksum(), "Bytes per checksum are different");
+    assertEquals(serverDefaults1.getWritePacketSize(),
+        serverDefaults2.getWritePacketSize(), "Write packet size is different");
+    assertEquals(serverDefaults1.getReplication(),
+        serverDefaults2.getReplication(), "Default replication is different");
+    assertEquals(serverDefaults1.getFileBufferSize(),
+        serverDefaults2.getFileBufferSize(), "File buffer size are different");
+    assertEquals(serverDefaults1.getEncryptDataTransfer(),
+        serverDefaults2.getEncryptDataTransfer(),
+        "Encrypt data transfer key is different");
+    assertEquals(serverDefaults1.getTrashInterval(),
+        serverDefaults2.getTrashInterval(), "Trash interval is different");
+    assertEquals(serverDefaults1.getChecksumType(),
+        serverDefaults2.getChecksumType(), "Checksum type is different");
+    assertEquals(serverDefaults1.getKeyProviderUri(),
+        serverDefaults2.getKeyProviderUri(), "Key provider uri is different");
+    assertEquals(serverDefaults1.getDefaultStoragePolicyId(),
+        serverDefaults2.getDefaultStoragePolicyId(),
+        "Default storage policy is different");
  }
 
  /**
@@ -2012,7 +2007,7 @@ public class TestWebHDFS {
        .thenThrow(new UnsupportedOperationException());
    try {
      webfs.getServerDefaults();
-      Assert.fail("should have thrown UnSupportedOperationException.");
+      fail("should have thrown UnSupportedOperationException.");
    } catch (UnsupportedOperationException uoe) {
      // Expected exception.
    }
@@ -2046,7 +2041,7 @@ public class TestWebHDFS {
 
    // get file status and check that it was written properly.
    final FileStatus s1 = fs.getFileStatus(file1);
-    assertEquals("Write failed for file " + file1, length, s1.getLen());
+    assertEquals(length, s1.getLen(), "Write failed for file " + file1);
 
    FSDataInputStream in = fs.open(file1);
    in.read(); // Connection is made only when the first read() occurs.
@@ -2074,7 +2069,8 @@ public class TestWebHDFS {
   * Tests that the LISTSTATUS ang GETFILESTATUS WebHDFS calls return the
   * ecPolicy for EC files.
   */
-  @Test(timeout=300000)
+  @Test
+  @Timeout(value = 300)
  public void testECPolicyInFileStatus() throws Exception {
    final Configuration conf = WebHdfsTestUtil.createConf();
    final ErasureCodingPolicy ecPolicy = SystemErasureCodingPolicies
@@ -2111,21 +2107,20 @@ public class TestWebHDFS {
    conn.setInstanceFollowRedirects(false);
    String listStatusResponse = IOUtils.toString(conn.getInputStream(),
        StandardCharsets.UTF_8);
-    Assert.assertEquals("Response wasn't " + HttpURLConnection.HTTP_OK,
-        HttpURLConnection.HTTP_OK, conn.getResponseCode());
+    assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode(),
+        "Response wasn't " + HttpURLConnection.HTTP_OK);
 
    // Verify that ecPolicy is set in the ListStatus response for ec file
    String ecpolicyForECfile = getECPolicyFromFileStatusJson(
        getFileStatusJson(listStatusResponse, ecFile.getName()));
-    assertEquals("EC policy for ecFile should match the set EC policy",
-        ecpolicyForECfile, ecPolicyName);
+    assertEquals(ecpolicyForECfile, ecPolicyName,
+        "EC policy for ecFile should match the set EC policy");
 
    // Verify that ecPolicy is not set in the ListStatus response for non-ec
    // file
    String ecPolicyForNonECfile = getECPolicyFromFileStatusJson(
        getFileStatusJson(listStatusResponse, nonEcFile.getName()));
-    assertEquals("EC policy for nonEcFile should be null (not set)",
-        ecPolicyForNonECfile, null);
+    assertEquals(ecPolicyForNonECfile, null, "EC policy for nonEcFile should be null (not set)");
 
    // Query webhdfs REST API to get fileStatus for ecFile
    URL getFileStatusUrl = new URL("http", addr.getHostString(), addr.getPort(),
@@ -2137,15 +2132,15 @@ public class TestWebHDFS {
    conn.setInstanceFollowRedirects(false);
    String getFileStatusResponse = IOUtils.toString(conn.getInputStream(),
        StandardCharsets.UTF_8);
-    Assert.assertEquals("Response wasn't " + HttpURLConnection.HTTP_OK,
-        HttpURLConnection.HTTP_OK, conn.getResponseCode());
+    assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode(),
+        "Response wasn't " + HttpURLConnection.HTTP_OK);
 
    // Verify that ecPolicy is set in getFileStatus response for ecFile
    JSONObject fileStatusObject = new JSONObject(getFileStatusResponse)
        .getJSONObject("FileStatus");
    ecpolicyForECfile = getECPolicyFromFileStatusJson(fileStatusObject);
-    assertEquals("EC policy for ecFile should match the set EC policy",
-        ecpolicyForECfile, ecPolicyName);
+    assertEquals(ecpolicyForECfile, ecPolicyName,
+        "EC policy for ecFile should match the set EC policy");
  }
 
  @Test
@@ -2276,16 +2271,16 @@ public class TestWebHDFS {
      }
 
      FsStatus webHdfsFsStatus = webHdfs.getStatus(new Path("/"));
-      Assert.assertNotNull(webHdfsFsStatus);
+      assertNotNull(webHdfsFsStatus);
 
      FsStatus dfsFsStatus = dfs.getStatus(new Path("/"));
-      Assert.assertNotNull(dfsFsStatus);
+      assertNotNull(dfsFsStatus);
 
      //Validate used free and capacity are the same as DistributedFileSystem
-      Assert.assertEquals(webHdfsFsStatus.getUsed(), dfsFsStatus.getUsed());
-      Assert.assertEquals(webHdfsFsStatus.getRemaining(),
+      assertEquals(webHdfsFsStatus.getUsed(), dfsFsStatus.getUsed());
+      assertEquals(webHdfsFsStatus.getRemaining(),
          dfsFsStatus.getRemaining());
-      Assert.assertEquals(webHdfsFsStatus.getCapacity(),
+      assertEquals(webHdfsFsStatus.getCapacity(),
          dfsFsStatus.getCapacity());
    } finally {
      cluster.shutdown();
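
Note for reviewers: every hunk above applies the same mechanical JUnit 4 -> JUnit 5 (Jupiter) migration: `org.junit.Assert.*` becomes statically imported `org.junit.jupiter.api.Assertions.*`, `@Test(timeout=...)` (milliseconds) becomes `@Test` plus `@Timeout` (seconds by default), and the failure message moves from the first assertion argument to the last. A minimal, self-contained sketch of the pattern, assuming junit-jupiter-api 5.x on the test classpath (the class and test names here are illustrative only, not part of the patch):

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

public class JUnit5MigrationSketch {

  // JUnit 4: @Test(timeout = 300000), with the timeout given in milliseconds.
  // Jupiter: a separate @Timeout annotation; its default unit is seconds,
  // so @Timeout(value = 300) preserves the original five-minute budget.
  @Test
  @Timeout(value = 300)
  public void testPattern() {
    // JUnit 4 put the message first:  assertEquals(message, expected, actual).
    // Jupiter moves it to the end:    assertEquals(expected, actual, message).
    assertEquals(4, 2 + 2, "arithmetic should hold");
  }
}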