|
@@ -6,9 +6,9 @@
|
|
|
* to you under the Apache License, Version 2.0 (the
|
|
|
* "License"); you may not use this file except in compliance
|
|
|
* with the License. You may obtain a copy of the License at
|
|
|
- *
|
|
|
- * http://www.apache.org/licenses/LICENSE-2.0
|
|
|
- *
|
|
|
+ *
|
|
|
+ * http://www.apache.org/licenses/LICENSE-2.0
|
|
|
+ *
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
@@ -37,15 +37,15 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PAS
|
|
|
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY;
|
|
|
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
|
|
|
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
|
|
|
-import static org.hamcrest.CoreMatchers.not;
|
|
|
-import static org.junit.Assert.assertArrayEquals;
|
|
|
-import static org.junit.Assert.assertEquals;
|
|
|
-import static org.junit.Assert.assertFalse;
|
|
|
-import static org.junit.Assert.assertNull;
|
|
|
-import static org.junit.Assert.assertThat;
|
|
|
-import static org.junit.Assert.assertTrue;
|
|
|
-import static org.junit.Assert.fail;
|
|
|
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
|
|
|
+import static org.junit.jupiter.api.Assertions.assertEquals;
|
|
|
+import static org.junit.jupiter.api.Assertions.assertFalse;
|
|
|
+import static org.junit.jupiter.api.Assertions.assertNull;
|
|
|
+import static org.junit.jupiter.api.Assertions.assertThrows;
|
|
|
+import static org.junit.jupiter.api.Assertions.assertTrue;
|
|
|
+import static org.junit.jupiter.api.Assertions.fail;
|
|
|
import static org.mockito.Mockito.*;
|
|
|
+import static org.assertj.core.api.Assertions.assertThat;
|
|
|
|
|
|
import java.io.File;
|
|
|
import java.io.IOException;
|
|
@@ -84,26 +84,26 @@ import org.apache.hadoop.security.alias.JavaKeyStoreProvider;
|
|
|
import org.apache.hadoop.test.GenericTestUtils;
|
|
|
import org.apache.hadoop.test.LambdaTestUtils;
|
|
|
import org.apache.hadoop.util.Shell;
|
|
|
-import org.junit.Assert;
|
|
|
-import org.junit.Before;
|
|
|
-import org.junit.Test;
|
|
|
+import org.junit.jupiter.api.BeforeEach;
|
|
|
+import org.junit.jupiter.api.Test;
|
|
|
+import org.junit.jupiter.api.Timeout;
|
|
|
|
|
|
public class TestDFSUtil {
|
|
|
|
|
|
- static final String NS1_NN_ADDR = "ns1-nn.example.com:8020";
|
|
|
- static final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
|
|
|
- static final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
|
|
|
- static final String NS1_NN1_HTTPS_ADDR = "ns1-nn1.example.com:50740";
|
|
|
- static final String NS1_NN1_HTTP_ADDR = "ns1-nn1.example.com:50070";
|
|
|
+ static final String NS1_NN_ADDR = "ns1-nn.example.com:8020";
|
|
|
+ static final String NS1_NN1_ADDR = "ns1-nn1.example.com:8020";
|
|
|
+ static final String NS1_NN2_ADDR = "ns1-nn2.example.com:8020";
|
|
|
+ static final String NS1_NN1_HTTPS_ADDR = "ns1-nn1.example.com:50740";
|
|
|
+ static final String NS1_NN1_HTTP_ADDR = "ns1-nn1.example.com:50070";
|
|
|
|
|
|
/**
|
|
|
* Reset to default UGI settings since some tests change them.
|
|
|
*/
|
|
|
- @Before
|
|
|
+ @BeforeEach
|
|
|
public void resetUGI() {
|
|
|
UserGroupInformation.setConfiguration(new Configuration());
|
|
|
}
|
|
|
-
|
|
|
+
|
|
|
/**
|
|
|
* Test conversion of LocatedBlock to BlockLocation
|
|
|
*/
|
|
@@ -130,18 +130,17 @@ public class TestDFSUtil {
|
|
|
|
|
|
BlockLocation[] bs = DFSUtilClient.locatedBlocks2Locations(lbs);
|
|
|
|
|
|
- assertTrue("expected 2 blocks but got " + bs.length,
|
|
|
- bs.length == 2);
|
|
|
+    assertEquals(2, bs.length, "expected 2 blocks but got " + bs.length);
|
|
|
|
|
|
int corruptCount = 0;
|
|
|
- for (BlockLocation b: bs) {
|
|
|
+ for (BlockLocation b : bs) {
|
|
|
if (b.isCorrupt()) {
|
|
|
corruptCount++;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- assertTrue("expected 1 corrupt files but got " + corruptCount,
|
|
|
- corruptCount == 1);
|
|
|
+    assertEquals(1, corruptCount,
|
|
|
+        "expected 1 corrupt files but got " + corruptCount);
|
|
|
|
|
|
// test an empty location
|
|
|
bs = DFSUtilClient.locatedBlocks2Locations(new LocatedBlocks());
|
|
@@ -156,7 +155,7 @@ public class TestDFSUtil {
|
|
|
DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
|
|
|
DatanodeInfo[] ds = new DatanodeInfo[1];
|
|
|
ds[0] = d;
|
|
|
-
|
|
|
+
|
|
|
ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
|
|
|
LocatedBlock l1 = new LocatedBlock(b1, ds, null, null, 0, false, null);
|
|
|
final DatanodeInfo[] cachedLocs = l1.getCachedLocations();
|
|
@@ -180,7 +179,7 @@ public class TestDFSUtil {
|
|
|
conf.set(DFS_NAMESERVICE_ID, "nn1");
|
|
|
assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
|
|
|
}
|
|
|
-
|
|
|
+
|
|
|
/**
|
|
|
* Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
|
|
|
* nameserviceId for namenode is determined based on matching the address with
|
|
@@ -219,16 +218,18 @@ public class TestDFSUtil {
|
|
|
* exception is thrown when multiple rpc addresses match the local node's
|
|
|
* address
|
|
|
*/
|
|
|
- @Test(expected = HadoopIllegalArgumentException.class)
|
|
|
+ @Test
|
|
|
public void testGetNameServiceIdException() {
|
|
|
- HdfsConfiguration conf = new HdfsConfiguration();
|
|
|
- conf.set(DFS_NAMESERVICES, "nn1,nn2");
|
|
|
- conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
|
|
|
- "localhost:9000");
|
|
|
- conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
|
|
|
- "localhost:9001");
|
|
|
- DFSUtil.getNamenodeNameServiceId(conf);
|
|
|
- fail("Expected exception is not thrown");
|
|
|
+ assertThrows(HadoopIllegalArgumentException.class, () -> {
|
|
|
+ HdfsConfiguration conf = new HdfsConfiguration();
|
|
|
+ conf.set(DFS_NAMESERVICES, "nn1,nn2");
|
|
|
+ conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
|
|
|
+ "localhost:9000");
|
|
|
+ conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
|
|
|
+ "localhost:9001");
|
|
|
+ DFSUtil.getNamenodeNameServiceId(conf);
|
|
|
+      // no fail() needed: assertThrows reports if nothing is thrown
|
|
|
+ });
|
|
|
}
|
|
|
|
|
|
/**
|
|
@@ -244,7 +245,7 @@ public class TestDFSUtil {
|
|
|
assertEquals("nn1", it.next().toString());
|
|
|
assertEquals("nn2", it.next().toString());
|
|
|
}
|
|
|
-
|
|
|
+
|
|
|
@Test
|
|
|
public void testGetOnlyNameServiceIdOrNull() {
|
|
|
HdfsConfiguration conf = new HdfsConfiguration();
|
|
@@ -277,13 +278,13 @@ public class TestDFSUtil {
|
|
|
Map<String, Map<String, InetSocketAddress>> nnMap = DFSUtil
|
|
|
.getNNServiceRpcAddresses(conf);
|
|
|
assertEquals(2, nnMap.size());
|
|
|
-
|
|
|
+
|
|
|
Map<String, InetSocketAddress> nn1Map = nnMap.get("nn1");
|
|
|
assertEquals(1, nn1Map.size());
|
|
|
InetSocketAddress addr = nn1Map.get(null);
|
|
|
assertEquals("localhost", addr.getHostName());
|
|
|
assertEquals(9000, addr.getPort());
|
|
|
-
|
|
|
+
|
|
|
Map<String, InetSocketAddress> nn2Map = nnMap.get("nn2");
|
|
|
assertEquals(1, nn2Map.size());
|
|
|
addr = nn2Map.get(null);
|
|
@@ -301,7 +302,7 @@ public class TestDFSUtil {
|
|
|
}
|
|
|
|
|
|
public void checkNameServiceId(Configuration conf, String addr,
|
|
|
- String expectedNameServiceId) {
|
|
|
+ String expectedNameServiceId) {
|
|
|
InetSocketAddress s = NetUtils.createSocketAddr(addr);
|
|
|
String nameserviceId = DFSUtil.getNameServiceIdFromAddress(conf, s,
|
|
|
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
|
|
@@ -317,15 +318,15 @@ public class TestDFSUtil {
|
|
|
// If DFS_FEDERATION_NAMESERVICES is not set, verify that
|
|
|
// default namenode address is returned.
|
|
|
Map<String, Map<String, InetSocketAddress>> addrMap =
|
|
|
- DFSUtil.getNNServiceRpcAddresses(conf);
|
|
|
+ DFSUtil.getNNServiceRpcAddresses(conf);
|
|
|
assertEquals(1, addrMap.size());
|
|
|
-
|
|
|
+
|
|
|
Map<String, InetSocketAddress> defaultNsMap = addrMap.get(null);
|
|
|
assertEquals(1, defaultNsMap.size());
|
|
|
-
|
|
|
+
|
|
|
assertEquals(9999, defaultNsMap.get(null).getPort());
|
|
|
}
|
|
|
-
|
|
|
+
|
|
|
/**
|
|
|
* Test to ensure nameservice specific keys in the configuration are
|
|
|
* copied to generic keys when the namenode starts.
|
|
@@ -334,7 +335,7 @@ public class TestDFSUtil {
|
|
|
public void testConfModificationFederationOnly() {
|
|
|
final HdfsConfiguration conf = new HdfsConfiguration();
|
|
|
String nsId = "ns1";
|
|
|
-
|
|
|
+
|
|
|
conf.set(DFS_NAMESERVICES, nsId);
|
|
|
conf.set(DFS_NAMESERVICE_ID, nsId);
|
|
|
|
|
@@ -353,7 +354,7 @@ public class TestDFSUtil {
|
|
|
assertEquals(key, conf.get(key));
|
|
|
}
|
|
|
}
|
|
|
-
|
|
|
+
|
|
|
/**
|
|
|
* Test to ensure nameservice specific keys in the configuration are
|
|
|
* copied to generic keys when the namenode starts.
|
|
@@ -363,7 +364,7 @@ public class TestDFSUtil {
|
|
|
final HdfsConfiguration conf = new HdfsConfiguration();
|
|
|
String nsId = "ns1";
|
|
|
String nnId = "nn1";
|
|
|
-
|
|
|
+
|
|
|
conf.set(DFS_NAMESERVICES, nsId);
|
|
|
conf.set(DFS_NAMESERVICE_ID, nsId);
|
|
|
conf.set(DFS_HA_NAMENODES_KEY_PREFIX + "." + nsId, nnId);
|
|
@@ -387,7 +388,7 @@ public class TestDFSUtil {
|
|
|
/**
|
|
|
* Ensure that fs.defaultFS is set in the configuration even if neither HA nor
|
|
|
* Federation is enabled.
|
|
|
- *
|
|
|
+ *
|
|
|
* Regression test for HDFS-3351.
|
|
|
*/
|
|
|
@Test
|
|
@@ -395,7 +396,7 @@ public class TestDFSUtil {
|
|
|
final HdfsConfiguration conf = new HdfsConfiguration();
|
|
|
String nsId = null;
|
|
|
String nnId = null;
|
|
|
-
|
|
|
+
|
|
|
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1234");
|
|
|
|
|
|
assertFalse("hdfs://localhost:1234".equals(conf.get(FS_DEFAULT_NAME_KEY)));
|
|
@@ -419,23 +420,23 @@ public class TestDFSUtil {
|
|
|
Configuration newConf = new Configuration(conf);
|
|
|
NameNode.initializeGenericKeys(newConf, "ns2", "nn1");
|
|
|
assertEquals("global-default", newConf.get(key));
|
|
|
-
|
|
|
+
|
|
|
// A namenode in another non-HA nameservice should get global default.
|
|
|
newConf = new Configuration(conf);
|
|
|
NameNode.initializeGenericKeys(newConf, "ns2", null);
|
|
|
- assertEquals("global-default", newConf.get(key));
|
|
|
-
|
|
|
+ assertEquals("global-default", newConf.get(key));
|
|
|
+
|
|
|
// A namenode in the same nameservice should get the ns setting
|
|
|
newConf = new Configuration(conf);
|
|
|
NameNode.initializeGenericKeys(newConf, "ns1", "nn2");
|
|
|
- assertEquals("ns1-override", newConf.get(key));
|
|
|
+ assertEquals("ns1-override", newConf.get(key));
|
|
|
|
|
|
// The nn with the nn-specific setting should get its own override
|
|
|
newConf = new Configuration(conf);
|
|
|
NameNode.initializeGenericKeys(newConf, "ns1", "nn1");
|
|
|
- assertEquals("nn1-override", newConf.get(key));
|
|
|
+ assertEquals("nn1-override", newConf.get(key));
|
|
|
}
|
|
|
-
|
|
|
+
|
|
|
/**
|
|
|
* Tests for empty configuration, an exception is thrown from
|
|
|
* {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
|
|
@@ -456,7 +457,7 @@ public class TestDFSUtil {
|
|
|
|
|
|
try {
|
|
|
Map<String, Map<String, InetSocketAddress>> map =
|
|
|
- DFSUtil.getBackupNodeAddresses(conf);
|
|
|
+ DFSUtil.getBackupNodeAddresses(conf);
|
|
|
fail("Expected IOException is not thrown, result was: " +
|
|
|
DFSUtil.addressMapToString(map));
|
|
|
} catch (IOException expected) {
|
|
@@ -465,7 +466,7 @@ public class TestDFSUtil {
|
|
|
|
|
|
try {
|
|
|
Map<String, Map<String, InetSocketAddress>> map =
|
|
|
- DFSUtil.getSecondaryNameNodeAddresses(conf);
|
|
|
+ DFSUtil.getSecondaryNameNodeAddresses(conf);
|
|
|
fail("Expected IOException is not thrown, result was: " +
|
|
|
DFSUtil.addressMapToString(map));
|
|
|
} catch (IOException expected) {
|
|
@@ -502,11 +503,11 @@ public class TestDFSUtil {
|
|
|
@Test
|
|
|
public void testGetInfoServer() throws IOException, URISyntaxException {
|
|
|
HdfsConfiguration conf = new HdfsConfiguration();
|
|
|
-
|
|
|
+
|
|
|
URI httpsport = DFSUtil.getInfoServer(null, conf, "https");
|
|
|
assertEquals(new URI("https", null, "0.0.0.0",
|
|
|
DFS_NAMENODE_HTTPS_PORT_DEFAULT, null, null, null), httpsport);
|
|
|
-
|
|
|
+
|
|
|
URI httpport = DFSUtil.getInfoServer(null, conf, "http");
|
|
|
assertEquals(new URI("http", null, "0.0.0.0",
|
|
|
DFS_NAMENODE_HTTP_PORT_DEFAULT, null, null, null), httpport);
|
|
@@ -517,17 +518,17 @@ public class TestDFSUtil {
|
|
|
URI.create("http://localhost:" + DFS_NAMENODE_HTTP_PORT_DEFAULT),
|
|
|
httpAddress);
|
|
|
}
|
|
|
-
|
|
|
+
|
|
|
@Test
|
|
|
public void testHANameNodesWithFederation() throws URISyntaxException {
|
|
|
HdfsConfiguration conf = new HdfsConfiguration();
|
|
|
-
|
|
|
+
|
|
|
final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
|
|
|
final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
|
|
|
final String NS2_NN1_HOST = "ns2-nn1.example.com:8020";
|
|
|
final String NS2_NN2_HOST = "ns2-nn2.example.com:8020";
|
|
|
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
|
|
|
-
|
|
|
+
|
|
|
// Two nameservices, each with two NNs.
|
|
|
conf.set(DFS_NAMESERVICES, "ns1,ns2");
|
|
|
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),
|
|
@@ -535,35 +536,35 @@ public class TestDFSUtil {
|
|
|
conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns2"),
|
|
|
"ns2-nn1,ns2-nn2");
|
|
|
conf.set(DFSUtil.addKeySuffixes(
|
|
|
- DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "ns1-nn1"),
|
|
|
+ DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "ns1-nn1"),
|
|
|
NS1_NN1_HOST);
|
|
|
conf.set(DFSUtil.addKeySuffixes(
|
|
|
- DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "ns1-nn2"),
|
|
|
+ DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "ns1-nn2"),
|
|
|
NS1_NN2_HOST);
|
|
|
conf.set(DFSUtil.addKeySuffixes(
|
|
|
- DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn1"),
|
|
|
+ DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn1"),
|
|
|
NS2_NN1_HOST);
|
|
|
conf.set(DFSUtil.addKeySuffixes(
|
|
|
- DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn2"),
|
|
|
+ DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn2"),
|
|
|
NS2_NN2_HOST);
|
|
|
-
|
|
|
+
|
|
|
Map<String, Map<String, InetSocketAddress>> map =
|
|
|
DFSUtilClient.getHaNnRpcAddresses(conf);
|
|
|
|
|
|
assertTrue(HAUtil.isHAEnabled(conf, "ns1"));
|
|
|
assertTrue(HAUtil.isHAEnabled(conf, "ns2"));
|
|
|
assertFalse(HAUtil.isHAEnabled(conf, "ns3"));
|
|
|
-
|
|
|
+
|
|
|
assertEquals(NS1_NN1_HOST, map.get("ns1").get("ns1-nn1").toString());
|
|
|
assertEquals(NS1_NN2_HOST, map.get("ns1").get("ns1-nn2").toString());
|
|
|
assertEquals(NS2_NN1_HOST, map.get("ns2").get("ns2-nn1").toString());
|
|
|
assertEquals(NS2_NN2_HOST, map.get("ns2").get("ns2-nn2").toString());
|
|
|
-
|
|
|
- assertEquals(NS1_NN1_HOST,
|
|
|
+
|
|
|
+ assertEquals(NS1_NN1_HOST,
|
|
|
DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn1"));
|
|
|
- assertEquals(NS1_NN2_HOST,
|
|
|
+ assertEquals(NS1_NN2_HOST,
|
|
|
DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn2"));
|
|
|
- assertEquals(NS2_NN1_HOST,
|
|
|
+ assertEquals(NS2_NN1_HOST,
|
|
|
DFSUtil.getNamenodeServiceAddr(conf, "ns2", "ns2-nn1"));
|
|
|
|
|
|
// No nameservice was given and we can't determine which service addr
|
|
@@ -587,15 +588,15 @@ public class TestDFSUtil {
|
|
|
@Test
|
|
|
public void getNameNodeServiceAddr() throws IOException {
|
|
|
HdfsConfiguration conf = new HdfsConfiguration();
|
|
|
-
|
|
|
+
|
|
|
// One nameservice with two NNs
|
|
|
final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
|
|
|
final String NS1_NN1_HOST_SVC = "ns1-nn2.example.com:9821";
|
|
|
final String NS1_NN2_HOST = "ns1-nn1.example.com:8020";
|
|
|
final String NS1_NN2_HOST_SVC = "ns1-nn2.example.com:9821";
|
|
|
-
|
|
|
+
|
|
|
conf.set(DFS_NAMESERVICES, "ns1");
|
|
|
- conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
|
|
|
+ conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"), "nn1,nn2");
|
|
|
|
|
|
conf.set(DFSUtil.addKeySuffixes(
|
|
|
DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HOST);
|
|
@@ -609,7 +610,7 @@ public class TestDFSUtil {
|
|
|
// A nameservice is specified explicitly
|
|
|
assertEquals(NS1_NN1_HOST, DFSUtil.getNamenodeServiceAddr(conf, "ns1", "nn1"));
|
|
|
assertEquals(null, DFSUtil.getNamenodeServiceAddr(conf, "invalid", "nn1"));
|
|
|
-
|
|
|
+
|
|
|
// The service addrs are used when they are defined
|
|
|
conf.set(DFSUtil.addKeySuffixes(
|
|
|
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1", "nn1"), NS1_NN1_HOST_SVC);
|
|
@@ -641,7 +642,7 @@ public class TestDFSUtil {
|
|
|
HdfsConfiguration conf = new HdfsConfiguration();
|
|
|
|
|
|
conf.set(DFS_NAMESERVICES, "ns1");
|
|
|
- conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
|
|
|
+ conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"), "nn1,nn2");
|
|
|
conf.set(DFSUtil.addKeySuffixes(
|
|
|
DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"), nnaddr1);
|
|
|
conf.set(DFSUtil.addKeySuffixes(
|
|
@@ -661,7 +662,7 @@ public class TestDFSUtil {
|
|
|
}
|
|
|
|
|
|
private static Collection<URI> getInternalNameServiceUris(Configuration conf,
|
|
|
- String... keys) {
|
|
|
+ String... keys) {
|
|
|
final Collection<String> ids = DFSUtil.getInternalNameServices(conf);
|
|
|
return DFSUtil.getNameServiceUris(conf, ids, keys);
|
|
|
}
|
|
@@ -671,14 +672,15 @@ public class TestDFSUtil {
|
|
|
* settings
|
|
|
* @throws Exception
|
|
|
*/
|
|
|
+ @SuppressWarnings("LocalFinalVariableName")
|
|
|
@Test
|
|
|
public void testGetNNUris() throws Exception {
|
|
|
HdfsConfiguration conf = new HdfsConfiguration();
|
|
|
|
|
|
- final String NS2_NN_ADDR = "ns2-nn.example.com:8020";
|
|
|
- final String NN1_ADDR = "nn.example.com:8020";
|
|
|
- final String NN1_SRVC_ADDR = "nn.example.com:9821";
|
|
|
- final String NN2_ADDR = "nn2.example.com:8020";
|
|
|
+ final String NS2_NN_ADDR = "ns2-nn.example.com:8020";
|
|
|
+ final String NN1_ADDR = "nn.example.com:8020";
|
|
|
+ final String NN1_SRVC_ADDR = "nn.example.com:9821";
|
|
|
+ final String NN2_ADDR = "nn2.example.com:8020";
|
|
|
|
|
|
conf.set(DFS_NAMESERVICES, "ns1");
|
|
|
conf.set(DFSUtil.addKeySuffixes(
|
|
@@ -687,11 +689,11 @@ public class TestDFSUtil {
|
|
|
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
|
|
|
|
|
|
Collection<URI> uris = DFSUtil.getInternalNsRpcUris(conf);
|
|
|
- assertEquals("Incorrect number of URIs returned", 2, uris.size());
|
|
|
- assertTrue("Missing URI for name service ns1",
|
|
|
- uris.contains(new URI("hdfs://" + NS1_NN1_ADDR)));
|
|
|
- assertTrue("Missing URI for service address",
|
|
|
- uris.contains(new URI("hdfs://" + NN2_ADDR)));
|
|
|
+ assertEquals(2, uris.size(), "Incorrect number of URIs returned");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://" + NS1_NN1_ADDR)),
|
|
|
+ "Missing URI for name service ns1");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://" + NN2_ADDR)),
|
|
|
+ "Missing URI for service address");
|
|
|
|
|
|
conf = new HdfsConfiguration();
|
|
|
conf.set(DFS_NAMESERVICES, "ns1,ns2");
|
|
@@ -724,15 +726,13 @@ public class TestDFSUtil {
|
|
|
+ "IPFailoverProxyProvider");
|
|
|
|
|
|
uris = DFSUtil.getInternalNsRpcUris(conf);
|
|
|
- assertEquals("Incorrect number of URIs returned", 3, uris.size());
|
|
|
- assertTrue("Missing URI for RPC address",
|
|
|
- uris.contains(new URI("hdfs://" + NN1_ADDR)));
|
|
|
- assertTrue("Missing URI for name service ns2",
|
|
|
- uris.contains(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
|
|
|
- NS1_NN_ADDR)));
|
|
|
- assertTrue("Missing URI for name service ns2",
|
|
|
- uris.contains(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
|
|
|
- NS2_NN_ADDR)));
|
|
|
+ assertEquals(3, uris.size(), "Incorrect number of URIs returned");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
|
|
|
+ "Missing URI for RPC address");
|
|
|
+ assertTrue(uris.contains(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
|
|
|
+          NS1_NN_ADDR)), "Missing URI for name service ns1");
|
|
|
+ assertTrue(uris.contains(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
|
|
|
+ NS2_NN_ADDR)), "Missing URI for name service ns2");
|
|
|
|
|
|
/**
|
|
|
* Second, test ns1 with {@link ConfiguredFailoverProxyProvider} which does
|
|
@@ -743,57 +743,57 @@ public class TestDFSUtil {
|
|
|
+ "ConfiguredFailoverProxyProvider");
|
|
|
|
|
|
uris = DFSUtil.getInternalNsRpcUris(conf);
|
|
|
- assertEquals("Incorrect number of URIs returned", 3, uris.size());
|
|
|
- assertTrue("Missing URI for name service ns1",
|
|
|
- uris.contains(new URI("hdfs://ns1")));
|
|
|
- assertTrue("Missing URI for name service ns2",
|
|
|
- uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
|
|
|
- assertTrue("Missing URI for RPC address",
|
|
|
- uris.contains(new URI("hdfs://" + NN1_ADDR)));
|
|
|
+ assertEquals(3, uris.size(), "Incorrect number of URIs returned");
|
|
|
+    assertTrue(uris.contains(new URI("hdfs://ns1")),
|
|
|
+        "Missing URI for name service ns1");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)),
|
|
|
+ "Missing URI for name service ns2");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
|
|
|
+ "Missing URI for RPC address");
|
|
|
|
|
|
// Make sure that non-HDFS URIs in fs.defaultFS don't get included.
|
|
|
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
|
|
|
"viewfs://vfs-name.example.com");
|
|
|
|
|
|
uris = DFSUtil.getInternalNsRpcUris(conf);
|
|
|
- assertEquals("Incorrect number of URIs returned", 3, uris.size());
|
|
|
- assertTrue("Missing URI for name service ns1",
|
|
|
- uris.contains(new URI("hdfs://ns1")));
|
|
|
- assertTrue("Missing URI for name service ns2",
|
|
|
- uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
|
|
|
- assertTrue("Missing URI for RPC address",
|
|
|
- uris.contains(new URI("hdfs://" + NN1_ADDR)));
|
|
|
+ assertEquals(3, uris.size(), "Incorrect number of URIs returned");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://ns1")),
|
|
|
+ "Missing URI for name service ns1");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)),
|
|
|
+ "Missing URI for name service ns2");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
|
|
|
+ "Missing URI for RPC address");
|
|
|
|
|
|
// Make sure that an HA URI being the default URI doesn't result in multiple
|
|
|
// entries being returned.
|
|
|
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1");
|
|
|
-
|
|
|
+
|
|
|
uris = DFSUtil.getInternalNsRpcUris(conf);
|
|
|
- assertEquals("Incorrect number of URIs returned", 3, uris.size());
|
|
|
- assertTrue("Missing URI for name service ns1",
|
|
|
- uris.contains(new URI("hdfs://ns1")));
|
|
|
- assertTrue("Missing URI for name service ns2",
|
|
|
- uris.contains(new URI("hdfs://" + NS2_NN_ADDR)));
|
|
|
- assertTrue("Missing URI for RPC address",
|
|
|
- uris.contains(new URI("hdfs://" + NN1_ADDR)));
|
|
|
+ assertEquals(3, uris.size(), "Incorrect number of URIs returned");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://ns1")),
|
|
|
+ "Missing URI for name service ns1");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://" + NS2_NN_ADDR)),
|
|
|
+ "Missing URI for name service ns2");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
|
|
|
+ "Missing URI for RPC address");
|
|
|
|
|
|
// Check that the default URI is returned if there's nothing else to return.
|
|
|
conf = new HdfsConfiguration();
|
|
|
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
|
|
|
|
|
|
uris = DFSUtil.getInternalNsRpcUris(conf);
|
|
|
- assertEquals("Incorrect number of URIs returned", 1, uris.size());
|
|
|
- assertTrue("Missing URI for RPC address (defaultFS)",
|
|
|
- uris.contains(new URI("hdfs://" + NN1_ADDR)));
|
|
|
+ assertEquals(1, uris.size(), "Incorrect number of URIs returned");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
|
|
|
+ "Missing URI for RPC address (defaultFS)");
|
|
|
|
|
|
// Check that the RPC address is the only address returned when the RPC
|
|
|
// and the default FS is given.
|
|
|
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, NN2_ADDR);
|
|
|
|
|
|
uris = DFSUtil.getInternalNsRpcUris(conf);
|
|
|
- assertEquals("Incorrect number of URIs returned", 1, uris.size());
|
|
|
- assertTrue("Missing URI for RPC address",
|
|
|
- uris.contains(new URI("hdfs://" + NN2_ADDR)));
|
|
|
+ assertEquals(1, uris.size(), "Incorrect number of URIs returned");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://" + NN2_ADDR)),
|
|
|
+ "Missing URI for RPC address");
|
|
|
|
|
|
// Make sure that when a service RPC address is used that is distinct from
|
|
|
// the client RPC address, and that client RPC address is also used as the
|
|
@@ -802,20 +802,20 @@ public class TestDFSUtil {
|
|
|
conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_ADDR);
|
|
|
|
|
|
uris = DFSUtil.getInternalNsRpcUris(conf);
|
|
|
- assertEquals("Incorrect number of URIs returned", 1, uris.size());
|
|
|
- assertTrue("Missing URI for service ns1",
|
|
|
- uris.contains(new URI("hdfs://" + NN1_ADDR)));
|
|
|
+ assertEquals(1, uris.size(), "Incorrect number of URIs returned");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_ADDR)),
|
|
|
+ "Missing URI for service ns1");
|
|
|
|
|
|
// Check that when the default FS and service address are given, but
|
|
|
// the RPC address isn't, that only the service address is returned.
|
|
|
conf = new HdfsConfiguration();
|
|
|
conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN1_ADDR);
|
|
|
conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_SRVC_ADDR);
|
|
|
-
|
|
|
+
|
|
|
uris = DFSUtil.getInternalNsRpcUris(conf);
|
|
|
- assertEquals("Incorrect number of URIs returned", 1, uris.size());
|
|
|
- assertTrue("Missing URI for service address",
|
|
|
- uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)));
|
|
|
+ assertEquals(1, uris.size(), "Incorrect number of URIs returned");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR)),
|
|
|
+ "Missing URI for service address");
|
|
|
}
|
|
|
|
|
|
@Test
|
|
@@ -843,12 +843,13 @@ public class TestDFSUtil {
|
|
|
|
|
|
Collection<URI> uris = DFSUtil.getInternalNsRpcUris(conf);
|
|
|
|
|
|
- assertEquals("Incorrect number of URIs returned", 1, uris.size());
|
|
|
- assertTrue("Missing URI for name service ns1",
|
|
|
- uris.contains(new URI("hdfs://ns1")));
|
|
|
+ assertEquals(1, uris.size(), "Incorrect number of URIs returned");
|
|
|
+ assertTrue(uris.contains(new URI("hdfs://ns1")),
|
|
|
+ "Missing URI for name service ns1");
|
|
|
}
|
|
|
|
|
|
- @Test (timeout=15000)
|
|
|
+ @Test
|
|
|
+ @Timeout(value = 15)
|
|
|
public void testLocalhostReverseLookup() {
|
|
|
// 127.0.0.1 -> localhost reverse resolution does not happen on Windows.
|
|
|
assumeNotWindows();
|
|
@@ -860,67 +861,71 @@ public class TestDFSUtil {
|
|
|
Collection<URI> uris = getInternalNameServiceUris(conf);
|
|
|
assertEquals(1, uris.size());
|
|
|
for (URI uri : uris) {
|
|
|
- assertThat(uri.getHost(), not("127.0.0.1"));
|
|
|
+ assertThat(uri.getHost()).isNotEqualTo("127.0.0.1");
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- @Test (timeout=15000)
|
|
|
+ @Test
|
|
|
+ @Timeout(value = 15)
|
|
|
public void testIsValidName() {
|
|
|
String validPaths[] = new String[]{"/", "/bar/"};
|
|
|
for (String path : validPaths) {
|
|
|
- assertTrue("Should have been accepted '" + path + "'", DFSUtil.isValidName(path));
|
|
|
+ assertTrue(DFSUtil.isValidName(path), "Should have been accepted '" + path + "'");
|
|
|
}
|
|
|
|
|
|
String invalidPaths[] =
|
|
|
new String[]{"/foo/../bar", "/foo/./bar", "/foo//bar", "/foo/:/bar", "/foo:bar"};
|
|
|
for (String path : invalidPaths) {
|
|
|
- assertFalse("Should have been rejected '" + path + "'", DFSUtil.isValidName(path));
|
|
|
+ assertFalse(DFSUtil.isValidName(path), "Should have been rejected '" + path + "'");
|
|
|
}
|
|
|
|
|
|
String windowsPath = "/C:/foo/bar";
|
|
|
if (Shell.WINDOWS) {
|
|
|
- assertTrue("Should have been accepted '" + windowsPath + "' in windows os.",
|
|
|
- DFSUtil.isValidName(windowsPath));
|
|
|
+ assertTrue(DFSUtil.isValidName(windowsPath), "Should have been accepted '" +
|
|
|
+ windowsPath + "' in windows os.");
|
|
|
} else {
|
|
|
- assertFalse("Should have been rejected '" + windowsPath + "' in unix os.",
|
|
|
- DFSUtil.isValidName(windowsPath));
|
|
|
+ assertFalse(DFSUtil.isValidName(windowsPath), "Should have been rejected '" +
|
|
|
+ windowsPath + "' in unix os.");
|
|
|
}
|
|
|
}
|
|
|
-
|
|
|
- @Test(timeout=5000)
|
|
|
+
|
|
|
+ @Test
|
|
|
+ @Timeout(value = 5)
|
|
|
public void testGetSpnegoKeytabKey() {
|
|
|
HdfsConfiguration conf = new HdfsConfiguration();
|
|
|
String defaultKey = "default.spengo.key";
|
|
|
conf.unset(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
|
|
|
- assertEquals("Test spnego key in config is null", defaultKey,
|
|
|
- DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
|
|
|
+ assertEquals(defaultKey, DFSUtil.getSpnegoKeytabKey(conf, defaultKey),
|
|
|
+ "Test spnego key in config is null");
|
|
|
|
|
|
conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, "");
|
|
|
- assertEquals("Test spnego key is empty", defaultKey,
|
|
|
- DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
|
|
|
+ assertEquals(defaultKey, DFSUtil.getSpnegoKeytabKey(conf, defaultKey),
|
|
|
+ "Test spnego key is empty");
|
|
|
|
|
|
String spengoKey = "spengo.key";
|
|
|
conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
|
|
|
spengoKey);
|
|
|
- assertEquals("Test spnego key is NOT null",
|
|
|
+ assertEquals(
|
|
|
DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
|
|
|
- DFSUtil.getSpnegoKeytabKey(conf, defaultKey));
|
|
|
+ DFSUtil.getSpnegoKeytabKey(conf, defaultKey), "Test spnego key is NOT null");
|
|
|
}
|
|
|
|
|
|
- @Test(timeout=10000)
|
|
|
+ @Test
|
|
|
+ @Timeout(value = 10)
|
|
|
public void testDurationToString() throws Exception {
|
|
|
assertEquals("000:00:00:00.000", DFSUtil.durationToString(0));
|
|
|
assertEquals("001:01:01:01.000",
|
|
|
- DFSUtil.durationToString(((24*60*60)+(60*60)+(60)+1)*1000));
|
|
|
+ DFSUtil.durationToString(((24 * 60 * 60) + (60 * 60) + (60) + 1) * 1000));
|
|
|
assertEquals("000:23:59:59.999",
|
|
|
- DFSUtil.durationToString(((23*60*60)+(59*60)+(59))*1000+999));
|
|
|
+ DFSUtil.durationToString(((23 * 60 * 60) + (59 * 60) + (59)) * 1000 + 999));
|
|
|
assertEquals("-001:01:01:01.000",
|
|
|
- DFSUtil.durationToString(-((24*60*60)+(60*60)+(60)+1)*1000));
|
|
|
+ DFSUtil.durationToString(-((24 * 60 * 60) + (60 * 60) + (60) + 1) * 1000));
|
|
|
assertEquals("-000:23:59:59.574",
|
|
|
- DFSUtil.durationToString(-(((23*60*60)+(59*60)+(59))*1000+574)));
|
|
|
+ DFSUtil.durationToString(-(((23 * 60 * 60) + (59 * 60) + (59)) * 1000 + 574)));
|
|
|
}
|
|
|
|
|
|
- @Test(timeout=5000)
|
|
|
+ @Test
|
|
|
+ @Timeout(value = 5)
|
|
|
public void testRelativeTimeConversion() throws Exception {
|
|
|
try {
|
|
|
DFSUtil.parseRelativeTime("1");
|
|
@@ -937,14 +942,14 @@ public class TestDFSUtil {
|
|
|
} catch (IOException e) {
|
|
|
assertExceptionContains("is not a number", e);
|
|
|
}
|
|
|
- assertEquals(61*1000, DFSUtil.parseRelativeTime("61s"));
|
|
|
- assertEquals(61*60*1000, DFSUtil.parseRelativeTime("61m"));
|
|
|
+ assertEquals(61 * 1000, DFSUtil.parseRelativeTime("61s"));
|
|
|
+ assertEquals(61 * 60 * 1000, DFSUtil.parseRelativeTime("61m"));
|
|
|
assertEquals(0, DFSUtil.parseRelativeTime("0s"));
|
|
|
- assertEquals(25*60*60*1000, DFSUtil.parseRelativeTime("25h"));
|
|
|
- assertEquals(4*24*60*60*1000l, DFSUtil.parseRelativeTime("4d"));
|
|
|
- assertEquals(999*24*60*60*1000l, DFSUtil.parseRelativeTime("999d"));
|
|
|
+ assertEquals(25 * 60 * 60 * 1000, DFSUtil.parseRelativeTime("25h"));
|
|
|
+ assertEquals(4 * 24 * 60 * 60 * 1000L, DFSUtil.parseRelativeTime("4d"));
|
|
|
+ assertEquals(999 * 24 * 60 * 60 * 1000L, DFSUtil.parseRelativeTime("999d"));
|
|
|
}
|
|
|
-
|
|
|
+
|
|
|
@Test
|
|
|
public void testAssertAllResultsEqual() {
|
|
|
checkAllResults(new Long[]{}, true);
|
|
@@ -953,7 +958,7 @@ public class TestDFSUtil {
|
|
|
checkAllResults(new Long[]{1l, 1l, 1l}, true);
|
|
|
checkAllResults(new Long[]{new Long(1), new Long(1)}, true);
|
|
|
checkAllResults(new Long[]{null, null, null}, true);
|
|
|
-
|
|
|
+
|
|
|
checkAllResults(new Long[]{1l, 2l}, false);
|
|
|
checkAllResults(new Long[]{2l, 1l}, false);
|
|
|
checkAllResults(new Long[]{1l, 2l, 1l}, false);
|
|
@@ -963,7 +968,7 @@ public class TestDFSUtil {
|
|
|
checkAllResults(new Long[]{null, 1l}, false);
|
|
|
checkAllResults(new Long[]{1l, null, 1l}, false);
|
|
|
}
|
|
|
-
|
|
|
+
|
|
|
private static void checkAllResults(Long[] toCheck, boolean shouldSucceed) {
|
|
|
if (shouldSucceed) {
|
|
|
DFSUtil.assertAllResultsEqual(Arrays.asList(toCheck));
|
|
@@ -985,7 +990,7 @@ public class TestDFSUtil {
|
|
|
Configuration conf = new Configuration();
|
|
|
final Path jksPath = new Path(testDir.toString(), "test.jks");
|
|
|
final String ourUrl =
|
|
|
- JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
|
|
|
+ JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri();
|
|
|
|
|
|
File file = new File(testDir, "test.jks");
|
|
|
file.delete();
|
|
@@ -1031,15 +1036,15 @@ public class TestDFSUtil {
|
|
|
DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY).getCredential());
|
|
|
|
|
|
// use WebAppUtils as would be used by loadSslConfiguration
|
|
|
- Assert.assertEquals("keypass",
|
|
|
+ assertEquals("keypass",
|
|
|
DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYPASSWORD_KEY));
|
|
|
- Assert.assertEquals("storepass",
|
|
|
+ assertEquals("storepass",
|
|
|
DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY));
|
|
|
- Assert.assertEquals("trustpass",
|
|
|
+ assertEquals("trustpass",
|
|
|
DFSUtil.getPassword(conf, DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY));
|
|
|
|
|
|
// let's make sure that a password that doesn't exist returns null
|
|
|
- Assert.assertEquals(null, DFSUtil.getPassword(conf,"invalid-alias"));
|
|
|
+ assertNull(DFSUtil.getPassword(conf, "invalid-alias"));
|
|
|
}
|
|
|
|
|
|
@Test
|
|
@@ -1051,9 +1056,9 @@ public class TestDFSUtil {
|
|
|
final String NN1_ADDRESS = "localhost:9000";
|
|
|
final String NN2_ADDRESS = "localhost:9001";
|
|
|
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
|
|
|
- NN1_ADDRESS);
|
|
|
+ NN1_ADDRESS);
|
|
|
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
|
|
|
- NN2_ADDRESS);
|
|
|
+ NN2_ADDRESS);
|
|
|
|
|
|
{
|
|
|
Collection<String> internal = DFSUtil.getInternalNameServices(conf);
|
|
@@ -1064,7 +1069,7 @@ public class TestDFSUtil {
|
|
|
}
|
|
|
|
|
|
Map<String, Map<String, InetSocketAddress>> nnMap = DFSUtil
|
|
|
- .getNNServiceRpcAddressesForCluster(conf);
|
|
|
+ .getNNServiceRpcAddressesForCluster(conf);
|
|
|
assertEquals(1, nnMap.size());
|
|
|
assertTrue(nnMap.containsKey("nn1"));
|
|
|
|
|
@@ -1080,20 +1085,21 @@ public class TestDFSUtil {
|
|
|
public void testEncryptionProbe() throws Throwable {
|
|
|
Configuration conf = new Configuration(false);
|
|
|
conf.unset(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH);
|
|
|
- assertFalse("encryption enabled on no provider key",
|
|
|
- DFSUtilClient.isHDFSEncryptionEnabled(conf));
|
|
|
+ assertFalse(
|
|
|
+ DFSUtilClient.isHDFSEncryptionEnabled(conf),
|
|
|
+ "encryption enabled on no provider key");
|
|
|
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
|
|
|
"");
|
|
|
- assertFalse("encryption enabled on empty provider key",
|
|
|
- DFSUtilClient.isHDFSEncryptionEnabled(conf));
|
|
|
+ assertFalse(DFSUtilClient.isHDFSEncryptionEnabled(conf),
|
|
|
+ "encryption enabled on empty provider key");
|
|
|
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
|
|
|
"\n\t\n");
|
|
|
- assertFalse("encryption enabled on whitespace provider key",
|
|
|
- DFSUtilClient.isHDFSEncryptionEnabled(conf));
|
|
|
+ assertFalse(DFSUtilClient.isHDFSEncryptionEnabled(conf),
|
|
|
+ "encryption enabled on whitespace provider key");
|
|
|
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
|
|
|
"http://hadoop.apache.org");
|
|
|
- assertTrue("encryption disabled on valid provider key",
|
|
|
- DFSUtilClient.isHDFSEncryptionEnabled(conf));
|
|
|
+ assertTrue(DFSUtilClient.isHDFSEncryptionEnabled(conf),
|
|
|
+ "encryption disabled on valid provider key");
|
|
|
|
|
|
}
|
|
|
|
|
@@ -1101,8 +1107,8 @@ public class TestDFSUtil {
|
|
|
public void testFileIdPath() throws Throwable {
|
|
|
// /.reserved/.inodes/
|
|
|
String prefix = Path.SEPARATOR + HdfsConstants.DOT_RESERVED_STRING +
|
|
|
- Path.SEPARATOR + HdfsConstants.DOT_INODES_STRING +
|
|
|
- Path.SEPARATOR;
|
|
|
+ Path.SEPARATOR + HdfsConstants.DOT_INODES_STRING +
|
|
|
+ Path.SEPARATOR;
|
|
|
Random r = new Random();
|
|
|
for (int i = 0; i < 100; ++i) {
|
|
|
long inode = r.nextLong() & Long.MAX_VALUE;
|
|
@@ -1116,12 +1122,12 @@ public class TestDFSUtil {
|
|
|
Configuration conf = new HdfsConfiguration();
|
|
|
conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1, ns2");
|
|
|
String expectedErrorMessage = "Incorrect configuration: namenode address "
|
|
|
- + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + ".[ns1, ns2]"
|
|
|
- + " or "
|
|
|
- + DFS_NAMENODE_RPC_ADDRESS_KEY + ".[ns1, ns2]"
|
|
|
- + " is not configured.";
|
|
|
+ + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + ".[ns1, ns2]"
|
|
|
+ + " or "
|
|
|
+ + DFS_NAMENODE_RPC_ADDRESS_KEY + ".[ns1, ns2]"
|
|
|
+ + " is not configured.";
|
|
|
LambdaTestUtils.intercept(IOException.class, expectedErrorMessage,
|
|
|
- ()->DFSUtil.getNNServiceRpcAddressesForCluster(conf));
|
|
|
+ () -> DFSUtil.getNNServiceRpcAddressesForCluster(conf));
|
|
|
}
|
|
|
|
|
|
@Test
|
|
@@ -1198,7 +1204,7 @@ public class TestDFSUtil {
|
|
|
if (isLazy) {
|
|
|
// Lazy resolved. There is no need to change host->ip in advance.
|
|
|
assertTrue(inetSocketAddress.isUnresolved());
|
|
|
- }else {
|
|
|
+ } else {
|
|
|
// Need resolve all host->ip.
|
|
|
assertFalse(inetSocketAddress.isUnresolved());
|
|
|
}
|