
HADOOP-12432 Add support for include/exclude lists on IPv6 setup

Signed-off-by: Elliott Clark <eclark@apache.org>
Nemanja Matkovic 9 years ago
parent
commit
d89cecf971

+ 6 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java

@@ -25,17 +25,18 @@ import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Multimap;
 import com.google.common.collect.UnmodifiableIterator;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.HostsFileReader;
 
 import javax.annotation.Nullable;
+
 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -83,16 +84,14 @@ class HostFileManager {
   @VisibleForTesting
   static InetSocketAddress parseEntry(String type, String fn, String line) {
     try {
-      URI uri = new URI("dummy", line, null, null, null);
-      int port = uri.getPort() == -1 ? 0 : uri.getPort();
-      InetSocketAddress addr = new InetSocketAddress(uri.getHost(), port);
+      InetSocketAddress addr = NetUtils.createSocketAddr(line, 0);
       if (addr.isUnresolved()) {
         LOG.warn(String.format("Failed to resolve address `%s` in `%s`. " +
                 "Ignoring in the %s list.", line, fn, type));
         return null;
       }
       return addr;
-    } catch (URISyntaxException e) {
+    } catch (IllegalArgumentException e) {
       LOG.warn(String.format("Failed to parse `%s` in `%s`. " + "Ignoring in " +
               "the %s list.", line, fn, type));
     }
@@ -227,7 +226,7 @@ class HostFileManager {
         @Override
         public String apply(@Nullable InetSocketAddress addr) {
           assert addr != null;
-          return addr.getAddress().getHostAddress() + ":" + addr.getPort();
+          return NetUtils.getSocketAddressString(addr);
         }
       }));
       return sb.append(")").toString();

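A note on the change above: the replacement NetUtils.createSocketAddr(line, 0) parses "host", "host:port", and (on this IPv6 branch) bracketed literals such as "[::1]:42", signalling malformed input with IllegalArgumentException, hence the new catch clause. The serialization side matters just as much: the old addr.getAddress().getHostAddress() + ":" + addr.getPort() yields an ambiguous string for IPv6 (e.g. 0:0:0:0:0:0:0:1:42), since the address itself contains colons. Below is a minimal JDK-only sketch of the bracketing behavior this patch delegates to NetUtils.getSocketAddressString; the helper name toHostPortString is hypothetical, not the Hadoop implementation.

    import java.net.Inet6Address;
    import java.net.InetAddress;
    import java.net.InetSocketAddress;

    public class SocketAddressStringSketch {
      // Hypothetical stand-in for NetUtils.getSocketAddressString: bracket
      // IPv6 literals so the trailing ":port" stays unambiguous.
      static String toHostPortString(InetSocketAddress addr) {
        String host = addr.getAddress().getHostAddress();
        if (addr.getAddress() instanceof Inet6Address) {
          host = "[" + host + "]";
        }
        return host + ":" + addr.getPort();
      }

      public static void main(String[] args) throws Exception {
        InetSocketAddress v6 =
            new InetSocketAddress(InetAddress.getByName("::1"), 42);
        // Old-style output: "0:0:0:0:0:0:0:1:42" -- the port is
        // indistinguishable from one more address hextet.
        System.out.println(v6.getAddress().getHostAddress() + ":" + v6.getPort());
        // Bracketed output: "[0:0:0:0:0:0:0:1]:42".
        System.out.println(toHostPortString(v6));
      }
    }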
+ 18 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHostFileManager.java

@@ -111,13 +111,19 @@ public class TestHostFileManager {
     includedNodes.add(entry("127.0.0.1:12345"));
     includedNodes.add(entry("localhost:12345"));
     includedNodes.add(entry("127.0.0.1:12345"));
+
+    includedNodes.add(entry("[::1]:42"));
+    includedNodes.add(entry("[0:0:0:0:0:0:0:1]:42"));
+    includedNodes.add(entry("[::1]:42"));
+
     includedNodes.add(entry("127.0.0.2"));
 
     excludedNodes.add(entry("127.0.0.1:12346"));
     excludedNodes.add(entry("127.0.30.1:12346"));
+    excludedNodes.add(entry("[::1]:24"));
 
-    Assert.assertEquals(2, includedNodes.size());
-    Assert.assertEquals(2, excludedNodes.size());
+    Assert.assertEquals(3, includedNodes.size());
+    Assert.assertEquals(3, excludedNodes.size());
 
     hm.refresh(includedNodes, excludedNodes);
 
@@ -126,20 +132,25 @@ public class TestHostFileManager {
     Map<String, DatanodeDescriptor> dnMap = (Map<String,
             DatanodeDescriptor>) Whitebox.getInternalState(dm, "datanodeMap");
 
-    // After the de-duplication, there should be only one DN from the included
+    // After the de-duplication, there should be three DNs from the included
     // nodes declared as dead.
-    Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
+    Assert.assertEquals(3, dm.getDatanodeListForReport(HdfsConstants
             .DatanodeReportType.ALL).size());
-    Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
+    Assert.assertEquals(3, dm.getDatanodeListForReport(HdfsConstants
             .DatanodeReportType.DEAD).size());
     dnMap.put("uuid-foo", new DatanodeDescriptor(new DatanodeID("127.0.0.1",
             "localhost", "uuid-foo", 12345, 1020, 1021, 1022)));
-    Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
+    Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
             .DatanodeReportType.DEAD).size());
     dnMap.put("uuid-bar", new DatanodeDescriptor(new DatanodeID("127.0.0.2",
             "127.0.0.2", "uuid-bar", 12345, 1020, 1021, 1022)));
-    Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants
+    Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
             .DatanodeReportType.DEAD).size());
+    dnMap.put("uuid-baz", new DatanodeDescriptor(new DatanodeID("[::1]",
+        "localhost", "uuid-baz", 42, 1020, 1021, 1022)));
+    Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants
+        .DatanodeReportType.DEAD).size());
+
     DatanodeDescriptor spam = new DatanodeDescriptor(new DatanodeID("127.0.0" +
             ".3", "127.0.0.3", "uuid-spam", 12345, 1020, 1021, 1022));
     DFSTestUtil.setDatanodeDead(spam);

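The new test entries exercise de-duplication across IPv6 spellings: "[::1]:42" and "[0:0:0:0:0:0:0:1]:42" are textually different but resolve to the same socket address, so a set-backed include list keeps only one of them (3 unique included entries, not 5). A JDK-only sketch of that behavior; the class name is made up for illustration:

    import java.net.InetAddress;
    import java.net.InetSocketAddress;
    import java.util.HashSet;
    import java.util.Set;

    public class Ipv6DedupSketch {
      public static void main(String[] args) throws Exception {
        // "::1" and "0:0:0:0:0:0:0:1" spell the same address, so the
        // resolved socket addresses compare equal...
        InetSocketAddress a =
            new InetSocketAddress(InetAddress.getByName("::1"), 42);
        InetSocketAddress b =
            new InetSocketAddress(InetAddress.getByName("0:0:0:0:0:0:0:1"), 42);
        System.out.println(a.equals(b)); // true

        // ...and collapse to a single entry in a hash set.
        Set<InetSocketAddress> included = new HashSet<>();
        included.add(a);
        included.add(b);
        System.out.println(included.size()); // 1
      }
    }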
+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java

@@ -112,8 +112,7 @@ public class TestHostsFiles {
       BlockLocation locs[] = fs.getFileBlockLocations(
           fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
       String name = locs[0].getNames()[0];
-      String names = name + "\n" + "localhost:42\n";
-      LOG.info("adding '" + names + "' to exclude file " + excludeFile.toUri().getPath());
+      LOG.info("adding '" + name + "' to exclude file " + excludeFile.toUri().getPath());
       DFSTestUtil.writeFile(localFileSys, excludeFile, name);
       ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
       DFSTestUtil.waitForDecommission(fs, name);
@@ -150,7 +149,8 @@ public class TestHostsFiles {
     assertTrue(localFileSys.mkdirs(dir));
     StringBuilder includeHosts = new StringBuilder();
     includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777")
-        .append("\n");
+        .append("\n").append("[::1]:42").append("\n")
+        .append("[0:0:0:0:0:0:0:1]:24").append("\n");
     DFSTestUtil.writeFile(localFileSys, excludeFile, "");
     DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
     conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
@@ -160,7 +160,7 @@ public class TestHostsFiles {
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       final FSNamesystem ns = cluster.getNameNode().getNamesystem();
-      assertTrue(ns.getNumDeadDataNodes() == 2);
+      assertTrue(ns.getNumDeadDataNodes() == 4);
       assertTrue(ns.getNumLiveDataNodes() == 0);
 
       // Testing using MBeans
@@ -168,7 +168,7 @@ public class TestHostsFiles {
       ObjectName mxbeanName = new ObjectName(
           "Hadoop:service=NameNode,name=FSNamesystemState");
       String nodes = mbs.getAttribute(mxbeanName, "NumDeadDataNodes") + "";
-      assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
+      assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 4);
       assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
     } finally {
       if (cluster != null) {
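
For reference, the include file the updated TestHostsFiles writes now holds one host[:port] entry per line, with IPv6 literals bracketed; the two IPv6 entries use different ports, so they stay distinct. Since the mini-cluster starts zero DataNodes, all four listed hosts are reported dead, which is what moves the assertions from 2 to 4:

    localhost:52
    127.0.0.1:7777
    [::1]:42
    [0:0:0:0:0:0:0:1]:24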