浏览代码

HDFS-14938. Add check if excludedNodes contain scope in DFSNetworkTopology#chooseRandomWithStorageType(). Contributed by Lisheng Sun.

Ayush Saxena 5 年之前
父节点
当前提交
b643a1cbe8

+ 7 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java

@@ -194,7 +194,13 @@ public class DFSNetworkTopology extends NetworkTopology {
     }
     if (!(node instanceof DFSTopologyNodeImpl)) {
       // a node is either DFSTopologyNodeImpl, or a DatanodeDescriptor
-      return ((DatanodeDescriptor)node).hasStorageType(type) ? node : null;
+      // if a node is DatanodeDescriptor and excludedNodes contains it,
+      // return null;
+      if (excludedNodes != null && excludedNodes.contains(node)) {
+        LOG.debug("{} in excludedNodes", node);
+        return null;
+      }
+      return ((DatanodeDescriptor) node).hasStorageType(type) ? node : null;
     }
     DFSTopologyNodeImpl root = (DFSTopologyNodeImpl)node;
     Node excludeRoot = excludedScope == null ? null : getNode(excludedScope);

+ 31 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.net;
 
 import com.google.common.collect.Sets;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -600,4 +601,34 @@ public class TestDFSNetworkTopology {
         "/default/rack1", excluded, StorageType.DISK);
     assertNull("No node should have been selected.", n);
   }
+
+  /**
+   * Tests that no node is returned when the node resolved from the scope
+   * is a DatanodeDescriptor and excludedNodes contains it, in
+   * DFSNetworkTopology#chooseRandomWithStorageType().
+   */
+  @Test
+  public void testChooseRandomWithStorageTypeScopeEqualsExcludedNodes() {
+    DFSNetworkTopology dfsCluster =
+        DFSNetworkTopology.getInstance(new Configuration());
+    final String[] racks = {"/default/rack1", "/default/rack2"};
+    final String[] hosts = {"host1", "host2"};
+    final StorageType[] types = {StorageType.DISK, StorageType.DISK};
+    DatanodeStorageInfo[] storages = new DatanodeStorageInfo[2];
+    for (int i = 0; i < 2; i++) {
+      final String storageID = "s" + i;
+      final String ip = i + "." + i + "." + i + "." + i;
+      storages[i] = DFSTestUtil.createDatanodeStorageInfo(storageID, ip,
+          racks[i], hosts[i], types[i], null);
+    }
+    DatanodeDescriptor[] dns = DFSTestUtil.toDatanodeDescriptor(storages);
+    dfsCluster.add(dns[0]);
+    dfsCluster.add(dns[1]);
+    HashSet<Node> excluded = new HashSet<>();
+    excluded.add(dns[0]);
+    Node n = dfsCluster.chooseRandomWithStorageType(
+        "/default/rack1/0.0.0.0:" + DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
+        null, excluded, StorageType.DISK);
+    assertNull("No node should have been selected.", n);
+  }
 }