
HDFS-17655. Cannot run HDFS balancer with BlockPlacementPolicyWithNodeGroup (#7141). Contributed by YUBI LEE.

Signed-off-by: He Xiaoqiao <hexiaoqiao@apache.org>
Yubi Lee 4 months ago
parent
commit
d5b836bc34

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -43,6 +43,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.net.NetworkTopology;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -255,7 +256,7 @@ public class Balancer {
   private static void checkReplicationPolicyCompatibility(Configuration conf
       ) throws UnsupportedActionException {
     BlockPlacementPolicies placementPolicies =
-        new BlockPlacementPolicies(conf, null, null, null);
+        new BlockPlacementPolicies(conf, null, NetworkTopology.getInstance(conf), null);
     if (!(placementPolicies.getPolicy(CONTIGUOUS) instanceof
         BlockPlacementPolicyDefault)) {
       throw new UnsupportedActionException(
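
Why this works: before the change, checkReplicationPolicyCompatibility built BlockPlacementPolicies with a null cluster map, and the balancer could not start when BlockPlacementPolicyWithNodeGroup was configured; the fix passes the topology resolved from the configuration instead. The standalone sketch below is not part of this commit; the class name and the printed result are illustrative assumptions, only the config key and topology class come from the diff:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NetworkTopologyWithNodeGroup;

public class TopologyFromConf {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Select the node-group aware topology implementation.
    conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
        NetworkTopologyWithNodeGroup.class.getName());
    // With the fix, the balancer builds BlockPlacementPolicies from this
    // topology instead of passing null.
    NetworkTopology topology = NetworkTopology.getInstance(conf);
    // Expected to print the node-group topology class name (assumption).
    System.out.println(topology.getClass().getName());
  }
}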

+ 19 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY;
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
@@ -40,6 +41,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBER
 
 import java.lang.reflect.Field;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup;
+import org.apache.hadoop.net.NetworkTopologyWithNodeGroup;
 import org.junit.AfterClass;
 
 import static org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset.CONFIG_PROPERTY_NONDFSUSED;
@@ -1645,6 +1648,22 @@ public class TestBalancer {
         CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), true, true);
   }
 
+  /**
+   * Test a cluster with BlockPlacementPolicyWithNodeGroup
+   */
+  @Test(timeout=100000)
+  public void testBalancerCliWithBlockPlacementPolicyWithNodeGroup() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    conf.setBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, false);
+    conf.set(NET_TOPOLOGY_IMPL_KEY, NetworkTopologyWithNodeGroup.class.getName());
+    conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+        BlockPlacementPolicyWithNodeGroup.class.getName());
+    String rackWithNodeGroup = "/rack0/nodegroup0";
+    doTest(conf, new long[] {CAPACITY}, new String[] {rackWithNodeGroup}, CAPACITY / 2,
+        rackWithNodeGroup, true);
+  }
+
   /**
    * Check that the balancer exits when there is an unfinalized upgrade.
    */
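
For reference, the three settings the new test applies can be packaged as a plain Configuration when running the balancer against a node-group cluster. The helper below is hypothetical and not part of the commit; only the keys and values are taken from the test above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup;
import org.apache.hadoop.net.NetworkTopologyWithNodeGroup;

public class NodeGroupBalancerConf {
  /** Builds a Configuration mirroring the settings used by the new test. */
  public static Configuration create() {
    Configuration conf = new HdfsConfiguration();
    // Disable the DFS-specific topology so net.topology.impl takes effect,
    // as the test does.
    conf.setBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, false);
    conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
        NetworkTopologyWithNodeGroup.class.getName());
    // Use node-group aware replica placement.
    conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        BlockPlacementPolicyWithNodeGroup.class.getName());
    return conf;
  }
}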