
HDFS-15659. MiniDFSCluster dfs.namenode.redundancy.considerLoad default to false (#2443). Contributed by Ahmed Hussein.

Jim Brennan · 3 years ago
commit 7d231f4b98

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -22,6 +22,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NO
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY;
@@ -168,6 +169,10 @@ public class MiniDFSCluster implements AutoCloseable {
   // Changing this default may break some tests that assume it is 2.
   private static final int DEFAULT_STORAGES_PER_DATANODE = 2;
 
+  // do not consider load factor when selecting a datanode.
+  private static final boolean DEFAULT_DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD =
+      false;
+
   static { DefaultMetricsSystem.setMiniClusterMode(true); }
 
   public int getStoragesPerDatanode() {
@@ -477,6 +482,19 @@ public class MiniDFSCluster implements AutoCloseable {
       return this;
     }
 
+    /**
+     * Set the value of DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY in the
+     * configuration.
+     *
+     * @param val the value to set. This allows overriding the default value
+     *            {@link #DEFAULT_DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD}.
+     * @return the builder object.
+     */
+    public Builder setNNRedundancyConsiderLoad(final boolean val) {
+      conf.setBoolean(DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, val);
+      return this;
+    }
+
     /**
      * Construct the actual MiniDFSCluster
      */
@@ -493,6 +511,10 @@ public class MiniDFSCluster implements AutoCloseable {
               DEFAULT_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC);
       conf.setLong(DFS_BLOCK_SCANNER_VOLUME_JOIN_TIMEOUT_MSEC_KEY,
           defaultScannerVolumeTimeOut);
+      // default is false. do not consider load factor when selecting a
+      // datanode.
+      conf.setBoolean(DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
+          DEFAULT_DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD);
       this.storagesPerDatanode =
           FsDatasetTestUtils.Factory.getFactory(conf).getDefaultNumOfDataDirs();
     }
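
With this change, MiniDFSCluster disables dfs.namenode.redundancy.considerLoad by default, so most tests no longer need to clear the key themselves. A test that specifically exercises load-aware block placement can opt back in through the new builder method. The sketch below is a hypothetical usage example (the class, test method, and node count are illustrative), assuming the default is applied when the Builder is constructed, as the hunk above suggests:

// Hypothetical test sketch; only setNNRedundancyConsiderLoad() comes from this change.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;

public class TestLoadAwarePlacement {
  @Test
  public void testWithConsiderLoadEnabled() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // MiniDFSCluster now forces considerLoad to false by default;
    // re-enable it explicitly for this test via the new builder method.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3)
        .setNNRedundancyConsiderLoad(true)
        .build();
    try {
      cluster.waitActive();
      // ... exercise block placement under load here ...
    } finally {
      cluster.shutdown();
    }
  }
}

Tests such as TestEncryptedTransfer and TestReplication, changed below, go the other way: they drop their explicit setBoolean calls and simply rely on the new default.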

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java

@@ -328,9 +328,8 @@ public class TestEncryptedTransfer {
     // use 4 datanodes to make sure that after 1 data node is stopped,
     // client only retries establishing pipeline with the 4th node.
     int numDataNodes = 4;
-    // do not consider load factor when selecting a data node
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
-        false);
+    // Do not consider load factor when selecting a data node;
+    // it is already set to false by MiniDFSCluster.
     setEncryptionConfigKeys();
 
     cluster = new MiniDFSCluster.Builder(conf)

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java

@@ -243,7 +243,6 @@ public class TestReplication {
    */
   public void runReplication(boolean simulated) throws IOException {
     Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
     if (simulated) {
       SimulatedFSDataset.setFactory(conf);
     }