
HDFS-6798. Add test case for incorrect data node condition during balancing. (Contributed by Benoy Antony)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1615044 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal, 10 years ago
commit 397c882362

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -337,6 +337,9 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-6685. Balancer should preserve storage type of replicas.  (szetszwo)
 
+    HDFS-6798. Add test case for incorrect data node condition during
+    balancing. (Benoy Antony via Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)

+ 65 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

@@ -658,6 +658,71 @@ public class TestBalancer {
     oneNodeTest(conf, false);
   }
   
+  /* We first start a cluster and fill it up to a certain size, then
+   * redistribute the blocks according to the required distribution.
+   * Next we start an empty datanode and run the balancer to balance
+   * the cluster. A partially filled datanode is excluded during
+   * balancing, which triggers a situation where one of a block's
+   * locations is unknown to the balancer.
+   */
+  @Test(timeout=100000)
+  public void testUnknownDatanode() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    long[] distribution = new long[] {50*CAPACITY/100, 70*CAPACITY/100, 0*CAPACITY/100};
+    long[] capacities = new long[] {CAPACITY, CAPACITY, CAPACITY};
+    String[] racks = new String[] {RACK0, RACK1, RACK1};
+
+    int numDatanodes = distribution.length;
+    if (capacities.length != numDatanodes || racks.length != numDatanodes) {
+      throw new IllegalArgumentException("Array length is not the same");
+    }
+
+    // calculate the total space that needs to be filled
+    final long totalUsedSpace = sum(distribution);
+
+    // fill the cluster
+    ExtendedBlock[] blocks = generateBlocks(conf, totalUsedSpace,
+        (short) numDatanodes);
+
+    // redistribute blocks
+    Block[][] blocksDN = distributeBlocks(
+        blocks, (short)(numDatanodes-1), distribution);
+
+    // restart the cluster: do NOT format the cluster
+    conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f");
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+        .format(false)
+        .racks(racks)
+        .simulatedCapacities(capacities)
+        .build();
+    try {
+      cluster.waitActive();
+      client = NameNodeProxies.createProxy(conf, cluster.getFileSystem(0).getUri(),
+          ClientProtocol.class).getProxy();
+
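+      // inject the redistributed block lists into the original datanodes
+      // (the cluster was restarted without formatting, so block files exist)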
+      for (int i = 0; i < numDatanodes; i++) {
+        cluster.injectBlocks(i, Arrays.asList(blocksDN[i]), null);
+      }
+
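+      // start one additional, empty datanode on RACK0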
+      cluster.startDataNodes(conf, 1, true, null,
+          new String[]{RACK0}, null, new long[]{CAPACITY});
+      cluster.triggerHeartbeats();
+
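+      // exclude the first (partially filled) datanode from balancing, so
+      // the balancer encounters a block location it is not tracking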
+      Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
+      Set<String> datanodes = new HashSet<String>();
+      datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
+      Balancer.Parameters p = new Balancer.Parameters(
+          Balancer.Parameters.DEFAULT.policy,
+          Balancer.Parameters.DEFAULT.threshold,
+          datanodes, Balancer.Parameters.DEFAULT.nodesToBeIncluded);
+      final int r = Balancer.run(namenodes, p, conf);
+      assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Test parse method in Balancer#Cli class with threshold value out of
    * boundaries.
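
For readers who want to drive the same exclusion path outside the test harness, here is a minimal sketch. It is not part of this commit and reuses only the calls visible in the diff above; the class name and hostname are hypothetical, and the class is placed in the balancer package on the assumption that, as with the test, Balancer.Parameters may not be visible from other packages.

package org.apache.hadoop.hdfs.server.balancer;

import java.net.URI;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class BalancerExcludeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();

    // hostname to leave untouched during balancing (hypothetical value)
    Set<String> excluded = new HashSet<String>();
    excluded.add("dn0.example.com");

    // The third constructor argument is the exclusion set; this mirrors
    // the Balancer.Parameters call in the test above.
    Balancer.Parameters p = new Balancer.Parameters(
        Balancer.Parameters.DEFAULT.policy,
        Balancer.Parameters.DEFAULT.threshold,
        excluded,
        Balancer.Parameters.DEFAULT.nodesToBeIncluded);

    // balance every configured namespace, as the test does
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    final int r = Balancer.run(namenodes, p, conf);
    System.out.println("Balancer.run returned " + r
        + "; the test expects Balancer.ReturnStatus.SUCCESS.code");
  }
}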