
HDFS-12146. [SPS]: Fix TestStoragePolicySatisfierWithStripedFile#testSPSWhenFileHasLowRedundancyBlocks. Contributed by Surendra Singh Lilhore.

Rakesh Radhakrishnan, 8 years ago
Commit 9e82e5a86e

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java

@@ -1025,12 +1025,13 @@ public class TestStoragePolicySatisfier {
       list.add(cluster.stopDataNode(0));
       list.add(cluster.stopDataNode(0));
       cluster.restartNameNodes();
-      cluster.restartDataNode(list.get(0), true);
-      cluster.restartDataNode(list.get(1), true);
+      cluster.restartDataNode(list.get(0), false);
+      cluster.restartDataNode(list.get(1), false);
       cluster.waitActive();
       fs.satisfyStoragePolicy(filePath);
-      Thread.sleep(3000 * 6);
-      cluster.restartDataNode(list.get(2), true);
+      DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+          StorageType.ARCHIVE, 2, 30000, cluster.getFileSystem());
+      cluster.restartDataNode(list.get(2), false);
       DFSTestUtil.waitExpectedStorageType(filePath.toString(),
           StorageType.ARCHIVE, 3, 30000, cluster.getFileSystem());
     } finally {
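Both test files get the same core fix: the hard-coded Thread.sleep(3000 * 6) is replaced by DFSTestUtil.waitExpectedStorageType(...), which polls the file's block locations until the expected number of replicas has landed on ARCHIVE storage or the 30-second timeout expires, so the test neither flakes on slow machines nor wastes a fixed 18 seconds on fast ones. A minimal sketch of that wait-until-condition idiom, using illustrative names rather than the real Hadoop test helper:

import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

/**
 * Minimal sketch of the polling idiom this patch switches to, assuming a
 * condition that can be re-checked cheaply. Names are illustrative; the
 * actual test uses DFSTestUtil.waitExpectedStorageType.
 */
public final class WaitUtil {

  private WaitUtil() {
  }

  /** Re-check the condition every intervalMs until it holds or timeoutMs passes. */
  public static void waitFor(Supplier<Boolean> condition, long intervalMs,
      long timeoutMs) throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (Boolean.TRUE.equals(condition.get())) {
        return; // condition met early, no fixed sleep needed
      }
      Thread.sleep(intervalMs);
    }
    throw new TimeoutException("condition not met within " + timeoutMs + " ms");
  }
}

A test would then call something like WaitUtil.waitFor(() -> archiveReplicaCount(filePath) >= 2, 500, 30000) instead of sleeping for a fixed 18 seconds; archiveReplicaCount is a hypothetical helper standing in for the block-location check that DFSTestUtil performs.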

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java

@@ -308,8 +308,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
    */
   @Test(timeout = 300000)
   public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
-    // start 10 datanodes
-    int numOfDatanodes = 10;
+    // start 9 datanodes
+    int numOfDatanodes = 9;
     int storagesPerDatanode = 2;
     long capacity = 20 * defaultStripeBlockSize;
     long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
@@ -338,7 +338,6 @@ public class TestStoragePolicySatisfierWithStripedFile {
             {StorageType.DISK, StorageType.ARCHIVE},
             {StorageType.DISK, StorageType.ARCHIVE},
             {StorageType.DISK, StorageType.ARCHIVE},
-            {StorageType.DISK, StorageType.ARCHIVE},
             {StorageType.DISK, StorageType.ARCHIVE}})
         .storageCapacities(capacities)
         .build();
@@ -366,15 +365,16 @@ public class TestStoragePolicySatisfierWithStripedFile {
       }
       }
       cluster.restartNameNodes();
       cluster.restartNameNodes();
       // Restart half datanodes
       // Restart half datanodes
-      for (int i = 0; i < numOfDatanodes / 2; i++) {
-        cluster.restartDataNode(list.get(i), true);
+      for (int i = 0; i < 5; i++) {
+        cluster.restartDataNode(list.get(i), false);
       }
       }
       cluster.waitActive();
       cluster.waitActive();
       fs.satisfyStoragePolicy(fooFile);
       fs.satisfyStoragePolicy(fooFile);
-      Thread.sleep(3000 * 6);
+      DFSTestUtil.waitExpectedStorageType(fooFile.toString(),
+          StorageType.ARCHIVE, 5, 30000, cluster.getFileSystem());
       //Start reaming datanodes
       //Start reaming datanodes
-      for (int i = numOfDatanodes - 1; i > numOfDatanodes / 2; i--) {
-        cluster.restartDataNode(list.get(i), true);
+      for (int i = numOfDatanodes - 1; i >= 5; i--) {
+        cluster.restartDataNode(list.get(i), false);
       }
       }
       // verify storage types and locations.
       // verify storage types and locations.
       waitExpectedStorageType(cluster, fooFile.toString(), fileLen,
       waitExpectedStorageType(cluster, fooFile.toString(), fileLen,
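Two smaller corrections ride along in this hunk. First, restartDataNode is now called with false for its second argument (the keepPort flag in MiniDFSCluster), so a restarted datanode is not forced to rebind its previous port. Second, the old restart loop for (int i = numOfDatanodes - 1; i > numOfDatanodes / 2; i--) never reached index numOfDatanodes / 2, so with ten datanodes the node at list index 5 was never brought back; the new bounds (i < 5 and i >= 5) cover all nine stopped datanodes exactly once. A standalone sketch of the bound fix, assuming the list holds nine stopped datanodes:

/**
 * Standalone sketch of the loop-bound fix: print which list indices each
 * restart loop touches. With the new bounds every index 0..8 is restarted
 * exactly once; the old bound (i > numOfDatanodes / 2) skipped one index.
 */
public final class RestartLoopBounds {
  public static void main(String[] args) {
    int numOfDatanodes = 9;

    // First loop from the patch: bring back the first five stopped datanodes.
    for (int i = 0; i < 5; i++) {
      System.out.println("first loop restarts list index " + i);
    }
    // Second loop from the patch: bring back the rest, highest index first.
    for (int i = numOfDatanodes - 1; i >= 5; i--) {
      System.out.println("second loop restarts list index " + i);
    }
  }
}

Running the sketch prints indices 0 through 4 from the first loop and 8, 7, 6, 5 from the second, confirming that no stopped datanode is left out before the final waitExpectedStorageType assertion.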