
HDFS-13251. Avoid using hard coded datanode data dirs in unit tests. Contributed by Ajay Kumar.

(cherry picked from commit f83716b7f2e5b63e4c2302c374982755233d4dd6)
Xiaoyu Yao 7 years ago
Parent
Current commit
d09975a453
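
Every hunk below applies the same pattern: instead of rebuilding a datanode volume path by hand from MiniDFSCluster.getDataDirectory() and the "dataN" numbering convention, the tests now ask the cluster for the directory via getInstanceStorageDir(dnIndex, volIndex) (or getStorageDir), so they no longer break if the on-disk layout changes. A minimal sketch of the two styles, assuming a running MiniDFSCluster; the helper class and method names here are illustrative only and are not part of the commit:

    import java.io.File;

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Illustrative sketch only -- this class is not part of the commit.
    class DataDirLookupSketch {

      // Old pattern being removed: reconstruct the volIndex-th volume of
      // datanode dnIndex from the cluster's on-disk layout, where volumes are
      // numbered data1, data2, ... and each datanode owns storagesPerDatanode
      // consecutive entries. This breaks if the numbering scheme changes.
      static File hardCodedDir(MiniDFSCluster cluster, int dnIndex, int volIndex,
          int storagesPerDatanode) {
        return new File(cluster.getDataDirectory(),
            "data" + (storagesPerDatanode * dnIndex + volIndex + 1));
      }

      // New pattern: let MiniDFSCluster resolve the same directory itself.
      static File lookedUpDir(MiniDFSCluster cluster, int dnIndex, int volIndex) {
        return cluster.getInstanceStorageDir(dnIndex, volIndex);
      }
    }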

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockStatsMXBean.java

@@ -179,10 +179,9 @@ public class TestBlockStatsMXBean {
 
 
     storageTypeStats = storageTypeStatsMap.get(StorageType.ARCHIVE);
     assertEquals(3, storageTypeStats.getNodesInService());
-    String dataDir = cluster.getDataDirectory();
-    File dn1ArcVol1 = new File(dataDir, "data" + (3 * 0 + 2));
-    File dn2ArcVol1 = new File(dataDir, "data" + (3 * 1 + 2));
-    File dn3ArcVol1 = new File(dataDir, "data" + (3 * 2 + 2));
+    File dn1ArcVol1 = cluster.getInstanceStorageDir(0, 1);
+    File dn2ArcVol1 = cluster.getInstanceStorageDir(1, 1);
+    File dn3ArcVol1 = cluster.getInstanceStorageDir(2, 1);
     DataNodeTestUtils.injectDataDirFailure(dn1ArcVol1);
     DataNodeTestUtils.injectDataDirFailure(dn2ArcVol1);
     DataNodeTestUtils.injectDataDirFailure(dn3ArcVol1);

+ 4 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java

@@ -305,7 +305,6 @@ public class TestDataNodeHotSwapVolumes {
 
 
   private void addVolumes(int numNewVolumes, CountDownLatch waitLatch)
       throws ReconfigurationException, IOException, InterruptedException {
-    File dataDir = new File(cluster.getDataDirectory());
     DataNode dn = cluster.getDataNodes().get(0);  // First DataNode.
     Configuration conf = dn.getConf();
     String oldDataDir = conf.get(DFS_DATANODE_DATA_DIR_KEY);
@@ -315,14 +314,14 @@ public class TestDataNodeHotSwapVolumes {
     int startIdx = oldDataDir.split(",").length + 1;
     // Find the first available (non-taken) directory name for data volume.
     while (true) {
-      File volumeDir = new File(dataDir, "data" + startIdx);
+      File volumeDir = cluster.getInstanceStorageDir(0, startIdx);
       if (!volumeDir.exists()) {
         break;
       }
       startIdx++;
     }
     for (int i = startIdx; i < startIdx + numNewVolumes; i++) {
-      File volumeDir = new File(dataDir, "data" + String.valueOf(i));
+      File volumeDir = cluster.getInstanceStorageDir(0, i);
       newVolumeDirs.add(volumeDir);
       volumeDir.mkdirs();
       newDataDirBuf.append(",");
@@ -985,7 +984,7 @@ public class TestDataNodeHotSwapVolumes {
 
 
     DataNode dn = cluster.getDataNodes().get(0);
     final String oldDataDir = dn.getConf().get(DFS_DATANODE_DATA_DIR_KEY);
-    File dirToFail = new File(cluster.getDataDirectory(), "data1");
+    File dirToFail = cluster.getInstanceStorageDir(0, 0);

     FsVolumeImpl failedVolume = DataNodeTestUtils.getVolume(dn, dirToFail);
     assertTrue("No FsVolume was found for " + dirToFail,
@@ -1037,7 +1036,7 @@ public class TestDataNodeHotSwapVolumes {
         InternalDataNodeTestUtils.spyOnBposToNN(dn, cluster.getNameNode());

     // Remove a data dir from datanode
-    File dataDirToKeep = new File(cluster.getDataDirectory(), "data1");
+    File dataDirToKeep = cluster.getInstanceStorageDir(0, 0);
     assertThat(
         "DN did not update its own config",
         dn.reconfigurePropertyImpl(

+ 10 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java

@@ -168,7 +168,7 @@ public class TestDataNodeVolumeFailure {
    
    
     // fail the volume
     // delete/make non-writable one of the directories (failed volume)
-    data_fail = new File(dataDir, "data3");
+    data_fail = cluster.getInstanceStorageDir(1, 0);
     failedDir = MiniDFSCluster.getFinalizedDir(data_fail,
         cluster.getNamesystem().getBlockPoolId());
     if (failedDir.exists() &&
@@ -235,7 +235,7 @@ public class TestDataNodeVolumeFailure {
     DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L);
     DFSTestUtil.waitReplication(fs, file1, (short) 2);

-    File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
+    File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
     DataNode dn0 = cluster.getDataNodes().get(0);
     DataNodeTestUtils.waitForDiskError(dn0,
@@ -298,8 +298,8 @@ public class TestDataNodeVolumeFailure {
     assumeNotWindows();

     // make both data directories to fail on dn0
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1, dn0Vol2);
     DataNode dn0 = cluster.getDataNodes().get(0);
     DataNodeTestUtils.waitForDiskError(dn0,
@@ -322,8 +322,8 @@ public class TestDataNodeVolumeFailure {
     // volume failures which is currently not supported on Windows.
     assumeNotWindows();

-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
         DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
@@ -366,8 +366,8 @@ public class TestDataNodeVolumeFailure {
     // volume failures which is currently not supported on Windows.
     assumeNotWindows();

-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     final File dn0VolNew = new File(dataDir, "data_new");
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
@@ -413,8 +413,8 @@ public class TestDataNodeVolumeFailure {
     DFSTestUtil.waitReplication(fs, file1, (short)3);

     // Fail the first volume on both datanodes
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
     DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);

     Path file2 = new Path("/test2");

+ 16 - 18
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java

@@ -78,7 +78,6 @@ public class TestDataNodeVolumeFailureReporting {
   private FileSystem fs;
   private MiniDFSCluster cluster;
   private Configuration conf;
-  private String dataDir;
   private long volumeCapacity;

   // Sleep at least 3 seconds (a 1s heartbeat plus padding) to allow
@@ -134,10 +133,10 @@ public class TestDataNodeVolumeFailureReporting {
     final long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
     long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);

-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    File dn3Vol1 = new File(dataDir, "data"+(2*2+1));
-    File dn3Vol2 = new File(dataDir, "data"+(2*2+2));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
+    File dn3Vol1 = cluster.getInstanceStorageDir(2, 0);
+    File dn3Vol2 = cluster.getInstanceStorageDir(2, 1);

     /*
      * Make the 1st volume directories on the first two datanodes
@@ -275,8 +274,8 @@ public class TestDataNodeVolumeFailureReporting {
 
 
     // Fail the first volume on both datanodes (we have to keep the
     // third healthy so one node in the pipeline will not fail).
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
     DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);

     Path file1 = new Path("/test1");
@@ -317,10 +316,10 @@ public class TestDataNodeVolumeFailureReporting {
     long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
     long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);

-    File dn1Vol1 = new File(dataDir, "data"+(4*0+1));
-    File dn1Vol2 = new File(dataDir, "data"+(4*0+2));
-    File dn2Vol1 = new File(dataDir, "data"+(4*1+1));
-    File dn2Vol2 = new File(dataDir, "data"+(4*1+2));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn1Vol2 = cluster.getInstanceStorageDir(0, 1);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
+    File dn2Vol2 = cluster.getInstanceStorageDir(1, 1);

     // Make the first two volume directories on the first two datanodes
     // non-accessible.
@@ -376,10 +375,10 @@ public class TestDataNodeVolumeFailureReporting {
 
 
     // Fail the first volume on both datanodes (we have to keep the
     // third healthy so one node in the pipeline will not fail).
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn1Vol2 = new File(dataDir, "data"+(2*0+2));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    File dn2Vol2 = new File(dataDir, "data"+(2*1+2));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn1Vol2 = cluster.getInstanceStorageDir(0, 1);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
+    File dn2Vol2 = cluster.getInstanceStorageDir(1, 1);
     DataNodeTestUtils.injectDataDirFailure(dn1Vol1);
     DataNodeTestUtils.injectDataDirFailure(dn2Vol1);
 
 
@@ -528,8 +527,8 @@ public class TestDataNodeVolumeFailureReporting {
   @Test
   public void testHotSwapOutFailedVolumeAndReporting()
           throws Exception {
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
             DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
@@ -777,7 +776,6 @@ public class TestDataNodeVolumeFailureReporting {
         .storagesPerDatanode(storagesPerDatanode).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
-    dataDir = cluster.getDataDirectory();
     long dnCapacity = DFSTestUtil.getDatanodeCapacity(
         cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
     volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();

+ 1 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java

@@ -50,7 +50,6 @@ public class TestDataNodeVolumeFailureToleration {
   private FileSystem fs;
   private MiniDFSCluster cluster;
   private Configuration conf;
-  private String dataDir;

   // Sleep at least 3 seconds (a 1s heartbeat plus padding) to allow
   // for heartbeats to propagate from the datanodes to the namenode.
@@ -80,7 +79,6 @@ public class TestDataNodeVolumeFailureToleration {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
-    dataDir = cluster.getDataDirectory();
   }

   @After
@@ -161,7 +159,7 @@ public class TestDataNodeVolumeFailureToleration {
     long dnCapacity = DFSTestUtil.getDatanodeCapacity(dm, 0);

     // Fail a volume on the 2nd DN
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
+    File dn2Vol1 = cluster.getStorageDir(1, 0);
     DataNodeTestUtils.injectDataDirFailure(dn2Vol1);

     // Should only get two replicas (the first DN and the 3rd)

+ 1 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeMetrics.java

@@ -102,9 +102,7 @@ public class TestDataNodeVolumeMetrics {
 
 
       ArrayList<DataNode> dns = cluster.getDataNodes();
       assertTrue("DN1 should be up", dns.get(0).isDatanodeUp());
-
-      final String dataDir = cluster.getDataDirectory();
-      final File dn1Vol2 = new File(dataDir, "data2");
+      final File dn1Vol2 = cluster.getInstanceStorageDir(0, 1);

       DataNodeTestUtils.injectDataDirFailure(dn1Vol2);
       verifyDataNodeVolumeMetrics(fs, cluster, fileName);

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java

@@ -575,13 +575,15 @@ public class TestDiskBalancerCommand {
     assertThat(
         outputs.get(3),
         is(allOf(containsString("DISK"),
-            containsString("/dfs/data/data1"),
+            containsString(cluster.getInstanceStorageDir(0, 0)
+                .getAbsolutePath()),
             containsString("0.00"),
             containsString("1.00"))));
     assertThat(
         outputs.get(4),
         is(allOf(containsString("DISK"),
-            containsString("/dfs/data/data2"),
+            containsString(cluster.getInstanceStorageDir(0, 1)
+                .getAbsolutePath()),
             containsString("0.00"),
             containsString("1.00"))));
   }

+ 7 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java

@@ -349,11 +349,14 @@ public class TestDFSAdmin {
           containsString("FAILED: Change property " +
           containsString("FAILED: Change property " +
               DFS_DATANODE_DATA_DIR_KEY));
               DFS_DATANODE_DATA_DIR_KEY));
     }
     }
+    File dnDir0 = cluster.getInstanceStorageDir(0, 0);
+    File dnDir1 = cluster.getInstanceStorageDir(0, 1);
     assertThat(outs.get(offset + 1),
     assertThat(outs.get(offset + 1),
-        is(allOf(containsString("From:"), containsString("data1"),
-            containsString("data2"))));
-    assertThat(outs.get(offset + 2),
-        is(not(anyOf(containsString("data1"), containsString("data2")))));
+        is(allOf(containsString("From:"), containsString(dnDir0.getName()),
+            containsString(dnDir1.getName()))));
+    assertThat(outs.get(offset + 2), is(not(
+        anyOf(containsString(dnDir0.getName()),
+            containsString(dnDir1.getName())))));
     assertThat(outs.get(offset + 2),
     assertThat(outs.get(offset + 2),
         is(allOf(containsString("To"), containsString("data_new"))));
         is(allOf(containsString("To"), containsString("data_new"))));
   }
   }