@@ -152,8 +152,7 @@ public class TestDataNodeVolumeFailure {
   @Test(timeout = 120000)
   public void testVolumeFailure() throws Exception {
     System.out.println("Data dir: is " + dataDir.getPath());
-
-
+
     // Data dir structure is dataDir/data[1-4]/[current,tmp...]
     // data1,2 is for datanode 1, data2,3 - datanode2
     String filename = "/test.txt";
@@ -168,7 +167,7 @@ public class TestDataNodeVolumeFailure {
 
     // fail the volume
     // delete/make non-writable one of the directories (failed volume)
-    data_fail = new File(dataDir, "data3");
+    data_fail = cluster.getInstanceStorageDir(1, 0);
     failedDir = MiniDFSCluster.getFinalizedDir(data_fail,
         cluster.getNamesystem().getBlockPoolId());
     if (failedDir.exists() &&
@@ -235,7 +234,7 @@ public class TestDataNodeVolumeFailure {
     DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L);
     DFSTestUtil.waitReplication(fs, file1, (short) 2);
 
-    File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
+    File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
     DataNode dn0 = cluster.getDataNodes().get(0);
     DataNodeTestUtils.waitForDiskError(dn0,
@@ -294,8 +293,8 @@ public class TestDataNodeVolumeFailure {
   public void testDataNodeShutdownAfterNumFailedVolumeExceedsTolerated()
       throws Exception {
     // make both data directories to fail on dn0
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     DataNodeTestUtils.injectDataDirFailure(dn0Vol1, dn0Vol2);
     DataNode dn0 = cluster.getDataNodes().get(0);
     DataNodeTestUtils.waitForDiskError(dn0,
@@ -314,8 +313,8 @@ public class TestDataNodeVolumeFailure {
   @Test
   public void testVolumeFailureRecoveredByHotSwappingVolume()
       throws Exception {
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
         DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
@@ -354,9 +353,9 @@ public class TestDataNodeVolumeFailure {
   @Test
   public void testTolerateVolumeFailuresAfterAddingMoreVolumes()
       throws Exception {
-    final File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    final File dn0Vol2 = new File(dataDir, "data" + (2 * 0 + 2));
-    final File dn0VolNew = new File(dataDir, "data_new");
+    final File dn0Vol1 = cluster.getInstanceStorageDir(0, 0);
+    final File dn0Vol2 = cluster.getInstanceStorageDir(0, 1);
+    final File dn0VolNew = new File(cluster.getDataDirectory(), "data_new");
     final DataNode dn0 = cluster.getDataNodes().get(0);
     final String oldDataDirs = dn0.getConf().get(
         DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
@@ -401,8 +400,8 @@ public class TestDataNodeVolumeFailure {
     DFSTestUtil.waitReplication(fs, file1, (short)3);
 
     // Fail the first volume on both datanodes
-    File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
-    File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
+    File dn1Vol1 = cluster.getInstanceStorageDir(0, 0);
+    File dn2Vol1 = cluster.getInstanceStorageDir(1, 0);
    DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);
 
     Path file2 = new Path("/test2");