@@ -50,10 +50,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
 import org.apache.hadoop.hdfs.server.datanode.DNConf;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -78,7 +76,9 @@ import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.TestName;
 import org.mockito.Mockito;
 
 import java.io.File;
@@ -124,7 +124,10 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class TestFsDatasetImpl {
-  Logger LOG = LoggerFactory.getLogger(TestFsDatasetImpl.class);
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+      TestFsDatasetImpl.class);
+
   private static final String BASE_DIR =
       new FileSystemTestHelper().getTestRootDir();
   private String replicaCacheRootDir = BASE_DIR + Path.SEPARATOR + "cache";
@@ -132,10 +135,6 @@ public class TestFsDatasetImpl {
   private static final String CLUSTER_ID = "cluser-id";
   private static final String[] BLOCK_POOL_IDS = {"bpid-0", "bpid-1"};
 
-  // Use to generate storageUuid
-  private static final DataStorage dsForStorageUuid = new DataStorage(
-      new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE));
-
   private Configuration conf;
   private DataNode datanode;
   private DataStorage storage;
@@ -143,6 +142,9 @@ public class TestFsDatasetImpl {
 
   private final static String BLOCKPOOL = "BP-TEST";
 
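+  // JUnit's TestName rule exposes the name of the currently running test;
+  // the tests below use it to build per-test file paths in place of the
+  // previously shared "testData" path.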
+  @Rule
+  public TestName name = new TestName();
+
   private static Storage.StorageDirectory createStorageDirectory(File root,
       Configuration conf)
       throws SecurityException, IOException {
@@ -228,6 +230,7 @@ public class TestFsDatasetImpl {
     assertEquals(NUM_INIT_VOLUMES, getNumVolumes());
     assertEquals(0, dataset.getNumFailedVolumes());
   }
+
   @Test(timeout=10000)
   public void testReadLockEnabledByDefault()
       throws Exception {
@@ -1132,7 +1135,7 @@
     Assert.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks());
 
     FileSystem fs = cluster.getFileSystem();
-    Path filePath = new Path("testData");
+    Path filePath = new Path(name.getMethodName());
     DFSTestUtil.createFile(fs, filePath, 1, (short) 1, 0);
 
     block = DFSTestUtil.getFirstBlock(fs, filePath);
@@ -1179,7 +1182,7 @@
       FileSystem fs = cluster.getFileSystem();
       DataNode dataNode = cluster.getDataNodes().get(0);
 
-      Path filePath = new Path("testData");
+      Path filePath = new Path(name.getMethodName());
       long fileLen = 100;
 
       ExtendedBlock block = createTestFile(fs, fileLen, filePath);
@@ -1227,7 +1230,7 @@
       FileSystem fs = cluster.getFileSystem();
       DataNode dataNode = cluster.getDataNodes().get(0);
 
-      Path filePath = new Path("testData");
+      Path filePath = new Path(name.getMethodName());
       DFSTestUtil.createFile(fs, filePath, 100, (short) 1, 0);
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
 
@@ -1266,7 +1269,7 @@
       FileSystem fs = cluster.getFileSystem();
       DataNode dataNode = cluster.getDataNodes().get(0);
 
-      Path filePath = new Path("testData");
+      Path filePath = new Path(name.getMethodName());
       long fileLen = 100;
 
       ExtendedBlock block = createTestFile(fs, fileLen, filePath);
@@ -1307,23 +1310,34 @@
    * DiskScanner should clean up the hardlink correctly.
    */
   @Test(timeout = 30000)
-  public void testDnRestartWithHardLink() {
+  public void testDnRestartWithHardLink() throws Exception {
     MiniDFSCluster cluster = null;
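+    // Capture the current duplicate-replica-deletion setting so the finally
+    // block below can restore it once the test finishes.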
+    boolean isReplicaDeletionEnabled =
+        conf.getBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
+            DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION_DEFAULT);
     try {
       conf.setBoolean(DFSConfigKeys
           .DFS_DATANODE_ALLOW_SAME_DISK_TIERING, true);
       conf.setDouble(DFSConfigKeys
          .DFS_DATANODE_RESERVE_FOR_ARCHIVE_DEFAULT_PERCENTAGE, 0.5);
+      // A DataNode restart in the middle of block movement may leave an
+      // uncleaned hardlink, so deletion of duplicate replicas is disabled
+      // here to keep that edge case from happening. It is re-enabled just
+      // before starting the DirectoryScanner, via
+      // setDeleteDuplicateReplicasForTests (HDFS-16213).
+      conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
+          false);
       cluster = new MiniDFSCluster.Builder(conf)
           .numDataNodes(1)
           .storageTypes(
               new StorageType[]{StorageType.DISK, StorageType.ARCHIVE})
           .storagesPerDatanode(2)
           .build();
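+      // Block until the NameNode is up and the DataNode has registered,
+      // so the test body does not race cluster startup.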
+      cluster.waitActive();
       FileSystem fs = cluster.getFileSystem();
       DataNode dataNode = cluster.getDataNodes().get(0);
 
-      Path filePath = new Path("testData");
+      Path filePath = new Path(name.getMethodName());
       long fileLen = 100;
 
       ExtendedBlock block = createTestFile(fs, fileLen, filePath);
@@ -1331,11 +1345,15 @@
       FsDatasetImpl fsDataSetImpl = (FsDatasetImpl) dataNode.getFSDataset();
 
       final ReplicaInfo oldReplicaInfo = fsDataSetImpl.getReplicaInfo(block);
+      StorageType oldStorageType = oldReplicaInfo.getVolume().getStorageType();
 
       fsDataSetImpl.finalizeNewReplica(
           createNewReplicaObjWithLink(block, fsDataSetImpl), block);
 
       ReplicaInfo newReplicaInfo = fsDataSetImpl.getReplicaInfo(block);
+      StorageType newStorageType = newReplicaInfo.getVolume().getStorageType();
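+      // The linked replica is expected on the ARCHIVE volume while the
+      // original replica remains on DISK.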
+      assertEquals(StorageType.DISK, oldStorageType);
+      assertEquals(StorageType.ARCHIVE, newStorageType);
 
       cluster.restartDataNode(0);
       cluster.waitDatanodeFullyStarted(cluster.getDataNodes().get(0), 60000);
@@ -1344,26 +1362,35 @@
       assertTrue(Files.exists(Paths.get(newReplicaInfo.getBlockURI())));
       assertTrue(Files.exists(Paths.get(oldReplicaInfo.getBlockURI())));
 
-      DirectoryScanner scanner = new DirectoryScanner(
-          cluster.getDataNodes().get(0).getFSDataset(), conf);
+      // Re-enable deleteDuplicateReplicas before starting the DirectoryScanner.
+      FsDatasetSpi<?> fsDataset = cluster.getDataNodes().get(0).getFSDataset();
+      DirectoryScanner scanner = new DirectoryScanner(fsDataset, conf);
+      FsVolumeImpl fsVolume =
+          (FsVolumeImpl) fsDataset.getFsVolumeReferences().get(0);
+      fsVolume.getBlockPoolSlice(fsVolume.getBlockPoolList()[0])
+          .setDeleteDuplicateReplicasForTests(true);
       scanner.start();
       scanner.run();
 
-      GenericTestUtils.waitFor(new Supplier<Boolean>() {
-        @Override public Boolean get() {
-          return !Files.exists(Paths.get(oldReplicaInfo.getBlockURI()));
-        }
-      }, 100, 10000);
+      GenericTestUtils.waitFor(
+          () -> !Files.exists(Paths.get(oldReplicaInfo.getBlockURI())),
+          100, 10000, "Old replica is not deleted by DirScanner even after "
+              + "10s of waiting has elapsed");
       assertTrue(Files.exists(Paths.get(newReplicaInfo.getBlockURI())));
 
       validateFileLen(fs, fileLen, filePath);
 
-    } catch (Exception ex) {
-      LOG.info("Exception in testDnRestartWithHardLink ", ex);
-      fail("Exception while testing testDnRestartWithHardLink ");
+      // Additionally, verify that the remaining replica is removed once the
+      // file itself is deleted.
+      fs.delete(filePath, false);
+      GenericTestUtils.waitFor(
+          () -> !Files.exists(Paths.get(newReplicaInfo.getBlockURI())),
+          100, 10000);
     } finally {
-      if (cluster.isClusterUp()) {
-        cluster.shutdown();
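+      // Restore the duplicate-replica-deletion setting captured before the
+      // test modified it.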
+      conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DUPLICATE_REPLICA_DELETION,
+          isReplicaDeletionEnabled);
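+      // shutdown(true, true) also deletes the cluster's storage directories
+      // and closes cached FileSystem instances, so no state leaks between
+      // tests.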
+      if (cluster != null && cluster.isClusterUp()) {
+        cluster.shutdown(true, true);
       }
     }
   }
@@ -1384,7 +1411,7 @@
           .build();
       FileSystem fs = cluster.getFileSystem();
       DataNode dataNode = cluster.getDataNodes().get(0);
-      Path filePath = new Path("testData");
+      Path filePath = new Path(name.getMethodName());
       long fileLen = 100;
 
       ExtendedBlock block = createTestFile(fs, fileLen, filePath);
@@ -1432,7 +1459,7 @@
           .build();
       FileSystem fs = cluster.getFileSystem();
       DataNode dataNode = cluster.getDataNodes().get(0);
-      Path filePath = new Path("testData");
+      Path filePath = new Path(name.getMethodName());
       long fileLen = 100;
 
       ExtendedBlock block = createTestFile(fs, fileLen, filePath);