@@ -22,6 +22,7 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.function.Supplier;
 
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Lists;
 
@@ -397,7 +398,7 @@ public class TestFsDatasetImpl {
         true);
     conf.setDouble(DFSConfigKeys
         .DFS_DATANODE_RESERVE_FOR_ARCHIVE_DEFAULT_PERCENTAGE,
-        0.5);
+        0.4);
 
     when(datanode.getConf()).thenReturn(conf);
     final DNConf dnConf = new DNConf(datanode);
@@ -415,11 +416,19 @@ public class TestFsDatasetImpl {
     for (String bpid : BLOCK_POOL_IDS) {
       nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
     }
-    dataset.addVolume(
-        createStorageWithStorageType("archive1",
-            StorageType.ARCHIVE, conf, storage, datanode), nsInfos);
+    StorageLocation archive = createStorageWithStorageType("archive1",
+        StorageType.ARCHIVE, conf, storage, datanode);
+    dataset.addVolume(archive, nsInfos);
     assertEquals(2, dataset.getVolumeCount());
 
+    String mount = new DF(new File(archive.getUri()), conf).getMount();
+    double archiveRatio = dataset.getMountVolumeMap()
+        .getCapacityRatioByMountAndStorageType(mount, StorageType.ARCHIVE);
+    double diskRatio = dataset.getMountVolumeMap()
+        .getCapacityRatioByMountAndStorageType(mount, StorageType.DISK);
+    assertEquals(0.4, archiveRatio, 0);
+    assertEquals(0.6, diskRatio, 0);
+
     // Add second ARCHIVAL volume should fail fsDataSetImpl.
     try {
       dataset.addVolume(
@@ -433,6 +442,106 @@ public class TestFsDatasetImpl {
     }
   }
 
+  @Test
+  public void testAddVolumeWithCustomizedCapacityRatio()
+      throws IOException {
+    datanode = mock(DataNode.class);
+    storage = mock(DataStorage.class);
+    this.conf = new Configuration();
+    this.conf.setLong(DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 0);
+    this.conf.set(DFSConfigKeys.DFS_DATANODE_REPLICA_CACHE_ROOT_DIR_KEY,
+        replicaCacheRootDir);
+    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_ALLOW_SAME_DISK_TIERING,
+        true);
+    conf.setDouble(DFSConfigKeys
+        .DFS_DATANODE_RESERVE_FOR_ARCHIVE_DEFAULT_PERCENTAGE,
+        0.5);
+
+    // 1) Normal case: capacity ratios should match the configured values.
+    String archivedir = "/archive1";
+    String diskdir = "/disk1";
+    String configStr = "[0.3]file:" + BASE_DIR + archivedir
+        + ", " + "[0.6]file:" + BASE_DIR + diskdir;
+
+    conf.set(DFSConfigKeys
+        .DFS_DATANODE_SAME_DISK_TIERING_CAPACITY_RATIO_PERCENTAGE,
+        configStr);
+
+    when(datanode.getConf()).thenReturn(conf);
+    final DNConf dnConf = new DNConf(datanode);
+    when(datanode.getDnConf()).thenReturn(dnConf);
+    final BlockScanner disabledBlockScanner = new BlockScanner(datanode);
+    when(datanode.getBlockScanner()).thenReturn(disabledBlockScanner);
+    final ShortCircuitRegistry shortCircuitRegistry =
+        new ShortCircuitRegistry(conf);
+    when(datanode.getShortCircuitRegistry()).thenReturn(shortCircuitRegistry);
+
+    createStorageDirs(storage, conf, 0);
+
+    dataset = createStorageWithCapacityRatioConfig(
+        configStr, archivedir, diskdir);
+
+    Path p = new Path("file:" + BASE_DIR);
+    String mount = new DF(new File(p.toUri()), conf).getMount();
+    double archiveRatio = dataset.getMountVolumeMap()
+        .getCapacityRatioByMountAndStorageType(mount, StorageType.ARCHIVE);
+    double diskRatio = dataset.getMountVolumeMap()
+        .getCapacityRatioByMountAndStorageType(mount, StorageType.DISK);
+    assertEquals(0.3, archiveRatio, 0);
+    assertEquals(0.6, diskRatio, 0);
+
+    // 2) The counterpart volume should get the rest of the capacity
+    // without explicit config.
+    configStr = "[0.3]file:" + BASE_DIR + archivedir;
+    dataset = createStorageWithCapacityRatioConfig(
+        configStr, archivedir, diskdir);
+    mount = new DF(new File(p.toUri()), conf).getMount();
+    archiveRatio = dataset.getMountVolumeMap()
+        .getCapacityRatioByMountAndStorageType(mount, StorageType.ARCHIVE);
+    diskRatio = dataset.getMountVolumeMap()
+        .getCapacityRatioByMountAndStorageType(mount, StorageType.DISK);
+    assertEquals(0.3, archiveRatio, 0);
+    assertEquals(0.7, diskRatio, 0);
+
+    // 3) Adding a volume should fail if the capacity ratio sum is > 1.
+    dataset = new FsDatasetImpl(datanode, storage, conf);
+    configStr = "[0.3]file:" + BASE_DIR + archivedir
+        + ", " + "[0.8]file:" + BASE_DIR + diskdir;
+
+    try {
+      createStorageWithCapacityRatioConfig(
+          configStr, archivedir, diskdir);
+      fail("Should fail add volume as capacity ratio sum is > 1");
+    } catch (IOException e) {
+      assertTrue(e.getMessage()
+          .contains("Not enough capacity ratio left on mount"));
+    }
+  }
+
+  private FsDatasetImpl createStorageWithCapacityRatioConfig(
+      String configStr, String archivedir, String diskdir)
+      throws IOException {
+    conf.set(DFSConfigKeys
+        .DFS_DATANODE_SAME_DISK_TIERING_CAPACITY_RATIO_PERCENTAGE,
+        configStr);
+    dataset = new FsDatasetImpl(datanode, storage, conf);
+    List<NamespaceInfo> nsInfos = Lists.newArrayList();
+    for (String bpid : BLOCK_POOL_IDS) {
+      nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
+    }
+
+    StorageLocation archive = createStorageWithStorageType(
+        archivedir, StorageType.ARCHIVE, conf, storage, datanode);
+
+    StorageLocation disk = createStorageWithStorageType(
+        diskdir, StorageType.DISK, conf, storage, datanode);
+
+    dataset.addVolume(archive, nsInfos);
+    dataset.addVolume(disk, nsInfos);
+    assertEquals(2, dataset.getVolumeCount());
+    return dataset;
+  }
+
   @Test
   public void testAddVolumeWithSameStorageUuid() throws IOException {
     HdfsConfiguration config = new HdfsConfiguration();