@@ -36,6 +36,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
@@ -202,7 +203,15 @@ public class TestExternalStoragePolicySatisfier {
   private void createCluster(boolean createMoverPath) throws IOException {
     getConf().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
     setCluster(startCluster(getConf(), allDiskTypes, NUM_OF_DATANODES,
-        STORAGES_PER_DATANODE, CAPACITY, createMoverPath));
+        STORAGES_PER_DATANODE, CAPACITY, createMoverPath, true));
+    getFS();
+    writeContent(FILE);
+  }
+
+  private void createClusterDoNotStartSPS() throws IOException {
+    getConf().setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    setCluster(startCluster(getConf(), allDiskTypes, NUM_OF_DATANODES,
+        STORAGES_PER_DATANODE, CAPACITY, true, false));
     getFS();
     writeContent(FILE);
   }
@@ -211,12 +220,12 @@ public class TestExternalStoragePolicySatisfier {
       StorageType[][] storageTypes, int numberOfDatanodes, int storagesPerDn,
       long nodeCapacity) throws IOException {
     return startCluster(conf, storageTypes, numberOfDatanodes, storagesPerDn,
-        nodeCapacity, false);
+        nodeCapacity, false, true);
   }
 
   private MiniDFSCluster startCluster(final Configuration conf,
       StorageType[][] storageTypes, int numberOfDatanodes, int storagesPerDn,
-      long nodeCapacity, boolean createMoverPath) throws IOException {
+      long nodeCapacity, boolean createMoverPath, boolean startSPS) throws IOException {
     long[][] capacities = new long[numberOfDatanodes][storagesPerDn];
     for (int i = 0; i < numberOfDatanodes; i++) {
       for (int j = 0; j < storagesPerDn; j++) {
@@ -228,14 +237,16 @@ public class TestExternalStoragePolicySatisfier {
         .storageTypes(storageTypes).storageCapacities(capacities).build();
     cluster.waitActive();
 
-    nnc = DFSTestUtil.getNameNodeConnector(getConf(),
-        HdfsServerConstants.MOVER_ID_PATH, 1, createMoverPath);
+    if (startSPS) {
+      nnc = DFSTestUtil.getNameNodeConnector(getConf(),
+          HdfsServerConstants.MOVER_ID_PATH, 1, createMoverPath);
 
-    externalSps = new StoragePolicySatisfier(getConf());
-    externalCtxt = new ExternalSPSContext(externalSps, nnc);
+      externalSps = new StoragePolicySatisfier(getConf());
+      externalCtxt = new ExternalSPSContext(externalSps, nnc);
 
-    externalSps.init(externalCtxt);
-    externalSps.start(StoragePolicySatisfierMode.EXTERNAL);
+      externalSps.init(externalCtxt);
+      externalSps.start(StoragePolicySatisfierMode.EXTERNAL);
+    }
     return cluster;
   }
 
@@ -1515,6 +1526,20 @@ public class TestExternalStoragePolicySatisfier {
     }
   }
 
+  @Test(timeout = 300000)
+  public void testExternalSPSMetrics()
+      throws Exception {
+
+    try {
+      createClusterDoNotStartSPS();
+      dfs.satisfyStoragePolicy(new Path(FILE));
+      // Assert metrics.
+      assertEquals(1, hdfsCluster.getNamesystem().getPendingSPSPaths());
+    } finally {
+      shutdownCluster();
+    }
+  }
+
   private static void createDirectoryTree(DistributedFileSystem dfs)
       throws Exception {
     // tree structure