@@ -96,6 +96,8 @@ public class TestStoragePolicySatisfier {
 
   private void createCluster() throws IOException {
     config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     hdfsCluster = startCluster(config, allDiskTypes, numOfDatanodes,
         storagesPerDatanode, capacity);
     dfs = hdfsCluster.getFileSystem();
@@ -522,7 +524,7 @@ public class TestStoragePolicySatisfier {
       createCluster();
       // Stop SPS
       hdfsCluster.getNameNode().reconfigurePropertyImpl(
-          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "false");
+          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "false");
       running = hdfsCluster.getFileSystem()
           .getClient().isStoragePolicySatisfierRunning();
       Assert.assertFalse("SPS should stopped as configured.", running);
@@ -533,7 +535,7 @@ public class TestStoragePolicySatisfier {
 
       // Restart SPS
       hdfsCluster.getNameNode().reconfigurePropertyImpl(
-          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "true");
+          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "true");
 
       running = hdfsCluster.getFileSystem()
           .getClient().isStoragePolicySatisfierRunning();
@@ -548,7 +550,7 @@ public class TestStoragePolicySatisfier {
 
       // Restart SPS again
       hdfsCluster.getNameNode().reconfigurePropertyImpl(
-          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, "true");
+          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "true");
       running = hdfsCluster.getFileSystem()
           .getClient().isStoragePolicySatisfierRunning();
       Assert.assertTrue("SPS should be running as "
@@ -558,7 +560,7 @@ public class TestStoragePolicySatisfier {
       doTestWhenStoragePolicySetToCOLD();
     } catch (ReconfigurationException e) {
       throw new IOException("Exception when reconfigure "
-          + DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY, e);
+          + DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, e);
     } finally {
       if (out != null) {
         out.close();
@@ -599,6 +601,8 @@ public class TestStoragePolicySatisfier {
   @Test(timeout = 120000)
   public void testMoveWithBlockPinning() throws Exception {
     config.setBoolean(DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED, true);
+    config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     hdfsCluster = new MiniDFSCluster.Builder(config).numDataNodes(3)
         .storageTypes(
             new StorageType[][] {{StorageType.DISK, StorageType.DISK},
@@ -663,6 +667,8 @@ public class TestStoragePolicySatisfier {
     try {
       int numOfDns = 5;
       config.setLong("dfs.block.size", 1024);
+      config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+          true);
       allDiskTypes =
           new StorageType[][]{{StorageType.DISK, StorageType.ARCHIVE},
               {StorageType.DISK, StorageType.DISK},
@@ -707,6 +713,8 @@ public class TestStoragePolicySatisfier {
         {StorageType.DISK, StorageType.SSD},
         {StorageType.DISK, StorageType.RAM_DISK}};
     config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     try {
       hdfsCluster = startCluster(config, diskTypes, numOfDatanodes,
           storagesPerDatanode, capacity);
@@ -746,6 +754,8 @@ public class TestStoragePolicySatisfier {
         {StorageType.DISK, StorageType.DISK}};
 
     config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     try {
       hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
           storagesPerDatanode, capacity);
@@ -782,6 +792,8 @@ public class TestStoragePolicySatisfier {
         {StorageType.DISK, StorageType.ARCHIVE}};
 
     try {
+      config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+          true);
       hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
           storagesPerDatanode, capacity);
       dfs = hdfsCluster.getFileSystem();
@@ -825,6 +837,8 @@ public class TestStoragePolicySatisfier {
         {StorageType.DISK, StorageType.SSD},
         {StorageType.DISK, StorageType.DISK}};
     config.setLong("dfs.block.size", 2 * DEFAULT_BLOCK_SIZE);
+    config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     long dnCapacity = 1024 * DEFAULT_BLOCK_SIZE + (2 * DEFAULT_BLOCK_SIZE - 1);
     try {
       hdfsCluster = startCluster(config, diskTypes, numOfDatanodes,
@@ -915,7 +929,8 @@ public class TestStoragePolicySatisfier {
         1L);
     config.setBoolean(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOAD_KEY,
         false);
-
+    config.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+        true);
     try {
       hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
           storagesPerDatanode, capacity);
@@ -968,8 +983,10 @@ public class TestStoragePolicySatisfier {
   public void testSPSWhenFileLengthIsZero() throws Exception {
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(0)
-          .build();
+      Configuration conf = new Configuration();
+      conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+          true);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       cluster.waitActive();
       DistributedFileSystem fs = cluster.getFileSystem();
       Path filePath = new Path("/zeroSizeFile");
@@ -1006,6 +1023,8 @@ public class TestStoragePolicySatisfier {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
+      conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+          true);
       conf.set(DFSConfigKeys
           .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
           "3000");
@@ -1054,6 +1073,8 @@ public class TestStoragePolicySatisfier {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new Configuration();
+      conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+          true);
       conf.set(DFSConfigKeys
           .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
           "3000");
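
Every hunk above applies the same two-step pattern: enable the satisfier through the renamed DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY before the MiniDFSCluster starts, then (in the reconfiguration test) toggle it at runtime through the NameNode's reconfiguration hook. The sketch below condenses that pattern in one place; it is illustrative only and not part of the patch. The class name, cluster size, and main-method framing are arbitrary; the package declaration mirrors the test's package so the protected reconfigurePropertyImpl hook is visible, and the calls used are only those that appear in the hunks above.

// Illustrative sketch, not code from the patch.
package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class SpsEnableSketch {
  public static void main(String[] args)
      throws IOException, ReconfigurationException {
    // Step 1: enable the satisfier via the renamed key before startup.
    Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
        true);
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();

      // Step 2: toggle the satisfier at runtime; the value is passed as a
      // string, exactly as the reconfiguration test does.
      cluster.getNameNode().reconfigurePropertyImpl(
          DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, "false");

      // The client reports whether the satisfier is currently running.
      boolean running = cluster.getFileSystem()
          .getClient().isStoragePolicySatisfierRunning();
      System.out.println("SPS running after reconfiguration: " + running);
    } finally {
      cluster.shutdown();
    }
  }
}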