@@ -48,8 +48,8 @@ import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.StatefulBlockInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -136,8 +136,8 @@ public class TestReplicationPolicy {
     }
     for (int i=0; i < NUM_OF_DATANODES; i++) {
       updateHeartbeatWithUsage(dataNodes[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
   }
 
@@ -160,8 +160,8 @@ public class TestReplicationPolicy {
   @Test
   public void testChooseTarget1() throws Exception {
     updateHeartbeatWithUsage(dataNodes[0],
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
         0L, 0L, 4, 0); // overloaded
 
     DatanodeStorageInfo[] targets;
@@ -191,8 +191,8 @@ public class TestReplicationPolicy {
     assertFalse(isOnSameRack(targets[0], targets[2]));
 
     updateHeartbeatWithUsage(dataNodes[0],
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+        2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
   }
 
   private static DatanodeStorageInfo[] chooseTarget(int numOfReplicas) {
@@ -314,8 +314,8 @@ public class TestReplicationPolicy {
   public void testChooseTarget3() throws Exception {
     // make data node 0 to be not qualified to choose
     updateHeartbeatWithUsage(dataNodes[0],
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
+        2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
         0L, 0L, 0, 0); // no space
 
     DatanodeStorageInfo[] targets;
@@ -348,8 +348,8 @@ public class TestReplicationPolicy {
     assertFalse(isOnSameRack(targets[1], targets[3]));
 
     updateHeartbeatWithUsage(dataNodes[0],
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+        2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
   }
 
   /**
@@ -365,8 +365,8 @@ public class TestReplicationPolicy {
     // make data node 0 & 1 to be not qualified to choose: not enough disk space
     for(int i=0; i<2; i++) {
       updateHeartbeatWithUsage(dataNodes[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
 
     DatanodeStorageInfo[] targets;
@@ -393,8 +393,8 @@ public class TestReplicationPolicy {
 
     for(int i=0; i<2; i++) {
       updateHeartbeatWithUsage(dataNodes[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
   }
 
@@ -457,8 +457,8 @@ public class TestReplicationPolicy {
     bm.getDatanodeManager().getNetworkTopology().add(newDn);
     bm.getDatanodeManager().getHeartbeatManager().addDatanode(newDn);
     updateHeartbeatWithUsage(newDn,
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+        2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
 
     // Try picking three nodes. Only two should return.
     excludedNodes.clear();
@@ -504,8 +504,8 @@ public class TestReplicationPolicy {
     // make data node 0 & 1 to be not qualified to choose: not enough disk space
     for(int i=0; i<2; i++) {
       updateHeartbeatWithUsage(dataNodes[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
 
     final LogVerificationAppender appender = new LogVerificationAppender();
@@ -529,8 +529,8 @@ public class TestReplicationPolicy {
 
     for(int i=0; i<2; i++) {
       updateHeartbeatWithUsage(dataNodes[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
   }
 