@@ -38,7 +38,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NetworkTopologyWithNodeGroup;
@@ -191,8 +191,8 @@ public class TestReplicationPolicyWithNodeGroup {
   private static void setupDataNodeCapacity() {
     for(int i=0; i<NUM_OF_DATANODES; i++) {
       updateHeartbeatWithUsage(dataNodes[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
   }
 
@@ -274,8 +274,8 @@ public class TestReplicationPolicyWithNodeGroup {
   @Test
   public void testChooseTarget1() throws Exception {
     updateHeartbeatWithUsage(dataNodes[0],
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
         0L, 0L, 4, 0); // overloaded
 
     DatanodeStorageInfo[] targets;
@@ -312,8 +312,8 @@ public class TestReplicationPolicyWithNodeGroup {
     verifyNoTwoTargetsOnSameNodeGroup(targets);
 
     updateHeartbeatWithUsage(dataNodes[0],
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+        2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
   }
 
   private void verifyNoTwoTargetsOnSameNodeGroup(DatanodeStorageInfo[] targets) {
@@ -380,8 +380,8 @@ public class TestReplicationPolicyWithNodeGroup {
   public void testChooseTarget3() throws Exception {
     // make data node 0 to be not qualified to choose
     updateHeartbeatWithUsage(dataNodes[0],
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
+        2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
         0L, 0L, 0, 0); // no space
 
     DatanodeStorageInfo[] targets;
@@ -412,8 +412,8 @@ public class TestReplicationPolicyWithNodeGroup {
         isOnSameRack(targets[2], targets[3]));
 
     updateHeartbeatWithUsage(dataNodes[0],
-        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-        HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+        2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
   }
 
   /**
@@ -430,8 +430,8 @@ public class TestReplicationPolicyWithNodeGroup {
     // make data node 0-2 to be not qualified to choose: not enough disk space
     for(int i=0; i<3; i++) {
       updateHeartbeatWithUsage(dataNodes[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
 
     DatanodeStorageInfo[] targets;
@@ -661,13 +661,13 @@ public class TestReplicationPolicyWithNodeGroup {
     }
     for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
       updateHeartbeatWithUsage(dataNodes[0],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE,
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE,
           0L, 0L, 0L, 0, 0);
 
       updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
 
     DatanodeStorageInfo[] targets;
@@ -697,8 +697,8 @@ public class TestReplicationPolicyWithNodeGroup {
   public void testRereplicateOnBoundaryTopology() throws Exception {
     for(int i=0; i<NUM_OF_DATANODES_BOUNDARY; i++) {
       updateHeartbeatWithUsage(dataNodesInBoundaryCase[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
     List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
     chosenNodes.add(storagesInBoundaryCase[0]);
@@ -735,8 +735,8 @@ public class TestReplicationPolicyWithNodeGroup {
 
     for(int i=0; i<NUM_OF_DATANODES_MORE_TARGETS; i++) {
       updateHeartbeatWithUsage(dataNodesInMoreTargetsCase[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
 
     DatanodeStorageInfo[] targets;
@@ -786,8 +786,8 @@ public class TestReplicationPolicyWithNodeGroup {
     //Update heartbeat
     for(int i=0; i<NUM_OF_DATANODES_FOR_DEPENDENCIES; i++) {
      updateHeartbeatWithUsage(dataNodesForDependencies[i],
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
-          2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+          2*HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
     }
 
     List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();