|
@@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
|
|
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
|
|
|
import org.apache.hadoop.hdfs.server.namenode.NameNode;
|
|
|
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
|
|
|
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStatistics;
|
|
|
import org.apache.hadoop.test.GenericTestUtils;
|
|
|
import org.apache.hadoop.test.PathUtils;
|
|
|
import org.apache.log4j.Level;
|
|
@@ -350,14 +351,13 @@ public class TestDecommission {
|
|
|
for (int i = 0; i < 10; i++) {
|
|
|
long[] newStats = namenode.getRpcServer().getStats();
|
|
|
|
|
|
- // For decommissioning nodes, ensure capacity of the DN is no longer
|
|
|
- // counted. Only used space of the DN is counted in cluster capacity
|
|
|
+ // For decommissioning nodes, ensure capacity of the DN and dfsUsed
|
|
|
+ // is no longer counted towards total
|
|
|
assertEquals(newStats[0],
|
|
|
- decommissioning ? info.getDfsUsed() : info.getCapacity());
|
|
|
+ decommissioning ? 0 : info.getCapacity());
|
|
|
|
|
|
- // Ensure cluster used capacity is counted for both normal and
|
|
|
- // decommissioning nodes
|
|
|
- assertEquals(newStats[1], info.getDfsUsed());
|
|
|
+ // Ensure cluster used capacity is counted for normal nodes only
|
|
|
+ assertEquals(newStats[1], decommissioning ? 0 : info.getDfsUsed());
|
|
|
|
|
|
// For decommissioning nodes, remaining space from the DN is not counted
|
|
|
assertEquals(newStats[2], decommissioning ? 0 : info.getRemaining());
|
|
@@ -1264,4 +1264,46 @@ public class TestDecommission {
     cluster.shutdown();
   }
 }
+
+  /**
+   * Verify that the cluster-wide datanode statistics (used capacity,
+   * total capacity and block-pool usage) all change once a datanode is
+   * decommissioned, i.e. a decommissioned node's resources are no
+   * longer counted in the aggregate totals.
+   */
+  @Test
+  public void testUsedCapacity() throws Exception {
+    int numNamenodes = 1;
+    int numDatanodes = 2;
+
+    startCluster(numNamenodes, numDatanodes, conf);
+    cluster.waitActive();
+    FSNamesystem ns = cluster.getNamesystem(0);
+    BlockManager blockManager = ns.getBlockManager();
+    DatanodeStatistics datanodeStatistics = blockManager.getDatanodeManager()
+        .getDatanodeStatistics();
+
+    // Snapshot the aggregate statistics before decommissioning.
+    long initialUsedCapacity = datanodeStatistics.getCapacityUsed();
+    long initialTotalCapacity = datanodeStatistics.getCapacityTotal();
+    long initialBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
+    ArrayList<ArrayList<DatanodeInfo>> namenodeDecomList =
+        new ArrayList<ArrayList<DatanodeInfo>>(numNamenodes);
+    namenodeDecomList.add(0, new ArrayList<DatanodeInfo>(numDatanodes));
+    ArrayList<DatanodeInfo> decommissionedNodes = namenodeDecomList.get(0);
+    // Decommission one node and wait for it to reach DECOMMISSIONED state.
+    DatanodeInfo decomNode = decommissionNode(0, null, decommissionedNodes,
+        AdminStates.DECOMMISSIONED);
+    decommissionedNodes.add(decomNode);
+    long newUsedCapacity = datanodeStatistics.getCapacityUsed();
+    long newTotalCapacity = datanodeStatistics.getCapacityTotal();
+    long newBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
+
+    assertTrue("DfsUsedCapacity should not be the same after a node has " +
+        "been decommissioned!", initialUsedCapacity != newUsedCapacity);
+    assertTrue("TotalCapacity should not be the same after a node has " +
+        "been decommissioned!", initialTotalCapacity != newTotalCapacity);
+    assertTrue("BlockPoolUsed should not be the same after a node has " +
+        "been decommissioned!", initialBlockPoolUsed != newBlockPoolUsed);
+  }
 }
|