|
@@ -18,11 +18,12 @@
|
|
|
package org.apache.hadoop.hdfs;
|
|
|
|
|
|
import static org.assertj.core.api.Assertions.assertThat;
|
|
|
-import static org.junit.Assert.assertEquals;
|
|
|
-import static org.junit.Assert.assertNotNull;
|
|
|
-import static org.junit.Assert.assertNull;
|
|
|
-import static org.junit.Assert.assertTrue;
|
|
|
-import static org.junit.Assert.fail;
|
|
|
+import static org.junit.jupiter.api.Assertions.assertEquals;
|
|
|
+import static org.junit.jupiter.api.Assertions.assertFalse;
|
|
|
+import static org.junit.jupiter.api.Assertions.assertNotNull;
|
|
|
+import static org.junit.jupiter.api.Assertions.assertNull;
|
|
|
+import static org.junit.jupiter.api.Assertions.assertTrue;
|
|
|
+import static org.junit.jupiter.api.Assertions.fail;
|
|
|
|
|
|
import java.io.ByteArrayOutputStream;
|
|
|
import java.io.IOException;
|
|
@@ -86,9 +87,8 @@ import org.apache.hadoop.test.GenericTestUtils;
|
|
|
import org.apache.hadoop.util.Lists;
|
|
|
import org.apache.hadoop.util.ToolRunner;
|
|
|
|
|
|
-import org.junit.Assert;
|
|
|
-import org.junit.Ignore;
|
|
|
-import org.junit.Test;
|
|
|
+import org.junit.jupiter.api.Disabled;
|
|
|
+import org.junit.jupiter.api.Test;
|
|
|
import org.eclipse.jetty.util.ajax.JSON;
|
|
|
import org.slf4j.Logger;
|
|
|
import org.slf4j.LoggerFactory;
|
|
@@ -116,8 +116,7 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
String downnode, int numDatanodes) throws IOException {
|
|
|
boolean isNodeDown = (downnode != null);
|
|
|
// need a raw stream
|
|
|
- assertTrue("Not HDFS:"+fileSys.getUri(),
|
|
|
- fileSys instanceof DistributedFileSystem);
|
|
|
+ assertTrue(fileSys instanceof DistributedFileSystem, "Not HDFS:" + fileSys.getUri());
|
|
|
HdfsDataInputStream dis = (HdfsDataInputStream)
|
|
|
fileSys.open(name);
|
|
|
Collection<LocatedBlock> dinfo = dis.getAllBlocks();
|
|
@@ -170,8 +169,7 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
|
|
|
// For decommissioning nodes, ensure capacity of the DN and dfsUsed
|
|
|
// is no longer counted towards total
|
|
|
- assertEquals(newStats[0],
|
|
|
- decommissioning ? 0 : info.getCapacity());
|
|
|
+ assertEquals(newStats[0], decommissioning ? 0 : info.getCapacity());
|
|
|
|
|
|
// Ensure cluster used capacity is counted for normal nodes only
|
|
|
assertEquals(newStats[1], decommissioning ? 0 : info.getDfsUsed());
|
|
@@ -232,8 +230,8 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
|
|
|
// Ensure decommissioned datanode is not automatically shutdown
|
|
|
DFSClient client = getDfsClient(0);
|
|
|
- assertEquals("All datanodes must be alive", numDatanodes,
|
|
|
- client.datanodeReport(DatanodeReportType.LIVE).length);
|
|
|
+ assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
|
|
|
+ "All datanodes must be alive");
|
|
|
assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(),
|
|
|
numDatanodes));
|
|
|
cleanupFile(fileSys, file1);
|
|
@@ -406,8 +404,8 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
|
|
|
// Ensure decommissioned datanode is not automatically shutdown
|
|
|
DFSClient client = getDfsClient(i);
|
|
|
- assertEquals("All datanodes must be alive", numDatanodes,
|
|
|
- client.datanodeReport(DatanodeReportType.LIVE).length);
|
|
|
+ assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
|
|
|
+ "All datanodes must be alive");
|
|
|
// wait for the block to be replicated
|
|
|
int tries = 0;
|
|
|
while (tries++ < 20) {
|
|
@@ -420,8 +418,8 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
} catch (InterruptedException ie) {
|
|
|
}
|
|
|
}
|
|
|
- assertTrue("Checked if block was replicated after decommission, tried "
|
|
|
- + tries + " times.", tries < 20);
|
|
|
+ assertTrue(tries < 20,
|
|
|
+ "Checked if block was replicated after decommission, tried " + tries + " times.");
|
|
|
cleanupFile(fileSys, file1);
|
|
|
}
|
|
|
}
|
|
@@ -454,8 +452,8 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
|
|
|
// Decommission one of the datanodes with a replica
|
|
|
BlockLocation loc = fileSys.getFileBlockLocations(file1, 0, 1)[0];
|
|
|
- assertEquals("Unexpected number of replicas from getFileBlockLocations",
|
|
|
- replicas, loc.getHosts().length);
|
|
|
+ assertEquals(replicas, loc.getHosts().length,
|
|
|
+ "Unexpected number of replicas from getFileBlockLocations");
|
|
|
final String toDecomHost = loc.getNames()[0];
|
|
|
String toDecomUuid = null;
|
|
|
for (DataNode d : getCluster().getDataNodes()) {
|
|
@@ -464,7 +462,7 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
break;
|
|
|
}
|
|
|
}
|
|
|
- assertNotNull("Could not find a dn with the block!", toDecomUuid);
|
|
|
+ assertNotNull(toDecomUuid, "Could not find a dn with the block!");
|
|
|
final DatanodeInfo decomNode = takeNodeOutofService(0, toDecomUuid,
|
|
|
0, decommissionedNodes, AdminStates.DECOMMISSIONED);
|
|
|
decommissionedNodes.add(decomNode);
|
|
@@ -476,8 +474,8 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
|
|
|
// Ensure decommissioned datanode is not automatically shutdown
|
|
|
DFSClient client = getDfsClient(0);
|
|
|
- assertEquals("All datanodes must be alive", numDatanodes,
|
|
|
- client.datanodeReport(DatanodeReportType.LIVE).length);
|
|
|
+ assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
|
|
|
+ "All datanodes must be alive");
|
|
|
|
|
|
// wait for the block to be replicated
|
|
|
final ExtendedBlock b = DFSTestUtil.getFirstBlock(fileSys, file1);
|
|
@@ -566,7 +564,7 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
break;
|
|
|
}
|
|
|
}
|
|
|
- assertNotNull("Could not find decomNode in cluster!", decomNode);
|
|
|
+ assertNotNull(decomNode, "Could not find decomNode in cluster!");
|
|
|
return decomNode;
|
|
|
}
|
|
|
|
|
@@ -612,14 +610,14 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
Thread.sleep(HEARTBEAT_INTERVAL * 1000);
|
|
|
info = client.datanodeReport(DatanodeReportType.LIVE);
|
|
|
}
|
|
|
- assertEquals("Number of live nodes should be 0", 0, info.length);
|
|
|
+ assertEquals(0, info.length, "Number of live nodes should be 0");
|
|
|
|
|
|
// Test that bogus hostnames are considered "dead".
|
|
|
// The dead report should have an entry for the bogus entry in the hosts
|
|
|
// file. The original datanode is excluded from the report because it
|
|
|
// is no longer in the included list.
|
|
|
info = client.datanodeReport(DatanodeReportType.DEAD);
|
|
|
- assertEquals("There should be 1 dead node", 1, info.length);
|
|
|
+ assertEquals(1, info.length, "There should be 1 dead node");
|
|
|
assertEquals(bogusIp, info[0].getHostName());
|
|
|
}
|
|
|
}
|
|
@@ -772,7 +770,7 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
|
|
|
toolOut.reset();
|
|
|
assertEquals(0,
|
|
|
- ToolRunner.run(dfsAdmin, new String[] {"-listOpenFiles",
|
|
|
+ ToolRunner.run(dfsAdmin, new String[]{"-listOpenFiles",
|
|
|
"-blockingDecommission", "-path", firstOpenFile}));
|
|
|
toolOut.flush();
|
|
|
result2 = verifyOpenFilesListing(
|
|
@@ -950,7 +948,7 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
OpenFilesIterator.FILTER_PATH_DEFAULT);
|
|
|
assertEquals(0, batchedListEntries.size());
|
|
|
} catch (NullPointerException e) {
|
|
|
- Assert.fail("Should not throw NPE when the file is not under " +
|
|
|
+ fail("Should not throw NPE when the file is not under " +
|
|
|
"construction but has lease!");
|
|
|
}
|
|
|
initExcludeHost("");
|
|
@@ -1176,8 +1174,8 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
getCluster().startDataNodes(getConf(), 1, true, null, null, null, null);
|
|
|
numDatanodes+=1;
|
|
|
|
|
|
- assertEquals("Number of datanodes should be 2 ", 2,
|
|
|
- getCluster().getDataNodes().size());
|
|
|
+ assertEquals(2, getCluster().getDataNodes().size(),
|
|
|
+        "Number of datanodes should be 2");
|
|
|
//Restart the namenode
|
|
|
getCluster().restartNameNode();
|
|
|
DatanodeInfo datanodeInfo = NameNodeAdapter.getDatanode(
|
|
@@ -1185,11 +1183,11 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
waitNodeState(datanodeInfo, AdminStates.DECOMMISSIONED);
|
|
|
|
|
|
// Ensure decommissioned datanode is not automatically shutdown
|
|
|
- assertEquals("All datanodes must be alive", numDatanodes,
|
|
|
- client.datanodeReport(DatanodeReportType.LIVE).length);
|
|
|
- assertTrue("Checked if block was replicated after decommission.",
|
|
|
- checkFile(fileSys, file1, replicas, datanodeInfo.getXferAddr(),
|
|
|
- numDatanodes) == null);
|
|
|
+ assertEquals(numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length,
|
|
|
+ "All datanodes must be alive");
|
|
|
+ assertTrue(checkFile(fileSys, file1, replicas, datanodeInfo.getXferAddr(),
|
|
|
+ numDatanodes) == null,
|
|
|
+ "Checked if block was replicated after decommission.");
|
|
|
|
|
|
cleanupFile(fileSys, file1);
|
|
|
// Restart the cluster and ensure recommissioned datanodes
|
|
@@ -1227,10 +1225,10 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
//Restart the namenode
|
|
|
getCluster().restartNameNode();
|
|
|
|
|
|
- assertEquals("There should be one node alive", 1,
|
|
|
- client.datanodeReport(DatanodeReportType.LIVE).length);
|
|
|
- assertEquals("There should be one node dead", 1,
|
|
|
- client.datanodeReport(DatanodeReportType.DEAD).length);
|
|
|
+ assertEquals(1, client.datanodeReport(DatanodeReportType.LIVE).length,
|
|
|
+ "There should be one node alive");
|
|
|
+ assertEquals(1, client.datanodeReport(DatanodeReportType.DEAD).length,
|
|
|
+ "There should be one node dead");
|
|
|
}
|
|
|
|
|
|
/**
|
|
@@ -1247,7 +1245,7 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
* It is not recommended to use a registration name which is not also a
|
|
|
* valid DNS hostname for the DataNode. See HDFS-5237 for background.
|
|
|
*/
|
|
|
- @Ignore
|
|
|
+ @Disabled
|
|
|
@Test
|
|
|
public void testIncludeByRegistrationName() throws Exception {
|
|
|
// Any IPv4 address starting with 127 functions as a "loopback" address
|
|
@@ -1301,8 +1299,8 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
try {
|
|
|
DatanodeInfo info[] = client.datanodeReport(DatanodeReportType.LIVE);
|
|
|
if (info.length == 1) {
|
|
|
- Assert.assertFalse(info[0].isDecommissioned());
|
|
|
- Assert.assertFalse(info[0].isDecommissionInProgress());
|
|
|
+ assertFalse(info[0].isDecommissioned());
|
|
|
+ assertFalse(info[0].isDecommissionInProgress());
|
|
|
assertEquals(registrationName, info[0].getHostName());
|
|
|
return true;
|
|
|
}
|
|
@@ -1358,8 +1356,8 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
}
|
|
|
// Run decom scan and check
|
|
|
BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
|
|
|
- assertEquals("Unexpected # of nodes checked", expectedNumCheckedNodes,
|
|
|
- decomManager.getNumNodesChecked());
|
|
|
+ assertEquals(expectedNumCheckedNodes, decomManager.getNumNodesChecked(),
|
|
|
+ "Unexpected # of nodes checked");
|
|
|
// Recommission all nodes
|
|
|
for (DatanodeInfo dn : decommissionedNodes) {
|
|
|
putNodeInService(0, dn);
|
|
@@ -1470,10 +1468,10 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
|
|
|
private void assertTrackedAndPending(DatanodeAdminManager decomManager,
|
|
|
int tracked, int pending) {
|
|
|
- assertEquals("Unexpected number of tracked nodes", tracked,
|
|
|
- decomManager.getNumTrackedNodes());
|
|
|
- assertEquals("Unexpected number of pending nodes", pending,
|
|
|
- decomManager.getNumPendingNodes());
|
|
|
+ assertEquals(tracked, decomManager.getNumTrackedNodes(),
|
|
|
+ "Unexpected number of tracked nodes");
|
|
|
+ assertEquals(pending, decomManager.getNumPendingNodes(),
|
|
|
+ "Unexpected number of pending nodes");
|
|
|
}
|
|
|
|
|
|
/**
|
|
@@ -1628,12 +1626,12 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
long newTotalCapacity = datanodeStatistics.getCapacityTotal();
|
|
|
long newBlockPoolUsed = datanodeStatistics.getBlockPoolUsed();
|
|
|
|
|
|
- assertTrue("DfsUsedCapacity should not be the same after a node has " +
|
|
|
- "been decommissioned!", initialUsedCapacity != newUsedCapacity);
|
|
|
- assertTrue("TotalCapacity should not be the same after a node has " +
|
|
|
- "been decommissioned!", initialTotalCapacity != newTotalCapacity);
|
|
|
- assertTrue("BlockPoolUsed should not be the same after a node has " +
|
|
|
- "been decommissioned!",initialBlockPoolUsed != newBlockPoolUsed);
|
|
|
+ assertTrue(initialUsedCapacity != newUsedCapacity,
|
|
|
+        "DfsUsedCapacity should not be the same after a node has been decommissioned!");
|
|
|
+ assertTrue(initialTotalCapacity != newTotalCapacity,
|
|
|
+        "TotalCapacity should not be the same after a node has been decommissioned!");
|
|
|
+ assertTrue(initialBlockPoolUsed != newBlockPoolUsed,
|
|
|
+        "BlockPoolUsed should not be the same after a node has been decommissioned!");
|
|
|
}
|
|
|
|
|
|
/**
|
|
@@ -1712,15 +1710,16 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
while (Instant.now().isBefore(checkUntil)) {
|
|
|
BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
|
|
|
assertEquals(
|
|
|
- "Unexpected number of decommissioning nodes queued in DatanodeAdminManager.",
|
|
|
- 0, decomManager.getNumPendingNodes());
|
|
|
+ 0, decomManager.getNumPendingNodes(),
|
|
|
+ "Unexpected number of decommissioning nodes queued in DatanodeAdminManager.");
|
|
|
assertEquals(
|
|
|
- "Unexpected number of decommissioning nodes tracked in DatanodeAdminManager.",
|
|
|
- numDeadNodes, decomManager.getNumTrackedNodes());
|
|
|
+ numDeadNodes, decomManager.getNumTrackedNodes(),
|
|
|
+ "Unexpected number of decommissioning nodes tracked in DatanodeAdminManager.");
|
|
|
assertTrue(
|
|
|
- "Dead decommissioning nodes unexpectedly transitioned out of DECOMMISSION_INPROGRESS.",
|
|
|
+
|
|
|
deadNodeProps.keySet().stream()
|
|
|
- .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)));
|
|
|
+ .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)),
|
|
|
+ "Dead decommissioning nodes unexpectedly transitioned out of DECOMMISSION_INPROGRESS.");
|
|
|
Thread.sleep(500);
|
|
|
}
|
|
|
|
|
@@ -1759,35 +1758,36 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
// "processPendingNodes" to de-queue the live nodes & decommission them
|
|
|
BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
|
|
|
assertEquals(
|
|
|
- "DatanodeAdminBackoffMonitor did not re-queue dead decommissioning nodes as expected.",
|
|
|
- 2, decomManager.getNumPendingNodes());
|
|
|
+ 2, decomManager.getNumPendingNodes(),
|
|
|
+ "DatanodeAdminBackoffMonitor did not re-queue dead decommissioning nodes as expected.");
|
|
|
assertEquals(
|
|
|
- "DatanodeAdminBackoffMonitor did not re-queue dead decommissioning nodes as expected.",
|
|
|
- 0, decomManager.getNumTrackedNodes());
|
|
|
+ 0, decomManager.getNumTrackedNodes(),
|
|
|
+ "DatanodeAdminBackoffMonitor did not re-queue dead decommissioning nodes as expected.");
|
|
|
} else {
|
|
|
// For TestDecommission a single tick/execution of the DatanodeAdminDefaultMonitor
|
|
|
// will re-queue the dead nodes. A seconds tick is needed to de-queue the live nodes
|
|
|
// & decommission them
|
|
|
BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
|
|
|
assertEquals(
|
|
|
- "DatanodeAdminDefaultMonitor did not re-queue dead decommissioning nodes as expected.",
|
|
|
- 4, decomManager.getNumPendingNodes());
|
|
|
+ 4, decomManager.getNumPendingNodes(),
|
|
|
+ "DatanodeAdminDefaultMonitor did not re-queue dead decommissioning nodes as expected.");
|
|
|
assertEquals(
|
|
|
- "DatanodeAdminDefaultMonitor did not re-queue dead decommissioning nodes as expected.",
|
|
|
- 0, decomManager.getNumTrackedNodes());
|
|
|
+ 0, decomManager.getNumTrackedNodes(),
|
|
|
+ "DatanodeAdminDefaultMonitor did not re-queue dead decommissioning nodes as expected.");
|
|
|
BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
|
|
|
assertEquals(
|
|
|
- "DatanodeAdminDefaultMonitor did not decommission live nodes as expected.",
|
|
|
- 2, decomManager.getNumPendingNodes());
|
|
|
+ 2, decomManager.getNumPendingNodes(),
|
|
|
+ "DatanodeAdminDefaultMonitor did not decommission live nodes as expected.");
|
|
|
assertEquals(
|
|
|
- "DatanodeAdminDefaultMonitor did not decommission live nodes as expected.",
|
|
|
- 0, decomManager.getNumTrackedNodes());
|
|
|
+ 0, decomManager.getNumTrackedNodes(),
|
|
|
+ "DatanodeAdminDefaultMonitor did not decommission live nodes as expected.");
|
|
|
}
|
|
|
- assertTrue("Live nodes not DECOMMISSIONED as expected.", liveDecommNodes.stream()
|
|
|
- .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSIONED)));
|
|
|
- assertTrue("Dead nodes not DECOMMISSION_INPROGRESS as expected.",
|
|
|
- deadNodeProps.keySet().stream()
|
|
|
- .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)));
|
|
|
+ assertTrue(liveDecommNodes.stream()
|
|
|
+ .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSIONED)),
|
|
|
+ "Live nodes not DECOMMISSIONED as expected.");
|
|
|
+ assertTrue(deadNodeProps.keySet().stream()
|
|
|
+ .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)),
|
|
|
+ "Dead nodes not DECOMMISSION_INPROGRESS as expected.");
|
|
|
assertThat(deadNodeProps.keySet())
|
|
|
.as("Check all dead decommissioning nodes queued in DatanodeAdminManager")
|
|
|
.containsAll(decomManager.getPendingNodes());
|
|
@@ -1798,15 +1798,15 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
while (Instant.now().isBefore(checkUntil)) {
|
|
|
BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
|
|
|
assertEquals(
|
|
|
- "Unexpected number of decommissioning nodes queued in DatanodeAdminManager.",
|
|
|
- 0, decomManager.getNumPendingNodes());
|
|
|
+ 0, decomManager.getNumPendingNodes(),
|
|
|
+ "Unexpected number of decommissioning nodes queued in DatanodeAdminManager.");
|
|
|
assertEquals(
|
|
|
- "Unexpected number of decommissioning nodes tracked in DatanodeAdminManager.",
|
|
|
- numDeadNodes, decomManager.getNumTrackedNodes());
|
|
|
+ numDeadNodes, decomManager.getNumTrackedNodes(),
|
|
|
+ "Unexpected number of decommissioning nodes tracked in DatanodeAdminManager.");
|
|
|
assertTrue(
|
|
|
- "Dead decommissioning nodes unexpectedly transitioned out of DECOMMISSION_INPROGRESS.",
|
|
|
deadNodeProps.keySet().stream()
|
|
|
- .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)));
|
|
|
+ .allMatch(node -> node.getAdminState().equals(AdminStates.DECOMMISSION_INPROGRESS)),
|
|
|
+ "Dead decommissioning nodes unexpectedly transitioned out of DECOMMISSION_INPROGRESS.");
|
|
|
Thread.sleep(500);
|
|
|
}
|
|
|
|
|
@@ -1844,8 +1844,8 @@ public class TestDecommission extends AdminStatesBaseTest {
|
|
|
final List<DatanodeDescriptor> liveNodes, final int numDeadNodes,
|
|
|
final Map<DatanodeDescriptor, MiniDFSCluster.DataNodeProperties> deadNodeProps,
|
|
|
final ArrayList<DatanodeInfo> decommissionedNodes, final Path filePath) throws Exception {
|
|
|
- assertTrue("Must have numLiveNode > 0", numLiveNodes > 0);
|
|
|
- assertTrue("Must have numDeadNode > 0", numDeadNodes > 0);
|
|
|
+ assertTrue(numLiveNodes > 0, "Must have numLiveNode > 0");
|
|
|
+ assertTrue(numDeadNodes > 0, "Must have numDeadNode > 0");
|
|
|
int numNodes = numLiveNodes + numDeadNodes;
|
|
|
|
|
|
// Allow "numDeadNodes" datanodes to be decommissioned at a time
|