@@ -17,7 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.balancer;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC;
 import static org.apache.hadoop.hdfs.StorageType.DEFAULT;
 import static org.apache.hadoop.hdfs.StorageType.RAM_DISK;
 import static org.junit.Assert.assertEquals;
@@ -31,6 +36,7 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Random;
@@ -44,7 +50,15 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.*;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -53,8 +67,10 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.balancer.Balancer.Cli;
 import org.apache.hadoop.hdfs.server.balancer.Balancer.Parameters;
+import org.apache.hadoop.hdfs.server.balancer.Balancer.Result;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
@@ -65,8 +81,8 @@ import org.junit.Test;
  * This class tests if a balancer schedules tasks correctly.
  */
 public class TestBalancer {
-  private static final Log LOG = LogFactory.getLog(
-      "org.apache.hadoop.hdfs.TestBalancer");
+  private static final Log LOG = LogFactory.getLog(TestBalancer.class);
+
   static {
     ((Log4JLogger)Balancer.LOG).getLogger().setLevel(Level.ALL);
   }
@@ -478,7 +494,7 @@ public class TestBalancer {
     LOG.info("racks      = " + Arrays.asList(racks));
     LOG.info("newCapacity= " + newCapacity);
     LOG.info("newRack    = " + newRack);
-    LOG.info("useTool = " + useTool);
+    LOG.info("useTool    = " + useTool);
     assertEquals(capacities.length, racks.length);
     int numOfDatanodes = capacities.length;
     cluster = new MiniDFSCluster.Builder(conf)
@@ -584,7 +600,7 @@ public class TestBalancer {
 
     // start rebalancing
     Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
-    final int r = Balancer.run(namenodes, p, conf);
+    final int r = runBalancer(namenodes, p, conf);
     if (conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
         DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT) ==0) {
       assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
@@ -593,10 +609,74 @@ public class TestBalancer {
       assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
     }
     waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
-    LOG.info("Rebalancing with default ctor.");
+    LOG.info("  .");
     waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, excludedNodes);
   }
 
+  private static int runBalancer(Collection<URI> namenodes, final Parameters p,
+      Configuration conf) throws IOException, InterruptedException {
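+    // Test-local version of the Balancer.run() driving loop, unrolled so the
+    // test can assert on each iteration's Result.
+    // Between iterations, sleep for two DN heartbeat periods plus one
+    // replication-monitor period so the NN catches up with completed moves.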
+    final long sleeptime =
+        conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
+            DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT) * 2000 +
+        conf.getLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
+            DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000;
+ LOG.info("namenodes = " + namenodes);
|
|
|
|
+ LOG.info("parameters = " + p);
|
|
|
|
+ LOG.info("Print stack trace", new Throwable());
|
|
|
|
+
|
|
|
|
+ System.out.println("Time Stamp Iteration# Bytes Already Moved Bytes Left To Move Bytes Being Moved");
|
|
|
|
+
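+    // One connector per namenode; each connector also claims the balancer ID
+    // file on its NN so that only one balancer instance runs at a time.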
+    List<NameNodeConnector> connectors = Collections.emptyList();
+    try {
+      connectors = NameNodeConnector.newNameNodeConnectors(namenodes,
+          Balancer.class.getSimpleName(), Balancer.BALANCER_ID_PATH, conf);
+
+      boolean done = false;
+      for(int iteration = 0; !done; iteration++) {
+        done = true;
+        Collections.shuffle(connectors);
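+        // Run one balancing iteration against each namenode, in random order.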
+        for(NameNodeConnector nnc : connectors) {
+          final Balancer b = new Balancer(nnc, p, conf);
+          final Result r = b.runOneIteration();
+          r.print(iteration, System.out);
+
+          // clean all lists
+          b.resetData(conf);
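+          // IN_PROGRESS means more blocks remain to be moved; any other
+          // non-SUCCESS status is an error, so return its exit code.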
+          if (r.exitStatus == ExitStatus.IN_PROGRESS) {
+            done = false;
+          } else if (r.exitStatus != ExitStatus.SUCCESS) {
+            // must be an error status, return
+            return r.exitStatus.getExitCode();
+          } else {
+            if (iteration > 0) {
+              assertTrue(r.bytesAlreadyMoved > 0);
+            }
+          }
+        }
+
+        if (!done) {
+          Thread.sleep(sleeptime);
+        }
+      }
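+      // Close the connectors even on failure so each NN's balancer ID file
+      // is released.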
+    } finally {
+      for(NameNodeConnector nnc : connectors) {
+        IOUtils.cleanup(LOG, nnc);
+      }
+    }
+    return ExitStatus.SUCCESS.getExitCode();
+  }
+
   private void runBalancerCli(Configuration conf,
       long totalUsedSpace, long totalCapacity,
       Balancer.Parameters p, boolean useFile, int expectedExcludedNodes) throws Exception {
@@ -1118,7 +1187,6 @@ public class TestBalancer {
     initConfWithRamDisk(conf);
 
     final int defaultRamDiskCapacity = 10;
-    final int defaultDiskCapacity = 100;
     final long ramDiskStorageLimit =
       ((long) defaultRamDiskCapacity * DEFAULT_RAM_DISK_BLOCK_SIZE) +
       (DEFAULT_RAM_DISK_BLOCK_SIZE - 1);