@@ -23,13 +23,7 @@ import java.io.IOException;
 import java.io.PrintWriter;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.List;
-import java.util.NavigableMap;
-import java.util.TreeMap;
+import java.util.*;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -54,15 +48,8 @@ import org.apache.hadoop.hdfs.server.namenode.HostFileManager.EntrySet;
 import org.apache.hadoop.hdfs.server.namenode.HostFileManager.MutableEntrySet;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
-import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
+import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
-import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.util.CyclicIteration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.Server;
@@ -657,24 +644,6 @@ public class DatanodeManager {
     }
   }
 
-  /**
-   * Generate new storage ID.
-   *
-   * @return unique storage ID
-   *
-   * Note: that collisions are still possible if somebody will try
-   * to bring in a data storage from a different cluster.
-   */
-  private String newStorageID() {
-    String newID = null;
-    while(newID == null) {
-      newID = "DS" + Integer.toString(DFSUtil.getRandom().nextInt());
-      if (datanodeMap.get(newID) != null)
-        newID = null;
-    }
-    return newID;
-  }
-
   /**
    * Register the given datanode with the namenode. NB: the given
    * registration is mutated and given back to the datanode.
@@ -779,7 +748,7 @@ public class DatanodeManager {
     if ("".equals(nodeReg.getStorageID())) {
       // this data storage has never been registered
       // it is either empty or was created by pre-storageID version of DFS
-      nodeReg.setStorageID(newStorageID());
+      nodeReg.setStorageID(DatanodeStorage.newStorageID());
       if (NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug(
           "BLOCK* NameSystem.registerDatanode: "