@@ -19,17 +19,21 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.Test;
 
+import java.io.File;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
 
 public class TestDataNodeUUID {
@@ -62,4 +66,59 @@ public class TestDataNodeUUID {
     // Make sure that we have a valid DataNodeUUID at that point of time.
     assertNotEquals(dn.getDatanodeUuid(), nullString);
   }
+
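+  /**
+   * The DataNode UUID lives in each storage directory's VERSION file, so a
+   * restart should keep the original UUID as long as at least one
+   * configured directory still holds that file intact.
+   */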
+  @Test(timeout = 10000)
+  public void testUUIDRegeneration() throws Exception {
+    File baseDir = new File(System.getProperty("test.build.data"));
+    File disk1 = new File(baseDir, "disk1");
+    File disk2 = new File(baseDir, "disk2");
+
+    // Ensure the configured disks do not pre-exist
+    FileUtils.deleteDirectory(disk1);
+    FileUtils.deleteDirectory(disk2);
+
+    MiniDFSCluster cluster = null;
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.setStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+        disk1.toURI().toString(),
+        disk2.toURI().toString());
+    try {
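+      // Build the cluster by hand: manageDataDfsDirs(false) keeps
+      // MiniDFSCluster from replacing the data directories configured above.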
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(1)
+          .manageDataDfsDirs(false)
+          .build();
+      cluster.waitActive();
+
+      // Grab the new-cluster UUID as the original one to test against
+      String originalUUID = cluster.getDataNodes().get(0).getDatanodeUuid();
+      // Stop the DN, then wipe the second disk to simulate a reformatted
+      // volume or one that is unmounted with its mount point left behind
+      MiniDFSCluster.DataNodeProperties dn = cluster.stopDataNode(0);
+      FileUtils.deleteDirectory(disk2);
+      assertTrue("Failed to recreate the data directory: " + disk2,
+          disk2.mkdirs());
+
+      // Restart and verify the UUID was kept
+      assertTrue("DataNode failed to start up: " + dn,
+          cluster.restartDataNode(dn));
+      // We need to wait until the DN has completed registration
+      while (!cluster.getDataNodes().get(0).isDatanodeFullyStarted()) {
+        Thread.sleep(50);
+      }
+      assertEquals(
+          "DN generated a new UUID despite disk1 having it intact",
+          originalUUID, cluster.getDataNodes().get(0).getDatanodeUuid());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }