@@ -23,22 +23,34 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 
+import java.io.File;
 import java.io.IOException;
+import java.net.URI;
+import java.util.Collection;
+import java.util.List;
 import java.util.Random;
 import static org.junit.Assert.*;
 import org.junit.Test;
 
+import com.google.common.collect.Lists;
+
 /**
  * A JUnit test for checking if restarting DFS preserves the
  * blocks that are part of an unclosed file.
@@ -57,6 +69,10 @@ public class TestPersistBlocks {
 
   static final byte[] DATA_BEFORE_RESTART = new byte[BLOCK_SIZE * NUM_BLOCKS];
   static final byte[] DATA_AFTER_RESTART = new byte[BLOCK_SIZE * NUM_BLOCKS];
+
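+  // Tarball (under test.cache.data) with the Hadoop 1.0 cluster image.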
+  private static final String HADOOP_1_0_MULTIBLOCK_TGZ =
+      "hadoop-1.0-multiblock-file.tgz";
   static {
     Random rand = new Random();
     rand.nextBytes(DATA_BEFORE_RESTART);
@@ -277,4 +293,67 @@ public class TestPersistBlocks {
       if (cluster != null) { cluster.shutdown(); }
     }
   }
+
+  /**
+   * Earlier versions of HDFS didn't persist block allocation to the edit log.
+   * This makes sure that we can still load an edit log when OP_CLOSE
+   * is the opcode that adds all of the blocks. This is a regression
+   * test for HDFS-2773.
+   * This test uses a tarred pseudo-distributed cluster from Hadoop 1.0
+   * which has a multi-block file. This is similar to the tests in
+   * {@link TestDFSUpgradeFromImage} but none of those images include
+   * a multi-block file.
+   */
+  @Test
+  public void testEarlierVersionEditLog() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+
+    String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+        + "/" + HADOOP_1_0_MULTIBLOCK_TGZ;
+    String testDir = System.getProperty("test.build.data", "build/test/data");
+    File dfsDir = new File(testDir, "image-1.0");
+    if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
+      throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
+    }
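+    // Unpack the pre-built Hadoop 1.0 cluster image into the test dir.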
+    FileUtil.unTar(new File(tarFile), new File(testDir));
+
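+    // The unpacked image should contain both a name dir and a data dir.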
+    File nameDir = new File(dfsDir, "name");
+    GenericTestUtils.assertExists(nameDir);
+    File dataDir = new File(dfsDir, "data");
+    GenericTestUtils.assertExists(dataDir);
+
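+    // Point the NameNode and DataNode at the unpacked 1.0 directories.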
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.getAbsolutePath());
+
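+    // Reuse the existing storage (no format, unmanaged dirs) and start with
+    // the UPGRADE option so the NameNode accepts the older layout version.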
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .format(false)
+        .manageDataDfsDirs(false)
+        .manageNameDfsDirs(false)
+        .numDataNodes(1)
+        .startupOption(StartupOption.UPGRADE)
+        .build();
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      Path testPath = new Path("/user/todd/4blocks");
+      // Read it without caring about the actual data within - we just need
+      // to make sure that the block states and locations are OK.
+      DFSTestUtil.readFile(fs, testPath);
+
+      // Ensure that we can append to it - if the blocks had been left in an
+      // under-construction or otherwise bad state, the append would fail.
+      FSDataOutputStream stm = fs.append(testPath);
+      try {
+        stm.write(1);
+      } finally {
+        IOUtils.closeStream(stm);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }