@@ -19,9 +19,12 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.File;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.Collections;
 import java.util.List;
 
+import junit.framework.TestCase;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -29,13 +32,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 
@@ -44,8 +47,6 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Lists;
 
-import junit.framework.TestCase;
-
 public class TestBackupNode extends TestCase {
   public static final Log LOG = LogFactory.getLog(TestBackupNode.class);
 
@@ -241,8 +242,11 @@ public class TestBackupNode extends TestCase {
   void testCheckpoint(StartupOption op) throws Exception {
     Path file1 = new Path("checkpoint.dat");
     Path file2 = new Path("checkpoint2.dat");
+    Path file3 = new Path("backup.dat");
 
     Configuration conf = new HdfsConfiguration();
+    short replication = (short)conf.getInt("dfs.replication", 3);
+    int numDatanodes = Math.max(3, replication);
     conf.set(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, "0");
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // disable block scanner
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
@@ -290,7 +294,7 @@ public class TestBackupNode extends TestCase {
       //
       // Restart cluster and verify that file1 still exist.
       //
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
                                                 .format(false).build();
       fileSys = cluster.getFileSystem();
       // check that file1 still exists
@@ -319,6 +323,26 @@ public class TestBackupNode extends TestCase {
       backup.doCheckpoint();
       waitCheckpointDone(cluster, backup, txid);
 
+      // Try BackupNode operations
+      InetSocketAddress add = backup.getNameNodeAddress();
+      // Write to BN
+      FileSystem bnFS = FileSystem.get(new Path("hdfs://"
+          + NameNode.getHostPortString(add)).toUri(), conf);
+      boolean canWrite = true;
+      try {
+        TestCheckpoint.writeFile(bnFS, file3, replication);
+      } catch (IOException eio) {
+        LOG.info("Write to BN failed as expected: ", eio);
+        canWrite = false;
+      }
+      assertFalse("Write to BackupNode must be prohibited.", canWrite);
+
+      TestCheckpoint.writeFile(fileSys, file3, replication);
+      TestCheckpoint.checkFile(fileSys, file3, replication);
+      // should also be on BN right away
+      assertTrue("file3 does not exist on BackupNode",
+          op != StartupOption.BACKUP || bnFS.exists(file3));
+
     } catch(IOException e) {
       LOG.error("Error in TestBackupNode:", e);
       assertTrue(e.getLocalizedMessage(), false);