
HDFS-2799. svn merge -c 1325963 from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1325969 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins, 13 years ago
commit d41c0ede09

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -389,6 +389,8 @@ Release 2.0.0 - UNRELEASED
     HDFS-3256. HDFS considers blocks under-replicated if topology script is
     configured with only 1 rack. (atm)
 
+    HDFS-2799. Trim fs.checkpoint.dir values. (Amith D K via eli)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -1076,7 +1076,8 @@ public class FSImage implements Closeable {
    */
   static Collection<URI> getCheckpointDirs(Configuration conf,
       String defaultValue) {
-    Collection<String> dirNames = conf.getStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
+    Collection<String> dirNames = conf.getTrimmedStringCollection(
+        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
     if (dirNames.size() == 0 && defaultValue != null) {
       dirNames.add(defaultValue);
     }
@@ -1085,8 +1086,8 @@ public class FSImage implements Closeable {
 
   static List<URI> getCheckpointEditsDirs(Configuration conf,
       String defaultName) {
-    Collection<String> dirNames = 
-      conf.getStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
+    Collection<String> dirNames = conf.getTrimmedStringCollection(
+        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
     if (dirNames.size() == 0 && defaultName != null) {
       dirNames.add(defaultName);
     }

+ 42 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java

@@ -462,4 +462,46 @@ public class TestNameEditsConfigs extends TestCase {
       cluster.shutdown();
     }
   }
+
+  /**
+   * Test that dfs.namenode.checkpoint.dir and dfs.namenode.checkpoint.edits.dir
+   * tolerate white space between values.
+   */
+  @Test
+  public void testCheckPointDirsAreTrimmed() throws Exception {
+    MiniDFSCluster cluster = null;
+    SecondaryNameNode secondary = null;
+    File checkpointNameDir1 = new File(base_dir, "chkptName1");
+    File checkpointEditsDir1 = new File(base_dir, "chkptEdits1");
+    File checkpointNameDir2 = new File(base_dir, "chkptName2");
+    File checkpointEditsDir2 = new File(base_dir, "chkptEdits2");
+    File nameDir = new File(base_dir, "name1");
+    String whiteSpace = "  \n   \n  ";
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getPath());
+    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, whiteSpace
+        + checkpointNameDir1.getPath() + whiteSpace, whiteSpace
+        + checkpointNameDir2.getPath() + whiteSpace);
+    conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
+        whiteSpace + checkpointEditsDir1.getPath() + whiteSpace, whiteSpace
+            + checkpointEditsDir2.getPath() + whiteSpace);
+    cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false)
+        .numDataNodes(3).build();
+    try {
+      cluster.waitActive();
+      secondary = startSecondaryNameNode(conf);
+      secondary.doCheckpoint();
+      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY
+          + " must be trimmed ", checkpointNameDir1.exists());
+      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY
+          + " must be trimmed ", checkpointNameDir2.exists());
+      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
+          + " must be trimmed ", checkpointEditsDir1.exists());
+      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
+          + " must be trimmed ", checkpointEditsDir2.exists());
+    } finally {
+      secondary.shutdown();
+      cluster.shutdown();
+    }
+  }
 }