
HDFS-2101. Fix remaining unit tests for new storage filenames. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1073@1146854 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon · 14 years ago
commit 2ed1186e5a

+ 1 - 0
hdfs/CHANGES.HDFS-1073.txt

@@ -68,3 +68,4 @@ HDFS-2010. Fix NameNode to exit if all edit streams become inaccessible. (atm
            via todd)
 HDFS-2123. Checkpoint interval should be based on txn count, not size. (todd)
 HDFS-1979. Fix backupnode for new edits/image layout. (todd)
+HDFS-2101. Fix remaining unit tests for new storage filenames. (todd)

+ 1 - 1
hdfs/src/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java

@@ -123,7 +123,7 @@ class ImageLoaderCurrent implements ImageLoader {
   protected final DateFormat dateFormat = 
                                       new SimpleDateFormat("yyyy-MM-dd HH:mm");
   private static int[] versions = { -16, -17, -18, -19, -20, -21, -22, -23,
-      -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36 };
+      -24, -25, -26, -27, -28, -30, -31, -32, -33, -34, -35, -36, -37, -38};
   private int imageVersion = 0;
 
   /* (non-Javadoc)
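
The two new entries let the offline viewer accept images written with the layout versions introduced on the HDFS-1073 branch (-37 and -38). For context, this array backs the loader's supported-version check; below is a minimal sketch of such a check, assuming the ImageLoader interface's canLoadVersion(int) method. The body shown is illustrative, not the committed implementation:

    // Sketch only: a membership test against the supported-versions array.
    // ImageLoader does declare canLoadVersion(int); this body is illustrative.
    class VersionCheckSketch {
      private static final int[] VERSIONS = {
          -16, -17, -18, -19, -20, -21, -22, -23, -24, -25, -26, -27,
          -28, -30, -31, -32, -33, -34, -35, -36, -37, -38 };

      static boolean canLoadVersion(int version) {
        for (int v : VERSIONS) {
          if (v == version) {
            return true;
          }
        }
        return false;
      }
    }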

+ 17 - 6
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSFinalize.java

@@ -19,6 +19,9 @@ package org.apache.hadoop.hdfs;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+
 import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -26,6 +29,11 @@ import org.apache.hadoop.conf.Configuration;
 import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
 import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import com.google.common.collect.Lists;
+import com.google.common.io.Files;
 
 /**
  * This test ensures the appropriate response from the system when 
@@ -57,14 +65,17 @@ public class TestDFSFinalize extends TestCase {
    * because its removal is asynchronous therefore we have no reliable
    * way to know when it will happen.  
    */
-  void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws IOException {
+  static void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws Exception {
+    List<File> dirs = Lists.newArrayList();
     for (int i = 0; i < nameNodeDirs.length; i++) {
-      assertTrue(new File(nameNodeDirs[i],"current").isDirectory());
-      assertTrue(new File(nameNodeDirs[i],"current/VERSION").isFile());
-      assertTrue(new File(nameNodeDirs[i],"current/edits").isFile());
-      assertTrue(new File(nameNodeDirs[i],"current/fsimage").isFile());
-      assertTrue(new File(nameNodeDirs[i],"current/fstime").isFile());
+      File curDir = new File(nameNodeDirs[i], "current");
+      dirs.add(curDir);
+      FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
     }
+    
+    FSImageTestUtil.assertParallelFilesAreIdentical(
+        dirs, Collections.<String>emptySet());
+    
     for (int i = 0; i < dataNodeDirs.length; i++) {
       assertEquals(
                    UpgradeUtilities.checksumContents(
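
Under the transactional layout the files in current/ carry transaction IDs in their names (fsimage_N, edits_N-M for finalized segments, edits_inprogress_N for the open one, plus seen_txid), so the old fixed-name assertions on current/edits and current/fsimage can no longer hold. The rewrite therefore splits the check in two: a per-directory sanity check (assertReasonableNameCurrentDir) and a cross-directory comparison (assertParallelFilesAreIdentical) that the old loop never performed.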

+ 36 - 25
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java

@@ -22,6 +22,8 @@ import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_N
 
 import java.io.File;
 import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
 
 import junit.framework.TestCase;
 
@@ -32,8 +34,11 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.collect.Lists;
+
 /**
 * This test ensures the appropriate response (successful or failure) from
 * the system when the system is rolled back under various storage state and
@@ -61,26 +66,26 @@ public class TestDFSRollback extends TestCase {
    * Verify that the new current directory is the old previous.  
    * It is assumed that the server has recovered and rolled back.
    */
-  void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
-    switch (nodeType) {
-    case NAME_NODE:
-      for (int i = 0; i < baseDirs.length; i++) {
-        assertTrue(new File(baseDirs[i],"current").isDirectory());
-        assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
-        assertTrue(new File(baseDirs[i],"current/edits").isFile());
-        assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
-        assertTrue(new File(baseDirs[i],"current/fstime").isFile());
-      }
-      break;
-    case DATA_NODE:
-      for (int i = 0; i < baseDirs.length; i++) {
+  void checkResult(NodeType nodeType, String[] baseDirs) throws Exception {
+    List<File> curDirs = Lists.newArrayList();
+    for (String baseDir : baseDirs) {
+      File curDir = new File(baseDir, "current");
+      curDirs.add(curDir);
+      switch (nodeType) {
+      case NAME_NODE:
+        FSImageTestUtil.assertReasonableNameCurrentDir(curDir);
+        break;
+      case DATA_NODE:
         assertEquals(
-                     UpgradeUtilities.checksumContents(
-                                                       nodeType, new File(baseDirs[i],"current")),
-                     UpgradeUtilities.checksumMasterDataNodeContents());
+            UpgradeUtilities.checksumContents(nodeType, curDir),
+            UpgradeUtilities.checksumMasterDataNodeContents());
+        break;
       }
-      break;
     }
+    
+    FSImageTestUtil.assertParallelFilesAreIdentical(
+        curDirs, Collections.<String>emptySet());
+
     for (int i = 0; i < baseDirs.length; i++) {
       assertFalse(new File(baseDirs[i],"previous").isDirectory());
     }
@@ -241,21 +246,17 @@ public class TestDFSRollback extends TestCase {
       log("NameNode rollback with no edits file", numDirs);
       UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
       baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
-      for (File f : baseDirs) { 
-        FileUtil.fullyDelete(new File(f,"edits"));
-      }
+      deleteMatchingFiles(baseDirs, "edits.*");
       startNameNodeShouldFail(StartupOption.ROLLBACK,
-          "Edits file is not found");
+          "but there are no logs to load");
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
       log("NameNode rollback with no image file", numDirs);
       UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
       baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
-      for (File f : baseDirs) { 
-        FileUtil.fullyDelete(new File(f,"fsimage")); 
-      }
+      deleteMatchingFiles(baseDirs, "fsimage_.*");
       startNameNodeShouldFail(StartupOption.ROLLBACK,
-          "Image file is not found");
+          "No valid image files found");
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
       log("NameNode rollback with corrupt version file", numDirs);
@@ -284,6 +285,16 @@ public class TestDFSRollback extends TestCase {
     } // end numDir loop
   }
  
+  private void deleteMatchingFiles(File[] baseDirs, String regex) {
+    for (File baseDir : baseDirs) {
+      for (File f : baseDir.listFiles()) {
+        if (f.getName().matches(regex)) {
+          f.delete();
+        }
+      }
+    }
+  }
+
   protected void tearDown() throws Exception {
     LOG.info("Shutting down MiniDFSCluster");
     if (cluster != null) cluster.shutdown();
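
The two patterns handed to deleteMatchingFiles above are written against the new names. Purely as an illustration (the txids below are invented; real names are zero-padded), a self-contained snippet showing what each regex does and does not remove:

    // Illustration only: plausible current/ contents under the new layout.
    class NamePatternDemo {
      public static void main(String[] args) {
        String[] names = {
            "VERSION", "seen_txid",
            "fsimage_0000000000000000000",
            "edits_0000000000000000001-0000000000000000012",
            "edits_inprogress_0000000000000000013"
        };
        for (String n : names) {
          // "edits.*" catches finalized and in-progress segments;
          // "fsimage_.*" catches checkpoints but spares VERSION and seen_txid.
          System.out.printf("%-46s edits.*=%-5b fsimage_.*=%b%n",
              n, n.matches("edits.*"), n.matches("fsimage_.*"));
        }
      }
    }

Note the helper tolerates a lot for brevity: it assumes listFiles() returns non-null and ignores the boolean result of delete(), which is acceptable for a test that created the directories itself.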

+ 1 - 1
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java

@@ -417,7 +417,7 @@ public class UpgradeUtilities {
       storage = new NNStorage(conf, 
                               Collections.<URI>emptyList(), 
                               Collections.<URI>emptyList());
-
+      storage.setStorageInfo(version);
       StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());
       sd.write(versionFile);
       versionFiles[i] = versionFile;
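
The one-line fix matters because sd.write(versionFile) serializes the storage fields held by the NNStorage object: setStorageInfo(version) copies the layout version, namespace ID, and cTime from the StorageInfo under test into the NNStorage first, whereas previously the VERSION files were written from a default-initialized NNStorage rather than the values the test means to fake.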

+ 15 - 1
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java

@@ -171,7 +171,12 @@ public abstract class FSImageTestUtil {
     }
     
     for (List<File> sameNameList : groupedByName.values()) {
-      assertFileContentsSame(sameNameList.toArray(new File[0]));
+      if (sameNameList.get(0).isDirectory()) {
+        // recurse
+        assertParallelFilesAreIdentical(sameNameList, ignoredFileNames);
+      } else {
+        assertFileContentsSame(sameNameList.toArray(new File[0]));
+      }
     }  
   }
   
@@ -334,5 +339,14 @@ public abstract class FSImageTestUtil {
     }    
   }
 
+  public static void assertReasonableNameCurrentDir(File curDir)
+      throws IOException {
+    assertTrue(curDir.isDirectory());
+    assertTrue(new File(curDir, "VERSION").isFile());
+    assertTrue(new File(curDir, "seen_txid").isFile());
+    File image = findNewestImageFile(curDir.toString());
+    assertNotNull(image);
+  }
+
 
 }
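
assertReasonableNameCurrentDir leans on findNewestImageFile, which is not part of this diff (TestOfflineImageViewer below uses a StorageDirectory-based sibling, findLatestImageFile). A minimal sketch of such a lookup under the new naming, assuming image files match fsimage_<txid>; this is illustrative, not the committed helper:

    import java.io.File;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Sketch, not the committed implementation: pick the fsimage_<txid>
    // file with the highest transaction ID out of a storage "current" dir.
    class NewestImageFinder {
      private static final Pattern IMAGE_PATTERN =
          Pattern.compile("fsimage_(\\d+)");

      static File findNewestImageFile(String currentDirPath) {
        File best = null;
        long bestTxId = -1;
        File[] files = new File(currentDirPath).listFiles();
        if (files == null) {
          return null; // not a directory, or listing failed
        }
        for (File f : files) {
          Matcher m = IMAGE_PATTERN.matcher(f.getName());
          if (m.matches()) {
            long txId = Long.parseLong(m.group(1));
            if (txId > bestTxId) {
              bestTxId = txId;
              best = f;
            }
          }
        }
        return best;
      }
    }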

+ 15 - 22
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.*;
 import junit.framework.Assert;
 import java.io.*;
 import java.net.URI;
@@ -29,6 +30,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundEditLog;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.Text;
@@ -47,20 +50,6 @@ public class TestCheckPointForSecurityTokens {
   short replication = 3;
   MiniDFSCluster cluster = null;
 
-  NameNode startNameNode( Configuration conf,
-                          String imageDirs,
-                          String editsDirs,
-                          StartupOption start) throws IOException {
-    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:0");
-    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");  
-    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, imageDirs);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsDirs);
-    String[] args = new String[]{start.getName()};
-    NameNode nn = NameNode.createNameNode(args, conf);
-    Assert.assertTrue(nn.isInSafeMode());
-    return nn;
-  }
-  
   private void cancelToken(Token<DelegationTokenIdentifier> token)
       throws IOException {
     cluster.getNamesystem().cancelDelegationToken(token);
@@ -95,10 +84,12 @@ public class TestCheckPointForSecurityTokens {
       String[] args = new String[]{"-saveNamespace"};
 
       // verify that the edits file is NOT empty
-      Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
-      for(URI uri : editsDirs) {
-        File ed = new File(uri.getPath());
-        Assert.assertTrue(new File(ed, "current/edits").length() > Integer.SIZE/Byte.SIZE);
+      NameNode nn = cluster.getNameNode();
+      for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
+        FoundEditLog log = FSImageTestUtil.findLatestEditsLog(sd);
+        assertTrue(log.isInProgress());
+        assertEquals("In-progress log " + log + " should have 5 transactions",
+            5, log.validateLog().numTransactions);
       }
 
       // Saving image in safe mode should succeed
@@ -108,10 +99,12 @@ public class TestCheckPointForSecurityTokens {
       } catch(Exception e) {
         throw new IOException(e.getMessage());
       }
-      // verify that the edits file is empty
-      for(URI uri : editsDirs) {
-        File ed = new File(uri.getPath());
-        Assert.assertTrue(new File(ed, "current/edits").length() == Integer.SIZE/Byte.SIZE);
+      // verify that the edits file is empty except for the START txn
+      for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
+        FoundEditLog log = FSImageTestUtil.findLatestEditsLog(sd);
+        assertTrue(log.isInProgress());
+        assertEquals("In-progress log " + log + " should only have START txn",
+            1, log.validateLog().numTransactions);
       }
 
       // restart cluster
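
The expected counts follow from how segments are delimited on this branch: each log segment opens with a start-of-segment transaction, so a freshly rolled in-progress file validates to exactly one transaction, while the pre-saveNamespace segment holds that start record plus the four delegation-token transactions the test has issued by that point, hence five.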

+ 5 - 5
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 /**
@@ -128,11 +129,10 @@ public class TestOfflineImageViewer extends TestCase {
       cluster.getNameNode().saveNamespace();
       
       // Determine location of fsimage file
-      URI [] files = cluster.getNameDirs(0).toArray(new URI[0]);
-      orig =  new File(files[0].getPath(), "current/fsimage");
-      
-      if (!orig.exists()) {
-        fail("Didn't generate or can't find fsimage.");
+      orig = FSImageTestUtil.findLatestImageFile(
+          cluster.getNameNode().getFSImage().getStorage().getStorageDir(0));
+      if (orig == null) {
+        fail("Didn't generate or can't find fsimage");
       }
     } finally {
       if(cluster != null)