HADOOP-1242. Improve handling of DFS upgrades. Contributed by Konstantin.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@543207 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting, 18 years ago
parent
commit 657eaa7296

+ 3 - 0
CHANGES.txt

@@ -505,6 +505,9 @@ Branch 0.13 (unreleased changes)
      AlreadyBeingCreatedException when wrapped as a RemoteException.
      (Hairong Kuang via tomwhite)
 
+129. HADOOP-1242.  Improve handling of DFS upgrades.
+     (Konstantin Shvachko via cutting)
+
 
 Release 0.12.3 - 2007-04-06
 

+ 43 - 9
src/java/org/apache/hadoop/dfs/DataStorage.java

@@ -11,7 +11,6 @@ import java.util.Properties;
 
 import org.apache.hadoop.dfs.FSConstants.StartupOption;
 import org.apache.hadoop.dfs.FSConstants.NodeType;
-import org.apache.hadoop.dfs.FSImage.NameNodeFile;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.fs.FileUtil.HardLink;
 
@@ -164,6 +163,21 @@ class DataStorage extends Storage {
     File oldF = new File(sd.root, "storage");
     if (!oldF.exists())
       return false;
+    // check the layout version inside the storage file
+    // Lock and Read old storage file
+    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
+    if (oldFile == null)
+      throw new IOException("Cannot read file: " + oldF);
+    FileLock oldLock = oldFile.getChannel().tryLock();
+    try {
+      oldFile.seek(0);
+      int odlVersion = oldFile.readInt();
+      if (odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
+        return false;
+    } finally {
+      oldLock.release();
+      oldFile.close();
+    }
     // check consistency of the old storage
     File oldDataDir = new File(sd.root, "data");
     if (!oldDataDir.exists()) 
@@ -206,13 +220,14 @@ class DataStorage extends Storage {
     FileLock oldLock = oldFile.getChannel().tryLock();
     if (oldLock == null)
       throw new IOException("Cannot lock file: " + oldF);
+    String odlStorageID = "";
     try {
       oldFile.seek(0);
       int odlVersion = oldFile.readInt();
       if (odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
         throw new IncorrectVersionException(odlVersion, "file " + oldF,
                                             LAST_PRE_UPGRADE_LAYOUT_VERSION);
-      String odlStorageID = org.apache.hadoop.io.UTF8.readString(oldFile);
+      odlStorageID = org.apache.hadoop.io.UTF8.readString(oldFile);
   
       // check new storage
       File newDataDir = sd.getCurrentDir();
@@ -221,14 +236,8 @@ class DataStorage extends Storage {
         throw new IOException("Version file already exists: " + versionF);
       if (newDataDir.exists()) // somebody created current dir manually
         deleteDir(newDataDir);
-      // Write new layout
+      // move "data" to "current"
       rename(oldDataDir, newDataDir);
-  
-      this.layoutVersion = FSConstants.LAYOUT_VERSION;
-      this.namespaceID = nsInfo.getNamespaceID();
-      this.cTime = 0;
-      this.storageID = odlStorageID;
-      sd.write();
       // close and unlock old file
     } finally {
       oldLock.release();
@@ -236,6 +245,13 @@ class DataStorage extends Storage {
     }
     // move old storage file into current dir
     rename(oldF, new File(sd.getCurrentDir(), "storage"));
+
+    // Write new version file
+    this.layoutVersion = FSConstants.LAYOUT_VERSION;
+    this.namespaceID = nsInfo.getNamespaceID();
+    this.cTime = 0;
+    this.storageID = odlStorageID;
+    sd.write();
     LOG.info("Conversion of " + oldF + " is complete.");
   }
 
@@ -409,4 +425,22 @@ class DataStorage extends Storage {
     for(int i = 0; i < blockNames.length; i++)
       linkBlocks(new File(from, blockNames[i]), new File(to, blockNames[i]));
   }
+
+  protected void corruptPreUpgradeStorage(File rootDir) throws IOException {
+    File oldF = new File(rootDir, "storage");
+    if (oldF.exists())
+      return;
+    // recreate old storage file to let pre-upgrade versions fail
+    if (!oldF.createNewFile())
+      throw new IOException("Cannot create file " + oldF);
+    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
+    if (oldFile == null)
+      throw new IOException("Cannot read file: " + oldF);
+    // write new version into old storage file
+    try {
+      writeCorruptedData(oldFile);
+    } finally {
+      oldFile.close();
+    }
+  }
 }
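
Taken together, the DataStorage changes above convert a pre-0.13 data directory in place: read and validate the old "storage" file under a lock, rename "data" to "current", preserve the old storage file inside "current", and only then write the new VERSION file. A rough before/after sketch of the layout, pieced together from the diff (block-file names are illustrative):

    Before conversion:
    ${dfs.data.dir}/
        storage        <- pre-0.13 version file (layout version + storageID)
        data/
            blk_123    <- block files

    After conversion:
    ${dfs.data.dir}/
        storage        <- stub recreated by corruptPreUpgradeStorage(),
                          unreadable by pre-0.13 DataNodes
        current/
            VERSION    <- new layout version file written by sd.write()
            storage    <- the original pre-upgrade file, preserved
            blk_123    <- block files moved by the "data" -> "current" rename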

+ 38 - 2
src/java/org/apache/hadoop/dfs/FSImage.java

@@ -27,6 +27,7 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.RandomAccessFile;
 import java.util.AbstractList;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -435,7 +436,21 @@ class FSImage extends Storage {
   boolean isConversionNeeded(StorageDirectory sd) throws IOException {
     File oldImageDir = new File(sd.root, "image");
     if (!oldImageDir.exists())
-      return false;
+      throw new InconsistentFSStateException(sd.root,
+          oldImageDir + " does not exist.");
+    // check the layout version inside the image file
+    File oldF = new File(oldImageDir, "fsimage");
+    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
+    if (oldFile == null)
+      throw new IOException("Cannot read file: " + oldF);
+    try {
+      oldFile.seek(0);
+      int odlVersion = oldFile.readInt();
+      if (odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
+        return false;
+    } finally {
+      oldFile.close();
+    }
     // check consistency of the old storage
     if (!oldImageDir.isDirectory())
       throw new InconsistentFSStateException(sd.root,
@@ -492,8 +507,8 @@ class FSImage extends Storage {
       needReformat = true;
     } else {
       sd.write();
-      LOG.info("Conversion of " + oldImage + " is complete.");
     }
+    LOG.info("Conversion of " + oldImage + " is complete.");
     return needReformat;
   }
 
@@ -960,4 +975,25 @@ class FSImage extends Storage {
       node.setXceiverCount(xceiverCount);
     }
   }
+
+  protected void corruptPreUpgradeStorage(File rootDir) throws IOException {
+    File oldImageDir = new File(rootDir, "image");
+    if (!oldImageDir.exists())
+      if (!oldImageDir.mkdir())
+        throw new IOException("Cannot create directory " + oldImageDir);
+    File oldImage = new File(oldImageDir, "fsimage");
+    if (!oldImage.exists())
+      // recreate old image file to let pre-upgrade versions fail
+      if (!oldImage.createNewFile())
+        throw new IOException("Cannot create file " + oldImage);
+    RandomAccessFile oldFile = new RandomAccessFile(oldImage, "rws");
+    if (oldFile == null)
+      throw new IOException("Cannot read file: " + oldImage);
+    // write new version into old image file
+    try {
+      writeCorruptedData(oldFile);
+    } finally {
+      oldFile.close();
+    }
+  }
 }
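
The FSImage changes mirror this on the NameNode side: corruptPreUpgradeStorage() keeps an image/fsimage stub under each name directory and rewrites it through writeCorruptedData(), so a pre-0.13 NameNode pointed at an upgraded directory fails its version check instead of misreading the new layout. Roughly (a sketch; the current/ contents listed are the usual 0.13 files, assumed here):

    ${dfs.name.dir}/
        current/
            VERSION, fsimage, edits   <- the real post-upgrade state
        image/
            fsimage                   <- stub, intentionally unreadable by pre-0.13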

+ 17 - 0
src/java/org/apache/hadoop/dfs/Storage.java

@@ -157,6 +157,7 @@ abstract class Storage extends StorageInfo {
      * @throws IOException
      */
     void write() throws IOException {
+      corruptPreUpgradeStorage(root);
       write(getVersionFile());
     }
 
@@ -521,4 +522,20 @@ abstract class Storage extends StorageInfo {
       + "-" + Integer.toString(storage.getLayoutVersion())
       + "-" + Long.toString(storage.getCTime());
   }
+
+  // Pre-upgrade version compatibility
+  protected abstract void corruptPreUpgradeStorage(File rootDir) throws IOException;
+
+  protected void writeCorruptedData(RandomAccessFile file) throws IOException {
+    final String messageForPreUpgradeVersion =
+      "\nThis file is INTENTIONALLY CORRUPTED so that versions\n"
+      + "of Hadoop prior to 0.13 (which are incompatible\n"
+      + "with this directory layout) will fail to start.\n";
+  
+    file.seek(0);
+    file.writeInt(FSConstants.LAYOUT_VERSION);
+    org.apache.hadoop.io.UTF8.writeString(file, "");
+    file.writeBytes(messageForPreUpgradeVersion);
+    file.getFD().sync();
+  }
 }
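
To see why writing the new FSConstants.LAYOUT_VERSION into the stub is enough, consider what a pre-0.13 daemon does on startup: it reads the first int of the old storage file as its layout version and rejects anything it does not recognize. A minimal illustrative sketch of that check (not the actual 0.12 source; the class, method, and EXPECTED_OLD_VERSION are hypothetical):

    import java.io.File;
    import java.io.IOException;
    import java.io.RandomAccessFile;

    class PreUpgradeCheckSketch {
      // Hypothetical stand-in for the layout version a 0.12-era daemon expects.
      static final int EXPECTED_OLD_VERSION = -3;

      static void checkStorage(File rootDir) throws IOException {
        RandomAccessFile file = new RandomAccessFile(new File(rootDir, "storage"), "r");
        try {
          file.seek(0);
          // writeCorruptedData() stored the new LAYOUT_VERSION here, so an
          // old reader sees an unexpected value and aborts cleanly.
          int version = file.readInt();
          if (version != EXPECTED_OLD_VERSION)
            throw new IOException("Incompatible storage version: " + version);
        } finally {
          file.close();
        }
      }
    }

The stub thus turns a silent incompatibility into an immediate, explicit startup failure, which is the point of the INTENTIONALLY CORRUPTED message above.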

+ 0 - 3
src/test/org/apache/hadoop/dfs/TestDFSFinalize.java

@@ -23,11 +23,9 @@ import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.dfs.FSConstants.NodeType;
 import static org.apache.hadoop.dfs.FSConstants.NodeType.NAME_NODE;
 import static org.apache.hadoop.dfs.FSConstants.NodeType.DATA_NODE;
 import org.apache.hadoop.dfs.FSConstants.StartupOption;
-import org.apache.hadoop.fs.Path;
 
 /**
  * This test ensures the appropriate response from the system when 
@@ -82,7 +80,6 @@ public class TestDFSFinalize extends TestCase {
    * This test attempts to finalize the NameNode and DataNode.
    */
   public void testFinalize() throws Exception {
-    File[] baseDirs;
     UpgradeUtilities.initialize();
     
     for (int numDirs = 1; numDirs <= 2; numDirs++) {

+ 14 - 4
src/test/org/apache/hadoop/dfs/UpgradeUtilities.java

@@ -249,16 +249,26 @@ public class UpgradeUtilities {
       LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
       switch (nodeType) {
       case NAME_NODE:
-        localFS.copyToLocalFile(
-                                new Path(namenodeStorage.toString(), "current"),
+        localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
                                 new Path(newDir.toString()),
                                 false);
+        Path newImgDir = new Path(newDir.getParent(), "image");
+        if (!localFS.exists(newImgDir))
+          localFS.copyToLocalFile(
+              new Path(namenodeStorage.toString(), "image"),
+              newImgDir,
+              false);
         break;
       case DATA_NODE:
-        localFS.copyToLocalFile(
-                                new Path(datanodeStorage.toString(), "current"),
+        localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
                                 new Path(newDir.toString()),
                                 false);
+        Path newStorageFile = new Path(newDir.getParent(), "storage");
+        if (!localFS.exists(newStorageFile))
+          localFS.copyToLocalFile(
+              new Path(datanodeStorage.toString(), "storage"),
+              newStorageFile,
+              false);
         break;
       }
       retVal[i] = newDir;