
Porting change r1036213 (HDFS-259: remove intentionally corrupt 0.13 directory layout creation) from trunk to the federation branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/branches/HDFS-1052@1078871 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas, 14 years ago
parent
commit
3f5ca934cc
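
Summary of the ported change: the legacy trick of writing an intentionally corrupt pre-0.13 "storage"/"image" file (so that ancient Hadoop releases would refuse to start against the new directory layout) is removed. LAYOUT_VERSION drops to -27, corruptPreUpgradeStorage()/writeCorruptedData() disappear, and the isConversionNeeded() hook is replaced by isPreUpgradableLayout(); when the latter reports an old layout, startup now fails with an explicit version error instead. A minimal sketch of what an override of the new hook looks like, loosely modeled on the DataStorage change below (the simplified body is illustrative, not part of the patch):

  // Illustrative override inside a Storage subclass (other members omitted).
  @Override
  public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException {
    // A pre-0.13 layout is identified by the old top-level "storage" file;
    // when present, Storage.checkOldLayoutStorage() rejects startup.
    return new File(sd.getRoot(), "storage").exists();
  }

When this returns true, checkOldLayoutStorage() calls checkVersionUpgradable(0), which presumably produces the "Old layout version is 'too old'" error that the new TestDFSUpgradeFromImage case asserts on.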

+ 3 - 0
CHANGES.txt

@@ -460,6 +460,9 @@ Release 0.22.0 - Unreleased
     HDFS-1483. DFSClient.getBlockLocations should indicate if corresponding
     blocks are corrupt. (Patrick Kling via hairong)
 
+    HDFS-259. Remove intentionally corrupt 0.13 directory layout creation.
+    (Todd Lipcon via eli).
+
   OPTIMIZATIONS
 
     HDFS-1140. Speedup INode.getPathComponents. (Dmytro Molkov via shv)

+ 2 - 2
src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java

@@ -88,7 +88,7 @@ public interface FSConstants {
   // Version is reflected in the data storage file.
   // Versions are negative.
   // Decrement LAYOUT_VERSION to define a new version.
-  public static final int LAYOUT_VERSION = -26;
+  public static final int LAYOUT_VERSION = -27;
   // Current version: 
-  // -26: support image checksum.
+  // -27: remove intentionally corrupt pre-0.13 image directory
 }

+ 17 - 29
src/java/org/apache/hadoop/hdfs/server/common/Storage.java

@@ -64,7 +64,7 @@ public abstract class Storage extends StorageInfo {
 
   // Constants
   
-  // last layout version that did not suppot upgrades
+  // last layout version that did not support upgrades
   protected static final int LAST_PRE_UPGRADE_LAYOUT_VERSION = -3;
   
   // this corresponds to Hadoop-0.14.
@@ -83,7 +83,7 @@ public abstract class Storage extends StorageInfo {
   
   private   static final String STORAGE_FILE_LOCK     = "in_use.lock";
   protected static final String STORAGE_FILE_VERSION  = "VERSION";
-  public static final String STORAGE_DIR_CURRENT      = "current";
+  public    static final String STORAGE_DIR_CURRENT   = "current";
   public    static final String STORAGE_DIR_PREVIOUS  = "previous";
   public    static final String STORAGE_TMP_REMOVED   = "removed.tmp";
   public    static final String STORAGE_TMP_PREVIOUS  = "previous.tmp";
@@ -276,7 +276,6 @@ public abstract class Storage extends StorageInfo {
      * @throws IOException
      */
     public void write() throws IOException {
-      corruptPreUpgradeStorage(root);
       write(getVersionFile());
     }
 
@@ -492,8 +491,7 @@ public abstract class Storage extends StorageInfo {
       if (startOpt == HdfsConstants.StartupOption.FORMAT)
         return StorageState.NOT_FORMATTED;
       if (startOpt != HdfsConstants.StartupOption.IMPORT) {
-        //make sure no conversion is required
-        checkConversionNeeded(this);
+        checkOldLayoutStorage(this);
       }
 
       // check whether current directory is valid
@@ -705,16 +703,22 @@ public abstract class Storage extends StorageInfo {
   protected void addStorageDir(StorageDirectory sd) {
     storageDirs.add(sd);
   }
-  
-  public abstract boolean isConversionNeeded(StorageDirectory sd) throws IOException;
 
-  /*
-   * Coversion is no longer supported. So this should throw exception if
-   * conversion is needed.
+  /**
+   * Return true if the layout of the given storage directory is from a version
+   * of Hadoop prior to the introduction of the "current" and "previous"
+   * directories which allow upgrade and rollback.
+   */
+  public abstract boolean isPreUpgradableLayout(StorageDirectory sd)
+  throws IOException;
+
+  /**
+   * Check if the given storage directory comes from a version of Hadoop
+   * prior to when the directory layout changed (ie 0.13). If this is
+   * the case, this method throws an IOException.
    */
-  private void checkConversionNeeded(StorageDirectory sd) throws IOException {
-    if (isConversionNeeded(sd)) {
-      //throw an exception
+  private void checkOldLayoutStorage(StorageDirectory sd) throws IOException {
+    if (isPreUpgradableLayout(sd)) {
       checkVersionUpgradable(0);
     }
   }
@@ -858,22 +862,6 @@ public abstract class Storage extends StorageInfo {
       + "-" + Integer.toString(storage.getLayoutVersion())
       + "-" + Long.toString(storage.getCTime());
   }
-
-  // Pre-upgrade version compatibility
-  protected abstract void corruptPreUpgradeStorage(File rootDir) throws IOException;
-
-  protected void writeCorruptedData(RandomAccessFile file) throws IOException {
-    final String messageForPreUpgradeVersion =
-      "\nThis file is INTENTIONALLY CORRUPTED so that versions\n"
-      + "of Hadoop prior to 0.13 (which are incompatible\n"
-      + "with this directory layout) will fail to start.\n";
-  
-    file.seek(0);
-    file.writeInt(FSConstants.LAYOUT_VERSION);
-    org.apache.hadoop.hdfs.DeprecatedUTF8.writeString(file, "");
-    file.writeBytes(messageForPreUpgradeVersion);
-    file.getFD().sync();
-  }
   
   String getProperty(Properties props, StorageDirectory sd,
       String name) throws InconsistentFSStateException {

+ 5 - 22
src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.File;
 import java.io.IOException;
-import java.io.RandomAccessFile;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Iterator;
@@ -468,22 +467,6 @@ public class BlockPoolSliceStorage extends Storage {
         DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion);
   }
 
-  protected void corruptPreUpgradeStorage(File rootDir) throws IOException {
-    File oldF = new File(rootDir, "storage");
-    if (oldF.exists())
-      return;
-    // recreate old storage file to let pre-upgrade versions fail
-    if (!oldF.createNewFile())
-      throw new IOException("Cannot create file " + oldF);
-    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
-    // write new version into old storage file
-    try {
-      writeCorruptedData(oldFile);
-    } finally {
-      oldFile.close();
-    }
-  }
-
   private void verifyDistributedUpgradeProgress(NamespaceInfo nsInfo)
       throws IOException {
     UpgradeManagerDatanode um = 
@@ -508,11 +491,6 @@ public class BlockPoolSliceStorage extends Storage {
     return bpRoot;
   }
 
-  @Override
-  public boolean isConversionNeeded(StorageDirectory sd) throws IOException {
-    return false;
-  }
-  
   @Override
   public String toString() {
     return super.toString() + ";bpid=" + blockpoolID;
@@ -527,4 +505,9 @@ public class BlockPoolSliceStorage extends Storage {
   public static File getBpRoot(String bpID, File dnCurDir) {
     return new File(dnCurDir, bpID);
   }
+
+  @Override
+  public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException {
+    return false;
+  }
 }

+ 2 - 17
src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -322,7 +322,8 @@ public class DataStorage extends Storage {
     }
   }
 
-  public boolean isConversionNeeded(StorageDirectory sd) throws IOException {
+  @Override
+  public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException {
     File oldF = new File(sd.getRoot(), "storage");
     if (!oldF.exists())
       return false;
@@ -679,22 +680,6 @@ public class DataStorage extends Storage {
                  new File(to, blockNames[i]), oldLV);
   }
 
-  protected void corruptPreUpgradeStorage(File rootDir) throws IOException {
-    File oldF = new File(rootDir, "storage");
-    if (oldF.exists())
-      return;
-    // recreate old storage file to let pre-upgrade versions fail
-    if (!oldF.createNewFile())
-      throw new IOException("Cannot create file " + oldF);
-    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
-    // write new version into old storage file
-    try {
-      writeCorruptedData(oldFile);
-    } finally {
-      oldFile.close();
-    }
-  }
-
   private void verifyDistributedUpgradeProgress(
                   NamespaceInfo nsInfo
                 ) throws IOException {

+ 1 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java

@@ -61,7 +61,7 @@ public class BackupStorage extends FSImage {
   }
 
   @Override
-  public boolean isConversionNeeded(StorageDirectory sd) {
+  public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException {
     return false;
   }
 

+ 2 - 24
src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -959,12 +959,9 @@ public class FSImage extends Storage {
     return editLog;
   }
 
-  public boolean isConversionNeeded(StorageDirectory sd) throws IOException {
+  public boolean isPreUpgradableLayout(StorageDirectory sd) throws IOException {
     File oldImageDir = new File(sd.getRoot(), "image");
     if (!oldImageDir.exists()) {
-      if(sd.getVersionFile().exists())
-        throw new InconsistentFSStateException(sd.getRoot(),
-            oldImageDir + " does not exist.");
       return false;
     }
     // check the layout version inside the image file
@@ -980,7 +977,7 @@ public class FSImage extends Storage {
     }
     return true;
   }
-  
+
   //
   // Atomic move sequence, to recover from interrupted checkpoint
   //
@@ -2347,25 +2344,6 @@ public class FSImage extends Storage {
     }
   }
 
-  protected void corruptPreUpgradeStorage(File rootDir) throws IOException {
-    File oldImageDir = new File(rootDir, "image");
-    if (!oldImageDir.exists())
-      if (!oldImageDir.mkdir())
-        throw new IOException("Cannot create directory " + oldImageDir);
-    File oldImage = new File(oldImageDir, "fsimage");
-    if (!oldImage.exists())
-      // recreate old image file to let pre-upgrade versions fail
-      if (!oldImage.createNewFile())
-        throw new IOException("Cannot create file " + oldImage);
-    RandomAccessFile oldFile = new RandomAccessFile(oldImage, "rws");
-    // write new version into old image file
-    try {
-      writeCorruptedData(oldFile);
-    } finally {
-      oldFile.close();
-    }
-  }
-
   private boolean getDistributedUpgradeState() {
     FSNamesystem ns = getFSNamesystem();
     return ns == null ? false : ns.getDistributedUpgradeState();

+ 0 - 6
src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -591,12 +591,6 @@ public class SecondaryNameNode implements Runnable {
       super(conf);
     }
 
-    @Override
-    public
-    boolean isConversionNeeded(StorageDirectory sd) {
-      return false;
-    }
-
     /**
      * Analyze checkpoint directories.
      * Create directories if they do not exist.

+ 46 - 0
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
@@ -48,6 +49,8 @@ public class TestDFSUpgradeFromImage extends TestCase {
   
   private static final Log LOG = LogFactory.getLog(
                     "org.apache.hadoop.hdfs.TestDFSUpgradeFromImage");
+  private static File TEST_ROOT_DIR =
+                      new File(MiniDFSCluster.getBaseDirectory());
   
   public int numDataNodes = 4;
   
@@ -204,4 +207,47 @@ public class TestDFSUpgradeFromImage extends TestCase {
       if (cluster != null) { cluster.shutdown(); }
     }
   }
+
+  /**
+   * Test that sets up a fake image from Hadoop 0.3.0 and tries to start a
+   * NN, verifying that the correct error message is thrown.
+   */
+  public void testFailOnPreUpgradeImage() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+
+    File namenodeStorage = new File(TEST_ROOT_DIR, "nnimage-0.3.0");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeStorage.toString());
+
+    // Set up a fake NN storage that looks like an ancient Hadoop dir circa 0.3.0
+    FileUtil.fullyDelete(namenodeStorage);
+    assertTrue("Make " + namenodeStorage, namenodeStorage.mkdirs());
+    File imageDir = new File(namenodeStorage, "image");
+    assertTrue("Make " + imageDir, imageDir.mkdirs());
+
+    // Hex dump of a formatted image from Hadoop 0.3.0
+    File imageFile = new File(imageDir, "fsimage");
+    byte[] imageBytes = StringUtils.hexStringToByte(
+      "fffffffee17c0d2700000000");
+    FileOutputStream fos = new FileOutputStream(imageFile);
+    try {
+      fos.write(imageBytes);
+    } finally {
+      fos.close();
+    }
+
+    // Now try to start an NN from it
+
+    try {
+      new MiniDFSCluster.Builder(conf).numDataNodes(0)
+        .format(false)
+        .manageDataDfsDirs(false)
+        .manageNameDfsDirs(false)
+        .startupOption(StartupOption.REGULAR)
+        .build();
+      fail("Was able to start NN from 0.3.0 image");
+    } catch (IOException ioe) {
+      LOG.info("Got expected exception", ioe);
+      assertTrue(ioe.toString().contains("Old layout version is 'too old'"));
+    }
+  }
 }

+ 0 - 12
src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java

@@ -298,12 +298,6 @@ public class UpgradeUtilities {
       localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
                               new Path(newDir.toString()),
                               false);
-      Path newImgDir = new Path(newDir.getParent(), "image");
-      if (!localFS.exists(newImgDir))
-        localFS.copyToLocalFile(
-            new Path(namenodeStorage.toString(), "image"),
-            newImgDir,
-            false);
       retVal[i] = newDir;
     }
     return retVal;
@@ -331,12 +325,6 @@ public class UpgradeUtilities {
       localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
                               new Path(newDir.toString()),
                               false);
-      Path newStorageFile = new Path(newDir.getParent(), "storage");
-      if (!localFS.exists(newStorageFile))
-        localFS.copyToLocalFile(
-            new Path(datanodeStorage.toString(), "storage"),
-            newStorageFile,
-            false);
       retVal[i] = newDir;
     }
     return retVal;