
HDFS-1842. Change the layout version to -31 to disallow upgrade from and to 0.21 release. Contributed by Suresh Srinivas.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20-security-203@1095980 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas, 14 years ago · commit 888ae99e51

+ 3 - 0
CHANGES.txt

@@ -57,6 +57,9 @@ Release 0.20.203.0 - unreleased
     HADOOP-7215. RPC clients must use network interface corresponding to 
     the host in the client's kerberos principal key. (suresh)
 
+    HDFS-1842. Change the layout version to -31 to disallow upgrade from
+    and to 0.21 release. (suresh)
+
 Release 0.20.202.0 - unreleased
 
     MAPREDUCE-2355. Add a configuration knob 

+ 2 - 3
src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java

@@ -77,8 +77,7 @@ public interface FSConstants {
   // Version is reflected in the data storage file.
   // Versions are negative.
   // Decrement LAYOUT_VERSION to define a new version.
-  public static final int LAYOUT_VERSION = -19;
+  public static final int LAYOUT_VERSION = -31;
   // Current version: 
-  // -19: added new OP_[GET|RENEW|CANCEL]_DELEGATION_TOKEN and
-  // OP_UPDATE_MASTER_KEY.
+  // -31: to disallow upgrade to release 0.21.
 }

+ 14 - 3
src/hdfs/org/apache/hadoop/hdfs/server/common/Storage.java

@@ -73,6 +73,9 @@ public abstract class Storage extends StorageInfo {
    * any upgrade code that uses this constant should also be removed. */
   public static final int PRE_GENERATIONSTAMP_LAYOUT_VERSION = -13;
   
+  /** Layout version of the 0.21 release */
+  public static final int LAYOUT_VERSION_21 = -24;
+  
   private   static final String STORAGE_FILE_LOCK     = "in_use.lock";
   protected static final String STORAGE_FILE_VERSION  = "VERSION";
   public static final String STORAGE_DIR_CURRENT   = "current";
@@ -688,8 +691,7 @@ public abstract class Storage extends StorageInfo {
    * 
    * @param oldVersion
    */
-  protected static void checkVersionUpgradable(int oldVersion) 
-                                     throws IOException {
+  public static void checkVersionUpgradable(int oldVersion) throws IOException {
     if (oldVersion > LAST_UPGRADABLE_LAYOUT_VERSION) {
       String msg = "*********** Upgrade is not supported from this older" +
                    " version of storage to the current version." + 
@@ -703,7 +705,16 @@ public abstract class Storage extends StorageInfo {
       LOG.error(msg);
       throw new IOException(msg); 
     }
-    
+    if (oldVersion == LAYOUT_VERSION_21) {
+      String msg = "*********** Upgrade is not supported from this" +
+                   " version of storage to the current version." + 
+                   " Please upgrade to release 0.22" +
+                   " or a later version and then upgrade to the current" +
+                   " version. Old layout version is " + oldVersion +
+                   ". ************";
+      LOG.error(msg);
+      throw new IOException(msg); 
+    }
   }
   
   /**

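For context (not part of this patch): a minimal sketch of how the new check behaves, reusing only the constants and the now-public checkVersionUpgradable method shown above. The class name LayoutCheckExample is made up for illustration, and it assumes the 0.20.203 HDFS classes are on the classpath.

    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.common.Storage;

    public class LayoutCheckExample {
      public static void main(String[] args) throws IOException {
        // A pre-0.21 layout such as -19 (the previous LAYOUT_VERSION) still passes.
        Storage.checkVersionUpgradable(-19);

        // The 0.21 layout (-24, i.e. Storage.LAYOUT_VERSION_21) is now rejected.
        try {
          Storage.checkVersionUpgradable(Storage.LAYOUT_VERSION_21);
          System.err.println("Unexpected: 0.21 layout was accepted");
        } catch (IOException expected) {
          System.out.println("0.21 layout rejected: " + expected.getMessage());
        }
      }
    }
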
+ 16 - 0
src/test/org/apache/hadoop/hdfs/TestDFSUpgrade.java

@@ -23,6 +23,7 @@ import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 
@@ -238,6 +239,21 @@ public class TestDFSUpgrade extends TestCase {
     } // end numDir loop
   }
  
+  public void testCheckVersionUpgradable() throws Exception {
+    // Except for the 0.21 layout version, all previous layout versions
+    // should be upgradable.
+    for (int i = Storage.LAST_UPGRADABLE_LAYOUT_VERSION; 
+         i > FSConstants.LAYOUT_VERSION; i--) {
+      if (i == Storage.LAYOUT_VERSION_21) {
+        try {
+          Storage.checkVersionUpgradable(i);
+          fail("Expected IOException is not thrown");
+        } catch (IOException expected) { continue; }
+      }
+      Storage.checkVersionUpgradable(i);
+    }
+  }
+  
   protected void tearDown() throws Exception {
     LOG.info("Shutting down MiniDFSCluster");
     if (cluster != null) cluster.shutdown();