
HDFS-3731. 2.0 release upgrade must handle blocks being written from 1.0 (Kihwal Lee via daryn)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1391155 13f79535-47bb-0310-9956-ffa450edef68
Daryn Sharp, 12 years ago
Parent
Commit
4d851385b8

+ 0 - 3
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -224,9 +224,6 @@ Release 0.23.3 - UNRELEASED
     HADOOP-8727. Gracefully deprecate dfs.umaskmode in 2.x onwards (Harsh J
     via bobby)
 
-    HDFS-3922. namenode throws away blocks under construction on restart
-    (Kihwal Lee via daryn)
-
 Release 0.23.2 - UNRELEASED 
 
   NEW FEATURES

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -18,6 +18,12 @@ Release 0.23.4 - UNRELEASED
     HDFS-3831. Failure to renew tokens due to test-sources left in classpath
     (jlowe via bobby)
 
+    HDFS-3922. namenode throws away blocks under construction on restart
+    (Kihwal Lee via daryn)
+
+    HDFS-3731. Release upgrade must handle blocks being written from 1.0
+    (Kihwal Lee via daryn)
+
 Release 0.23.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 1 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -1551,8 +1551,6 @@ public class BlockManager {
 
     // Ignore replicas already scheduled to be removed from the DN
     if(invalidateBlocks.contains(dn.getStorageID(), block)) {
-      assert storedBlock.findDatanode(dn) < 0 : "Block " + block
-        + " in invalidated blocks set should not appear in DN " + dn;
       return storedBlock;
     }
 
@@ -1577,7 +1575,7 @@ public class BlockManager {
     return storedBlock;
   }
 
-  /*
+  /**
    * The next two methods test the various cases under which we must conclude
    * the replica is corrupt, or under construction.  These are laid out
    * as switch statements, on the theory that it is easier to understand

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java

@@ -90,6 +90,12 @@ public abstract class Storage extends StorageInfo {
   public    static final String STORAGE_TMP_LAST_CKPT = "lastcheckpoint.tmp";
   public    static final String STORAGE_PREVIOUS_CKPT = "previous.checkpoint";
   
+  /**
+   * The blocksBeingWritten directory which was used in some 1.x and earlier
+   * releases.
+   */
+  public static final String STORAGE_1_BBW = "blocksBeingWritten";
+  
   public enum StorageState {
     NON_EXISTENT,
     NOT_FORMATTED,
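
For orientation, a minimal stand-alone sketch (not part of the patch) of the two on-disk locations involved. In 1.x the blocksBeingWritten directory sat directly under the DataNode storage root rather than under current/, which is why the upgrade code builds its path from sd.getRoot() using the STORAGE_1_BBW constant added above. The concrete paths and the block pool placeholder below are hypothetical.

import java.io.File;

// Illustrative only; not part of the patch. Shows where the legacy 1.x
// blocksBeingWritten directory lives relative to a DataNode storage root,
// versus where replicas-being-written end up after the upgrade.
public class BbwLayoutSketch {
  public static void main(String[] args) {
    File storageRoot = new File("/data/dfs/data1");  // hypothetical dfs.data.dir

    // 1.x layout: under-construction replicas sit directly under the root,
    // not under current/ -- hence new File(sd.getRoot(), STORAGE_1_BBW).
    File legacyBbw = new File(storageRoot, "blocksBeingWritten");

    // Post-upgrade layout: replicas being written live under the block pool's
    // current/rbw directory (block pool id shown as a placeholder).
    File upgradedRbw = new File(storageRoot,
        "current" + File.separator + "BP-placeholder"
        + File.separator + "current" + File.separator + "rbw");

    System.out.println("legacy blocksBeingWritten present: " + legacyBbw.exists());
    System.out.println("post-upgrade rbw location:         " + upgradedRbw);
  }
}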

+ 29 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -459,6 +459,8 @@ public class DataStorage extends Storage {
     
     File curDir = sd.getCurrentDir();
     File prevDir = sd.getPreviousDir();
+    File bbwDir = new File(sd.getRoot(), Storage.STORAGE_1_BBW);
+
     assert curDir.exists() : "Data node current directory must exist.";
     // Cleanup directory "detach"
     cleanupDetachDir(new File(curDir, STORAGE_DIR_DETACHED));
@@ -479,7 +481,7 @@ public class DataStorage extends Storage {
     BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(nsInfo.getNamespaceID(), 
         nsInfo.getBlockPoolID(), nsInfo.getCTime(), nsInfo.getClusterID());
     bpStorage.format(curDir, nsInfo);
-    linkAllBlocks(tmpDir, new File(curBpDir, STORAGE_DIR_CURRENT));
+    linkAllBlocks(tmpDir, bbwDir, new File(curBpDir, STORAGE_DIR_CURRENT));
     
     // 4. Write version file under <SD>/current
     layoutVersion = HdfsConstants.LAYOUT_VERSION;
@@ -586,14 +588,20 @@ public class DataStorage extends Storage {
              + "; cur CTime = " + this.getCTime());
     assert sd.getCurrentDir().exists() : "Current directory must exist.";
     final File tmpDir = sd.getFinalizedTmp();//finalized.tmp directory
+    final File bbwDir = new File(sd.getRoot(), Storage.STORAGE_1_BBW);
     // 1. rename previous to finalized.tmp
     rename(prevDir, tmpDir);
 
     // 2. delete finalized.tmp dir in a separate thread
+    // Also delete the blocksBeingWritten from HDFS 1.x and earlier, if
+    // it exists.
     new Daemon(new Runnable() {
         public void run() {
           try {
             deleteDir(tmpDir);
+            if (bbwDir.exists()) {
+              deleteDir(bbwDir);
+            }
           } catch(IOException ex) {
             LOG.error("Finalize upgrade for " + dataDirPath + " failed.", ex);
           }
@@ -626,11 +634,16 @@ public class DataStorage extends Storage {
 
   /**
    * Hardlink all finalized and RBW blocks in fromDir to toDir
-   * @param fromDir directory where the snapshot is stored
-   * @param toDir the current data directory
-   * @throws IOException if error occurs during hardlink
+   *
+   * @param fromDir      The directory where the 'from' snapshot is stored
+   * @param fromBbwDir   In HDFS 1.x, the directory where blocks
+   *                     that are under construction are stored.
+   * @param toDir        The current data directory
+   *
+   * @throws IOException If error occurs during hardlink
    */
-  private void linkAllBlocks(File fromDir, File toDir) throws IOException {
+  private void linkAllBlocks(File fromDir, File fromBbwDir, File toDir)
+      throws IOException {
     HardLink hardLink = new HardLink();
     // do the link
     int diskLayoutVersion = this.getLayoutVersion();
@@ -638,13 +651,23 @@ public class DataStorage extends Storage {
       // hardlink finalized blocks in tmpDir/finalized
       linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED), 
           new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
-      // hardlink rbw blocks in tmpDir/finalized
+      // hardlink rbw blocks in tmpDir/rbw
       linkBlocks(new File(fromDir, STORAGE_DIR_RBW), 
           new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
     } else { // pre-RBW version
       // hardlink finalized blocks in tmpDir
       linkBlocks(fromDir, new File(toDir, STORAGE_DIR_FINALIZED), 
           diskLayoutVersion, hardLink);      
+      if (fromBbwDir.exists()) {
+        /*
+         * We need to put the 'blocksBeingWritten' from HDFS 1.x into the rbw
+         * directory.  It's a little messy, because the blocksBeingWriten was
+         * NOT underneath the 'current' directory in those releases.  See
+         * HDFS-3731 for details.
+         */
+        linkBlocks(fromBbwDir,
+            new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
+      }
     } 
     LOG.info( hardLink.linkStats.report() );
   }
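
A compressed illustration of what the new pre-RBW branch of linkAllBlocks accomplishes: every block file in the legacy blocksBeingWritten directory is hard-linked into the new rbw directory, so the upgraded layout shares the existing on-disk data without copying it. This sketch uses plain java.nio.file rather than the HDFS HardLink/linkBlocks helpers, links only regular files, and the paths in main are hypothetical.

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

// Illustrative only; not the HDFS HardLink/linkBlocks code.
public class LinkBbwSketch {
  static void linkAll(Path fromBbwDir, Path toRbwDir) throws IOException {
    Files.createDirectories(toRbwDir);
    try (DirectoryStream<Path> entries = Files.newDirectoryStream(fromBbwDir)) {
      for (Path entry : entries) {
        if (Files.isRegularFile(entry)) {
          // createLink(link, existing): the new rbw name shares the inode
          // with the old blocksBeingWritten file -- no data is copied.
          Files.createLink(toRbwDir.resolve(entry.getFileName()), entry);
        }
      }
    }
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical paths; in the real upgrade they come from the StorageDirectory.
    linkAll(Paths.get("/data/dfs/data1/blocksBeingWritten"),
            Paths.get("/data/dfs/data1/current/BP-placeholder/current/rbw"));
  }
}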

+ 97 - 37
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java

@@ -30,19 +30,24 @@ import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
+import org.junit.Test;
+
 /**
  * This tests data transfer protocol handling in the Datanode. It sends
  * various forms of wrong data and verifies that Datanode handles it well.
  * 
- * This test uses the following two file from src/test/.../dfs directory :
- *   1) hadoop-version-dfs-dir.tgz : contains DFS directories.
+ * This test uses the following items from src/test/.../dfs directory :
+ *   1) hadoop-22-dfs-dir.tgz and other tarred pre-upgrade NN / DN 
+ *      directory images
  *   2) hadoop-dfs-dir.txt : checksums that are compared in this test.
  * Please read hadoop-dfs-dir.txt for more information.  
  */
@@ -55,14 +60,23 @@ public class TestDFSUpgradeFromImage extends TestCase {
   private static final String HADOOP14_IMAGE = "hadoop-14-dfs-dir.tgz";
   private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
   private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
-  
-  public int numDataNodes = 4;
-  
+  private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz";
+
   private static class ReferenceFileInfo {
     String path;
     long checksum;
   }
   
+  private static final Configuration upgradeConf;
+  
+  static {
+    upgradeConf = new HdfsConfiguration();
+    upgradeConf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off
+    if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Maven
+      System.setProperty("test.build.data", "build/test/data");
+    }
+  }
+  
   LinkedList<ReferenceFileInfo> refList = new LinkedList<ReferenceFileInfo>();
   Iterator<ReferenceFileInfo> refIter;
   
@@ -134,11 +148,33 @@ public class TestDFSUpgradeFromImage extends TestCase {
     }
   }
   
-  CRC32 overallChecksum = new CRC32();
+  /**
+   * Try to open a file for reading several times.
+   * 
+   * If we fail because lease recovery hasn't completed, retry the open.
+   */
+  private static FSInputStream dfsOpenFileWithRetries(DistributedFileSystem dfs,
+      String pathName) throws IOException {
+    IOException exc = null;
+    for (int tries = 0; tries < 10; tries++) {
+      try {
+        return dfs.dfs.open(pathName);
+      } catch (IOException e) {
+        exc = e;
+      }
+      if (!exc.getMessage().contains("Cannot obtain " +
+          "block length for LocatedBlock")) {
+        throw exc;
+      }
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException ignored) {}
+    }
+    throw exc;
+  }
   
-  private void verifyDir(DistributedFileSystem dfs, Path dir) 
-                                           throws IOException {
-    
+  private void verifyDir(DistributedFileSystem dfs, Path dir,
+      CRC32 overallChecksum) throws IOException {
     FileStatus[] fileArr = dfs.listStatus(dir);
     TreeMap<Path, Boolean> fileMap = new TreeMap<Path, Boolean>();
     
@@ -154,11 +190,11 @@ public class TestDFSUpgradeFromImage extends TestCase {
       overallChecksum.update(pathName.getBytes());
       
       if ( isDir ) {
-        verifyDir(dfs, path);
+        verifyDir(dfs, path, overallChecksum);
       } else {
         // this is not a directory. Checksum the file data.
         CRC32 fileCRC = new CRC32();
-        FSInputStream in = dfs.dfs.open(pathName);
+        FSInputStream in = dfsOpenFileWithRetries(dfs, pathName);
         byte[] buf = new byte[4096];
         int nRead = 0;
         while ( (nRead = in.read(buf, 0, buf.length)) > 0 ) {
@@ -172,7 +208,8 @@ public class TestDFSUpgradeFromImage extends TestCase {
   
   private void verifyFileSystem(DistributedFileSystem dfs) throws IOException {
   
-    verifyDir(dfs, new Path("/"));
+    CRC32 overallChecksum = new CRC32();
+    verifyDir(dfs, new Path("/"), overallChecksum);
     
     verifyChecksum("overallCRC", overallChecksum.getValue());
     
@@ -227,20 +264,13 @@ public class TestDFSUpgradeFromImage extends TestCase {
     }
   }
   
-  /**
-   * Test upgrade from an 0.14 image
-   */
-  public void testUpgradeFromRel14Image() throws IOException {
-    unpackStorage();
-    upgradeAndVerify();
-  }
-  
   /**
    * Test upgrade from 0.22 image
    */
   public void testUpgradeFromRel22Image() throws IOException {
     unpackStorage(HADOOP22_IMAGE);
-    upgradeAndVerify();
+    upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).
+        numDataNodes(4));
   }
   
   /**
@@ -261,7 +291,8 @@ public class TestDFSUpgradeFromImage extends TestCase {
     
     // Upgrade should now fail
     try {
-      upgradeAndVerify();
+      upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).
+          numDataNodes(4));
       fail("Upgrade did not fail with bad MD5");
     } catch (IOException ioe) {
       String msg = StringUtils.stringifyException(ioe);
@@ -270,21 +301,35 @@ public class TestDFSUpgradeFromImage extends TestCase {
       }
     }
   }
-
-  private void upgradeAndVerify() throws IOException {
+    
+  static void recoverAllLeases(DFSClient dfs, 
+      Path path) throws IOException {
+    String pathStr = path.toString();
+    HdfsFileStatus status = dfs.getFileInfo(pathStr);
+    if (!status.isDir()) {
+      dfs.recoverLease(pathStr);
+      return;
+    }
+    byte prev[] = HdfsFileStatus.EMPTY_NAME;
+    DirectoryListing dirList;
+    do {
+      dirList = dfs.listPaths(pathStr, prev);
+      HdfsFileStatus files[] = dirList.getPartialListing();
+      for (HdfsFileStatus f : files) {
+        recoverAllLeases(dfs, f.getFullPath(path));
+      }
+      prev = dirList.getLastName();
+    } while (dirList.hasMore());
+  }
+  
+  private void upgradeAndVerify(MiniDFSCluster.Builder bld)
+      throws IOException {
     MiniDFSCluster cluster = null;
     try {
-      Configuration conf = new HdfsConfiguration();
-      if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
-        System.setProperty("test.build.data", "build/test/data");
-      }
-      conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off
-      cluster = new MiniDFSCluster.Builder(conf)
-                                  .numDataNodes(numDataNodes)
-                                  .format(false)
-                                  .startupOption(StartupOption.UPGRADE)
-                                  .clusterId("testClusterId")
-                                  .build();
+      bld.format(false).startupOption(StartupOption.UPGRADE)
+        .clusterId("testClusterId");
+      cluster = bld.build();
+
       cluster.waitActive();
       DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
       DFSClient dfsClient = dfs.dfs;
@@ -295,12 +340,27 @@ public class TestDFSUpgradeFromImage extends TestCase {
           Thread.sleep(1000);
         } catch (InterruptedException ignored) {}
       }
-
+      recoverAllLeases(dfsClient, new Path("/"));
       verifyFileSystem(dfs);
     } finally {
       if (cluster != null) { cluster.shutdown(); }
     } 
   }
 
-
+  /**
+   * Test upgrade from a 1.x image with some blocksBeingWritten
+   */
+  @Test
+  public void testUpgradeFromRel1BBWImage() throws IOException {
+    unpackStorage(HADOOP1_BBW_IMAGE);
+    Configuration conf = new Configuration(upgradeConf);
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, 
+        System.getProperty("test.build.data") + File.separator + 
+        "dfs" + File.separator + 
+        "data" + File.separator + 
+        "data1");
+    upgradeAndVerify(new MiniDFSCluster.Builder(conf).
+          numDataNodes(1).enableManagedDfsDirsRedundancy(false).
+          manageDataDfsDirs(false));
+  }
 }
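
The dfsOpenFileWithRetries helper in the test above is an instance of a general pattern: keep retrying an operation while the failure looks like the one transient condition you are willing to wait out (here, lease recovery that has not finished yet), and rethrow anything else immediately. Below is a stand-alone sketch of that pattern in plain java.io so it runs without a MiniDFSCluster; the retried condition and the path are placeholders, not the HDFS test's actual check.

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;

// Stand-alone sketch of the retry-open pattern; not HDFS code. The only
// failure treated as transient here is FileNotFoundException, standing in
// for the "Cannot obtain block length for LocatedBlock" case the test waits
// out while lease recovery completes.
public class OpenWithRetriesSketch {
  static InputStream openWithRetries(String path, int maxTries) throws IOException {
    IOException last = null;
    for (int tries = 0; tries < maxTries; tries++) {
      try {
        return new FileInputStream(path);
      } catch (FileNotFoundException e) {
        last = e;                // transient: the file may appear shortly
      }
      try {
        Thread.sleep(1000);      // back off before the next attempt
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        break;
      }
    }
    if (last == null) {
      throw new IOException("no attempts were made");
    }
    throw last;
  }

  public static void main(String[] args) throws IOException {
    // Hypothetical path that some other process is expected to create soon.
    try (InputStream in = openWithRetries("/tmp/slow-to-appear.txt", 10)) {
      System.out.println("opened; first byte = " + in.read());
    }
  }
}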

+ 0 - 263
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java

@@ -1,263 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-package org.apache.hadoop.hdfs.server.common;
-
-import static org.apache.hadoop.hdfs.protocol.HdfsConstants.LAYOUT_VERSION;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.TestDFSUpgradeFromImage;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode;
-import org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.hdfs.tools.DFSAdmin;
-
-import org.junit.Test;
-import static org.junit.Assert.*;
-
-/**
- */
-public class TestDistributedUpgrade {
-  private static final Log LOG = LogFactory.getLog(TestDistributedUpgrade.class);
-  private Configuration conf;
-  private int testCounter = 0;
-  private MiniDFSCluster cluster = null;
-  private String clusterId = "testClsterId";
-    
-  /**
-   * Writes an INFO log message containing the parameters.
-   */
-  void log(String label, int numDirs) {
-    LOG.info("============================================================");
-    LOG.info("***TEST " + (testCounter++) + "*** " 
-             + label + ":"
-             + " numDirs="+numDirs);
-  }
-  
-  /**
-   * Attempts to start a NameNode with the given operation.  Starting
-   * the NameNode should throw an exception.
-   */
-  void startNameNodeShouldFail(StartupOption operation) {
-    try {
-      //cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).startupOption(operation).build(); // should fail
-      // we set manage dirs to true as NN has to start from untar'ed image with 
-      // nn dirs set to name1 and name2
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
-                                              .format(false)
-                                              .clusterId(clusterId)
-                                              .startupOption(operation)
-                                              .build(); // should fail
-      throw new AssertionError("NameNode should have failed to start");
-    } catch (Exception expected) {
-      expected = null;
-      // expected
-    }
-  }
-  
-  /**
-   * Attempts to start a DataNode with the given operation.  Starting
-   * the DataNode should throw an exception.
-   */
-  void startDataNodeShouldFail(StartupOption operation) {
-    try {
-      cluster.startDataNodes(conf, 1, false, operation, null); // should fail
-      throw new AssertionError("DataNode should have failed to start");
-    } catch (Exception expected) {
-      // expected
-      assertFalse(cluster.isDataNodeUp());
-    }
-  }
- 
-  /**
-   */
-  @Test(timeout=120000)
-  public void testDistributedUpgrade() throws Exception {
-    int numDirs = 1;
-    TestDFSUpgradeFromImage testImg = new TestDFSUpgradeFromImage();
-    testImg.unpackStorage();
-    int numDNs = testImg.numDataNodes;
-    
-    // register new upgrade objects (ignore all existing)
-    UpgradeObjectCollection.initialize();
-    UpgradeObjectCollection.registerUpgrade(new UO_Datanode1());
-    UpgradeObjectCollection.registerUpgrade(new UO_Namenode1());
-    UpgradeObjectCollection.registerUpgrade(new UO_Datanode2());
-    UpgradeObjectCollection.registerUpgrade(new UO_Namenode2());
-    UpgradeObjectCollection.registerUpgrade(new UO_Datanode3());
-    UpgradeObjectCollection.registerUpgrade(new UO_Namenode3());
-
-    conf = new HdfsConfiguration();
-    if (System.getProperty("test.build.data") == null) { // to test to be run outside of ant
-      System.setProperty("test.build.data", "build/test/data");
-    }
-    conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off
-
-    log("NameNode start in regular mode when dustributed upgrade is required", numDirs);
-    startNameNodeShouldFail(StartupOption.REGULAR);
-
-    log("Start NameNode only distributed upgrade", numDirs);
-    // cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false)
-    // .startupOption(StartupOption.UPGRADE).build();
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
-                                              .format(false)
-                                              .clusterId(clusterId)
-                                              .startupOption(StartupOption.UPGRADE)
-                                              .build();
-    cluster.shutdown();
-
-    log("NameNode start in regular mode when dustributed upgrade has been started", numDirs);
-    startNameNodeShouldFail(StartupOption.REGULAR);
-
-    log("NameNode rollback to the old version that require a dustributed upgrade", numDirs);
-    startNameNodeShouldFail(StartupOption.ROLLBACK);
-
-    log("Normal distributed upgrade for the cluster", numDirs);
-    cluster = new MiniDFSCluster.Builder(conf)
-                                .numDataNodes(numDNs)
-                                .format(false)
-                                .clusterId(clusterId)
-                                .startupOption(StartupOption.UPGRADE)
-                                .build();
-    DFSAdmin dfsAdmin = new DFSAdmin();
-    dfsAdmin.setConf(conf);
-    dfsAdmin.run(new String[] {"-safemode", "wait"});
-    cluster.shutdown();
-
-    // it should be ok to start in regular mode
-    log("NameCluster regular startup after the upgrade", numDirs);
-    cluster = new MiniDFSCluster.Builder(conf)
-                                .numDataNodes(numDNs)
-                                .clusterId(clusterId)
-                                .format(false)
-                                .startupOption(StartupOption.REGULAR)
-                                .build();
-
-    cluster.waitActive();
-    cluster.shutdown();
-  }
-
-  public static void main(String[] args) throws Exception {
-    new TestDistributedUpgrade().testDistributedUpgrade();
-    LOG.info("=== DONE ===");
-  }
-}
-
-/**
- * Upgrade object for data-node
- */
-class UO_Datanode extends UpgradeObjectDatanode {
-  int version;
-
-  UO_Datanode(int v) {
-    this.status = (short)0;
-    version = v;
-  }
-
-  public int getVersion() {
-    return version;
-  }
-
-  public void doUpgrade() throws IOException {
-    this.status = (short)100;
-    DatanodeProtocol nn = getNamenode();
-    nn.processUpgradeCommand(
-        new UpgradeCommand(UpgradeCommand.UC_ACTION_REPORT_STATUS, 
-            getVersion(), getUpgradeStatus()));
-  }
-
-  public UpgradeCommand startUpgrade() throws IOException {
-    return null;
-  }
-}
-
-/**
- * Upgrade object for name-node
- */
-class UO_Namenode extends UpgradeObjectNamenode {
-  int version;
-
-  UO_Namenode(int v) {
-    status = (short)0;
-    version = v;
-  }
-
-  public int getVersion() {
-    return version;
-  }
-
-  synchronized public UpgradeCommand processUpgradeCommand(
-                                  UpgradeCommand command) throws IOException {
-    switch(command.getAction()) {
-      case UpgradeCommand.UC_ACTION_REPORT_STATUS:
-        this.status += command.getCurrentStatus()/8;  // 4 reports needed
-        break;
-      default:
-        this.status++;
-    }
-    return null;
-  }
-
-  public UpgradeCommand completeUpgrade() throws IOException {
-    return null;
-  }
-}
-
-class UO_Datanode1 extends UO_Datanode {
-  UO_Datanode1() {
-    super(LAYOUT_VERSION+1);
-  }
-}
-
-class UO_Namenode1 extends UO_Namenode {
-  UO_Namenode1() {
-    super(LAYOUT_VERSION+1);
-  }
-}
-
-class UO_Datanode2 extends UO_Datanode {
-  UO_Datanode2() {
-    super(LAYOUT_VERSION+2);
-  }
-}
-
-class UO_Namenode2 extends UO_Namenode {
-  UO_Namenode2() {
-    super(LAYOUT_VERSION+2);
-  }
-}
-
-class UO_Datanode3 extends UO_Datanode {
-  UO_Datanode3() {
-    super(LAYOUT_VERSION+3);
-  }
-}
-
-class UO_Namenode3 extends UO_Namenode {
-  UO_Namenode3() {
-    super(LAYOUT_VERSION+3);
-  }
-}

Binary
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop1-bbw.tgz