
HDFS-7788. Post-2.6 namenode may not start up with an image containing inodes created with an old release. Contributed by Rushabh Shah.
(cherry picked from commit 7ae5255a1613ccfb43646f33eabacf1062c86e93)

Kihwal Lee 10 years ago
parent
commit
b9157f92fc

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -692,6 +692,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7808. Remove obsolete -ns options in DFSHAAdmin.java.
     (Arshad Mohammad via wheat9)
 
+    HDFS-7788. Post-2.6 namenode may not start up with an image containing
+    inodes created with an old release. (Rushabh Shah via kihwal)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -107,6 +107,9 @@ public class INodeFile extends INodeWithAdditionalFields
     static long toLong(long preferredBlockSize, short replication,
         byte storagePolicyID) {
       long h = 0;
+      if (preferredBlockSize == 0) {
+        preferredBlockSize = PREFERRED_BLOCK_SIZE.BITS.getMin();
+      }
       h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
       h = REPLICATION.BITS.combine(replication, h);
       h = STORAGE_POLICY_ID.BITS.combine(storagePolicyID, h);
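
Why the clamp above is needed, as a minimal standalone sketch (not the actual Hadoop classes): LongBitFormat rejects values below the field's configured minimum when packing them into the inode header long, so an image written by an old release with preferredBlockSize = 0 made the namenode throw while loading. The BitField class and the 48-bit / minimum-1 layout below are illustrative assumptions meant to mirror INodeFile.HeaderFormat.PREFERRED_BLOCK_SIZE.

// Illustrative sketch only: a simplified stand-in for LongBitFormat and the
// pre/post-fix behavior when packing a zero preferred block size.
public class ZeroBlockSizeSketch {

  /** Simplified bit field: offset/length in the packed long plus a minimum value. */
  static class BitField {
    final int offset;
    final long min;
    final long max;
    final long mask;

    BitField(int offset, int length, long min) {
      this.offset = offset;
      this.min = min;
      this.max = (-1L) >>> (64 - length);
      this.mask = max << offset;
    }

    long getMin() {
      return min;
    }

    /** Packs value into record; rejects out-of-range values like the real combine(). */
    long combine(long value, long record) {
      if (value < min || value > max) {
        throw new IllegalArgumentException(
            "Illegal value: " + value + " not in [" + min + ", " + max + "]");
      }
      return (record & ~mask) | (value << offset);
    }
  }

  public static void main(String[] args) {
    // Assumed layout: preferred block size in the low 48 bits with minimum 1.
    BitField preferredBlockSize = new BitField(0, 48, 1);
    long fromOldImage = 0; // a file created before 2.1.0-beta could have block size 0

    try {
      // Pre-fix behavior: this throws, and the namenode aborts while loading the image.
      preferredBlockSize.combine(fromOldImage, 0L);
    } catch (IllegalArgumentException e) {
      System.out.println("unpatched: " + e.getMessage());
    }

    // Post-fix behavior: clamp zero to the field's minimum before packing.
    long safe = (fromOldImage == 0) ? preferredBlockSize.getMin() : fromOldImage;
    System.out.println("patched header = 0x"
        + Long.toHexString(preferredBlockSize.combine(safe, 0L)));
  }
}

As the hunk above shows, the actual fix clamps only at the point where the header long is built, so a zero-block-size file from an old image is loaded with the minimum legal preferred block size instead of aborting namenode startup.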

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java

@@ -64,4 +64,8 @@ public class LongBitFormat implements Serializable {
     }
     return (record & ~MASK) | (value << OFFSET);
   }
+  
+  public long getMin() {
+    return MIN;
+  }
 }

+ 48 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java

@@ -28,10 +28,13 @@ import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -40,10 +43,14 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;
 
 public class TestFSImage {
 
+  private static final String HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ =
+      "image-with-zero-block-size.tar.gz";
   @Test
   public void testPersist() throws IOException {
     Configuration conf = new Configuration();
@@ -183,4 +190,45 @@ public class TestFSImage {
       }
     }
   }
+  
+  /**
+   * This test loads an image containing a file with preferredBlockSize = 0,
+   * which was allowed before release 2.1.0-beta. Without the fix for
+   * HDFS-7788, a post-2.6 namenode cannot read such a file and fails to
+   * start up. See HDFS-7788 for more information.
+   * @throws Exception
+   */
+  @Test
+  public void testZeroBlockSize() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+      + "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
+    String testDir = PathUtils.getTestDirName(getClass());
+    File dfsDir = new File(testDir, "image-with-zero-block-size");
+    if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
+      throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
+    }
+    FileUtil.unTar(new File(tarFile), new File(testDir));
+    File nameDir = new File(dfsDir, "name");
+    GenericTestUtils.assertExists(nameDir);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, 
+        nameDir.getAbsolutePath());
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .format(false)
+        .manageDataDfsDirs(false)
+        .manageNameDfsDirs(false)
+        .waitSafeMode(false)
+        .build();
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      Path testPath = new Path("/tmp/zeroBlockFile");
+      assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
+      assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
+    } finally {
+      cluster.shutdown();
+      //Clean up
+      FileUtil.fullyDelete(dfsDir);
+    }
+  }
 }

BIN
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/image-with-zero-block-size.tar.gz