HADOOP-2345. Fixed bad disk format introduced by HADOOP-2345.
Disk layout version changed from -12 to -13. (dhruba)



git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@630992 13f79535-47bb-0310-9956-ffa450edef68

Dhruba Borthakur 17 years ago
parent
commit
6980bad97d

+ 3 - 0
CHANGES.txt

@@ -16,6 +16,9 @@ Trunk (unreleased changes)
     HADOOP-1902. "dfs du" command without any arguments operates on the
     current working directory.  (Mahadev Konar via dhruba)
 
+    HADOOP-2345.  Fixed bad disk format introduced by HADOOP-2345.
+    Disk layout version changed from -12 to -13. (dhruba)
+
   NEW FEATURES
 
     HADOOP-1398.  Add HBase in-memory block cache.  (tomwhite)
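
For context, the bad format was a writer/reader mismatch in the files-under-construction section of the fsimage: as the FSNamesystem diff below shows, the old writer recorded sortedLeases.size() as the record count but then emitted one record per open path, so a lease covering several files wrote more records than the header claimed. A minimal sketch of the mismatch, with a plain map standing in for the real lease structures:

import java.util.*;

class RecordCountSketch {
  public static void main(String[] args) {
    // One client lease can cover several files open for write.
    Map<String, List<String>> leases = new LinkedHashMap<>();
    leases.put("DFSClient_1", Arrays.asList("/filestatus.dat", "/filestatus2.dat"));
    leases.put("DFSClient_2", Arrays.asList("/other.dat"));

    int recordsWritten = 0;
    for (List<String> paths : leases.values())
      recordsWritten += paths.size();     // 3 records actually on disk

    int oldHeaderCount = leases.size();   // old code wrote 2 as the count
    // A reader that trusts the header stops after 2 records, leaving one
    // record unread and corrupting everything parsed after it.
    System.out.println("header=" + oldHeaderCount
                       + " records=" + recordsWritten);
  }
}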

+ 2 - 2
src/java/org/apache/hadoop/dfs/FSConstants.java

@@ -183,7 +183,7 @@ public interface FSConstants {
   // Version is reflected in the data storage file.
   // Versions are negative.
   // Decrement LAYOUT_VERSION to define a new version.
-  public static final int LAYOUT_VERSION = -12;
+  public static final int LAYOUT_VERSION = -13;
   // Current version: 
-  // Introduce OPEN, CLOSE and GENSTAMP transactions for supporting appends
+  // Fix bug introduced by OPEN, CLOSE and GENSTAMP transactions for supporting appends
 }
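
Layout versions are negative and decremented for each format change, so a smaller value means a newer format. A hedged sketch of the gating idiom this enables (hasLeaseSection is an illustrative helper, not a Hadoop method), mirroring the FSImage check in the next hunk:

class LayoutVersionSketch {
  static final int LAYOUT_VERSION = -13;

  // Images written at -13 or newer persist files under construction;
  // "version > -13" therefore means "too old to contain that section".
  static boolean hasLeaseSection(int imageVersion) {
    return imageVersion <= LAYOUT_VERSION;
  }

  public static void main(String[] args) {
    System.out.println(hasLeaseSection(-12));  // false: pre-lease image
    System.out.println(hasLeaseSection(-13));  // true
  }
}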

+ 1 - 5
src/java/org/apache/hadoop/dfs/FSImage.java

@@ -911,7 +911,7 @@ class FSImage extends Storage {
                                   FSNamesystem fs) throws IOException {
 
     FSDirectory fsDir = fs.dir;
-    if (version > -12) // pre lease image version
+    if (version > -13) // pre lease image version
       return;
     int size = in.readInt();
 
@@ -931,10 +931,6 @@ class FSImage extends Storage {
       fsDir.replaceNode(path, oldnode, cons);
       fs.addLease(path, cons.getClientName()); 
     }
-    if (fs.countLease() != size) {
-      throw new IOException("Created " + size + " leases but found " +
-                            fs.countLease());
-    }
   }
 
   // Helper function that reads in an INodeUnderConstruction
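
The deleted check compared the on-disk record count against fs.countLease(), but these are different quantities: the loader calls addLease once per path, and leases are tracked per client, so a client appearing in several records ends up holding a single lease. A small illustrative sketch (the addLease here is hypothetical, not FSNamesystem's):

import java.util.*;

class LeaseMergeSketch {
  // Leases keyed by client name; addLease reuses an existing entry for
  // a returning client, much as the real namesystem does.
  static Map<String, Set<String>> leases = new HashMap<>();

  static void addLease(String path, String client) {
    leases.computeIfAbsent(client, c -> new HashSet<>()).add(path);
  }

  public static void main(String[] args) {
    // Three persisted records, but only two distinct clients.
    addLease("/a.dat", "DFSClient_1");
    addLease("/b.dat", "DFSClient_1");
    addLease("/c.dat", "DFSClient_2");
    // countLease() == 2 while size == 3, so "countLease() != size"
    // would reject a perfectly valid image; hence the check was dropped.
    System.out.println("leases=" + leases.size() + " records=3");
  }
}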

+ 6 - 3
src/java/org/apache/hadoop/dfs/FSNamesystem.java

@@ -4202,9 +4202,12 @@ class FSNamesystem implements FSConstants, FSNamesystemMBean {
    */
   void saveFilesUnderConstruction(DataOutputStream out) throws IOException {
     synchronized (sortedLeases) {
-      out.writeInt(sortedLeases.size()); // write the size
-      for (Iterator<Lease> it = sortedLeases.iterator(); it.hasNext();) {
-        Lease lease = it.next();        
+      int count = 0;
+      for (Lease lease : sortedLeases) {
+        count += lease.getPaths().size();
+      }
+      out.writeInt(count); // write the size
+      for (Lease lease : sortedLeases) {
         Collection<StringBytesWritable> files = lease.getPaths();
         for (Iterator<StringBytesWritable> i = files.iterator(); i.hasNext();){
           String path = i.next().getString();
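
With the change above, the header written by saveFilesUnderConstruction is the total number of per-path records, restoring the invariant that the loader reads exactly as many records as were written. A round-trip sketch under that assumption (simplified record format, not the real INodeUnderConstruction serialization):

import java.io.*;
import java.util.*;

class RoundTripSketch {
  public static void main(String[] args) throws IOException {
    Map<String, List<String>> leases = new LinkedHashMap<>();
    leases.put("DFSClient_1", Arrays.asList("/a.dat", "/b.dat"));
    leases.put("DFSClient_2", Arrays.asList("/c.dat"));

    // Writer: count paths first (as the patch does), then one record each.
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    int count = 0;
    for (List<String> paths : leases.values())
      count += paths.size();
    out.writeInt(count);
    for (Map.Entry<String, List<String>> e : leases.entrySet())
      for (String path : e.getValue())
        out.writeUTF(path + "@" + e.getKey());

    // Reader: the header now matches the record stream exactly.
    DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(buf.toByteArray()));
    int size = in.readInt();
    for (int i = 0; i < size; i++)
      System.out.println(in.readUTF());
  }
}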

+ 30 - 1
src/test/org/apache/hadoop/dfs/TestFileCreation.java

@@ -358,7 +358,27 @@ public class TestFileCreation extends TestCase {
                          + "Created file filestatus.dat with one "
                          + " replicas.");
 
+      // create another new file.
+      //
+      Path file2 = new Path("/filestatus2.dat");
+      FSDataOutputStream stm2 = createFile(fs, file2, 1);
+      System.out.println("testFileCreationNamenodeRestart: "
+                         + "Created file filestatus2.dat with one "
+                         + " replicas.");
+
       // restart cluster with the same namenode port as before.
+      // This ensures that leases are persisted in fsimage.
+      cluster.shutdown();
+      try {
+        Thread.sleep(5000);
+      } catch (InterruptedException e) {
+      }
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+
+      // restart cluster yet again. This triggers the code to read in
+      // persistent leases from fsimage.
       cluster.shutdown();
       try {
         Thread.sleep(5000);
@@ -375,13 +395,22 @@ public class TestFileCreation extends TestCase {
       rand.nextBytes(buffer);
       stm.write(buffer);
       stm.close();
+      stm2.write(buffer);
+      stm2.close();
 
       // verify that new block is associated with this file
       DFSClient client = new DFSClient(addr, conf);
       LocatedBlocks locations = client.namenode.getBlockLocations(
                                   file1.toString(), 0, Long.MAX_VALUE);
       System.out.println("locations = " + locations.locatedBlockCount());
-      assertTrue("Error blocks were not cleaned up",
+      assertTrue("Error blocks were not cleaned up for file " + file1,
+                 locations.locatedBlockCount() == 1);
+
+      // verify filestatus2.dat
+      locations = client.namenode.getBlockLocations(
+                                  file2.toString(), 0, Long.MAX_VALUE);
+      System.out.println("locations = " + locations.locatedBlockCount());
+      assertTrue("Error blocks were not cleaned up for file " + file2,
                  locations.locatedBlockCount() == 1);
     } finally {
       fs.close();