Browse Source

HADOOP-4395. The FSEditLog loading is incorrect for the case OP_SET_OWNER. (szetszwo)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/branch-0.18@704194 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 16 years ago
parent
commit
766506c978

+ 6 - 3
CHANGES.txt

@@ -9,17 +9,20 @@ Release 0.18.2 - Unreleased
     HADOOP-3614. Fix a bug that Datanode may use an old GenerationStamp to get
     meta file. (szetszwo)
 
+    HADOOP-4314. Simulated datanodes should not include blocks that are still
+    being written in their block report. (Raghu Angadi)
+
     HADOOP-4228. dfs datanoe metrics, bytes_read and bytes_written, overflow
     due to incorrect type used. (hairong)
 
+    HADOOP-4395. The FSEditLog loading is incorrect for the case OP_SET_OWNER.
+    (szetszwo)
+
   NEW FEATURES
 
     HADOOP-2421.  Add jdiff output to documentation, listing all API
     changes from the prior release.  (cutting)
 
-    HADOOP-4314. Simulated datanodes should not include blocks that are still
-    being written in their block report. (Raghu Angadi)
-
 Release 0.18.1 - 2008-09-17
 
   IMPROVEMENTS

+ 2 - 1
src/hdfs/org/apache/hadoop/dfs/FSEditLog.java

@@ -631,7 +631,8 @@ class FSEditLog {
              throw new IOException("Unexpected opcode " + opcode
                                    + " for version " + logVersion);
            fsDir.unprotectedSetOwner(FSImage.readString(in),
-                FSImage.readString(in), FSImage.readString(in));
+                FSImage.readString_EmptyAsNull(in),
+                FSImage.readString_EmptyAsNull(in));
            break;
          }
          case OP_SET_QUOTA: {

+ 5 - 0
src/hdfs/org/apache/hadoop/dfs/FSImage.java

@@ -1369,6 +1369,11 @@ class FSImage extends Storage {
    return U_STR.toString();
  }
 
+  static String readString_EmptyAsNull(DataInputStream in) throws IOException {
+    final String s = readString(in);
+    return s.isEmpty()? null: s;
+  }
+
  static byte[] readBytes(DataInputStream in) throws IOException {
    U_STR.readFields(in);
    int len = U_STR.getLength();

+ 33 - 26
src/test/org/apache/hadoop/dfs/TestRestartDFS.java

@@ -18,12 +18,10 @@
 
 package org.apache.hadoop.dfs;
 
-import java.io.IOException;
-import java.util.Random;
-import junit.framework.*;
+import junit.framework.TestCase;
+
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 
@@ -31,30 +29,31 @@ import org.apache.hadoop.fs.Path;
  * A JUnit test for checking if restarting DFS preserves integrity.
  */
 public class TestRestartDFS extends TestCase {
-  
-  private static Configuration conf = new Configuration();
-
-  public TestRestartDFS(String testName) {
-    super(testName);
-  }
-
-  protected void setUp() throws Exception {
-  }
-
-  protected void tearDown() throws Exception {
-  }
-  
   /** check if DFS remains in proper condition after a restart */
   public void testRestartDFS() throws Exception {
+    final Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     DFSTestUtil files = new DFSTestUtil("TestRestartDFS", 20, 3, 8*1024);
-    Path root = new Path("/");
-    long modificationTime;
+
+    final String dir = "/srcdat";
+    final Path rootpath = new Path("/");
+    final Path dirpath = new Path(dir);
+
+    long rootmtime;
+    FileStatus rootstatus;
+    FileStatus dirstatus;
+
     try {
       cluster = new MiniDFSCluster(conf, 4, true, null);
       FileSystem fs = cluster.getFileSystem();
-      files.createFiles(fs, "/srcdat");
-      modificationTime = fs.getFileStatus(root).getModificationTime();
+      files.createFiles(fs, dir);
+
+      rootmtime = fs.getFileStatus(rootpath).getModificationTime();
+      rootstatus = fs.getFileStatus(rootpath);
+      dirstatus = fs.getFileStatus(dirpath);
+
+      fs.setOwner(rootpath, rootstatus.getOwner() + "_XXX", null);
+      fs.setOwner(dirpath, null, dirstatus.getGroup() + "_XXX");
     } finally {
       if (cluster != null) { cluster.shutdown(); }
     }
     }
@@ -62,11 +61,19 @@ public class TestRestartDFS extends TestCase {
       // Here we restart the MiniDFScluster without formatting namenode
       cluster = new MiniDFSCluster(conf, 4, false, null);
       FileSystem fs = cluster.getFileSystem();
-      assertEquals(modificationTime,
-                   fs.getFileStatus(root).getModificationTime());
       assertTrue("Filesystem corrupted after restart.",
-                 files.checkFiles(fs, "/srcdat"));
-      files.cleanup(fs, "/srcdat");
+                 files.checkFiles(fs, dir));
+
+      final FileStatus newrootstatus = fs.getFileStatus(rootpath);
+      assertEquals(rootmtime, newrootstatus.getModificationTime());
+      assertEquals(rootstatus.getOwner() + "_XXX", newrootstatus.getOwner());
+      assertEquals(rootstatus.getGroup(), newrootstatus.getGroup());
+
+      final FileStatus newdirstatus = fs.getFileStatus(dirpath);
+      assertEquals(dirstatus.getOwner(), newdirstatus.getOwner());
+      assertEquals(dirstatus.getGroup() + "_XXX", newdirstatus.getGroup());
+
+      files.cleanup(fs, dir);
     } finally {
       if (cluster != null) { cluster.shutdown(); }
     }