Browse Source

svn merge -c 1487643 from trunk for HDFS-4842. Identify the correct prior snapshot when deleting a snapshot under a renamed subtree.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1488091 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 12 years ago
parent
commit
ab2588a78b

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -429,7 +429,7 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4610. Use common utils FileUtil#setReadable/Writable/Executable and 
     FileUtil#canRead/Write/Execute. (Ivan Mitic via suresh)
 
-  BREAKDOWN OF HDFS-2802 HDFS SNAPSHOT SUBTASKS
+  BREAKDOWN OF HDFS-2802 HDFS SNAPSHOT SUBTASKS AND RELATED JIRAS
 
     HDFS-4076. Support snapshot of single files.  (szetszwo)
 
@@ -768,6 +768,9 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4809. When a QuotaExceededException is thrown during rename, the quota
     usage should be subtracted back.  (Jing Zhao via szetszwo)
 
+    HDFS-4842. Identify the correct prior snapshot when deleting a 
+    snapshot under a renamed subtree. (jing9)
+
 Release 2.0.4-alpha - 2013-04-25
 
   INCOMPATIBLE CHANGES

+ 10 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java

@@ -493,6 +493,11 @@ public abstract class INodeReference extends INode {
       if (prior == null) {
         prior = getPriorSnapshot(this);
       }
+      
+      if (prior != null
+          && Snapshot.ID_COMPARATOR.compare(snapshot, prior) <= 0) {
+        return Quota.Counts.newInstance();
+      }
 
       Quota.Counts counts = getReferredINode().cleanSubtree(snapshot, prior,
           collectedBlocks, removedINodes);
@@ -596,7 +601,11 @@ public abstract class INodeReference extends INode {
         if (prior == null) {
           prior = getPriorSnapshot(this);
         }
-        if (snapshot != null && snapshot.equals(prior)) {
+        // if prior is not null, and prior is not before the to-be-deleted 
+        // snapshot, we can quit here and leave the snapshot deletion work to 
+        // the src tree of rename
+        if (snapshot != null && prior != null
+            && Snapshot.ID_COMPARATOR.compare(snapshot, prior) <= 0) {
           return Quota.Counts.newInstance();
         }
         return getReferredINode().cleanSubtree(snapshot, prior,

+ 162 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java

@@ -1915,4 +1915,166 @@ public class TestRenameWithSnapshots {
     INodeFile barNode = (INodeFile) fsdir.getINode4Write(bar3.toString());
     assertSame(fsdir.getINode4Write(dir3.toString()), barNode.getParent());
   }
+  
+  /**
+   * Rename a directory and delete a snapshot under the same snapshottable directory.
+   */
+  @Test
+  public void testRenameDirAndDeleteSnapshot_6() throws Exception {
+    final Path test = new Path("/test");
+    final Path dir1 = new Path(test, "dir1");
+    final Path dir2 = new Path(test, "dir2");
+    hdfs.mkdirs(dir1);
+    hdfs.mkdirs(dir2);
+    
+    final Path foo = new Path(dir2, "foo");
+    final Path bar = new Path(foo, "bar");
+    final Path file = new Path(bar, "file");
+    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);
+    
+    // take a snapshot on /test
+    SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
+    
+    // delete /test/dir2/foo/bar/file after snapshot s0, so that there is a 
+    // snapshot copy recorded in bar
+    hdfs.delete(file, true);
+    
+    // rename foo from dir2 to dir1
+    final Path newfoo = new Path(dir1, foo.getName());
+    hdfs.rename(foo, newfoo);
+    
+    final Path foo_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
+        "dir2/foo");
+    assertTrue("the snapshot path " + foo_s0 + " should exist",
+        hdfs.exists(foo_s0));
+    
+    // delete snapshot s0. The deletion will first go down through dir1, and 
+    // find foo in the created list of dir1. Then it will use null as the prior
+    // snapshot and continue the snapshot deletion process in the subtree of 
+    // foo. We need to make sure the snapshot s0 can be deleted cleanly in the
+    // foo subtree.
+    hdfs.deleteSnapshot(test, "s0");
+    // check the internal state
+    assertFalse("after deleting s0, " + foo_s0 + " should not exist",
+        hdfs.exists(foo_s0));
+    INodeDirectoryWithSnapshot dir2Node = (INodeDirectoryWithSnapshot) fsdir
+        .getINode4Write(dir2.toString());
+    assertTrue("the diff list of " + dir2
+        + " should be empty after deleting s0", dir2Node.getDiffs().asList()
+        .isEmpty());
+    
+    assertTrue(hdfs.exists(newfoo));
+    INode fooRefNode = fsdir.getINode4Write(newfoo.toString());
+    assertTrue(fooRefNode instanceof INodeReference.DstReference);
+    INodeDirectory fooNode = fooRefNode.asDirectory();
+    // fooNode should be still INodeDirectoryWithSnapshot since we call
+    // recordModification before the rename
+    assertTrue(fooNode instanceof INodeDirectoryWithSnapshot);
+    assertTrue(((INodeDirectoryWithSnapshot) fooNode).getDiffs().asList()
+        .isEmpty());
+    INodeDirectory barNode = fooNode.getChildrenList(null).get(0).asDirectory();
+    // bar should also be an INodeDirectoryWithSnapshot, and both of its diff 
+    // list and children list are empty 
+    assertTrue(((INodeDirectoryWithSnapshot) barNode).getDiffs().asList()
+        .isEmpty());
+    assertTrue(barNode.getChildrenList(null).isEmpty());
+    
+    restartClusterAndCheckImage();
+  }
+  
+  /**
+   * Unit test for HDFS-4842.
+   */
+  @Test
+  public void testRenameDirAndDeleteSnapshot_7() throws Exception {
+    fsn.getSnapshotManager().setAllowNestedSnapshots(true);
+    final Path test = new Path("/test");
+    final Path dir1 = new Path(test, "dir1");
+    final Path dir2 = new Path(test, "dir2");
+    hdfs.mkdirs(dir1);
+    hdfs.mkdirs(dir2);
+    
+    final Path foo = new Path(dir2, "foo");
+    final Path bar = new Path(foo, "bar");
+    final Path file = new Path(bar, "file");
+    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);
+    
+    // take a snapshot s0 and s1 on /test
+    SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
+    SnapshotTestHelper.createSnapshot(hdfs, test, "s1");
+    // delete file so we have a snapshot copy for s1 in bar
+    hdfs.delete(file, true);
+    
+    // create another snapshot on dir2
+    SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
+    
+    // rename foo from dir2 to dir1
+    final Path newfoo = new Path(dir1, foo.getName());
+    hdfs.rename(foo, newfoo);
+    
+    // delete snapshot s1
+    hdfs.deleteSnapshot(test, "s1");
+    
+    // make sure the snapshot copy of file in s1 is merged to s0. For 
+    // HDFS-4842, we need to make sure that we do not wrongly use s2 as the
+    // prior snapshot of s1.
+    final Path file_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2",
+        "foo/bar/file");
+    assertFalse(hdfs.exists(file_s2));
+    final Path file_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
+        "dir2/foo/bar/file");
+    assertTrue(hdfs.exists(file_s0));
+    
+    // check dir1: foo should be in the created list of s0
+    INodeDirectoryWithSnapshot dir1Node = (INodeDirectoryWithSnapshot) fsdir
+        .getINode4Write(dir1.toString());
+    List<DirectoryDiff> dir1DiffList = dir1Node.getDiffs().asList();
+    assertEquals(1, dir1DiffList.size());
+    List<INode> dList = dir1DiffList.get(0).getChildrenDiff()
+        .getList(ListType.DELETED);
+    assertTrue(dList.isEmpty());
+    List<INode> cList = dir1DiffList.get(0).getChildrenDiff()
+        .getList(ListType.CREATED);
+    assertEquals(1, cList.size());
+    INode cNode = cList.get(0);
+    INode fooNode = fsdir.getINode4Write(newfoo.toString());
+    assertSame(cNode, fooNode);
+    
+    // check foo and its subtree
+    final Path newbar = new Path(newfoo, bar.getName());
+    INodeDirectoryWithSnapshot barNode = (INodeDirectoryWithSnapshot) fsdir
+        .getINode4Write(newbar.toString());
+    assertSame(fooNode.asDirectory(), barNode.getParent());
+    // bar should only have a snapshot diff for s0
+    List<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
+    assertEquals(1, barDiffList.size());
+    DirectoryDiff diff = barDiffList.get(0);
+    assertEquals("s0", Snapshot.getSnapshotName(diff.snapshot));
+    // and file should be stored in the deleted list of this snapshot diff
+    assertEquals("file", diff.getChildrenDiff().getList(ListType.DELETED)
+        .get(0).getLocalName());
+    
+    // check dir2: a WithName instance for foo should be in the deleted list
+    // of the snapshot diff for s2
+    INodeDirectoryWithSnapshot dir2Node = (INodeDirectoryWithSnapshot) fsdir
+        .getINode4Write(dir2.toString());
+    List<DirectoryDiff> dir2DiffList = dir2Node.getDiffs().asList();
+    // dir2Node should contain 2 snapshot diffs, one for s2, and the other was
+    // originally s1 (created when dir2 was transformed to a snapshottable dir),
+    // and currently is s0
+    assertEquals(2, dir2DiffList.size());
+    dList = dir2DiffList.get(1).getChildrenDiff().getList(ListType.DELETED);
+    assertEquals(1, dList.size());
+    cList = dir2DiffList.get(0).getChildrenDiff().getList(ListType.CREATED);
+    assertTrue(cList.isEmpty());
+    final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2", 
+        foo.getName());
+    INodeReference.WithName fooNode_s2 = 
+        (INodeReference.WithName) fsdir.getINode(foo_s2.toString());
+    assertSame(dList.get(0), fooNode_s2);
+    assertSame(fooNode.asReference().getReferredINode(),
+        fooNode_s2.getReferredINode());
+    
+    restartClusterAndCheckImage();
+  }
 }