Browse source

HDFS-5982. Merge change r1570395 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1570398 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 11 years ago
parent
commit
9bc374a0bf

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -214,6 +214,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5944. LeaseManager:findLeaseWithPrefixPath can't handle path like /a/b/
     and cause SecondaryNameNode failed do checkpoint (Yunjiong Zhao via brandonli)
 
+    HDFS-5982. Need to update snapshot manager when applying editlog for deleting
+    a snapshottable directory. (jing9)
+
   BREAKDOWN OF HDFS-5698 SUBTASKS AND RELATED JIRAS
 
     HDFS-5717. Save FSImage header in protobuf. (Haohui Mai via jing9)
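
The CHANGES.txt entry above records the bug in prose: the RPC delete path already unregistered deleted snapshottable directories from the SnapshotManager, but the edit-log replay path did not, so a restarted NameNode kept stale entries for directories that no longer existed. As a minimal illustration, the self-contained Java sketch below (hypothetical names, not the real NameNode classes) models the corrected order of operations: collect the snapshottable directories under the target path, remove the inodes, then drop the collected entries from the manager.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * Simplified, self-contained model of the bookkeeping fixed by HDFS-5982.
 * The names below are hypothetical stand-ins, not the real NameNode classes.
 */
public class SnapshotDeleteModel {

  /** Stands in for SnapshotManager's registry of snapshottable directories. */
  static final Set<String> snapshottableDirs = new HashSet<String>();

  /**
   * Stands in for FSDirectory.checkSnapshot(): collect the snapshottable
   * directories (without snapshots) at or under 'path' so the caller can
   * unregister them after the inodes are removed.
   */
  static List<String> collectSnapshottableDirsUnder(String path) {
    List<String> found = new ArrayList<String>();
    for (String dir : snapshottableDirs) {
      if (dir.equals(path) || dir.startsWith(path + "/")) {
        found.add(dir);
      }
    }
    return found;
  }

  /**
   * The fixed delete flow: both the RPC delete and the edit-log replay
   * (unprotectedDelete) now run the same three steps.
   */
  static void delete(String path) {
    List<String> toRemove = collectSnapshottableDirsUnder(path);
    // ... the actual inode removal happens here ...
    snapshottableDirs.removeAll(toRemove);
  }

  public static void main(String[] args) {
    snapshottableDirs.add("/foo/bar1");
    snapshottableDirs.add("/foo/bar2");
    delete("/foo");
    // Before the fix, a replayed delete skipped the removeAll step and both
    // entries survived the restart. Prints: remaining: []
    System.out.println("remaining: " + snapshottableDirs);
  }
}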

+ 14 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -1308,20 +1308,12 @@ public class FSDirectory implements Closeable {
       if (!deleteAllowed(inodesInPath, src) ) {
         filesRemoved = -1;
       } else {
-        // Before removing the node, first check if the targetNode is for a
-        // snapshottable dir with snapshots, or its descendants have
-        // snapshottable dir with snapshots
-        final INode targetNode = inodesInPath.getLastINode();
         List<INodeDirectorySnapshottable> snapshottableDirs = 
             new ArrayList<INodeDirectorySnapshottable>();
-        checkSnapshot(targetNode, snapshottableDirs);
+        checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
         filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
             removedINodes, now);
-        if (snapshottableDirs.size() > 0) {
-          // There are some snapshottable directories without snapshots to be
-          // deleted. Need to update the SnapshotManager.
-          namesystem.removeSnapshottableDirs(snapshottableDirs);
-        }
+        namesystem.removeSnapshottableDirs(snapshottableDirs);
       }
     } finally {
       writeUnlock();
@@ -1383,18 +1375,25 @@ public class FSDirectory implements Closeable {
    * @param src a string representation of a path to an inode
    * @param mtime the time the inode is removed
    * @throws SnapshotAccessControlException if path is in RO snapshot
-   */ 
+   */
   void unprotectedDelete(String src, long mtime) throws UnresolvedLinkException,
-      QuotaExceededException, SnapshotAccessControlException {
+      QuotaExceededException, SnapshotAccessControlException, IOException {
     assert hasWriteLock();
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     List<INode> removedINodes = new ChunkedArrayList<INode>();
 
     final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
         normalizePath(src), false);
-    final long filesRemoved = deleteAllowed(inodesInPath, src) ? 
-        unprotectedDelete(inodesInPath, collectedBlocks, 
-            removedINodes, mtime) : -1;
+    long filesRemoved = -1;
+    if (deleteAllowed(inodesInPath, src)) {
+      List<INodeDirectorySnapshottable> snapshottableDirs = 
+          new ArrayList<INodeDirectorySnapshottable>();
+      checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
+      filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
+          removedINodes, mtime);
+      namesystem.removeSnapshottableDirs(snapshottableDirs); 
+    }
+
     if (filesRemoved >= 0) {
       getFSNamesystem().removePathAndBlocks(src, collectedBlocks, 
           removedINodes);
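
Note on the second hunk: unprotectedDelete(String, long) is the variant invoked while replaying the edit log rather than serving a client RPC, which is why the fix has to land here as well as in the locked delete path above. Replaying an OP_DELETE now performs the same checkSnapshot and removeSnapshottableDirs bookkeeping as the original operation, keeping the SnapshotManager consistent with the namespace after a restart. The widened throws clause (adding IOException) follows from calling checkSnapshot here, since checkSnapshot rejects the delete when a directory under the target still holds snapshots.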

+ 37 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
@@ -128,7 +129,42 @@ public class TestSnapshotDeletion {
     exception.expectMessage(error);
     hdfs.delete(sub, true);
   }
-  
+
+  /**
+   * Test applying editlog of operation which deletes a snapshottable directory
+   * without snapshots. The snapshottable dir list in snapshot manager should be
+   * updated.
+   */
+  @Test (timeout=300000)
+  public void testApplyEditLogForDeletion() throws Exception {
+    final Path foo = new Path("/foo");
+    final Path bar1 = new Path(foo, "bar1");
+    final Path bar2 = new Path(foo, "bar2");
+    hdfs.mkdirs(bar1);
+    hdfs.mkdirs(bar2);
+
+    // allow snapshots on bar1 and bar2
+    hdfs.allowSnapshot(bar1);
+    hdfs.allowSnapshot(bar2);
+    assertEquals(2, cluster.getNamesystem().getSnapshotManager()
+        .getNumSnapshottableDirs());
+    assertEquals(2, cluster.getNamesystem().getSnapshotManager()
+        .getSnapshottableDirs().length);
+
+    // delete /foo
+    hdfs.delete(foo, true);
+    cluster.restartNameNode(0);
+    // the snapshottable dir list in snapshot manager should be empty
+    assertEquals(0, cluster.getNamesystem().getSnapshotManager()
+        .getNumSnapshottableDirs());
+    assertEquals(0, cluster.getNamesystem().getSnapshotManager()
+        .getSnapshottableDirs().length);
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    hdfs.saveNamespace();
+    hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+    cluster.restartNameNode(0);
+  }
+
   /**
    * Deleting directory with snapshottable descendant with snapshots must fail.
    */
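
The new test covers both persistence paths: the first restartNameNode(0) forces the deletion of /foo to be re-applied from the edit log, after which the SnapshotManager must report zero snapshottable directories; the saveNamespace round-trip inside safe mode, followed by a second restart, then confirms the fsimage can be written and reloaded without any stale snapshottable entries.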