Browse Source

HDFS-4464. Combine collectSubtreeBlocksAndClear with deleteDiffsForSnapshot and rename it to destroySubtreeAndCollectBlocks.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1441680 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 12 years ago
parent
commit
e7db60fbfc

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt

@@ -143,3 +143,6 @@ Branch-2802 Snapshot (Unreleased)

  HDFS-4361. When listing snapshottable directories, only return those
  where the user has permission to take snapshots.  (Jing Zhao via szetszwo)
+
+  HDFS-4464. Combine collectSubtreeBlocksAndClear with deleteDiffsForSnapshot
+  and rename it to destroySubtreeAndCollectBlocks.  (szetszwo)

+ 13 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -766,7 +766,7 @@ public class FSDirectory implements Closeable {
          INode rmdst = removedDst;
          removedDst = null;
          BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-          filesDeleted = rmdst.collectSubtreeBlocksAndClear(collectedBlocks);
+          filesDeleted = rmdst.destroySubtreeAndCollectBlocks(null, collectedBlocks);
          getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
        }

@@ -1009,19 +1009,18 @@ public class FSDirectory implements Closeable {
    }
    waitForReady();
    long now = now();
-    int filesRemoved;
+    final int filesRemoved;
    writeLock();
    try {
      final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
          normalizePath(src), false);
-      final INode[] inodes = inodesInPath.getINodes();
-      if (checkPathINodes(inodes, src) == 0) {
+      if (!deleteAllowed(inodesInPath, src) ) {
        filesRemoved = 0;
      } else {
        // Before removing the node, first check if the targetNode is for a
        // snapshottable dir with snapshots, or its descendants have
        // snapshottable dir with snapshots
-        INode targetNode = inodes[inodes.length-1];
+        final INode targetNode = inodesInPath.getLastINode();
        List<INodeDirectorySnapshottable> snapshottableDirs = 
            new ArrayList<INodeDirectorySnapshottable>();
        INode snapshotNode = hasSnapshot(targetNode, snapshottableDirs);
@@ -1050,21 +1049,23 @@ public class FSDirectory implements Closeable {
    return true;
  }

-  private int checkPathINodes(INode[] inodes, String src) {
+  private static boolean deleteAllowed(final INodesInPath iip,
+      final String src) {
+    final INode[] inodes = iip.getINodes(); 
    if (inodes == null || inodes.length == 0
        || inodes[inodes.length - 1] == null) {
      if(NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
            + "failed to remove " + src + " because it does not exist");
      }
-      return 0;
+      return false;
    } else if (inodes.length == 1) { // src is the root
      NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
          + "failed to remove " + src
          + " because the root is not allowed to be deleted");
-      return 0;
+      return false;
    }
-    return inodes.length;
+    return true;
  }

  /**
@@ -1100,16 +1101,11 @@ public class FSDirectory implements Closeable {
    throws UnresolvedLinkException, SnapshotAccessControlException {
    assert hasWriteLock();
    BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
-    int filesRemoved = 0;

    final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
        normalizePath(src), false);
-    final INode[] inodes = inodesInPath.getINodes();
-    if (checkPathINodes(inodes, src) == 0) {
-      filesRemoved = 0;
-    } else {
-      filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks, mtime);
-    }
+    final int filesRemoved = deleteAllowed(inodesInPath, src)?
+        unprotectedDelete(inodesInPath, collectedBlocks, mtime): 0;
    if (filesRemoved > 0) {
      getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
    }
@@ -1143,7 +1139,7 @@ public class FSDirectory implements Closeable {
    // if snapshotCopy == targetNode, it means that the file is also stored in
    // a snapshot so that the block should not be removed.
    final int filesRemoved = snapshotCopy == targetNode? 0
-        : targetNode.collectSubtreeBlocksAndClear(collectedBlocks);
+        : targetNode.destroySubtreeAndCollectBlocks(null, collectedBlocks);
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
          + targetNode.getFullPathName() + " is removed");
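
The FSDirectory changes above replace checkPathINodes, whose int return value was compared against 0, with the boolean predicate deleteAllowed, so the caller collapses into a single conditional expression. Below is a minimal, self-contained sketch of that pattern; Inode, ResolvedPath, delete and destroy are invented stand-ins for illustration, not the HDFS classes.

```java
import java.util.ArrayList;
import java.util.List;

class DeleteSketch {
  static class Inode { final String name; Inode(String n) { name = n; } }

  // Stand-in for INodesInPath: the inodes resolved along a path.
  static class ResolvedPath {
    final Inode[] inodes;
    ResolvedPath(Inode... inodes) { this.inodes = inodes; }
  }

  // Boolean precondition, analogous to deleteAllowed(iip, src): the path must
  // resolve fully and must not be the root (length == 1).
  static boolean deleteAllowed(ResolvedPath path, String src) {
    final Inode[] inodes = path.inodes;
    if (inodes == null || inodes.length == 0 || inodes[inodes.length - 1] == null) {
      System.out.println("failed to remove " + src + ": it does not exist");
      return false;
    } else if (inodes.length == 1) {
      System.out.println("failed to remove " + src + ": the root cannot be deleted");
      return false;
    }
    return true;
  }

  // The caller becomes a single conditional expression, as in the patch.
  static int delete(ResolvedPath path, String src, List<Long> collectedBlocks) {
    return deleteAllowed(path, src) ? destroy(path, collectedBlocks) : 0;
  }

  // Pretend destruction: collect one block and report one removed file.
  static int destroy(ResolvedPath path, List<Long> collectedBlocks) {
    collectedBlocks.add(42L);
    return 1;
  }

  public static void main(String[] args) {
    ResolvedPath root = new ResolvedPath(new Inode("/"));
    ResolvedPath file = new ResolvedPath(new Inode("/"), new Inode("f"));
    System.out.println(delete(root, "/", new ArrayList<>()));  // prints 0
    System.out.println(delete(file, "/f", new ArrayList<>())); // prints 1
  }
}
```

A boolean predicate states the precondition directly; the old int return conflated "how many inodes" with "is deletion allowed".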

+ 11 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java

@@ -314,16 +314,19 @@ public abstract class INode implements Diff.Element<byte[]> {
  }

  /**
-   * Collect all the blocks in all children of this INode. Count and return the
-   * number of files in the sub tree. Also clears references since this INode is
-   * deleted.
+   * Destroy the subtree under this inode and collect the blocks from the
+   * descents for further block deletion/update. If snapshot is null, the
+   * subtree resides in the current state; otherwise, the subtree resides in the
+   * given snapshot. The method also clears the references in the deleted inodes
+   * and remove the corresponding snapshot information, if there is any.
   * 
-   * @param info
-   *          Containing all the blocks collected from the children of this
-   *          INode. These blocks later should be removed/updated in the
-   *          blocksMap.
+   * @param snapshot the snapshot to be deleted; null means the current state.
+   * @param collectedBlocks blocks collected from the descents for further block
+   *                        deletion/update will be added to the given map.
+   * @return the number of deleted files in the subtree.
   */
-  abstract int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info);
+  abstract int destroySubtreeAndCollectBlocks(Snapshot snapshot,
+      BlocksMapUpdateInfo collectedBlocks);

  /** Compute {@link ContentSummary}. */
  public final ContentSummary computeContentSummary() {

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

@@ -581,16 +581,16 @@ public class INodeDirectory extends INode {
  }

  @Override
-  int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
-    int total = 1;
-    if (children == null) {
-      return total;
+  public int destroySubtreeAndCollectBlocks(final Snapshot snapshot,
+      final BlocksMapUpdateInfo collectedBlocks) {
+    int total = 0;
+    for (INode child : getChildrenList(snapshot)) {
+      total += child.destroySubtreeAndCollectBlocks(snapshot, collectedBlocks);
    }
-    for (INode child : children) {
-      total += child.collectSubtreeBlocksAndClear(info);
+    if (snapshot == null) {
+      parent = null;
+      children = null;
    }
-    parent = null;
-    children = null;
    return total;
  }


+ 8 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -236,11 +236,16 @@ public class INodeFile extends INode implements BlockCollection {
  }

  @Override
-  public int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
+  public int destroySubtreeAndCollectBlocks(final Snapshot snapshot,
+      final BlocksMapUpdateInfo collectedBlocks) {
+    if (snapshot != null) {
+      return 0;
+    }
+
    parent = null;
-    if(blocks != null && info != null) {
+    if (blocks != null && collectedBlocks != null) {
      for (BlockInfo blk : blocks) {
-        info.addDeleteBlock(blk);
+        collectedBlocks.addDeleteBlock(blk);
        blk.setBlockCollection(null);
      }
    }
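
Taken together, the INode, INodeDirectory, and INodeFile hunks above define one recursive contract: a null snapshot destroys the current subtree and clears parent/children references, while a non-null snapshot removes only that snapshot's state (a plain file has none, hence the early `return 0`). Here is a self-contained sketch of that contract, assuming blocks are plain longs and Snapshot is a marker object; Node, FileNode, and DirNode are invented names, not HDFS classes.

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class DestroySketch {
  static class Snapshot {}

  abstract static class Node {
    Node parent;
    // null snapshot: destroy the current tree; non-null: remove only that
    // snapshot's state. Returns the number of files deleted; blocks to drop
    // from the blocksMap are appended to collectedBlocks.
    abstract int destroySubtreeAndCollectBlocks(Snapshot snapshot,
        List<Long> collectedBlocks);
  }

  static class FileNode extends Node {
    final List<Long> blocks;
    FileNode(List<Long> blocks) { this.blocks = blocks; }

    @Override
    int destroySubtreeAndCollectBlocks(Snapshot snapshot, List<Long> collectedBlocks) {
      if (snapshot != null) {
        return 0; // a plain file keeps no per-snapshot state to destroy
      }
      parent = null;
      collectedBlocks.addAll(blocks);
      return 1;
    }
  }

  static class DirNode extends Node {
    List<Node> children = new ArrayList<>();

    // In the patch the children come from getChildrenList(snapshot); this
    // sketch only models the current-state children list.
    List<Node> childrenList(Snapshot snapshot) {
      if (children == null) {
        return Collections.emptyList();
      }
      return children;
    }

    @Override
    int destroySubtreeAndCollectBlocks(Snapshot snapshot, List<Long> collectedBlocks) {
      int total = 0;
      for (Node child : childrenList(snapshot)) {
        total += child.destroySubtreeAndCollectBlocks(snapshot, collectedBlocks);
      }
      if (snapshot == null) { // only a current-state delete clears references
        parent = null;
        children = null;
      }
      return total;
    }
  }

  public static void main(String[] args) {
    DirNode dir = new DirNode();
    dir.children.add(new FileNode(List.of(1L, 2L)));
    dir.children.add(new FileNode(List.of(3L)));
    List<Long> collected = new ArrayList<>();
    System.out.println(dir.destroySubtreeAndCollectBlocks(null, collected)); // 2
    System.out.println(collected); // [1, 2, 3]
  }
}
```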

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java

@@ -71,7 +71,8 @@ public class INodeSymlink extends INode {
  }

  @Override
-  int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
+  int destroySubtreeAndCollectBlocks(final Snapshot snapshot,
+      final BlocksMapUpdateInfo collectedBlocks) {
    return 1;
  }


+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java

@@ -60,7 +60,7 @@ abstract class AbstractINodeDiffList<N extends INode,
   * @return The SnapshotDiff containing the deleted snapshot. 
   *         Null if the snapshot with the given name does not exist. 
   */
-  final AbstractINodeDiff<N, D> deleteSnapshotDiff(Snapshot snapshot,
+  final D deleteSnapshotDiff(final Snapshot snapshot,
      final BlocksMapUpdateInfo collectedBlocks) {
    int snapshotIndex = Collections.binarySearch(diffs, snapshot);
    if (snapshotIndex < 0) {
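
The AbstractINodeDiffList hunk narrows the declared return type from AbstractINodeDiff<N, D> to the type parameter D itself, so callers receive the concrete diff type without a cast. A micro-sketch of the idea using a self-referential bound; every name here is illustrative rather than the HDFS class:

```java
import java.util.ArrayList;
import java.util.List;

class DiffListSketch {
  abstract static class AbstractDiff<D extends AbstractDiff<D>> {}

  static class DirectoryDiff extends AbstractDiff<DirectoryDiff> {
    void directoryOnlyOperation() { System.out.println("directory diff removed"); }
  }

  abstract static class AbstractDiffList<D extends AbstractDiff<D>> {
    final List<D> diffs = new ArrayList<>();

    // Declared as D (not AbstractDiff<D>), so callers get the concrete type back.
    final D deleteLast() {
      return diffs.isEmpty() ? null : diffs.remove(diffs.size() - 1);
    }
  }

  static class DirectoryDiffList extends AbstractDiffList<DirectoryDiff> {}

  public static void main(String[] args) {
    DirectoryDiffList list = new DirectoryDiffList();
    list.diffs.add(new DirectoryDiff());
    DirectoryDiff d = list.deleteLast(); // no cast needed
    d.directoryOnlyOperation();
  }
}
```

With the wider AbstractDiff return type, the call site would need a cast to reach any DirectoryDiff-specific members.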

+ 4 - 23
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java

@@ -273,37 +273,18 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
   */
  Snapshot removeSnapshot(String snapshotName,
      BlocksMapUpdateInfo collectedBlocks) throws SnapshotException {
-    final int indexOfOld = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
-    if (indexOfOld < 0) {
+    final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
+    if (i < 0) {
      throw new SnapshotException("Cannot delete snapshot " + snapshotName
          + " from path " + this.getFullPathName()
          + ": the snapshot does not exist.");
    } else {
-      Snapshot snapshot = snapshotsByNames.remove(indexOfOld);
-      deleteDiffsForSnapshot(snapshot, this, collectedBlocks);
+      final Snapshot snapshot = snapshotsByNames.remove(i);
+      destroySubtreeAndCollectBlocks(snapshot, collectedBlocks);
      return snapshot;
    }
  }

-  /**
-   * Recursively delete DirectoryDiff associated with the given snapshot under a
-   * directory
-   */
-  private void deleteDiffsForSnapshot(Snapshot snapshot, INodeDirectory dir,
-      BlocksMapUpdateInfo collectedBlocks) {
-    if (dir instanceof INodeDirectoryWithSnapshot) {
-      INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) dir;
-      sdir.getDiffs().deleteSnapshotDiff(snapshot, collectedBlocks);
-    }
-    ReadOnlyList<INode> children = dir.getChildrenList(null);
-    for (INode child : children) {
-      if (child instanceof INodeDirectory) {
-        deleteDiffsForSnapshot(snapshot, (INodeDirectory) child,
-            collectedBlocks);
-      }
-    }
-  }
-
  /**
   * Compute the difference between two snapshots (or a snapshot and the current
   * directory) of the directory.

+ 12 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java

@@ -219,7 +219,8 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
        @Override
        public void process(INode inode) {
          if (inode != null && inode instanceof INodeFile) {
-            ((INodeFile)inode).collectSubtreeBlocksAndClear(collectedBlocks);
+            ((INodeFile)inode).destroySubtreeAndCollectBlocks(null,
+                collectedBlocks);
          }
        }
      });
@@ -602,4 +603,14 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
    }
    return dirNum;
  }
+
+  @Override
+  public int destroySubtreeAndCollectBlocks(final Snapshot snapshot,
+      final BlocksMapUpdateInfo collectedBlocks) {
+    final int n = super.destroySubtreeAndCollectBlocks(snapshot, collectedBlocks);
+    if (snapshot != null) {
+      getDiffs().deleteSnapshotDiff(snapshot, collectedBlocks);
+    }
+    return n;
+  }
}
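
This override is what lets removeSnapshot in INodeDirectorySnapshottable drop the hand-rolled deleteDiffsForSnapshot recursion: the shared destroySubtreeAndCollectBlocks walk visits every directory, and each snapshot-aware directory deletes its own diff for the removed snapshot via dynamic dispatch. A hedged sketch of that dispatch; DiffList and its deleteSnapshotDiff are stubs invented for the example, not the HDFS API:

```java
import java.util.ArrayList;
import java.util.List;

class SnapshotDeleteSketch {
  static class Snapshot {}

  static class DiffList {
    final List<Snapshot> diffs = new ArrayList<>();
    void deleteSnapshotDiff(Snapshot s, List<Long> collectedBlocks) {
      diffs.remove(s); // blocks referenced only by this diff would be collected here
    }
  }

  static class Dir {
    List<Dir> children = new ArrayList<>();

    int destroySubtreeAndCollectBlocks(Snapshot snapshot, List<Long> collectedBlocks) {
      int total = 0;
      for (Dir child : children) {
        total += child.destroySubtreeAndCollectBlocks(snapshot, collectedBlocks);
      }
      return total;
    }
  }

  static class DirWithSnapshot extends Dir {
    final DiffList diffs = new DiffList();

    @Override
    int destroySubtreeAndCollectBlocks(Snapshot snapshot, List<Long> collectedBlocks) {
      // Recurse first, then drop this directory's diff for the deleted snapshot.
      final int n = super.destroySubtreeAndCollectBlocks(snapshot, collectedBlocks);
      if (snapshot != null) {
        diffs.deleteSnapshotDiff(snapshot, collectedBlocks);
      }
      return n;
    }
  }

  public static void main(String[] args) {
    DirWithSnapshot root = new DirWithSnapshot();
    Snapshot s = new Snapshot();
    root.diffs.diffs.add(s);
    root.destroySubtreeAndCollectBlocks(s, new ArrayList<>());
    System.out.println(root.diffs.diffs.isEmpty()); // true: the diff is gone
  }
}
```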

+ 7 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java

@@ -117,12 +117,16 @@ public class INodeFileUnderConstructionWithSnapshot
  }

  @Override
-  public int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
+  public int destroySubtreeAndCollectBlocks(final Snapshot snapshot,
+      final BlocksMapUpdateInfo collectedBlocks) {
+    if (snapshot != null) {
+      return 0;
+    }
    if (next == null || next == this) {
      // this is the only remaining inode.
-      return super.collectSubtreeBlocksAndClear(info);
+      return super.destroySubtreeAndCollectBlocks(null, collectedBlocks);
    } else {
-      return Util.collectSubtreeBlocksAndClear(this, info);
+      return Util.collectSubtreeBlocksAndClear(this, collectedBlocks);
    }
  }
}

+ 7 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java

@@ -100,12 +100,16 @@ public class INodeFileWithSnapshot extends INodeFile
  }

  @Override
-  public int collectSubtreeBlocksAndClear(BlocksMapUpdateInfo info) {
+  public int destroySubtreeAndCollectBlocks(final Snapshot snapshot,
+      final BlocksMapUpdateInfo collectedBlocks) {
+    if (snapshot != null) {
+      return 0;
+    }
    if (next == null || next == this) {
      // this is the only remaining inode.
-      return super.collectSubtreeBlocksAndClear(info);
+      return super.destroySubtreeAndCollectBlocks(snapshot, collectedBlocks);
    } else {
-      return Util.collectSubtreeBlocksAndClear(this, info);
+      return Util.collectSubtreeBlocksAndClear(this, collectedBlocks);
    }
  }
}
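
Both file-with-snapshot classes guard on `next == null || next == this` before delegating to Util.collectSubtreeBlocksAndClear. The guard suggests that file inodes sharing blocks across snapshots are linked into a circular list through `next`, where null or a self-loop means this is the only remaining inode. A minimal sketch of that test under this assumption; FileNode is an invented name:

```java
class CircularListSketch {
  static class FileNode {
    FileNode next; // next inode in the circular list; null or this if alone

    boolean isOnlyRemaining() {
      return next == null || next == this;
    }
  }

  public static void main(String[] args) {
    FileNode a = new FileNode();
    FileNode b = new FileNode();
    a.next = b; // two-element circle: a -> b -> a
    b.next = a;
    System.out.println(a.isOnlyRemaining()); // false: another version still exists
    b.next = b; // b now points to itself: it is the only remaining inode
    System.out.println(b.isOnlyRemaining()); // true
  }
}
```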