
HDFS-4557. Fix FSDirectory#delete when INode#cleanSubtree returns 0. Contributed by Jing Zhao

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1454138 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze authored 12 years ago
parent / revision: 8d95784bf1

hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt  (+3, -0)

@@ -185,3 +185,6 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4545. With snapshots, FSDirectory.unprotectedSetReplication(..) always
   changes file replication but it may or may not changes block replication.
   (szetszwo)
+
+  HDFS-4557. Fix FSDirectory#delete when INode#cleanSubtree returns 0.
+  (Jing Zhao via szetszwo)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java  (+8, -8)

@@ -757,7 +757,7 @@ public class FSDirectory implements Closeable {
         getFSNamesystem().unprotectedChangeLease(src, dst);
 
         // Collect the blocks and remove the lease for previous dst
-        int filesDeleted = 0;
+        int filesDeleted = -1;
         if (removedDst != null) {
           INode rmdst = removedDst;
           removedDst = null;
@@ -772,7 +772,7 @@ public class FSDirectory implements Closeable {
           // deleted. Need to update the SnapshotManager.
           namesystem.removeSnapshottableDirs(snapshottableDirs);
         }
-        return filesDeleted >0;
+        return filesDeleted >= 0;
       }
     } finally {
       if (removedSrc != null) {
@@ -1017,7 +1017,7 @@ public class FSDirectory implements Closeable {
       final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
           normalizePath(src), false);
       if (!deleteAllowed(inodesInPath, src) ) {
-        filesRemoved = 0;
+        filesRemoved = -1;
       } else {
         // Before removing the node, first check if the targetNode is for a
         // snapshottable dir with snapshots, or its descendants have
@@ -1041,10 +1041,10 @@ public class FSDirectory implements Closeable {
     } finally {
       writeUnlock();
     }
-    fsImage.getEditLog().logDelete(src, now);
-    if (filesRemoved <= 0) {
+    if (filesRemoved < 0) {
       return false;
     }
+    fsImage.getEditLog().logDelete(src, now);
     incrDeletedFileCount(filesRemoved);
     // Blocks will be deleted later by the caller of this method
     getFSNamesystem().removePathAndBlocks(src, null);
@@ -1107,8 +1107,8 @@ public class FSDirectory implements Closeable {
     final INodesInPath inodesInPath = rootDir.getINodesInPath4Write(
         normalizePath(src), false);
     final int filesRemoved = deleteAllowed(inodesInPath, src)?
-        unprotectedDelete(inodesInPath, collectedBlocks, mtime): 0;
-    if (filesRemoved > 0) {
+        unprotectedDelete(inodesInPath, collectedBlocks, mtime): -1;
+    if (filesRemoved >= 0) {
       getFSNamesystem().removePathAndBlocks(src, collectedBlocks);
     }
   }
@@ -1128,7 +1128,7 @@ public class FSDirectory implements Closeable {
     // check if target node exists
     INode targetNode = iip.getLastINode();
     if (targetNode == null) {
-      return 0;
+      return -1;
     }
 
     // record modification

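Taken together, the FSDirectory hunks change what the return value means: 0 used to be treated as "nothing deleted", but with snapshots a successful delete can legitimately remove zero inodes because the subtree is still referenced by a snapshot. The patch reserves -1 for "delete not allowed or target missing", treats any count >= 0 as success, and writes the edit-log entry only after that check passes. Below is a minimal standalone sketch of the convention, using stand-in method names (deleteAllowed, cleanSubtree, logDelete) and an illustrative DELETE_FAILED constant rather than the real FSDirectory API:

    // Simplified sketch of the new return-value convention in FSDirectory#delete.
    //   -1  -> delete not permitted or target missing: no edit-log entry, report failure
    //   >=0 -> delete succeeded; 0 is a valid count when the subtree survives in a snapshot
    public class DeleteSentinelSketch {
        static final int DELETE_FAILED = -1;      // illustrative constant, not in HDFS

        boolean delete(String src, long mtime) {
            int filesRemoved = deleteAllowed(src)
                ? cleanSubtree(src)               // may legitimately return 0
                : DELETE_FAILED;
            if (filesRemoved < 0) {               // old code used "<= 0" and lost valid deletes
                return false;
            }
            logDelete(src, mtime);                // log only after the delete is known to succeed
            return true;
        }

        // Stubs standing in for the real FSDirectory/INode calls.
        boolean deleteAllowed(String src) { return true; }
        int cleanSubtree(String src) { return 0; }
        void logDelete(String src, long mtime) { }

        public static void main(String[] args) {
            // A zero-count delete is still a success under the new convention.
            System.out.println(new DeleteSentinelSketch().delete("/snapshotted/file", 0L));
        }
    }
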
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java  (+8, -14)

@@ -5664,8 +5664,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     //TODO: need to update metrics in corresponding SnapshotManager method 
 
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
-      logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
-          "allowSnapshot", path, null, null);
+      logAuditEvent(true, "allowSnapshot", path, null, null);
     }
   }
   
@@ -5692,8 +5691,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     //TODO: need to update metrics in corresponding SnapshotManager method 
     
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
-      logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
-          "disallowSnapshot", path, null, null);
+      logAuditEvent(true, "disallowSnapshot", path, null, null);
     }
   }
   
@@ -5729,8 +5727,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
       Path rootPath = new Path(snapshotRoot, HdfsConstants.DOT_SNAPSHOT_DIR
           + Path.SEPARATOR + snapshotName);
-      logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
-          "createSnapshot", snapshotRoot, rootPath.toString(), null);
+      logAuditEvent(true, "createSnapshot", snapshotRoot, rootPath.toString(),
+          null);
     }
   }
   
@@ -5768,8 +5766,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           + "/" + snapshotOldName);
       Path newSnapshotRoot = new Path(path, HdfsConstants.DOT_SNAPSHOT_DIR
           + "/" + snapshotNewName);
-      logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
-          "renameSnapshot", oldSnapshotRoot.toString(),
+      logAuditEvent(true, "renameSnapshot", oldSnapshotRoot.toString(),
           newSnapshotRoot.toString(), null);
     }
   }
@@ -5795,8 +5792,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       readUnlock();
     }
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
-      logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
-            "listSnapshottableDirectory", null, null, null);
+      logAuditEvent(true, "listSnapshottableDirectory", null, null, null);
     }
     return status;
   }
@@ -5828,8 +5824,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
     
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
-      logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
-            "computeSnapshotDiff", null, null, null);
+      logAuditEvent(true, "computeSnapshotDiff", null, null, null);
     }
     return diffs != null ? diffs.generateReport() : new SnapshotDiffReport(
         path, fromSnapshot, toSnapshot,
@@ -5874,8 +5869,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
       Path rootPath = new Path(snapshotRoot, HdfsConstants.DOT_SNAPSHOT_DIR
           + Path.SEPARATOR + snapshotName);
-      logAuditEvent(UserGroupInformation.getCurrentUser(), getRemoteIp(),
-          "deleteSnapshot", rootPath.toString(), null, null);
+      logAuditEvent(true, "deleteSnapshot", rootPath.toString(), null, null);
     }
   }
 

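The FSNamesystem hunks are mechanical: each snapshot operation drops the long-form audit call that passes UserGroupInformation.getCurrentUser() and getRemoteIp() explicitly and uses a shorter logAuditEvent overload whose first argument is a success flag. The sketch below shows the delegation pattern such a convenience overload typically implements; the signatures are assumptions made for illustration, not the actual FSNamesystem methods:

    import java.net.InetAddress;

    // Sketch of a convenience audit overload that resolves caller context itself,
    // so call sites only pass the success flag and the command details.
    public class AuditLogSketch {
        void logAuditEvent(boolean succeeded, String cmd, String src, String dst,
                Object status) {
            // Resolve user and remote address once here instead of at every call site.
            logAuditEvent(succeeded, currentUser(), remoteIp(), cmd, src, dst, status);
        }

        void logAuditEvent(boolean succeeded, String user, InetAddress addr,
                String cmd, String src, String dst, Object status) {
            System.out.printf("allowed=%b ugi=%s ip=%s cmd=%s src=%s dst=%s%n",
                succeeded, user, addr, cmd, src, dst);
        }

        String currentUser() { return System.getProperty("user.name"); }
        InetAddress remoteIp() { return InetAddress.getLoopbackAddress(); }

        public static void main(String[] args) {
            new AuditLogSketch().logAuditEvent(true, "createSnapshot", "/dir",
                "/dir/.snapshot/s1", null);
        }
    }
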
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java  (+6, -1)

@@ -303,6 +303,7 @@ public class INodeFile extends INode implements BlockCollection {
 
   @Override
   public int destroyAndCollectBlocks(BlocksMapUpdateInfo collectedBlocks) {
+    int total = 1;
     if (blocks != null && collectedBlocks != null) {
       for (BlockInfo blk : blocks) {
         collectedBlocks.addDeleteBlock(blk);
@@ -311,7 +312,11 @@ public class INodeFile extends INode implements BlockCollection {
     }
     setBlocks(null);
     clearReferences();
-    return 1;
+    
+    if (this instanceof FileWithSnapshot) {
+      total += ((FileWithSnapshot) this).getDiffs().clear();
+    }
+    return total;
   }
   
   @Override

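This INodeFile change folds the number of cleared snapshot diffs into the count returned by destroyAndCollectBlocks, so totals that bubble up through cleanSubtree account for the diff records as well as the file itself. A reduced sketch of the counting follows; the FileWithSnapshot check and the diff list are collapsed into a plain in-memory list, not the real HDFS types:

    import java.util.ArrayList;
    import java.util.List;

    // Reduced stand-in for INodeFile#destroyAndCollectBlocks with a snapshot diff list.
    public class DestroyCountSketch {
        private final List<String> diffs = new ArrayList<>();   // plays the role of getDiffs()

        int destroyAndCollectBlocks(List<String> collectedBlocks) {
            int total = 1;                       // the file inode itself
            // ...block IDs would be added to collectedBlocks here...
            total += clearDiffs();               // also count the snapshot diffs being dropped
            return total;
        }

        int clearDiffs() {                       // mirrors AbstractINodeDiffList#clear()
            final int n = diffs.size();
            diffs.clear();
            return n;
        }

        public static void main(String[] args) {
            DestroyCountSketch f = new DestroyCountSketch();
            f.diffs.add("s1-diff");
            System.out.println(f.destroyAndCollectBlocks(new ArrayList<>()));  // prints 2
        }
    }
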
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java  (+1, -1)

@@ -50,7 +50,7 @@ abstract class AbstractINodeDiffList<N extends INode,
   }
   
   /** Get the size of the list and then clear it. */
-  int clear() {
+  public int clear() {
     final int n = diffs.size();
     diffs.clear();
     return n;

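Widening clear() from package-private to public is what allows the new call in INodeFile#destroyAndCollectBlocks to compile: INodeFile lives in org.apache.hadoop.hdfs.server.namenode while AbstractINodeDiffList sits in the snapshot subpackage, and a package-private method is not visible across that package boundary.
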
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java  (+1, -1)

@@ -121,7 +121,7 @@ public class INodeFileUnderConstructionWithSnapshot
     } else { // delete a snapshot
       return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks);
     }
-    return 1;
+    return prior == null ? 1 : 0;
   }
 
   @Override

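With FSDirectory now accepting any non-negative count as success, the delete-the-current-file branch can report zero honestly: the method returns 1 only when no prior snapshot (prior == null) still refers to the file, and 0 otherwise, instead of always claiming one deleted inode.
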
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java  (+1, -11)

@@ -22,8 +22,6 @@ import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 
-import com.google.common.base.Preconditions;
-
 /**
  * Represent an {@link INodeFile} that is snapshotted.
  * Note that snapshot files are represented by {@link INodeFileSnapshot}.
@@ -94,15 +92,7 @@ public class INodeFileWithSnapshot extends INodeFile
     } else { // delete a snapshot
       return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks);
     }
-    return 1;
-  }
-  
-  @Override
-  public int destroyAndCollectBlocks(
-      final BlocksMapUpdateInfo collectedBlocks) {
-    Preconditions.checkState(this.isCurrentFileDeleted);
-    final int n = diffs.clear();
-    return n + super.destroyAndCollectBlocks(collectedBlocks);
+    return prior == null ? 1 : 0;
   }
 
   @Override
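The same return-value change is applied in INodeFileWithSnapshot, and its destroyAndCollectBlocks override (along with the Preconditions import it needed) is removed outright: the base INodeFile#destroyAndCollectBlocks now clears the diff list itself via the FileWithSnapshot check shown above, so the override would have been redundant.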