HDFS-15054. Delete Snapshot not updating new modification time. Contributed by hemanthboyina.

Ayush Saxena, 5 years ago
Parent commit: 300505c562
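
In short: deleting a snapshot now sets the snapshottable directory's modification time to Time.now(), and the timestamp is persisted in DeleteSnapshotOp so the new mtime survives edit-log replay and NameNode restarts. A minimal sketch of the observable behavior (the class name and the pre-created snapshottable /dir are assumptions for illustration, not part of the commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical driver; assumes a running HDFS cluster where /dir exists
// and has been made snapshottable (hdfs dfsadmin -allowSnapshot /dir).
public class SnapshotDeletionMtimeSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/dir");

    fs.createSnapshot(dir, "s1");
    long before = fs.getFileStatus(dir).getModificationTime();

    fs.deleteSnapshot(dir, "s1");
    long after = fs.getFileStatus(dir).getModificationTime();

    // Before this fix the deletion left the mtime untouched; with it,
    // the mtime is set at deletion time and recorded in the edit op.
    System.out.println("mtime updated: " + (after != before));
  }
}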

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java

@@ -253,9 +253,11 @@ class FSDirSnapshotOp {
     ChunkedArrayList<INode> removedINodes = new ChunkedArrayList<>();
     INode.ReclaimContext context = new INode.ReclaimContext(
         fsd.getBlockStoragePolicySuite(), collectedBlocks, removedINodes, null);
+    // time of snapshot deletion
+    final long now = Time.now();
     fsd.writeLock();
     try {
-      snapshotManager.deleteSnapshot(iip, snapshotName, context);
+      snapshotManager.deleteSnapshot(iip, snapshotName, context, now);
       fsd.updateCount(iip, context.quotaDelta(), false);
       fsd.removeFromInodeMap(removedINodes);
       fsd.updateReplicationFactor(context.collectedBlocks()
@@ -265,7 +267,7 @@ class FSDirSnapshotOp {
     }
     removedINodes.clear();
     fsd.getEditLog().logDeleteSnapshot(snapshotRoot, snapshotName,
-        logRetryCache);
+        logRetryCache, now);
 
     return collectedBlocks;
   }

+ 11 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -1133,9 +1133,18 @@ public class FSEditLog implements LogsPurgeable {
     logEdit(op);
   }
   
-  void logDeleteSnapshot(String snapRoot, String snapName, boolean toLogRpcIds) {
+  /**
+   * Log that a snapshot is deleted.
+   * @param snapRoot Root of the snapshot.
+   * @param snapName Name of the snapshot.
+   * @param toLogRpcIds If it is logging RPC ids.
+   * @param mtime The snapshot deletion time set by Time.now().
+   */
+  void logDeleteSnapshot(String snapRoot, String snapName, boolean toLogRpcIds,
+      long mtime) {
     DeleteSnapshotOp op = DeleteSnapshotOp.getInstance(cache.get())
-        .setSnapshotRoot(snapRoot).setSnapshotName(snapName);
+        .setSnapshotRoot(snapRoot).setSnapshotName(snapName)
+        .setSnapshotMTime(mtime);
     logRpcIds(op, toLogRpcIds);
     logEdit(op);
   }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -820,7 +820,7 @@ public class FSEditLogLoader {
       fsNamesys.getSnapshotManager().deleteSnapshot(iip,
           deleteSnapshotOp.snapshotName,
           new INode.ReclaimContext(fsNamesys.dir.getBlockStoragePolicySuite(),
-              collectedBlocks, removedINodes, null));
+              collectedBlocks, removedINodes, null), deleteSnapshotOp.mtime);
       fsNamesys.getBlockManager().removeBlocksAndUpdateSafemodeTotal(
           collectedBlocks);
       collectedBlocks.clear();

+ 19 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java

@@ -3529,6 +3529,8 @@ public abstract class FSEditLogOp {
   static class DeleteSnapshotOp extends FSEditLogOp {
     String snapshotRoot;
     String snapshotName;
+    /** Modification time of the edit set by Time.now(). */
+    long mtime;
     
     DeleteSnapshotOp() {
       super(OP_DELETE_SNAPSHOT);
@@ -3542,22 +3544,32 @@ public abstract class FSEditLogOp {
     void resetSubFields() {
       snapshotRoot = null;
       snapshotName = null;
+      mtime = 0L;
     }
-    
+
+    /* set the name of the snapshot. */
     DeleteSnapshotOp setSnapshotName(String snapName) {
       this.snapshotName = snapName;
       return this;
     }
 
+    /* set the directory path where the snapshot is taken. */
     DeleteSnapshotOp setSnapshotRoot(String snapRoot) {
       snapshotRoot = snapRoot;
       return this;
     }
-    
+
+    /* The snapshot deletion time set by Time.now(). */
+    DeleteSnapshotOp setSnapshotMTime(long mTime) {
+      this.mtime = mTime;
+      return this;
+    }
+
     @Override
     void readFields(DataInputStream in, int logVersion) throws IOException {
       snapshotRoot = FSImageSerialization.readString(in);
       snapshotName = FSImageSerialization.readString(in);
+      mtime = FSImageSerialization.readLong(in);
       
       // read RPC ids if necessary
       readRpcIds(in, logVersion);
@@ -3567,6 +3579,7 @@ public abstract class FSEditLogOp {
     public void writeFields(DataOutputStream out) throws IOException {
       FSImageSerialization.writeString(snapshotRoot, out);
       FSImageSerialization.writeString(snapshotName, out);
+      FSImageSerialization.writeLong(mtime, out);
       writeRpcIds(rpcClientId, rpcCallId, out);
     }
 
@@ -3574,6 +3587,7 @@ public abstract class FSEditLogOp {
     protected void toXml(ContentHandler contentHandler) throws SAXException {
       XMLUtils.addSaxString(contentHandler, "SNAPSHOTROOT", snapshotRoot);
       XMLUtils.addSaxString(contentHandler, "SNAPSHOTNAME", snapshotName);
+      XMLUtils.addSaxString(contentHandler, "MTIME", Long.toString(mtime));
       appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
     }
 
@@ -3581,6 +3595,7 @@ public abstract class FSEditLogOp {
     void fromXml(Stanza st) throws InvalidXmlException {
       snapshotRoot = st.getValue("SNAPSHOTROOT");
       snapshotName = st.getValue("SNAPSHOTNAME");
+      this.mtime = Long.parseLong(st.getValue("MTIME"));
       
       readRpcIdsFromXml(st);
     }
@@ -3591,7 +3606,8 @@ public abstract class FSEditLogOp {
       builder.append("DeleteSnapshotOp [snapshotRoot=")
           .append(snapshotRoot)
           .append(", snapshotName=")
-          .append(snapshotName);
+          .append(snapshotName)
+          .append(", mtime=").append(mtime);
       appendRpcIdsToString(builder, rpcClientId, rpcCallId);
       builder.append("]");
       return builder.toString();

+ 7 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

@@ -289,11 +289,16 @@ public class INodeDirectory extends INodeWithAdditionalFields
         leaseManager, captureOpenFiles, maxSnapshotLimit, mtime);
   }
 
+  /**
+   * Delete a snapshot.
+   * @param snapshotName Name of the snapshot.
+   * @param mtime The snapshot deletion time set by Time.now().
+   */
   public Snapshot removeSnapshot(
-      ReclaimContext reclaimContext, String snapshotName)
+      ReclaimContext reclaimContext, String snapshotName, long mtime)
       throws SnapshotException {
     return getDirectorySnapshottableFeature().removeSnapshot(
-        reclaimContext, this, snapshotName);
+        reclaimContext, this, snapshotName, mtime);
   }
 
   /**

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java

@@ -233,12 +233,13 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
    * @param reclaimContext records blocks and inodes that need to be reclaimed
    * @param snapshotRoot The directory where we take snapshots
    * @param snapshotName The name of the snapshot to be removed
+   * @param now The snapshot deletion time set by Time.now().
    * @return The removed snapshot. Null if no snapshot with the given name
    *         exists.
    */
   public Snapshot removeSnapshot(
       INode.ReclaimContext reclaimContext, INodeDirectory snapshotRoot,
-      String snapshotName) throws SnapshotException {
+      String snapshotName, long now) throws SnapshotException {
     final int i = searchSnapshot(DFSUtil.string2Bytes(snapshotName));
     if (i < 0) {
       throw new SnapshotException("Cannot delete snapshot " + snapshotName
@@ -250,6 +251,7 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
       snapshotRoot.cleanSubtree(reclaimContext, snapshot.getId(), prior);
       // remove from snapshotsByNames after successfully cleaning the subtree
       snapshotsByNames.remove(i);
+      snapshotRoot.updateModificationTime(now, Snapshot.CURRENT_STATE_ID);
       return snapshot;
     }
   }

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java

@@ -346,13 +346,14 @@ public class SnapshotManager implements SnapshotStatsMXBean {
   /**
    * Delete a snapshot for a snapshottable directory
    * @param snapshotName Name of the snapshot to be deleted
+   * @param now is the snapshot deletion time set by Time.now().
    * @param reclaimContext Used to collect information to reclaim blocks
    *                       and inodes
    */
   public void deleteSnapshot(final INodesInPath iip, final String snapshotName,
-      INode.ReclaimContext reclaimContext) throws IOException {
+      INode.ReclaimContext reclaimContext, long now) throws IOException {
     INodeDirectory srcRoot = getSnapshottableRoot(iip);
-    srcRoot.removeSnapshot(reclaimContext, snapshotName);
+    srcRoot.removeSnapshot(reclaimContext, snapshotName, now);
     numSnapshots.getAndDecrement();
   }
 

+ 23 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java

@@ -490,6 +490,29 @@ public class TestSnapshot {
         newSnapshotStatus.getModificationTime());
   }
 
+  /**
+   * Test snapshot directory mtime after snapshot deletion.
+   */
+  @Test(timeout = 60000)
+  public void testDeletionSnapshotMtime() throws Exception {
+    Path dir = new Path("/dir");
+    Path sub = new Path(dir, "sub");
+    Path subFile = new Path(sub, "file");
+    DFSTestUtil.createFile(hdfs, subFile, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.allowSnapshot(dir);
+    Path snapshotPath = hdfs.createSnapshot(dir, "s1");
+    FileStatus oldSnapshotStatus = hdfs.getFileStatus(snapshotPath);
+    hdfs.deleteSnapshot(dir, "s1");
+    FileStatus dirStatus = hdfs.getFileStatus(dir);
+    assertNotEquals(dirStatus.getModificationTime(),
+        oldSnapshotStatus.getModificationTime());
+    cluster.restartNameNodes();
+    FileStatus newSnapshotStatus = hdfs.getFileStatus(dir);
+    assertEquals(dirStatus.getModificationTime(),
+        newSnapshotStatus.getModificationTime());
+  }
+
   /**
    * Prepare a list of modifications. A modification may be a file creation,
    * file deletion, or a modification operation such as appending to an existing

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java

@@ -78,7 +78,7 @@ public class TestSnapshotManager {
 
     // Delete a snapshot to free up a slot.
     //
-    sm.deleteSnapshot(iip, "", mock(INode.ReclaimContext.class));
+    sm.deleteSnapshot(iip, "", mock(INode.ReclaimContext.class), Time.now());
 
     // Attempt to create a snapshot again. It should still fail due
     // to snapshot ID rollover.

BIN
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml

@@ -299,6 +299,7 @@
       <TXID>24</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
+      <MTIME>1512607197720</MTIME>
       <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
       <RPC_CALLID>56</RPC_CALLID>
     </DATA>