Browse Source

HDFS-15446. CreateSnapshotOp fails during edit log loading for /.reserved/raw/path with error java.io.FileNotFoundException: Directory does not exist: /.reserved/raw/path. Contributed by Stephen O'Donnell.

Ayush Saxena 4 years ago
parent
commit
642aafb106

+ 20 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -693,6 +693,26 @@ public class FSDirectory implements Closeable {
     return iip;
   }
 
+  /**
+   * This method should only be used from internal paths and not those provided
+   * directly by a user. It resolves a given path into an INodesInPath in a
+   * similar way to resolvePath(...), only traversal and permissions are not
+   * checked.
+   * @param src The path to resolve.
+   * @return if the path indicates an inode, return path after replacing up to
+   *        {@code <inodeid>} with the corresponding path of the inode, else
+   *        the path in {@code src} as is. If the path refers to a path in
+   *        the "raw" directory, return the non-raw pathname.
+   * @throws FileNotFoundException if the path refers to an inode
+   *        (via {@code /.reserved/.inodes/<inodeid>}) that does not exist.
+   */
+  public INodesInPath unprotectedResolvePath(String src)
+      throws FileNotFoundException {
+    byte[][] components = INode.getPathComponents(src);
+    // Record whether the original path was under /.reserved/raw before the
+    // reserved prefix is stripped, so the resolved IIP keeps its raw flag.
+    boolean isRaw = isReservedRawName(components);
+    // Rewrites reserved components (e.g. /.reserved/.inodes/<id>) into the
+    // real path; presumably the source of FileNotFoundException — confirm
+    // against resolveComponents' contract.
+    components = resolveComponents(components, this);
+    // No FSPermissionChecker here: unlike resolvePath(...), traversal and
+    // permission checks are intentionally skipped (edit-log replay context).
+    return INodesInPath.resolve(rootDir, components, isRaw);
+  }
+
   INodesInPath resolvePath(FSPermissionChecker pc, String src, long fileId)
       throws UnresolvedLinkException, FileNotFoundException,
       AccessControlException, ParentNotDirectoryException {

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -798,7 +798,7 @@ public class FSEditLogLoader {
       final String snapshotRoot =
           renameReservedPathsOnUpgrade(createSnapshotOp.snapshotRoot,
               logVersion);
-      INodesInPath iip = fsDir.getINodesInPath(snapshotRoot, DirOp.WRITE);
+      INodesInPath iip = fsDir.unprotectedResolvePath(snapshotRoot);
       String path = fsNamesys.getSnapshotManager().createSnapshot(
           fsDir.getFSNamesystem().getLeaseManager(),
           iip, snapshotRoot, createSnapshotOp.snapshotName);
@@ -815,7 +815,7 @@ public class FSEditLogLoader {
       final String snapshotRoot =
           renameReservedPathsOnUpgrade(deleteSnapshotOp.snapshotRoot,
               logVersion);
-      INodesInPath iip = fsDir.getINodesInPath(snapshotRoot, DirOp.WRITE);
+      INodesInPath iip = fsDir.unprotectedResolvePath(snapshotRoot);
       fsNamesys.getSnapshotManager().deleteSnapshot(iip,
           deleteSnapshotOp.snapshotName,
           new INode.ReclaimContext(fsNamesys.dir.getBlockStoragePolicySuite(),
@@ -837,7 +837,7 @@ public class FSEditLogLoader {
       final String snapshotRoot =
           renameReservedPathsOnUpgrade(renameSnapshotOp.snapshotRoot,
               logVersion);
-      INodesInPath iip = fsDir.getINodesInPath(snapshotRoot, DirOp.WRITE);
+      INodesInPath iip = fsDir.unprotectedResolvePath(snapshotRoot);
       fsNamesys.getSnapshotManager().renameSnapshot(iip,
           snapshotRoot, renameSnapshotOp.snapshotOldName,
           renameSnapshotOp.snapshotNewName);

+ 53 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java

@@ -456,6 +456,59 @@ public class TestSnapshot {
     assertEquals(0, rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
   }
 
+  /**
+   * HDFS-15446 - ensure that snapshot operations on /.reserved/raw
+   * paths work and the NN can load the resulting edits.
+   */
+  @Test(timeout = 60000)
+  public void testSnapshotOpsOnReservedPath() throws Exception {
+    // Two snapshottable roots: one directly under /, one nested deeper,
+    // to exercise reserved-raw resolution at both depths.
+    Path dir = new Path("/dir");
+    Path nestedDir = new Path("/nested/dir");
+    Path sub = new Path(dir, "sub");
+    Path subFile = new Path(sub, "file");
+    Path nestedFile = new Path(nestedDir, "file");
+    DFSTestUtil.createFile(hdfs, subFile, BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, nestedFile, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.allowSnapshot(dir);
+    hdfs.allowSnapshot(nestedDir);
+    // Address the snapshot roots through their /.reserved/raw aliases; each
+    // of create/rename/delete writes a *SnapshotOp edit containing the raw
+    // path, which the NN must later be able to replay.
+    Path reservedDir = new Path("/.reserved/raw/dir");
+    Path reservedNestedDir = new Path("/.reserved/raw/nested/dir");
+    hdfs.createSnapshot(reservedDir, "s1");
+    hdfs.createSnapshot(reservedNestedDir, "s1");
+    hdfs.renameSnapshot(reservedDir, "s1", "s2");
+    hdfs.renameSnapshot(reservedNestedDir, "s1", "s2");
+    hdfs.deleteSnapshot(reservedDir, "s2");
+    hdfs.deleteSnapshot(reservedNestedDir, "s2");
+    // The original problem with reserved path, is that the NN was unable to
+    // replay the edits, therefore restarting the NN to ensure it starts
+    // and no exceptions are raised.
+    cluster.restartNameNode(true);
+  }
+
+  /**
+   * HDFS-15446 - ensure that snapshot operations on /.reserved/raw
+   * paths work and the NN can load the resulting edits. This test is for
+   * snapshots at the root level.
+   */
+  @Test(timeout = 60000)
+  public void testSnapshotOpsOnRootReservedPath() throws Exception {
+    Path dir = new Path("/");
+    Path sub = new Path(dir, "sub");
+    Path subFile = new Path(sub, "file");
+    DFSTestUtil.createFile(hdfs, subFile, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.allowSnapshot(dir);
+    // "/.reserved/raw" is the raw alias of the root itself — the edge case
+    // where the reserved prefix is the entire snapshot-root path.
+    Path reservedDir = new Path("/.reserved/raw");
+    hdfs.createSnapshot(reservedDir, "s1");
+    hdfs.renameSnapshot(reservedDir, "s1", "s2");
+    hdfs.deleteSnapshot(reservedDir, "s2");
+    // The original problem with reserved path, is that the NN was unable to
+    // replay the edits, therefore restarting the NN to ensure it starts
+    // and no exceptions are raised.
+    cluster.restartNameNode(true);
+  }
+
   /**
    * Prepare a list of modifications. A modification may be a file creation,
    * file deletion, or a modification operation such as appending to an existing