
HDFS-7087. Ability to list /.reserved. Contributed by Xiao Chen.

Andrew Wang committed 9 years ago
commit 3dadf369d5
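
This commit makes the synthetic /.reserved directory listable. Previously, listing /.reserved threw FileNotFoundException (see the TestReservedRawPaths diff at the end); it now returns two precomputed entries, /.reserved/.inodes and /.reserved/raw. At the same time, operations that would mutate the exact reserved path itself, such as setPermission, setOwner, delete, rename, create, and symlink targets, are rejected with InvalidPathException.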

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -1555,6 +1555,8 @@ Release 2.8.0 - UNRELEASED
     TestBlockManager.testBlocksAreNotUnderreplicatedInSingleRack.
     (Masatake Iwasaki via wang)
 
+    HDFS-7087. Ability to list /.reserved. (Xiao Chen via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -50,6 +51,9 @@ public class FSDirAttrOp {
       FSDirectory fsd, final String srcArg, FsPermission permission)
       throws IOException {
     String src = srcArg;
+    if (FSDirectory.isExactReservedName(src)) {
+      throw new InvalidPathException(src);
+    }
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     INodesInPath iip;
@@ -69,6 +73,9 @@ public class FSDirAttrOp {
   static HdfsFileStatus setOwner(
       FSDirectory fsd, String src, String username, String group)
       throws IOException {
+    if (FSDirectory.isExactReservedName(src)) {
+      throw new InvalidPathException(src);
+    }
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     INodesInPath iip;
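
These two guards establish the pattern used throughout this commit: fail fast with InvalidPathException when the path is exactly the reserved root, before any permission checking or path resolution. Below is a minimal standalone sketch of the intended semantics (class and constant names are illustrative; the real isExactReservedName is added to FSDirectory.java later in this diff and is additionally gated by the CHECK_RESERVED_FILE_NAMES flag). Note that isReservedName only matches paths under /.reserved, which is why FSDirSymlinkOp below needs both checks.

    // Standalone sketch, not the Hadoop source: isExactReservedName matches
    // only the literal reserved root, complementing isReservedName, which
    // matches paths strictly *under* /.reserved.
    public class ReservedNameSketch {
      static final String DOT_RESERVED_PATH_PREFIX = "/.reserved";

      static boolean isExactReservedName(String src) {
        return src.equals(DOT_RESERVED_PATH_PREFIX);
      }

      static boolean isReservedName(String src) {
        return src.startsWith(DOT_RESERVED_PATH_PREFIX + "/");
      }

      public static void main(String[] args) {
        System.out.println(isExactReservedName("/.reserved"));     // true
        System.out.println(isExactReservedName("/.reserved/raw")); // false
        System.out.println(isReservedName("/.reserved"));          // false
        System.out.println(isReservedName("/.reserved/raw"));      // true
      }
    }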

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -177,6 +178,10 @@ class FSDirDeleteOp {
       NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
     }
 
+    if (FSDirectory.isExactReservedName(src)) {
+      throw new InvalidPathException(src);
+    }
+
     FSDirectory fsd = fsn.getFSDirectory();
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     List<INode> removedINodes = new ChunkedArrayList<>();

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java

@@ -499,6 +499,12 @@ class FSDirRenameOp {
           + error);
       throw new IOException(error);
     }
+
+    if (FSDirectory.isExactReservedName(src)
+        || FSDirectory.isExactReservedName(dst)) {
+      error = "Cannot rename to or from /.reserved";
+      throw new InvalidPathException(error);
+    }
   }
 
   private static void validateOverwrite(
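
For context, a hedged client-side sketch of calls this validation now blocks; the class name is hypothetical, and the server-side InvalidPathException reaches the client through the RPC layer.

    // Hypothetical sketch, not from this patch: both rename directions
    // involving the exact /.reserved path are rejected with the message
    // "Cannot rename to or from /.reserved".
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RenameReservedSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        fs.rename(new Path("/.reserved"), new Path("/elsewhere")); // rejected
        fs.rename(new Path("/data/file"), new Path("/.reserved")); // rejected
      }
    }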

+ 16 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java

@@ -230,6 +230,9 @@ class FSDirStatAndListingOp {
       throws IOException {
     String srcs = FSDirectory.normalizePath(src);
     final boolean isRawPath = FSDirectory.isReservedRawName(src);
+    if (FSDirectory.isExactReservedName(srcs)) {
+      return getReservedListing(fsd);
+    }
 
     fsd.readLock();
     try {
@@ -339,6 +342,15 @@ class FSDirStatAndListingOp {
         listing, snapshots.size() - skipSize - numOfListing);
   }
 
+  /**
+   * Get a listing of the /.reserved directory.
+   * @param fsd FSDirectory
+   * @return listing containing child directories of /.reserved
+   */
+  private static DirectoryListing getReservedListing(FSDirectory fsd) {
+    return new DirectoryListing(fsd.getReservedStatuses(), 0);
+  }
+
   /** Get the file info for a specific file.
    * @param fsd FSDirectory
    * @param src The string representation of the path to the file
@@ -375,6 +387,10 @@ class FSDirStatAndListingOp {
       FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath)
     throws IOException {
     String srcs = FSDirectory.normalizePath(src);
+    if (FSDirectory.isExactReservedName(src)) {
+      return FSDirectory.DOT_RESERVED_STATUS;
+    }
+
     if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
       if (fsd.getINode4DotSnapshot(srcs) != null) {
         return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
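
Since the check runs before fsd.readLock() is taken, a listing of the reserved root is served entirely from precomputed statuses (see getReservedStatuses in the FSDirectory diff below) without consulting the inode tree. A hedged client-side sketch of the observable behavior, assuming a running cluster reachable via the default Configuration (the class name is illustrative):

    // After this change, listing /.reserved returns the two synthetic
    // entries instead of throwing FileNotFoundException.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListReservedExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        for (FileStatus st : fs.listStatus(new Path("/.reserved"))) {
          System.out.println(st.getPath().getName()); // ".inodes", then "raw"
        }
      }
    }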

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java

@@ -42,7 +42,8 @@ class FSDirSymlinkOp {
     if (!DFSUtil.isValidName(link)) {
       throw new InvalidPathException("Invalid link name: " + link);
     }
-    if (FSDirectory.isReservedName(target) || target.isEmpty()) {
+    if (FSDirectory.isReservedName(target) || target.isEmpty()
+        || FSDirectory.isExactReservedName(target)) {
       throw new InvalidPathException("Invalid target name: " + target);
     }
 

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -365,6 +366,12 @@ class FSDirWriteFileOp {
           " already exists as a directory");
     }
 
+    if (FSDirectory.isExactReservedName(src) || (FSDirectory.isReservedName(src)
+        && !FSDirectory.isReservedRawName(src)
+        && !FSDirectory.isReservedInodesName(src))) {
+      throw new InvalidPathException(src);
+    }
+
     final INodeFile myFile = INodeFile.valueOf(inode, src, true);
     if (fsd.isPermissionEnabled()) {
       if (overwrite && myFile != null) {
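
The create-path guard rejects the exact reserved root and any unknown child of /.reserved, while still allowing writes through the /.reserved/raw and /.reserved/.inodes subtrees, which resolve to real paths. A standalone sketch of the predicate (illustrative only; the real code uses FSDirectory's isExactReservedName, isReservedName, isReservedRawName, and isReservedInodesName helpers):

    // Sketch of the create-time rejection rule added above.
    public class CreateReservedSketch {
      static boolean rejectCreate(String src) {
        boolean exact = src.equals("/.reserved");
        boolean reserved = src.startsWith("/.reserved/");
        boolean raw = src.startsWith("/.reserved/raw");
        boolean inodes = src.startsWith("/.reserved/.inodes");
        return exact || (reserved && !raw && !inodes);
      }

      public static void main(String[] args) {
        System.out.println(rejectCreate("/.reserved"));              // true
        System.out.println(rejectCreate("/.reserved/foo"));          // true
        System.out.println(rejectCreate("/.reserved/raw/a"));        // false
        System.out.println(rejectCreate("/.reserved/.inodes/16386"));// false
        System.out.println(rejectCreate("/user/alice/a"));           // false
      }
    }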

+ 61 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -122,6 +122,11 @@ public class FSDirectory implements Closeable {
   public final static byte[] DOT_INODES = 
       DFSUtil.string2Bytes(DOT_INODES_STRING);
 
+  public final static HdfsFileStatus DOT_RESERVED_STATUS =
+      new HdfsFileStatus(0, true, 0, 0, 0, 0, new FsPermission((short) 01770),
+          null, null, null, HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
+          HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
+
   INodeDirectory rootDir;
   private final FSNamesystem namesystem;
   private volatile boolean skipQuotaCheck = false; //skip while consuming edits
@@ -169,6 +174,8 @@ public class FSDirectory implements Closeable {
 
   private final FSEditLog editLog;
 
+  private HdfsFileStatus[] reservedStatuses;
+
   private INodeAttributeProvider attributeProvider;
 
   public void setINodeAttributeProvider(INodeAttributeProvider provider) {
@@ -312,7 +319,44 @@ public class FSDirectory implements Closeable {
         DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_KEY,
         DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_DEFAULT);
   }
-    
+
+  /**
+   * Get HdfsFileStatuses of the reserved paths: .inodes and raw.
+   *
+   * @return Array of HdfsFileStatus
+   */
+  HdfsFileStatus[] getReservedStatuses() {
+    Preconditions.checkNotNull(reservedStatuses, "reservedStatuses should"
+        + " not be null. It is populated when FSNamesystem loads FS image."
+        + " It has to be set at this time instead of initialization time"
+        + " because CTime is loaded during FSNamesystem#loadFromDisk.");
+    return reservedStatuses;
+  }
+
+  /**
+   * Create HdfsFileStatuses of the reserved paths: .inodes and raw.
+   * These statuses are solely for listing purposes. All other operations
+   * on the reserved dirs are disallowed.
+   * Operations on subdirectories are resolved by
+   * {@link FSDirectory#resolvePath(String, byte[][], FSDirectory)}
+   * and conducted directly, without the need to check the reserved dirs.
+   *
+   * This method should only be invoked once during namenode initialization.
+   *
+   * @param cTime CTime of the file system
+   * @return Array of HdfsFileStatus
+   */
+  void createReservedStatuses(long cTime) {
+    HdfsFileStatus inodes = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
+        new FsPermission((short) 0770), null, supergroup, null,
+        DOT_INODES, -1L, 0, null,
+        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
+    HdfsFileStatus raw = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
+        new FsPermission((short) 0770), null, supergroup, null, RAW, -1L,
+        0, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
+    reservedStatuses = new HdfsFileStatus[] {inodes, raw};
+  }
+
   FSNamesystem getFSNamesystem() {
     return namesystem;
   }
@@ -1263,11 +1307,20 @@ public class FSDirectory implements Closeable {
     return src.startsWith(DOT_RESERVED_PATH_PREFIX + Path.SEPARATOR);
   }
 
+  public static boolean isExactReservedName(String src) {
+    return CHECK_RESERVED_FILE_NAMES && src.equals(DOT_RESERVED_PATH_PREFIX);
+  }
+
   static boolean isReservedRawName(String src) {
     return src.startsWith(DOT_RESERVED_PATH_PREFIX +
         Path.SEPARATOR + RAW_STRING);
   }
 
+  static boolean isReservedInodesName(String src) {
+    return src.startsWith(DOT_RESERVED_PATH_PREFIX +
+        Path.SEPARATOR + DOT_INODES_STRING);
+  }
+
   /**
    * Resolve a /.reserved/... path to a non-reserved path.
    * <p/>
@@ -1319,7 +1372,13 @@ public class FSDirectory implements Closeable {
       if (nComponents == 3) {
         return Path.SEPARATOR;
       } else {
-        return constructRemainingPath("", pathComponents, 3);
+        if (nComponents == 4
+            && Arrays.equals(DOT_RESERVED, pathComponents[3])) {
+          /* It's /.reserved/raw/.reserved so don't strip */
+          return src;
+        } else {
+          return constructRemainingPath("", pathComponents, 3);
+        }
       }
     } else {
       /* It's some sort of /.reserved/<unknown> path. Ignore it. */
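
The resolvePath change leaves /.reserved/raw/.reserved intact rather than stripping the raw prefix, so the subsequent lookup fails with a not-found error, which TestDFSShell#testListReserved below asserts. A minimal runnable sketch of the assumed stripping rule for the raw subtree (class and method names are illustrative):

    // Standalone sketch: mirrors the /.reserved/raw handling only.
    public class RawResolveSketch {
      static String resolveRaw(String src) {
        String[] parts = src.split("/");           // parts[0] is ""
        if (parts.length == 3) {
          return "/";                              // "/.reserved/raw" -> "/"
        }
        if (parts.length == 4 && ".reserved".equals(parts[3])) {
          return src;                              // don't strip; lookup fails
        }
        return src.substring("/.reserved/raw".length());
      }

      public static void main(String[] args) {
        System.out.println(resolveRaw("/.reserved/raw"));           // "/"
        System.out.println(resolveRaw("/.reserved/raw/a/b"));       // "/a/b"
        System.out.println(resolveRaw("/.reserved/raw/.reserved")); // unchanged
      }
    }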

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -689,6 +689,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     if (nnMetrics != null) {
       nnMetrics.setFsImageLoadTime((int) timeTakenToLoadFSImage);
     }
+    namesystem.getFSDirectory().createReservedStatuses(namesystem.getCTime());
     return namesystem;
   }
   

+ 0 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java

@@ -1193,10 +1193,6 @@ public class TestGlobPaths {
       Assert.assertEquals(reservedRoot,
         TestPath.mergeStatuses(wrap.
             globStatus(new Path(reservedRoot), new AcceptAllPathFilter())));
-      // These inodes don't show up via listStatus.
-      Assert.assertEquals("",
-        TestPath.mergeStatuses(wrap.
-            globStatus(new Path("/.reserved/*"), new AcceptAllPathFilter())));
     }
   }
 

+ 194 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
@@ -3147,4 +3148,197 @@ public class TestDFSShell {
   public void testNoTrashConfig() throws Exception {
     deleteFileUsingTrash(false, false);
   }
+
+  @Test (timeout = 30000)
+  public void testListReserved() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    FileSystem fs = cluster.getFileSystem();
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+    FileStatus test = fs.getFileStatus(new Path("/.reserved"));
+    assertEquals(FSDirectory.DOT_RESERVED_STRING, test.getPath().getName());
+
+    // Listing /.reserved/ should show 2 items: raw and .inodes
+    FileStatus[] stats = fs.listStatus(new Path("/.reserved"));
+    assertEquals(2, stats.length);
+    assertEquals(FSDirectory.DOT_INODES_STRING, stats[0].getPath().getName());
+    assertEquals(conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
+        stats[0].getGroup());
+    assertEquals("raw", stats[1].getPath().getName());
+    assertEquals(conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
+        stats[1].getGroup());
+
+    // Listing / should not show /.reserved
+    stats = fs.listStatus(new Path("/"));
+    assertEquals(0, stats.length);
+
+    // runCmd prints errors to System.err, so verify from there.
+    PrintStream syserr = System.err;
+    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream ps = new PrintStream(baos);
+    System.setErr(ps);
+
+    runCmd(shell, "-ls", "/.reserved");
+    assertEquals(0, baos.toString().length());
+
+    runCmd(shell, "-ls", "/.reserved/raw/.reserved");
+    assertTrue(baos.toString().contains("No such file or directory"));
+
+    System.setErr(syserr);
+    cluster.shutdown();
+  }
+
+  @Test (timeout = 30000)
+  public void testMkdirReserved() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    FileSystem fs = cluster.getFileSystem();
+    try {
+      fs.mkdirs(new Path("/.reserved"));
+      fail("Can't mkdir /.reserved");
+    } catch (Exception e) {
+      // Expected, HadoopIllegalArgumentException thrown from remote
+      assertTrue(e.getMessage().contains("\".reserved\" is reserved"));
+    }
+    cluster.shutdown();
+  }
+
+  @Test (timeout = 30000)
+  public void testRmReserved() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    FileSystem fs = cluster.getFileSystem();
+    try {
+      fs.delete(new Path("/.reserved"), true);
+      fail("Can't delete /.reserved");
+    } catch (Exception e) {
+      // Expected, InvalidPathException thrown from remote
+      assertTrue(e.getMessage().contains("Invalid path name /.reserved"));
+    }
+    cluster.shutdown();
+  }
+
+  @Test (timeout = 30000)
+  public void testCopyReserved() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    FileSystem fs = cluster.getFileSystem();
+    final File localFile = new File(TEST_ROOT_DIR, "testFileForPut");
+    localFile.createNewFile();
+    final String localfilepath =
+        new Path(localFile.getAbsolutePath()).toUri().toString();
+    try {
+      fs.copyFromLocalFile(new Path(localfilepath), new Path("/.reserved"));
+      fail("Can't copyFromLocal to /.reserved");
+    } catch (Exception e) {
+      // Expected, InvalidPathException thrown from remote
+      assertTrue(e.getMessage().contains("Invalid path name /.reserved"));
+    }
+
+    final String testdir = System.getProperty("test.build.data")
+        + "/TestDFSShell-testCopyReserved";
+    final Path hdfsTestDir = new Path(testdir);
+    writeFile(fs, new Path(testdir, "testFileForPut"));
+    final Path src = new Path(hdfsTestDir, "srcfile");
+    fs.create(src).close();
+    assertTrue(fs.exists(src));
+
+    // runCmd prints errors to System.err, so verify from there.
+    PrintStream syserr = System.err;
+    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream ps = new PrintStream(baos);
+    System.setErr(ps);
+
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+    runCmd(shell, "-cp", src.toString(), "/.reserved");
+    assertTrue(baos.toString().contains("Invalid path name /.reserved"));
+    System.setErr(syserr);
+    cluster.shutdown();
+  }
+
+  @Test (timeout = 30000)
+  public void testChmodReserved() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    FileSystem fs = cluster.getFileSystem();
+
+    // runCmd prints errors to System.err, so verify from there.
+    PrintStream syserr = System.err;
+    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream ps = new PrintStream(baos);
+    System.setErr(ps);
+
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+    runCmd(shell, "-chmod", "777", "/.reserved");
+    assertTrue(baos.toString().contains("Invalid path name /.reserved"));
+    System.setErr(syserr);
+    cluster.shutdown();
+  }
+
+  @Test (timeout = 30000)
+  public void testChownReserved() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    FileSystem fs = cluster.getFileSystem();
+
+    // runCmd prints errors to System.err, so verify from there.
+    PrintStream syserr = System.err;
+    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream ps = new PrintStream(baos);
+    System.setErr(ps);
+
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+    runCmd(shell, "-chown", "user1", "/.reserved");
+    assertTrue(baos.toString().contains("Invalid path name /.reserved"));
+    System.setErr(syserr);
+    cluster.shutdown();
+  }
+
+  @Test (timeout = 30000)
+  public void testSymLinkReserved() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    FileSystem fs = cluster.getFileSystem();
+    try {
+      fs.createSymlink(new Path("/.reserved"), new Path("/rl1"), false);
+      fail("Can't create symlink to /.reserved");
+    } catch (Exception e) {
+      // Expected, InvalidPathException thrown from remote
+      assertTrue(e.getMessage().contains("Invalid target name: /.reserved"));
+    }
+    cluster.shutdown();
+  }
+
+  @Test (timeout = 30000)
+  public void testSnapshotReserved() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    DistributedFileSystem fs = cluster.getFileSystem();
+    final Path reserved = new Path("/.reserved");
+    try {
+      fs.allowSnapshot(reserved);
+      fail("Can't allow snapshot on /.reserved");
+    } catch (FileNotFoundException e) {
+      assertTrue(e.getMessage().contains("Directory does not exist"));
+    }
+    try {
+      fs.createSnapshot(reserved, "snap");
+      fail("Can't create snapshot on /.reserved");
+    } catch (FileNotFoundException e) {
+      assertTrue(e.getMessage().contains("Directory does not exist"));
+    }
+    cluster.shutdown();
+  }
 }

+ 6 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
@@ -304,14 +305,12 @@ public class TestReservedRawPaths {
     DFSTestUtil.createFile(fs, baseFileRaw, len, (short) 1, 0xFEED);
 
     /*
-     * Ensure that you can't list /.reserved. Ever.
+     * Ensure that you can list /.reserved, with results: raw and .inodes
      */
-    try {
-      fs.listStatus(new Path("/.reserved"));
-      fail("expected FNFE");
-    } catch (FileNotFoundException e) {
-      assertExceptionContains("/.reserved does not exist", e);
-    }
+    FileStatus[] stats = fs.listStatus(new Path("/.reserved"));
+    assertEquals(2, stats.length);
+    assertEquals(FSDirectory.DOT_INODES_STRING, stats[0].getPath().getName());
+    assertEquals("raw", stats[1].getPath().getName());
 
     try {
       fs.listStatus(new Path("/.reserved/.inodes"));