
HDFS-14802. The feature of protect directories should be used in RenameOp (#1669)

Hui Fei, 5 years ago
commit 67f2c491fe

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -904,7 +904,7 @@
   <name>fs.protected.directories</name>
   <value></value>
   <description>A comma-separated list of directories which cannot
-    be deleted even by the superuser unless they are empty. This
+    be deleted or renamed even by the superuser unless they are empty. This
     setting can be used to guard important system directories
     against accidental deletion due to administrator error.
   </description>
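
For illustration only, an administrator might guard critical paths by setting this property in core-site.xml along these lines (the directory names here are hypothetical):

<property>
  <name>fs.protected.directories</name>
  <value>/user,/apps/hive/warehouse</value>
</property>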

+ 9 - 6
hadoop-common-project/hadoop-common/src/site/markdown/filesystem/filesystem.md

@@ -43,6 +43,15 @@ The implementations of `FileSystem` shipped with Apache Hadoop
 All the requirements of a valid FileSystem are considered implicit preconditions and postconditions:
 all operations on a valid FileSystem MUST result in a new FileSystem that is also valid.

+## Feasible features
+
+### <a name="ProtectedDirectories"></a>Protected directories
+
+HDFS has the notion of *Protected Directories*, which are declared in
+the option `fs.protected.directories`. Any attempt to delete or rename
+such a directory or a parent thereof raises an `AccessControlException`.
+Accordingly, any attempt to delete the root directory SHALL, if there is
+a protected directory, result in such an exception being raised.

 ## Predicates and other state access operations

@@ -1009,12 +1018,6 @@ filesystem is desired.

 1. Object Stores: see [Object Stores: root directory deletion](#object-stores-rm-root).

-HDFS has the notion of *Protected Directories*, which are declared in
-the option `fs.protected.directories`. Any attempt to delete such a directory
-or a parent thereof raises an `AccessControlException`. Accordingly, any
-attempt to delete the root directory SHALL, if there is a protected directory,
-result in such an exception being raised.
-
 This specification does not recommend any specific action. Do note, however,
 that the POSIX model assumes that there is a permissions model such that normal
 users do not have the permission to delete that root directory; it is an action
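
As a rough client-side illustration of the contract described in the new Protected Directories section above, the sketch below assumes a running cluster in which /apps/important is a non-empty directory listed in fs.protected.directories (the path and cluster setup are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.AccessControlException;

public class ProtectedDirExample {
  public static void main(String[] args) throws Exception {
    // fs.defaultFS in the loaded configuration must point at the HDFS cluster.
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/apps/important");  // hypothetical protected, non-empty directory

    try {
      // With HDFS-14802, rename is rejected just like delete when the source
      // (or any descendant of it) is a non-empty protected directory.
      fs.rename(dir, new Path("/apps/important_renamed"));
    } catch (AccessControlException e) {
      System.out.println("rename blocked: " + e.getMessage());
    }

    try {
      fs.delete(dir, true);  // recursive delete is likewise refused
    } catch (AccessControlException e) {
      System.out.println("delete blocked: " + e.getMessage());
    }
  }
}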

+ 51 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -39,6 +39,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_P

 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.net.InetAddress;
@@ -57,6 +58,7 @@ import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.SortedSet;

 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
@@ -65,6 +67,11 @@ import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.PosixParser;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.fs.ParentNotDirectoryException;
+import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
+import org.apache.hadoop.security.AccessControlException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -1735,4 +1742,48 @@ public class DFSUtil {
     }
     return id;
   }
+
+  /**
+   * Throw if the given directory has any non-empty protected descendants
+   * (including itself).
+   *
+   * @param iip directory whose descendants are to be checked.
+   * @throws AccessControlException if a non-empty protected descendant
+   *                                was found.
+   * @throws ParentNotDirectoryException
+   * @throws UnresolvedLinkException
+   * @throws FileNotFoundException
+   */
+  public static void checkProtectedDescendants(
+      FSDirectory fsd, INodesInPath iip)
+          throws AccessControlException, UnresolvedLinkException,
+          ParentNotDirectoryException {
+    final SortedSet<String> protectedDirs = fsd.getProtectedDirectories();
+    if (protectedDirs.isEmpty()) {
+      return;
+    }
+
+    String src = iip.getPath();
+    // Is src protected? Caller has already checked it is non-empty.
+    if (protectedDirs.contains(src)) {
+      throw new AccessControlException(
+          "Cannot delete/rename non-empty protected directory " + src);
+    }
+
+    // Are any descendants of src protected?
+    // The subSet call returns only the descendants of src since
+    // {@link Path#SEPARATOR} is "/" and '0' is the next ASCII
+    // character after '/'.
+    for (String descendant :
+        protectedDirs.subSet(src + Path.SEPARATOR, src + "0")) {
+      INodesInPath subdirIIP =
+          fsd.getINodesInPath(descendant, FSDirectory.DirOp.WRITE);
+      if (fsd.isNonEmptyDirectory(subdirIIP)) {
+        throw new AccessControlException(
+            "Cannot delete/rename non-empty protected subdirectory "
+            + descendant);
+      }
+    }
+  }
+
 }
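
The subSet range used in checkProtectedDescendants above works because '0' is the next ASCII character after '/'. A minimal, self-contained sketch of the same idea with a plain TreeSet (the paths are made up):

import java.util.SortedSet;
import java.util.TreeSet;

public class SubSetRangeDemo {
  public static void main(String[] args) {
    SortedSet<String> dirs = new TreeSet<>();
    dirs.add("/a");
    dirs.add("/a/b");
    dirs.add("/a/b/c");
    dirs.add("/ab");   // not a descendant of /a, must be excluded
    dirs.add("/a0");   // equals the exclusive upper bound, also excluded

    String src = "/a";
    // The half-open range [src + "/", src + "0") covers exactly the keys that
    // start with "/a/", i.e. the proper descendants of /a, and nothing else.
    SortedSet<String> descendants = dirs.subSet(src + "/", src + "0");
    System.out.println(descendants);  // prints [/a/b, /a/b/c]
  }
}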

+ 2 - 48
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java

@@ -18,22 +18,17 @@
 package org.apache.hadoop.hdfs.server.namenode;

 import org.apache.hadoop.fs.InvalidPathException;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
-import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INode.ReclaimContext;
-import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.ChunkedArrayList;

-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.SortedSet;

 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID;
 import static org.apache.hadoop.util.Time.now;
@@ -115,7 +110,7 @@ class FSDirDeleteOp {
         throw new PathIsNotEmptyDirectoryException(
             iip.getPath() + " is non empty");
       }
-      checkProtectedDescendants(fsd, iip);
+      DFSUtil.checkProtectedDescendants(fsd, iip);
     }

     return deleteInternal(fsn, iip, logRetryCache);
@@ -271,45 +266,4 @@ class FSDirDeleteOp {
     }
     return true;
   }
-
-  /**
-   * Throw if the given directory has any non-empty protected descendants
-   * (including itself).
-   *
-   * @param iip directory whose descendants are to be checked.
-   * @throws AccessControlException if a non-empty protected descendant
-   *                                was found.
-   * @throws ParentNotDirectoryException
-   * @throws UnresolvedLinkException
-   * @throws FileNotFoundException
-   */
-  private static void checkProtectedDescendants(
-      FSDirectory fsd, INodesInPath iip)
-          throws AccessControlException, UnresolvedLinkException,
-          ParentNotDirectoryException {
-    final SortedSet<String> protectedDirs = fsd.getProtectedDirectories();
-    if (protectedDirs.isEmpty()) {
-      return;
-    }
-
-    String src = iip.getPath();
-    // Is src protected? Caller has already checked it is non-empty.
-    if (protectedDirs.contains(src)) {
-      throw new AccessControlException(
-          "Cannot delete non-empty protected directory " + src);
-    }
-
-    // Are any descendants of src protected?
-    // The subSet call returns only the descendants of src since
-    // {@link Path#SEPARATOR} is "/" and '0' is the next ASCII
-    // character after '/'.
-    for (String descendant :
-            protectedDirs.subSet(src + Path.SEPARATOR, src + "0")) {
-      INodesInPath subdirIIP = fsd.getINodesInPath(descendant, DirOp.WRITE);
-      if (fsd.isNonEmptyDirectory(subdirIIP)) {
-        throw new AccessControlException(
-            "Cannot delete non-empty protected subdirectory " + descendant);
-      }
-    }
-  }
 }

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
@@ -477,6 +478,10 @@ class FSDirRenameOp {
   private static RenameResult renameTo(FSDirectory fsd, FSPermissionChecker pc,
       INodesInPath srcIIP, INodesInPath dstIIP, boolean logRetryCache)
           throws IOException {
+    if(fsd.isNonEmptyDirectory(srcIIP)) {
+      DFSUtil.checkProtectedDescendants(fsd, srcIIP);
+    }
+
     if (fsd.isPermissionEnabled()) {
       // Check write access to parent of src
       fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -499,7 +499,7 @@ public class FSDirectory implements Closeable {
         normalizePaths(protectedDirs, FS_PROTECTED_DIRECTORIES));
   }

-  SortedSet<String> getProtectedDirectories() {
+  public SortedSet<String> getProtectedDirectories() {
     return protectedDirectories;
   }

@@ -720,7 +720,7 @@ public class FSDirectory implements Closeable {
   /**
    * @return true if the path is a non-empty directory; otherwise, return false.
    */
-  boolean isNonEmptyDirectory(INodesInPath inodesInPath) {
+  public boolean isNonEmptyDirectory(INodesInPath inodesInPath) {
     readLock();
     try {
       final INode inode = inodesInPath.getLastINode();

+ 53 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java

@@ -234,7 +234,7 @@ public class TestProtectedDirectories {
   }

   @Test
-  public void testAll() throws Throwable {
+  public void testDelete() throws Throwable {
     for (TestMatrixEntry testMatrixEntry : createTestMatrix()) {
       Configuration conf = new HdfsConfiguration();
       MiniDFSCluster cluster = setupTestCase(
@@ -264,6 +264,34 @@ public class TestProtectedDirectories {
     }
   }

+  /*
+   * Verify that protected directories cannot be renamed.
+   */
+  @Test
+  public void testRename() throws Throwable {
+    for (TestMatrixEntry testMatrixEntry : createTestMatrix()) {
+      Configuration conf = new HdfsConfiguration();
+      MiniDFSCluster cluster = setupTestCase(
+          conf, testMatrixEntry.getProtectedPaths(),
+          testMatrixEntry.getUnprotectedPaths());
+
+      try {
+        LOG.info("Running {}", testMatrixEntry);
+        FileSystem fs = cluster.getFileSystem();
+        for (Path srcPath : testMatrixEntry.getAllPathsToBeDeleted()) {
+          assertThat(
+              testMatrixEntry + ": Testing whether "
+                  + srcPath + " can be renamed",
+              renamePath(fs, srcPath,
+                  new Path(srcPath.toString() + "_renamed")),
+              is(testMatrixEntry.canPathBeRenamed(srcPath)));
+        }
+      } finally {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /**
    * Verify that configured paths are normalized by removing
    * redundant separators.
@@ -356,6 +384,26 @@ public class TestProtectedDirectories {
     }
   }

+  /**
+   * Return true if the path was successfully renamed. False if it
+   * failed with AccessControlException. Any other exceptions are
+   * propagated to the caller.
+   *
+   * @param fs
+   * @param srcPath
+   * @param dstPath
+   * @return
+   */
+  private boolean renamePath(FileSystem fs, Path srcPath, Path dstPath)
+      throws IOException {
+    try {
+      fs.rename(srcPath, dstPath);
+      return true;
+    } catch (AccessControlException ace) {
+      return false;
+    }
+  }
+
   private static class TestMatrixEntry {
     // true if the path can be deleted.
     final Map<Path, Boolean> protectedPaths = Maps.newHashMap();
@@ -395,6 +443,10 @@ public class TestProtectedDirectories {
           protectedPaths.get(path) : unProtectedPaths.get(path);
     }

+    public boolean canPathBeRenamed(Path path) {
+      return protectedPaths.containsKey(path) ?
+          protectedPaths.get(path) : unProtectedPaths.get(path);
+    }

     public TestMatrixEntry addProtectedDir(String dir, boolean canBeDeleted) {
       protectedPaths.put(new Path(dir), canBeDeleted);