HDFS-8312. Trash does not descend into child directories to check for permissions. Contributed by Weiwei Yang via Eric Yang.

Brahma Reddy Battula 8 years ago
parent commit 3004358639

+ 2 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java

@@ -213,7 +213,8 @@ public final class Options {
    */
   public static enum Rename {
     NONE((byte) 0), // No options
-    OVERWRITE((byte) 1); // Overwrite the rename destination
+    OVERWRITE((byte) 1), // Overwrite the rename destination
+    TO_TRASH ((byte) 2); // Rename to trash
 
     private final byte code;
     

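For illustration, a minimal sketch of how the new option round-trips through its byte code; it assumes the value() accessor that accompanies the code field shown in the context lines above.

import org.apache.hadoop.fs.Options.Rename;

public class RenameCodeDemo {
  public static void main(String[] args) {
    // The byte codes line up with declaration order: NONE=0, OVERWRITE=1, TO_TRASH=2.
    for (Rename option : Rename.values()) {
      System.out.println(option + " -> " + option.value());
    }
  }
}
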
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java

@@ -125,7 +125,7 @@ public class Trash extends Configured {
   }
 
   /** get the current trash directory */
-  Path getCurrentTrashDir() {
+  public Path getCurrentTrashDir() {
     return trashPolicy.getCurrentTrashDir();
   }
 

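Widening getCurrentTrashDir() to public lets code outside org.apache.hadoop.fs resolve the per-user trash location, which the new test below depends on. A minimal sketch, assuming a reachable FileSystem; the path in the comment is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

public class CurrentTrashDirDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.trash.interval", "10"); // enable trash; value is in minutes
    FileSystem fs = FileSystem.get(conf);
    // Now callable from any package; on HDFS this is typically
    // /user/<name>/.Trash/Current
    Path current = new Trash(fs, conf).getCurrentTrashDir();
    System.out.println("current trash dir: " + current);
  }
}
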
+ 6 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java

@@ -103,6 +103,7 @@ public class TrashPolicyDefault extends TrashPolicy {
     return deletionInterval != 0;
   }
 
+  @SuppressWarnings("deprecation")
   @Override
   public boolean moveToTrash(Path path) throws IOException {
     if (!isEnabled())
@@ -151,8 +152,11 @@ public class TrashPolicyDefault extends TrashPolicy {
           trashPath = new Path(orig + Time.now());
         }
         
-        if (fs.rename(path, trashPath))           // move to current trash
-          return true;
+        // move to current trash
+        fs.rename(path, trashPath,
+            Rename.TO_TRASH);
+        LOG.info("Moved: '" + path + "' to trash at: " + trashPath);
+        return true;
       } catch (IOException e) {
         cause = e;
       }

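With Rename.TO_TRASH attached, a denied move now surfaces as an IOException (collected as the cause and rethrown after the retry loop) instead of a silent false. A hedged usage sketch; the path is hypothetical.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

public class MoveToTrashDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.trash.interval", "10");
    FileSystem fs = FileSystem.get(conf);
    try {
      new Trash(fs, conf).moveToTrash(new Path("/data/protected")); // hypothetical path
    } catch (IOException e) {
      // After this patch the cause carries the NameNode's permission error.
      System.err.println("move to trash refused: " + e.getCause());
    }
  }
}
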
+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -283,6 +283,10 @@ Release 2.7.4 - UNRELEASED
 	
     HDFS-10987. Make Decommission less expensive when lot of blocks present. 
     (Brahma Reddy Battula)
+
+    HDFS-8312. Trash does not descend into child directories to check for permissions.
+    (Contributed by Weiwei Yang via Eric Yang)
+    HDFS-11784. Backport HDFS-8312 contributed by Brahma Reddy Battula.
 
 Release 2.7.3 - 2016-08-25
 

+ 14 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.protocolPB;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
 
@@ -583,9 +584,20 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public Rename2ResponseProto rename2(RpcController controller,
       Rename2RequestProto req) throws ServiceException {
 
+    // resolve rename options
+    ArrayList<Rename> optionList = new ArrayList<Rename>();
+    if(req.getOverwriteDest()) {
+      optionList.add(Rename.OVERWRITE);
+    } else if(req.hasMoveToTrash()) {
+      optionList.add(Rename.TO_TRASH);
+    }
+
+    if(optionList.isEmpty()) {
+      optionList.add(Rename.NONE);
+    }
     try {
-      server.rename2(req.getSrc(), req.getDst(), 
-          req.getOverwriteDest() ? Rename.OVERWRITE : Rename.NONE);
+      server.rename2(req.getSrc(), req.getDst(),
+          optionList.toArray(new Rename[optionList.size()]));
     } catch (IOException e) {
       throw new ServiceException(e);
     }   

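The flag decoding above, restated as a self-contained sketch with plain booleans standing in for the proto getters. Note that OVERWRITE takes precedence when both flags are set, since the trash flag is only consulted in the else branch.

import java.util.ArrayList;

import org.apache.hadoop.fs.Options.Rename;

public class RenameFlagDecodeDemo {
  // Mirrors the server-side translation: proto booleans in, Rename[] out,
  // defaulting to NONE when neither flag is set.
  static Rename[] decode(boolean overwriteDest, boolean moveToTrash) {
    ArrayList<Rename> options = new ArrayList<Rename>();
    if (overwriteDest) {
      options.add(Rename.OVERWRITE);
    } else if (moveToTrash) {
      options.add(Rename.TO_TRASH);
    }
    if (options.isEmpty()) {
      options.add(Rename.NONE);
    }
    return options.toArray(new Rename[options.size()]);
  }

  public static void main(String[] args) {
    System.out.println(decode(false, true)[0]); // TO_TRASH
    System.out.println(decode(true, true)[0]);  // OVERWRITE wins
  }
}
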
+ 6 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -498,16 +498,21 @@ public class ClientNamenodeProtocolTranslatorPB implements
       NSQuotaExceededException, ParentNotDirectoryException, SafeModeException,
       UnresolvedLinkException, IOException {
     boolean overwrite = false;
+    boolean toTrash = false;
     if (options != null) {
       for (Rename option : options) {
         if (option == Rename.OVERWRITE) {
           overwrite = true;
+        } else if (option == Rename.TO_TRASH) {
+          toTrash = true;
         }
       }
     }
     Rename2RequestProto req = Rename2RequestProto.newBuilder().
         setSrc(src).
-        setDst(dst).setOverwriteDest(overwrite).
+        setDst(dst).
+        setOverwriteDest(overwrite).
+        setMoveToTrash(toTrash).
         build();
     try {
       rpcProxy.rename2(null, req);

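On the wire the client's options become flags on Rename2RequestProto. A sketch against the generated class, assuming the usual generated-code location (org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos); the paths are hypothetical.

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;

public class Rename2RequestDemo {
  public static void main(String[] args) {
    Rename2RequestProto req = Rename2RequestProto.newBuilder()
        .setSrc("/user/alice/data")                           // hypothetical
        .setDst("/user/alice/.Trash/Current/user/alice/data") // hypothetical
        .setOverwriteDest(false)
        .setMoveToTrash(true)
        .build();
    System.out.println(req.hasMoveToTrash() + " " + req.getMoveToTrash()); // true true
  }
}
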
+ 23 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java

@@ -261,11 +261,29 @@ class FSDirRenameOp {
     src = srcIIP.getPath();
     dst = dstIIP.getPath();
     if (fsd.isPermissionEnabled()) {
-      // Rename does not operate on link targets
-      // Do not resolveLink when checking permissions of src and dst
-      // Check write access to parent of src
-      fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null, null,
-          false);
+      boolean renameToTrash = false;
+      if (null != options &&
+          Arrays.asList(options).
+          contains(Options.Rename.TO_TRASH)) {
+        renameToTrash = true;
+      }
+
+      if(renameToTrash) {
+        // if destination is the trash directory,
+        // besides the permission check on "rename"
+        // we need to enforce the check for "delete"
+        // otherwise, it would expose a
+        // security hole that stuff moved to trash
+        // will be deleted by superuser
+        fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null,
+            FsAction.ALL, true);
+      } else {
+        // Rename does not operate on link targets
+        // Do not resolveLink when checking permissions of src and dst
+        // Check write access to parent of src
+        fsd.checkPermission(pc, srcIIP, false, null, FsAction.WRITE, null,
+            null, false);
+      }
       // Check write access to ancestor of dst
       fsd.checkPermission(pc, dstIIP, false, FsAction.WRITE, null, null, null,
           false);

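The key difference is the subAccess argument (FsAction.ALL): the permission checker recurses over the whole subtree under src, so a rename into trash is refused exactly where a recursive delete would be. A toy model of that veto behavior; the Node type and modes are hypothetical, not NameNode code.

import java.util.Arrays;
import java.util.List;

public class SubtreeCheckDemo {
  // Toy inode: callerHasAll stands in for "caller holds FsAction.ALL here".
  static class Node {
    final boolean callerHasAll;
    final List<Node> children;
    Node(boolean callerHasAll, Node... children) {
      this.callerHasAll = callerHasAll;
      this.children = Arrays.asList(children);
    }
  }

  // One inaccessible descendant vetoes the whole subtree, mirroring the
  // subAccess check enabled above for rename-to-trash.
  static boolean checkSubtree(Node node) {
    if (!node.callerHasAll) {
      return false;
    }
    for (Node child : node.children) {
      if (!checkSubtree(child)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    Node lockedFile = new Node(false);     // e.g. a mode-600 file of another user
    Node dir = new Node(true, lockedFile); // the directory itself is permissive
    System.out.println(checkSubtree(dir)); // false: the child vetoes the move
  }
}
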
+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto

@@ -224,6 +224,7 @@ message Rename2RequestProto {
   required string src = 1;
   required string dst = 2;
   required bool overwriteDest = 3;
+  optional bool moveToTrash = 4;
 }
 
 message Rename2ResponseProto { // void response

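Because field 4 is optional, requests from pre-patch clients simply omit it and the server observes the proto2 defaults. A compatibility sketch using the same generated class as above; src and dst values are placeholders.

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;

public class OptionalFlagCompatDemo {
  public static void main(String[] args) {
    // A pre-patch client never touches field 4 ...
    Rename2RequestProto legacy = Rename2RequestProto.newBuilder()
        .setSrc("/a").setDst("/b").setOverwriteDest(false).build();
    // ... so the server sees hasMoveToTrash() == false and the
    // default value false, falling back to Rename.NONE semantics.
    System.out.println(legacy.hasMoveToTrash() + " " + legacy.getMoveToTrash());
  }
}
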
+ 81 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.security.AccessControlException;
@@ -274,6 +275,86 @@ public class TestDFSPermission {
         FsPermission.createImmutable((short)0777));
   }
   
+  @Test(timeout=30000)
+  public void testTrashPermission() throws Exception {
+    //  /BSS                  user1:group2 777
+    //   /BSS/user1            user1:group2 755
+    //   /BSS/user1/test       user1:group1 600
+    Path rootDir = new Path("/BSS");
+    Path user1Dir = new Path("/BSS/user1");
+    Path user1File = new Path("/BSS/user1/test");
+
+    try {
+      conf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "10");
+      fs = FileSystem.get(conf);
+
+      fs.mkdirs(rootDir);
+      fs.setPermission(rootDir, new FsPermission((short) 0777));
+
+      login(USER1);
+      fs.mkdirs(user1Dir);
+      fs.setPermission(user1Dir, new FsPermission((short) 0755));
+      fs.setOwner(user1Dir, USER1.getShortUserName(), GROUP2_NAME);
+
+      create(OpType.CREATE, user1File);
+      fs.setOwner(user1File, USER1.getShortUserName(), GROUP1_NAME);
+      fs.setPermission(user1File, new FsPermission((short) 0600));
+
+      try {
+        // login as user2, attempt to delete /BSS/user1
+        // this should fail because user2 has no permission to
+        // its sub directory.
+        login(USER2);
+        fs.delete(user1Dir, true);
+        fail("User2 should not be allowed to delete user1's dir.");
+      } catch (AccessControlException e) {
+        e.printStackTrace();
+        assertTrue("Permission denied messages must carry the username",
+            e.getMessage().contains(USER2_NAME));
+      }
+
+      // ensure the /BSS/user1 still exists
+      assertTrue(fs.exists(user1Dir));
+
+      try {
+        login(SUPERUSER);
+        Trash trash = new Trash(fs, conf);
+        Path trashRoot = trash.getCurrentTrashDir();
+        while(true) {
+          trashRoot = trashRoot.getParent();
+          if(trashRoot.getParent().isRoot()) {
+            break;
+          }
+        }
+        fs.mkdirs(trashRoot);
+        fs.setPermission(trashRoot, new FsPermission((short) 0777));
+
+        // login as user2, attempt to move /BSS/user1 to trash
+        // this should also fail otherwise the directory will be
+        // removed by trash emptier (emptier is running by superuser)
+        login(USER2);
+        Trash userTrash = new Trash(fs, conf);
+        assertTrue(userTrash.isEnabled());
+        userTrash.moveToTrash(user1Dir);
+        fail("User2 should not be allowed to move "
+            + "user1's dir to trash");
+      } catch (IOException e) {
+        // expect the exception is caused by permission denied
+        assertTrue(e.getCause() instanceof AccessControlException);
+        e.printStackTrace();
+        assertTrue("Permission denied messages must carry the username",
+            e.getCause().getMessage().contains(USER2_NAME));
+      }
+
+      // ensure /BSS/user1 still exists
+      assertTrue(fs.exists(user1Dir));
+    } finally {
+      login(SUPERUSER);
+      fs.delete(rootDir, true);
+      conf.set(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, "0");
+    }
+  }
+
   /* check if the ownership of a file/directory is set correctly */
   @Test
   public void testOwnership() throws Exception {