
HADOOP-10557. FsShell -cp -pa option for preserving extended ACLs. Contributed by Akira Ajisaka.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1603222 13f79535-47bb-0310-9956-ffa450edef68
Chris Nauroth, 11 years ago
parent commit 6e37dd331b
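
In short: cp's -p option gains an 'a' flag for ACLs. A quick usage sketch of the new flag (paths here are hypothetical):

    hdfs dfs -cp -ptopa /user/alice/src.txt /user/alice/dst.txt   # preserve timestamps, ownership, permission, ACLs
    hdfs dfs -cp -pa /user/alice/src.txt /user/alice/dst2.txt     # -pa alone still preserves permission,
                                                                  # since an ACL is a super-set of permission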

+ 3 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -433,6 +433,9 @@ Release 2.5.0 - UNRELEASED
     HADOOP-10666. Remove Copyright /d/d/d/d Apache Software Foundation from
     the source files license header. (Henry Saputra via wang)
 
+    HADOOP-10557. FsShell -cp -pa option for preserving extended ACLs.
+    (Akira Ajisaka via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES

+ 17 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java

@@ -25,6 +25,7 @@ import java.net.URISyntaxException;
 import java.util.EnumSet;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.NoSuchElementException;
@@ -39,6 +40,9 @@ import org.apache.hadoop.fs.PathIsDirectoryException;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.PathNotFoundException;
 import org.apache.hadoop.fs.PathOperationException;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclUtil;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
 
 /**
@@ -88,7 +92,7 @@ abstract class CommandWithDestination extends FsCommand {
   }
   
   protected static enum FileAttribute {
-    TIMESTAMPS, OWNERSHIP, PERMISSION, XATTR;
+    TIMESTAMPS, OWNERSHIP, PERMISSION, ACL, XATTR;
 
     public static FileAttribute getAttribute(char symbol) {
       for (FileAttribute attribute : values()) {
@@ -306,11 +310,22 @@ abstract class CommandWithDestination extends FsCommand {
           src.stat.getOwner(),
           src.stat.getGroup());
       }
-      if (shouldPreserve(FileAttribute.PERMISSION)) {
+      if (shouldPreserve(FileAttribute.PERMISSION) ||
+          shouldPreserve(FileAttribute.ACL)) {
         target.fs.setPermission(
           target.path,
           src.stat.getPermission());
       }
+      if (shouldPreserve(FileAttribute.ACL)) {
+        FsPermission perm = src.stat.getPermission();
+        if (perm.getAclBit()) {
+          List<AclEntry> srcEntries =
+              src.fs.getAclStatus(src.path).getEntries();
+          List<AclEntry> srcFullEntries =
+              AclUtil.getAclFromPermAndEntries(perm, srcEntries);
+          target.fs.setAcl(target.path, srcFullEntries);
+        }
+      }
       if (shouldPreserve(FileAttribute.XATTR)) {
         Map<String, byte[]> srcXAttrs = src.fs.getXAttrs(src.path);
         if (srcXAttrs != null) {
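
Taken together, the new branch means -pa reconstructs the source's full logical ACL (base entries derived from the permission bits plus the extended entries) and applies it to the target. A minimal standalone sketch of the same call sequence; AclPreserveSketch, preserveAcl, and the srcFs/dstFs handles and paths are hypothetical stand-ins for the shell's PathData fields:

    import java.util.List;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.AclEntry;
    import org.apache.hadoop.fs.permission.AclUtil;
    import org.apache.hadoop.fs.permission.FsPermission;

    class AclPreserveSketch {
      static void preserveAcl(FileSystem srcFs, Path src,
                              FileSystem dstFs, Path dst) throws Exception {
        FsPermission perm = srcFs.getFileStatus(src).getPermission();
        // -pa also preserves permission: the ACL embeds the permission bits.
        dstFs.setPermission(dst, perm);
        if (perm.getAclBit()) {  // source has extended ACL entries
          List<AclEntry> extended = srcFs.getAclStatus(src).getEntries();
          // Rebuild the full ACL (base user/group/other entries from the
          // permission bits, plus the extended entries) for setAcl.
          dstFs.setAcl(dst, AclUtil.getAclFromPermAndEntries(perm, extended));
        }
      }
    }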

+ 7 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java

@@ -133,16 +133,18 @@ class CopyCommands {
 
   static class Cp extends CommandWithDestination {
     public static final String NAME = "cp";
-    public static final String USAGE = "[-f] [-p | -p[topx]] <src> ... <dst>";
+    public static final String USAGE = "[-f] [-p | -p[topax]] <src> ... <dst>";
     public static final String DESCRIPTION =
       "Copy files that match the file pattern <src> to a " +
       "destination.  When copying multiple files, the destination " +
       "must be a directory. Passing -p preserves status " +
-      "[topx] (timestamps, ownership, permission, XAttr). " +
+      "[topax] (timestamps, ownership, permission, ACLs, XAttr). " +
       "If -p is specified with no <arg>, then preserves " +
       "If -p is specified with no <arg>, then preserves " +
-      "timestamps, ownership, permission. Passing -f " +
-      "overwrites the destination if it already exists.\n";
-    
+      "timestamps, ownership, permission. If -pa is specified, " +
+      "then preserves permission also because ACL is a super-set of " +
+      "permission. Passing -f overwrites the destination if it " +
+      "already exists.\n";
+
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
       popPreserveOption(args);

+ 5 - 3
hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm

@@ -159,7 +159,7 @@ count
 
 cp
 
-   Usage: <<<hdfs dfs -cp [-f] [-p | -p[topx]] URI [URI ...] <dest> >>>
+   Usage: <<<hdfs dfs -cp [-f] [-p | -p[topax]] URI [URI ...] <dest> >>>
 
    Copy files from source to destination. This command allows multiple sources
    as well in which case the destination must be a directory.
@@ -169,8 +169,10 @@ cp
       * The -f option will overwrite the destination if it already exists.
 
       * The -p option will preserve file attributes [topx] (timestamps, 
-        ownership, permission, XAttr). If -p is specified with no <arg>, 
-        then preserves timestamps, ownership, permission.
+        ownership, permission, ACL, XAttr). If -p is specified with no <arg>,
+        then preserves timestamps, ownership, permission. If -pa is specified,
+        then preserves permission also because ACL is a super-set of
+        permission.
 
    Example:
 

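A hedged way to check the documented behavior end to end (paths hypothetical; -getfacl is the shell's ACL listing command):

    hdfs dfs -getfacl /user/alice/srcfile
    hdfs dfs -cp -pa /user/alice/srcfile /user/alice/dstfile
    hdfs dfs -getfacl /user/alice/dstfile    # entries should match the source's
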
+ 8 - 4
hadoop-common-project/hadoop-common/src/test/resources/testConf.xml

@@ -296,7 +296,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-cp \[-f\] \[-p \| -p\[topx\]\] &lt;src&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
+          <expected-output>^-cp \[-f\] \[-p \| -p\[topax\]\] &lt;src&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -308,15 +308,19 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*\[topx\] \(timestamps, ownership, permission, XAttr\). If -p is specified with no( )*</expected-output>
+          <expected-output>^( |\t)*\[topax\] \(timestamps, ownership, permission, ACLs, XAttr\). If -p is specified( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*&lt;arg&gt;, then preserves timestamps, ownership, permission. Passing -f overwrites( )*</expected-output>
+          <expected-output>^( |\t)*with no &lt;arg&gt;, then preserves timestamps, ownership, permission. If -pa is( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*the destination if it already exists.( )*</expected-output>
+          <expected-output>^( |\t)*specified, then preserves permission also because ACL is a super-set of( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*permission. Passing -f overwrites the destination if it already exists.( )*</expected-output>
         </comparator>
       </comparators>
     </test>

+ 162 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java

@@ -34,6 +34,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -54,10 +55,16 @@ import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.*;
 
+import com.google.common.collect.Lists;
+
 /**
  * This class tests commands from DFSShell.
  */
@@ -1621,11 +1628,13 @@ public class TestDFSShell {
     assertEquals("expected to fail -1", res , -1);
     assertEquals("expected to fail -1", res , -1);
   }
   }
   
   
-  // Preserve Copy Option is -ptopx (timestamps, ownership, permission, XATTR)
+  // Preserve Copy Option is -ptopxa (timestamps, ownership, permission, XATTR,
+  // ACLs)
   @Test (timeout = 120000)
   public void testCopyCommandsWithPreserveOption() throws Exception {
     Configuration conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .format(true).build();
     FsShell shell = null;
@@ -1638,6 +1647,14 @@ public class TestDFSShell {
       fs.mkdirs(hdfsTestDir);
       Path src = new Path(hdfsTestDir, "srcfile");
       fs.create(src).close();
+
+      fs.setAcl(src, Lists.newArrayList(
+          aclEntry(ACCESS, USER, ALL),
+          aclEntry(ACCESS, USER, "foo", ALL),
+          aclEntry(ACCESS, GROUP, READ_EXECUTE),
+          aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
+          aclEntry(ACCESS, OTHER, EXECUTE)));
+
       FileStatus status = fs.getFileStatus(src);
       final long mtime = status.getModificationTime();
       final long atime = status.getAccessTime();
@@ -1661,41 +1678,93 @@ public class TestDFSShell {
       assertEquals(atime, targetStatus.getAccessTime());
       assertEquals(owner, targetStatus.getOwner());
       assertEquals(group, targetStatus.getGroup());
-      assertTrue(perm.equals(targetStatus.getPermission()));
+      FsPermission targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
       Map<String, byte[]> xattrs = fs.getXAttrs(target1);
       assertTrue(xattrs.isEmpty());
+      List<AclEntry> acls = fs.getAclStatus(target1).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
 
       // -ptop
       Path target2 = new Path(hdfsTestDir, "targetfile2");
       argv = new String[] { "-cp", "-ptop", src.toUri().toString(), 
           target2.toUri().toString() };
       ret = ToolRunner.run(shell, argv);
-      assertEquals("cp -p is not working", SUCCESS, ret);
-      targetStatus = fs.getFileStatus(target1);
+      assertEquals("cp -ptop is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target2);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
       assertEquals(owner, targetStatus.getOwner());
       assertEquals(group, targetStatus.getGroup());
-      assertTrue(perm.equals(targetStatus.getPermission()));
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
       xattrs = fs.getXAttrs(target2);
       assertTrue(xattrs.isEmpty());
-      
+      acls = fs.getAclStatus(target2).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
       // -ptopx
       Path target3 = new Path(hdfsTestDir, "targetfile3");
       argv = new String[] { "-cp", "-ptopx", src.toUri().toString(), 
           target3.toUri().toString() };
       ret = ToolRunner.run(shell, argv);
-      assertEquals("cp -p is not working", SUCCESS, ret);
-      targetStatus = fs.getFileStatus(target1);
+      assertEquals("cp -ptopx is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target3);
       assertEquals(mtime, targetStatus.getModificationTime());
       assertEquals(atime, targetStatus.getAccessTime());
       assertEquals(owner, targetStatus.getOwner());
       assertEquals(group, targetStatus.getGroup());
-      assertTrue(perm.equals(targetStatus.getPermission()));
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
       xattrs = fs.getXAttrs(target3);
       assertEquals(xattrs.size(), 2);
       assertArrayEquals(new byte[]{0x31, 0x32, 0x33}, xattrs.get("user.a1"));
       assertArrayEquals(new byte[]{0x31, 0x31, 0x31}, xattrs.get("trusted.a1"));
+      acls = fs.getAclStatus(target3).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptopa
+      Path target4 = new Path(hdfsTestDir, "targetfile4");
+      argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
+          target4.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptopa is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target4);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(target4);
+      assertTrue(xattrs.isEmpty());
+      acls = fs.getAclStatus(target4).getEntries();
+      assertFalse(acls.isEmpty());
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(src), fs.getAclStatus(target4));
+
+      // -ptoa (verify -pa option will preserve permissions also)
+      Path target5 = new Path(hdfsTestDir, "targetfile5");
+      argv = new String[] { "-cp", "-ptoa", src.toUri().toString(),
+          target5.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptoa is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target5);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(target5);
+      assertTrue(xattrs.isEmpty());
+      acls = fs.getAclStatus(target5).getEntries();
+      assertFalse(acls.isEmpty());
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(src), fs.getAclStatus(target5));
     } finally {
       if (null != shell) {
         shell.close();
@@ -1709,6 +1778,90 @@ public class TestDFSShell {
     }
   }
 
+  // Verify cp -pa option will preserve both ACL and sticky bit.
+  @Test (timeout = 120000)
+  public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .format(true).build();
+    FsShell shell = null;
+    FileSystem fs = null;
+    final String testdir =
+        "/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-"
+        + counter.getAndIncrement();
+    final Path hdfsTestDir = new Path(testdir);
+    try {
+      fs = cluster.getFileSystem();
+      fs.mkdirs(hdfsTestDir);
+      Path src = new Path(hdfsTestDir, "srcfile");
+      fs.create(src).close();
+
+      fs.setAcl(src, Lists.newArrayList(
+          aclEntry(ACCESS, USER, ALL),
+          aclEntry(ACCESS, USER, "foo", ALL),
+          aclEntry(ACCESS, GROUP, READ_EXECUTE),
+          aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
+          aclEntry(ACCESS, OTHER, EXECUTE)));
+      // set sticky bit
+      fs.setPermission(src,
+          new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
+
+      FileStatus status = fs.getFileStatus(src);
+      final long mtime = status.getModificationTime();
+      final long atime = status.getAccessTime();
+      final String owner = status.getOwner();
+      final String group = status.getGroup();
+      final FsPermission perm = status.getPermission();
+
+      shell = new FsShell(conf);
+
+      // -p preserves sticky bit and doesn't preserve ACL
+      Path target1 = new Path(hdfsTestDir, "targetfile1");
+      String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
+          target1.toUri().toString() };
+      int ret = ToolRunner.run(shell, argv);
+      assertEquals("cp is not working", SUCCESS, ret);
+      FileStatus targetStatus = fs.getFileStatus(target1);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      FsPermission targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      List<AclEntry> acls = fs.getAclStatus(target1).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptopa preserves both sticky bit and ACL
+      Path target2 = new Path(hdfsTestDir, "targetfile2");
+      argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
+          target2.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptopa is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target2);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      acls = fs.getAclStatus(target2).getEntries();
+      assertFalse(acls.isEmpty());
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(src), fs.getAclStatus(target2));
+    } finally {
+      if (null != shell) {
+        shell.close();
+      }
+      if (null != fs) {
+        fs.delete(hdfsTestDir, true);
+        fs.close();
+      }
+      cluster.shutdown();
+    }
+  }
+
   // force Copy Option is -f
   @Test (timeout = 30000)
   public void testCopyCommandsWithForceOption() throws Exception {