
Merge trunk r1595999 to branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2006@1596000 13f79535-47bb-0310-9956-ffa450edef68
Andrew Wang 11 years ago
parent
commit
330f986a4e
74 changed files with 2033 additions and 561 deletions
  1. .gitignore (+2 -0)
  2. hadoop-common-project/hadoop-common/CHANGES.txt (+7 -0)
  3. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java (+12 -6)
  4. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java (+15 -0)
  5. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java (+134 -0)
  6. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ScopedAclEntries.java (+5 -3)
  7. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java (+22 -77)
  8. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java (+9 -7)
  9. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java (+24 -0)
  10. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java (+19 -3)
  11. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java (+72 -22)
  12. hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java (+45 -0)
  13. hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java (+47 -16)
  14. hadoop-common-project/hadoop-nfs/src/test/resources/log4j.properties (+18 -0)
  15. hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java (+4 -4)
  16. hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java (+3 -3)
  17. hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java (+13 -6)
  18. hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (+4 -3)
  19. hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (+32 -0)
  20. hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs (+1 -1)
  21. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (+8 -5)
  22. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java (+68 -2)
  23. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FsAclPermission.java (+14 -0)
  24. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java (+1 -1)
  25. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java (+25 -17)
  26. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java (+1 -9)
  27. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java (+23 -27)
  28. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java (+8 -43)
  29. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java (+1 -0)
  30. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java (+22 -4)
  31. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (+23 -9)
  32. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java (+8 -2)
  33. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java (+1 -1)
  34. hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_trash.c (+196 -79)
  35. hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml (+11 -0)
  36. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java (+71 -0)
  37. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java (+30 -20)
  38. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java (+6 -0)
  39. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java (+40 -0)
  40. hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml (+24 -0)
  41. hadoop-mapreduce-project/CHANGES.txt (+5 -0)
  42. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/preemption/KillAMPreemptionPolicy.java (+13 -6)
  43. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKillAMPreemptionPolicy.java (+144 -0)
  44. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapredAppMasterRest.apt.vm (+0 -2)
  45. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/apt/HistoryServerRest.apt.vm (+4 -6)
  46. hadoop-project/src/site/site.xml (+5 -2)
  47. hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java (+27 -7)
  48. hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java (+153 -0)
  49. hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java (+6 -1)
  50. hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java (+1 -0)
  51. hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java (+4 -2)
  52. hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java (+1 -1)
  53. hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java (+24 -35)
  54. hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java (+3 -3)
  55. hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java (+10 -5)
  56. hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/UniformSizeInputFormat.java (+8 -7)
  57. hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java (+3 -2)
  58. hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java (+2 -2)
  59. hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java (+80 -3)
  60. hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/StubContext.java (+11 -11)
  61. hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestCopyListing.java (+2 -3)
  62. hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithAcls.java (+329 -0)
  63. hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestFileBasedCopyListing.java (+1 -2)
  64. hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestGlobbedCopyListing.java (+1 -2)
  65. hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java (+8 -2)
  66. hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java (+66 -53)
  67. hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestUniformSizeInputFormat.java (+4 -3)
  68. hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/lib/TestDynamicInputFormat.java (+5 -5)
  69. hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java (+3 -1)
  70. hadoop-yarn-project/CHANGES.txt (+12 -6)
  71. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java (+2 -2)
  72. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestGetApplicationsRequest.java (+8 -6)
  73. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java (+6 -3)
  74. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java (+13 -8)

+ 2 - 0
.gitignore

@@ -1,6 +1,8 @@
 *.iml
 *.ipr
 *.iws
+*.orig
+*.rej
 .idea
 .svn
 .classpath

+ 7 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -328,6 +328,8 @@ Trunk (Unreleased)
 
     HADOOP-10583. bin/hadoop key throws NPE with no args and assorted other fixups. (clamb via tucu)
 
+    HADOOP-10586. KeyShell doesn't allow setting Options via CLI. (clamb via tucu)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -388,6 +390,11 @@ Release 2.5.0 - UNRELEASED
     HADOOP-10572. Example NFS mount command must pass noacl as it isn't
     supported by the server yet. (Harsh J via brandonli)
 
+    HADOOP-10609. .gitignore should ignore .orig and .rej files. (kasha)
+
+    HADOOP-10614. CBZip2InputStream is not threadsafe (Xiangrui Meng via
+    Sandy Ryza)
+
   OPTIMIZATIONS
 
   BUG FIXES 

+ 12 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java

@@ -89,6 +89,8 @@ public class KeyShell extends Configured implements Tool {
   * @throws IOException
   */
  private int init(String[] args) throws IOException {
+    final Options options = KeyProvider.options(getConf());
+
    for (int i = 0; i < args.length; i++) { // parse command line
      boolean moreTokens = (i < args.length - 1);
      if (args[i].equals("create")) {
@@ -97,7 +99,7 @@ public class KeyShell extends Configured implements Tool {
          keyName = args[++i];
        }
 
-        command = new CreateCommand(keyName);
+        command = new CreateCommand(keyName, options);
        if ("--help".equals(keyName)) {
          printKeyShellUsage();
          return -1;
@@ -127,9 +129,11 @@ public class KeyShell extends Configured implements Tool {
      } else if ("list".equals(args[i])) {
        command = new ListCommand();
      } else if ("--size".equals(args[i]) && moreTokens) {
-        getConf().set(KeyProvider.DEFAULT_BITLENGTH_NAME, args[++i]);
+        options.setBitLength(Integer.parseInt(args[++i]));
      } else if ("--cipher".equals(args[i]) && moreTokens) {
-        getConf().set(KeyProvider.DEFAULT_CIPHER_NAME, args[++i]);
+        options.setCipher(args[++i]);
+      } else if ("--description".equals(args[i]) && moreTokens) {
+        options.setDescription(args[++i]);
      } else if ("--provider".equals(args[i]) && moreTokens) {
        userSuppliedProvider = true;
        getConf().set(KeyProviderFactory.KEY_PROVIDER_PATH, args[++i]);
@@ -399,6 +403,7 @@ public class KeyShell extends Configured implements Tool {
  private class CreateCommand extends Command {
    public static final String USAGE =
      "create <keyname> [--cipher <cipher>] [--size <size>]\n" +
+      "                     [--description <description>]\n" +
      "                     [--provider <provider>] [--help]";
    public static final String DESC =
      "The create subcommand creates a new key for the name specified\n" +
@@ -408,10 +413,12 @@ public class KeyShell extends Configured implements Tool {
      "The default keysize is 256. You may specify the requested key\n" +
      "length using the --size argument.\n";
 
-    String keyName = null;
+    final String keyName;
+    final Options options;
 
-    public CreateCommand(String keyName) {
+    public CreateCommand(String keyName, Options options) {
      this.keyName = keyName;
+      this.options = options;
    }
 
    public boolean validate() {
@@ -434,7 +441,6 @@ public class KeyShell extends Configured implements Tool {
    public void execute() throws IOException, NoSuchAlgorithmException {
      warnIfTransientProvider();
      try {
-        Options options = KeyProvider.options(getConf());
        provider.createKey(keyName, options);
        out.println(keyName + " has been successfully created.");
        provider.flush();
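
Side note: a minimal sketch of exercising the new --description flag through the Tool interface, modeled on the TestKeyShell case added below; the class name, provider URI and key name here are illustrative only, not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyShell;

public class KeyShellDescriptionSketch {
  public static void main(String[] args) throws Exception {
    KeyShell ks = new KeyShell();
    ks.setConf(new Configuration());
    // init() now collects --size/--cipher/--description into a single
    // KeyProvider.Options instance and hands it to CreateCommand.
    int rc = ks.run(new String[] {"create", "key1",
        "--provider", "jceks://file/tmp/keystore.jceks",
        "--description", "someDescription"});
    System.out.println("exit code: " + rc);
  }
}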

+ 15 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java

@@ -99,6 +99,21 @@ public class FileStatus implements Writable, Comparable {
     assert (isdir && symlink == null) || !isdir;
   }
 
+  /**
+   * Copy constructor.
+   *
+   * @param other FileStatus to copy
+   */
+  public FileStatus(FileStatus other) throws IOException {
+    // It's important to call the getters here instead of directly accessing the
+    // members.  Subclasses like ViewFsFileStatus can override the getters.
+    this(other.getLen(), other.isDirectory(), other.getReplication(),
+      other.getBlockSize(), other.getModificationTime(), other.getAccessTime(),
+      other.getPermission(), other.getOwner(), other.getGroup(),
+      (other.isSymlink() ? other.getSymlink() : null),
+      other.getPath());
+  }
+
   /**
    * Get the length of this file, in bytes.
    * @return the length of this file, in bytes.
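
A small usage sketch of the new copy constructor; the path, permission bits and sizes below are made-up example values, not taken from the patch.

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class FileStatusCopySketch {
  public static void main(String[] args) throws IOException {
    FileStatus original = new FileStatus(1024L, false, 3, 134217728L,
        System.currentTimeMillis(), System.currentTimeMillis(),
        new FsPermission((short) 0644), "hdfs", "supergroup",
        new Path("/tmp/example.txt"));
    // The copy constructor reads through the getters, so subclasses such as
    // ViewFsFileStatus are copied with their overridden values.
    FileStatus copy = new FileStatus(original);
    System.out.println(copy.getPath() + " len=" + copy.getLen());
  }
}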

+ 134 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclUtil.java

@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.permission;
+
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import com.google.common.collect.Lists;
+
+/**
+ * AclUtil contains utility methods for manipulating ACLs.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+public final class AclUtil {
+
+  /**
+   * Given permissions and extended ACL entries, returns the full logical ACL.
+   *
+   * @param perm FsPermission containing permissions
+   * @param entries List<AclEntry> containing extended ACL entries
+   * @return List<AclEntry> containing full logical ACL
+   */
+  public static List<AclEntry> getAclFromPermAndEntries(FsPermission perm,
+      List<AclEntry> entries) {
+    List<AclEntry> acl = Lists.newArrayListWithCapacity(entries.size() + 3);
+
+    // Owner entry implied by owner permission bits.
+    acl.add(new AclEntry.Builder()
+      .setScope(AclEntryScope.ACCESS)
+      .setType(AclEntryType.USER)
+      .setPermission(perm.getUserAction())
+      .build());
+
+    // All extended access ACL entries.
+    boolean hasAccessAcl = false;
+    Iterator<AclEntry> entryIter = entries.iterator();
+    AclEntry curEntry = null;
+    while (entryIter.hasNext()) {
+      curEntry = entryIter.next();
+      if (curEntry.getScope() == AclEntryScope.DEFAULT) {
+        break;
+      }
+      hasAccessAcl = true;
+      acl.add(curEntry);
+    }
+
+    // Mask entry implied by group permission bits, or group entry if there is
+    // no access ACL (only default ACL).
+    acl.add(new AclEntry.Builder()
+      .setScope(AclEntryScope.ACCESS)
+      .setType(hasAccessAcl ? AclEntryType.MASK : AclEntryType.GROUP)
+      .setPermission(perm.getGroupAction())
+      .build());
+
+    // Other entry implied by other bits.
+    acl.add(new AclEntry.Builder()
+      .setScope(AclEntryScope.ACCESS)
+      .setType(AclEntryType.OTHER)
+      .setPermission(perm.getOtherAction())
+      .build());
+
+    // Default ACL entries.
+    if (curEntry != null && curEntry.getScope() == AclEntryScope.DEFAULT) {
+      acl.add(curEntry);
+      while (entryIter.hasNext()) {
+        acl.add(entryIter.next());
+      }
+    }
+
+    return acl;
+  }
+
+  /**
+   * Translates the given permission bits to the equivalent minimal ACL.
+   *
+   * @param perm FsPermission to translate
+   * @return List<AclEntry> containing exactly 3 entries representing the owner,
+   *   group and other permissions
+   */
+  public static List<AclEntry> getMinimalAcl(FsPermission perm) {
+    return Lists.newArrayList(
+      new AclEntry.Builder()
+        .setScope(AclEntryScope.ACCESS)
+        .setType(AclEntryType.USER)
+        .setPermission(perm.getUserAction())
+        .build(),
+      new AclEntry.Builder()
+        .setScope(AclEntryScope.ACCESS)
+        .setType(AclEntryType.GROUP)
+        .setPermission(perm.getGroupAction())
+        .build(),
+      new AclEntry.Builder()
+        .setScope(AclEntryScope.ACCESS)
+        .setType(AclEntryType.OTHER)
+        .setPermission(perm.getOtherAction())
+        .build());
+  }
+
+  /**
+   * Checks if the given entries represent a minimal ACL (contains exactly 3
+   * entries).
+   *
+   * @param entries List<AclEntry> entries to check
+   * @return boolean true if the entries represent a minimal ACL
+   */
+  public static boolean isMinimalAcl(List<AclEntry> entries) {
+    return entries.size() == 3;
+  }
+
+  /**
+   * There is no reason to instantiate this class.
+   */
+  private AclUtil() {
+  }
+}
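
A brief usage sketch of the new helpers; the permission bits are arbitrary example values.

import java.util.List;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsPermission;

public class AclUtilSketch {
  public static void main(String[] args) {
    FsPermission perm = new FsPermission((short) 0640);
    // With no extended entries, the full logical ACL is just the three
    // base entries implied by the permission bits.
    List<AclEntry> minimal = AclUtil.getMinimalAcl(perm);
    System.out.println(minimal);
    System.out.println(AclUtil.isMinimalAcl(minimal)); // true, size == 3
  }
}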

+ 5 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ScopedAclEntries.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/ScopedAclEntries.java

@@ -15,12 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.fs.permission;
 
 import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 
@@ -28,8 +29,9 @@ import org.apache.hadoop.fs.permission.AclEntryScope;
  * Groups a list of ACL entries into separate lists for access entries vs.
  * default entries.
  */
-@InterfaceAudience.Private
-final class ScopedAclEntries {
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+public final class ScopedAclEntries {
   private static final int PIVOT_NOT_FOUND = -1;
 
   private final List<AclEntry> accessEntries;

+ 22 - 77
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java

@@ -18,7 +18,7 @@
 package org.apache.hadoop.fs.shell;
 
 import java.io.IOException;
-import java.util.Iterator;
+import java.util.Collections;
 import java.util.LinkedList;
 import java.util.List;
 
@@ -31,8 +31,10 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.ScopedAclEntries;
 
 /**
  * Acl related operations
@@ -84,67 +86,34 @@ class AclCommands extends FsCommand {
           (perm.getOtherAction().implies(FsAction.EXECUTE) ? "t" : "T"));
       }
 
-      if (perm.getAclBit()) {
-        AclStatus aclStatus = item.fs.getAclStatus(item.path);
-        List<AclEntry> entries = aclStatus.getEntries();
-        printExtendedAcl(perm, entries);
-      } else {
-        printMinimalAcl(perm);
-      }
-
+      List<AclEntry> entries = perm.getAclBit() ?
+        item.fs.getAclStatus(item.path).getEntries() :
+        Collections.<AclEntry>emptyList();
+      ScopedAclEntries scopedEntries = new ScopedAclEntries(
+        AclUtil.getAclFromPermAndEntries(perm, entries));
+      printAclEntriesForSingleScope(scopedEntries.getAccessEntries());
+      printAclEntriesForSingleScope(scopedEntries.getDefaultEntries());
       out.println();
     }
 
     /**
-     * Prints an extended ACL, including all extended ACL entries and also the
-     * base entries implied by the permission bits.
+     * Prints all the ACL entries in a single scope.
      *
-     * @param perm FsPermission of file
      * @param entries List<AclEntry> containing ACL entries of file
     */
-    private void printExtendedAcl(FsPermission perm, List<AclEntry> entries) {
-      // Print owner entry implied by owner permission bits.
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.USER)
-        .setPermission(perm.getUserAction())
-        .build());
-
-      // Print all extended access ACL entries.
-      boolean hasAccessAcl = false;
-      Iterator<AclEntry> entryIter = entries.iterator();
-      AclEntry curEntry = null;
-      while (entryIter.hasNext()) {
-        curEntry = entryIter.next();
-        if (curEntry.getScope() == AclEntryScope.DEFAULT) {
-          break;
-        }
-        hasAccessAcl = true;
-        printExtendedAclEntry(curEntry, perm.getGroupAction());
+    private void printAclEntriesForSingleScope(List<AclEntry> entries) {
+      if (entries.isEmpty()) {
+        return;
       }
-
-      // Print mask entry implied by group permission bits, or print group entry
-      // if there is no access ACL (only default ACL).
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(hasAccessAcl ? AclEntryType.MASK : AclEntryType.GROUP)
-        .setPermission(perm.getGroupAction())
-        .build());
-
-      // Print other entry implied by other bits.
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.OTHER)
-        .setPermission(perm.getOtherAction())
-        .build());
-
-      // Print default ACL entries.
-      if (curEntry != null && curEntry.getScope() == AclEntryScope.DEFAULT) {
-        out.println(curEntry);
-        // ACL sort order guarantees default mask is the second-to-last entry.
+      if (AclUtil.isMinimalAcl(entries)) {
+        for (AclEntry entry: entries) {
+          out.println(entry);
+        }
+      } else {
+        // ACL sort order guarantees mask is the second-to-last entry.
         FsAction maskPerm = entries.get(entries.size() - 2).getPermission();
-        while (entryIter.hasNext()) {
-          printExtendedAclEntry(entryIter.next(), maskPerm);
+        for (AclEntry entry: entries) {
+          printExtendedAclEntry(entry, maskPerm);
         }
       }
     }
@@ -172,30 +141,6 @@ class AclCommands extends FsCommand {
         out.println(entry);
       }
     }
-
-    /**
-     * Prints a minimal ACL, consisting of exactly 3 ACL entries implied by the
-     * permission bits.
-     *
-     * @param perm FsPermission of file
-     */
-    private void printMinimalAcl(FsPermission perm) {
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.USER)
-        .setPermission(perm.getUserAction())
-        .build());
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.GROUP)
-        .setPermission(perm.getGroupAction())
-        .build());
-      out.println(new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.OTHER)
-        .setPermission(perm.getOtherAction())
-        .build());
-    }
   }
 
   /**

+ 9 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java

@@ -129,7 +129,7 @@ public class CBZip2InputStream extends InputStream implements BZip2Constants {
   private int computedBlockCRC, computedCombinedCRC;
 
   private boolean skipResult = false;// used by skipToNextMarker
-  private static boolean skipDecompression = false;
+  private boolean skipDecompression = false;
 
   // Variables used by setup* methods exclusively
 
@@ -281,12 +281,18 @@
   */
   public CBZip2InputStream(final InputStream in, READ_MODE readMode)
       throws IOException {
+    this(in, readMode, false);
+  }
+
+  private CBZip2InputStream(final InputStream in, READ_MODE readMode, boolean skipDecompression)
+      throws IOException {
 
     super();
     int blockSize = 0X39;// i.e 9
     this.blockSize100k = blockSize - '0';
     this.in = new BufferedInputStream(in, 1024 * 9);// >1 MB buffer
     this.readMode = readMode;
+    this.skipDecompression = skipDecompression;
     if (readMode == READ_MODE.CONTINUOUS) {
       currentState = STATE.START_BLOCK_STATE;
       lazyInitialization = (in.available() == 0)?true:false;
@@ -316,11 +322,7 @@
    *
    */
   public static long numberOfBytesTillNextMarker(final InputStream in) throws IOException{
-    CBZip2InputStream.skipDecompression = true;
-    CBZip2InputStream anObject = null;
-
-    anObject = new CBZip2InputStream(in, READ_MODE.BYBLOCK);
-
+    CBZip2InputStream anObject = new CBZip2InputStream(in, READ_MODE.BYBLOCK, true);
     return anObject.getProcessedByteCount();
   }
 
@@ -397,7 +399,7 @@
 
     if(skipDecompression){
       changeStateToProcessABlock();
-      CBZip2InputStream.skipDecompression = false;
+      skipDecompression = false;
     }
 
     final int hi = offs + len;

+ 24 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java

@@ -111,6 +111,30 @@ public class TestKeyShell {
     assertFalse(outContent.toString(), outContent.toString().contains("key1"));
   }
   
+  /* HADOOP-10586 KeyShell didn't allow -description. */
+  @Test
+  public void testKeySuccessfulCreationWithDescription() throws Exception {
+    outContent.reset();
+    String[] args1 = {"create", "key1", "--provider",
+                      "jceks://file" + tmpDir + "/keystore.jceks",
+                      "--description", "someDescription"};
+    int rc = 0;
+    KeyShell ks = new KeyShell();
+    ks.setConf(new Configuration());
+    rc = ks.run(args1);
+    assertEquals(0, rc);
+    assertTrue(outContent.toString().contains("key1 has been successfully " +
+        "created."));
+
+    outContent.reset();
+    String[] args2a = {"list", "--metadata", "--provider",
+                      "jceks://file" + tmpDir + "/keystore.jceks"};
+    rc = ks.run(args2a);
+    assertEquals(0, rc);
+    assertTrue(outContent.toString().contains("description"));
+    assertTrue(outContent.toString().contains("someDescription"));
+  }
+
   @Test
   public void testInvalidKeySize() throws Exception {
     String[] args1 = {"create", "key1", "--size", "56", "--provider", 

+ 19 - 3
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java

@@ -113,7 +113,23 @@ public class IdUserGroup {
           "The new entry is to be ignored for the following reason.",
           DUPLICATE_NAME_ID_DEBUG_INFO));
   }
-      
+
+  /**
+   * uid and gid are defined as uint32 in linux. Some systems create
+   * (intended or unintended) <nfsnobody, 4294967294> kind of <name,Id>
+   * mapping, where 4294967294 is 2**32-2 as unsigned int32. As an example,
+   *   https://bugzilla.redhat.com/show_bug.cgi?id=511876.
+   * Because user or group id are treated as Integer (signed integer or int32)
+   * here, the number 4294967294 is out of range. The solution is to convert
+   * uint32 to int32, so to map the out-of-range ID to the negative side of
+   * Integer, e.g. 4294967294 maps to -2 and 4294967295 maps to -1.
+   */
+  private static Integer parseId(final String idStr) {
+    Long longVal = Long.parseLong(idStr);
+    int intVal = longVal.intValue();
+    return Integer.valueOf(intVal);
+  }
+  
   /**
    * Get the whole list of users and groups and save them in the maps.
    * @throws IOException 
@@ -134,8 +150,8 @@ public class IdUserGroup {
         }
         LOG.debug("add to " + mapName + "map:" + nameId[0] + " id:" + nameId[1]);
         // HDFS can't differentiate duplicate names with simple authentication
-        final Integer key = Integer.valueOf(nameId[1]);
-        final String value = nameId[0];        
+        final Integer key = parseId(nameId[1]);        
+        final String value = nameId[0];
         if (map.containsKey(key)) {
           final String prevValue = map.get(key);
           if (value.equals(prevValue)) {
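
The uint32-to-int32 mapping that parseId() relies on is an ordinary narrowing conversion; a tiny standalone sketch of the arithmetic (class name is mine, for illustration only):

public class Uint32ToInt32Sketch {
  public static void main(String[] args) {
    // nfsnobody on some systems is 4294967294 == 2^32 - 2, which does not fit
    // in a signed int. Narrowing wraps it onto the negative side, exactly as
    // parseId() does via Long.intValue().
    System.out.println((int) 4294967294L); // -2
    System.out.println((int) 4294967295L); // -1
    System.out.println((int) 2147483648L); // -2147483648 (2^31 wraps to Integer.MIN_VALUE)
  }
}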

+ 72 - 22
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java

@@ -19,11 +19,14 @@ package org.apache.hadoop.oncrpc;
 
 import java.io.IOException;
 import java.net.DatagramSocket;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
 import org.apache.hadoop.oncrpc.security.Verifier;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.apache.hadoop.portmap.PortmapMapping;
 import org.apache.hadoop.portmap.PortmapRequest;
 import org.jboss.netty.buffer.ChannelBuffer;
@@ -37,7 +40,7 @@ import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
  * and implement {@link #handleInternal} to handle the requests received.
  */
 public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
-  private static final Log LOG = LogFactory.getLog(RpcProgram.class);
+  static final Log LOG = LogFactory.getLog(RpcProgram.class);
   public static final int RPCB_PORT = 111;
   private final String program;
   private final String host;
@@ -45,6 +48,7 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
   private final int progNumber;
   private final int lowProgVersion;
   private final int highProgVersion;
+  private final boolean allowInsecurePorts;
   
   /**
    * If not null, this will be used as the socket to use to connect to the
@@ -61,10 +65,14 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
    * @param progNumber program number as defined in RFC 1050
    * @param lowProgVersion lowest version of the specification supported
    * @param highProgVersion highest version of the specification supported
+   * @param DatagramSocket registrationSocket if not null, use this socket to
+   *        register with portmap daemon
+   * @param allowInsecurePorts true to allow client connections from
+   *        unprivileged ports, false otherwise
    */
   protected RpcProgram(String program, String host, int port, int progNumber,
       int lowProgVersion, int highProgVersion,
-      DatagramSocket registrationSocket) {
+      DatagramSocket registrationSocket, boolean allowInsecurePorts) {
     this.program = program;
     this.host = host;
     this.port = port;
@@ -72,6 +80,9 @@ public abstract class RpcProgram extends SimpleChannelUpstreamHandler {
     this.lowProgVersion = lowProgVersion;
     this.highProgVersion = highProgVersion;
     this.registrationSocket = registrationSocket;
+    this.allowInsecurePorts = allowInsecurePorts;
+    LOG.info("Will " + (allowInsecurePorts ? "" : "not ") + "accept client "
+        + "connections from unprivileged ports");
   }
 
   /**
@@ -133,43 +144,82 @@
       throws Exception {
     RpcInfo info = (RpcInfo) e.getMessage();
     RpcCall call = (RpcCall) info.header();
+    
+    SocketAddress remoteAddress = info.remoteAddress();
+    if (!allowInsecurePorts) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Will not allow connections from unprivileged ports. " +
+            "Checking for valid client port...");
+      }
+      if (remoteAddress instanceof InetSocketAddress) {
+        InetSocketAddress inetRemoteAddress = (InetSocketAddress) remoteAddress;
+        if (inetRemoteAddress.getPort() > 1023) {
+          LOG.warn("Connection attempted from '" + inetRemoteAddress + "' "
+              + "which is an unprivileged port. Rejecting connection.");
+          sendRejectedReply(call, remoteAddress, ctx);
+          return;
+        } else {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Accepting connection from '" + remoteAddress + "'");
+          }
+        }
+      } else {
+        LOG.warn("Could not determine remote port of socket address '" +
+            remoteAddress + "'. Rejecting connection.");
+        sendRejectedReply(call, remoteAddress, ctx);
+        return;
+      }
+    }
+    
     if (LOG.isTraceEnabled()) {
       LOG.trace(program + " procedure #" + call.getProcedure());
     }
     
     if (this.progNumber != call.getProgram()) {
       LOG.warn("Invalid RPC call program " + call.getProgram());
-      RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
-          AcceptState.PROG_UNAVAIL, Verifier.VERIFIER_NONE);
-
-      XDR out = new XDR();
-      reply.write(out);
-      ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
-          .buffer());
-      RpcResponse rsp = new RpcResponse(b, info.remoteAddress());
-      RpcUtil.sendRpcResponse(ctx, rsp);
+      sendAcceptedReply(call, remoteAddress, AcceptState.PROG_UNAVAIL, ctx);
       return;
     }
 
     int ver = call.getVersion();
     if (ver < lowProgVersion || ver > highProgVersion) {
       LOG.warn("Invalid RPC call version " + ver);
-      RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
-          AcceptState.PROG_MISMATCH, Verifier.VERIFIER_NONE);
-
-      XDR out = new XDR();
-      reply.write(out);
-      out.writeInt(lowProgVersion);
-      out.writeInt(highProgVersion);
-      ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
-          .buffer());
-      RpcResponse rsp = new RpcResponse(b, info.remoteAddress());
-      RpcUtil.sendRpcResponse(ctx, rsp);
+      sendAcceptedReply(call, remoteAddress, AcceptState.PROG_MISMATCH, ctx);
       return;
     }
     
     handleInternal(ctx, info);
   }
+  
+  private void sendAcceptedReply(RpcCall call, SocketAddress remoteAddress,
+      AcceptState acceptState, ChannelHandlerContext ctx) {
+    RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
+        acceptState, Verifier.VERIFIER_NONE);
+
+    XDR out = new XDR();
+    reply.write(out);
+    if (acceptState == AcceptState.PROG_MISMATCH) {
+      out.writeInt(lowProgVersion);
+      out.writeInt(highProgVersion);
+    }
+    ChannelBuffer b = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
+        .buffer());
+    RpcResponse rsp = new RpcResponse(b, remoteAddress);
+    RpcUtil.sendRpcResponse(ctx, rsp);
+  }
+  
+  private static void sendRejectedReply(RpcCall call,
+      SocketAddress remoteAddress, ChannelHandlerContext ctx) {
+    XDR out = new XDR();
+    RpcDeniedReply reply = new RpcDeniedReply(call.getXid(),
+        RpcReply.ReplyState.MSG_DENIED,
+        RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone());
+    reply.write(out);
+    ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap()
+        .buffer());
+    RpcResponse rsp = new RpcResponse(buf, remoteAddress);
+    RpcUtil.sendRpcResponse(ctx, rsp);
+  }
 
 
   protected abstract void handleInternal(ChannelHandlerContext ctx, RpcInfo info);
   
+ 45 - 0
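A standalone restatement of the port check added to RpcProgram above; the helper and class names below are mine, not part of the patch, and "privileged" follows the code's rule of port <= 1023.

import java.net.InetSocketAddress;
import java.net.SocketAddress;

public class PrivilegedPortCheckSketch {
  // With allowInsecurePorts == false, only clients bound to a privileged
  // (<= 1023) port are accepted; anything else gets an AUTH_ERROR denied reply.
  static boolean isAcceptable(SocketAddress remote, boolean allowInsecurePorts) {
    if (allowInsecurePorts) {
      return true;
    }
    if (remote instanceof InetSocketAddress) {
      return ((InetSocketAddress) remote).getPort() <= 1023;
    }
    return false; // unknown address type: reject, as the server does
  }

  public static void main(String[] args) {
    System.out.println(isAcceptable(new InetSocketAddress("127.0.0.1", 1022), false));  // true
    System.out.println(isAcceptable(new InetSocketAddress("127.0.0.1", 40000), false)); // false
    System.out.println(isAcceptable(new InetSocketAddress("127.0.0.1", 40000), true));  // true
  }
}
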
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java

@@ -66,6 +66,51 @@ public class TestIdUserGroup {
     assertEquals("mapred3", gMap.get(498));
   }
 
+  @Test
+  public void testIdOutOfIntegerRange() throws IOException {
+    String GET_ALL_USERS_CMD = "echo \""
+        + "nfsnobody:x:4294967294:4294967294:Anonymous NFS User:/var/lib/nfs:/sbin/nologin\n"
+        + "nfsnobody1:x:4294967295:4294967295:Anonymous NFS User:/var/lib/nfs1:/sbin/nologin\n"
+        + "maxint:x:2147483647:2147483647:Grid Distributed File System:/home/maxint:/bin/bash\n"
+        + "minint:x:2147483648:2147483648:Grid Distributed File System:/home/minint:/bin/bash\n"
+        + "archivebackup:*:1031:4294967294:Archive Backup:/home/users/archivebackup:/bin/sh\n"
+        + "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+        + "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""
+        + " | cut -d: -f1,3";
+    String GET_ALL_GROUPS_CMD = "echo \""
+        + "hdfs:*:11501:hrt_hdfs\n"
+        + "rpcuser:*:29:\n"
+        + "nfsnobody:*:4294967294:\n"
+        + "nfsnobody1:*:4294967295:\n"
+        + "maxint:*:2147483647:\n"
+        + "minint:*:2147483648:\n"
+        + "mapred3:x:498\"" 
+        + " | cut -d: -f1,3";
+    // Maps for id to name map
+    BiMap<Integer, String> uMap = HashBiMap.create();
+    BiMap<Integer, String> gMap = HashBiMap.create();
+
+    IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":");
+    assertTrue(uMap.size() == 7);
+    assertEquals("nfsnobody", uMap.get(-2));
+    assertEquals("nfsnobody1", uMap.get(-1));
+    assertEquals("maxint", uMap.get(2147483647));
+    assertEquals("minint", uMap.get(-2147483648));
+    assertEquals("archivebackup", uMap.get(1031));
+    assertEquals("hdfs",uMap.get(11501));
+    assertEquals("daemon", uMap.get(2));
+
+    IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":");
+    assertTrue(gMap.size() == 7);
+    assertEquals("hdfs",gMap.get(11501));
+    assertEquals("rpcuser", gMap.get(29));
+    assertEquals("nfsnobody", gMap.get(-2));
+    assertEquals("nfsnobody1", gMap.get(-1));
+    assertEquals("maxint", gMap.get(2147483647));
+    assertEquals("minint", gMap.get(-2147483648));
+    assertEquals("mapred3", gMap.get(498));
+  }
+
   @Test
   public void testUserUpdateSetting() throws IOException {
     IdUserGroup iug = new IdUserGroup();

+ 47 - 16
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java

@@ -28,6 +28,8 @@ import java.util.Random;
 import org.apache.hadoop.oncrpc.RpcUtil.RpcFrameDecoder;
 import org.apache.hadoop.oncrpc.security.CredentialsNone;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
+import org.apache.log4j.Level;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffers;
@@ -38,10 +40,16 @@ import org.junit.Test;
 import org.mockito.Mockito;
 
 public class TestFrameDecoder {
+  
+  static {
+    ((Log4JLogger) RpcProgram.LOG).getLogger().setLevel(Level.ALL);
+  }
 
   private static int resultSize;
 
   static void testRequest(XDR request, int serverPort) {
+    // Reset resultSize so as to avoid interference from other tests in this class.
+    resultSize = 0;
     SimpleTcpClient tcpClient = new SimpleTcpClient("localhost", serverPort, request,
         true);
     tcpClient.run();
@@ -50,9 +58,10 @@
   static class TestRpcProgram extends RpcProgram {
 
     protected TestRpcProgram(String program, String host, int port,
-        int progNumber, int lowProgVersion, int highProgVersion) {
+        int progNumber, int lowProgVersion, int highProgVersion,
+        boolean allowInsecurePorts) {
       super(program, host, port, progNumber, lowProgVersion, highProgVersion,
-          null);
+          null, allowInsecurePorts);
     }
 
     @Override
@@ -149,7 +158,41 @@
 
   @Test
   public void testFrames() {
+    int serverPort = startRpcServer(true);
 
+    int bufsize = 2 * 1024 * 1024;
+    byte[] buffer = new byte[bufsize];
+    xdrOut.writeFixedOpaque(buffer);
+    int requestSize = xdrOut.size() - headerSize;
+
+    // Send the request to the server
+    testRequest(xdrOut, serverPort);
+
+    // Verify the server got the request with right size
+    assertEquals(requestSize, resultSize);
+  }
+  
+  @Test
+  public void testUnprivilegedPort() {
+    // Don't allow connections from unprivileged ports. Given that this test is
+    // presumably not being run by root, this will be the case.
+    int serverPort = startRpcServer(false);
+
+    XDR xdrOut = createGetportMount();
+    int bufsize = 2 * 1024 * 1024;
+    byte[] buffer = new byte[bufsize];
+    xdrOut.writeFixedOpaque(buffer);
+
+    // Send the request to the server
+    testRequest(xdrOut, serverPort);
+
+    // Verify the server rejected the request.
+    assertEquals(0, resultSize);
+  }
+  
+  private static int startRpcServer(boolean allowInsecurePorts) {
     Random rand = new Random();
     int serverPort = 30000 + rand.nextInt(10000);
     int retries = 10;    // A few retries in case initial choice is in use.
@@ -157,7 +200,7 @@
     while (true) {
       try {
         RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram",
-            "localhost", serverPort, 100000, 1, 2);
+            "localhost", serverPort, 100000, 1, 2, allowInsecurePorts);
         SimpleTcpServer tcpServer = new SimpleTcpServer(serverPort, program, 1);
         tcpServer.run();
         break;          // Successfully bound a port, break out.
@@ -169,19 +212,7 @@
         }
       }
     }
-
-    XDR xdrOut = createGetportMount();
-    int headerSize = xdrOut.size();
-    int bufsize = 2 * 1024 * 1024;
-    byte[] buffer = new byte[bufsize];
-    xdrOut.writeFixedOpaque(buffer);
-    int requestSize = xdrOut.size() - headerSize;
-
-    // Send the request to the server
-    testRequest(xdrOut, serverPort);
-
-    // Verify the server got the request with right size
-    assertEquals(requestSize, resultSize);
+    return serverPort;
   }
 
   static void createPortmapXDRheader(XDR xdr_out, int procedure) {

+ 18 - 0
hadoop-common-project/hadoop-nfs/src/test/resources/log4j.properties

@@ -0,0 +1,18 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=info,stdout
+log4j.threshhold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java

@@ -32,14 +32,14 @@ import org.apache.hadoop.mount.MountdBase;
  */
 public class Mountd extends MountdBase {
 
-  public Mountd(Configuration config, DatagramSocket registrationSocket)
-      throws IOException {
-    super(new RpcProgramMountd(config, registrationSocket));
+  public Mountd(Configuration config, DatagramSocket registrationSocket,
+      boolean allowInsecurePorts) throws IOException {
+    super(new RpcProgramMountd(config, registrationSocket, allowInsecurePorts));
   }
   
   public static void main(String[] args) throws IOException {
     Configuration config = new Configuration();
-    Mountd mountd = new Mountd(config, null);
+    Mountd mountd = new Mountd(config, null, true);
     mountd.start(true);
   }
 }

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java

@@ -79,11 +79,11 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
   
   private final NfsExports hostsMatcher;
 
-  public RpcProgramMountd(Configuration config,
-      DatagramSocket registrationSocket) throws IOException {
+  public RpcProgramMountd(Configuration config, DatagramSocket registrationSocket,
+      boolean allowInsecurePorts) throws IOException {
     // Note that RPC cache is not enabled
     super("mountd", "localhost", config.getInt("nfs3.mountd.port", PORT),
-        PROGRAM, VERSION_1, VERSION_3, registrationSocket);
+        PROGRAM, VERSION_1, VERSION_3, registrationSocket, allowInsecurePorts);
     exports = new ArrayList<String>();
     exports.add(config.get(Nfs3Constant.EXPORT_POINT,
         Nfs3Constant.EXPORT_POINT_DEFAULT));

+ 13 - 6
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.net.DatagramSocket;
 import java.net.DatagramSocket;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.nfs.mount.Mountd;
 import org.apache.hadoop.hdfs.nfs.mount.Mountd;
 import org.apache.hadoop.nfs.nfs3.Nfs3Base;
 import org.apache.hadoop.nfs.nfs3.Nfs3Base;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -41,12 +42,13 @@ public class Nfs3 extends Nfs3Base {
   }
   }
   
   
   public Nfs3(Configuration conf) throws IOException {
   public Nfs3(Configuration conf) throws IOException {
-    this(conf, null);
+    this(conf, null, true);
   }
   }
   
   
-  public Nfs3(Configuration conf, DatagramSocket registrationSocket) throws IOException {
-    super(new RpcProgramNfs3(conf, registrationSocket), conf);
-    mountd = new Mountd(conf, registrationSocket);
+  public Nfs3(Configuration conf, DatagramSocket registrationSocket,
+      boolean allowInsecurePorts) throws IOException {
+    super(new RpcProgramNfs3(conf, registrationSocket, allowInsecurePorts), conf);
+    mountd = new Mountd(conf, registrationSocket, allowInsecurePorts);
   }
   }
 
 
   public Mountd getMountd() {
   public Mountd getMountd() {
@@ -61,8 +63,13 @@ public class Nfs3 extends Nfs3Base {
   
   
   static void startService(String[] args,
   static void startService(String[] args,
       DatagramSocket registrationSocket) throws IOException {
       DatagramSocket registrationSocket) throws IOException {
-    StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);    
-    final Nfs3 nfsServer = new Nfs3(new Configuration(), registrationSocket);
+    StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
+    Configuration conf = new Configuration();
+    boolean allowInsecurePorts = conf.getBoolean(
+        DFSConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_KEY,
+        DFSConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_DEFAULT);
+    final Nfs3 nfsServer = new Nfs3(new Configuration(), registrationSocket,
+        allowInsecurePorts);
     nfsServer.startServiceInternal(true);
   }
   

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java

@@ -166,11 +166,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   
   
   private final RpcCallCache rpcCallCache;

-  public RpcProgramNfs3(Configuration config, DatagramSocket registrationSocket)
-      throws IOException {
+  public RpcProgramNfs3(Configuration config, DatagramSocket registrationSocket,
+      boolean allowInsecurePorts) throws IOException {
     super("NFS3", "localhost", config.getInt(Nfs3Constant.NFS3_SERVER_PORT,
         Nfs3Constant.NFS3_SERVER_PORT_DEFAULT), Nfs3Constant.PROGRAM,
-        Nfs3Constant.VERSION, Nfs3Constant.VERSION, registrationSocket);
+        Nfs3Constant.VERSION, Nfs3Constant.VERSION, registrationSocket,
+        allowInsecurePorts);
    
    
     config.set(FsPermission.UMASK_LABEL, "000");
     iug = new IdUserGroup();

+ 32 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -273,6 +273,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6334. Client failover proxy provider for IP failover based NN HA.
     (kihwal)

+    HDFS-6406. Add capability for NFS gateway to reject connections from
+    unprivileged ports. (atm)
+
   IMPROVEMENTS

     HDFS-6007. Update documentation about short-circuit local reads (iwasakims
@@ -363,6 +366,12 @@ Release 2.5.0 - UNRELEASED
 
 
     HDFS-6287. Add vecsum test of libhdfs read access times (cmccabe)

+    HDFS-5683. Better audit log messages for caching operations.
+    (Abhiraj Butala via wang)
+
+    HDFS-6345. DFS.listCacheDirectives() should allow filtering based on
+    cache directive ID. (wang)
+
   OPTIMIZATIONS

     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
@@ -481,6 +490,14 @@ Release 2.5.0 - UNRELEASED
 
 
     HDFS-6381. Fix a typo in INodeReference.java. (Binglin Chang via jing9)

+    HDFS-6400. Cannot execute hdfs oiv_legacy. (Akira AJISAKA via kihwal)
+
+    HDFS-6250. Fix test failed in TestBalancerWithNodeGroup.testBalancerWithRackLocality
+    (Binglin Chang and Chen He via junping_du)
+
+    HDFS-4913. Deleting file through fuse-dfs when using trash fails requiring
+    root permissions (cmccabe)
+
 Release 2.4.1 - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -549,6 +566,21 @@ Release 2.4.1 - UNRELEASED
 
 
     HDFS-6326. WebHdfs ACL compatibility is broken. (cnauroth)

+    HDFS-6361. TestIdUserGroup.testUserUpdateSetting failed due to out of range
+    nfsnobody Id. (Yongjun Zhang via brandonli)
+
+    HDFS-6362. InvalidateBlocks is inconsistent in usage of DatanodeUuid and
+    StorageID. (Arpit Agarwal)
+
+    HDFS-6402. Suppress findbugs warning for failure to override equals and
+    hashCode in FsAclPermission. (cnauroth)
+
+    HDFS-6325. Append should fail if the last block has insufficient number of
+    replicas (Keith Pak via cos)
+
+    HDFS-6397. NN shows inconsistent value in deadnode count.
+    (Mohammad Kamrul Islam via kihwal)
+
 Release 2.4.0 - 2014-04-07 

   INCOMPATIBLE CHANGES

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs

@@ -162,7 +162,7 @@ elif [ "$COMMAND" = "jmxget" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.JMXGet
 elif [ "$COMMAND" = "oiv" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB
-elif [ "COMMAND" = "oiv_legacy" ] ; then
+elif [ "$COMMAND" = "oiv_legacy" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
 elif [ "$COMMAND" = "oev" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer

+ 8 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -638,9 +638,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
 
   public static final String DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE =
       "dfs.client.hedged.read.threadpool.size";
-  public static final int    DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE = 0;
-  public static final String DFS_NFS_KEYTAB_FILE_KEY = "dfs.nfs.keytab.file";
-  public static final String DFS_NFS_KERBEROS_PRINCIPAL_KEY = "dfs.nfs.kerberos.principal";
-  public static final String DFS_NFS_REGISTRATION_PORT_KEY = "dfs.nfs.registration.port";
-  public static final int    DFS_NFS_REGISTRATION_PORT_DEFAULT = 40; // Currently unassigned.
+  public static final int     DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE = 0;
+  public static final String  DFS_NFS_KEYTAB_FILE_KEY = "dfs.nfs.keytab.file";
+  public static final String  DFS_NFS_KERBEROS_PRINCIPAL_KEY = "dfs.nfs.kerberos.principal";
+  public static final String  DFS_NFS_REGISTRATION_PORT_KEY = "dfs.nfs.registration.port";
+  public static final int     DFS_NFS_REGISTRATION_PORT_DEFAULT = 40; // Currently unassigned.
+  public static final String  DFS_NFS_ALLOW_INSECURE_PORTS_KEY = "dfs.nfs.allow.insecure.ports";
+  public static final boolean DFS_NFS_ALLOW_INSECURE_PORTS_DEFAULT = true;
+  
 }
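
The new key is read like any other NFS gateway setting; a minimal sketch of turning the check on programmatically (for example from a test), using only the constants added above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    Configuration conf = new Configuration();
    // Reject NFS/mountd traffic from unprivileged ports; the shipped default
    // (DFS_NFS_ALLOW_INSECURE_PORTS_DEFAULT) is true, i.e. accept them.
    conf.setBoolean(DFSConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_KEY, false);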

+ 68 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java

@@ -23,6 +23,10 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.BatchedRemoteIterator;
+import org.apache.hadoop.fs.InvalidRequestException;
+import org.apache.hadoop.ipc.RemoteException;
+
+import com.google.common.base.Preconditions;
 
 
 /**
  * CacheDirectiveIterator is a remote iterator that iterates cache directives.
@@ -33,7 +37,7 @@ import org.apache.hadoop.fs.BatchedRemoteIterator;
 public class CacheDirectiveIterator
     extends BatchedRemoteIterator<Long, CacheDirectiveEntry> {

-  private final CacheDirectiveInfo filter;
+  private CacheDirectiveInfo filter;
   private final ClientProtocol namenode;

   public CacheDirectiveIterator(ClientProtocol namenode,
@@ -43,10 +47,72 @@ public class CacheDirectiveIterator
     this.filter = filter;
   }

+  private static CacheDirectiveInfo removeIdFromFilter(CacheDirectiveInfo filter) {
+    CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder(filter);
+    builder.setId(null);
+    return builder.build();
+  }
+
+  /**
+   * Used for compatibility when communicating with a server version that
+   * does not support filtering directives by ID.
+   */
+  private static class SingleEntry implements
+      BatchedEntries<CacheDirectiveEntry> {
+
+    private final CacheDirectiveEntry entry;
+
+    public SingleEntry(final CacheDirectiveEntry entry) {
+      this.entry = entry;
+    }
+
+    @Override
+    public CacheDirectiveEntry get(int i) {
+      if (i > 0) {
+        return null;
+      }
+      return entry;
+    }
+
+    @Override
+    public int size() {
+      return 1;
+    }
+
+    @Override
+    public boolean hasMore() {
+      return false;
+    }
+  }
+
   @Override
   @Override
   public BatchedEntries<CacheDirectiveEntry> makeRequest(Long prevKey)
       throws IOException {
+    BatchedEntries<CacheDirectiveEntry> entries = null;
+    try {
+      entries = namenode.listCacheDirectives(prevKey, filter);
+    } catch (IOException e) {
+      if (e.getMessage().contains("Filtering by ID is unsupported")) {
+        // Retry case for old servers, do the filtering client-side
+        long id = filter.getId();
+        filter = removeIdFromFilter(filter);
+        // Using id - 1 as prevId should get us a window containing the id
+        // This is somewhat brittle, since it depends on directives being
+        // returned in order of ascending ID.
+        entries = namenode.listCacheDirectives(id - 1, filter);
+        for (int i=0; i<entries.size(); i++) {
+          CacheDirectiveEntry entry = entries.get(i);
+          if (entry.getInfo().getId().equals((Long)id)) {
+            return new SingleEntry(entry);
+          }
+        }
+        throw new RemoteException(InvalidRequestException.class.getName(),
+            "Did not find requested id " + id);
+      }
+      throw e;
+    }
+    Preconditions.checkNotNull(entries);
+    return entries;
   }

   @Override
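
From the client side the ID filter is just another field on CacheDirectiveInfo, and the iterator above handles old NameNodes transparently. A hedged usage sketch (the builder call is the API shown above; obtaining dfs and the sample ID are assumptions):

    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
    import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

    // dfs is an already-opened DistributedFileSystem; 42 is a made-up directive ID.
    CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder().setId(42L).build();
    RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(filter);
    while (it.hasNext()) {
      CacheDirectiveEntry entry = it.next();
      System.out.println(entry.getInfo() + " -> " + entry.getStats());
    }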

+ 14 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/FsAclPermission.java

@@ -60,4 +60,18 @@ public class FsAclPermission extends FsPermission {
   public boolean getAclBit() {
     return aclBit;
   }
+
+  @Override
+  public boolean equals(Object o) {
+    // This intentionally delegates to the base class.  This is only overridden
+    // to suppress a FindBugs warning.
+    return super.equals(o);
+  }
+
+  @Override
+  public int hashCode() {
+    // This intentionally delegates to the base class.  This is only overridden
+    // to suppress a FindBugs warning.
+    return super.hashCode();
+  }
 }
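
The overrides exist only to silence FindBugs; equality is still decided by the inherited permission-bit comparison, so the ACL bit does not take part. A small sketch of the resulting behaviour (the wrapper constructor taking an FsPermission is assumed here; check the class for the exact signature):

    FsPermission plain = new FsPermission((short) 0755);
    FsPermission withAclBit = new FsAclPermission(plain); // assumed wrapper ctor
    // Only the permission bits (and sticky bit) are compared, so this prints true
    // even though just one of the two carries the ACL bit.
    System.out.println(plain.equals(withAclBit));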

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java

@@ -170,7 +170,7 @@ class NameNodeConnector {
   }

   /* The idea for making sure that there is no more than one balancer
-   * running in an HDFS is to create a file in the HDFS, writes the IP address
+   * running in an HDFS is to create a file in the HDFS, writes the hostname
    * of the machine on which the balancer is running to the file, but did not
    * close the file until the balancer exits. 
    * This prevents the second balancer from running because it can not

+ 25 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -265,7 +265,8 @@ public class BlockManager {
     final long pendingPeriod = conf.getLong(
         DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_MS_KEY,
         DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_MS_DEFAULT);
-    invalidateBlocks = new InvalidateBlocks(datanodeManager, pendingPeriod);
+    invalidateBlocks = new InvalidateBlocks(
+        datanodeManager.blockInvalidateLimit, pendingPeriod);
 
 
     // Compute the map capacity by allocating 2% of total memory
     blocksMap = new BlocksMap(
@@ -701,7 +702,7 @@ public class BlockManager {
 
 
     // remove this block from the list of pending blocks to be deleted. 
     for (DatanodeStorageInfo storage : targets) {
-      invalidateBlocks.remove(storage.getStorageID(), oldBlock);
+      invalidateBlocks.remove(storage.getDatanodeDescriptor(), oldBlock);
     }
     
     // Adjust safe-mode totals, since under-construction blocks don't
@@ -726,7 +727,7 @@ public class BlockManager {
     for(DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
       final String storageID = storage.getStorageID();
       // filter invalidate replicas
-      if(!invalidateBlocks.contains(storageID, block)) {
+      if(!invalidateBlocks.contains(storage.getDatanodeDescriptor(), block)) {
         locations.add(storage);
       }
     }
@@ -944,6 +945,16 @@ public class BlockManager {
                             minReplication);
   }

+  /**
+   * Check if a block is replicated to at least the minimum replication.
+   */
+  public boolean isSufficientlyReplicated(BlockInfo b) {
+    // Compare against the lesser of the minReplication and number of live DNs.
+    final int replication =
+        Math.min(minReplication, getDatanodeManager().getNumLiveDataNodes());
+    return countNodes(b).liveReplicas() >= replication;
+  }
+
   /**
    * return a list of blocks & their locations on <code>datanode</code> whose
    * total size is <code>size</code>
@@ -1016,7 +1027,7 @@ public class BlockManager {
     pendingDNMessages.removeAllMessagesForDatanode(node);

     node.resetBlocks();
-    invalidateBlocks.remove(node.getDatanodeUuid());
+    invalidateBlocks.remove(node);
     
     
     // If the DN hasn't block-reported since the most recent
     // failover, then we may have been holding up on processing
@@ -1184,7 +1195,7 @@ public class BlockManager {
    * @return total number of block for deletion
    */
   int computeInvalidateWork(int nodesToProcess) {
-    final List<String> nodes = invalidateBlocks.getStorageIDs();
+    final List<DatanodeInfo> nodes = invalidateBlocks.getDatanodes();
     Collections.shuffle(nodes);

     nodesToProcess = Math.min(nodes.size(), nodesToProcess);
@@ -1973,7 +1984,7 @@ public class BlockManager {
     }

     // Ignore replicas already scheduled to be removed from the DN
-    if(invalidateBlocks.contains(dn.getDatanodeUuid(), block)) {
+    if(invalidateBlocks.contains(dn, block)) {
       /*
        * TODO: following assertion is incorrect, see HDFS-2668 assert
        * storedBlock.findDatanode(dn) < 0 : "Block " + block +
@@ -3199,9 +3210,8 @@ public class BlockManager {
    *
    * @return number of blocks scheduled for removal during this iteration.
    */
-  private int invalidateWorkForOneNode(String nodeId) {
+  private int invalidateWorkForOneNode(DatanodeInfo dn) {
     final List<Block> toInvalidate;
-    final DatanodeDescriptor dn;
     
     
     namesystem.writeLock();
     try {
@@ -3210,15 +3220,13 @@ public class BlockManager {
         LOG.debug("In safemode, not computing replication work");
         return 0;
       }
-      // get blocks to invalidate for the nodeId
-      assert nodeId != null;
-      dn = datanodeManager.getDatanode(nodeId);
-      if (dn == null) {
-        invalidateBlocks.remove(nodeId);
-        return 0;
-      }
-      toInvalidate = invalidateBlocks.invalidateWork(nodeId, dn);
-      if (toInvalidate == null) {
+      try {
+        toInvalidate = invalidateBlocks.invalidateWork(datanodeManager.getDatanode(dn));
+        
+        if (toInvalidate == null) {
+          return 0;
+        }
+      } catch(UnregisteredNodeException une) {
         return 0;
       }
     } finally {

+ 1 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -1057,15 +1057,7 @@ public class DatanodeManager {
 
 
   /** @return the number of dead datanodes. */
   public int getNumDeadDataNodes() {
-    int numDead = 0;
-    synchronized (datanodeMap) {   
-      for(DatanodeDescriptor dn : datanodeMap.values()) {
-        if (isDatanodeDead(dn) ) {
-          numDead++;
-        }
-      }
-    }
-    return numDead;
+    return getDatanodeListForReport(DatanodeReportType.DEAD).size();
   }

   /** @return list of datanodes where decommissioning is in progress. */

+ 23 - 27
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java

@@ -44,13 +44,13 @@ import com.google.common.annotations.VisibleForTesting;
  */
 @InterfaceAudience.Private
 class InvalidateBlocks {
-  /** Mapping: StorageID -> Collection of Blocks */
-  private final Map<String, LightWeightHashSet<Block>> node2blocks =
-      new TreeMap<String, LightWeightHashSet<Block>>();
+  /** Mapping: DatanodeInfo -> Collection of Blocks */
+  private final Map<DatanodeInfo, LightWeightHashSet<Block>> node2blocks =
+      new TreeMap<DatanodeInfo, LightWeightHashSet<Block>>();
   /** The total number of blocks in the map. */
   private long numBlocks = 0L;

-  private final DatanodeManager datanodeManager;
+  private final int blockInvalidateLimit;
 
 
   /**
    * The period of pending time for block invalidation since the NameNode
@@ -60,8 +60,8 @@ class InvalidateBlocks {
   /** the startup time */
   private final long startupTime = Time.monotonicNow();

-  InvalidateBlocks(final DatanodeManager datanodeManager, long pendingPeriodInMs) {
-    this.datanodeManager = datanodeManager;
+  InvalidateBlocks(final int blockInvalidateLimit, long pendingPeriodInMs) {
+    this.blockInvalidateLimit = blockInvalidateLimit;
     this.pendingPeriodInMs = pendingPeriodInMs;
     printBlockDeletionTime(BlockManager.LOG);
   }
@@ -86,12 +86,9 @@ class InvalidateBlocks {
    * invalidation. Blocks are compared including their generation stamps:
    * if a block is pending invalidation but with a different generation stamp,
    * returns false.
-   * @param storageID the storage to check
-   * @param the block to look for
-   * 
    */
-  synchronized boolean contains(final String storageID, final Block block) {
-    final LightWeightHashSet<Block> s = node2blocks.get(storageID);
+  synchronized boolean contains(final DatanodeInfo dn, final Block block) {
+    final LightWeightHashSet<Block> s = node2blocks.get(dn);
     if (s == null) {
       return false; // no invalidate blocks for this storage ID
     }
@@ -106,10 +103,10 @@ class InvalidateBlocks {
    */
   synchronized void add(final Block block, final DatanodeInfo datanode,
       final boolean log) {
-    LightWeightHashSet<Block> set = node2blocks.get(datanode.getDatanodeUuid());
+    LightWeightHashSet<Block> set = node2blocks.get(datanode);
     if (set == null) {
       set = new LightWeightHashSet<Block>();
-      node2blocks.put(datanode.getDatanodeUuid(), set);
+      node2blocks.put(datanode, set);
     }
     if (set.add(block)) {
       numBlocks++;
@@ -121,20 +118,20 @@ class InvalidateBlocks {
   }

   /** Remove a storage from the invalidatesSet */
-  synchronized void remove(final String storageID) {
-    final LightWeightHashSet<Block> blocks = node2blocks.remove(storageID);
+  synchronized void remove(final DatanodeInfo dn) {
+    final LightWeightHashSet<Block> blocks = node2blocks.remove(dn);
     if (blocks != null) {
       numBlocks -= blocks.size();
     }
   }

   /** Remove the block from the specified storage. */
-  synchronized void remove(final String storageID, final Block block) {
-    final LightWeightHashSet<Block> v = node2blocks.get(storageID);
+  synchronized void remove(final DatanodeInfo dn, final Block block) {
+    final LightWeightHashSet<Block> v = node2blocks.get(dn);
     if (v != null && v.remove(block)) {
       numBlocks--;
       if (v.isEmpty()) {
-        node2blocks.remove(storageID);
+        node2blocks.remove(dn);
       }
     }
   }
@@ -148,18 +145,18 @@ class InvalidateBlocks {
       return;
     }

-    for(Map.Entry<String,LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
+    for(Map.Entry<DatanodeInfo, LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
       final LightWeightHashSet<Block> blocks = entry.getValue();
       if (blocks.size() > 0) {
-        out.println(datanodeManager.getDatanode(entry.getKey()));
+        out.println(entry.getKey());
         out.println(blocks);
       }
     }
   }

   /** @return a list of the storage IDs. */
-  synchronized List<String> getStorageIDs() {
-    return new ArrayList<String>(node2blocks.keySet());
+  synchronized List<DatanodeInfo> getDatanodes() {
+    return new ArrayList<DatanodeInfo>(node2blocks.keySet());
   }

   /**
@@ -170,8 +167,7 @@ class InvalidateBlocks {
     return pendingPeriodInMs - (Time.monotonicNow() - startupTime);
   }

-  synchronized List<Block> invalidateWork(
-      final String storageId, final DatanodeDescriptor dn) {
+  synchronized List<Block> invalidateWork(final DatanodeDescriptor dn) {
     final long delay = getInvalidationDelay();
     if (delay > 0) {
       if (BlockManager.LOG.isDebugEnabled()) {
@@ -181,18 +177,18 @@ class InvalidateBlocks {
       }
       return null;
     }
-    final LightWeightHashSet<Block> set = node2blocks.get(storageId);
+    final LightWeightHashSet<Block> set = node2blocks.get(dn);
     if (set == null) {
       return null;
     }

     // # blocks that can be sent in one message is limited
-    final int limit = datanodeManager.blockInvalidateLimit;
+    final int limit = blockInvalidateLimit;
     final List<Block> toInvalidate = set.pollN(limit);

     // If we send everything in this message, remove this node entry
     if (set.isEmpty()) {
-      remove(storageId);
+      remove(dn);
     }

     dn.addBlocksToBeInvalidated(toInvalidate);
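
The pending-invalidation map is now keyed by the datanode itself instead of a storage ID string; add() uses the usual get-or-create multimap idiom. A generic sketch with plain JDK types standing in for DatanodeInfo, Block and LightWeightHashSet:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    Map<String, Set<Long>> node2blocks = new HashMap<String, Set<Long>>();

    String dn = "datanode-1";       // stand-in for a DatanodeInfo key
    long blockId = 1073741825L;     // stand-in for a Block

    Set<Long> set = node2blocks.get(dn);
    if (set == null) {              // first pending block for this datanode
      set = new HashSet<Long>();
      node2blocks.put(dn, set);
    }
    set.add(blockId);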

+ 8 - 43
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java

@@ -27,8 +27,10 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.ScopedAclEntries;
 import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;

@@ -90,7 +92,7 @@ final class AclStorage {
     FsPermission childPerm = child.getFsPermission();

     // Copy each default ACL entry from parent to new child's access ACL.
-    boolean parentDefaultIsMinimal = isMinimalAcl(parentDefaultEntries);
+    boolean parentDefaultIsMinimal = AclUtil.isMinimalAcl(parentDefaultEntries);
     for (AclEntry entry: parentDefaultEntries) {
       AclEntryType type = entry.getType();
       String name = entry.getName();
@@ -127,7 +129,7 @@ final class AclStorage {
       Collections.<AclEntry>emptyList();

     final FsPermission newPerm;
-    if (!isMinimalAcl(accessEntries) || !defaultEntries.isEmpty()) {
+    if (!AclUtil.isMinimalAcl(accessEntries) || !defaultEntries.isEmpty()) {
       // Save the new ACL to the child.
       child.addAclFeature(createAclFeature(accessEntries, defaultEntries));
       newPerm = createFsPermissionForExtendedAcl(accessEntries, childPerm);
@@ -172,7 +174,7 @@ final class AclStorage {
     FsPermission perm = inode.getFsPermission();
     AclFeature f = inode.getAclFeature();
     if (f == null) {
-      return getMinimalAcl(perm);
+      return AclUtil.getMinimalAcl(perm);
     }

     final List<AclEntry> existingAcl;
@@ -208,7 +210,7 @@ final class AclStorage {
     } else {
       // It's possible that there is a default ACL but no access ACL. In this
       // case, add the minimal access ACL implied by the permission bits.
-      existingAcl.addAll(getMinimalAcl(perm));
+      existingAcl.addAll(AclUtil.getMinimalAcl(perm));
     }

     // Add all default entries after the access entries.
@@ -267,7 +269,7 @@ final class AclStorage {
     assert newAcl.size() >= 3;
     FsPermission perm = inode.getFsPermission();
     final FsPermission newPerm;
-    if (!isMinimalAcl(newAcl)) {
+    if (!AclUtil.isMinimalAcl(newAcl)) {
       // This is an extended ACL.  Split entries into access vs. default.
       ScopedAclEntries scoped = new ScopedAclEntries(newAcl);
       List<AclEntry> accessEntries = scoped.getAccessEntries();
@@ -321,7 +323,7 @@ final class AclStorage {
     // For the access ACL, the feature only needs to hold the named user and
     // group entries.  For a correctly sorted ACL, these will be in a
     // predictable range.
-    if (!isMinimalAcl(accessEntries)) {
+    if (!AclUtil.isMinimalAcl(accessEntries)) {
       featureEntries.addAll(
         accessEntries.subList(1, accessEntries.size() - 2));
     }
@@ -366,41 +368,4 @@ final class AclStorage {
       accessEntries.get(2).getPermission(),
       existingPerm.getStickyBit());
   }
-
-  /**
-   * Translates the given permission bits to the equivalent minimal ACL.
-   *
-   * @param perm FsPermission to translate
-   * @return List<AclEntry> containing exactly 3 entries representing the owner,
-   *   group and other permissions
-   */
-  private static List<AclEntry> getMinimalAcl(FsPermission perm) {
-    return Lists.newArrayList(
-      new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.USER)
-        .setPermission(perm.getUserAction())
-        .build(),
-      new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.GROUP)
-        .setPermission(perm.getGroupAction())
-        .build(),
-      new AclEntry.Builder()
-        .setScope(AclEntryScope.ACCESS)
-        .setType(AclEntryType.OTHER)
-        .setPermission(perm.getOtherAction())
-        .build());
-  }
-
-  /**
-   * Checks if the given entries represent a minimal ACL (contains exactly 3
-   * entries).
-   *
-   * @param entries List<AclEntry> entries to check
-   * @return boolean true if the entries represent a minimal ACL
-   */
-  private static boolean isMinimalAcl(List<AclEntry> entries) {
-    return entries.size() == 3;
-  }
 }
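
getMinimalAcl and isMinimalAcl now live in the shared org.apache.hadoop.fs.permission.AclUtil class added by this change, so other callers can reuse them. A short usage sketch based on the call sites above:

    import java.util.List;
    import org.apache.hadoop.fs.permission.AclEntry;
    import org.apache.hadoop.fs.permission.AclUtil;
    import org.apache.hadoop.fs.permission.FsPermission;

    FsPermission perm = new FsPermission((short) 0640);
    // Exactly the three entries implied by the bits: user, group and other.
    List<AclEntry> minimal = AclUtil.getMinimalAcl(perm);
    System.out.println(minimal.size());               // 3
    System.out.println(AclUtil.isMinimalAcl(minimal)); // true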

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclTransformation.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.ScopedAclEntries;
 import org.apache.hadoop.hdfs.protocol.AclException;

 /**

+ 22 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java

@@ -691,15 +691,25 @@ public final class CacheManager {
     assert namesystem.hasReadLock();
     final int NUM_PRE_ALLOCATED_ENTRIES = 16;
     String filterPath = null;
-    if (filter.getId() != null) {
-      throw new IOException("Filtering by ID is unsupported.");
-    }
     if (filter.getPath() != null) {
       filterPath = validatePath(filter);
     }
     if (filter.getReplication() != null) {
-      throw new IOException("Filtering by replication is unsupported.");
+      throw new InvalidRequestException(
+          "Filtering by replication is unsupported.");
+    }
+
+    // Querying for a single ID
+    final Long id = filter.getId();
+    if (id != null) {
+      if (!directivesById.containsKey(id)) {
+        throw new InvalidRequestException("Did not find requested id " + id);
+      }
+      // Since we use a tailMap on directivesById, setting prev to id-1 gets
+      // us the directive with the id (if present)
+      prevId = id - 1;
     }
+
     ArrayList<CacheDirectiveEntry> replies =
         new ArrayList<CacheDirectiveEntry>(NUM_PRE_ALLOCATED_ENTRIES);
     int numReplies = 0;
@@ -711,6 +721,14 @@ public final class CacheManager {
       }
       CacheDirective curDirective = cur.getValue();
       CacheDirectiveInfo info = cur.getValue().toInfo();
+
+      // If the requested ID is present, it should be the first item.
+      // Hitting this case means the ID is not present, or we're on the second
+      // item and should break out.
+      if (id != null &&
+          !(info.getId().equals(id))) {
+        break;
+      }
       if (filter.getPool() != null && 
           !info.getPool().equals(filter.getPool())) {
         continue;
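
The single-ID path relies on directivesById being a sorted map: with prevId set to id - 1, the requested directive (if present) is the first entry the listing loop visits, which is why the loop above can break on the first mismatch. A toy illustration of that tailMap idea with plain JDK types (the real field and iteration are not shown in this hunk, so treat this as an assumption-labelled sketch):

    import java.util.TreeMap;

    TreeMap<Long, String> directivesById = new TreeMap<Long, String>();
    directivesById.put(3L, "pool-a:/warm");
    directivesById.put(5L, "pool-b:/hot");
    directivesById.put(9L, "pool-a:/cold");

    long id = 5L;
    long prevId = id - 1;
    // Keys strictly greater than prevId: the requested id, if present, comes first.
    System.out.println(directivesById.tailMap(prevId, false).firstKey()); // 5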

+ 23 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -2380,7 +2380,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       // finalizeINodeFileUnderConstruction so we need to refresh 
       // the referenced file.  
       myFile = INodeFile.valueOf(dir.getINode(src), src, true);
-      
+      final BlockInfo lastBlock = myFile.getLastBlock();
+      // Check that the block has at least minimum replication.
+      if(lastBlock != null && lastBlock.isComplete() &&
+          !getBlockManager().isSufficientlyReplicated(lastBlock)) {
+        throw new IOException("append: lastBlock=" + lastBlock +
+            " of src=" + src + " is not sufficiently replicated yet.");
+      }
       final DatanodeDescriptor clientNode = 
           blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
       return prepareFileForWrite(src, myFile, holder, clientMachine, clientNode,
@@ -7443,6 +7449,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       cacheManager.waitForRescanIfNeeded();
     }
     writeLock();
+    String effectiveDirectiveStr = null;
     Long result = null;
     try {
       checkOperation(OperationCategory.WRITE);
@@ -7454,11 +7461,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         throw new IOException("addDirective: you cannot specify an ID " +
             "for this operation.");
       }
-      CacheDirectiveInfo effectiveDirective = 
+      CacheDirectiveInfo effectiveDirective =
           cacheManager.addDirective(directive, pc, flags);
       getEditLog().logAddCacheDirectiveInfo(effectiveDirective,
           cacheEntry != null);
       result = effectiveDirective.getId();
+      effectiveDirectiveStr = effectiveDirective.toString();
       success = true;
     } finally {
       writeUnlock();
@@ -7466,7 +7474,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         getEditLog().logSync();
       }
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "addCacheDirective", null, null, null);
+        logAuditEvent(success, "addCacheDirective", effectiveDirectiveStr, null, null);
       }
       RetryCache.setState(cacheEntry, success, result);
     }
@@ -7503,7 +7511,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         getEditLog().logSync();
       }
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "modifyCacheDirective", null, null, null);
+        String idStr = "{id: " + directive.getId().toString() + "}";
+        logAuditEvent(success, "modifyCacheDirective", idStr, directive.toString(), null);
       }
       RetryCache.setState(cacheEntry, success);
     }
@@ -7531,7 +7540,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     } finally {
       writeUnlock();
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "removeCacheDirective", null, null,
+        String idStr = "{id: " + id.toString() + "}";
+        logAuditEvent(success, "removeCacheDirective", idStr, null,
             null);
       }
       RetryCache.setState(cacheEntry, success);
@@ -7556,7 +7566,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     } finally {
       readUnlock();
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "listCacheDirectives", null, null,
+        logAuditEvent(success, "listCacheDirectives", filter.toString(), null,
             null);
       }
     }
@@ -7573,6 +7583,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
     writeLock();
     boolean success = false;
+    String poolInfoStr = null;
     try {
       checkOperation(OperationCategory.WRITE);
       if (isInSafeMode()) {
@@ -7583,12 +7594,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         pc.checkSuperuserPrivilege();
       }
       CachePoolInfo info = cacheManager.addCachePool(req);
+      poolInfoStr = info.toString();
       getEditLog().logAddCachePool(info, cacheEntry != null);
       success = true;
     } finally {
       writeUnlock();
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "addCachePool", req.getPoolName(), null, null);
+        logAuditEvent(success, "addCachePool", poolInfoStr, null, null);
       }
       RetryCache.setState(cacheEntry, success);
     }
@@ -7621,7 +7633,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     } finally {
       writeUnlock();
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "modifyCachePool", req.getPoolName(), null, null);
+        String poolNameStr = "{poolName: " + req.getPoolName() + "}";
+        logAuditEvent(success, "modifyCachePool", poolNameStr, req.toString(), null);
       }
       RetryCache.setState(cacheEntry, success);
     }
@@ -7654,7 +7667,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     } finally {
       writeUnlock();
       if (isAuditEnabled() && isExternalInvocation()) {
-        logAuditEvent(success, "removeCachePool", cachePoolName, null, null);
+        String poolNameStr = "{poolName: " + cachePoolName + "}";
+        logAuditEvent(success, "removeCachePool", poolNameStr, null, null);
       }
       RetryCache.setState(cacheEntry, success);
     }
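
The new precondition at the top of this hunk means an append against a file whose last complete block is still under-replicated fails immediately instead of stalling in the write pipeline. A hedged sketch of the caller-side view (path and handling are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;

    try {
      FSDataOutputStream out = fs.append(new Path("/logs/app.log")); // fs assumed
      out.writeBytes("another line\n");
      out.close();
    } catch (IOException e) {
      // e.g. "append: lastBlock=... of src=/logs/app.log is not sufficiently replicated yet."
      // Back off and retry once enough live replicas exist.
    }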

+ 8 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java

@@ -503,19 +503,21 @@ public class CacheAdmin extends Configured implements Tool {
 
 
     @Override
     public String getShortUsage() {
-      return "[" + getName() + " [-stats] [-path <path>] [-pool <pool>]]\n";
+      return "[" + getName()
+          + " [-stats] [-path <path>] [-pool <pool>] [-id <id>]\n";
     }

     @Override
     public String getLongUsage() {
       TableListing listing = getOptionDescriptionListing();
+      listing.addRow("-stats", "List path-based cache directive statistics.");
       listing.addRow("<path>", "List only " +
           "cache directives with this path. " +
           "Note that if there is a cache directive for <path> " +
           "in a cache pool that we don't have read access for, it " + 
           "will not be listed.");
       listing.addRow("<pool>", "List only path cache directives in that pool.");
-      listing.addRow("-stats", "List path-based cache directive statistics.");
+      listing.addRow("<id>", "List the cache directive with this id.");
       return getShortUsage() + "\n" +
         "List cache directives.\n\n" +
         listing.toString();
@@ -534,6 +536,10 @@ public class CacheAdmin extends Configured implements Tool {
         builder.setPool(poolFilter);
       }
       boolean printStats = StringUtils.popOption("-stats", args);
+      String idFilter = StringUtils.popOptionWithArgument("-id", args);
+      if (idFilter != null) {
+        builder.setId(Long.parseLong(idFilter));
+      }
       if (!args.isEmpty()) {
         System.err.println("Can't understand argument: " + args.get(0));
         return 1;
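
Because CacheAdmin is a standard Tool, the new -id filter can be exercised programmatically as well as via hdfs cacheadmin; a hedged sketch (the -listDirectives subcommand name and the no-arg constructor are assumptions, only -stats and -id appear in this hunk):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.tools.CacheAdmin;
    import org.apache.hadoop.util.ToolRunner;

    public class ListDirectiveById {
      public static void main(String[] args) throws Exception {
        // Roughly: hdfs cacheadmin -listDirectives -stats -id 42
        int rc = ToolRunner.run(new Configuration(), new CacheAdmin(),
            new String[] {"-listDirectives", "-stats", "-id", "42"});
        System.exit(rc);
      }
    }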

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java

@@ -47,7 +47,7 @@ public class OfflineImageViewer {
   public static final Log LOG = LogFactory.getLog(OfflineImageViewer.class);
   
   private final static String usage = 
-    "Usage: bin/hdfs oiv [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n" +
+    "Usage: bin/hdfs oiv_legacy [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n" +
     "Offline Image Viewer\n" + 
     "View a Hadoop fsimage INPUTFILE using the specified PROCESSOR,\n" +
     "saving the results in OUTPUTFILE.\n" +

+ 196 - 79
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/fuse_trash.c

@@ -16,111 +16,228 @@
  * limitations under the License.
  */

-
 #include <hdfs.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
 #include <strings.h>

+#include "fuse_context_handle.h"
 #include "fuse_dfs.h"
 #include "fuse_trash.h"
-#include "fuse_context_handle.h"
+#include "fuse_users.h"
 
 
+#define TRASH_RENAME_TRIES  100
+#define ALREADY_IN_TRASH_ERR 9000
 
 
-const char *const TrashPrefixDir = "/user/root/.Trash";
-const char *const TrashDir = "/user/root/.Trash/Current";
+/**
+ * Split a path into a parent directory and a base path component.
+ *
+ * @param abs_path    The absolute path.
+ * @param pcomp       (out param) Will be set to the last path component.
+ *                        Malloced.
+ * @param parent_dir  (out param) Will be set to the parent directory.
+ *                        Malloced.
+ *
+ * @return            0 on success.
+ *                    On success, both *pcomp and *parent_dir will contain
+ *                    malloc'ed strings.
+ *                    EINVAL if the path wasn't absolute.
+ *                    EINVAL if there is no parent directory (i.e. abs_path=/)
+ *                    ENOMEM if we ran out of memory.
+ */
+static int get_parent_dir(const char *abs_path, char **pcomp,
+                          char **parent_dir)
+{
+  int ret;
+  char *pdir = NULL, *pc = NULL, *last_slash;
+
+  pdir = strdup(abs_path);
+  if (!pdir) {
+    ret = ENOMEM;
+    goto done;
+  }
+  last_slash = rindex(pdir, '/');
+  if (!last_slash) {
+    ERROR("get_parent_dir(%s): expected absolute path.\n", abs_path);
+    ret = EINVAL;
+    goto done;
+  }
+  if (last_slash[1] == '\0') {
+    *last_slash = '\0';
+    last_slash = rindex(pdir, '/');
+    if (!last_slash) {
+      ERROR("get_parent_dir(%s): there is no parent dir.\n", abs_path);
+      ret = EINVAL;
+      goto done;
+    }
+  }
+  pc = strdup(last_slash + 1);
+  if (!pc) {
+    ret = ENOMEM;
+    goto done;
+  }
+  *last_slash = '\0';
+  ret = 0;
+done:
+  if (ret) {
+    free(pdir);
+    free(pc);
+    return ret;
+  }
+  *pcomp = pc;
+  *parent_dir = pdir;
+  return 0;
+}
 
 
-#define TRASH_RENAME_TRIES  100
+/**
+ * Get the base path to the trash.  This will depend on the user ID.
+ * For example, a user whose ID maps to 'foo' will get back the path
+ * "/user/foo/.Trash/Current".
+ *
+ * @param trash_base       (out param) the base path to the trash.
+ *                             Malloced.
+ *
+ * @return                 0 on success; error code otherwise.
+ */
+static int get_trash_base(char **trash_base)
+{
+  const char * const PREFIX = "/user/";
+  const char * const SUFFIX = "/.Trash/Current";
+  char *user_name = NULL, *base = NULL;
+  uid_t uid = fuse_get_context()->uid;
+  int ret;
+
+  user_name = getUsername(uid);
+  if (!user_name) {
+    ERROR("get_trash_base(): failed to get username for uid %"PRId64"\n",
+          (uint64_t)uid);
+    ret = EIO;
+    goto done;
+  }
+  if (asprintf(&base, "%s%s%s", PREFIX, user_name, SUFFIX) < 0) {
+    base = NULL;
+    ret = ENOMEM;
+    goto done;
+  }
+  ret = 0;
+done:
+  free(user_name);
+  if (ret) {
+    free(base);
+    return ret;
+  }
+  *trash_base = base;
+  return 0;
+}
 
 
 //
 // NOTE: this function is a c implementation of org.apache.hadoop.fs.Trash.moveToTrash(Path path).
 //
-
-int move_to_trash(const char *item, hdfsFS userFS) {
-
-  // retrieve dfs specific data
-  dfs_context *dfs = (dfs_context*)fuse_get_context()->private_data;
-
-  // check params and the context var
-  assert(item);
-  assert(dfs);
-  assert('/' == *item);
-  assert(rindex(item,'/') >= 0);
-
-
-  char fname[4096]; // or last element of the directory path
-  char parent_dir[4096]; // the directory the fname resides in
-
-  if (strlen(item) > sizeof(fname) - strlen(TrashDir)) {
-    ERROR("Buffer too small to accomodate path of len %d", (int)strlen(item));
-    return -EIO;
+int move_to_trash(const char *abs_path, hdfsFS userFS)
+{
+  int ret;
+  char *pcomp = NULL, *parent_dir = NULL, *trash_base = NULL;
+  char *target_dir = NULL, *target = NULL;
+
+  ret = get_parent_dir(abs_path, &pcomp, &parent_dir);
+  if (ret) {
+    goto done;
   }
-
-  // separate the file name and the parent directory of the item to be deleted
-  {
-    int length_of_parent_dir = rindex(item, '/') - item ;
-    int length_of_fname = strlen(item) - length_of_parent_dir - 1; // the '/'
-
-    // note - the below strncpys should be safe from overflow because of the check on item's string length above.
-    strncpy(parent_dir, item, length_of_parent_dir);
-    parent_dir[length_of_parent_dir ] = 0;
-    strncpy(fname, item + length_of_parent_dir + 1, strlen(item));
-    fname[length_of_fname + 1] = 0;
+  ret = get_trash_base(&trash_base);
+  if (ret) {
+    goto done;
   }
-
-  // create the target trash directory
-  char trash_dir[4096];
-  if (snprintf(trash_dir, sizeof(trash_dir), "%s%s", TrashDir, parent_dir) 
-      >= sizeof trash_dir) {
-    ERROR("Move to trash error target not big enough for %s", item);
-    return -EIO;
+  if (!strncmp(trash_base, abs_path, strlen(trash_base))) {
+    INFO("move_to_trash(%s): file is already in the trash; deleting.",
+         abs_path);
+    ret = ALREADY_IN_TRASH_ERR;
+    goto done;
+  }
+  fprintf(stderr, "trash_base='%s'\n", trash_base);
+  if (asprintf(&target_dir, "%s%s", trash_base, parent_dir) < 0) {
+    ret = ENOMEM;
+    target_dir = NULL;
+    goto done;
+  }
+  if (asprintf(&target, "%s/%s", target_dir, pcomp) < 0) {
+    ret = ENOMEM;
+    target = NULL;
+    goto done;
   }
   }
-
   // create the target trash directory in trash (if needed)
   // create the target trash directory in trash (if needed)
-  if ( hdfsExists(userFS, trash_dir)) {
+  if (hdfsExists(userFS, target_dir) != 0) {
     // make the directory to put it in in the Trash - NOTE
     // make the directory to put it in in the Trash - NOTE
     // hdfsCreateDirectory also creates parents, so Current will be created if it does not exist.
-      return -EIO;
+    if (hdfsCreateDirectory(userFS, target_dir)) {
+      ret = errno;
+      ERROR("move_to_trash(%s) error: hdfsCreateDirectory(%s) failed with error %d",
+            abs_path, target_dir, ret);
+      goto done;
     }
     }
-  }
-
-  //
-  // if the target path in Trash already exists, then append with
-  // a number. Start from 1.
-  //
-  char target[4096];
-  int j ;
-  if ( snprintf(target, sizeof target,"%s/%s",trash_dir, fname) >= sizeof target) {
-    ERROR("Move to trash error target not big enough for %s", item);
-    return -EIO;
-  }
-
-  // NOTE: this loop differs from the java version by capping the #of tries
-  for (j = 1; ! hdfsExists(userFS, target) && j < TRASH_RENAME_TRIES ; j++) {
-    if (snprintf(target, sizeof target,"%s/%s.%d",trash_dir, fname, j) >= sizeof target) {
-      ERROR("Move to trash error target not big enough for %s", item);
-      return -EIO;
+  } else if (hdfsExists(userFS, target) == 0) {
+    // If there is already a file in the trash with this path, append a number.
+    int idx;
+    for (idx = 1; idx < TRASH_RENAME_TRIES; idx++) {
+      free(target);
+      if (asprintf(&target, "%s%s.%d", target_dir, pcomp, idx) < 0) {
+        target = NULL;
+        ret = ENOMEM;
+        goto done;
+      }
+      if (hdfsExists(userFS, target) != 0) {
+        break;
+      }
+    }
+    if (idx == TRASH_RENAME_TRIES) {
+      ERROR("move_to_trash(%s) error: there are already %d files in the trash "
+            "with this name.\n", abs_path, TRASH_RENAME_TRIES);
+      ret = EINVAL;
+      goto done;
     }
     }
   }
-  if (hdfsRename(userFS, item, target)) {
-    ERROR("Trying to rename %s to %s", item, target);
-    return -EIO;
+  if (hdfsRename(userFS, abs_path, target)) {
+    ret = errno;
+    ERROR("move_to_trash(%s): failed to rename the file to %s: error %d",
+          abs_path, target, ret);
+    goto done;
   }
-  return 0;
-} 
 
 
-
-int hdfsDeleteWithTrash(hdfsFS userFS, const char *path, int useTrash) {
-
-  // move the file to the trash if this is enabled and its not actually in the trash.
-  if (useTrash && strncmp(path, TrashPrefixDir, strlen(TrashPrefixDir)) != 0) {
-    int ret= move_to_trash(path, userFS);
-    return ret;
+  ret = 0;
+done:
+  if ((ret != 0) && (ret != ALREADY_IN_TRASH_ERR)) {
+    ERROR("move_to_trash(%s) failed with error %d", abs_path, ret);
   }
+  free(pcomp);
+  free(parent_dir);
+  free(trash_base);
+  free(target_dir);
+  free(target);
+  return ret;
+}
 
 
+int hdfsDeleteWithTrash(hdfsFS userFS, const char *path, int useTrash)
+{
+  int tried_to_move_to_trash = 0;
+  if (useTrash) {
+    tried_to_move_to_trash = 1;
+    if (move_to_trash(path, userFS) == 0) {
+      return 0;
+    }
+  }
   if (hdfsDelete(userFS, path, 1)) {
-    ERROR("Trying to delete the file %s", path);
-    return -EIO;
+    int err = errno;
+    if (err < 0) {
+      err = -err;
+    }
+    ERROR("hdfsDeleteWithTrash(%s): hdfsDelete failed: error %d.",
+          path, err);
+    return -err;
+  }
+  if (tried_to_move_to_trash) {
+    ERROR("hdfsDeleteWithTrash(%s): deleted the file instead.\n", path);
  }
-
  return 0;
}

+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -1317,6 +1317,17 @@
  </description>
</property>
 
+<property>
+  <name>dfs.nfs.allow.insecure.ports</name>
+  <value>true</value>
+  <description>
+    When set to false, client connections originating from unprivileged ports
+    (those above 1023) will be rejected. This is to ensure that clients
+    connecting to this NFS Gateway must have had root privilege on the machine
+    where they're connecting from.
+  </description>
+</property>
+
<property>
  <name>dfs.webhdfs.enabled</name>
  <value>true</value>
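For reference, the same policy can also be applied from code before the gateway is started; this is only a minimal sketch, assuming the NFS gateway reads the key from the standard Hadoop Configuration (class and variable names below are illustrative, not part of the patch):

  import org.apache.hadoop.conf.Configuration;

  public class NfsPortPolicyExample {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Reject NFS clients connecting from non-privileged ports (> 1023),
      // i.e. the opposite of the default shown in hdfs-default.xml above.
      conf.setBoolean("dfs.nfs.allow.insecure.ports", false);
      System.out.println("allow insecure ports = "
          + conf.getBoolean("dfs.nfs.allow.insecure.ports", true));
    }
  }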

+ 71 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java

@@ -28,6 +28,7 @@ import static org.mockito.Mockito.spy;
 
 import java.io.IOException;
 import java.io.OutputStream;
+import java.util.List;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.commons.logging.Log;
@@ -37,11 +38,15 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -327,4 +332,70 @@ public class TestFileAppend4 {
       cluster.shutdown();
     }
   }
+
+  /**
+   * Test that an append with no locations fails with an exception
+   * showing insufficient locations.
+   */
+  @Test(timeout = 60000)
+  public void testAppendInsufficientLocations() throws Exception {
+    Configuration conf = new Configuration();
+
+    // lower heartbeat interval for fast recognition of DN
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+        1000);
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
+
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4)
+        .build();
+    DistributedFileSystem fileSystem = null;
+    try {
+      // create a file with replication 2
+      fileSystem = cluster.getFileSystem();
+      Path f = new Path("/testAppend");
+      FSDataOutputStream create = fileSystem.create(f, (short) 2);
+      create.write("/testAppend".getBytes());
+      create.close();
+
+      // Check for replications
+      DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
+
+      // Shut down all DNs that have the last block location for the file
+      LocatedBlocks lbs = fileSystem.dfs.getNamenode().
+          getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
+      List<DataNode> dnsOfCluster = cluster.getDataNodes();
+      DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().
+          getLocations();
+      for( DataNode dn : dnsOfCluster) {
+        for(DatanodeInfo loc: dnsWithLocations) {
+          if(dn.getDatanodeId().equals(loc)){
+            dn.shutdown();
+            DFSTestUtil.waitForDatanodeDeath(dn);
+          }
+        }
+      }
+
+      // Wait till 0 replication is recognized
+      DFSTestUtil.waitReplication(fileSystem, f, (short) 0);
+
+      // Append to the file, at this state there are 3 live DNs but none of them
+      // have the block.
+      try{
+        fileSystem.append(f);
+        fail("Append should fail because insufficient locations");
+      } catch (IOException e){
+        LOG.info("Expected exception: ", e);
+      }
+      FSDirectory dir = cluster.getNamesystem().getFSDirectory();
+      final INodeFile inode = INodeFile.
+          valueOf(dir.getINode("/testAppend"), "/testAppend");
+      assertTrue("File should remain closed", !inode.isUnderConstruction());
+    } finally {
+      if (null != fileSystem) {
+        fileSystem.close();
+      }
+      cluster.shutdown();
+    }
+  }
 }

+ 30 - 20
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java

@@ -22,8 +22,9 @@ import static org.junit.Assert.assertEquals;
 import java.io.IOException;
 import java.net.URI;
 import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
@@ -39,6 +40,9 @@ import org.apache.hadoop.hdfs.MiniDFSClusterWithNodeGroup;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup;
 import org.apache.hadoop.net.NetworkTopology;
@@ -53,7 +57,7 @@ public class TestBalancerWithNodeGroup {
   private static final Log LOG = LogFactory.getLog(
   "org.apache.hadoop.hdfs.TestBalancerWithNodeGroup");
   
-  final private static long CAPACITY = 6000L;
+  final private static long CAPACITY = 5000L;
   final private static String RACK0 = "/rack0";
   final private static String RACK1 = "/rack1";
   final private static String NODEGROUP0 = "/nodegroup0";
@@ -77,6 +81,7 @@ public class TestBalancerWithNodeGroup {
   static Configuration createConf() {
     Configuration conf = new HdfsConfiguration();
     TestBalancer.initConf(conf);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
     conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, 
         NetworkTopologyWithNodeGroup.class.getName());
     conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, 
@@ -191,6 +196,19 @@ public class TestBalancerWithNodeGroup {
     LOG.info("Rebalancing with default factor.");
   }
 
+  private Set<ExtendedBlock> getBlocksOnRack(List<LocatedBlock> blks, String rack) {
+    Set<ExtendedBlock> ret = new HashSet<ExtendedBlock>();
+    for (LocatedBlock blk : blks) {
+      for (DatanodeInfo di : blk.getLocations()) {
+        if (rack.equals(NetworkTopology.getFirstHalf(di.getNetworkLocation()))) {
+          ret.add(blk.getBlock());
+          break;
+        }
+      }
+    }
+    return ret;
+  }
+
   /**
    * Create a cluster with even distribution, and a new empty node is added to
    * the cluster, then test rack locality for balancer policy. 
@@ -220,9 +238,14 @@ public class TestBalancerWithNodeGroup {
 
 
       // fill up the cluster to be 30% full
       long totalUsedSpace = totalCapacity * 3 / 10;
-      TestBalancer.createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
+      long length = totalUsedSpace / numOfDatanodes;
+      TestBalancer.createFile(cluster, filePath, length,
           (short) numOfDatanodes, 0);
       
+      LocatedBlocks lbs = client.getBlockLocations(filePath.toUri().getPath(), 0,
+          length);
+      Set<ExtendedBlock> before = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0);
+
       long newCapacity = CAPACITY;
       String newRack = RACK1;
       String newNodeGroup = NODEGROUP2;
@@ -235,22 +258,9 @@ public class TestBalancerWithNodeGroup {
       // run balancer and validate results
       runBalancerCanFinish(conf, totalUsedSpace, totalCapacity);
       
-      DatanodeInfo[] datanodeReport = 
-              client.getDatanodeReport(DatanodeReportType.ALL);
-      
-      Map<String, Integer> rackToUsedCapacity = new HashMap<String, Integer>();
-      for (DatanodeInfo datanode: datanodeReport) {
-        String rack = NetworkTopology.getFirstHalf(datanode.getNetworkLocation());
-        int usedCapacity = (int) datanode.getDfsUsed();
-         
-        if (rackToUsedCapacity.get(rack) != null) {
-          rackToUsedCapacity.put(rack, usedCapacity + rackToUsedCapacity.get(rack));
-        } else {
-          rackToUsedCapacity.put(rack, usedCapacity);
-        }
-      }
-      assertEquals(rackToUsedCapacity.size(), 2);
-      assertEquals(rackToUsedCapacity.get(RACK0), rackToUsedCapacity.get(RACK1));
+      lbs = client.getBlockLocations(filePath.toUri().getPath(), 0, length);
+      Set<ExtendedBlock> after = getBlocksOnRack(lbs.getLocatedBlocks(), RACK0);
+      assertEquals(before, after);
       
       
     } finally {
       cluster.shutdown();

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java

@@ -477,6 +477,12 @@ public class TestCacheDirectives {
     iter = dfs.listCacheDirectives(
         new CacheDirectiveInfo.Builder().setPool("pool2").build());
     validateListAll(iter, betaId);
+    iter = dfs.listCacheDirectives(
+        new CacheDirectiveInfo.Builder().setId(alphaId2).build());
+    validateListAll(iter, alphaId2);
+    iter = dfs.listCacheDirectives(
+        new CacheDirectiveInfo.Builder().setId(relativeId).build());
+    validateListAll(iter, relativeId);
 
 
     dfs.removeCacheDirective(betaId);
     iter = dfs.listCacheDirectives(

+ 40 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java

@@ -129,4 +129,44 @@ public class TestHostsFiles {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testHostsIncludeForDeadCount() throws Exception {
+    Configuration conf = getConf();
+
+    // Configure an excludes file
+    FileSystem localFileSys = FileSystem.getLocal(conf);
+    Path workingDir = localFileSys.getWorkingDirectory();
+    Path dir = new Path(workingDir, "build/test/data/temp/decommission");
+    Path excludeFile = new Path(dir, "exclude");
+    Path includeFile = new Path(dir, "include");
+    assertTrue(localFileSys.mkdirs(dir));
+    StringBuilder includeHosts = new StringBuilder();
+    includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777")
+        .append("\n");
+    DFSTestUtil.writeFile(localFileSys, excludeFile, "");
+    DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
+    conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
+    conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
+
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      final FSNamesystem ns = cluster.getNameNode().getNamesystem();
+      assertTrue(ns.getNumDeadDataNodes() == 2);
+      assertTrue(ns.getNumLiveDataNodes() == 0);
+
+      // Testing using MBeans
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName(
+          "Hadoop:service=NameNode,name=FSNamesystemState");
+      String nodes = mbs.getAttribute(mxbeanName, "NumDeadDataNodes") + "";
+      assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
+      assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

+ 24 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testCacheAdminConf.xml

@@ -519,5 +519,29 @@
        </comparator>
      </comparators>
    </test>
+
+    <test> <!--Tested -->
+      <description>Testing listing a single cache directive</description>
+      <test-commands>
+        <cache-admin-command>-addPool pool1</cache-admin-command>
+        <cache-admin-command>-addDirective -path /foo -pool pool1 -ttl 2d</cache-admin-command>
+        <cache-admin-command>-addDirective -path /bar -pool pool1 -ttl 24h</cache-admin-command>
+        <cache-admin-command>-addDirective -path /baz -replication 2 -pool pool1 -ttl 60m</cache-admin-command>
+        <cache-admin-command>-listDirectives -stats -id 30</cache-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <cache-admin-command>-removePool pool1</cache-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Found 1 entry</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>30 pool1      1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
  </tests>
</configuration>

+ 5 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -139,6 +139,9 @@ Trunk (Unreleased)
 
 
     MAPREDUCE-5717. Task pings are interpreted as task progress (jlowe)
 
+    MAPREDUCE-5867. Fix NPE in KillAMPreemptionPolicy related to 
+    ProportionalCapacityPreemptionPolicy (Sunil G via devaraj)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -197,6 +200,8 @@ Release 2.5.0 - UNRELEASED
     MAPREDUCE-5861. finishedSubMaps field in LocalContainerLauncher does not 
     need to be volatile. (Tsuyoshi OZAWA via junping_du)
 
+    MAPREDUCE-5809. Enhance distcp to support preserving HDFS ACLs. (cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES 

+ 13 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/preemption/KillAMPreemptionPolicy.java

@@ -29,7 +29,9 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.PreemptionContainer;
+import org.apache.hadoop.yarn.api.records.PreemptionContract;
 import org.apache.hadoop.yarn.api.records.PreemptionMessage;
+import org.apache.hadoop.yarn.api.records.StrictPreemptionContract;
 import org.apache.hadoop.yarn.event.EventHandler;
 
 /**
@@ -52,13 +54,18 @@ public class KillAMPreemptionPolicy implements AMPreemptionPolicy {
   public void preempt(Context ctxt, PreemptionMessage preemptionRequests) {
     // for both strict and negotiable preemption requests kill the
     // container
-    for (PreemptionContainer c :
-        preemptionRequests.getStrictContract().getContainers()) {
-      killContainer(ctxt, c);
+    StrictPreemptionContract strictContract = preemptionRequests
+        .getStrictContract();
+    if (strictContract != null) {
+      for (PreemptionContainer c : strictContract.getContainers()) {
+        killContainer(ctxt, c);
+      }
     }
-    for (PreemptionContainer c :
-         preemptionRequests.getContract().getContainers()) {
-       killContainer(ctxt, c);
+    PreemptionContract contract = preemptionRequests.getContract();
+    if (contract != null) {
+      for (PreemptionContainer c : contract.getContainers()) {
+        killContainer(ctxt, c);
+      }
     }
   }
 

+ 144 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestKillAMPreemptionPolicy.java

@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.app;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.MRAppMaster.RunningAppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.rm.preemption.AMPreemptionPolicy;
+import org.apache.hadoop.mapreduce.v2.app.rm.preemption.KillAMPreemptionPolicy;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.PreemptionContainer;
+import org.apache.hadoop.yarn.api.records.PreemptionContract;
+import org.apache.hadoop.yarn.api.records.PreemptionMessage;
+import org.apache.hadoop.yarn.api.records.StrictPreemptionContract;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.junit.Test;
+
+public class TestKillAMPreemptionPolicy {
+  private final RecordFactory recordFactory = RecordFactoryProvider
+      .getRecordFactory(null);
+
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testKillAMPreemptPolicy() {
+
+    ApplicationId appId = ApplicationId.newInstance(123456789, 1);
+    ContainerId container = ContainerId.newInstance(
+        ApplicationAttemptId.newInstance(appId, 1), 1);
+    AMPreemptionPolicy.Context mPctxt = mock(AMPreemptionPolicy.Context.class);
+    when(mPctxt.getTaskAttempt(any(ContainerId.class))).thenReturn(
+        MRBuilderUtils.newTaskAttemptId(MRBuilderUtils.newTaskId(
+            MRBuilderUtils.newJobId(appId, 1), 1, TaskType.MAP), 0));
+    List<Container> p = new ArrayList<Container>();
+    p.add(Container.newInstance(container, null, null, null, null, null));
+    when(mPctxt.getContainers(any(TaskType.class))).thenReturn(p);
+
+    KillAMPreemptionPolicy policy = new KillAMPreemptionPolicy();
+
+    // strictContract is null & contract is null
+    RunningAppContext mActxt = getRunningAppContext();
+    policy.init(mActxt);
+    PreemptionMessage pM = getPreemptionMessage(false, false, container);
+    policy.preempt(mPctxt, pM);
+    verify(mActxt.getEventHandler(), times(0)).handle(
+        any(TaskAttemptEvent.class));
+    verify(mActxt.getEventHandler(), times(0)).handle(
+        any(JobCounterUpdateEvent.class));
+
+    // strictContract is not null & contract is null
+    mActxt = getRunningAppContext();
+    policy.init(mActxt);
+    pM = getPreemptionMessage(true, false, container);
+    policy.preempt(mPctxt, pM);
+    verify(mActxt.getEventHandler(), times(2)).handle(
+        any(TaskAttemptEvent.class));
+    verify(mActxt.getEventHandler(), times(2)).handle(
+        any(JobCounterUpdateEvent.class));
+
+    // strictContract is null & contract is not null
+    mActxt = getRunningAppContext();
+    policy.init(mActxt);
+    pM = getPreemptionMessage(false, true, container);
+    policy.preempt(mPctxt, pM);
+    verify(mActxt.getEventHandler(), times(2)).handle(
+        any(TaskAttemptEvent.class));
+    verify(mActxt.getEventHandler(), times(2)).handle(
+        any(JobCounterUpdateEvent.class));
+
+    // strictContract is not null & contract is not null
+    mActxt = getRunningAppContext();
+    policy.init(mActxt);
+    pM = getPreemptionMessage(true, true, container);
+    policy.preempt(mPctxt, pM);
+    verify(mActxt.getEventHandler(), times(4)).handle(
+        any(TaskAttemptEvent.class));
+    verify(mActxt.getEventHandler(), times(4)).handle(
+        any(JobCounterUpdateEvent.class));
+  }
+
+  private RunningAppContext getRunningAppContext() {
+    RunningAppContext mActxt = mock(RunningAppContext.class);
+    EventHandler<?> eventHandler = mock(EventHandler.class);
+    when(mActxt.getEventHandler()).thenReturn(eventHandler);
+    return mActxt;
+  }
+
+  private PreemptionMessage getPreemptionMessage(boolean strictContract,
+      boolean contract, final ContainerId container) {
+    PreemptionMessage preemptionMessage = recordFactory
+        .newRecordInstance(PreemptionMessage.class);
+    Set<PreemptionContainer> cntrs = new HashSet<PreemptionContainer>();
+    PreemptionContainer preemptContainer = recordFactory
+        .newRecordInstance(PreemptionContainer.class);
+    preemptContainer.setId(container);
+    cntrs.add(preemptContainer);
+    if (strictContract) {
+      StrictPreemptionContract set = recordFactory
+          .newRecordInstance(StrictPreemptionContract.class);
+      set.setContainers(cntrs);
+      preemptionMessage.setStrictContract(set);
+    }
+    if (contract) {
+      PreemptionContract preemptContract = recordFactory
+          .newRecordInstance(PreemptionContract.class);
+      preemptContract.setContainers(cntrs);
+      preemptionMessage.setContract(preemptContract);
+    }
+    return preemptionMessage;
+  }
+
+}

+ 0 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/MapredAppMasterRest.apt.vm → hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/apt/MapredAppMasterRest.apt.vm

@@ -18,8 +18,6 @@
 
 
 MapReduce Application Master REST API's.
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0|toDepth=2}
 
 * Overview

+ 4 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HistoryServerRest.apt.vm → hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/site/apt/HistoryServerRest.apt.vm

@@ -11,20 +11,18 @@
 ~~ limitations under the License. See accompanying LICENSE file.
 
   ---
-  History Server REST API's.
+  MapReduce History Server REST API's.
   ---
   ---
   ${maven.build.timestamp}
 
-History Server REST API's.
-
-  \[ {{{./index.html}Go Back}} \]
+MapReduce History Server REST API's.
 
 %{toc|section=1|fromDepth=0|toDepth=3}
 
 * Overview
 
-  The history server REST API's allow the user to get status on finished applications. Currently it only supports MapReduce and provides information on finished jobs.
+  The history server REST API's allow the user to get status on finished applications.
 
 * History Server Information API
 
@@ -2671,4 +2669,4 @@ History Server REST API's.
     </counter>
   </taskAttemptCounterGroup>
 </jobTaskAttemptCounters>
-+---+
++---+

+ 5 - 2
hadoop-project/src/site/site.xml

@@ -100,6 +100,11 @@
       <item name="DistCp" href="hadoop-mapreduce-client/hadoop-mapreduce-client-core/DistCp.html"/>
       <item name="DistCp" href="hadoop-mapreduce-client/hadoop-mapreduce-client-core/DistCp.html"/>
     </menu>
     </menu>
 
 
+    <menu name="MapReduce REST APIs" inherit="top">
+      <item name="MR Application Master" href="hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapredAppMasterRest.html"/>
+      <item name="MR History Server" href="hadoop-mapreduce-client/hadoop-mapreduce-client-hs/HistoryServerRest.html"/>
+    </menu>
+
     <menu name="YARN" inherit="top">
     <menu name="YARN" inherit="top">
       <item name="YARN Architecture" href="hadoop-yarn/hadoop-yarn-site/YARN.html"/>
       <item name="YARN Architecture" href="hadoop-yarn/hadoop-yarn-site/YARN.html"/>
       <item name="Capacity Scheduler" href="hadoop-yarn/hadoop-yarn-site/CapacityScheduler.html"/>
       <item name="Capacity Scheduler" href="hadoop-yarn/hadoop-yarn-site/CapacityScheduler.html"/>
@@ -117,8 +122,6 @@
       <item name="Introduction" href="hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html"/>
       <item name="Introduction" href="hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html"/>
       <item name="Resource Manager" href="hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html"/>
       <item name="Resource Manager" href="hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html"/>
       <item name="Node Manager" href="hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html"/>
       <item name="Node Manager" href="hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html"/>
-      <item name="MR Application Master" href="hadoop-yarn/hadoop-yarn-site/MapredAppMasterRest.html"/>
-      <item name="History Server" href="hadoop-yarn/hadoop-yarn-site/HistoryServerRest.html"/>
     </menu>
     </menu>
     
     
     <menu name="Auth" inherit="top">
     <menu name="Auth" inherit="top">

+ 27 - 7
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java

@@ -22,7 +22,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.IOUtils;
@@ -31,11 +30,15 @@ import org.apache.hadoop.security.Credentials;
 
 
 import java.io.IOException;
 import java.lang.reflect.Constructor;
+import java.net.URI;
+import java.util.Set;
+
+import com.google.common.collect.Sets;
 
 /**
  * The CopyListing abstraction is responsible for how the list of
  * sources and targets is constructed, for DistCp's copy function.
- * The copy-listing should be a SequenceFile<Text, FileStatus>,
+ * The copy-listing should be a SequenceFile<Text, CopyListingFileStatus>,
  * located at the path specified to buildListing(),
  * each entry being a pair of (Source relative path, source file status),
  * all the paths being fully qualified.
@@ -85,7 +88,7 @@ public abstract class CopyListing extends Configured {
     config.setLong(DistCpConstants.CONF_LABEL_TOTAL_BYTES_TO_BE_COPIED, getBytesToCopy());
     config.setLong(DistCpConstants.CONF_LABEL_TOTAL_NUMBER_OF_RECORDS, getNumberOfPaths());
 
-    checkForDuplicates(pathToListFile);
+    validateFinalListing(pathToListFile, options);
   }
 
   /**
@@ -124,13 +127,15 @@ public abstract class CopyListing extends Configured {
   protected abstract long getNumberOfPaths();
 
   /**
-   * Validate the final resulting path listing to see if there are any duplicate entries
+   * Validate the final resulting path listing.  Checks if there are duplicate
+   * entries.  If preserving ACLs, checks that file system can support ACLs.
    *
    * @param pathToListFile - path listing build by doBuildListing
+   * @param options - Input options to distcp
    * @throws IOException - Any issues while checking for duplicates and throws
    * @throws DuplicateFileException - if there are duplicates
    */
-  private void checkForDuplicates(Path pathToListFile)
+  private void validateFinalListing(Path pathToListFile, DistCpOptions options)
       throws DuplicateFileException, IOException {
 
     Configuration config = getConf();
@@ -142,17 +147,26 @@ public abstract class CopyListing extends Configured {
                           config, SequenceFile.Reader.file(sortedList));
     try {
       Text lastKey = new Text("*"); //source relative path can never hold *
-      FileStatus lastFileStatus = new FileStatus();
+      CopyListingFileStatus lastFileStatus = new CopyListingFileStatus();
 
       Text currentKey = new Text();
+      Set<URI> aclSupportCheckFsSet = Sets.newHashSet();
       while (reader.next(currentKey)) {
         if (currentKey.equals(lastKey)) {
-          FileStatus currentFileStatus = new FileStatus();
+          CopyListingFileStatus currentFileStatus = new CopyListingFileStatus();
           reader.getCurrentValue(currentFileStatus);
           throw new DuplicateFileException("File " + lastFileStatus.getPath() + " and " +
               currentFileStatus.getPath() + " would cause duplicates. Aborting");
         }
         reader.getCurrentValue(lastFileStatus);
+        if (options.shouldPreserve(DistCpOptions.FileAttribute.ACL)) {
+          FileSystem lastFs = lastFileStatus.getPath().getFileSystem(config);
+          URI lastFsUri = lastFs.getUri();
+          if (!aclSupportCheckFsSet.contains(lastFsUri)) {
+            DistCpUtils.checkFileSystemAclSupport(lastFs);
+            aclSupportCheckFsSet.add(lastFsUri);
+          }
+        }
         lastKey.set(currentKey);
       }
     } finally {
@@ -236,4 +250,10 @@ public abstract class CopyListing extends Configured {
       super(message);
     }
   }
+
+  public static class AclsNotSupportedException extends RuntimeException {
+    public AclsNotSupportedException(String message) {
+      super(message);
+    }
+  }
 }
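The new preserve-ACL path is routed through DistCpUtils.checkFileSystemAclSupport, whose body is not shown in this hunk. Purely as an illustration of what such a probe can look like (the helper name and structure below are assumptions, not the patch's actual code):

  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.tools.CopyListing;

  // Hypothetical stand-in for DistCpUtils.checkFileSystemAclSupport(fs).
  final class AclSupportProbe {
    static void checkAclSupport(FileSystem fs) {
      try {
        // getAclStatus fails on file systems that do not implement ACLs.
        fs.getAclStatus(new Path(Path.SEPARATOR));
      } catch (Exception e) {
        throw new CopyListing.AclsNotSupportedException(
            "ACLs not supported for file system: " + fs.getUri());
      }
    }
  }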

+ 153 - 0
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java

@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tools;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclUtil;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.io.WritableUtils;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.Lists;
+
+/**
+ * CopyListingFileStatus is a specialized subclass of {@link FileStatus} for
+ * attaching additional data members useful to distcp.  This class does not
+ * override {@link FileStatus#compareTo}, because the additional data members
+ * are not relevant to sort order.
+ */
+@InterfaceAudience.Private
+public final class CopyListingFileStatus extends FileStatus {
+
+  private static final byte NO_ACL_ENTRIES = -1;
+
+  // Retain static arrays of enum values to prevent repeated allocation of new
+  // arrays during deserialization.
+  private static final AclEntryType[] ACL_ENTRY_TYPES = AclEntryType.values();
+  private static final AclEntryScope[] ACL_ENTRY_SCOPES = AclEntryScope.values();
+  private static final FsAction[] FS_ACTIONS = FsAction.values();
+
+  private List<AclEntry> aclEntries;
+
+  /**
+   * Default constructor.
+   */
+  public CopyListingFileStatus() {
+  }
+
+  /**
+   * Creates a new CopyListingFileStatus by copying the members of the given
+   * FileStatus.
+   *
+   * @param fileStatus FileStatus to copy
+   */
+  public CopyListingFileStatus(FileStatus fileStatus) throws IOException {
+    super(fileStatus);
+  }
+
+  /**
+   * Returns the full logical ACL.
+   *
+   * @return List<AclEntry> containing full logical ACL
+   */
+  public List<AclEntry> getAclEntries() {
+    return AclUtil.getAclFromPermAndEntries(getPermission(),
+      aclEntries != null ? aclEntries : Collections.<AclEntry>emptyList());
+  }
+
+  /**
+   * Sets optional ACL entries.
+   *
+   * @param aclEntries List<AclEntry> containing all ACL entries
+   */
+  public void setAclEntries(List<AclEntry> aclEntries) {
+    this.aclEntries = aclEntries;
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    super.write(out);
+    if (aclEntries != null) {
+      // byte is sufficient, because 32 ACL entries is the max enforced by HDFS.
+      out.writeByte(aclEntries.size());
+      for (AclEntry entry: aclEntries) {
+        out.writeByte(entry.getScope().ordinal());
+        out.writeByte(entry.getType().ordinal());
+        WritableUtils.writeString(out, entry.getName());
+        out.writeByte(entry.getPermission().ordinal());
+      }
+    } else {
+      out.writeByte(NO_ACL_ENTRIES);
+    }
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    super.readFields(in);
+    byte aclEntriesSize = in.readByte();
+    if (aclEntriesSize != NO_ACL_ENTRIES) {
+      aclEntries = Lists.newArrayListWithCapacity(aclEntriesSize);
+      for (int i = 0; i < aclEntriesSize; ++i) {
+        aclEntries.add(new AclEntry.Builder()
+          .setScope(ACL_ENTRY_SCOPES[in.readByte()])
+          .setType(ACL_ENTRY_TYPES[in.readByte()])
+          .setName(WritableUtils.readString(in))
+          .setPermission(FS_ACTIONS[in.readByte()])
+          .build());
+      }
+    } else {
+      aclEntries = null;
+    }
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (!super.equals(o)) {
+      return false;
+    }
+    if (getClass() != o.getClass()) {
+      return false;
+    }
+    CopyListingFileStatus other = (CopyListingFileStatus)o;
+    return Objects.equal(aclEntries, other.aclEntries);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hashCode(super.hashCode(), aclEntries);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder(super.toString());
+    sb.append('{');
+    sb.append("aclEntries = " + aclEntries);
+    sb.append('}');
+    return sb.toString();
+  }
+}
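To make the wire format above concrete, here is a minimal round-trip sketch; it only relies on the members shown in this file, while the surrounding scaffolding (paths, ACL entry values, class name) is illustrative:

  import java.util.Arrays;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.permission.AclEntry;
  import org.apache.hadoop.fs.permission.AclEntryScope;
  import org.apache.hadoop.fs.permission.AclEntryType;
  import org.apache.hadoop.fs.permission.FsAction;
  import org.apache.hadoop.io.DataInputBuffer;
  import org.apache.hadoop.io.DataOutputBuffer;
  import org.apache.hadoop.tools.CopyListingFileStatus;

  public class CopyListingFileStatusRoundTrip {
    public static void main(String[] args) throws Exception {
      // Wrap an ordinary FileStatus and attach one named ACL entry.
      FileStatus base = new FileStatus(0L, false, 1, 128L * 1024 * 1024, 0L,
          new Path("hdfs://nn/src/file"));
      CopyListingFileStatus status = new CopyListingFileStatus(base);
      status.setAclEntries(Arrays.asList(new AclEntry.Builder()
          .setScope(AclEntryScope.ACCESS)
          .setType(AclEntryType.USER)
          .setName("alice")
          .setPermission(FsAction.READ_WRITE)
          .build()));

      // Serialize with write() and read it back with readFields().
      DataOutputBuffer out = new DataOutputBuffer();
      status.write(out);
      DataInputBuffer in = new DataInputBuffer();
      in.reset(out.getData(), out.getLength());
      CopyListingFileStatus copy = new CopyListingFileStatus();
      copy.readFields(in);

      System.out.println(copy.equals(status)); // expected: true
    }
  }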

+ 6 - 1
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCp.java

@@ -125,6 +125,9 @@ public class DistCp extends Configured implements Tool {
     } catch (DuplicateFileException e) {
       LOG.error("Duplicate files in input path: ", e);
       return DistCpConstants.DUPLICATE_INPUT;
+    } catch (AclsNotSupportedException e) {
+      LOG.error("ACLs not supported on at least one file system: ", e);
+      return DistCpConstants.ACLS_NOT_SUPPORTED;
     } catch (Exception e) {
       LOG.error("Exception encountered ", e);
       return DistCpConstants.UNKNOWN_ERROR;
@@ -298,7 +301,9 @@ public class DistCp extends Configured implements Tool {
     FileSystem targetFS = targetPath.getFileSystem(configuration);
     targetPath = targetPath.makeQualified(targetFS.getUri(),
                                           targetFS.getWorkingDirectory());
-
+    if (inputOptions.shouldPreserve(DistCpOptions.FileAttribute.ACL)) {
+      DistCpUtils.checkFileSystemAclSupport(targetFS);
+    }
     if (inputOptions.shouldAtomicCommit()) {
       Path workDir = inputOptions.getAtomicWorkPath();
       if (workDir == null) {

+ 1 - 0
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpConstants.java

@@ -115,6 +115,7 @@ public class DistCpConstants {
   public static final int SUCCESS = 0;
   public static final int INVALID_ARGUMENT = -1;
   public static final int DUPLICATE_INPUT = -2;
+  public static final int ACLS_NOT_SUPPORTED = -3;
   public static final int UNKNOWN_ERROR = -999;
   
   /**

+ 4 - 2
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java

@@ -45,8 +45,10 @@ public enum DistCpOptionSwitch {
    *
    */
   PRESERVE_STATUS(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
-      new Option("p", true, "preserve status (rbugpc)" +
-          "(replication, block-size, user, group, permission, checksum-type)")),
+      new Option("p", true, "preserve status (rbugpca)(replication, " +
+          "block-size, user, group, permission, checksum-type, ACL).  If " +
+          "-p is specified with no <arg>, then preserves replication, block " +
+          "size, user, group, permission and checksum type.")),
 
   /**
    * Update target location by copying only files that are missing

+ 1 - 1
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java

@@ -65,7 +65,7 @@ public class DistCpOptions {
   private boolean targetPathExists = true;
   
   public static enum FileAttribute{
-    REPLICATION, BLOCKSIZE, USER, GROUP, PERMISSION, CHECKSUMTYPE;
+    REPLICATION, BLOCKSIZE, USER, GROUP, PERMISSION, CHECKSUMTYPE, ACL;
 
     public static FileAttribute getAttribute(char symbol) {
       for (FileAttribute attribute : values()) {
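Together with the new 'a' character in the -p help text above, ACL preservation can be requested either on the command line (-pa) or through the options API. A minimal sketch, with placeholder paths (the constructor and method names are the existing DistCpOptions API, not additions of this patch):

  import java.util.Arrays;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.tools.DistCpOptions;
  import org.apache.hadoop.tools.DistCpOptions.FileAttribute;

  public class PreserveAclExample {
    public static void main(String[] args) {
      DistCpOptions options = new DistCpOptions(
          Arrays.asList(new Path("hdfs://nn1/src")),
          new Path("hdfs://nn2/dst"));
      // Equivalent to passing -pa on the distcp command line.
      options.preserve(FileAttribute.ACL);
      System.out.println(options.shouldPreserve(FileAttribute.ACL)); // true
    }
  }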

+ 24 - 35
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java

@@ -23,11 +23,12 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
 import org.apache.hadoop.tools.util.DistCpUtils;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.security.Credentials;
@@ -35,6 +36,7 @@ import org.apache.hadoop.security.Credentials;
 import com.google.common.annotations.VisibleForTesting;
 
 import java.io.*;
+import java.util.List;
 import java.util.Stack;
 
 /**
@@ -139,28 +141,34 @@ public class SimpleCopyListing extends CopyListing {
 
 
         FileStatus rootStatus = sourceFS.getFileStatus(path);
         Path sourcePathRoot = computeSourceRootPath(rootStatus, options);
-        boolean localFile = (rootStatus.getClass() != FileStatus.class);
 
         FileStatus[] sourceFiles = sourceFS.listStatus(path);
         boolean explore = (sourceFiles != null && sourceFiles.length > 0);
         if (!explore || rootStatus.isDirectory()) {
-          writeToFileListingRoot(fileListWriter, rootStatus, sourcePathRoot,
-              localFile, options);
+          CopyListingFileStatus rootCopyListingStatus =
+            DistCpUtils.toCopyListingFileStatus(sourceFS, rootStatus,
+              options.shouldPreserve(FileAttribute.ACL));
+          writeToFileListingRoot(fileListWriter, rootCopyListingStatus,
+              sourcePathRoot, options);
         }
         if (explore) {
           for (FileStatus sourceStatus: sourceFiles) {
             if (LOG.isDebugEnabled()) {
               LOG.debug("Recording source-path: " + sourceStatus.getPath() + " for copy.");
             }
-            writeToFileListing(fileListWriter, sourceStatus, sourcePathRoot,
-                localFile, options);
+            CopyListingFileStatus sourceCopyListingStatus =
+              DistCpUtils.toCopyListingFileStatus(sourceFS, sourceStatus,
+                options.shouldPreserve(FileAttribute.ACL) &&
+                sourceStatus.isDirectory());
+            writeToFileListing(fileListWriter, sourceCopyListingStatus,
+                sourcePathRoot, options);
 
             if (isDirectoryAndNotEmpty(sourceFS, sourceStatus)) {
               if (LOG.isDebugEnabled()) {
                 LOG.debug("Traversing non-empty source dir: " + sourceStatus.getPath());
               }
               traverseNonEmptyDirectory(fileListWriter, sourceStatus, sourcePathRoot,
-                  localFile, options);
+                  options);
             }
           }
         }
@@ -233,7 +241,7 @@ public class SimpleCopyListing extends CopyListing {
     return SequenceFile.createWriter(getConf(),
             SequenceFile.Writer.file(pathToListFile),
             SequenceFile.Writer.keyClass(Text.class),
-            SequenceFile.Writer.valueClass(FileStatus.class),
+            SequenceFile.Writer.valueClass(CopyListingFileStatus.class),
             SequenceFile.Writer.compression(SequenceFile.CompressionType.NONE));
   }
 
@@ -250,7 +258,6 @@ public class SimpleCopyListing extends CopyListing {
   private void traverseNonEmptyDirectory(SequenceFile.Writer fileListWriter,
                                          FileStatus sourceStatus,
                                          Path sourcePathRoot,
-                                         boolean localFile,
                                          DistCpOptions options)
                                          throws IOException {
     FileSystem sourceFS = sourcePathRoot.getFileSystem(getConf());
@@ -262,8 +269,11 @@ public class SimpleCopyListing extends CopyListing {
         if (LOG.isDebugEnabled())
           LOG.debug("Recording source-path: "
                     + sourceStatus.getPath() + " for copy.");
-        writeToFileListing(fileListWriter, child, sourcePathRoot,
-             localFile, options);
+        CopyListingFileStatus childCopyListingStatus =
+          DistCpUtils.toCopyListingFileStatus(sourceFS, child,
+            options.shouldPreserve(FileAttribute.ACL) && child.isDirectory());
+        writeToFileListing(fileListWriter, childCopyListingStatus,
+             sourcePathRoot, options);
         if (isDirectoryAndNotEmpty(sourceFS, child)) {
           if (LOG.isDebugEnabled())
             LOG.debug("Traversing non-empty source dir: "
@@ -275,8 +285,7 @@ public class SimpleCopyListing extends CopyListing {
   }
   
   private void writeToFileListingRoot(SequenceFile.Writer fileListWriter,
-      FileStatus fileStatus, Path sourcePathRoot,
-      boolean localFile,
+      CopyListingFileStatus fileStatus, Path sourcePathRoot,
       DistCpOptions options) throws IOException {
     boolean syncOrOverwrite = options.shouldSyncFolder() ||
         options.shouldOverwrite();
@@ -288,14 +297,12 @@ public class SimpleCopyListing extends CopyListing {
       }      
       return;
     }
-    writeToFileListing(fileListWriter, fileStatus, sourcePathRoot, localFile,
-        options);
+    writeToFileListing(fileListWriter, fileStatus, sourcePathRoot, options);
   }
 
   private void writeToFileListing(SequenceFile.Writer fileListWriter,
-                                  FileStatus fileStatus,
+                                  CopyListingFileStatus fileStatus,
                                   Path sourcePathRoot,
-                                  boolean localFile,
                                   DistCpOptions options) throws IOException {
     if (LOG.isDebugEnabled()) {
       LOG.debug("REL PATH: " + DistCpUtils.getRelativePath(sourcePathRoot,
@@ -303,9 +310,6 @@ public class SimpleCopyListing extends CopyListing {
     }
 
     FileStatus status = fileStatus;
-    if (localFile) {
-      status = getFileStatus(fileStatus);
-    }
 
     if (!shouldCopy(fileStatus.getPath(), options)) {
       return;
@@ -320,19 +324,4 @@ public class SimpleCopyListing extends CopyListing {
     }
     totalPaths++;
   }
-
-  private static final ByteArrayOutputStream buffer = new ByteArrayOutputStream(64);
-  private DataInputBuffer in = new DataInputBuffer();
-  
-  private FileStatus getFileStatus(FileStatus fileStatus) throws IOException {
-    FileStatus status = new FileStatus();
-
-    buffer.reset();
-    DataOutputStream out = new DataOutputStream(buffer);
-    fileStatus.write(out);
-
-    in.reset(buffer.toByteArray(), 0, buffer.size());
-    status.readFields(in);
-    return status;
-  }
 }

+ 3 - 3
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java

@@ -178,7 +178,7 @@ public class CopyCommitter extends FileOutputCommitter {
 
 
     long preservedEntries = 0;
     try {
-      FileStatus srcFileStatus = new FileStatus();
+      CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
       Text srcRelPath = new Text();
 
       // Iterate over every source path that was copied.
@@ -246,9 +246,9 @@ public class CopyCommitter extends FileOutputCommitter {
     // Delete all from target that doesn't also exist on source.
     long deletedEntries = 0;
     try {
-      FileStatus srcFileStatus = new FileStatus();
+      CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
       Text srcRelPath = new Text();
-      FileStatus trgtFileStatus = new FileStatus();
+      CopyListingFileStatus trgtFileStatus = new CopyListingFileStatus();
       Text trgtRelPath = new Text();
 
       FileSystem targetFS = targetFinalPath.getFileSystem(conf);

+ 10 - 5
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java

@@ -24,9 +24,11 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.tools.CopyListingFileStatus;
 import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.DistCpOptionSwitch;
 import org.apache.hadoop.tools.DistCpOptions;
@@ -37,12 +39,13 @@ import org.apache.hadoop.util.StringUtils;
 import java.io.*;
 import java.io.*;
 import java.util.EnumSet;
 import java.util.EnumSet;
 import java.util.Arrays;
 import java.util.Arrays;
+import java.util.List;
 
 
 /**
 /**
  * Mapper class that executes the DistCp copy operation.
  * Mapper class that executes the DistCp copy operation.
  * Implements the o.a.h.mapreduce.Mapper<> interface.
  * Implements the o.a.h.mapreduce.Mapper<> interface.
  */
  */
-public class CopyMapper extends Mapper<Text, FileStatus, Text, Text> {
+public class CopyMapper extends Mapper<Text, CopyListingFileStatus, Text, Text> {
 
 
   /**
   /**
    * Hadoop counters for the DistCp CopyMapper.
    * Hadoop counters for the DistCp CopyMapper.
@@ -172,8 +175,8 @@ public class CopyMapper extends Mapper<Text, FileStatus, Text, Text> {
    * @throws IOException
    * @throws IOException
    */
    */
   @Override
   @Override
-  public void map(Text relPath, FileStatus sourceFileStatus, Context context)
-          throws IOException, InterruptedException {
+  public void map(Text relPath, CopyListingFileStatus sourceFileStatus,
+          Context context) throws IOException, InterruptedException {
     Path sourcePath = sourceFileStatus.getPath();
     Path sourcePath = sourceFileStatus.getPath();
 
 
     if (LOG.isDebugEnabled())
     if (LOG.isDebugEnabled())
@@ -191,11 +194,13 @@ public class CopyMapper extends Mapper<Text, FileStatus, Text, Text> {
     LOG.info(description);
     LOG.info(description);
 
 
     try {
     try {
-      FileStatus sourceCurrStatus;
+      CopyListingFileStatus sourceCurrStatus;
       FileSystem sourceFS;
       FileSystem sourceFS;
       try {
       try {
         sourceFS = sourcePath.getFileSystem(conf);
         sourceFS = sourcePath.getFileSystem(conf);
-        sourceCurrStatus = sourceFS.getFileStatus(sourcePath);
+        sourceCurrStatus = DistCpUtils.toCopyListingFileStatus(sourceFS,
+          sourceFS.getFileStatus(sourcePath),
+          fileAttributes.contains(FileAttribute.ACL));
       } catch (FileNotFoundException e) {
       } catch (FileNotFoundException e) {
         throw new IOException(new RetriableFileCopyCommand.CopyReadException(e));
         throw new IOException(new RetriableFileCopyCommand.CopyReadException(e));
       }
       }
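As an illustration of the conversion the mapper now performs, a hedged sketch built only from the signatures shown in this patch (the source path comes from the command line; the class name is invented for the example):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.tools.CopyListingFileStatus;
    import org.apache.hadoop.tools.util.DistCpUtils;

    public class CopyListingStatusSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path source = new Path(args[0]);   // e.g. a path on the source cluster
        FileSystem fs = source.getFileSystem(conf);
        FileStatus raw = fs.getFileStatus(source);

        // Plain conversion, as used when ACLs are not being preserved.
        CopyListingFileStatus plain = new CopyListingFileStatus(raw);

        // With ACL preservation requested, the helper also pulls the ACL
        // entries when the file's permission has the ACL bit set.
        CopyListingFileStatus withAcls =
            DistCpUtils.toCopyListingFileStatus(fs, raw, true);

        System.out.println(plain.getPath() + " acls=" + withAcls.getAclEntries());
      }
    }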

+ 8 - 7
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/UniformSizeInputFormat.java

@@ -23,11 +23,11 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.tools.CopyListingFileStatus;
 import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.util.DistCpUtils;
 import org.apache.hadoop.tools.util.DistCpUtils;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.mapreduce.*;
 import org.apache.hadoop.mapreduce.*;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader;
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
@@ -44,7 +44,8 @@ import java.util.ArrayList;
  * that the total-number of bytes to be copied for each input split is
  * that the total-number of bytes to be copied for each input split is
  * uniform.
  * uniform.
  */
  */
-public class UniformSizeInputFormat extends InputFormat<Text, FileStatus> {
+public class UniformSizeInputFormat
+    extends InputFormat<Text, CopyListingFileStatus> {
   private static final Log LOG
   private static final Log LOG
                 = LogFactory.getLog(UniformSizeInputFormat.class);
                 = LogFactory.getLog(UniformSizeInputFormat.class);
 
 
@@ -76,7 +77,7 @@ public class UniformSizeInputFormat extends InputFormat<Text, FileStatus> {
     List<InputSplit> splits = new ArrayList<InputSplit>(numSplits);
     List<InputSplit> splits = new ArrayList<InputSplit>(numSplits);
     long nBytesPerSplit = (long) Math.ceil(totalSizeBytes * 1.0 / numSplits);
     long nBytesPerSplit = (long) Math.ceil(totalSizeBytes * 1.0 / numSplits);
 
 
-    FileStatus srcFileStatus = new FileStatus();
+    CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
     Text srcRelPath = new Text();
     Text srcRelPath = new Text();
     long currentSplitSize = 0;
     long currentSplitSize = 0;
     long lastSplitStart = 0;
     long lastSplitStart = 0;
@@ -161,9 +162,9 @@ public class UniformSizeInputFormat extends InputFormat<Text, FileStatus> {
    * @throws InterruptedException
    * @throws InterruptedException
    */
    */
   @Override
   @Override
-  public RecordReader<Text, FileStatus> createRecordReader(InputSplit split,
-                                                     TaskAttemptContext context)
-                                      throws IOException, InterruptedException {
-    return new SequenceFileRecordReader<Text, FileStatus>();
+  public RecordReader<Text, CopyListingFileStatus> createRecordReader(
+      InputSplit split, TaskAttemptContext context)
+      throws IOException, InterruptedException {
+    return new SequenceFileRecordReader<Text, CopyListingFileStatus>();
   }
   }
 }
 }

+ 3 - 2
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputChunk.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.tools.CopyListingFileStatus;
 import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.util.DistCpUtils;
 import org.apache.hadoop.tools.util.DistCpUtils;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader;
 import org.apache.hadoop.mapreduce.lib.input.SequenceFileRecordReader;
@@ -90,7 +91,7 @@ class DynamicInputChunk<K, V> {
   private void openForWrite() throws IOException {
   private void openForWrite() throws IOException {
     writer = SequenceFile.createWriter(
     writer = SequenceFile.createWriter(
             chunkFilePath.getFileSystem(configuration), configuration,
             chunkFilePath.getFileSystem(configuration), configuration,
-            chunkFilePath, Text.class, FileStatus.class,
+            chunkFilePath, Text.class, CopyListingFileStatus.class,
             SequenceFile.CompressionType.NONE);
             SequenceFile.CompressionType.NONE);
 
 
   }
   }
@@ -117,7 +118,7 @@ class DynamicInputChunk<K, V> {
    * @param value Corresponding value from the listing file.
    * @param value Corresponding value from the listing file.
   * @throws IOException Exception on failure to write to the file.
    */
    */
-  public void write(Text key, FileStatus value) throws IOException {
+  public void write(Text key, CopyListingFileStatus value) throws IOException {
     writer.append(key, value);
     writer.append(key, value);
   }
   }
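A short sketch of the writer side, mirroring the key/value classes the dynamic-strategy chunk files now carry; the chunk path and class name are hypothetical and this is illustration only, not code from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.tools.CopyListingFileStatus;

    public class ChunkWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path source = new Path(args[0]);                    // file to describe
        Path chunk = new Path("/tmp/distcp-chunk.00001");   // hypothetical chunk path
        FileSystem fs = chunk.getFileSystem(conf);

        // Same key/value classes the chunk files use after this change.
        SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, chunk,
            Text.class, CopyListingFileStatus.class,
            SequenceFile.CompressionType.NONE);
        try {
          FileStatus raw = source.getFileSystem(conf).getFileStatus(source);
          writer.append(new Text("/" + source.getName()),
              new CopyListingFileStatus(raw));
        } finally {
          writer.close();
        }
      }
    }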
 
 

+ 2 - 2
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/lib/DynamicInputFormat.java

@@ -29,7 +29,7 @@ import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.tools.CopyListingFileStatus;
 
 
 import java.util.List;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.ArrayList;
@@ -133,7 +133,7 @@ public class DynamicInputFormat<K, V> extends InputFormat<K, V> {
     
     
     List<DynamicInputChunk> chunksFinal = new ArrayList<DynamicInputChunk>();
     List<DynamicInputChunk> chunksFinal = new ArrayList<DynamicInputChunk>();
 
 
-    FileStatus fileStatus = new FileStatus();
+    CopyListingFileStatus fileStatus = new CopyListingFileStatus();
     Text relPath = new Text();
     Text relPath = new Text();
     int recordCounter = 0;
     int recordCounter = 0;
     int chunkCount = 0;
     int chunkCount = 0;

+ 80 - 3
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java

@@ -25,15 +25,21 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclUtil;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.tools.CopyListingFileStatus;
 import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
 import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
 import org.apache.hadoop.tools.mapred.UniformSizeInputFormat;
 import org.apache.hadoop.tools.mapred.UniformSizeInputFormat;
+import org.apache.hadoop.tools.CopyListing.AclsNotSupportedException;
 import org.apache.hadoop.tools.DistCpOptions;
 import org.apache.hadoop.tools.DistCpOptions;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputFormat;
 
 
 import java.io.IOException;
 import java.io.IOException;
 import java.util.EnumSet;
 import java.util.EnumSet;
+import java.util.List;
 import java.util.Locale;
 import java.util.Locale;
 import java.text.DecimalFormat;
 import java.text.DecimalFormat;
 import java.net.URI;
 import java.net.URI;
@@ -181,7 +187,7 @@ public class DistCpUtils {
    *                       change or any transient error)
    *                       change or any transient error)
    */
    */
   public static void preserve(FileSystem targetFS, Path path,
   public static void preserve(FileSystem targetFS, Path path,
-                              FileStatus srcFileStatus,
+                              CopyListingFileStatus srcFileStatus,
                               EnumSet<FileAttribute> attributes) throws IOException {
                               EnumSet<FileAttribute> attributes) throws IOException {
 
 
     FileStatus targetFileStatus = targetFS.getFileStatus(path);
     FileStatus targetFileStatus = targetFS.getFileStatus(path);
@@ -189,7 +195,18 @@ public class DistCpUtils {
     String user = targetFileStatus.getOwner();
     String user = targetFileStatus.getOwner();
     boolean chown = false;
     boolean chown = false;
 
 
-    if (attributes.contains(FileAttribute.PERMISSION) &&
+    if (attributes.contains(FileAttribute.ACL)) {
+      List<AclEntry> srcAcl = srcFileStatus.getAclEntries();
+      List<AclEntry> targetAcl = getAcl(targetFS, targetFileStatus);
+      if (!srcAcl.equals(targetAcl)) {
+        targetFS.setAcl(path, srcAcl);
+      }
+      // setAcl can't preserve sticky bit, so also call setPermission if needed.
+      if (srcFileStatus.getPermission().getStickyBit() !=
+          targetFileStatus.getPermission().getStickyBit()) {
+        targetFS.setPermission(path, srcFileStatus.getPermission());
+      }
+    } else if (attributes.contains(FileAttribute.PERMISSION) &&
       !srcFileStatus.getPermission().equals(targetFileStatus.getPermission())) {
       !srcFileStatus.getPermission().equals(targetFileStatus.getPermission())) {
       targetFS.setPermission(path, srcFileStatus.getPermission());
       targetFS.setPermission(path, srcFileStatus.getPermission());
     }
     }
@@ -216,6 +233,46 @@ public class DistCpUtils {
     }
     }
   }
   }
 
 
+  /**
+   * Returns a file's full logical ACL.
+   *
+   * @param fileSystem FileSystem containing the file
+   * @param fileStatus FileStatus of file
+   * @return List<AclEntry> containing full logical ACL
+   * @throws IOException if there is an I/O error
+   */
+  public static List<AclEntry> getAcl(FileSystem fileSystem,
+      FileStatus fileStatus) throws IOException {
+    List<AclEntry> entries = fileSystem.getAclStatus(fileStatus.getPath())
+      .getEntries();
+    return AclUtil.getAclFromPermAndEntries(fileStatus.getPermission(), entries);
+  }
+
+  /**
+   * Converts a FileStatus to a CopyListingFileStatus.  If preserving ACLs,
+   * populates the CopyListingFileStatus with the ACLs.
+   *
+   * @param fileSystem FileSystem containing the file
+   * @param fileStatus FileStatus of file
+   * @param preserveAcls boolean true if preserving ACLs
+   * @throws IOException if there is an I/O error
+   */
+  public static CopyListingFileStatus toCopyListingFileStatus(
+      FileSystem fileSystem, FileStatus fileStatus, boolean preserveAcls)
+      throws IOException {
+    CopyListingFileStatus copyListingFileStatus =
+      new CopyListingFileStatus(fileStatus);
+    if (preserveAcls) {
+      FsPermission perm = fileStatus.getPermission();
+      if (perm.getAclBit()) {
+        List<AclEntry> aclEntries = fileSystem.getAclStatus(
+          fileStatus.getPath()).getEntries();
+        copyListingFileStatus.setAclEntries(aclEntries);
+      }
+    }
+    return copyListingFileStatus;
+  }
+
   /**
   /**
   * Sort sequence file containing FileStatus and Text as key and value respectively
    *
    *
@@ -227,7 +284,8 @@ public class DistCpUtils {
    */
    */
   public static Path sortListing(FileSystem fs, Configuration conf, Path sourceListing)
   public static Path sortListing(FileSystem fs, Configuration conf, Path sourceListing)
       throws IOException {
       throws IOException {
-    SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs, Text.class, FileStatus.class, conf);
+    SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs, Text.class,
+      CopyListingFileStatus.class, conf);
     Path output = new Path(sourceListing.toString() +  "_sorted");
     Path output = new Path(sourceListing.toString() +  "_sorted");
 
 
     if (fs.exists(output)) {
     if (fs.exists(output)) {
@@ -238,6 +296,25 @@ public class DistCpUtils {
     return output;
     return output;
   }
   }
 
 
+  /**
+   * Determines if a file system supports ACLs by running a canary getAclStatus
+   * request on the file system root.  This method is used before distcp job
+   * submission to fail fast if the user requested preserving ACLs, but the file
+   * system cannot support ACLs.
+   *
+   * @param fs FileSystem to check
+   * @throws AclsNotSupportedException if fs does not support ACLs
+   */
+  public static void checkFileSystemAclSupport(FileSystem fs)
+      throws AclsNotSupportedException {
+    try {
+      fs.getAclStatus(new Path(Path.SEPARATOR));
+    } catch (Exception e) {
+      throw new AclsNotSupportedException("ACLs not supported for file system: "
+        + fs.getUri());
+    }
+  }
+
   /**
   /**
    * String utility to convert a number-of-bytes to human readable format.
    * String utility to convert a number-of-bytes to human readable format.
    */
    */
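The two new helpers can be exercised together roughly as follows. This is an illustrative sketch, assuming only the public signatures added above; the input path is taken from the command line and the driver class is invented for the example. It fails fast, as the DistCp driver does before job submission, if the file system cannot answer the getAclStatus canary, and otherwise prints the file's full logical ACL (permission-derived entries plus any extended entries):

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.AclEntry;
    import org.apache.hadoop.tools.CopyListing.AclsNotSupportedException;
    import org.apache.hadoop.tools.util.DistCpUtils;

    public class AclHelpersSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path file = new Path(args[0]);
        FileSystem fs = file.getFileSystem(conf);

        // Canary check: throws if the file system cannot serve ACLs.
        try {
          DistCpUtils.checkFileSystemAclSupport(fs);
        } catch (AclsNotSupportedException e) {
          System.err.println(e.getMessage());
          return;
        }

        // Full logical ACL for the file.
        FileStatus status = fs.getFileStatus(file);
        List<AclEntry> acl = DistCpUtils.getAcl(fs, status);
        System.out.println(file + " -> " + acl);
      }
    }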

+ 11 - 11
hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/StubContext.java

@@ -23,7 +23,6 @@ import org.apache.hadoop.mapreduce.task.MapContextImpl;
 import org.apache.hadoop.mapreduce.lib.map.WrappedMapper;
 import org.apache.hadoop.mapreduce.lib.map.WrappedMapper;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 
 
 import java.util.List;
 import java.util.List;
@@ -33,18 +32,19 @@ import java.io.IOException;
 public class StubContext {
 public class StubContext {
 
 
   private StubStatusReporter reporter = new StubStatusReporter();
   private StubStatusReporter reporter = new StubStatusReporter();
-  private RecordReader<Text, FileStatus> reader;
+  private RecordReader<Text, CopyListingFileStatus> reader;
   private StubInMemoryWriter writer = new StubInMemoryWriter();
   private StubInMemoryWriter writer = new StubInMemoryWriter();
-  private Mapper<Text, FileStatus, Text, Text>.Context mapperContext;
+  private Mapper<Text, CopyListingFileStatus, Text, Text>.Context mapperContext;
 
 
-  public StubContext(Configuration conf, RecordReader<Text, FileStatus> reader,
-                     int taskId) throws IOException, InterruptedException {
+  public StubContext(Configuration conf,
+      RecordReader<Text, CopyListingFileStatus> reader, int taskId)
+      throws IOException, InterruptedException {
 
 
-    WrappedMapper<Text, FileStatus, Text, Text> wrappedMapper
-            = new WrappedMapper<Text, FileStatus, Text, Text>();
+    WrappedMapper<Text, CopyListingFileStatus, Text, Text> wrappedMapper
+            = new WrappedMapper<Text, CopyListingFileStatus, Text, Text>();
 
 
-    MapContextImpl<Text, FileStatus, Text, Text> contextImpl
-            = new MapContextImpl<Text, FileStatus, Text, Text>(conf,
+    MapContextImpl<Text, CopyListingFileStatus, Text, Text> contextImpl
+            = new MapContextImpl<Text, CopyListingFileStatus, Text, Text>(conf,
             getTaskAttemptID(taskId), reader, writer,
             getTaskAttemptID(taskId), reader, writer,
             null, reporter, null);
             null, reporter, null);
 
 
@@ -52,7 +52,7 @@ public class StubContext {
     this.mapperContext = wrappedMapper.getMapContext(contextImpl);
     this.mapperContext = wrappedMapper.getMapContext(contextImpl);
   }
   }
 
 
-  public Mapper<Text, FileStatus, Text, Text>.Context getContext() {
+  public Mapper<Text, CopyListingFileStatus, Text, Text>.Context getContext() {
     return mapperContext;
     return mapperContext;
   }
   }
 
 
@@ -60,7 +60,7 @@ public class StubContext {
     return reporter;
     return reporter;
   }
   }
 
 
-  public RecordReader<Text, FileStatus> getReader() {
+  public RecordReader<Text, CopyListingFileStatus> getReader() {
     return reader;
     return reader;
   }
   }
 
 

+ 2 - 3
hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestCopyListing.java

@@ -24,7 +24,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.tools.util.TestDistCpUtils;
 import org.apache.hadoop.tools.util.TestDistCpUtils;
@@ -106,7 +105,7 @@ public class TestCopyListing extends SimpleCopyListing {
     Assert.assertEquals(listing.getNumberOfPaths(), 3);
     Assert.assertEquals(listing.getNumberOfPaths(), 3);
     SequenceFile.Reader reader = new SequenceFile.Reader(getConf(),
     SequenceFile.Reader reader = new SequenceFile.Reader(getConf(),
         SequenceFile.Reader.file(listingFile));
         SequenceFile.Reader.file(listingFile));
-    FileStatus fileStatus = new FileStatus();
+    CopyListingFileStatus fileStatus = new CopyListingFileStatus();
     Text relativePath = new Text();
     Text relativePath = new Text();
     Assert.assertTrue(reader.next(relativePath, fileStatus));
     Assert.assertTrue(reader.next(relativePath, fileStatus));
     Assert.assertEquals(relativePath.toString(), "/1");
     Assert.assertEquals(relativePath.toString(), "/1");
@@ -274,7 +273,7 @@ public class TestCopyListing extends SimpleCopyListing {
 
 
       reader = new SequenceFile.Reader(getConf(), SequenceFile.Reader.file(listFile));
       reader = new SequenceFile.Reader(getConf(), SequenceFile.Reader.file(listFile));
 
 
-      FileStatus fileStatus = new FileStatus();
+      CopyListingFileStatus fileStatus = new CopyListingFileStatus();
       Text relativePath = new Text();
       Text relativePath = new Text();
       Assert.assertTrue(reader.next(relativePath, fileStatus));
       Assert.assertTrue(reader.next(relativePath, fileStatus));
       Assert.assertTrue(relativePath.toString().equals(""));
       Assert.assertTrue(relativePath.toString().equals(""));

+ 329 - 0
hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestDistCpWithAcls.java

@@ -0,0 +1,329 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools;
+
+import static org.apache.hadoop.fs.permission.AclEntryScope.*;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.Arrays;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.ToolRunner;
+
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Tests distcp in combination with HDFS ACLs.
+ */
+public class TestDistCpWithAcls {
+
+  private static MiniDFSCluster cluster;
+  private static Configuration conf;
+  private static FileSystem fs;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    initCluster(true, true);
+    // Create this directory structure:
+    // /src
+    //   /dir1
+    //     /subdir1
+    //   /dir2
+    //     /dir2/file2
+    //     /dir2/file3
+    //   /dir3sticky
+    //   /file1    
+    fs.mkdirs(new Path("/src/dir1/subdir1"));
+    fs.mkdirs(new Path("/src/dir2"));
+    fs.create(new Path("/src/dir2/file2")).close();
+    fs.create(new Path("/src/dir2/file3")).close();
+    fs.mkdirs(new Path("/src/dir3sticky"));
+    fs.create(new Path("/src/file1")).close();
+
+    // Set a mix of ACLs and plain permissions throughout the tree.
+    fs.modifyAclEntries(new Path("/src/dir1"), Arrays.asList(
+      aclEntry(DEFAULT, USER, "bruce", ALL)));
+
+    fs.modifyAclEntries(new Path("/src/dir2/file2"), Arrays.asList(
+      aclEntry(ACCESS, GROUP, "sales", NONE)));
+
+    fs.setPermission(new Path("/src/dir2/file3"),
+      new FsPermission((short)0660));
+
+    fs.modifyAclEntries(new Path("/src/file1"), Arrays.asList(
+      aclEntry(ACCESS, USER, "diana", READ)));
+
+    fs.setPermission(new Path("/src/dir3sticky"),
+      new FsPermission((short)01777));
+  }
+
+  @AfterClass
+  public static void shutdown() {
+    IOUtils.cleanup(null, fs);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testPreserveAcls() throws Exception {
+    assertRunDistCp(DistCpConstants.SUCCESS, "/dstPreserveAcls");
+
+    assertAclEntries("/dstPreserveAcls/dir1", new AclEntry[] {
+      aclEntry(DEFAULT, USER, ALL),
+      aclEntry(DEFAULT, USER, "bruce", ALL),
+      aclEntry(DEFAULT, GROUP, READ_EXECUTE),
+      aclEntry(DEFAULT, MASK, ALL),
+      aclEntry(DEFAULT, OTHER, READ_EXECUTE) } );
+    assertPermission("/dstPreserveAcls/dir1", (short)0755);
+
+    assertAclEntries("/dstPreserveAcls/dir1/subdir1", new AclEntry[] { });
+    assertPermission("/dstPreserveAcls/dir1/subdir1", (short)0755);
+
+    assertAclEntries("/dstPreserveAcls/dir2", new AclEntry[] { });
+    assertPermission("/dstPreserveAcls/dir2", (short)0755);
+
+    assertAclEntries("/dstPreserveAcls/dir2/file2", new AclEntry[] {
+      aclEntry(ACCESS, GROUP, READ),
+      aclEntry(ACCESS, GROUP, "sales", NONE) } );
+    assertPermission("/dstPreserveAcls/dir2/file2", (short)0644);
+
+    assertAclEntries("/dstPreserveAcls/dir2/file3", new AclEntry[] { });
+    assertPermission("/dstPreserveAcls/dir2/file3", (short)0660);
+
+    assertAclEntries("/dstPreserveAcls/dir3sticky", new AclEntry[] { });
+    assertPermission("/dstPreserveAcls/dir3sticky", (short)01777);
+
+    assertAclEntries("/dstPreserveAcls/file1", new AclEntry[] {
+      aclEntry(ACCESS, USER, "diana", READ),
+      aclEntry(ACCESS, GROUP, READ) } );
+    assertPermission("/dstPreserveAcls/file1", (short)0644);
+  }
+
+  @Test
+  public void testAclsNotEnabled() throws Exception {
+    try {
+      restart(false);
+      assertRunDistCp(DistCpConstants.ACLS_NOT_SUPPORTED, "/dstAclsNotEnabled");
+    } finally {
+      restart(true);
+    }
+  }
+
+  @Test
+  public void testAclsNotImplemented() throws Exception {
+    assertRunDistCp(DistCpConstants.ACLS_NOT_SUPPORTED,
+      "stubfs://dstAclsNotImplemented");
+  }
+
+  /**
+   * Stub FileSystem implementation used for testing the case of attempting
+   * distcp with ACLs preserved on a file system that does not support ACLs.
+   * The base class implementation throws UnsupportedOperationException for the
+   * ACL methods, so we don't need to override them.
+   */
+  public static class StubFileSystem extends FileSystem {
+
+    @Override
+    public FSDataOutputStream append(Path f, int bufferSize,
+        Progressable progress) throws IOException {
+      return null;
+    }
+
+    @Override
+    public FSDataOutputStream create(Path f, FsPermission permission,
+        boolean overwrite, int bufferSize, short replication, long blockSize,
+        Progressable progress) throws IOException {
+      return null;
+    }
+
+    @Override
+    public boolean delete(Path f, boolean recursive) throws IOException {
+      return false;
+    }
+
+    @Override
+    public FileStatus getFileStatus(Path f) throws IOException {
+      return null;
+    }
+
+    @Override
+    public URI getUri() {
+      return URI.create("stubfs:///");
+    }
+
+    @Override
+    public Path getWorkingDirectory() {
+      return new Path(Path.SEPARATOR);
+    }
+
+    @Override
+    public FileStatus[] listStatus(Path f) throws IOException {
+      return null;
+    }
+
+    @Override
+    public boolean mkdirs(Path f, FsPermission permission)
+        throws IOException {
+      return false;
+    }
+
+    @Override
+    public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+      return null;
+    }
+
+    @Override
+    public boolean rename(Path src, Path dst) throws IOException {
+      return false;
+    }
+
+    @Override
+    public void setWorkingDirectory(Path dir) {
+    }
+  }
+
+  /**
+   * Create a new AclEntry with scope, type and permission (no name).
+   *
+   * @param scope AclEntryScope scope of the ACL entry
+   * @param type AclEntryType ACL entry type
+   * @param permission FsAction set of permissions in the ACL entry
+   * @return AclEntry new AclEntry
+   */
+  private static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
+      FsAction permission) {
+    return new AclEntry.Builder()
+      .setScope(scope)
+      .setType(type)
+      .setPermission(permission)
+      .build();
+  }
+
+  /**
+   * Create a new AclEntry with scope, type, name and permission.
+   *
+   * @param scope AclEntryScope scope of the ACL entry
+   * @param type AclEntryType ACL entry type
+   * @param name String optional ACL entry name
+   * @param permission FsAction set of permissions in the ACL entry
+   * @return AclEntry new AclEntry
+   */
+  private static AclEntry aclEntry(AclEntryScope scope, AclEntryType type,
+      String name, FsAction permission) {
+    return new AclEntry.Builder()
+      .setScope(scope)
+      .setType(type)
+      .setName(name)
+      .setPermission(permission)
+      .build();
+  }
+
+  /**
+   * Asserts the ACL entries returned by getAclStatus for a specific path.
+   *
+   * @param path String path to check
+   * @param entries AclEntry[] expected ACL entries
+   * @throws Exception if there is any error
+   */
+  private static void assertAclEntries(String path, AclEntry[] entries)
+      throws Exception {
+    assertArrayEquals(entries, fs.getAclStatus(new Path(path)).getEntries()
+      .toArray(new AclEntry[0]));
+  }
+
+  /**
+   * Asserts the value of the FsPermission bits on the inode of a specific path.
+   *
+   * @param path String path to check
+   * @param perm short expected permission bits
+   * @throws Exception if there is any error
+   */
+  private static void assertPermission(String path, short perm)
+      throws Exception {
+    assertEquals(perm,
+      fs.getFileStatus(new Path(path)).getPermission().toShort());
+  }
+
+  /**
+   * Runs distcp from /src to specified destination, preserving ACLs.  Asserts
+   * expected exit code.
+   *
+   * @param int exitCode expected exit code
+   * @param dst String distcp destination
+   * @throws Exception if there is any error
+   */
+  private static void assertRunDistCp(int exitCode, String dst)
+      throws Exception {
+    DistCp distCp = new DistCp(conf, null);
+    assertEquals(exitCode, ToolRunner.run(
+      conf, distCp, new String[] { "-pa", "/src", dst }));
+  }
+
+  /**
+   * Initialize the cluster, wait for it to become active, and get FileSystem.
+   *
+   * @param format if true, format the NameNode and DataNodes before starting up
+   * @param aclsEnabled if true, ACL support is enabled
+   * @throws Exception if any step fails
+   */
+  private static void initCluster(boolean format, boolean aclsEnabled)
+      throws Exception {
+    conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, aclsEnabled);
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
+    conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
+      .build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+  }
+
+  /**
+   * Restarts the cluster with ACLs enabled or disabled.
+   *
+   * @param aclsEnabled if true, ACL support is enabled
+   * @throws Exception if any step fails
+   */
+  private static void restart(boolean aclsEnabled) throws Exception {
+    shutdown();
+    initCluster(false, aclsEnabled);
+  }
+}
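Outside the test, the same invocation style works from a small driver. A hedged sketch with hypothetical cluster paths: it asks DistCp to preserve ACLs via the new -pa flag, following the ToolRunner call used in assertRunDistCp above, and exits with DistCp's return code (which is ACLS_NOT_SUPPORTED when the target file system rejects the getAclStatus canary):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.tools.DistCp;
    import org.apache.hadoop.util.ToolRunner;

    public class RunDistCpWithAcls {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // -pa preserves ACLs; source and target paths here are hypothetical.
        int rc = ToolRunner.run(conf, new DistCp(conf, null),
            new String[] { "-pa", "hdfs://nn:8020/src", "hdfs://nn:8020/dst" });
        System.exit(rc);
      }
    }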

+ 1 - 2
hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestFileBasedCopyListing.java

@@ -23,7 +23,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
@@ -531,7 +530,7 @@ public class TestFileBasedCopyListing {
                                             SequenceFile.Reader.file(listFile));
                                             SequenceFile.Reader.file(listFile));
     try {
     try {
       Text relPath = new Text();
       Text relPath = new Text();
-      FileStatus fileStatus = new FileStatus();
+      CopyListingFileStatus fileStatus = new CopyListingFileStatus();
       while (reader.next(relPath, fileStatus)) {
       while (reader.next(relPath, fileStatus)) {
         if (fileStatus.isDirectory() && relPath.toString().equals("")) {
         if (fileStatus.isDirectory() && relPath.toString().equals("")) {
           // ignore root with empty relPath, which is an entry to be 
           // ignore root with empty relPath, which is an entry to be 

+ 1 - 2
hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestGlobbedCopyListing.java

@@ -19,7 +19,6 @@
 package org.apache.hadoop.tools;
 package org.apache.hadoop.tools;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -121,7 +120,7 @@ public class TestGlobbedCopyListing {
     SequenceFile.Reader reader = new SequenceFile.Reader(cluster.getFileSystem(),
     SequenceFile.Reader reader = new SequenceFile.Reader(cluster.getFileSystem(),
                                               listingPath, new Configuration());
                                               listingPath, new Configuration());
     Text key   = new Text();
     Text key   = new Text();
-    FileStatus value = new FileStatus();
+    CopyListingFileStatus value = new CopyListingFileStatus();
     Map<String, String> actualValues = new HashMap<String, String>();
     Map<String, String> actualValues = new HashMap<String, String>();
     while (reader.next(key, value)) {
     while (reader.next(key, value)) {
       if (value.isDirectory() && key.toString().equals("")) {
       if (value.isDirectory() && key.toString().equals("")) {

+ 8 - 2
hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java

@@ -410,6 +410,7 @@ public class TestOptionsParser {
     Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
+    Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
 
 
     options = OptionsParser.parse(new String[] {
     options = OptionsParser.parse(new String[] {
         "-p",
         "-p",
@@ -421,6 +422,7 @@ public class TestOptionsParser {
     Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
+    Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
 
 
     options = OptionsParser.parse(new String[] {
     options = OptionsParser.parse(new String[] {
         "-pbr",
         "-pbr",
@@ -433,6 +435,7 @@ public class TestOptionsParser {
     Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
+    Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
 
 
     options = OptionsParser.parse(new String[] {
     options = OptionsParser.parse(new String[] {
         "-pbrgup",
         "-pbrgup",
@@ -445,9 +448,10 @@ public class TestOptionsParser {
     Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
+    Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
 
 
     options = OptionsParser.parse(new String[] {
     options = OptionsParser.parse(new String[] {
-        "-pbrgupc",
+        "-pbrgupca",
         "-f",
         "-f",
         "hdfs://localhost:8020/source/first",
         "hdfs://localhost:8020/source/first",
         "hdfs://localhost:8020/target/"});
         "hdfs://localhost:8020/target/"});
@@ -457,6 +461,7 @@ public class TestOptionsParser {
     Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
+    Assert.assertTrue(options.shouldPreserve(FileAttribute.ACL));
 
 
     options = OptionsParser.parse(new String[] {
     options = OptionsParser.parse(new String[] {
         "-pc",
         "-pc",
@@ -469,6 +474,7 @@ public class TestOptionsParser {
     Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
+    Assert.assertFalse(options.shouldPreserve(FileAttribute.ACL));
 
 
     options = OptionsParser.parse(new String[] {
     options = OptionsParser.parse(new String[] {
         "-p",
         "-p",
@@ -485,7 +491,7 @@ public class TestOptionsParser {
 
 
     try {
     try {
       OptionsParser.parse(new String[] {
       OptionsParser.parse(new String[] {
-          "-pabc",
+          "-pabcd",
           "-f",
           "-f",
           "hdfs://localhost:8020/source/first",
           "hdfs://localhost:8020/source/first",
           "hdfs://localhost:8020/target"});
           "hdfs://localhost:8020/target"});
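The new 'a' attribute flag can also be checked from the parser directly; a minimal sketch based on the parse calls shown in this test (positional source and target instead of -f, class name invented for the example):

    import org.apache.hadoop.tools.DistCpOptions;
    import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
    import org.apache.hadoop.tools.OptionsParser;

    public class ParseAclFlagSketch {
      public static void main(String[] args) {
        DistCpOptions options = OptionsParser.parse(new String[] {
            "-pa",
            "hdfs://localhost:8020/source/first",
            "hdfs://localhost:8020/target/" });
        // 'a' in the -p flag string now maps to FileAttribute.ACL.
        System.out.println("preserve ACLs? "
            + options.shouldPreserve(FileAttribute.ACL));
      }
    }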

+ 66 - 53
hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java

@@ -42,6 +42,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.tools.CopyListingFileStatus;
 import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.DistCpOptionSwitch;
 import org.apache.hadoop.tools.DistCpOptionSwitch;
 import org.apache.hadoop.tools.DistCpOptions;
 import org.apache.hadoop.tools.DistCpOptions;
@@ -222,7 +223,7 @@ public class TestCopyMapper {
       FileSystem fs = cluster.getFileSystem();
       FileSystem fs = cluster.getFileSystem();
       CopyMapper copyMapper = new CopyMapper();
       CopyMapper copyMapper = new CopyMapper();
       StubContext stubContext = new StubContext(getConfiguration(), null, 0);
       StubContext stubContext = new StubContext(getConfiguration(), null, 0);
-      Mapper<Text, FileStatus, Text, Text>.Context context
+      Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
               = stubContext.getContext();
               = stubContext.getContext();
 
 
       Configuration configuration = context.getConfiguration();
       Configuration configuration = context.getConfiguration();
@@ -238,7 +239,7 @@ public class TestCopyMapper {
 
 
       for (Path path: pathList) {
       for (Path path: pathList) {
         copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
         copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
-                fs.getFileStatus(path), context);
+                new CopyListingFileStatus(fs.getFileStatus(path)), context);
       }
       }
 
 
       // Check that the maps worked.
       // Check that the maps worked.
@@ -283,12 +284,11 @@ public class TestCopyMapper {
   }
   }
 
 
   private void testCopyingExistingFiles(FileSystem fs, CopyMapper copyMapper,
   private void testCopyingExistingFiles(FileSystem fs, CopyMapper copyMapper,
-                                        Mapper<Text, FileStatus, Text, Text>.Context context) {
-
+      Mapper<Text, CopyListingFileStatus, Text, Text>.Context context) {
     try {
     try {
       for (Path path : pathList) {
       for (Path path : pathList) {
         copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
         copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
-                fs.getFileStatus(path), context);
+                new CopyListingFileStatus(fs.getFileStatus(path)), context);
       }
       }
 
 
       Assert.assertEquals(nFiles,
       Assert.assertEquals(nFiles,
@@ -309,7 +309,7 @@ public class TestCopyMapper {
       FileSystem fs = cluster.getFileSystem();
       FileSystem fs = cluster.getFileSystem();
       CopyMapper copyMapper = new CopyMapper();
       CopyMapper copyMapper = new CopyMapper();
       StubContext stubContext = new StubContext(getConfiguration(), null, 0);
       StubContext stubContext = new StubContext(getConfiguration(), null, 0);
-      Mapper<Text, FileStatus, Text, Text>.Context context
+      Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
               = stubContext.getContext();
               = stubContext.getContext();
 
 
       Configuration configuration = context.getConfiguration();
       Configuration configuration = context.getConfiguration();
@@ -320,7 +320,7 @@ public class TestCopyMapper {
       copyMapper.setup(context);
       copyMapper.setup(context);
 
 
       copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), pathList.get(0))),
       copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), pathList.get(0))),
-              fs.getFileStatus(pathList.get(0)), context);
+              new CopyListingFileStatus(fs.getFileStatus(pathList.get(0))), context);
 
 
       Assert.assertTrue("There should have been an exception.", false);
       Assert.assertTrue("There should have been an exception.", false);
     }
     }
@@ -343,7 +343,7 @@ public class TestCopyMapper {
       FileSystem fs = cluster.getFileSystem();
       FileSystem fs = cluster.getFileSystem();
       CopyMapper copyMapper = new CopyMapper();
       CopyMapper copyMapper = new CopyMapper();
       StubContext stubContext = new StubContext(getConfiguration(), null, 0);
       StubContext stubContext = new StubContext(getConfiguration(), null, 0);
-      Mapper<Text, FileStatus, Text, Text>.Context context
+      Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
               = stubContext.getContext();
               = stubContext.getContext();
 
 
       mkdirs(SOURCE_PATH + "/src/file");
       mkdirs(SOURCE_PATH + "/src/file");
@@ -351,7 +351,8 @@ public class TestCopyMapper {
       try {
       try {
         copyMapper.setup(context);
         copyMapper.setup(context);
         copyMapper.map(new Text("/src/file"),
         copyMapper.map(new Text("/src/file"),
-            fs.getFileStatus(new Path(SOURCE_PATH + "/src/file")),
+            new CopyListingFileStatus(fs.getFileStatus(
+              new Path(SOURCE_PATH + "/src/file"))),
             context);
             context);
       } catch (IOException e) {
       } catch (IOException e) {
         Assert.assertTrue(e.getMessage().startsWith("Can't replace"));
         Assert.assertTrue(e.getMessage().startsWith("Can't replace"));
@@ -372,22 +373,24 @@ public class TestCopyMapper {
 
 
       final CopyMapper copyMapper = new CopyMapper();
       final CopyMapper copyMapper = new CopyMapper();
 
 
-      final Mapper<Text, FileStatus, Text, Text>.Context context =  tmpUser.
-          doAs(new PrivilegedAction<Mapper<Text, FileStatus, Text, Text>.Context>() {
-        @Override
-        public Mapper<Text, FileStatus, Text, Text>.Context run() {
-          try {
-            StubContext stubContext = new StubContext(getConfiguration(), null, 0);
-            return stubContext.getContext();
-          } catch (Exception e) {
-            LOG.error("Exception encountered ", e);
-            throw new RuntimeException(e);
-          }
-        }
-      });
+      final Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
+        tmpUser.doAs(
+          new PrivilegedAction<Mapper<Text, CopyListingFileStatus, Text, Text>.Context>() {
+            @Override
+            public Mapper<Text, CopyListingFileStatus, Text, Text>.Context run() {
+              try {
+                StubContext stubContext = new StubContext(getConfiguration(), null, 0);
+                return stubContext.getContext();
+              } catch (Exception e) {
+                LOG.error("Exception encountered ", e);
+                throw new RuntimeException(e);
+              }
+            }
+          });
 
 
       EnumSet<DistCpOptions.FileAttribute> preserveStatus =
       EnumSet<DistCpOptions.FileAttribute> preserveStatus =
           EnumSet.allOf(DistCpOptions.FileAttribute.class);
           EnumSet.allOf(DistCpOptions.FileAttribute.class);
+      preserveStatus.remove(DistCpOptions.FileAttribute.ACL);
 
 
       context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
       context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
         DistCpUtils.packAttributes(preserveStatus));
         DistCpUtils.packAttributes(preserveStatus));
@@ -415,7 +418,8 @@ public class TestCopyMapper {
           try {
           try {
             copyMapper.setup(context);
             copyMapper.setup(context);
             copyMapper.map(new Text("/src/file"),
             copyMapper.map(new Text("/src/file"),
-                tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file")),
+                new CopyListingFileStatus(tmpFS.getFileStatus(
+                  new Path(SOURCE_PATH + "/src/file"))),
                 context);
                 context);
             Assert.fail("Expected copy to fail");
             Assert.fail("Expected copy to fail");
           } catch (AccessControlException e) {
           } catch (AccessControlException e) {
@@ -442,19 +446,20 @@ public class TestCopyMapper {
 
 
       final CopyMapper copyMapper = new CopyMapper();
       final CopyMapper copyMapper = new CopyMapper();
 
 
-      final Mapper<Text, FileStatus, Text, Text>.Context context =  tmpUser.
-          doAs(new PrivilegedAction<Mapper<Text, FileStatus, Text, Text>.Context>() {
-        @Override
-        public Mapper<Text, FileStatus, Text, Text>.Context run() {
-          try {
-            StubContext stubContext = new StubContext(getConfiguration(), null, 0);
-            return stubContext.getContext();
-          } catch (Exception e) {
-            LOG.error("Exception encountered ", e);
-            throw new RuntimeException(e);
-          }
-        }
-      });
+      final Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
+        tmpUser.doAs(
+          new PrivilegedAction<Mapper<Text, CopyListingFileStatus, Text, Text>.Context>() {
+            @Override
+            public Mapper<Text, CopyListingFileStatus, Text, Text>.Context run() {
+              try {
+                StubContext stubContext = new StubContext(getConfiguration(), null, 0);
+                return stubContext.getContext();
+              } catch (Exception e) {
+                LOG.error("Exception encountered ", e);
+                throw new RuntimeException(e);
+              }
+            }
+          });
 
 
       touchFile(SOURCE_PATH + "/src/file");
       touchFile(SOURCE_PATH + "/src/file");
       mkdirs(TARGET_PATH);
       mkdirs(TARGET_PATH);
@@ -481,7 +486,8 @@ public class TestCopyMapper {
           try {
           try {
             copyMapper.setup(context);
             copyMapper.setup(context);
             copyMapper.map(new Text("/src/file"),
             copyMapper.map(new Text("/src/file"),
-                tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file")),
+                new CopyListingFileStatus(tmpFS.getFileStatus(
+                  new Path(SOURCE_PATH + "/src/file"))),
                 context);
                 context);
           } catch (Exception e) {
           } catch (Exception e) {
             throw new RuntimeException(e);
             throw new RuntimeException(e);
@@ -518,9 +524,11 @@ public class TestCopyMapper {
         }
         }
       });
       });
 
 
-      final Mapper<Text, FileStatus, Text, Text>.Context context = stubContext.getContext();
+      final Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
+        stubContext.getContext();
       EnumSet<DistCpOptions.FileAttribute> preserveStatus =
       EnumSet<DistCpOptions.FileAttribute> preserveStatus =
           EnumSet.allOf(DistCpOptions.FileAttribute.class);
           EnumSet.allOf(DistCpOptions.FileAttribute.class);
+      preserveStatus.remove(DistCpOptions.FileAttribute.ACL);
 
 
       context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
       context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
         DistCpUtils.packAttributes(preserveStatus));
         DistCpUtils.packAttributes(preserveStatus));
@@ -551,7 +559,8 @@ public class TestCopyMapper {
           try {
           try {
             copyMapper.setup(context);
             copyMapper.setup(context);
             copyMapper.map(new Text("/src/file"),
             copyMapper.map(new Text("/src/file"),
-                tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file")),
+                new CopyListingFileStatus(tmpFS.getFileStatus(
+                  new Path(SOURCE_PATH + "/src/file"))),
                 context);
                 context);
             Assert.assertEquals(stubContext.getWriter().values().size(), 1);
             Assert.assertEquals(stubContext.getWriter().values().size(), 1);
             Assert.assertTrue(stubContext.getWriter().values().get(0).toString().startsWith("SKIP"));
             Assert.assertTrue(stubContext.getWriter().values().get(0).toString().startsWith("SKIP"));
@@ -594,8 +603,9 @@ public class TestCopyMapper {
 
 
       EnumSet<DistCpOptions.FileAttribute> preserveStatus =
       EnumSet<DistCpOptions.FileAttribute> preserveStatus =
           EnumSet.allOf(DistCpOptions.FileAttribute.class);
           EnumSet.allOf(DistCpOptions.FileAttribute.class);
+      preserveStatus.remove(DistCpOptions.FileAttribute.ACL);
 
 
-      final Mapper<Text, FileStatus, Text, Text>.Context context
+      final Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
               = stubContext.getContext();
               = stubContext.getContext();
 
 
       context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
       context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
@@ -629,7 +639,8 @@ public class TestCopyMapper {
           try {
             copyMapper.setup(context);
             copyMapper.map(new Text("/src/file"),
-                tmpFS.getFileStatus(new Path(SOURCE_PATH + "/src/file")),
+                new CopyListingFileStatus(tmpFS.getFileStatus(
+                  new Path(SOURCE_PATH + "/src/file"))),
                 context);
             Assert.fail("Didn't expect the file to be copied");
           } catch (AccessControlException ignore) {
@@ -661,7 +672,7 @@ public class TestCopyMapper {
       FileSystem fs = cluster.getFileSystem();
       CopyMapper copyMapper = new CopyMapper();
       StubContext stubContext = new StubContext(getConfiguration(), null, 0);
-      Mapper<Text, FileStatus, Text, Text>.Context context
+      Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
               = stubContext.getContext();
 
       touchFile(SOURCE_PATH + "/src/file");
@@ -669,7 +680,8 @@ public class TestCopyMapper {
       try {
         copyMapper.setup(context);
         copyMapper.map(new Text("/src/file"),
-            fs.getFileStatus(new Path(SOURCE_PATH + "/src/file")),
+            new CopyListingFileStatus(fs.getFileStatus(
+              new Path(SOURCE_PATH + "/src/file"))),
             context);
       } catch (IOException e) {
         Assert.assertTrue(e.getMessage().startsWith("Can't replace"));
@@ -688,7 +700,7 @@ public class TestCopyMapper {
       FileSystem fs = cluster.getFileSystem();
       CopyMapper copyMapper = new CopyMapper();
       StubContext stubContext = new StubContext(getConfiguration(), null, 0);
-      Mapper<Text, FileStatus, Text, Text>.Context context
+      Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
              = stubContext.getContext();
 
       Configuration configuration = context.getConfiguration();
@@ -705,7 +717,7 @@ public class TestCopyMapper {
         if (!fileStatus.isDirectory()) {
           fs.delete(path, true);
           copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
-                  fileStatus, context);
+                  new CopyListingFileStatus(fileStatus), context);
         }
       }
       if (ignoreFailures) {
@@ -745,7 +757,7 @@ public class TestCopyMapper {
       FileSystem fs = cluster.getFileSystem();
       CopyMapper copyMapper = new CopyMapper();
       StubContext stubContext = new StubContext(getConfiguration(), null, 0);
-      Mapper<Text, FileStatus, Text, Text>.Context context
+      Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
           = stubContext.getContext();
 
       Configuration configuration = context.getConfiguration();
@@ -759,7 +771,7 @@ public class TestCopyMapper {
       for (Path path : pathList) {
         final FileStatus fileStatus = fs.getFileStatus(path);
         copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
-            fileStatus, context);
+            new CopyListingFileStatus(fileStatus), context);
       }
 
       Assert.fail("Copy should have failed because of block-size difference.");
@@ -780,7 +792,7 @@ public class TestCopyMapper {
       FileSystem fs = cluster.getFileSystem();
       CopyMapper copyMapper = new CopyMapper();
       StubContext stubContext = new StubContext(getConfiguration(), null, 0);
-      Mapper<Text, FileStatus, Text, Text>.Context context
+      Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
              = stubContext.getContext();
 
       Configuration configuration = context.getConfiguration();
@@ -798,7 +810,7 @@ public class TestCopyMapper {
       for (Path path : pathList) {
         final FileStatus fileStatus = fs.getFileStatus(path);
         copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
-                fileStatus, context);
+                new CopyListingFileStatus(fileStatus), context);
       }
 
       // Check that the block-size/replication aren't preserved.
@@ -855,7 +867,7 @@ public class TestCopyMapper {
       FileSystem fs = cluster.getFileSystem();
       CopyMapper copyMapper = new CopyMapper();
       StubContext stubContext = new StubContext(getConfiguration(), null, 0);
-      Mapper<Text, FileStatus, Text, Text>.Context context
+      Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
              = stubContext.getContext();
 
       context.getConfiguration().set(
@@ -863,7 +875,8 @@ public class TestCopyMapper {
              targetFilePath.getParent().toString()); // Parent directory.
       copyMapper.setup(context);
 
-      final FileStatus sourceFileStatus = fs.getFileStatus(sourceFilePath);
+      final CopyListingFileStatus sourceFileStatus = new CopyListingFileStatus(
+        fs.getFileStatus(sourceFilePath));
 
       long before = fs.getFileStatus(targetFilePath).getModificationTime();
       copyMapper.map(new Text(DistCpUtils.getRelativePath(
@@ -907,7 +920,7 @@ public class TestCopyMapper {
       FileSystem fs = cluster.getFileSystem();
       CopyMapper copyMapper = new CopyMapper();
       StubContext stubContext = new StubContext(getConfiguration(), null, 0);
-      Mapper<Text, FileStatus, Text, Text>.Context context
+      Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
              = stubContext.getContext();
 
       Configuration configuration = context.getConfiguration();
@@ -926,7 +939,7 @@ public class TestCopyMapper {
       for (Path path : pathList) {
         final FileStatus fileStatus = fs.getFileStatus(path);
         copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
-                fileStatus, context);
+                new CopyListingFileStatus(fileStatus), context);
       }
 
       // Check that the user/group attributes are preserved
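Every hunk in this test follows the same pattern: CopyMapper's value type has moved from FileStatus to CopyListingFileStatus, so each call site wraps the status returned by the FileSystem before invoking map(). A minimal sketch of that wrapping, assuming only the CopyListingFileStatus(FileStatus) constructor and the map() signature visible in these hunks; the helper class and method are hypothetical and written as if they sat next to TestCopyMapper in org.apache.hadoop.tools.mapred:

package org.apache.hadoop.tools.mapred;

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.tools.CopyListingFileStatus;
import org.apache.hadoop.tools.util.DistCpUtils;

/** Hypothetical helper showing the wrapping pattern repeated in the hunks above. */
final class CopyListingStatusSketch {
  static void mapOnePath(FileSystem fs, CopyMapper copyMapper,
      Mapper<Text, CopyListingFileStatus, Text, Text>.Context context,
      Path sourceRoot, Path path) throws IOException, InterruptedException {
    // The mapper value is now a CopyListingFileStatus, so the plain FileStatus
    // returned by the FileSystem is wrapped before the map() call.
    CopyListingFileStatus listingStatus =
        new CopyListingFileStatus(fs.getFileStatus(path));
    copyMapper.map(new Text(DistCpUtils.getRelativePath(sourceRoot, path)),
        listingStatus, context);
  }
}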

+ 4 - 3
hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestUniformSizeInputFormat.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.mapreduce.*;
 import org.apache.hadoop.mapreduce.task.JobContextImpl;
 import org.apache.hadoop.mapreduce.lib.input.FileSplit;
 import org.apache.hadoop.tools.CopyListing;
+import org.apache.hadoop.tools.CopyListingFileStatus;
 import org.apache.hadoop.tools.DistCpOptions;
 import org.apache.hadoop.tools.StubContext;
 import org.apache.hadoop.security.Credentials;
@@ -122,8 +123,8 @@ public class TestUniformSizeInputFormat {
     for (int i=0; i<splits.size(); ++i) {
       InputSplit split = splits.get(i);
       int currentSplitSize = 0;
-      RecordReader<Text, FileStatus> recordReader = uniformSizeInputFormat.createRecordReader(
-              split, null);
+      RecordReader<Text, CopyListingFileStatus> recordReader =
+        uniformSizeInputFormat.createRecordReader(split, null);
       StubContext stubContext = new StubContext(jobContext.getConfiguration(),
                                                 recordReader, 0);
       final TaskAttemptContext taskAttemptContext
@@ -168,7 +169,7 @@
 
     try {
       reader.seek(lastEnd);
-      FileStatus srcFileStatus = new FileStatus();
+      CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
       Text srcRelPath = new Text();
       Assert.assertFalse(reader.next(srcRelPath, srcFileStatus));
     } finally {

+ 5 - 5
hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/lib/TestDynamicInputFormat.java

@@ -25,13 +25,13 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.*;
 import org.apache.hadoop.mapreduce.task.JobContextImpl;
 import org.apache.hadoop.tools.CopyListing;
+import org.apache.hadoop.tools.CopyListingFileStatus;
 import org.apache.hadoop.tools.DistCpOptions;
 import org.apache.hadoop.tools.StubContext;
 import org.apache.hadoop.security.Credentials;
@@ -118,15 +118,15 @@ public class TestDynamicInputFormat {
                     +"/tmp/testDynInputFormat/fileList.seq"), options);
 
     JobContext jobContext = new JobContextImpl(configuration, new JobID());
-    DynamicInputFormat<Text, FileStatus> inputFormat =
-        new DynamicInputFormat<Text, FileStatus>();
+    DynamicInputFormat<Text, CopyListingFileStatus> inputFormat =
+        new DynamicInputFormat<Text, CopyListingFileStatus>();
     List<InputSplit> splits = inputFormat.getSplits(jobContext);
 
     int nFiles = 0;
     int taskId = 0;
 
     for (InputSplit split : splits) {
-      RecordReader<Text, FileStatus> recordReader =
+      RecordReader<Text, CopyListingFileStatus> recordReader =
           inputFormat.createRecordReader(split, null);
       StubContext stubContext = new StubContext(jobContext.getConfiguration(),
                                                 recordReader, taskId);
@@ -136,7 +136,7 @@
       recordReader.initialize(splits.get(0), taskAttemptContext);
       float previousProgressValue = 0f;
       while (recordReader.nextKeyValue()) {
-        FileStatus fileStatus = recordReader.getCurrentValue();
+        CopyListingFileStatus fileStatus = recordReader.getCurrentValue();
         String source = fileStatus.getPath().toString();
         System.out.println(source);
         Assert.assertTrue(expectedFilePaths.contains(source));

+ 3 - 1
hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/util/TestDistCpUtils.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.tools.CopyListingFileStatus;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.junit.Assert;
@@ -106,7 +107,8 @@ public class TestDistCpUtils {
       Path src = new Path("/tmp/src");
       fs.mkdirs(path);
       fs.mkdirs(src);
-      FileStatus srcStatus = fs.getFileStatus(src);
+      CopyListingFileStatus srcStatus = new CopyListingFileStatus(
+        fs.getFileStatus(src));
 
       FsPermission noPerm = new FsPermission((short) 0);
       fs.setPermission(path, noPerm);

+ 12 - 6
hadoop-yarn-project/CHANGES.txt

@@ -111,12 +111,6 @@ Release 2.5.0 - UNRELEASED
 
     YARN-2011. Fix typo and warning in TestLeafQueue (Chen He via junping_du)
 
-    YARN-1976. Fix yarn application CLI to print the scheme of the tracking url
-    of failed/killed applications. (Junping Du via jianhe)
-
-    YARN-2016. Fix a bug in GetApplicationsRequestPBImpl to add the missed fields
-    to proto. (Junping Du via jianhe)
-
     YARN-2042. String shouldn't be compared using == in
     QueuePlacementRule#NestedUserQueue#getQueueForApp (Chen He via Sandy Ryza)
 
@@ -228,6 +222,18 @@ Release 2.4.1 - UNRELEASED
     YARN-1986. In Fifo Scheduler, node heartbeat in between creating app and
     attempt causes NPE (Hong Zhiguo via Sandy Ryza)
 
+    YARN-1976. Fix yarn application CLI to print the scheme of the tracking url
+    of failed/killed applications. (Junping Du via jianhe)
+
+    YARN-2016. Fix a bug in GetApplicationsRequestPBImpl to add the missed fields
+    to proto. (Junping Du via jianhe)
+
+    YARN-2053. Fixed a bug in AMS to not add null NMToken into NMTokens list from
+    previous attempts for work-preserving AM restart. (Wangda Tan via jianhe)
+
+    YARN-2066. Wrong field is referenced in GetApplicationsRequestPBImpl#mergeLocalToBuilder()
+    (Hong Zhiguo via junping_du)
+
 Release 2.4.0 - 2014-04-07 
 
   INCOMPATIBLE CHANGES

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java

@@ -127,8 +127,8 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
     }
     
     if (this.finish != null) {
-      builder.setFinishBegin(start.getMinimumLong());
-      builder.setFinishEnd(start.getMaximumLong());
+      builder.setFinishBegin(finish.getMinimumLong());
+      builder.setFinishEnd(finish.getMaximumLong());
     }
     
     builder.setLimit(limit);
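The root cause is a one-word copy-and-paste slip: the finish-range block read from start, so whatever finish-time filter a client set was silently replaced by its start-time filter once the request was serialized. A small round-trip check in the spirit of the test change that follows; the cast to the PBImpl, its getProto() accessor, and the literal range values are assumptions based on the usual YARN protobuf-record pattern rather than code shown in this hunk:

import org.apache.commons.lang.math.LongRange;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl;
import org.junit.Assert;
import org.junit.Test;

public class FinishRangeRoundTripSketch {
  @Test
  public void finishRangeSurvivesProtoRoundTrip() {
    // Distinct start and finish ranges make the bug observable.
    GetApplicationsRequest request = GetApplicationsRequest.newInstance();
    request.setStartRange(1000L, 2000L);
    request.setFinishRange(3000L, 4000L);

    // Serialize to the protobuf form and rebuild the record from it.
    GetApplicationsRequestPBImpl pb = (GetApplicationsRequestPBImpl) request;
    GetApplicationsRequest fromProto =
        new GetApplicationsRequestPBImpl(pb.getProto());

    // Before YARN-2066 this came back as [1000, 2000], i.e. the start range.
    Assert.assertEquals(new LongRange(3000L, 4000L), fromProto.getFinishRange());
  }
}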

+ 8 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestGetApplicationsRequest.java

@@ -47,10 +47,12 @@ public class TestGetApplicationsRequest {
     types.add("type1");
     request.setApplicationTypes(types);
     
-    long begin = System.currentTimeMillis();
-    long end = System.currentTimeMillis() + 1;
-    request.setStartRange(begin, end);
-    request.setFinishRange(begin, end);
+    long startBegin = System.currentTimeMillis();
+    long startEnd = System.currentTimeMillis() + 1;
+    request.setStartRange(startBegin, startEnd);
+    long finishBegin = System.currentTimeMillis() + 2;
+    long finishEnd = System.currentTimeMillis() + 3;
+    request.setFinishRange(finishBegin, finishEnd);
     
     long limit = 100L;
     request.setLimit(limit);
@@ -85,11 +87,11 @@
     
     Assert.assertEquals(
         "StartRange from proto is not the same with original request",
-        requestFromProto.getStartRange(), new LongRange(begin, end));
+        requestFromProto.getStartRange(), new LongRange(startBegin, startEnd));
     
     Assert.assertEquals(
         "FinishRange from proto is not the same with original request",
-        requestFromProto.getFinishRange(), new LongRange(begin, end));
+        requestFromProto.getFinishRange(), new LongRange(finishBegin, finishEnd));
     
     Assert.assertEquals(
         "Limit from proto is not the same with original request",

+ 6 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java

@@ -298,9 +298,12 @@ public class ApplicationMasterService extends AbstractService implements
         List<NMToken> nmTokens = new ArrayList<NMToken>();
         for (Container container : transferredContainers) {
           try {
-            nmTokens.add(rmContext.getNMTokenSecretManager()
-              .createAndGetNMToken(app.getUser(), applicationAttemptId,
-                container));
+            NMToken token = rmContext.getNMTokenSecretManager()
+                .createAndGetNMToken(app.getUser(), applicationAttemptId,
+                    container);
+            if (null != token) {
+              nmTokens.add(token);
+            }
           } catch (IllegalArgumentException e) {
             // if it's a DNS issue, throw UnknowHostException directly and that
             // will be automatically retried by RMProxy in RPC layer.
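createAndGetNMToken can legitimately return null (for instance when the secret manager decides the attempt already holds a current token for that node), and before this change the null went straight into the NMTokens list handed back on AM re-registration. A sketch of the AM-side consumer that such a null entry would break; getNMTokensFromPreviousAttempts() is the accessor on RegisterApplicationMasterResponse used for work-preserving restart, while the host/port/URL values and the client scaffolding around it are illustrative:

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.NMToken;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RecoverNMTokensSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();
    AMRMClient<ContainerRequest> amRmClient = AMRMClient.createAMRMClient();
    amRmClient.init(conf);
    amRmClient.start();

    // Re-register after an AM restart; host, port and tracking URL are illustrative.
    RegisterApplicationMasterResponse response =
        amRmClient.registerApplicationMaster("am-host", 0, "");

    // Tokens for containers surviving from the previous attempt.
    List<NMToken> recovered = response.getNMTokensFromPreviousAttempts();
    for (NMToken token : recovered) {
      // With a null element (the pre-YARN-2053 behaviour) this dereference
      // fails before the AM has rebuilt any state.
      System.out.println("Recovered NMToken for node " + token.getNodeId());
    }

    amRmClient.stop();
  }
}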

+ 13 - 8
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java

@@ -264,31 +264,36 @@ public class TestAMRestart {
     nm2.registerNode();
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
 
-    int NUM_CONTAINERS = 1;
     List<Container> containers = new ArrayList<Container>();
     // nmTokens keeps track of all the nmTokens issued in the allocate call.
     List<NMToken> expectedNMTokens = new ArrayList<NMToken>();
 
-    // am1 allocate 1 container on nm1.
+    // am1 allocate 2 container on nm1.
+    // first container
     while (true) {
       AllocateResponse response =
-          am1.allocate("127.0.0.1", 2000, NUM_CONTAINERS,
+          am1.allocate("127.0.0.1", 2000, 2,
            new ArrayList<ContainerId>());
       nm1.nodeHeartbeat(true);
       containers.addAll(response.getAllocatedContainers());
       expectedNMTokens.addAll(response.getNMTokens());
-      if (containers.size() == NUM_CONTAINERS) {
+      if (containers.size() == 2) {
        break;
       }
       Thread.sleep(200);
       System.out.println("Waiting for container to be allocated.");
     }
-    // launch the container
+    // launch the container-2
     nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.RUNNING);
     ContainerId containerId2 =
        ContainerId.newInstance(am1.getApplicationAttemptId(), 2);
     rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING);
-
+    // launch the container-3
+    nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 3, ContainerState.RUNNING);
+    ContainerId containerId3 =
+        ContainerId.newInstance(am1.getApplicationAttemptId(), 3);
+    rm1.waitForState(nm1, containerId3, RMContainerState.RUNNING);
+    
     // fail am1
     nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
     am1.waitForState(RMAppAttemptState.FAILED);
@@ -308,12 +313,12 @@
     containers = new ArrayList<Container>();
     while (true) {
       AllocateResponse allocateResponse =
-          am2.allocate("127.1.1.1", 4000, NUM_CONTAINERS,
+          am2.allocate("127.1.1.1", 4000, 1,
            new ArrayList<ContainerId>());
       nm2.nodeHeartbeat(true);
       containers.addAll(allocateResponse.getAllocatedContainers());
       expectedNMTokens.addAll(allocateResponse.getNMTokens());
-      if (containers.size() == NUM_CONTAINERS) {
+      if (containers.size() == 1) {
        break;
       }
       Thread.sleep(200);