
Merge r1609845 through r1615019 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-6584@1615020 13f79535-47bb-0310-9956-ffa450edef68
Author: Tsz-wo Sze (10 years ago)
Commit: dc7744d2e5
100 changed files with 3078 additions and 764 deletions
  1. + 8 - 0  hadoop-common-project/hadoop-common/CHANGES.txt
  2. + 66 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  3. + 28 - 75  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
  4. + 2 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  5. + 65 - 76  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
  6. + 82 - 77  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  7. + 7 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
  8. + 8 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
  9. + 8 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
  10. + 7 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
  11. + 9 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  12. + 9 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
  13. + 14 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
  14. + 0 - 19  hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  15. + 3 - 0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
  16. + 51 - 33  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
  17. + 174 - 15  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java
  18. + 4 - 2  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
  19. + 6 - 1  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
  20. + 9 - 1  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
  21. + 11 - 0  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
  22. + 19 - 0  hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm
  23. + 134 - 0  hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
  24. + 25 - 0  hadoop-common-project/hadoop-kms/src/test/resources/log4j-kmsaudit.properties
  25. + 27 - 3  hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  26. + 5 - 0  hadoop-hdfs-project/hadoop-hdfs/pom.xml
  27. + 6 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
  28. + 12 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  29. + 20 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  30. + 10 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
  31. + 35 - 36  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  32. + 16 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  33. + 13 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  34. + 8 - 4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  35. + 368 - 249  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
  36. + 61 - 23  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java
  37. + 4 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  38. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
  39. + 23 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  40. + 6 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  41. + 18 - 6  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
  42. + 19 - 6  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
  43. + 2 - 2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
  44. + 8 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
  45. + 128 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java
  46. + 7 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
  47. + 58 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
  48. + 3 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
  49. + 10 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
  50. + 1 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
  51. + 44 - 0  hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
  52. + 18 - 11  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
  53. + 32 - 12  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
  54. + 151 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java
  55. + 148 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java
  56. + 77 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
  57. + 22 - 1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
  58. + 3 - 1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
  59. + 445 - 20  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
  60. + 2 - 2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
  61. + 2 - 2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
  62. + 2 - 2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
  63. + 0 - 1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
  64. + 28 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
  65. + 2 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
  66. + 9 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java
  67. + 33 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
  68. + 24 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
  69. + 35 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java
  70. + 3 - 0  hadoop-mapreduce-project/CHANGES.txt
  71. + 3 - 1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
  72. + 1 - 0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java
  73. + 5 - 0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
  74. + 2 - 1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
  75. + 10 - 3  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java
  76. + 5 - 0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java
  77. + 5 - 0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
  78. + 3 - 1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
  79. + 7 - 2  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
  80. + 8 - 3  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
  81. + 5 - 0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java
  82. + 4 - 1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java
  83. + 14 - 14  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
  84. + 4 - 4  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
  85. + 2 - 0  hadoop-project-dist/pom.xml
  86. + 7 - 1  hadoop-project/pom.xml
  87. + 12 - 0  hadoop-yarn-project/CHANGES.txt
  88. + 12 - 0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
  89. + 0 - 5  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
  90. + 6 - 5  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
  91. + 4 - 2  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSFailedAppMaster.java
  92. + 32 - 0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java
  93. + 34 - 0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
  94. + 128 - 0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java
  95. + 10 - 6  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
  96. + 3 - 1  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
  97. + 14 - 10  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/Version.java
  98. + 15 - 14  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/impl/pb/VersionPBImpl.java
  99. + 7 - 1  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto
  100. + 18 - 1  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java

+ 8 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -192,6 +192,9 @@ Trunk (Unreleased)
     HADOOP-10891. Add EncryptedKeyVersion factory method to
     KeyProviderCryptoExtension. (wang)
 
+    HADOOP-10756. KMS audit log should consolidate successful similar requests. 
+    (asuresh via tucu)
+
   BUG FIXES
 
     HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -460,6 +463,9 @@ Release 2.6.0 - UNRELEASED
 
     HADOOP-10882. Move DirectBufferPool into common util. (todd)
 
+    HADOOP-8069. Enable TCP_NODELAY by default for IPC. (Todd Lipcon via
+    Arpit Agarwal)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -810,6 +816,8 @@ Release 2.5.0 - UNRELEASED
     HADOOP-10894. Fix dead link in ToolRunner documentation. (Akira Ajisaka
     via Arpit Agarwal)
 
+    HADOOP-10910. Increase findbugs maxHeap size. (wang)
+
   BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS
 
     HADOOP-10520. Extended attributes definition and FileSystem APIs for

+ 66 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -1843,6 +1843,38 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     return pass;
   }
 
+  /**
+   * Get the socket address for <code>hostProperty</code> as a
+   * <code>InetSocketAddress</code>. If <code>hostProperty</code> is
+   * <code>null</code>, <code>addressProperty</code> will be used. This
+   * is useful for cases where we want to differentiate between host
+   * bind address and address clients should use to establish connection.
+   *
+   * @param hostProperty bind host property name.
+   * @param addressProperty address property name.
+   * @param defaultAddressValue the default value
+   * @param defaultPort the default port
+   * @return InetSocketAddress
+   */
+  public InetSocketAddress getSocketAddr(
+      String hostProperty,
+      String addressProperty,
+      String defaultAddressValue,
+      int defaultPort) {
+
+    InetSocketAddress bindAddr = getSocketAddr(
+      addressProperty, defaultAddressValue, defaultPort);
+
+    final String host = get(hostProperty);
+
+    if (host == null || host.isEmpty()) {
+      return bindAddr;
+    }
+
+    return NetUtils.createSocketAddr(
+        host, bindAddr.getPort(), hostProperty);
+  }
+
   /**
    * Get the socket address for <code>name</code> property as a
    * <code>InetSocketAddress</code>.
@@ -1864,6 +1896,40 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   public void setSocketAddr(String name, InetSocketAddress addr) {
     set(name, NetUtils.getHostPortString(addr));
   }
+
+  /**
+   * Set the socket address a client can use to connect for the
+   * <code>name</code> property as a <code>host:port</code>.  The wildcard
+   * address is replaced with the local host's address. If the host and address
+   * properties are configured the host component of the address will be combined
+   * with the port component of the addr to generate the address.  This is to allow
+   * optional control over which host name is used in multi-home bind-host
+   * cases where a host can have multiple names
+   * @param hostProperty the bind-host configuration name
+   * @param addressProperty the service address configuration name
+   * @param defaultAddressValue the service default address configuration value
+   * @param addr InetSocketAddress of the service listener
+   * @return InetSocketAddress for clients to connect
+   */
+  public InetSocketAddress updateConnectAddr(
+      String hostProperty,
+      String addressProperty,
+      String defaultAddressValue,
+      InetSocketAddress addr) {
+
+    final String host = get(hostProperty);
+    final String connectHostPort = getTrimmed(addressProperty, defaultAddressValue);
+
+    if (host == null || host.isEmpty() || connectHostPort == null || connectHostPort.isEmpty()) {
+      //not our case, fall back to original logic
+      return updateConnectAddr(addressProperty, addr);
+    }
+
+    final String connectHost = connectHostPort.split(":")[0];
+    // Create connect address using client address hostname and server port.
+    return updateConnectAddr(addressProperty, NetUtils.createSocketAddrForHost(
+        connectHost, addr.getPort()));
+  }
   
   /**
    * Set the socket address a client can use to connect for the
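
The two additions above compose: the new getSocketAddr(hostProperty, ...) overload decides where the server binds (the bind-host property wins when set), while updateConnectAddr(hostProperty, ...) decides what address gets advertised to clients (the hostname from the address property plus the port of the live listener). A minimal sketch of that composition; the property names here are hypothetical, not taken from this patch:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;

    public class BindHostExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("my.service.address", "svc.example.com:8020");  // what clients dial
        conf.set("my.service.bind-host", "0.0.0.0");             // wildcard bind

        // Server side: host comes from bind-host, port from the address property.
        InetSocketAddress bindAddr = conf.getSocketAddr(
            "my.service.bind-host", "my.service.address",
            "svc.example.com:8020", 8020);

        // Client-facing side: hostname from the address property, port from the
        // actual listener (which may differ if an ephemeral port was assigned).
        InetSocketAddress connectAddr = conf.updateConnectAddr(
            "my.service.bind-host", "my.service.address",
            "svc.example.com:8020", bindAddr);

        System.out.println("bind=" + bindAddr + " connect=" + connectAddr);
      }
    }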

+ 28 - 75
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java

@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.security.AccessControlException;
@@ -803,6 +804,18 @@ public abstract class AbstractFileSystem {
       throws AccessControlException, FileNotFoundException,
       UnresolvedLinkException, IOException;
 
+  /**
+   * The specification of this method matches that of
+   * {@link FileContext#access(Path, FsAction)}
+   * except that an UnresolvedLinkException may be thrown if a symlink is
+   * encountered in the path.
+   */
+  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    FileSystem.checkAccessPermissions(this.getFileStatus(path), mode);
+  }
+
   /**
    * The specification of this method matches that of
    * {@link FileContext#getFileLinkStatus(Path)}
@@ -1040,21 +1053,10 @@ public abstract class AbstractFileSystem {
 
   /**
    * Set an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only set an xattr for the "user" namespace.
-   * The super user can set an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to modify
    * @param name xattr name.
@@ -1069,21 +1071,10 @@ public abstract class AbstractFileSystem {
 
   /**
    * Set an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
-   * A regular user can only set an xattr for the "user" namespace.
-   * The super user can set an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to modify
    * @param name xattr name.
@@ -1099,18 +1090,10 @@ public abstract class AbstractFileSystem {
 
   /**
    * Get an xattr for a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only get an xattr for the "user" namespace.
-   * The super user can get an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
-   * An xattr will only be returned when the logged-in user has the correct permissions.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attribute
    * @param name xattr name.
@@ -1127,13 +1110,7 @@ public abstract class AbstractFileSystem {
    * Only those xattrs for which the logged-in user has permissions to view
    * are returned.
    * <p/>
-   * A regular user can only get xattrs for the "user" namespace.
-   * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattr of "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
    * @return Map<String, byte[]> describing the XAttrs of the file or directory
@@ -1149,13 +1126,7 @@ public abstract class AbstractFileSystem {
    * Only those xattrs for which the logged-in user has permissions to view
    * are returned.
    * <p/>
-   * A regular user can only get xattrs for the "user" namespace.
-   * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattr of "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
    * @param names XAttr names.
@@ -1173,14 +1144,7 @@ public abstract class AbstractFileSystem {
    * Only the xattr names for which the logged-in user has permissions to view
    * are returned.
    * <p/>
-   * A regular user can only get xattr names for the "user" namespace.
-   * The super user can only get xattr names for the "user" and "trusted"
-   * namespaces.
-   * The xattr names in the "security" and "system" namespaces are only
-   * used/exposed internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
    * @return Map<String, byte[]> describing the XAttrs of the file or directory
@@ -1194,21 +1158,10 @@ public abstract class AbstractFileSystem {
 
   /**
    * Remove an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only remove an xattr for the "user" namespace.
-   * The super user can remove an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to remove extended attribute
    * @param name xattr name

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -207,7 +207,7 @@ public class CommonConfigurationKeysPublic {
   public static final String  IPC_CLIENT_TCPNODELAY_KEY =
     "ipc.client.tcpnodelay";
   /** Defalt value for IPC_CLIENT_TCPNODELAY_KEY */
-  public static final boolean IPC_CLIENT_TCPNODELAY_DEFAULT = false;
+  public static final boolean IPC_CLIENT_TCPNODELAY_DEFAULT = true;
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  IPC_SERVER_LISTEN_QUEUE_SIZE_KEY =
     "ipc.server.listen.queue.size";
@@ -226,7 +226,7 @@ public class CommonConfigurationKeysPublic {
   public static final String  IPC_SERVER_TCPNODELAY_KEY =
     "ipc.server.tcpnodelay";
   /** Default value for IPC_SERVER_TCPNODELAY_KEY */
-  public static final boolean IPC_SERVER_TCPNODELAY_DEFAULT = false;
+  public static final boolean IPC_SERVER_TCPNODELAY_DEFAULT = true;
 
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY =
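
With HADOOP-8069 both defaults flip to true, so TCP_NODELAY is on (Nagle's algorithm off) for IPC sockets out of the box. A deployment that wants the old batching behavior back must now set the keys explicitly; a minimal sketch of the opt-out:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

    public class NagleOptOut {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Unset keys now resolve to true (TCP_NODELAY on, Nagle off):
        boolean clientNoDelay = conf.getBoolean(
            CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY,
            CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_DEFAULT);
        System.out.println("client tcpnodelay = " + clientNoDelay);

        // Explicitly opt back in to Nagle's algorithm on both sides:
        conf.setBoolean(CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY, false);
        conf.setBoolean(CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_KEY, false);
      }
    }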

+ 65 - 76
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
@@ -1108,6 +1109,55 @@ public final class FileContext {
     }.resolve(this, absF);
   }
 
+  /**
+   * Checks if the user can access a path.  The mode specifies which access
+   * checks to perform.  If the requested permissions are granted, then the
+   * method returns normally.  If access is denied, then the method throws an
+   * {@link AccessControlException}.
+   * <p/>
+   * The default implementation of this method calls {@link #getFileStatus(Path)}
+   * and checks the returned permissions against the requested permissions.
+   * Note that the getFileStatus call will be subject to authorization checks.
+   * Typically, this requires search (execute) permissions on each directory in
+   * the path's prefix, but this is implementation-defined.  Any file system
+   * that provides a richer authorization model (such as ACLs) may override the
+   * default implementation so that it checks against that model instead.
+   * <p>
+   * In general, applications should avoid using this method, due to the risk of
+   * time-of-check/time-of-use race conditions.  The permissions on a file may
+   * change immediately after the access call returns.  Most applications should
+   * prefer running specific file system actions as the desired user represented
+   * by a {@link UserGroupInformation}.
+   *
+   * @param path Path to check
+   * @param mode type of access to check
+   * @throws AccessControlException if access is denied
+   * @throws FileNotFoundException if the path does not exist
+   * @throws UnsupportedFileSystemException if file system for <code>path</code>
+   *   is not supported
+   * @throws IOException see specific implementation
+   * 
+   * Exceptions applicable to file systems accessed over RPC:
+   * @throws RpcClientException If an exception occurred in the RPC client
+   * @throws RpcServerException If an exception occurred in the RPC server
+   * @throws UnexpectedServerException If server implementation throws 
+   *           undeclared exception to RPC server
+   */
+  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+  public void access(final Path path, final FsAction mode)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    final Path absPath = fixRelativePart(path);
+    new FSLinkResolver<Void>() {
+      @Override
+      public Void next(AbstractFileSystem fs, Path p) throws IOException,
+          UnresolvedLinkException {
+        fs.access(p, mode);
+        return null;
+      }
+    }.resolve(this, absPath);
+  }
+
   /**
    * Return a file status object that represents the path. If the path 
    * refers to a symlink then the FileStatus of the symlink is returned.
@@ -2297,21 +2347,10 @@ public final class FileContext {
 
   /**
    * Set an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only set an xattr for the "user" namespace.
-   * The super user can set an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to modify
    * @param name xattr name.
@@ -2326,21 +2365,10 @@ public final class FileContext {
 
   /**
    * Set an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only set an xattr for the "user" namespace.
-   * The super user can set an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to modify
    * @param name xattr name.
@@ -2363,19 +2391,10 @@ public final class FileContext {
 
   /**
    * Get an xattr for a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
-   * 
-   * A regular user can only get an xattr for the "user" namespace.
-   * The super user can get an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * An xattr will only be returned when the logged-in user has the correct permissions.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attribute
    * @param name xattr name.
@@ -2398,13 +2417,7 @@ public final class FileContext {
    * Only those xattrs for which the logged-in user has permissions to view
    * are returned.
    * <p/>
-   * A regular user can only get xattrs for the "user" namespace.
-   * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattr of "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
    * @return Map<String, byte[]> describing the XAttrs of the file or directory
@@ -2426,13 +2439,7 @@ public final class FileContext {
    * Only those xattrs for which the logged-in user has permissions to view
    * are returned.
    * <p/>
-   * A regular user can only get xattrs for the "user" namespace.
-   * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattr of "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
    * @param names XAttr names.
@@ -2453,21 +2460,10 @@ public final class FileContext {
 
   /**
    * Remove an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
-   * A regular user can only remove an xattr for the "user" namespace.
-   * The super user can remove an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to remove extended attribute
    * @param name xattr name
@@ -2490,14 +2486,7 @@ public final class FileContext {
    * Only those xattr names which the logged-in user has permissions to view
    * are returned.
    * <p/>
-   * A regular user can only get xattr names for the "user" namespace.
-   * The super user can only get xattr names for "user" and "trusted"
-   * namespaces.
-   * The xattrs of the "security" and "system" namespaces are only
-   * used/exposed internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
    * @return List<String> of the XAttr names of the file or directory
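
The new FileContext#access call turns a permission probe into a single explicit check that either returns normally or throws. A minimal caller sketch (the path is illustrative, not from this patch); note the time-of-check/time-of-use caveat the Javadoc above spells out:

    import java.io.FileNotFoundException;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    public class AccessCheckExample {
      public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext();
        Path p = new Path("/user/alice/data");  // illustrative path
        try {
          fc.access(p, FsAction.READ_WRITE);  // returns normally iff both bits are granted
          // Proceed, but permissions may change between this check and the actual I/O.
        } catch (AccessControlException e) {
          System.err.println("denied: " + e.getMessage());
        } catch (FileNotFoundException e) {
          System.err.println("no such path: " + p);
        }
      }
    }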

+ 82 - 77
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -25,6 +25,7 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -50,6 +51,7 @@ import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.Text;
@@ -2072,6 +2074,71 @@ public abstract class FileSystem extends Configured implements Closeable {
    */
   public abstract FileStatus getFileStatus(Path f) throws IOException;
 
+  /**
+   * Checks if the user can access a path.  The mode specifies which access
+   * checks to perform.  If the requested permissions are granted, then the
+   * method returns normally.  If access is denied, then the method throws an
+   * {@link AccessControlException}.
+   * <p/>
+   * The default implementation of this method calls {@link #getFileStatus(Path)}
+   * and checks the returned permissions against the requested permissions.
+   * Note that the getFileStatus call will be subject to authorization checks.
+   * Typically, this requires search (execute) permissions on each directory in
+   * the path's prefix, but this is implementation-defined.  Any file system
+   * that provides a richer authorization model (such as ACLs) may override the
+   * default implementation so that it checks against that model instead.
+   * <p>
+   * In general, applications should avoid using this method, due to the risk of
+   * time-of-check/time-of-use race conditions.  The permissions on a file may
+   * change immediately after the access call returns.  Most applications should
+   * prefer running specific file system actions as the desired user represented
+   * by a {@link UserGroupInformation}.
+   *
+   * @param path Path to check
+   * @param mode type of access to check
+   * @throws AccessControlException if access is denied
+   * @throws FileNotFoundException if the path does not exist
+   * @throws IOException see specific implementation
+   */
+  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, IOException {
+    checkAccessPermissions(this.getFileStatus(path), mode);
+  }
+
+  /**
+   * This method provides the default implementation of
+   * {@link #access(Path, FsAction)}.
+   *
+   * @param stat FileStatus to check
+   * @param mode type of access to check
+   * @throws IOException for any error
+   */
+  @InterfaceAudience.Private
+  static void checkAccessPermissions(FileStatus stat, FsAction mode)
+      throws IOException {
+    FsPermission perm = stat.getPermission();
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    String user = ugi.getShortUserName();
+    List<String> groups = Arrays.asList(ugi.getGroupNames());
+    if (user.equals(stat.getOwner())) {
+      if (perm.getUserAction().implies(mode)) {
+        return;
+      }
+    } else if (groups.contains(stat.getGroup())) {
+      if (perm.getGroupAction().implies(mode)) {
+        return;
+      }
+    } else {
+      if (perm.getOtherAction().implies(mode)) {
+        return;
+      }
+    }
+    throw new AccessControlException(String.format(
+      "Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(),
+      stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
+  }
+
   /**
    * See {@link FileContext#fixRelativePart}
    */
@@ -2364,21 +2431,10 @@ public abstract class FileSystem extends Configured implements Closeable {
 
   /**
    * Set an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only set an xattr for the "user" namespace.
-   * The super user can set an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to modify
    * @param name xattr name.
@@ -2393,21 +2449,10 @@ public abstract class FileSystem extends Configured implements Closeable {
 
   /**
    * Set an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only set an xattr for the "user" namespace.
-   * The super user can set an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set if the logged-in user has the correct permissions.
-   * If the xattr exists, it is replaced.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to modify
    * @param name xattr name.
@@ -2423,20 +2468,10 @@ public abstract class FileSystem extends Configured implements Closeable {
 
   /**
    * Get an xattr name and value for a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
-   * 
-   * A regular user can only get an xattr for the "user" namespace.
-   * The super user can get an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * An xattr will only be returned if the logged-in user has the
-   * correct permissions.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attribute
    * @param name xattr name.
@@ -2453,13 +2488,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * Only those xattrs which the logged-in user has permissions to view
    * are returned.
    * <p/>
-   * A regular user can only get xattrs for the "user" namespace.
-   * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed
-   * internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
    * @return Map<String, byte[]> describing the XAttrs of the file or directory
@@ -2475,13 +2504,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * Only those xattrs which the logged-in user has permissions to view
    * are returned.
    * <p/>
-   * A regular user can only get xattrs for the "user" namespace.
-   * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed
-   * internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
    * @param names XAttr names.
@@ -2499,14 +2522,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * Only those xattr names which the logged-in user has permissions to view
    * are returned.
    * <p/>
-   * A regular user can only get xattr names for the "user" namespace.
-   * The super user can only get xattr names for "user" and "trusted"
-   * namespaces.
-   * The xattrs of the "security" and "system" namespaces are only
-   * used/exposed internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
    * @return List<String> of the XAttr names of the file or directory
@@ -2519,21 +2535,10 @@ public abstract class FileSystem extends Configured implements Closeable {
 
   /**
    * Remove an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only remove an xattr for the "user" namespace.
-   * The super user can remove an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to remove extended attribute
    * @param name xattr name
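
Note that checkAccessPermissions above selects exactly one permission class and never falls through: owner bits if the caller owns the file, else group bits on group membership, else other bits. A small standalone demonstration of the implies() arithmetic it relies on, for mode 0640 (this demo is illustrative, not part of the patch):

    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class ImpliesDemo {
      public static void main(String[] args) {
        FsPermission perm = new FsPermission((short) 0640);  // rw-r-----
        // Owner class: rw- implies READ_WRITE.
        System.out.println(perm.getUserAction().implies(FsAction.READ_WRITE));  // true
        // Group class: r-- does not imply WRITE, so a group member asking for
        // WRITE is denied even though the owner bits would allow it.
        System.out.println(perm.getGroupAction().implies(FsAction.WRITE));      // false
        // Other class: --- implies nothing beyond NONE.
        System.out.println(perm.getOtherAction().implies(FsAction.READ));       // false
      }
    }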

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.security.AccessControlException;
@@ -397,6 +398,12 @@ public class FilterFileSystem extends FileSystem {
     return fs.getFileStatus(f);
   }
 
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, IOException {
+    fs.access(path, mode);
+  }
+
   public void createSymlink(final Path target, final Path link,
       final boolean createParent) throws AccessControlException,
       FileAlreadyExistsException, FileNotFoundException,
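
The filter classes forward access() rather than inheriting the default, so the check runs against the wrapped file system's possibly richer model (for example HDFS ACLs) instead of the getFileStatus-based fallback. A hypothetical subclass showing the same delegation pattern:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FilterFileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;

    // Hypothetical wrapper that logs every permission probe, then delegates.
    public class LoggingFileSystem extends FilterFileSystem {
      public LoggingFileSystem(FileSystem fs) {
        super(fs);
      }

      @Override
      public void access(Path path, FsAction mode) throws IOException {
        System.out.println("access check: " + path + " mode=" + mode);
        super.access(path, mode);  // forwards to fs.access(path, mode)
      }
    }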

+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.security.AccessControlException;
@@ -119,6 +120,13 @@ public abstract class FilterFs extends AbstractFileSystem {
     return myFs.getFileStatus(f);
   }
 
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    checkPath(path);
+    myFs.access(path, mode);
+  }
+
   @Override
   public FileStatus getFileLinkStatus(final Path f) 
     throws IOException, UnresolvedLinkException {

+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java

@@ -41,7 +41,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
 
 /**
@@ -222,6 +224,12 @@ class ChRootedFileSystem extends FilterFileSystem {
     return super.getFileStatus(fullPath(f));
   }
 
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, IOException {
+    super.access(fullPath(path), mode);
+  }
+
   @Override
   public FsStatus getStatus(Path p) throws IOException {
     return super.getStatus(fullPath(p));

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java

@@ -41,7 +41,9 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
 
@@ -200,6 +202,11 @@ class ChRootedFs extends AbstractFileSystem {
     return myFs.getFileStatus(fullPath(f));
   }
 
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    myFs.access(fullPath(path), mode);
+  }
+
   @Override
   public FileStatus getFileLinkStatus(final Path f) 
     throws IOException, UnresolvedLinkException {

+ 9 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -51,6 +51,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.AclUtil;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.viewfs.InodeTree.INode;
 import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -359,7 +360,14 @@ public class ViewFileSystem extends FileSystem {
     return new ViewFsFileStatus(status, this.makeQualified(f));
   }
   
-  
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, IOException {
+    InodeTree.ResolveResult<FileSystem> res =
+      fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.access(res.remainingPath, mode);
+  }
+
   @Override
   public FileStatus[] listStatus(final Path f) throws AccessControlException,
       FileNotFoundException, IOException {
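
ViewFileSystem#access first resolves the path through the mount table, then delegates to the target file system with the remaining path. A minimal sketch using ConfigUtil to build a one-entry mount table (the cluster address is illustrative, not from this patch):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FsConstants;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.viewfs.ConfigUtil;

    public class ViewFsAccessExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Mount /data onto an HDFS directory (namenode address is illustrative).
        ConfigUtil.addLink(conf, "/data",
            new URI("hdfs://nn.example.com:8020/shared/data"));
        FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
        // Resolves /data/file against the mount table, then runs the access
        // check on the target file system with the remaining path.
        viewFs.access(new Path("/data/file"), FsAction.READ);
      }
    }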

+ 9 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.local.LocalConfigKeys;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.viewfs.InodeTree.INode;
 import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -352,6 +353,14 @@ public class ViewFs extends AbstractFileSystem {
     return new ViewFsFileStatus(status, this.makeQualified(f));
   }
 
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+      fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.access(res.remainingPath, mode);
+  }
+
   @Override
   public FileStatus getFileLinkStatus(final Path f)
      throws AccessControlException, FileNotFoundException,

+ 14 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java

@@ -27,6 +27,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Date;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashSet;
 import java.util.List;
@@ -377,6 +378,19 @@ public class StringUtils {
     return str.trim().split("\\s*,\\s*");
   }
 
+  /**
+   * Trims all the strings in a Collection<String> and returns a Set<String>.
+   * @param strings collection of strings to trim
+   * @return a Set containing the trimmed strings
+   */
+  public static Set<String> getTrimmedStrings(Collection<String> strings) {
+    Set<String> trimmedStrings = new HashSet<String>();
+    for (String string: strings) {
+      trimmedStrings.add(string.trim());
+    }
+    return trimmedStrings;
+  }
+
   final public static String[] emptyStringArray = {};
   final public static char COMMA = ',';
   final public static String COMMA_STR = ",";
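
The new Collection overload trims each element and collects the results into a HashSet, so duplicates collapse and iteration order is unspecified. A minimal usage sketch:

    import java.util.Arrays;
    import java.util.Set;
    import org.apache.hadoop.util.StringUtils;

    public class TrimExample {
      public static void main(String[] args) {
        Set<String> hosts = StringUtils.getTrimmedStrings(
            Arrays.asList(" host1 ", "host2", "host2  "));
        // Trimmed and de-duplicated by the HashSet.
        System.out.println(hosts);  // e.g. [host1, host2]
      }
    }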

+ 0 - 19
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -807,25 +807,6 @@ for ldap providers in the same way as above does.
   </description>
 </property>
 
-<property>
-  <name>ipc.server.tcpnodelay</name>
-  <value>false</value>
-  <description>Turn on/off Nagle's algorithm for the TCP socket connection on 
-  the server. Setting to true disables the algorithm and may decrease latency
-  with a cost of more/smaller packets. 
-  </description>
-</property>
-
-<property>
-  <name>ipc.client.tcpnodelay</name>
-  <value>false</value>
-  <description>Turn on/off Nagle's algorithm for the TCP socket connection on 
-  the client. Setting to true disables the algorithm and may decrease latency
-  with a cost of more/smaller packets. 
-  </description>
-</property>
-
-
 <!-- Proxy Configuration -->
 
 <property>

+ 3 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java

@@ -23,6 +23,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
@@ -201,6 +202,8 @@ public class TestHarFileSystem {
     public void removeXAttr(Path path, String name) throws IOException;
 
     public AclStatus getAclStatus(Path path) throws IOException;
+
+    public void access(Path path, FsAction mode) throws IOException;
   }
 
   @Test

+ 51 - 33
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
@@ -27,7 +28,6 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
-import org.apache.hadoop.util.StringUtils;
 
 import javax.ws.rs.Consumes;
 import javax.ws.rs.DELETE;
@@ -59,22 +59,25 @@ import java.util.Map;
 @Path(KMSRESTConstants.SERVICE_VERSION)
 @InterfaceAudience.Private
 public class KMS {
-  private static final String CREATE_KEY = "CREATE_KEY";
-  private static final String DELETE_KEY = "DELETE_KEY";
-  private static final String ROLL_NEW_VERSION = "ROLL_NEW_VERSION";
-  private static final String GET_KEYS = "GET_KEYS";
-  private static final String GET_KEYS_METADATA = "GET_KEYS_METADATA";
-  private static final String GET_KEY_VERSION = "GET_KEY_VERSION";
-  private static final String GET_CURRENT_KEY = "GET_CURRENT_KEY";
-  private static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS";
-  private static final String GET_METADATA = "GET_METADATA";
-  private static final String GENERATE_EEK = "GENERATE_EEK";
-  private static final String DECRYPT_EEK = "DECRYPT_EEK";
-
+  public static final String CREATE_KEY = "CREATE_KEY";
+  public static final String DELETE_KEY = "DELETE_KEY";
+  public static final String ROLL_NEW_VERSION = "ROLL_NEW_VERSION";
+  public static final String GET_KEYS = "GET_KEYS";
+  public static final String GET_KEYS_METADATA = "GET_KEYS_METADATA";
+  public static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS";
+  public static final String GET_METADATA = "GET_METADATA";
+
+  public static final String GET_KEY_VERSION = "GET_KEY_VERSION";
+  public static final String GET_CURRENT_KEY = "GET_CURRENT_KEY";
+  public static final String GENERATE_EEK = "GENERATE_EEK";
+  public static final String DECRYPT_EEK = "DECRYPT_EEK";
+  
   private KeyProviderCryptoExtension provider;
+  private KMSAudit kmsAudit;
 
   public KMS() throws Exception {
     provider = KMSWebApp.getKeyProvider();
+    kmsAudit = KMSWebApp.getKMSAudit();
   }
 
   private static Principal getPrincipal(SecurityContext securityContext)
@@ -86,13 +89,26 @@ public class KMS {
     return user;
   }
 
-  private static void assertAccess(KMSACLs.Type aclType, Principal principal,
+
+  private static final String UNAUTHORIZED_MSG_WITH_KEY = 
+      "User:{0} not allowed to do ''{1}'' on ''{2}''";
+  
+  private static final String UNAUTHORIZED_MSG_WITHOUT_KEY = 
+      "User:{0} not allowed to do ''{1}''";
+
+  private void assertAccess(KMSACLs.Type aclType, Principal principal,
+      String operation) throws AccessControlException {
+    assertAccess(aclType, principal, operation, null);
+  }
+
+  private void assertAccess(KMSACLs.Type aclType, Principal principal,
       String operation, String key) throws AccessControlException {
     if (!KMSWebApp.getACLs().hasAccess(aclType, principal.getName())) {
       KMSWebApp.getUnauthorizedCallsMeter().mark();
-      KMSAudit.unauthorized(principal, operation, key);
+      kmsAudit.unauthorized(principal, operation, key);
       throw new AuthorizationException(MessageFormat.format(
-          "User:{0} not allowed to do ''{1}'' on ''{2}''",
+          (key != null) ? UNAUTHORIZED_MSG_WITH_KEY 
+                        : UNAUTHORIZED_MSG_WITHOUT_KEY,
           principal.getName(), operation, key));
     }
   }
@@ -149,7 +165,7 @@ public class KMS {
 
     provider.flush();
 
-    KMSAudit.ok(user, CREATE_KEY, name, "UserProvidedMaterial:" +
+    kmsAudit.ok(user, CREATE_KEY, name, "UserProvidedMaterial:" +
         (material != null) + " Description:" + description);
 
     if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) {
@@ -175,7 +191,7 @@ public class KMS {
     provider.deleteKey(name);
     provider.flush();
 
-    KMSAudit.ok(user, DELETE_KEY, name, "");
+    kmsAudit.ok(user, DELETE_KEY, name, "");
 
     return Response.ok().build();
   }
@@ -203,7 +219,7 @@ public class KMS {
 
     provider.flush();
 
-    KMSAudit.ok(user, ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
+    kmsAudit.ok(user, ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
         (material != null) + " NewVersion:" + keyVersion.getVersionName());
 
     if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) {
@@ -222,11 +238,10 @@ public class KMS {
     KMSWebApp.getAdminCallsMeter().mark();
     Principal user = getPrincipal(securityContext);
     String[] keyNames = keyNamesList.toArray(new String[keyNamesList.size()]);
-    String names = StringUtils.arrayToString(keyNames);
-    assertAccess(KMSACLs.Type.GET_METADATA, user, GET_KEYS_METADATA, names);
+    assertAccess(KMSACLs.Type.GET_METADATA, user, GET_KEYS_METADATA);
     KeyProvider.Metadata[] keysMeta = provider.getKeysMetadata(keyNames);
     Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta);
-    KMSAudit.ok(user, GET_KEYS_METADATA, names, "");
+    kmsAudit.ok(user, GET_KEYS_METADATA, "");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -237,9 +252,9 @@ public class KMS {
       throws Exception {
     KMSWebApp.getAdminCallsMeter().mark();
     Principal user = getPrincipal(securityContext);
-    assertAccess(KMSACLs.Type.GET_KEYS, user, GET_KEYS, "*");
+    assertAccess(KMSACLs.Type.GET_KEYS, user, GET_KEYS);
     Object json = provider.getKeys();
-    KMSAudit.ok(user, GET_KEYS, "*", "");
+    kmsAudit.ok(user, GET_KEYS, "");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -263,7 +278,7 @@ public class KMS {
     KMSWebApp.getAdminCallsMeter().mark();
     assertAccess(KMSACLs.Type.GET_METADATA, user, GET_METADATA, name);
     Object json = KMSServerJSONUtils.toJSON(name, provider.getMetadata(name));
-    KMSAudit.ok(user, GET_METADATA, name, "");
+    kmsAudit.ok(user, GET_METADATA, name, "");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -279,7 +294,7 @@ public class KMS {
     KMSWebApp.getKeyCallsMeter().mark();
     assertAccess(KMSACLs.Type.GET, user, GET_CURRENT_KEY, name);
     Object json = KMSServerJSONUtils.toJSON(provider.getCurrentKey(name));
-    KMSAudit.ok(user, GET_CURRENT_KEY, name, "");
+    kmsAudit.ok(user, GET_CURRENT_KEY, name, "");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -292,9 +307,12 @@ public class KMS {
     Principal user = getPrincipal(securityContext);
     KMSClientProvider.checkNotEmpty(versionName, "versionName");
     KMSWebApp.getKeyCallsMeter().mark();
-    assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSION, versionName);
-    Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersion(versionName));
-    KMSAudit.ok(user, GET_KEY_VERSION, versionName, "");
+    KeyVersion keyVersion = provider.getKeyVersion(versionName);
+    assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSION);
+    if (keyVersion != null) {
+      kmsAudit.ok(user, GET_KEY_VERSION, keyVersion.getName(), "");
+    }
+    Object json = KMSServerJSONUtils.toJSON(keyVersion);
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -327,7 +345,7 @@ public class KMS {
       } catch (Exception e) {
         throw new IOException(e);
       }
-      KMSAudit.ok(user, GENERATE_EEK, name, "");
+      kmsAudit.ok(user, GENERATE_EEK, name, "");
       retJSON = new ArrayList();
       for (EncryptedKeyVersion edek : retEdeks) {
         ((ArrayList)retJSON).add(KMSServerJSONUtils.toJSON(edek));
@@ -362,7 +380,7 @@ public class KMS {
         (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
     Object retJSON;
     if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
-      assertAccess(KMSACLs.Type.DECRYPT_EEK, user, DECRYPT_EEK, versionName);
+      assertAccess(KMSACLs.Type.DECRYPT_EEK, user, DECRYPT_EEK, keyName);
       KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
       byte[] iv = Base64.decodeBase64(ivStr);
       KMSClientProvider.checkNotNull(encMaterialStr,
@@ -373,7 +391,7 @@ public class KMS {
               new KMSClientProvider.KMSEncryptedKeyVersion(keyName, versionName,
                   iv, KeyProviderCryptoExtension.EEK, encMaterial));
       retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
-      KMSAudit.ok(user, DECRYPT_EEK, versionName, "");
+      kmsAudit.ok(user, DECRYPT_EEK, keyName, "");
     } else {
       throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP +
           " value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " +
@@ -396,7 +414,7 @@ public class KMS {
     KMSWebApp.getKeyCallsMeter().mark();
     assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSIONS, name);
     Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersions(name));
-    KMSAudit.ok(user, GET_KEY_VERSIONS, name, "");
+    kmsAudit.ok(user, GET_KEY_VERSIONS, name, "");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 

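For reference, MessageFormat renders a doubled '' as a literal single quote, so the two unauthorized templates above produce messages like the following (user, operation, and key are illustrative):

    User:alice not allowed to do 'DECRYPT_EEK' on 'k1'
    User:alice not allowed to do 'GET_KEYS'
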
+ 174 - 15
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java

@@ -20,43 +20,202 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Joiner;
+import com.google.common.base.Strings;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 import java.security.Principal;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
 
 /**
  * Provides convenience methods for logging the different types of audit
  * events in a consistent format.
  */
 public class KMSAudit {
+
+  private static class AuditEvent {
+    private final AtomicLong accessCount = new AtomicLong(-1);
+    private final String keyName;
+    private final String user;
+    private final String op;
+    private final String extraMsg;
+    private final long startTime = System.currentTimeMillis();
+
+    private AuditEvent(String keyName, String user, String op, String msg) {
+      this.keyName = keyName;
+      this.user = user;
+      this.op = op;
+      this.extraMsg = msg;
+    }
+
+    public String getExtraMsg() {
+      return extraMsg;
+    }
+
+    public AtomicLong getAccessCount() {
+      return accessCount;
+    }
+
+    public String getKeyName() {
+      return keyName;
+    }
+
+    public String getUser() {
+      return user;
+    }
+
+    public String getOp() {
+      return op;
+    }
+
+    public long getStartTime() {
+      return startTime;
+    }
+  }
+
+  public static enum OpStatus {
+    OK, UNAUTHORIZED, UNAUTHENTICATED, ERROR;
+  }
+
+  private static Set<String> AGGREGATE_OPS_WHITELIST = Sets.newHashSet(
+    KMS.GET_KEY_VERSION, KMS.GET_CURRENT_KEY, KMS.DECRYPT_EEK, KMS.GENERATE_EEK
+  );
+
+  private Cache<String, AuditEvent> cache;
+
+  private ScheduledExecutorService executor;
+
   public static final String KMS_LOGGER_NAME = "kms-audit";
 
   private static Logger AUDIT_LOG = LoggerFactory.getLogger(KMS_LOGGER_NAME);
 
-  private static void op(String status, String op, Principal user, String key,
-      String extraMsg) {
-    AUDIT_LOG.info("Status:{} User:{} Op:{} Name:{}{}", status, user.getName(),
-        op, key, extraMsg);
+  KMSAudit(long delay) {
+    cache = CacheBuilder.newBuilder()
+        .expireAfterWrite(delay, TimeUnit.MILLISECONDS)
+        .removalListener(
+            new RemovalListener<String, AuditEvent>() {
+              @Override
+              public void onRemoval(
+                  RemovalNotification<String, AuditEvent> entry) {
+                AuditEvent event = entry.getValue();
+                if (event.getAccessCount().get() > 0) {
+                  KMSAudit.this.logEvent(event);
+                  event.getAccessCount().set(0);
+                  KMSAudit.this.cache.put(entry.getKey(), event);
+                }
+              }
+            }).build();
+    executor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
+        .setDaemon(true).setNameFormat(KMS_LOGGER_NAME + "_thread").build());
+    executor.scheduleAtFixedRate(new Runnable() {
+      @Override
+      public void run() {
+        cache.cleanUp();
+      }
+    }, delay / 10, delay / 10, TimeUnit.MILLISECONDS);
+  }
+
+  private void logEvent(AuditEvent event) {
+    AUDIT_LOG.info(
+        "OK[op={}, key={}, user={}, accessCount={}, interval={}ms] {}",
+        event.getOp(), event.getKeyName(), event.getUser(),
+        event.getAccessCount().get(),
+        (System.currentTimeMillis() - event.getStartTime()),
+        event.getExtraMsg());
+  }
+
+  private void op(OpStatus opStatus, final String op, final String user,
+      final String key, final String extraMsg) {
+    if (!Strings.isNullOrEmpty(user) && !Strings.isNullOrEmpty(key)
+        && !Strings.isNullOrEmpty(op)
+        && AGGREGATE_OPS_WHITELIST.contains(op)) {
+      String cacheKey = createCacheKey(user, key, op);
+      if (opStatus == OpStatus.UNAUTHORIZED) {
+        cache.invalidate(cacheKey);
+        AUDIT_LOG.info("UNAUTHORIZED[op={}, key={}, user={}] {}", op, key, user,
+            extraMsg);
+      } else {
+        try {
+          AuditEvent event = cache.get(cacheKey, new Callable<AuditEvent>() {
+            @Override
+            public AuditEvent call() throws Exception {
+              return new AuditEvent(key, user, op, extraMsg);
+            }
+          });
+          // Log first access (initialized as -1 so
+          // incrementAndGet() == 0 implies first access)
+          if (event.getAccessCount().incrementAndGet() == 0) {
+            event.getAccessCount().incrementAndGet();
+            logEvent(event);
+          }
+        } catch (ExecutionException ex) {
+          throw new RuntimeException(ex);
+        }
+      }
+    } else {
+      List<String> kvs = new LinkedList<String>();
+      if (!Strings.isNullOrEmpty(op)) {
+        kvs.add("op=" + op);
+      }
+      if (!Strings.isNullOrEmpty(key)) {
+        kvs.add("key=" + key);
+      }
+      if (!Strings.isNullOrEmpty(user)) {
+        kvs.add("user=" + user);
+      }
+      if (kvs.size() == 0) {
+        AUDIT_LOG.info("{} {}", opStatus.toString(), extraMsg);
+      } else {
+        String join = Joiner.on(", ").join(kvs);
+        AUDIT_LOG.info("{}[{}] {}", opStatus.toString(), join, extraMsg);
+      }
+    }
   }
 
-  public static void ok(Principal user, String op, String key,
+  public void ok(Principal user, String op, String key,
       String extraMsg) {
-    op("OK", op, user, key, extraMsg);
+    op(OpStatus.OK, op, user.getName(), key, extraMsg);
+  }
+
+  public void ok(Principal user, String op, String extraMsg) {
+    op(OpStatus.OK, op, user.getName(), null, extraMsg);
   }
 
-  public static void unauthorized(Principal user, String op, String key) {
-    op("UNAUTHORIZED", op, user, key, "");
+  public void unauthorized(Principal user, String op, String key) {
+    op(OpStatus.UNAUTHORIZED, op, user.getName(), key, "");
   }
 
-  public static void error(Principal user, String method, String url,
+  public void error(Principal user, String method, String url,
       String extraMsg) {
-    AUDIT_LOG.info("Status:ERROR User:{} Method:{} URL:{} Exception:'{}'",
-        user.getName(), method, url, extraMsg);
+    op(OpStatus.ERROR, null, user.getName(), null, "Method:'" + method
+        + "' Exception:'" + extraMsg + "'");
   }
 
-  public static void unauthenticated(String remoteHost, String method,
+  public void unauthenticated(String remoteHost, String method,
       String url, String extraMsg) {
-    AUDIT_LOG.info(
-        "Status:UNAUTHENTICATED RemoteHost:{} Method:{} URL:{} ErrorMsg:'{}'",
-        remoteHost, method, url, extraMsg);
+    op(OpStatus.UNAUTHENTICATED, null, null, null, "RemoteHost:"
+        + remoteHost + " Method:" + method
+        + " URL:" + url + " ErrorMsg:'" + extraMsg + "'");
   }
 
+  private static String createCacheKey(String user, String key, String op) {
+    return user + "#" + key + "#" + op;
+  }
+
+  public void shutdown() {
+    executor.shutdownNow();
+  }
 }

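A minimal usage sketch of the reworked class, based only on the constructor and methods in this hunk (the Principal instance user is assumed to exist):

    // Aggregate whitelisted ops such as DECRYPT_EEK over a 10-second window.
    KMSAudit audit = new KMSAudit(10000);
    audit.ok(user, KMS.DECRYPT_EEK, "k1", "");  // first access is logged immediately
    audit.ok(user, KMS.DECRYPT_EEK, "k1", "");  // later accesses are only counted,
    audit.ok(user, KMS.DECRYPT_EEK, "k1", "");  // then flushed when the entry expires
    audit.shutdown();  // stops the daemon cleanup thread
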
+ 4 - 2
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java

@@ -115,8 +115,10 @@ public class KMSAuthenticationFilter extends AuthenticationFilter {
       if (queryString != null) {
         requestURL.append("?").append(queryString);
       }
-      KMSAudit.unauthenticated(request.getRemoteHost(), method,
-          requestURL.toString(), kmsResponse.msg);
+
+      KMSWebApp.getKMSAudit().unauthenticated(
+          request.getRemoteHost(), method, requestURL.toString(),
+          kmsResponse.msg);
     }
   }
 

+ 6 - 1
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java

@@ -43,12 +43,17 @@ public class KMSConfiguration {
  // Timeout for the Current Key cache
   public static final String CURR_KEY_CACHE_TIMEOUT_KEY = CONFIG_PREFIX +
       "current.key.cache.timeout.ms";
-
+  // Delay for Audit logs that need aggregation
+  public static final String KMS_AUDIT_AGGREGATION_DELAY = CONFIG_PREFIX +
+      "aggregation.delay.ms";
+  
   public static final boolean KEY_CACHE_ENABLE_DEFAULT = true;
   // 10 mins
   public static final long KEY_CACHE_TIMEOUT_DEFAULT = 10 * 60 * 1000;
   // 30 secs
   public static final long CURR_KEY_CACHE_TIMEOUT_DEFAULT = 30 * 1000;
+  // 10 secs
+  public static final long KMS_AUDIT_AGGREGATION_DELAY_DEFAULT = 10000;
 
   static Configuration getConfiguration(boolean loadHadoopDefaults,
       String ... resources) {

+ 9 - 1
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java

@@ -20,9 +20,11 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.hadoop.classification.InterfaceAudience;
 
 import com.sun.jersey.api.container.ContainerException;
+
 import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -30,6 +32,7 @@ import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.ext.ExceptionMapper;
 import javax.ws.rs.ext.Provider;
+
 import java.io.IOException;
 import java.security.Principal;
 import java.util.LinkedHashMap;
@@ -83,6 +86,10 @@ public class KMSExceptionsProvider implements ExceptionMapper<Exception> {
       status = Response.Status.FORBIDDEN;
       // we don't audit here because we did it already when checking access
       doAudit = false;
+    } else if (throwable instanceof AuthorizationException) {
+      status = Response.Status.UNAUTHORIZED;
+      // we don't audit here because we did it already when checking access
+      doAudit = false;
     } else if (throwable instanceof AccessControlException) {
       status = Response.Status.FORBIDDEN;
     } else if (exception instanceof IOException) {
@@ -95,7 +102,8 @@ public class KMSExceptionsProvider implements ExceptionMapper<Exception> {
       status = Response.Status.INTERNAL_SERVER_ERROR;
     }
     if (doAudit) {
-      KMSAudit.error(KMSMDCFilter.getPrincipal(), KMSMDCFilter.getMethod(),
+      KMSWebApp.getKMSAudit().error(KMSMDCFilter.getPrincipal(),
+          KMSMDCFilter.getMethod(),
           KMSMDCFilter.getURL(), getOneLineMessage(exception));
     }
     return createResponse(status, throwable);

+ 11 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java

@@ -76,6 +76,7 @@ public class KMSWebApp implements ServletContextListener {
   private static Meter decryptEEKCallsMeter;
   private static Meter generateEEKCallsMeter;
   private static Meter invalidCallsMeter;
+  private static KMSAudit kmsAudit;
   private static KeyProviderCryptoExtension keyProviderCryptoExtension;
 
   static {
@@ -144,6 +145,11 @@ public class KMSWebApp implements ServletContextListener {
       unauthenticatedCallsMeter = metricRegistry.register(
           UNAUTHENTICATED_CALLS_METER, new Meter());
 
+      kmsAudit =
+          new KMSAudit(kmsConf.getLong(
+              KMSConfiguration.KMS_AUDIT_AGGREGATION_DELAY,
+              KMSConfiguration.KMS_AUDIT_AGGREGATION_DELAY_DEFAULT));
+
      // this is required for the JMXJsonServlet to work properly.
       // the JMXJsonServlet is behind the authentication filter,
       // thus the '*' ACL.
@@ -199,6 +205,7 @@ public class KMSWebApp implements ServletContextListener {
 
   @Override
   public void contextDestroyed(ServletContextEvent sce) {
+    kmsAudit.shutdown();
     acls.stopReloader();
     jmxReporter.stop();
     jmxReporter.close();
@@ -245,4 +252,8 @@ public class KMSWebApp implements ServletContextListener {
   public static KeyProviderCryptoExtension getKeyProvider() {
     return keyProviderCryptoExtension;
   }
+
+  public static KMSAudit getKMSAudit() {
+    return kmsAudit;
+  }
 }

+ 19 - 0
hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm

@@ -104,6 +104,25 @@ Hadoop Key Management Server (KMS) - Documentation Sets ${project.version}
   </property>
 +---+
 
+** KMS Aggregated Audit logs
+
+Audit logs are aggregated for API accesses to the GET_KEY_VERSION,
+GET_CURRENT_KEY, DECRYPT_EEK, and GENERATE_EEK operations.
+
+Entries are grouped by a combined (user, key, operation) key for a
+configurable aggregation interval, after which the number of accesses made
+by that user to the given key via the given operation is flushed to the
+audit log.
+
+The aggregation interval is configured via the following property:
+
++---+
+  <property>
+    <name>hadoop.kms.aggregation.delay.ms</name>
+    <value>10000</value>
+  </property>
++---+
+ 
+
 ** Start/Stop the KMS
 
   To start/stop KMS use KMS's bin/kms.sh script. For example:

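Given the logEvent format introduced in KMSAudit above, an aggregated entry in the audit log looks roughly like this (key, user, count, and interval are illustrative; the trailing extra message is empty here):

    OK[op=DECRYPT_EEK, key=k1, user=alice, accessCount=6, interval=10004ms]
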
+ 134 - 0
hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java

@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import java.io.ByteArrayOutputStream;
+import java.io.FilterOutputStream;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.security.Principal;
+
+import org.apache.log4j.LogManager;
+import org.apache.log4j.PropertyConfigurator;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+public class TestKMSAudit {
+
+  private PrintStream originalOut;
+  private ByteArrayOutputStream memOut;
+  private FilterOut filterOut;
+  private PrintStream capturedOut;
+  
+  private KMSAudit kmsAudit; 
+
+  private static class FilterOut extends FilterOutputStream {
+    public FilterOut(OutputStream out) {
+      super(out);
+    }
+
+    public void setOutputStream(OutputStream out) {
+      this.out = out;
+    }
+  }
+
+  @Before
+  public void setUp() {
+    originalOut = System.err;
+    memOut = new ByteArrayOutputStream();
+    filterOut = new FilterOut(memOut);
+    capturedOut = new PrintStream(filterOut);
+    System.setErr(capturedOut);
+    PropertyConfigurator.configure(Thread.currentThread().
+        getContextClassLoader()
+        .getResourceAsStream("log4j-kmsaudit.properties"));
+    this.kmsAudit = new KMSAudit(1000);
+  }
+
+  @After
+  public void cleanUp() {
+    System.setErr(originalOut);
+    LogManager.resetConfiguration();
+    kmsAudit.shutdown();
+  }
+
+  private String getAndResetLogOutput() {
+    capturedOut.flush();
+    String logOutput = new String(memOut.toByteArray());
+    memOut = new ByteArrayOutputStream();
+    filterOut.setOutputStream(memOut);
+    return logOutput;
+  }
+
+  @Test
+  public void testAggregation() throws Exception {
+    Principal luser = Mockito.mock(Principal.class);
+    Mockito.when(luser.getName()).thenReturn("luser");
+    kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+    kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+    kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+    kmsAudit.ok(luser, KMS.DELETE_KEY, "k1", "testmsg");
+    kmsAudit.ok(luser, KMS.ROLL_NEW_VERSION, "k1", "testmsg");
+    kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+    kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+    kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+    Thread.sleep(1500);
+    kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+    Thread.sleep(1500);
+    String out = getAndResetLogOutput();
+    System.out.println(out);
+    Assert.assertTrue(
+        out.matches(
+            "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"
+            // Not aggregated !!
+            + "OK\\[op=DELETE_KEY, key=k1, user=luser\\] testmsg"
+            + "OK\\[op=ROLL_NEW_VERSION, key=k1, user=luser\\] testmsg"
+            // Aggregated
+            + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=6, interval=[^m]{1,4}ms\\] testmsg"
+            + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"));
+  }
+
+  @Test
+  public void testAggregationUnauth() throws Exception {
+    Principal luser = Mockito.mock(Principal.class);
+    Mockito.when(luser.getName()).thenReturn("luser");
+    kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k2");
+    Thread.sleep(1000);
+    kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
+    kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
+    kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
+    kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
+    kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
+    kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k3");
+    kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
+    Thread.sleep(2000);
+    String out = getAndResetLogOutput();
+    System.out.println(out);
+    Assert.assertTrue(
+        out.matches(
+            "UNAUTHORIZED\\[op=GENERATE_EEK, key=k2, user=luser\\] "
+            + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"
+            + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=5, interval=[^m]{1,4}ms\\] testmsg"
+            + "UNAUTHORIZED\\[op=GENERATE_EEK, key=k3, user=luser\\] "
+            + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"));
+  }
+
+}

+ 25 - 0
hadoop-common-project/hadoop-kms/src/test/resources/log4j-kmsaudit.properties

@@ -0,0 +1,25 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# LOG Appender
+log4j.appender.kms-audit=org.apache.log4j.ConsoleAppender
+log4j.appender.kms-audit.Target=System.err
+log4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms-audit.layout.ConversionPattern=%m
+
+log4j.rootLogger=INFO, kms-audit

+ 27 - 3
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -204,9 +204,6 @@ Trunk (Unreleased)
 
     HDFS-3549. Fix dist tar build fails in hadoop-hdfs-raid project. (Jason Lowe via daryn)
 
-    HDFS-3482. hdfs balancer throws ArrayIndexOutOfBoundsException 
-    if option is specified without values. ( Madhukara Phatak via umamahesh) 
-
     HDFS-3614. Revert unused MiniDFSCluster constructor from HDFS-3049.
     (acmurthy via eli)
 
@@ -346,6 +343,20 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-6739. Add getDatanodeStorageReport to ClientProtocol. (szetszwo)
 
+    HDFS-6665. Add tests for XAttrs in combination with viewfs.
+    (Stephen Chu via wang)
+
+    HDFS-6778. The extended attributes javadoc should simply refer to the
+    user docs. (clamb via wang)
+
+    HDFS-6570. Add API that enables checking if a user has certain
+    permissions on a file. (Jitendra Pandey via cnauroth)
+
+    HDFS-6441. Add ability to exclude/include specific datanodes while
+    balancing. (Benoy Antony and Yu Li via Arpit Agarwal)
+
+    HDFS-6685. Balancer should preserve storage type of replicas.  (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -408,6 +419,16 @@ Release 2.6.0 - UNRELEASED
     HDFS-6749. FSNamesystem methods should call resolvePath.
     (Charles Lamb via cnauroth)
 
+    HDFS-4629. Using com.sun.org.apache.xml.internal.serialize.* in
+    XmlEditsVisitor.java is JVM vendor specific. Breaks IBM JAVA.
+    (Amir Sanjar via stevel)
+
+    HDFS-3482. hdfs balancer throws ArrayIndexOutOfBoundsException 
+    if option is specified without values. ( Madhukara Phatak via umamahesh) 
+
+    HDFS-6797. DataNode logs wrong layoutversion during upgrade. (Benoy Antony
+    via Arpit Agarwal)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -963,6 +984,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6717. JIRA HDFS-5804 breaks default nfs-gateway behavior for unsecured config
     (brandonli)
 
+    HDFS-6768. Fix a few unit tests that use hard-coded port numbers. (Arpit
+    Agarwal)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -176,6 +176,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>netty</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>xerces</groupId>
+      <artifactId>xercesImpl</artifactId>
+      <scope>compile</scope>
+    </dependency>
   </dependencies>
 
   <build>

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
@@ -448,6 +449,11 @@ public class Hdfs extends AbstractFileSystem {
     dfs.removeXAttr(getUriPath(path), name);
   }
 
+  @Override
+  public void access(Path path, final FsAction mode) throws IOException {
+    dfs.checkAccess(getUriPath(path), mode);
+  }
+
   /**
    * Renew an existing delegation token.
    * 

+ 12 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -122,6 +122,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.net.Peer;
@@ -2832,6 +2833,17 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
+  public void checkAccess(String src, FsAction mode) throws IOException {
+    checkOpen();
+    try {
+      namenode.checkAccess(src, mode);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+          FileNotFoundException.class,
+          UnresolvedPathException.class);
+    }
+  }
+
   @Override // RemotePeerFactory
   public Peer newConnectedPeer(InetSocketAddress addr,
       Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)

+ 20 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -59,6 +59,7 @@ import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
@@ -1898,4 +1899,23 @@ public class DistributedFileSystem extends FileSystem {
       }
     }.resolve(this, absF);
   }
+
+  @Override
+  public void access(Path path, final FsAction mode) throws IOException {
+    final Path absF = fixRelativePart(path);
+    new FileSystemLinkResolver<Void>() {
+      @Override
+      public Void doCall(final Path p) throws IOException {
+        dfs.checkAccess(getPathName(p), mode);
+        return null;
+      }
+
+      @Override
+      public Void next(final FileSystem fs, final Path p)
+          throws IOException {
+        fs.access(p, mode);
+        return null;
+      }
+    }.resolve(this, absF);
+  }
 }

+ 10 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java

@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hdfs;
 
+import java.util.Arrays;
+import java.util.List;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -35,4 +38,10 @@ public enum StorageType {
   public static final StorageType DEFAULT = DISK;
   
   public static final StorageType[] EMPTY_ARRAY = {};
-}
+  
+  private static final StorageType[] VALUES = values();
+  
+  public static List<StorageType> asList() {
+    return Arrays.asList(VALUES);
+  }
+}

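Caching values() in the private VALUES array is a common idiom: Enum.values() clones its backing array on every call, while asList() exposes one shared copy. A sketch of the intended use (the loop body is illustrative):

    // Allocation-free iteration over all storage types, as the
    // per-type accounting added to BalancingPolicy below requires.
    for (StorageType t : StorageType.asList()) {
      System.out.println(t);  // DISK, SSD, ... (whatever the enum defines)
    }
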
+ 35 - 36
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@@ -1267,17 +1268,11 @@ public interface ClientProtocol {
   
   /**
    * Set xattr of a file or directory.
-   * A regular user only can set xattr of "user" namespace.
-   * A super user can set xattr of "user" and "trusted" namespace.
-   * XAttr of "security" and "system" namespace is only used/exposed 
-   * internally to the FS impl.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
-   * For xattr of "user" namespace, its access permissions are 
-   * defined by the file or directory permission bits.
-   * XAttr will be set only when login user has correct permissions.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
    * @param src file or directory
    * @param xAttr <code>XAttr</code> to set
    * @param flag set flag
@@ -1288,18 +1283,13 @@ public interface ClientProtocol {
       throws IOException;
   
   /**
-   * Get xattrs of file or directory. Values in xAttrs parameter are ignored.
-   * If xattrs is null or empty, equals getting all xattrs of the file or 
-   * directory.
-   * Only xattrs which login user has correct permissions will be returned. 
+   * Get xattrs of a file or directory. Values in xAttrs parameter are ignored.
+   * If xAttrs is null or empty, this is the same as getting all xattrs of the
+   * file or directory.  Only those xattrs for which the logged-in user has
+   * permissions to view are returned.
    * <p/>
-   * A regular user only can get xattr of "user" namespace.
-   * A super user can get xattr of "user" and "trusted" namespace.
-   * XAttr of "security" and "system" namespace is only used/exposed 
-   * internally to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
    * @param src file or directory
    * @param xAttrs xAttrs to get
    * @return List<XAttr> <code>XAttr</code> list 
@@ -1314,13 +1304,8 @@ public interface ClientProtocol {
    * Only the xattr names for which the logged in user has the permissions to
    * access will be returned.
    * <p/>
-   * A regular user only can get xattr names from the "user" namespace.
-   * A super user can get xattr names of the "user" and "trusted" namespace.
-   * XAttr names of the "security" and "system" namespaces are only used/exposed
-   * internally by the file system impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
    * @param src file or directory
    * @param xAttrs xAttrs to get
    * @return List<XAttr> <code>XAttr</code> list
@@ -1332,19 +1317,33 @@ public interface ClientProtocol {
   
   /**
   * Remove xattr of a file or directory. Value in xAttr parameter is ignored.
-   * Name must be prefixed with user/trusted/security/system.
-   * <p/>
-   * A regular user only can remove xattr of "user" namespace.
-   * A super user can remove xattr of "user" and "trusted" namespace.
-   * XAttr of "security" and "system" namespace is only used/exposed 
-   * internally to the FS impl.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
+   *
    * @param src file or directory
    * @param xAttr <code>XAttr</code> to remove
    * @throws IOException
    */
   @AtMostOnce
   public void removeXAttr(String src, XAttr xAttr) throws IOException;
+
+  /**
+   * Checks if the user can access a path.  The mode specifies which access
+   * checks to perform.  If the requested permissions are granted, then the
+   * method returns normally.  If access is denied, then the method throws an
+   * {@link AccessControlException}.
+   * In general, applications should avoid using this method, due to the risk of
+   * time-of-check/time-of-use race conditions.  The permissions on a file may
+   * change immediately after the access call returns.
+   *
+   * @param path Path to check
+   * @param mode type of access to check
+   * @throws AccessControlException if access is denied
+   * @throws FileNotFoundException if the path does not exist
+   * @throws IOException see specific implementation
+   */
+  @Idempotent
+  public void checkAccess(String path, FsAction mode) throws IOException;
 }

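A client-side sketch of exercising the new check through the FileSystem API added elsewhere in this commit (the path is illustrative and fs is assumed to be an initialized FileSystem):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    // Returns normally if the caller may read the path; throws otherwise.
    // Per the javadoc above, treat the answer as advisory: permissions can
    // change between this check and a subsequent open() (TOCTOU).
    try {
      fs.access(new Path("/data/input"), FsAction.READ);
    } catch (AccessControlException e) {
      // e.g. skip the file or surface a permissions error to the user
    }
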
+ 16 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -174,6 +174,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
@@ -320,6 +322,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   private static final RemoveXAttrResponseProto
     VOID_REMOVEXATTR_RESPONSE = RemoveXAttrResponseProto.getDefaultInstance();
 
+  private static final CheckAccessResponseProto
+    VOID_CHECKACCESS_RESPONSE = CheckAccessResponseProto.getDefaultInstance();
+
   /**
    * Constructor
    * 
@@ -1338,4 +1343,15 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     return VOID_REMOVEXATTR_RESPONSE;
   }
+
+  @Override
+  public CheckAccessResponseProto checkAccess(RpcController controller,
+     CheckAccessRequestProto req) throws ServiceException {
+    try {
+      server.checkAccess(req.getPath(), PBHelper.convert(req.getMode()));
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return VOID_CHECKACCESS_RESPONSE;
+  }
 }

+ 13 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -144,6 +145,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSaf
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
@@ -1346,4 +1348,15 @@ public class ClientNamenodeProtocolTranslatorPB implements
       throw ProtobufHelper.getRemoteException(e);
     }
   }
+
+  @Override
+  public void checkAccess(String path, FsAction mode) throws IOException {
+    CheckAccessRequestProto req = CheckAccessRequestProto.newBuilder()
+        .setPath(path).setMode(PBHelper.convert(mode)).build();
+    try {
+      rpcProxy.checkAccess(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
 }

+ 8 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -352,15 +352,19 @@ public class PBHelper {
     return BlockWithLocationsProto.newBuilder()
         .setBlock(convert(blk.getBlock()))
         .addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids()))
-        .addAllStorageUuids(Arrays.asList(blk.getStorageIDs())).build();
+        .addAllStorageUuids(Arrays.asList(blk.getStorageIDs()))
+        .addAllStorageTypes(convertStorageTypes(blk.getStorageTypes()))
+        .build();
   }
 
   public static BlockWithLocations convert(BlockWithLocationsProto b) {
     final List<String> datanodeUuids = b.getDatanodeUuidsList();
     final List<String> storageUuids = b.getStorageUuidsList();
+    final List<StorageTypeProto> storageTypes = b.getStorageTypesList();
     return new BlockWithLocations(convert(b.getBlock()),
         datanodeUuids.toArray(new String[datanodeUuids.size()]),
-        storageUuids.toArray(new String[storageUuids.size()]));
+        storageUuids.toArray(new String[storageUuids.size()]),
+        convertStorageTypes(storageTypes, storageUuids.size()));
   }
 
   public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
@@ -2111,11 +2115,11 @@ public class PBHelper {
     return castEnum(v, XATTR_NAMESPACE_VALUES);
   }
 
-  private static FsActionProto convert(FsAction v) {
+  public static FsActionProto convert(FsAction v) {
     return FsActionProto.valueOf(v != null ? v.ordinal() : 0);
   }
 
-  private static FsAction convert(FsActionProto v) {
+  public static FsAction convert(FsActionProto v) {
     return castEnum(v, FSACTION_VALUES);
   }
 

File diff suppressed because it is too large
+ 368 - 249
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java


+ 61 - 23
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java

@@ -18,7 +18,11 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.EnumCounters;
+import org.apache.hadoop.hdfs.util.EnumDoubles;
 
 /**
  * Balancing policy.
@@ -28,31 +32,43 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
  */
 @InterfaceAudience.Private
 abstract class BalancingPolicy {
-  long totalCapacity;
-  long totalUsedSpace;
-  private double avgUtilization;
+  final EnumCounters<StorageType> totalCapacities
+      = new EnumCounters<StorageType>(StorageType.class);
+  final EnumCounters<StorageType> totalUsedSpaces
+      = new EnumCounters<StorageType>(StorageType.class);
+  final EnumDoubles<StorageType> avgUtilizations
+      = new EnumDoubles<StorageType>(StorageType.class);
 
   void reset() {
-    totalCapacity = 0L;
-    totalUsedSpace = 0L;
-    avgUtilization = 0.0;
+    totalCapacities.reset();
+    totalUsedSpaces.reset();
+    avgUtilizations.reset();
   }
 
   /** Get the policy name. */
   abstract String getName();
 
   /** Accumulate used space and capacity. */
-  abstract void accumulateSpaces(DatanodeInfo d);
+  abstract void accumulateSpaces(DatanodeStorageReport r);
 
   void initAvgUtilization() {
-    this.avgUtilization = totalUsedSpace*100.0/totalCapacity;
+    for(StorageType t : StorageType.asList()) {
+      final long capacity = totalCapacities.get(t);
+      if (capacity > 0L) {
+        final double avg  = totalUsedSpaces.get(t)*100.0/capacity;
+        avgUtilizations.set(t, avg);
+      }
+    }
   }
-  double getAvgUtilization() {
-    return avgUtilization;
+
+  double getAvgUtilization(StorageType t) {
+    return avgUtilizations.get(t);
   }
 
-  /** Return the utilization of a datanode */
-  abstract double getUtilization(DatanodeInfo d);
+  /** @return the utilization of a particular storage type of a datanode,
+   *          or null if the datanode does not have that storage type.
+   */
+  abstract Double getUtilization(DatanodeStorageReport r, StorageType t);
   
   @Override
   public String toString() {
@@ -84,14 +100,25 @@ abstract class BalancingPolicy {
     }
 
     @Override
-    void accumulateSpaces(DatanodeInfo d) {
-      totalCapacity += d.getCapacity();
-      totalUsedSpace += d.getDfsUsed();  
+    void accumulateSpaces(DatanodeStorageReport r) {
+      for(StorageReport s : r.getStorageReports()) {
+        final StorageType t = s.getStorage().getStorageType();
+        totalCapacities.add(t, s.getCapacity());
+        totalUsedSpaces.add(t, s.getDfsUsed());
+      }
     }
     
     @Override
-    double getUtilization(DatanodeInfo d) {
-      return d.getDfsUsed()*100.0/d.getCapacity();
+    Double getUtilization(DatanodeStorageReport r, final StorageType t) {
+      long capacity = 0L;
+      long dfsUsed = 0L;
+      for(StorageReport s : r.getStorageReports()) {
+        if (s.getStorage().getStorageType() == t) {
+          capacity += s.getCapacity();
+          dfsUsed += s.getDfsUsed();
+        }
+      }
+      return capacity == 0L? null: dfsUsed*100.0/capacity;
     }
   }
 
@@ -108,14 +135,25 @@ abstract class BalancingPolicy {
     }
 
     @Override
-    void accumulateSpaces(DatanodeInfo d) {
-      totalCapacity += d.getCapacity();
-      totalUsedSpace += d.getBlockPoolUsed();  
+    void accumulateSpaces(DatanodeStorageReport r) {
+      for(StorageReport s : r.getStorageReports()) {
+        final StorageType t = s.getStorage().getStorageType();
+        totalCapacities.add(t, s.getCapacity());
+        totalUsedSpaces.add(t, s.getBlockPoolUsed());
+      }
     }
 
     @Override
-    double getUtilization(DatanodeInfo d) {
-      return d.getBlockPoolUsed()*100.0/d.getCapacity();
+    Double getUtilization(DatanodeStorageReport r, final StorageType t) {
+      long capacity = 0L;
+      long blockPoolUsed = 0L;
+      for(StorageReport s : r.getStorageReports()) {
+        if (s.getStorage().getStorageType() == t) {
+          capacity += s.getCapacity();
+          blockPoolUsed += s.getBlockPoolUsed();
+        }
+      }
+      return capacity == 0L? null: blockPoolUsed*100.0/capacity;
     }
   }
 }

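A worked example of the per-type computation above: if a datanode reports two DISK volumes with capacities of 100 GB each and DFS usage of 40 GB and 20 GB, getUtilization(r, DISK) sums them and returns 60 * 100.0 / 200 = 30.0. For a storage type with no volumes on that node, capacity stays 0 and the method returns null, so callers can treat that (datanode, storage type) pair as absent rather than as 0% utilized.
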
+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -2855,12 +2855,15 @@ public class BlockManager {
     } else {
       final String[] datanodeUuids = new String[locations.size()];
       final String[] storageIDs = new String[datanodeUuids.length];
+      final StorageType[] storageTypes = new StorageType[datanodeUuids.length];
       for(int i = 0; i < locations.size(); i++) {
         final DatanodeStorageInfo s = locations.get(i);
         datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
         storageIDs[i] = s.getStorageID();
+        storageTypes[i] = s.getStorageType();
       }
-      results.add(new BlockWithLocations(block, datanodeUuids, storageIDs));
+      results.add(new BlockWithLocations(block, datanodeUuids, storageIDs,
+          storageTypes));
       return block.getNumBytes();
     }
   }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java

@@ -312,7 +312,7 @@ public class BlockPoolSliceStorage extends Storage {
     }
     LOG.info("Upgrading block pool storage directory " + bpSd.getRoot()
         + ".\n   old LV = " + this.getLayoutVersion() + "; old CTime = "
-        + this.getCTime() + ".\n   new LV = " + nsInfo.getLayoutVersion()
+        + this.getCTime() + ".\n   new LV = " + HdfsConstants.DATANODE_LAYOUT_VERSION
         + "; new CTime = " + nsInfo.getCTime());
     // get <SD>/previous directory
     String dnRoot = getDataNodeStorageRoot(bpSd.getRoot().getCanonicalPath());

+ 23 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -8461,6 +8461,29 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
   }
 
+  void checkAccess(String src, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    readLock();
+    try {
+      checkOperation(OperationCategory.READ);
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      if (dir.getINode(src) == null) {
+        throw new FileNotFoundException("Path not found");
+      }
+      if (isPermissionEnabled) {
+        FSPermissionChecker pc = getPermissionChecker();
+        checkPathAccess(pc, src, mode);
+      }
+    } catch (AccessControlException e) {
+      logAuditEvent(false, "checkAccess", src);
+      throw e;
+    } finally {
+      readUnlock();
+    }
+  }
+
   /**
    * Default AuditLogger implementation; used when no access logger is
    * defined in the config file. It can also be explicitly listed in the

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.HealthCheckFailedException;
@@ -1443,5 +1444,10 @@ class NameNodeRpcServer implements NamenodeProtocols {
   public void removeXAttr(String src, XAttr xAttr) throws IOException {
     namesystem.removeXAttr(src, xAttr);
   }
+
+  @Override
+  public void checkAccess(String path, FsAction mode) throws IOException {
+    namesystem.checkAccess(path, mode);
+  }
 }
 

+ 18 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

@@ -56,6 +56,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -110,6 +112,7 @@ import org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrNameParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
+import org.apache.hadoop.hdfs.web.resources.FsActionParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.Server;
@@ -750,10 +753,12 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) 
           final XAttrEncodingParam xattrEncoding,
       @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
-          final ExcludeDatanodesParam excludeDatanodes          
+          final ExcludeDatanodesParam excludeDatanodes,
+      @QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
+          final FsActionParam fsAction
       ) throws IOException, InterruptedException {
     return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
-        renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes);
+        renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes, fsAction);
   }
 
   /** Handle HTTP GET request. */
@@ -784,11 +789,13 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) 
           final XAttrEncodingParam xattrEncoding,
       @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
-          final ExcludeDatanodesParam excludeDatanodes
+          final ExcludeDatanodesParam excludeDatanodes,
+      @QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
+          final FsActionParam fsAction
       ) throws IOException, InterruptedException {
 
     init(ugi, delegation, username, doAsUser, path, op, offset, length,
-        renewer, bufferSize, xattrEncoding, excludeDatanodes);
+        renewer, bufferSize, xattrEncoding, excludeDatanodes, fsAction);
 
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
@@ -796,7 +803,7 @@ public class NamenodeWebHdfsMethods {
         try {
           return get(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
-              xattrNames, xattrEncoding, excludeDatanodes);
+              xattrNames, xattrEncoding, excludeDatanodes, fsAction);
         } finally {
           reset();
         }
@@ -817,7 +824,8 @@ public class NamenodeWebHdfsMethods {
       final BufferSizeParam bufferSize,
       final List<XAttrNameParam> xattrNames,
       final XAttrEncodingParam xattrEncoding,
-      final ExcludeDatanodesParam excludeDatanodes
+      final ExcludeDatanodesParam excludeDatanodes,
+      final FsActionParam fsAction
       ) throws IOException, URISyntaxException {
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final NamenodeProtocols np = getRPCServer(namenode);
@@ -914,6 +922,10 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString(xAttrs);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
+    case CHECKACCESS: {
+      np.checkAccess(fullpath, FsAction.getFsAction(fsAction.getValue()));
+      return Response.ok().build();
+    }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
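
Over the wire, CHECKACCESS is a plain HTTP GET with an empty response body on success (see the WebHDFS documentation change below). A sketch using only the JDK; the host, port, and path are placeholders, and the 403 mapping for AccessControlException follows the usual WebHDFS error convention:

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class CheckAccessHttpExample {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://namenode.example.com:50070/webhdfs/v1"
            + "/some/path?op=CHECKACCESS&fsaction=r--");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        // 200 with zero content length on success; an error status
        // (e.g. 403 for AccessControlException) otherwise.
        System.out.println(conn.getResponseCode());
        conn.disconnect();
      }
    }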

+ 19 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java

@@ -17,10 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.protocol;
 
-import java.util.Arrays;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 
 /**
@@ -39,12 +38,15 @@ public class BlocksWithLocations {
     final Block block;
     final String[] datanodeUuids;
     final String[] storageIDs;
+    final StorageType[] storageTypes;
     
     /** constructor */
-    public BlockWithLocations(Block block, String[] datanodeUuids, String[] storageIDs) {
+    public BlockWithLocations(Block block, String[] datanodeUuids,
+        String[] storageIDs, StorageType[] storageTypes) {
       this.block = block;
       this.datanodeUuids = datanodeUuids;
       this.storageIDs = storageIDs;
+      this.storageTypes = storageTypes;
     }
     
     /** get the block */
@@ -61,7 +63,12 @@ public class BlocksWithLocations {
     public String[] getStorageIDs() {
       return storageIDs;
     }
-    
+
+    /** @return the storage types */
+    public StorageType[] getStorageTypes() {
+      return storageTypes;
+    }
+
     @Override
     public String toString() {
       final StringBuilder b = new StringBuilder();
@@ -70,12 +77,18 @@ public class BlocksWithLocations {
         return b.append("[]").toString();
       }
       
-      b.append(storageIDs[0]).append('@').append(datanodeUuids[0]);
+      appendString(0, b.append("["));
       for(int i = 1; i < datanodeUuids.length; i++) {
-        b.append(", ").append(storageIDs[i]).append("@").append(datanodeUuids[i]);
+        appendString(i, b.append(","));
       }
       return b.append("]").toString();
     }
+    
+    private StringBuilder appendString(int i, StringBuilder b) {
+      return b.append("[").append(storageTypes[i]).append("]")
+              .append(storageIDs[i])
+              .append("@").append(datanodeUuids[i]);
+    }
   }
 
   private final BlockWithLocations[] blocks;
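
With storage types now carried alongside the datanode UUIDs and storage IDs, the rewritten toString renders each replica as [type]storageID@datanodeUuid. A small illustrative check; the IDs mirror the values used in TestPBHelper later in this change, and the comment shows the list suffix appended after the block description:

    import org.apache.hadoop.hdfs.StorageType;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;

    public class BlockWithLocationsExample {
      public static void main(String[] args) {
        BlockWithLocations b = new BlockWithLocations(
            new Block(1, 0, 1),
            new String[]{"dn1", "dn2"},
            new String[]{"s1", "s2"},
            new StorageType[]{StorageType.DISK, StorageType.DISK});
        // toString now ends with: [[DISK]s1@dn1,[DISK]s2@dn2]
        System.out.println(b);
      }
    }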

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java

@@ -29,8 +29,8 @@ import org.xml.sax.ContentHandler;
 import org.xml.sax.SAXException;
 import org.xml.sax.helpers.AttributesImpl;
 
-import com.sun.org.apache.xml.internal.serialize.OutputFormat;
-import com.sun.org.apache.xml.internal.serialize.XMLSerializer;
+import org.apache.xml.serialize.OutputFormat;
+import org.apache.xml.serialize.XMLSerializer;
 
 /**
  * An XmlEditsVisitor walks over an EditLog structure and writes out

+ 8 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java

@@ -37,7 +37,7 @@ import com.google.common.base.Preconditions;
 public class EnumCounters<E extends Enum<E>> {
   /** The class of the enum. */
   private final Class<E> enumClass;
-  /** The counter array, counters[i] corresponds to the enumConstants[i]. */
+  /** An array of longs corresponding to the enum type. */
   private final long[] counters;
 
   /**
@@ -75,6 +75,13 @@ public class EnumCounters<E extends Enum<E>> {
     }
   }
 
+  /** Reset all counters to zero. */
+  public final void reset() {
+    for(int i = 0; i < counters.length; i++) {
+      this.counters[i] = 0L;
+    }
+  }
+
   /** Add the given value to counter e. */
   public final void add(final E e, final long value) {
     counters[e.ordinal()] += value;

+ 128 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java

@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.util.Arrays;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Similar to {@link EnumCounters} except that the value type is double.
+ *
+ * @param <E> the enum type
+ */
+public class EnumDoubles<E extends Enum<E>> {
+  /** The class of the enum. */
+  private final Class<E> enumClass;
+  /** An array of doubles corresponding to the enum type. */
+  private final double[] doubles;
+
+  /**
+   * Construct doubles for the given enum constants.
+   * @param enumClass the enum class.
+   */
+  public EnumDoubles(final Class<E> enumClass) {
+    final E[] enumConstants = enumClass.getEnumConstants();
+    Preconditions.checkNotNull(enumConstants);
+    this.enumClass = enumClass;
+    this.doubles = new double[enumConstants.length];
+  }
+  
+  /** @return the value corresponding to e. */
+  public final double get(final E e) {
+    return doubles[e.ordinal()];
+  }
+
+  /** Negate all values. */
+  public final void negation() {
+    for(int i = 0; i < doubles.length; i++) {
+      doubles[i] = -doubles[i];
+    }
+  }
+  
+  /** Set e to the given value. */
+  public final void set(final E e, final double value) {
+    doubles[e.ordinal()] = value;
+  }
+
+  /** Set the values of this object to those of that object. */
+  public final void set(final EnumDoubles<E> that) {
+    for(int i = 0; i < doubles.length; i++) {
+      this.doubles[i] = that.doubles[i];
+    }
+  }
+
+  /** Reset all values to zero. */
+  public final void reset() {
+    for(int i = 0; i < doubles.length; i++) {
+      this.doubles[i] = 0.0;
+    }
+  }
+
+  /** Add the given value to e. */
+  public final void add(final E e, final double value) {
+    doubles[e.ordinal()] += value;
+  }
+
+  /** Add the values of that object to this. */
+  public final void add(final EnumDoubles<E> that) {
+    for(int i = 0; i < doubles.length; i++) {
+      this.doubles[i] += that.doubles[i];
+    }
+  }
+
+  /** Subtract the given value from e. */
+  public final void subtract(final E e, final double value) {
+    doubles[e.ordinal()] -= value;
+  }
+
+  /** Subtract the values of that object from this object. */
+  public final void subtract(final EnumDoubles<E> that) {
+    for(int i = 0; i < doubles.length; i++) {
+      this.doubles[i] -= that.doubles[i];
+    }
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == this) {
+      return true;
+    } else if (!(obj instanceof EnumDoubles)) {
+      return false;
+    }
+    final EnumDoubles<?> that = (EnumDoubles<?>)obj;
+    return this.enumClass == that.enumClass
+        && Arrays.equals(this.doubles, that.doubles);
+  }
+
+  @Override
+  public int hashCode() {
+    return Arrays.hashCode(doubles);
+  }
+
+  @Override
+  public String toString() {
+    final E[] enumConstants = enumClass.getEnumConstants();
+    final StringBuilder b = new StringBuilder();
+    for(int i = 0; i < doubles.length; i++) {
+      final String name = enumConstants[i].name();
+      b.append(name).append("=").append(doubles[i]).append(", ");
+    }
+    return b.substring(0, b.length() - 2);
+  }
+}
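
A brief usage sketch of the new class; the enum here is hypothetical and used only for illustration:

    import org.apache.hadoop.hdfs.util.EnumDoubles;

    public class EnumDoublesExample {
      enum Fruit { APPLE, ORANGE } // hypothetical enum for illustration

      public static void main(String[] args) {
        EnumDoubles<Fruit> d = new EnumDoubles<Fruit>(Fruit.class);
        d.add(Fruit.APPLE, 1.5);
        d.add(Fruit.ORANGE, 2.0);
        d.subtract(Fruit.ORANGE, 0.5);
        System.out.println(d);                   // APPLE=1.5, ORANGE=1.5
        d.reset();                               // all values back to 0.0
        System.out.println(d.get(Fruit.APPLE));  // 0.0
      }
    }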

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -1356,6 +1357,12 @@ public class WebHdfsFileSystem extends FileSystem
     }.run();
   }
 
+  @Override
+  public void access(final Path path, final FsAction mode) throws IOException {
+    final HttpOpParam.Op op = GetOpParam.Op.CHECKACCESS;
+    new FsPathRunner(op, path, new FsActionParam(mode)).run();
+  }
+
   @Override
   public ContentSummary getContentSummary(final Path p) throws IOException {
     statistics.incrementReadOps(1);

+ 58 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java

@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.fs.permission.FsAction;
+
+import java.util.regex.Pattern;
+
+/** {@link FsAction} Parameter */
+public class FsActionParam extends StringParam {
+
+  /** Parameter name. */
+  public static final String NAME = "fsaction";
+
+  /** Default parameter value. */
+  public static final String DEFAULT = NULL;
+
+  private static final String FS_ACTION_PATTERN = "[rwx-]{3}";
+
+  private static final Domain DOMAIN = new Domain(NAME,
+      Pattern.compile(FS_ACTION_PATTERN));
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public FsActionParam(final String str) {
+    super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
+  }
+
+  /**
+   * Constructor.
+   * @param value the parameter value.
+   */
+  public FsActionParam(final FsAction value) {
+    super(DOMAIN, value == null? null: value.SYMBOL);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
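
A short sketch of how the parameter round-trips a permission triple; it relies on FsAction's SYMBOL field and the FsAction.getFsAction parser used in NamenodeWebHdfsMethods above:

    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.hdfs.web.resources.FsActionParam;

    public class FsActionParamExample {
      public static void main(String[] args) {
        // Enum to query-string value, e.g. READ_WRITE -> "rw-".
        FsActionParam p = new FsActionParam(FsAction.READ_WRITE);
        System.out.println(p.getName() + "=" + p.getValue()); // fsaction=rw-

        // Query-string value back to the enum, validated against [rwx-]{3}.
        FsAction mode = FsAction.getFsAction(new FsActionParam("r--").getValue());
        System.out.println(mode); // READ
      }
    }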

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java

@@ -39,7 +39,9 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     GETXATTRS(false, HttpURLConnection.HTTP_OK),
     LISTXATTRS(false, HttpURLConnection.HTTP_OK),
 
-    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
+    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED),
+
+    CHECKACCESS(false, HttpURLConnection.HTTP_OK);
 
     final boolean redirect;
     final int expectedHttpResponseCode;

+ 10 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto

@@ -654,6 +654,14 @@ message DeleteSnapshotRequestProto {
 message DeleteSnapshotResponseProto { // void response
 }
 
+message CheckAccessRequestProto {
+  required string path = 1;
+  required AclEntryProto.FsActionProto mode = 2;
+}
+
+message CheckAccessResponseProto { // void response
+}
+
 service ClientNamenodeProtocol {
   rpc getBlockLocations(GetBlockLocationsRequestProto)
       returns(GetBlockLocationsResponseProto);
@@ -783,4 +791,6 @@ service ClientNamenodeProtocol {
       returns(ListXAttrsResponseProto);
   rpc removeXAttr(RemoveXAttrRequestProto)
       returns(RemoveXAttrResponseProto);
+  rpc checkAccess(CheckAccessRequestProto)
+      returns(CheckAccessResponseProto);
 }
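
For reference, the new RPC carries just a path and the requested action. A hedged sketch of building the request with the generated protobuf API; the outer class and enum names assume the usual protoc output for these .proto files:

    import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;

    public class CheckAccessRequestExample {
      public static void main(String[] args) {
        CheckAccessRequestProto req = CheckAccessRequestProto.newBuilder()
            .setPath("/some/path")        // illustrative path
            .setMode(FsActionProto.READ)  // requested access
            .build();
        System.out.println(req);
      }
    }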

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto

@@ -405,6 +405,7 @@ message BlockWithLocationsProto {
   required BlockProto block = 1;   // Block
   repeated string datanodeUuids = 2; // Datanodes with replicas of the block
   repeated string storageUuids = 3;  // Storages with replicas of the block
+  repeated StorageTypeProto storageTypes = 4;
 }
 
 /**

+ 44 - 0
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm

@@ -82,6 +82,9 @@ WebHDFS REST API
     * {{{List all XAttrs}<<<LISTXATTRS>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listXAttrs)
 
+    * {{{Check access}<<<CHECKACCESS>>>}}
+        (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access)
+
   * HTTP PUT
 
     * {{{Create and Write to a File}<<<CREATE>>>}}
@@ -927,6 +930,28 @@ Transfer-Encoding: chunked
   {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus
 
 
+** {Check access}
+
+  * Submit an HTTP GET request.
+  
++---------------------------------
+curl -i -X GET "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CHECKACCESS
+                              &fsaction=<FSACTION>"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access
+    
+
 * {Extended Attributes(XAttrs) Operations}
 
 ** {Set XAttr}
@@ -2166,6 +2191,25 @@ var tokenProperties =
   {{Proxy Users}}
 
 
+** {Fs Action}
+
+*----------------+-------------------------------------------------------------------+
+|| Name          | <<<fsaction>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description   | The file system permission action (read/write/execute) to check |
+*----------------+-------------------------------------------------------------------+
+|| Type          | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | null (an invalid value) |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values  | Strings matching regex pattern \"[rwx-]\{3\}\" |
+*----------------+-------------------------------------------------------------------+
+|| Syntax        | \"[rwx-]\{3\}\" |
+*----------------+-------------------------------------------------------------------+
+
+  See also:
+  {{{Check access}<<<CHECKACCESS>>>}}
+
 ** {Group}
 
 *----------------+-------------------------------------------------------------------+

+ 18 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java

@@ -47,7 +47,6 @@ import org.mockito.Mockito;
 public class TestGenericRefresh {
   private static MiniDFSCluster cluster;
   private static Configuration config;
-  private static final int NNPort = 54222;
 
   private static RefreshHandler firstHandler;
   private static RefreshHandler secondHandler;
@@ -57,8 +56,8 @@ public class TestGenericRefresh {
     config = new Configuration();
     config.set("hadoop.security.authorization", "true");
 
-    FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort);
-    cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build();
+    FileSystem.setDefaultUri(config, "hdfs://localhost:0");
+    cluster = new MiniDFSCluster.Builder(config).build();
     cluster.waitActive();
   }
 
@@ -103,7 +102,8 @@ public class TestGenericRefresh {
   @Test
   public void testInvalidIdentifier() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
-    String [] args = new String[]{"-refresh", "localhost:" + NNPort, "unregisteredIdentity"};
+    String [] args = new String[]{"-refresh", "localhost:" + 
+        cluster.getNameNodePort(), "unregisteredIdentity"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
   }
@@ -111,7 +111,8 @@ public class TestGenericRefresh {
   @Test
   public void testValidIdentifier() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
+    String[] args = new String[]{"-refresh",
+        "localhost:" + cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should succeed", 0, exitCode);
 
@@ -124,11 +125,13 @@ public class TestGenericRefresh {
   @Test
   public void testVariableArgs() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "secondHandler", "one"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should return 2", 2, exitCode);
 
-    exitCode = admin.run(new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one", "two"});
+    exitCode = admin.run(new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "secondHandler", "one", "two"});
     assertEquals("DFSAdmin should now return 3", 3, exitCode);
 
     Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"});
@@ -141,7 +144,8 @@ public class TestGenericRefresh {
 
     // And now this should fail
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should return -1", -1, exitCode);
   }
@@ -161,7 +165,8 @@ public class TestGenericRefresh {
 
     // this should trigger both
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "sharedId", "one"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "sharedId", "one"};
     int exitCode = admin.run(args);
     assertEquals(-1, exitCode); // -1 because one of the responses is unregistered
 
@@ -189,7 +194,8 @@ public class TestGenericRefresh {
 
     // We refresh both
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "shared"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "shared"};
     int exitCode = admin.run(args);
     assertEquals(-1, exitCode); // We get -1 because of our logic for melding non-zero return codes
 
@@ -215,7 +221,8 @@ public class TestGenericRefresh {
     RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);
 
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "exceptional"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "exceptional"};
     int exitCode = admin.run(args);
     assertEquals(-1, exitCode); // Exceptions result in a -1
 

+ 32 - 12
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java

@@ -24,6 +24,8 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.net.BindException;
+import java.util.Random;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 
@@ -42,24 +44,42 @@ public class TestRefreshCallQueue {
   private FileSystem fs;
   static int mockQueueConstructions;
   static int mockQueuePuts;
-  private static final int NNPort = 54222;
-  private static String CALLQUEUE_CONFIG_KEY = "ipc." + NNPort + ".callqueue.impl";
+  private String callQueueConfigKey = "";
+  private final Random rand = new Random();
 
   @Before
   public void setUp() throws Exception {
     // We want to count additional events, so we reset here
     mockQueueConstructions = 0;
     mockQueuePuts = 0;
-
-    config = new Configuration();
-    config.setClass(CALLQUEUE_CONFIG_KEY,
-        MockCallQueue.class, BlockingQueue.class);
-    config.set("hadoop.security.authorization", "true");
-
-    FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort);
-    fs = FileSystem.get(config);
-    cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build();
-    cluster.waitActive();
+    int portRetries = 5;
+    int nnPort;
+
+    for (; portRetries > 0; --portRetries) {
+      // Pick a random port in the range [30000,60000).
+      nnPort = 30000 + rand.nextInt(30000);  
+      config = new Configuration();
+      callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl";
+      config.setClass(callQueueConfigKey,
+          MockCallQueue.class, BlockingQueue.class);
+      config.set("hadoop.security.authorization", "true");
+
+      FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort);
+      fs = FileSystem.get(config);
+      
+      try {
+        cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build();
+        cluster.waitActive();
+        break;
+      } catch (BindException be) {
+        // Retry with a different port number.
+      }
+    }
+    
+    if (portRetries == 0) {
+      // Bail if we get very unlucky with our choice of ports.
+      fail("Failed to pick an ephemeral port for the NameNode RPC server.");
+    }
   }
 
   @After

+ 151 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemWithXAttrs.java

@@ -0,0 +1,151 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Verify XAttrs through ViewFileSystem functionality.
+ */
+public class TestViewFileSystemWithXAttrs {
+
+  private static MiniDFSCluster cluster;
+  private static Configuration clusterConf = new Configuration();
+  private static FileSystem fHdfs;
+  private static FileSystem fHdfs2;
+  private FileSystem fsView;
+  private Configuration fsViewConf;
+  private FileSystem fsTarget, fsTarget2;
+  private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
+  private FileSystemTestHelper fileSystemTestHelper =
+      new FileSystemTestHelper("/tmp/TestViewFileSystemWithXAttrs");
+
+  // XAttrs
+  protected static final String name1 = "user.a1";
+  protected static final byte[] value1 = {0x31, 0x32, 0x33};
+  protected static final String name2 = "user.a2";
+  protected static final byte[] value2 = {0x37, 0x38, 0x39};
+
+  @BeforeClass
+  public static void clusterSetupAtBeginning() throws IOException {
+    cluster = new MiniDFSCluster.Builder(clusterConf)
+        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
+        .numDataNodes(2)
+        .build();
+    cluster.waitClusterUp();
+
+    fHdfs = cluster.getFileSystem(0);
+    fHdfs2 = cluster.getFileSystem(1);
+  }
+
+  @AfterClass
+  public static void clusterShutdownAtEnd() throws Exception {
+    cluster.shutdown();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    fsTarget = fHdfs;
+    fsTarget2 = fHdfs2;
+    targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
+    targetTestRoot2 = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget2);
+
+    fsTarget.delete(targetTestRoot, true);
+    fsTarget2.delete(targetTestRoot2, true);
+    fsTarget.mkdirs(targetTestRoot);
+    fsTarget2.mkdirs(targetTestRoot2);
+
+    fsViewConf = ViewFileSystemTestSetup.createConfig();
+    setupMountPoints();
+    fsView = FileSystem.get(FsConstants.VIEWFS_URI, fsViewConf);
+  }
+
+  private void setupMountPoints() {
+    mountOnNn1 = new Path("/mountOnNn1");
+    mountOnNn2 = new Path("/mountOnNn2");
+    ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(),
+        targetTestRoot.toUri());
+    ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(),
+        targetTestRoot2.toUri());
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
+    fsTarget2.delete(fileSystemTestHelper.getTestRootPath(fsTarget2), true);
+  }
+
+  /**
+   * Verify a ViewFileSystem wrapped over multiple federated NameNodes will
+   * dispatch the XAttr operations to the correct NameNode.
+   */
+  @Test
+  public void testXAttrOnMountEntry() throws Exception {
+    // Set XAttrs on the first namespace and verify they are correct
+    fsView.setXAttr(mountOnNn1, name1, value1);
+    fsView.setXAttr(mountOnNn1, name2, value2);
+    assertEquals(2, fsView.getXAttrs(mountOnNn1).size());
+    assertArrayEquals(value1, fsView.getXAttr(mountOnNn1, name1));
+    assertArrayEquals(value2, fsView.getXAttr(mountOnNn1, name2));
+    // Double-check by getting the XAttrs using FileSystem
+    // instead of ViewFileSystem
+    assertArrayEquals(value1, fHdfs.getXAttr(targetTestRoot, name1));
+    assertArrayEquals(value2, fHdfs.getXAttr(targetTestRoot, name2));
+
+    // Paranoid check: verify the other namespace does not
+    // have XAttrs set on the same path.
+    assertEquals(0, fsView.getXAttrs(mountOnNn2).size());
+    assertEquals(0, fHdfs2.getXAttrs(targetTestRoot2).size());
+
+    // Remove the XAttr entries on the first namespace
+    fsView.removeXAttr(mountOnNn1, name1);
+    fsView.removeXAttr(mountOnNn1, name2);
+    assertEquals(0, fsView.getXAttrs(mountOnNn1).size());
+    assertEquals(0, fHdfs.getXAttrs(targetTestRoot).size());
+
+    // Now set XAttrs on the second namespace
+    fsView.setXAttr(mountOnNn2, name1, value1);
+    fsView.setXAttr(mountOnNn2, name2, value2);
+    assertEquals(2, fsView.getXAttrs(mountOnNn2).size());
+    assertArrayEquals(value1, fsView.getXAttr(mountOnNn2, name1));
+    assertArrayEquals(value2, fsView.getXAttr(mountOnNn2, name2));
+    assertArrayEquals(value1, fHdfs2.getXAttr(targetTestRoot2, name1));
+    assertArrayEquals(value2, fHdfs2.getXAttr(targetTestRoot2, name2));
+
+    fsView.removeXAttr(mountOnNn2, name1);
+    fsView.removeXAttr(mountOnNn2, name2);
+    assertEquals(0, fsView.getXAttrs(mountOnNn2).size());
+    assertEquals(0, fHdfs2.getXAttrs(targetTestRoot2).size());
+  }
+}

+ 148 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsWithXAttrs.java

@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileContextTestHelper;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Verify XAttrs through ViewFs functionality.
+ */
+public class TestViewFsWithXAttrs {
+
+  private static MiniDFSCluster cluster;
+  private static Configuration clusterConf = new Configuration();
+  private static FileContext fc, fc2;
+  private FileContext fcView, fcTarget, fcTarget2;
+  private Configuration fsViewConf;
+  private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
+  private FileContextTestHelper fileContextTestHelper =
+      new FileContextTestHelper("/tmp/TestViewFsWithXAttrs");
+
+  // XAttrs
+  protected static final String name1 = "user.a1";
+  protected static final byte[] value1 = {0x31, 0x32, 0x33};
+  protected static final String name2 = "user.a2";
+  protected static final byte[] value2 = {0x37, 0x38, 0x39};
+
+  @BeforeClass
+  public static void clusterSetupAtBeginning() throws IOException {
+    cluster = new MiniDFSCluster.Builder(clusterConf)
+        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
+        .numDataNodes(2)
+        .build();
+    cluster.waitClusterUp();
+
+    fc = FileContext.getFileContext(cluster.getURI(0), clusterConf);
+    fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
+  }
+
+  @AfterClass
+  public static void clusterShutdownAtEnd() throws Exception {
+    cluster.shutdown();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    fcTarget = fc;
+    fcTarget2 = fc2;
+    targetTestRoot = fileContextTestHelper.getAbsoluteTestRootPath(fc);
+    targetTestRoot2 = fileContextTestHelper.getAbsoluteTestRootPath(fc2);
+
+    fcTarget.delete(targetTestRoot, true);
+    fcTarget2.delete(targetTestRoot2, true);
+    fcTarget.mkdir(targetTestRoot, new FsPermission((short) 0750), true);
+    fcTarget2.mkdir(targetTestRoot2, new FsPermission((short) 0750), true);
+
+    fsViewConf = ViewFileSystemTestSetup.createConfig();
+    setupMountPoints();
+    fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, fsViewConf);
+  }
+
+  private void setupMountPoints() {
+    mountOnNn1 = new Path("/mountOnNn1");
+    mountOnNn2 = new Path("/mountOnNn2");
+    ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
+    ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
+    fcTarget2.delete(fileContextTestHelper.getTestRootPath(fcTarget2), true);
+  }
+
+  /**
+   * Verify a ViewFs wrapped over multiple federated NameNodes will
+   * dispatch the XAttr operations to the correct NameNode.
+   */
+  @Test
+  public void testXAttrOnMountEntry() throws Exception {
+    // Set XAttrs on the first namespace and verify they are correct
+    fcView.setXAttr(mountOnNn1, name1, value1);
+    fcView.setXAttr(mountOnNn1, name2, value2);
+    assertEquals(2, fcView.getXAttrs(mountOnNn1).size());
+    assertArrayEquals(value1, fcView.getXAttr(mountOnNn1, name1));
+    assertArrayEquals(value2, fcView.getXAttr(mountOnNn1, name2));
+    // Double-check by getting the XAttrs using FileContext
+    // instead of ViewFs
+    assertArrayEquals(value1, fc.getXAttr(targetTestRoot, name1));
+    assertArrayEquals(value2, fc.getXAttr(targetTestRoot, name2));
+
+    // Paranoid check: verify the other namespace does not
+    // have XAttrs set on the same path.
+    assertEquals(0, fcView.getXAttrs(mountOnNn2).size());
+    assertEquals(0, fc2.getXAttrs(targetTestRoot2).size());
+
+    // Remove the XAttr entries on the first namespace
+    fcView.removeXAttr(mountOnNn1, name1);
+    fcView.removeXAttr(mountOnNn1, name2);
+    assertEquals(0, fcView.getXAttrs(mountOnNn1).size());
+    assertEquals(0, fc.getXAttrs(targetTestRoot).size());
+
+    // Now set XAttrs on the second namespace
+    fcView.setXAttr(mountOnNn2, name1, value1);
+    fcView.setXAttr(mountOnNn2, name2, value2);
+    assertEquals(2, fcView.getXAttrs(mountOnNn2).size());
+    assertArrayEquals(value1, fcView.getXAttr(mountOnNn2, name1));
+    assertArrayEquals(value2, fcView.getXAttr(mountOnNn2, name2));
+    assertArrayEquals(value1, fc2.getXAttr(targetTestRoot2, name1));
+    assertArrayEquals(value2, fc2.getXAttr(targetTestRoot2, name2));
+
+    fcView.removeXAttr(mountOnNn2, name1);
+    fcView.removeXAttr(mountOnNn2, name2);
+    assertEquals(0, fcView.getXAttrs(mountOnNn2).size());
+    assertEquals(0, fc2.getXAttrs(targetTestRoot2).size());
+  }
+}

+ 77 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java

@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
@@ -36,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
@@ -421,6 +425,79 @@ public class TestDFSPermission {
     }
   }
 
+  @Test
+  public void testAccessOwner() throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p1 = new Path("/p1");
+    rootFs.mkdirs(p1);
+    rootFs.setOwner(p1, USER1_NAME, GROUP1_NAME);
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+    fs.setPermission(p1, new FsPermission((short) 0444));
+    fs.access(p1, FsAction.READ);
+    try {
+      fs.access(p1, FsAction.WRITE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+
+    Path badPath = new Path("/bad/bad");
+    try {
+      fs.access(badPath, FsAction.READ);
+      fail("The access call should have failed");
+    } catch (FileNotFoundException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testAccessGroupMember() throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p2 = new Path("/p2");
+    rootFs.mkdirs(p2);
+    rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME);
+    rootFs.setPermission(p2, new FsPermission((short) 0740));
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+    fs.access(p2, FsAction.READ);
+    try {
+      fs.access(p2, FsAction.EXECUTE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testAccessOthers() throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p3 = new Path("/p3");
+    rootFs.mkdirs(p3);
+    rootFs.setPermission(p3, new FsPermission((short) 0774));
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+    fs.access(p3, FsAction.READ);
+    try {
+      fs.access(p3, FsAction.READ_WRITE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+  }
+
   /* Check if namenode performs permission checking correctly 
    * for the given user for operations mkdir, open, setReplication, 
    * getFileInfo, isDirectory, exists, getContentLength, list, rename,

+ 22 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java

@@ -26,6 +26,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -36,6 +37,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -47,6 +49,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -297,7 +301,8 @@ public class TestSafeMode {
    * assert that they are either allowed or fail as expected.
    */
   @Test
-  public void testOperationsWhileInSafeMode() throws IOException {
+  public void testOperationsWhileInSafeMode() throws IOException,
+      InterruptedException {
     final Path file1 = new Path("/file1");
 
     assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
@@ -407,6 +412,22 @@ public class TestSafeMode {
       fail("getAclStatus failed while in SM");
     }
 
+    // Test access
+    UserGroupInformation ugiX = UserGroupInformation.createRemoteUser("userX");
+    FileSystem myfs = ugiX.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws IOException {
+        return FileSystem.get(conf);
+      }
+    });
+    myfs.access(file1, FsAction.READ);
+    try {
+      myfs.access(file1, FsAction.WRITE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+
     assertFalse("Could not leave SM",
         dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java

@@ -184,8 +184,10 @@ public class TestPBHelper {
   private static BlockWithLocations getBlockWithLocations(int bid) {
     final String[] datanodeUuids = {"dn1", "dn2", "dn3"};
     final String[] storageIDs = {"s1", "s2", "s3"};
+    final StorageType[] storageTypes = {
+        StorageType.DISK, StorageType.DISK, StorageType.DISK};
     return new BlockWithLocations(new Block(bid, 0, 1),
-        datanodeUuids, storageIDs);
+        datanodeUuids, storageIDs, storageTypes);
   }
 
   private void compare(BlockWithLocations locs1, BlockWithLocations locs2) {

+ 445 - 20
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

@@ -18,17 +18,23 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.IOException;
+import java.io.PrintWriter;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Random;
+import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -48,6 +54,8 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.balancer.Balancer.Cli;
+import org.apache.hadoop.hdfs.server.balancer.Balancer.Parameters;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
@@ -255,6 +263,18 @@ public class TestBalancer {
       }
     }
   }
+
+  /**
+   * Wait until balanced: each datanode's utilization is within
+   * BALANCE_ALLOWED_VARIANCE of the average.
+   * @throws IOException
+   * @throws TimeoutException
+   */
+  static void waitForBalancer(long totalUsedSpace, long totalCapacity,
+      ClientProtocol client, MiniDFSCluster cluster, Balancer.Parameters p)
+  throws IOException, TimeoutException {
+    waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, 0);
+  }
   
   /**
    * Wait until balanced: each datanode gives utilization within 
@@ -263,11 +283,17 @@ public class TestBalancer {
    * @throws TimeoutException
    */
   static void waitForBalancer(long totalUsedSpace, long totalCapacity,
-      ClientProtocol client, MiniDFSCluster cluster)
-  throws IOException, TimeoutException {
+      ClientProtocol client, MiniDFSCluster cluster, Balancer.Parameters p,
+      int expectedExcludedNodes) throws IOException, TimeoutException {
     long timeout = TIMEOUT;
     long failtime = (timeout <= 0L) ? Long.MAX_VALUE
         : Time.now() + timeout;
+    if (!p.nodesToBeIncluded.isEmpty()) {
+      totalCapacity = p.nodesToBeIncluded.size() * CAPACITY;
+    }
+    if (!p.nodesToBeExcluded.isEmpty()) {
+      totalCapacity -= p.nodesToBeExcluded.size() * CAPACITY;
+    }
     final double avgUtilization = ((double)totalUsedSpace) / totalCapacity;
     boolean balanced;
     do {
@@ -275,9 +301,20 @@ public class TestBalancer {
           client.getDatanodeReport(DatanodeReportType.ALL);
       assertEquals(datanodeReport.length, cluster.getDataNodes().size());
       balanced = true;
+      int actualExcludedNodeCount = 0;
       for (DatanodeInfo datanode : datanodeReport) {
         double nodeUtilization = ((double)datanode.getDfsUsed())
             / datanode.getCapacity();
+        if (Balancer.Util.shouldBeExcluded(p.nodesToBeExcluded, datanode)) {
+          assertTrue(nodeUtilization == 0);
+          actualExcludedNodeCount++;
+          continue;
+        }
+        if (!Balancer.Util.shouldBeIncluded(p.nodesToBeIncluded, datanode)) {
+          assertTrue(nodeUtilization == 0);
+          actualExcludedNodeCount++;
+          continue;
+        }
         if (Math.abs(avgUtilization - nodeUtilization) > BALANCE_ALLOWED_VARIANCE) {
           balanced = false;
           if (Time.now() > failtime) {
@@ -294,6 +331,7 @@ public class TestBalancer {
           break;
         }
       }
+      assertEquals(expectedExcludedNodes, actualExcludedNodeCount);
     } while (!balanced);
   }
 
@@ -307,22 +345,118 @@ public class TestBalancer {
     }
     return b.append("]").toString();
   }
-  /** This test start a cluster with specified number of nodes, 
+  /**
+   * Class which contains information about the
+   * new nodes to be added to the cluster for balancing.
+   */
+  static abstract class NewNodeInfo {
+
+    Set<String> nodesToBeExcluded = new HashSet<String>();
+    Set<String> nodesToBeIncluded = new HashSet<String>();
+
+    abstract String[] getNames();
+    abstract int getNumberofNewNodes();
+    abstract int getNumberofIncludeNodes();
+    abstract int getNumberofExcludeNodes();
+
+    public Set<String> getNodesToBeIncluded() {
+      return nodesToBeIncluded;
+    }
+    public Set<String> getNodesToBeExcluded() {
+      return nodesToBeExcluded;
+    }
+  }
+
+  /**
+   * The host names of the new nodes are specified.
+   */
+  static class HostNameBasedNodes extends NewNodeInfo {
+    String[] hostnames;
+
+    public HostNameBasedNodes(String[] hostnames,
+        Set<String> nodesToBeExcluded, Set<String> nodesToBeIncluded) {
+      this.hostnames = hostnames;
+      this.nodesToBeExcluded = nodesToBeExcluded;
+      this.nodesToBeIncluded = nodesToBeIncluded;
+    }
+
+    @Override
+    String[] getNames() {
+      return hostnames;
+    }
+    @Override
+    int getNumberofNewNodes() {
+      return hostnames.length;
+    }
+    @Override
+    int getNumberofIncludeNodes() {
+      return nodesToBeIncluded.size();
+    }
+    @Override
+    int getNumberofExcludeNodes() {
+      return nodesToBeExcluded.size();
+    }
+  }
+
+  /**
+   * Only the number of data nodes to be started is specified.
+   * The data nodes will have the same host name but different port numbers.
+   */
+  static class PortNumberBasedNodes extends NewNodeInfo {
+    int newNodes;
+    int excludeNodes;
+    int includeNodes;
+
+    public PortNumberBasedNodes(int newNodes, int excludeNodes, int includeNodes) {
+      this.newNodes = newNodes;
+      this.excludeNodes = excludeNodes;
+      this.includeNodes = includeNodes;
+    }
+
+    @Override
+    String[] getNames() {
+      return null;
+    }
+    @Override
+    int getNumberofNewNodes() {
+      return newNodes;
+    }
+    @Override
+    int getNumberofIncludeNodes() {
+      return includeNodes;
+    }
+    @Override
+    int getNumberofExcludeNodes() {
+      return excludeNodes;
+    }
+  }
+
+  private void doTest(Configuration conf, long[] capacities, String[] racks,
+      long newCapacity, String newRack, boolean useTool) throws Exception {
+    doTest(conf, capacities, racks, newCapacity, newRack, null, useTool, false);
+  }
+
+  /** This test starts a cluster with a specified number of nodes,
    * and fills it to be 30% full (with a single file replicated identically
    * to all datanodes);
    * It then adds one new empty node and starts balancing.
-   * 
+   *
    * @param conf - configuration
    * @param capacities - array of capacities of original nodes in cluster
    * @param racks - array of racks for original nodes in cluster
    * @param newCapacity - new node's capacity
    * @param newRack - new node's rack
+   * @param nodes - information about new nodes to be started.
    * @param useTool - if true run test via Cli with command-line argument 
    *   parsing, etc.   Otherwise invoke balancer API directly.
+   * @param useFile - if true, the hosts to be included or excluded will be stored in a
+   *   file and later read back from it.
    * @throws Exception
    */
-  private void doTest(Configuration conf, long[] capacities, String[] racks, 
-      long newCapacity, String newRack, boolean useTool) throws Exception {
+  private void doTest(Configuration conf, long[] capacities,
+      String[] racks, long newCapacity, String newRack, NewNodeInfo nodes,
+      boolean useTool, boolean useFile) throws Exception {
     LOG.info("capacities = " +  long2String(capacities)); 
     LOG.info("racks      = " +  Arrays.asList(racks)); 
     LOG.info("newCapacity= " +  newCapacity); 
@@ -346,17 +480,75 @@ public class TestBalancer {
       long totalUsedSpace = totalCapacity*3/10;
       createFile(cluster, filePath, totalUsedSpace / numOfDatanodes,
           (short) numOfDatanodes, 0);
-      // start up an empty node with the same capacity and on the same rack
-      cluster.startDataNodes(conf, 1, true, null,
-          new String[]{newRack}, new long[]{newCapacity});
 
-      totalCapacity += newCapacity;
+      if (nodes == null) { // there is no specification of new nodes.
+        // start up an empty node with the same capacity and on the same rack
+        cluster.startDataNodes(conf, 1, true, null,
+            new String[]{newRack}, null,new long[]{newCapacity});
+        totalCapacity += newCapacity;
+      } else {
+        //if running a test with "include list", include original nodes as well
+        if (nodes.getNumberofIncludeNodes()>0) {
+          for (DataNode dn: cluster.getDataNodes())
+            nodes.getNodesToBeIncluded().add(dn.getDatanodeId().getHostName());
+        }
+        String[] newRacks = new String[nodes.getNumberofNewNodes()];
+        long[] newCapacities = new long[nodes.getNumberofNewNodes()];
+        for (int i=0; i < nodes.getNumberofNewNodes(); i++) {
+          newRacks[i] = newRack;
+          newCapacities[i] = newCapacity;
+        }
+        // if host names are specified for the new nodes to be created.
+        if (nodes.getNames() != null) {
+          cluster.startDataNodes(conf, nodes.getNumberofNewNodes(), true, null,
+              newRacks, nodes.getNames(), newCapacities);
+          totalCapacity += newCapacity*nodes.getNumberofNewNodes();
+        } else {  // host names are not specified
+          cluster.startDataNodes(conf, nodes.getNumberofNewNodes(), true, null,
+              newRacks, null, newCapacities);
+          totalCapacity += newCapacity*nodes.getNumberofNewNodes();
+          //populate the include nodes
+          if (nodes.getNumberofIncludeNodes() > 0) {
+            int totalNodes = cluster.getDataNodes().size();
+            for (int i=0; i < nodes.getNumberofIncludeNodes(); i++) {
+              nodes.getNodesToBeIncluded().add(cluster.getDataNodes().get(
+                  totalNodes-1-i).getDatanodeId().getXferAddr());
+            }
+          }
+          //populate the exclude nodes
+          if (nodes.getNumberofExcludeNodes() > 0) {
+            int totalNodes = cluster.getDataNodes().size();
+            for (int i=0; i < nodes.getNumberofExcludeNodes(); i++) {
+              nodes.getNodesToBeExcluded().add(cluster.getDataNodes().get(
+                  totalNodes-1-i).getDatanodeId().getXferAddr());
+            }
+          }
+        }
+      }
+      // run balancer and validate results
+      Balancer.Parameters p = Balancer.Parameters.DEFAULT;
+      if (nodes != null) {
+        p = new Balancer.Parameters(
+            Balancer.Parameters.DEFAULT.policy,
+            Balancer.Parameters.DEFAULT.threshold,
+            nodes.getNodesToBeExcluded(), nodes.getNodesToBeIncluded());
+      }
+
+      int expectedExcludedNodes = 0;
+      if (nodes != null) {
+        if (!nodes.getNodesToBeExcluded().isEmpty()) {
+          expectedExcludedNodes = nodes.getNodesToBeExcluded().size();
+        } else if (!nodes.getNodesToBeIncluded().isEmpty()) {
+          expectedExcludedNodes =
+              cluster.getDataNodes().size() - nodes.getNodesToBeIncluded().size();
+        }
+      }
 
       // run balancer and validate results
       if (useTool) {
-        runBalancerCli(conf, totalUsedSpace, totalCapacity);
+        runBalancerCli(conf, totalUsedSpace, totalCapacity, p, useFile, expectedExcludedNodes);
       } else {
-        runBalancer(conf, totalUsedSpace, totalCapacity);
+        runBalancer(conf, totalUsedSpace, totalCapacity, p, expectedExcludedNodes);
       }
     } finally {
       cluster.shutdown();
@@ -365,11 +557,17 @@ public class TestBalancer {
 
   private void runBalancer(Configuration conf,
       long totalUsedSpace, long totalCapacity) throws Exception {
+    runBalancer(conf, totalUsedSpace, totalCapacity, Balancer.Parameters.DEFAULT, 0);
+  }
+
+  private void runBalancer(Configuration conf,
+     long totalUsedSpace, long totalCapacity, Balancer.Parameters p,
+     int excludedNodes) throws Exception {
     waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
 
     // start rebalancing
     Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
-    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
+    final int r = Balancer.run(namenodes, p, conf);
     if (conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, 
         DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT) ==0) {
       assertEquals(Balancer.ReturnStatus.NO_MOVE_PROGRESS.code, r);
@@ -379,22 +577,66 @@ public class TestBalancer {
     }
     waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
     LOG.info("Rebalancing with default ctor.");
-    waitForBalancer(totalUsedSpace, totalCapacity, client, cluster);
+    waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, excludedNodes);
   }
-  
+
   private void runBalancerCli(Configuration conf,
-      long totalUsedSpace, long totalCapacity) throws Exception {
+      long totalUsedSpace, long totalCapacity,
+      Balancer.Parameters p, boolean useFile, int expectedExcludedNodes) throws Exception {
     waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
+    List <String> args = new ArrayList<String>();
+    args.add("-policy");
+    args.add("datanode");
+
+    File excludeHostsFile = null;
+    if (!p.nodesToBeExcluded.isEmpty()) {
+      args.add("-exclude");
+      if (useFile) {
+        excludeHostsFile = new File ("exclude-hosts-file");
+        PrintWriter pw = new PrintWriter(excludeHostsFile);
+        for (String host: p.nodesToBeExcluded) {
+          pw.write( host + "\n");
+        }
+        pw.close();
+        args.add("-f");
+        args.add("exclude-hosts-file");
+      } else {
+        args.add(StringUtils.join(p.nodesToBeExcluded, ','));
+      }
+    }
+
+    File includeHostsFile = null;
+    if (!p.nodesToBeIncluded.isEmpty()) {
+      args.add("-include");
+      if (useFile) {
+        includeHostsFile = new File ("include-hosts-file");
+        PrintWriter pw = new PrintWriter(includeHostsFile);
+        for (String host: p.nodesToBeIncluded){
+          pw.write( host + "\n");
+        }
+        pw.close();
+        args.add("-f");
+        args.add("include-hosts-file");
+      } else {
+        args.add(StringUtils.join(p.nodesToBeIncluded, ','));
+      }
+    }
 
-    final String[] args = { "-policy", "datanode" };
     final Tool tool = new Cli();    
     tool.setConf(conf);
-    final int r = tool.run(args); // start rebalancing
+    final int r = tool.run(args.toArray(new String[0])); // start rebalancing
     
     assertEquals("Tools should exit 0 on success", 0, r);
     waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
     LOG.info("Rebalancing with default ctor.");
-    waitForBalancer(totalUsedSpace, totalCapacity, client, cluster);
+    waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, expectedExcludedNodes);
+
+    if (excludeHostsFile != null && excludeHostsFile.exists()) {
+      excludeHostsFile.delete();
+    }
+    if (includeHostsFile != null && includeHostsFile.exists()) {
+      includeHostsFile.delete();
+    }
   }
   
   /** one-node cluster test*/
@@ -440,7 +682,7 @@ public class TestBalancer {
     }
   }
   
-  /** Test a cluster with even distribution, 
+  /** Test a cluster with even distribution,
    * then a new empty node is added to the cluster*/
   @Test(timeout=100000)
   public void testBalancer0() throws Exception {
@@ -554,7 +796,13 @@ public class TestBalancer {
     } catch (IllegalArgumentException e) {
 
     }
+    parameters = new String[] {"-include",  "testnode1", "-exclude", "testnode2"};
+    try {
+      Balancer.Cli.parse(parameters);
+      fail("IllegalArgumentException is expected when both -exclude and -include are specified");
+    } catch (IllegalArgumentException e) {
 
+    }
   }
 
   /**
@@ -569,6 +817,183 @@ public class TestBalancer {
     oneNodeTest(conf, true);
   }
   
+  /**
+   * Test a cluster with even distribution,
+   * then three nodes are added to the cluster,
+   * runs balancer with two of the nodes in the exclude list
+   */
+  @Test(timeout=100000)
+  public void testBalancerWithExcludeList() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    Set<String> excludeHosts = new HashSet<String>();
+    excludeHosts.add( "datanodeY");
+    excludeHosts.add( "datanodeZ");
+    doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
+        new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"},
+        excludeHosts, Parameters.DEFAULT.nodesToBeIncluded), false, false);
+  }
+
+  /**
+   * Test a cluster with even distribution,
+   * then three nodes are added to the cluster,
+   * runs balancer with two of the nodes in the exclude list
+   */
+  @Test(timeout=100000)
+  public void testBalancerWithExcludeListWithPorts() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1},
+        CAPACITY, RACK2, new PortNumberBasedNodes(3, 2, 0), false, false);
+  }
+
+  /**
+   * Test a cluster with even distribution,
+   * then three nodes are added to the cluster,
+   * runs balancer with two of the nodes in the exclude list
+   */
+  @Test(timeout=100000)
+  public void testBalancerCliWithExcludeList() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    Set<String> excludeHosts = new HashSet<String>();
+    excludeHosts.add( "datanodeY");
+    excludeHosts.add( "datanodeZ");
+    doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
+      new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"}, excludeHosts,
+      Parameters.DEFAULT.nodesToBeIncluded), true, false);
+  }
+
+  /**
+   * Test a cluster with even distribution,
+   * then three nodes are added to the cluster,
+   * runs balancer with two of the nodes in the exclude list
+   */
+  @Test(timeout=100000)
+  public void testBalancerCliWithExcludeListWithPorts() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1},
+        CAPACITY, RACK2, new PortNumberBasedNodes(3, 2, 0), true, false);
+  }
+
+  /**
+   * Test a cluster with even distribution,
+   * then three nodes are added to the cluster,
+   * runs balancer with two of the nodes in the exclude list in a file
+   */
+  @Test(timeout=100000)
+  public void testBalancerCliWithExcludeListInAFile() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    Set<String> excludeHosts = new HashSet<String>();
+    excludeHosts.add( "datanodeY");
+    excludeHosts.add( "datanodeZ");
+    doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
+        new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"},
+        excludeHosts, Parameters.DEFAULT.nodesToBeIncluded), true, true);
+  }
+
+  /**
+   * Test a cluster with even distribution,
+   * then three nodes are added to the cluster,
+   * runs balancer with two of the nodes in the exclude list in a file
+   */
+  @Test(timeout=100000)
+  public void testBalancerCliWithExcludeListWithPortsInAFile() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1},
+        CAPACITY, RACK2, new PortNumberBasedNodes(3, 2, 0), true, true);
+  }
+
+  /**
+   * Test a cluster with even distribution,
+   * then three nodes are added to the cluster,
+   * runs balancer with one of the nodes in the include list
+   */
+  @Test(timeout=100000)
+  public void testBalancerWithIncludeList() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    Set<String> includeHosts = new HashSet<String>();
+    includeHosts.add( "datanodeY");
+    doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
+        new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"},
+        Parameters.DEFAULT.nodesToBeExcluded, includeHosts), false, false);
+  }
+
+  /**
+   * Test a cluster with even distribution,
+   * then three nodes are added to the cluster,
+   * runs balancer with one of the nodes in the include list
+   */
+  @Test(timeout=100000)
+  public void testBalancerWithIncludeListWithPorts() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1},
+        CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), false, false);
+  }
+
+  /**
+   * Test a cluster with even distribution,
+   * then three nodes are added to the cluster,
+   * runs balancer with one of the nodes in the include list
+   */
+  @Test(timeout=100000)
+  public void testBalancerCliWithIncludeList() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    Set<String> includeHosts = new HashSet<String>();
+    includeHosts.add( "datanodeY");
+    doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
+        new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"},
+        Parameters.DEFAULT.nodesToBeExcluded, includeHosts), true, false);
+  }
+
+  /**
+   * Test a cluster with even distribution,
+   * then three nodes are added to the cluster,
+   * runs balancer with one of the nodes in the include list
+   */
+  @Test(timeout=100000)
+  public void testBalancerCliWithIncludeListWithPorts() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1},
+        CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), true, false);
+  }
+
+  /**
+   * Test a cluster with even distribution,
+   * then three nodes are added to the cluster,
+   * runs balancer with one of the nodes in the include list in a file
+   */
+  @Test(timeout=100000)
+  public void testBalancerCliWithIncludeListInAFile() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    Set<String> includeHosts = new HashSet<String>();
+    includeHosts.add( "datanodeY");
+    doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
+        new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"},
+        Parameters.DEFAULT.nodesToBeExcluded, includeHosts), true, true);
+  }
+
+  /**
+   * Test a cluster with even distribution,
+   * then three nodes are added to the cluster,
+   * runs balancer with one of the nodes in the include list in a file
+   */
+  @Test(timeout=100000)
+  public void testBalancerCliWithIncludeListWithPortsInAFile() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1},
+        CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), true, true);
+  }
+
   /**
    * @param args
    */
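
For context, the new -exclude/-include flags exercised above can be driven the same way outside the test harness. A minimal sketch, mirroring the argument list runBalancerCli builds; the class name and datanode host names are made up, and it lives in the balancer package because Balancer.Cli may not be public:

    package org.apache.hadoop.hdfs.server.balancer;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.util.Tool;

    public class BalancerExcludeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Same Tool entry point the test drives via new Cli().
        Tool balancer = new Balancer.Cli();
        balancer.setConf(conf);
        // Exclude two datanodes by hostname; with "-exclude -f <file>" the
        // list could come from a hosts file instead, as runBalancerCli shows.
        int rc = balancer.run(new String[] {
            "-policy", "datanode",
            "-exclude", "datanodeY,datanodeZ" });
        System.exit(rc);
      }
    }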

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java

@@ -97,10 +97,10 @@ public class TestBalancerWithHANameNodes {
       Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
       assertEquals(1, namenodes.size());
       assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
-      final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
+      final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
       assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
       TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client,
-          cluster);
+          cluster, Balancer.Parameters.DEFAULT);
     } finally {
       cluster.shutdown();
     }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java

@@ -159,7 +159,7 @@ public class TestBalancerWithMultipleNameNodes {
 
     // start rebalancing
     final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(s.conf);
-    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, s.conf);
+    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, s.conf);
     Assert.assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
 
     LOG.info("BALANCER 2");
@@ -195,7 +195,7 @@ public class TestBalancerWithMultipleNameNodes {
       balanced = true;
       for(int d = 0; d < used.length; d++) {
         final double p = used[d]*100.0/cap[d];
-        balanced = p <= avg + Balancer.Parameters.DEFALUT.threshold;
+        balanced = p <= avg + Balancer.Parameters.DEFAULT.threshold;
         if (!balanced) {
           if (i % 100 == 0) {
             LOG.warn("datanodes " + d + " is not yet balanced: "

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java

@@ -175,7 +175,7 @@ public class TestBalancerWithNodeGroup {
 
     // start rebalancing
     Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
-    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
+    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
     assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
 
     waitForHeartBeat(totalUsedSpace, totalCapacity);
@@ -189,7 +189,7 @@ public class TestBalancerWithNodeGroup {
 
     // start rebalancing
     Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
-    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
+    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
     Assert.assertTrue(r == Balancer.ReturnStatus.SUCCESS.code ||
         (r == Balancer.ReturnStatus.NO_MOVE_PROGRESS.code));
     waitForHeartBeat(totalUsedSpace, totalCapacity);

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java

@@ -590,7 +590,6 @@ public class TestBlockRecovery {
     Configuration conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, "1000");
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .nnTopology(MiniDFSNNTopology.simpleSingleNN(8020, 50070))
         .numDataNodes(1).build();
     try {
       cluster.waitClusterUp();

+ 28 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -1256,6 +1257,33 @@ public abstract class FSAclBaseTest {
     fsAsDiana.getAclStatus(bruceFile);
   }
 
+  @Test
+  public void testAccess() throws IOException, InterruptedException {
+    Path p1 = new Path("/p1");
+    fs.mkdirs(p1);
+    fs.setOwner(p1, BRUCE.getShortUserName(), "groupX");
+    fsAsBruce.setAcl(p1, Lists.newArrayList(
+        aclEntry(ACCESS, USER, READ),
+        aclEntry(ACCESS, USER, "bruce", READ),
+        aclEntry(ACCESS, GROUP, NONE),
+        aclEntry(ACCESS, OTHER, NONE)));
+    fsAsBruce.access(p1, FsAction.READ);
+    try {
+      fsAsBruce.access(p1, FsAction.WRITE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+
+    Path badPath = new Path("/bad/bad");
+    try {
+      fsAsBruce.access(badPath, FsAction.READ);
+      fail("The access call should have failed");
+    } catch (FileNotFoundException e) {
+      // expected
+    }
+  }
+
   /**
    * Creates a FileSystem for the super-user.
    *
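
The access() API exercised by testAccess above is also the intended client-side permission probe. A minimal sketch of typical usage, assuming the default FileSystem; the path and class name are placeholders:

    import java.io.FileNotFoundException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    public class AccessCheckSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/data/reports");  // placeholder path
        try {
          // Throws if the caller lacks WRITE permission or the path is
          // missing, matching the expectations in the test above.
          fs.access(p, FsAction.WRITE);
          System.out.println("write access granted");
        } catch (AccessControlException e) {
          System.out.println("write access denied");
        } catch (FileNotFoundException e) {
          System.out.println("path does not exist");
        }
      }
    }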

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java

@@ -45,6 +45,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -606,6 +607,7 @@ public class TestINodeFile {
         fs.getAclStatus(testFileInodePath);
         fs.getXAttrs(testFileInodePath);
         fs.listXAttrs(testFileInodePath);
+        fs.access(testFileInodePath, FsAction.READ_WRITE);
       }
       
       // symbolic link related tests

+ 9 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -674,6 +675,13 @@ public class TestAclWithSnapshot {
     } catch (AccessControlException e) {
       // expected
     }
+
+    try {
+      fs.access(pathToCheck, FsAction.READ);
+      fail("The access call should have failed for "+pathToCheck);
+    } catch (AccessControlException e) {
+      // expected
+    }
   }
 
   /**
@@ -689,6 +697,7 @@ public class TestAclWithSnapshot {
       UserGroupInformation user, Path pathToCheck) throws Exception {
     try {
       fs.listStatus(pathToCheck);
+      fs.access(pathToCheck, FsAction.READ);
     } catch (AccessControlException e) {
       fail("expected permission granted for user " + user + ", path = " +
         pathToCheck);

+ 33 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -49,6 +50,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Assert;
+import org.junit.Test;
 
 public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
   private static final Configuration conf = new Configuration();
@@ -530,4 +532,35 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
       }
     }
   }
+
+  @Test
+  public void testAccess() throws IOException, InterruptedException {
+    Path p1 = new Path("/pathX");
+    try {
+      UserGroupInformation ugi = UserGroupInformation.createUserForTesting("alpha",
+          new String[]{"beta"});
+      WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf,
+          WebHdfsFileSystem.SCHEME);
+
+      fs.mkdirs(p1);
+      fs.setPermission(p1, new FsPermission((short) 0444));
+      fs.access(p1, FsAction.READ);
+      try {
+        fs.access(p1, FsAction.WRITE);
+        fail("The access call should have failed.");
+      } catch (AccessControlException e) {
+        // expected
+      }
+
+      Path badPath = new Path("/bad");
+      try {
+        fs.access(badPath, FsAction.READ);
+        fail("The access call should have failed");
+      } catch (FileNotFoundException e) {
+        // expected
+      }
+    } finally {
+      fs.delete(p1, true);
+    }
+  }
 }
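
The same probe works over WebHDFS, since the contract test above goes through WebHdfsFileSystem. A minimal sketch; the NameNode HTTP address, path, and class name are placeholders:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;

    public class WebHdfsAccessSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // "namenode:50070" stands in for a real NameNode HTTP address.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode:50070/"), conf);
        // Translates to the new CHECKACCESS REST op; throws
        // AccessControlException or FileNotFoundException on failure.
        fs.access(new Path("/p1"), FsAction.READ);
        System.out.println("read access confirmed");
      }
    }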

+ 24 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java

@@ -31,6 +31,7 @@ import java.util.Arrays;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -40,6 +41,7 @@ import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.hdfs.web.resources.FsActionParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
@@ -283,6 +285,28 @@ public class TestWebHdfsUrl {
         },
         fileStatusUrl);    
   }
+
+  @Test(timeout=60000)
+  public void testCheckAccessUrl() throws IOException {
+    Configuration conf = new Configuration();
+
+    UserGroupInformation ugi =
+        UserGroupInformation.createRemoteUser("test-user");
+    UserGroupInformation.setLoginUser(ugi);
+
+    WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);
+    Path fsPath = new Path("/p1");
+
+    URL checkAccessUrl = webhdfs.toUrl(GetOpParam.Op.CHECKACCESS,
+        fsPath, new FsActionParam(FsAction.READ_WRITE));
+    checkQueryParams(
+        new String[]{
+            GetOpParam.Op.CHECKACCESS.toQueryString(),
+            new UserParam(ugi.getShortUserName()).toString(),
+            FsActionParam.NAME + "=" + FsAction.READ_WRITE.SYMBOL
+        },
+        checkAccessUrl);
+  }
   
   private void checkQueryParams(String[] expected, URL url) {
     Arrays.sort(expected);

+ 35 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermissionSymlinks.java

@@ -27,6 +27,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.io.FileNotFoundException;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 
@@ -39,6 +40,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestWrapper;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -393,4 +395,37 @@ public class TestPermissionSymlinks {
       GenericTestUtils.assertExceptionContains("Permission denied", e);
     }
   }
+
+  @Test
+  public void testAccess() throws Exception {
+    fs.setPermission(target, new FsPermission((short) 0002));
+    fs.setAcl(target, Arrays.asList(
+        aclEntry(ACCESS, USER, ALL),
+        aclEntry(ACCESS, GROUP, NONE),
+        aclEntry(ACCESS, USER, user.getShortUserName(), WRITE),
+        aclEntry(ACCESS, OTHER, WRITE)));
+    FileContext myfc = user.doAs(new PrivilegedExceptionAction<FileContext>() {
+      @Override
+      public FileContext run() throws IOException {
+        return FileContext.getFileContext(conf);
+      }
+    });
+
+    // Path to targetChild via symlink
+    myfc.access(link, FsAction.WRITE);
+    try {
+      myfc.access(link, FsAction.ALL);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+
+    Path badPath = new Path(link, "bad");
+    try {
+      myfc.access(badPath, FsAction.READ);
+      fail("The access call should have failed");
+    } catch (FileNotFoundException e) {
+      // expected
+    }
+  }
 }

+ 3 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -83,6 +83,9 @@ Trunk (Unreleased)
     MAPREDUCE-5912. Task.calculateOutputSize does not handle Windows files after
     MAPREDUCE-5196. (Remus Rusanu via cnauroth)
 
+    MAPREDUCE-6019. MapReduce changes for exposing YARN/MR endpoints on multiple
+    interfaces. (Craig Welch, Milan Potocnik, Arpit Agarwal via xgong)
+
   BUG FIXES
 
     MAPREDUCE-5714. Removed forceful JVM exit in shutDownJob.  

+ 3 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java

@@ -141,7 +141,9 @@ public class TaskAttemptListenerImpl extends CompositeService
       }
 
       server.start();
-      this.address = NetUtils.getConnectAddress(server);
+      this.address = NetUtils.createSocketAddrForHost(
+          context.getNMHostname(),
+          server.getListenerAddress().getPort());
     } catch (IOException e) {
       throw new YarnRuntimeException(e);
     }

+ 1 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java

@@ -66,4 +66,5 @@ public interface AppContext {
 
   boolean hasSuccessfullyUnregistered();
 
+  String getNMHostname();
 }

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java

@@ -1018,6 +1018,11 @@ public class MRAppMaster extends CompositeService {
     public void resetIsLastAMRetry() {
       isLastAMRetry = false;
     }
+
+    @Override
+    public String getNMHostname() {
+      return nmHost;
+    }
   }
 
   @SuppressWarnings("unchecked")

+ 2 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java

@@ -131,7 +131,8 @@ public class MRClientService extends AbstractService implements ClientService {
     }
 
     server.start();
-    this.bindAddress = NetUtils.getConnectAddress(server);
+    this.bindAddress = NetUtils.createSocketAddrForHost(appContext.getNMHostname(),
+        server.getListenerAddress().getPort());
     LOG.info("Instantiated MRClientService at " + this.bindAddress);
     try {
       // Explicitly disabling SSL for map reduce task as we can't allow MR users

+ 10 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java

@@ -61,6 +61,13 @@ public class TestTaskAttemptListenerImpl {
   public static class MockTaskAttemptListenerImpl
       extends TaskAttemptListenerImpl {
 
+    public MockTaskAttemptListenerImpl(AppContext context,
+        JobTokenSecretManager jobTokenSecretManager,
+        RMHeartbeatHandler rmHeartbeatHandler, AMPreemptionPolicy policy) {
+
+      super(context, jobTokenSecretManager, rmHeartbeatHandler, policy);
+    }
+
     public MockTaskAttemptListenerImpl(AppContext context,
         JobTokenSecretManager jobTokenSecretManager,
         RMHeartbeatHandler rmHeartbeatHandler,
@@ -210,7 +217,7 @@ public class TestTaskAttemptListenerImpl {
     when(appCtx.getEventHandler()).thenReturn(ea);
     CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
     policy.init(appCtx);
-    TaskAttemptListenerImpl listener = new TaskAttemptListenerImpl(
+    TaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl(
         appCtx, secret, rmHeartbeatHandler, policy) {
       @Override
       protected void registerHeartbeatHandler(Configuration conf) {
@@ -271,7 +278,7 @@ public class TestTaskAttemptListenerImpl {
     when(appCtx.getEventHandler()).thenReturn(ea);
     CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
     policy.init(appCtx);
-    TaskAttemptListenerImpl listener = new TaskAttemptListenerImpl(
+    TaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl(
         appCtx, secret, rmHeartbeatHandler, policy) {
       @Override
       protected void registerHeartbeatHandler(Configuration conf) {
@@ -326,7 +333,7 @@ public class TestTaskAttemptListenerImpl {
     when(appCtx.getEventHandler()).thenReturn(ea);
     CheckpointAMPreemptionPolicy policy = new CheckpointAMPreemptionPolicy();
     policy.init(appCtx);
-    TaskAttemptListenerImpl listener = new TaskAttemptListenerImpl(
+    TaskAttemptListenerImpl listener = new MockTaskAttemptListenerImpl(
         appCtx, secret, rmHeartbeatHandler, policy) {
       @Override
       protected void registerHeartbeatHandler(Configuration conf) {

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java

@@ -143,4 +143,9 @@ public class MockAppContext implements AppContext {
     return true;
   }
 
+  @Override
+  public String getNMHostname() {
+    // bogus - Not Required
+    return null;
+  }
 }

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java

@@ -879,5 +879,10 @@ public class TestRuntimeEstimators {
       return true;
     }
 
+    @Override
+    public String getNMHostname() {
+      // bogus - Not Required
+      return null;
+    }
   }
 }

+ 3 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java

@@ -38,7 +38,9 @@ public class JHAdminConfig {
   public static final int DEFAULT_MR_HISTORY_PORT = 10020;
   public static final String DEFAULT_MR_HISTORY_ADDRESS = "0.0.0.0:" +
       DEFAULT_MR_HISTORY_PORT;
-  
+  public static final String MR_HISTORY_BIND_HOST = MR_HISTORY_PREFIX
+      + "bind-host";
+
   /** The address of the History server admin interface. */
   public static final String JHS_ADMIN_ADDRESS = MR_HISTORY_PREFIX
       + "admin.address";

+ 7 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
 
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -105,11 +106,15 @@ public class MRWebAppUtil {
   
   public static InetSocketAddress getJHSWebBindAddress(Configuration conf) {
     if (httpPolicyInJHS == Policy.HTTPS_ONLY) {
-      return conf.getSocketAddr(JHAdminConfig.MR_HISTORY_WEBAPP_HTTPS_ADDRESS,
+      return conf.getSocketAddr(
+          JHAdminConfig.MR_HISTORY_BIND_HOST,
+          JHAdminConfig.MR_HISTORY_WEBAPP_HTTPS_ADDRESS,
           JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_HTTPS_ADDRESS,
           JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_HTTPS_PORT);
     } else {
-      return conf.getSocketAddr(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
+      return conf.getSocketAddr(
+          JHAdminConfig.MR_HISTORY_BIND_HOST,
+          JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
           JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS,
           JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT);
     }
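
The four-argument getSocketAddr overload used here resolves the host from the bind-host property when that property is set, and always takes the port from the address property (or its default), the behavior asserted in TestYarnConfiguration further down. A minimal sketch with made-up values:

    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;

    public class BindHostResolutionSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, "jhs.example.com:19888");
        conf.set(JHAdminConfig.MR_HISTORY_BIND_HOST, "0.0.0.0");
        // Host comes from the bind-host property, port from the address
        // property: resolves to 0.0.0.0:19888.
        InetSocketAddress addr = conf.getSocketAddr(
            JHAdminConfig.MR_HISTORY_BIND_HOST,
            JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
            JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS,
            JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT);
        System.out.println(addr);
      }
    }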

+ 8 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java

@@ -83,6 +83,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.webapp.WebApp;
@@ -119,6 +120,7 @@ public class HistoryClientService extends AbstractService {
     YarnRPC rpc = YarnRPC.create(conf);
     initializeWebApp(conf);
     InetSocketAddress address = conf.getSocketAddr(
+        JHAdminConfig.MR_HISTORY_BIND_HOST,
         JHAdminConfig.MR_HISTORY_ADDRESS,
         JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS,
         JHAdminConfig.DEFAULT_MR_HISTORY_PORT);
@@ -137,9 +139,11 @@ public class HistoryClientService extends AbstractService {
     }
     
     server.start();
-    this.bindAddress = conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_ADDRESS,
+    this.bindAddress = conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_BIND_HOST,
+                                              JHAdminConfig.MR_HISTORY_ADDRESS,
+                                              JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS,
                                               server.getListenerAddress());
-    LOG.info("Instantiated MRClientService at " + this.bindAddress);
+    LOG.info("Instantiated HistoryClientService at " + this.bindAddress);
 
     super.serviceStart();
   }
@@ -158,8 +162,9 @@ public class HistoryClientService extends AbstractService {
             JHAdminConfig.MR_WEBAPP_SPNEGO_USER_NAME_KEY)
         .at(NetUtils.getHostPortString(bindAddress)).start(webApp);
     
+    String connectHost = MRWebAppUtil.getJHSWebappURLWithoutScheme(conf).split(":")[0];
     MRWebAppUtil.setJHSWebappURLWithoutScheme(conf,
-        NetUtils.getHostPortString(webApp.getListenerAddress()));
+        connectHost + ":" + webApp.getListenerAddress().getPort());
   }
 
   @Override

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java

@@ -394,4 +394,9 @@ public class JobHistory extends AbstractService implements HistoryContext {
     return true;
   }
 
+  @Override
+  public String getNMHostname() {
+    // bogus - Not Required
+    return null;
+  }
 }

+ 4 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/server/HSAdminServer.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogDeletionService;
 import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService;
 import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolPB;
@@ -94,7 +95,9 @@ public class HSAdminServer extends AbstractService implements HSAdminProtocol {
 
     WritableRpcEngine.ensureInitialized();
 
-    clientRpcAddress = conf.getSocketAddr(JHAdminConfig.JHS_ADMIN_ADDRESS,
+    clientRpcAddress = conf.getSocketAddr(
+        JHAdminConfig.MR_HISTORY_BIND_HOST,
+        JHAdminConfig.JHS_ADMIN_ADDRESS,
         JHAdminConfig.DEFAULT_JHS_ADMIN_ADDRESS,
         JHAdminConfig.DEFAULT_JHS_ADMIN_PORT);
     clientRpcServer = new RPC.Builder(conf)

+ 14 - 14
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java

@@ -82,13 +82,13 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.NMDBSchemaVersionProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto;
 import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
 import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
 import org.apache.hadoop.yarn.server.api.AuxiliaryService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
-import org.apache.hadoop.yarn.server.nodemanager.recovery.records.NMDBSchemaVersion;
-import org.apache.hadoop.yarn.server.nodemanager.recovery.records.impl.pb.NMDBSchemaVersionPBImpl;
+import org.apache.hadoop.yarn.server.records.Version;
+import org.apache.hadoop.yarn.server.records.impl.pb.VersionPBImpl;
 import org.apache.hadoop.yarn.server.utils.LeveldbIterator;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.fusesource.leveldbjni.JniDBFactory;
@@ -151,8 +151,8 @@ public class ShuffleHandler extends AuxiliaryService {
 
   private static final String STATE_DB_NAME = "mapreduce_shuffle_state";
   private static final String STATE_DB_SCHEMA_VERSION_KEY = "shuffle-schema-version";
-  protected static final NMDBSchemaVersion CURRENT_VERSION_INFO = 
-      NMDBSchemaVersion.newInstance(1, 0);
+  protected static final Version CURRENT_VERSION_INFO = 
+      Version.newInstance(1, 0);
 
   private int port;
   private ChannelFactory selector;
@@ -491,21 +491,21 @@ public class ShuffleHandler extends AuxiliaryService {
   }
   
   @VisibleForTesting
-  NMDBSchemaVersion loadVersion() throws IOException {
+  Version loadVersion() throws IOException {
     byte[] data = stateDb.get(bytes(STATE_DB_SCHEMA_VERSION_KEY));
     // if version is not stored previously, treat it as 1.0.
     if (data == null || data.length == 0) {
-      return NMDBSchemaVersion.newInstance(1, 0);
+      return Version.newInstance(1, 0);
     }
-    NMDBSchemaVersion version =
-        new NMDBSchemaVersionPBImpl(NMDBSchemaVersionProto.parseFrom(data));
+    Version version =
+        new VersionPBImpl(VersionProto.parseFrom(data));
     return version;
   }
 
-  private void storeSchemaVersion(NMDBSchemaVersion version) throws IOException {
+  private void storeSchemaVersion(Version version) throws IOException {
     String key = STATE_DB_SCHEMA_VERSION_KEY;
     byte[] data = 
-        ((NMDBSchemaVersionPBImpl) version).getProto().toByteArray();
+        ((VersionPBImpl) version).getProto().toByteArray();
     try {
       stateDb.put(bytes(key), data);
     } catch (DBException e) {
@@ -519,11 +519,11 @@ public class ShuffleHandler extends AuxiliaryService {
   
   // Only used for test
   @VisibleForTesting
-  void storeVersion(NMDBSchemaVersion version) throws IOException {
+  void storeVersion(Version version) throws IOException {
     storeSchemaVersion(version);
   }
 
-  protected NMDBSchemaVersion getCurrentVersion() {
+  protected Version getCurrentVersion() {
     return CURRENT_VERSION_INFO;
   }
   
@@ -538,7 +538,7 @@ public class ShuffleHandler extends AuxiliaryService {
    *    upgrade shuffle info or remove incompatible old state.
    */
   private void checkVersion() throws IOException {
-    NMDBSchemaVersion loadedVersion = loadVersion();
+    Version loadedVersion = loadVersion();
     LOG.info("Loaded state DB schema version info " + loadedVersion);
     if (loadedVersion.equals(getCurrentVersion())) {
       return;
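
The comparison convention behind checkVersion (equal major versions are compatible, a major bump means the stored state must be upgraded or removed) could be captured as below. isCompatible is a hypothetical helper, not part of this patch, and it assumes getter counterparts to the setMajorVersion/setMinorVersion calls shown in Version.newInstance:

    import org.apache.hadoop.yarn.server.records.Version;

    final class VersionCompatSketch {
      // Hypothetical helper illustrating the convention described above:
      // same major version => compatible; differing major version => the
      // stored state must be upgraded or discarded.
      static boolean isCompatible(Version loaded, Version current) {
        return loaded.getMajorVersion() == current.getMajorVersion();
      }
    }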

+ 4 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java

@@ -75,7 +75,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
 import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
-import org.apache.hadoop.yarn.server.nodemanager.recovery.records.NMDBSchemaVersion;
+import org.apache.hadoop.yarn.server.records.Version;
 import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelFuture;
 import org.jboss.netty.channel.ChannelHandlerContext;
@@ -764,11 +764,11 @@ public class TestShuffleHandler {
       // verify we are still authorized to shuffle to the old application
       rc = getShuffleResponseCode(shuffle, jt);
       Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
-      NMDBSchemaVersion version = NMDBSchemaVersion.newInstance(1, 0);
+      Version version = Version.newInstance(1, 0);
       Assert.assertEquals(version, shuffle.getCurrentVersion());
     
       // emulate shuffle handler restart with compatible version
-      NMDBSchemaVersion version11 = NMDBSchemaVersion.newInstance(1, 1);
+      Version version11 = Version.newInstance(1, 1);
       // update version info before close shuffle
       shuffle.storeVersion(version11);
       Assert.assertEquals(version11, shuffle.loadVersion());
@@ -785,7 +785,7 @@ public class TestShuffleHandler {
       Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
     
       // emulate shuffle handler restart with incompatible version
-      NMDBSchemaVersion version21 = NMDBSchemaVersion.newInstance(2, 1);
+      Version version21 = Version.newInstance(2, 1);
       shuffle.storeVersion(version21);
       Assert.assertEquals(version21, shuffle.loadVersion());
       shuffle.close();

+ 2 - 0
hadoop-project-dist/pom.xml

@@ -100,6 +100,8 @@
         <artifactId>findbugs-maven-plugin</artifactId>
         <configuration>
           <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
+          <fork>true</fork>
+          <maxHeap>2048</maxHeap>
         </configuration>
       </plugin>
       <plugin>

+ 7 - 1
hadoop-project/pom.xml

@@ -789,7 +789,13 @@
         <groupId>com.microsoft.windowsazure.storage</groupId>
         <artifactId>microsoft-windowsazure-storage-sdk</artifactId>
         <version>0.6.0</version>
-    </dependency>
+     </dependency>
+
+     <dependency>
+       <groupId>xerces</groupId>
+       <artifactId>xercesImpl</artifactId>
+       <version>2.9.1</version>
+     </dependency>
       
     </dependencies>
   </dependencyManagement>

+ 12 - 0
hadoop-yarn-project/CHANGES.txt

@@ -71,6 +71,15 @@ Release 2.6.0 - UNRELEASED
     YARN-2211. Persist AMRMToken master key in RMStateStore for RM recovery.
     (Xuan Gong via jianhe)
 
+    YARN-2328. FairScheduler: Verify update and continuous scheduling threads are
+    stopped when the scheduler is stopped. (kasha)
+
+    YARN-2347. Consolidated RMStateVersion and NMDBSchemaVersion into Version in
+    yarn-server-common. (Junping Du via zjshen)
+
+    YARN-1994. Expose YARN/MR endpoints on multiple interfaces. (Craig Welch,
+    Milan Potocnik, Arpit Agarwal via xgong)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -108,6 +117,9 @@ Release 2.6.0 - UNRELEASED
 
     YARN-1796. container-executor shouldn't require o-r permissions (atm)
 
+    YARN-2354. DistributedShell may allocate more containers than client
+    specified after AM restarts. (Li Lu via jianhe)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 12 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java

@@ -126,6 +126,10 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_RM_ADDRESS =
     "0.0.0.0:" + DEFAULT_RM_PORT;
 
+  /** The actual bind address for the RM.*/
+  public static final String RM_BIND_HOST =
+    RM_PREFIX + "bind-host";
+
   /** The number of threads used to handle applications manager requests.*/
   public static final String RM_CLIENT_THREAD_COUNT =
     RM_PREFIX + "client.thread-count";
@@ -545,6 +549,10 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_NM_ADDRESS = "0.0.0.0:"
       + DEFAULT_NM_PORT;
   
+  /** The actual bind address for the NM.*/
+  public static final String NM_BIND_HOST =
+    NM_PREFIX + "bind-host";
+
   /** who will execute(launch) the containers.*/
   public static final String NM_CONTAINER_EXECUTOR = 
     NM_PREFIX + "container-executor.class";
@@ -1132,6 +1140,10 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_TIMELINE_SERVICE_ADDRESS = "0.0.0.0:"
       + DEFAULT_TIMELINE_SERVICE_PORT;
 
+  /** The actual bind address for the timeline service.*/
+  public static final String TIMELINE_SERVICE_BIND_HOST =
+      TIMELINE_SERVICE_PREFIX + "bind-host";
+
   /** The number of threads to handle client RPC API requests. */
   public static final String TIMELINE_SERVICE_HANDLER_THREAD_COUNT =
       TIMELINE_SERVICE_PREFIX + "handler-thread-count";

+ 0 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto

@@ -130,11 +130,6 @@ message ApplicationAttemptStateDataProto {
   optional int32 am_container_exit_status = 9 [default = -1000];
 }
 
-message RMStateVersionProto {
-  optional int32 major_version = 1;
-  optional int32 minor_version = 2;
-}
-
 message EpochProto {
   optional int64 epoch = 1;
 }

+ 6 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java

@@ -208,7 +208,8 @@ public class ApplicationMaster {
 
   // App Master configuration
   // No. of containers to run shell command on
-  private int numTotalContainers = 1;
+  @VisibleForTesting
+  protected int numTotalContainers = 1;
   // Memory to request for the container on which the shell command will run
   private int containerMemory = 10;
   // VirtualCores to request for the container on which the shell command will run
@@ -594,8 +595,8 @@ public class ApplicationMaster {
 
     List<Container> previousAMRunningContainers =
         response.getContainersFromPreviousAttempts();
-    LOG.info("Received " + previousAMRunningContainers.size()
-        + " previous AM's running containers on AM registration.");
+    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
+      + " previous attempts' running containers on AM registration.");
     numAllocatedContainers.addAndGet(previousAMRunningContainers.size());
 
     int numTotalContainersToRequest =
@@ -610,7 +611,7 @@ public class ApplicationMaster {
       ContainerRequest containerAsk = setupContainerAskForRM();
       amRMClient.addContainerRequest(containerAsk);
     }
-    numRequestedContainers.set(numTotalContainersToRequest);
+    numRequestedContainers.set(numTotalContainers);
     try {
       publishApplicationAttemptEvent(timelineClient, appAttemptID.toString(),
           DSEvent.DS_APP_ATTEMPT_END);
@@ -689,7 +690,7 @@ public class ApplicationMaster {
       LOG.info("Got response from RM for container ask, completedCnt="
           + completedContainers.size());
       for (ContainerStatus containerStatus : completedContainers) {
-        LOG.info("Got container status for containerID="
+        LOG.info(appAttemptID + " got container status for containerID="
             + containerStatus.getContainerId() + ", state="
             + containerStatus.getState() + ", exitStatus="
             + containerStatus.getExitStatus() + ", diagnostics="

+ 4 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSFailedAppMaster.java

@@ -36,9 +36,11 @@ public class TestDSFailedAppMaster extends ApplicationMaster {
     if (appAttemptID.getAttemptId() == 2) {
       // should reuse the earlier running container, so numAllocatedContainers
       // should be set to 1. And should ask no more containers, so
-      // numRequestedContainers should be set to 0.
+      // numRequestedContainers should be the same as numTotalContainers.
+      // The only container is the container requested by the AM in the first
+      // attempt.
       if (numAllocatedContainers.get() != 1
-          || numRequestedContainers.get() != 0) {
+          || numRequestedContainers.get() != numTotalContainers) {
         LOG.info("NumAllocatedContainers is " + numAllocatedContainers.get()
             + " and NumRequestedContainers is " + numAllocatedContainers.get()
             + ".Application Master failed. exiting");

+ 32 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/WebAppUtils.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.conf.HAUtil;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.util.RMHAUtils;
 
 @Private
@@ -170,6 +171,37 @@ public class WebAppUtils {
     return sb.toString();
   }
   
+  /**
+   * Get the URL to use for binding, where a bind hostname can be specified
+   * to override the hostname in the webAppURLWithoutScheme. The port specified
+   * in the webAppURLWithoutScheme is always used.
+   *
+   * @param conf the configuration
+   * @param hostProperty bind host property name
+   * @param webAppURLWithoutScheme web app URL without scheme String
+   * @return String representing bind URL
+   */
+  public static String getWebAppBindURL(
+      Configuration conf,
+      String hostProperty,
+      String webAppURLWithoutScheme) {
+
+    // If the bind-host setting exists then it overrides the hostname
+    // portion of the corresponding webAppURLWithoutScheme
+    String host = conf.getTrimmed(hostProperty);
+    if (host != null && !host.isEmpty()) {
+      if (webAppURLWithoutScheme.contains(":")) {
+        webAppURLWithoutScheme = host + ":" + webAppURLWithoutScheme.split(":")[1];
+      } else {
+        throw new YarnRuntimeException("webAppURLWithoutScheme must include port specification but doesn't: " +
+                                       webAppURLWithoutScheme);
+      }
+    }
+
+    return webAppURLWithoutScheme;
+  }
+
   public static String getNMWebAppURLWithoutScheme(Configuration conf) {
     if (YarnConfiguration.useHttps(conf)) {
       return conf.get(YarnConfiguration.NM_WEBAPP_HTTPS_ADDRESS,
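
A minimal sketch of what the new getWebAppBindURL helper yields; the property value, webapp address, and class name below are made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

    public class BindUrlSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0");
        // The bind-host replaces the hostname but the configured port is
        // kept, so this returns "0.0.0.0:8088".
        String bindUrl = WebAppUtils.getWebAppBindURL(conf,
            YarnConfiguration.RM_BIND_HOST, "rm.example.com:8088");
        System.out.println(bindUrl);
        // A webapp URL without a port would make the helper throw
        // YarnRuntimeException, per the method body above.
      }
    }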

+ 34 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml

@@ -70,6 +70,17 @@
     <value>${yarn.resourcemanager.hostname}:8032</value>
   </property>
 
+  <property>
+    <description>
+      The actual address the server will bind to. If this optional address is
+      set, the RPC and webapp servers will bind to this address and the port specified in
+      yarn.resourcemanager.address and yarn.resourcemanager.webapp.address, respectively. This
+      is most useful for making the RM listen on all interfaces by setting it to 0.0.0.0.
+    </description>
+    <name>yarn.resourcemanager.bind-host</name>
+    <value></value>
+  </property>
+
   <property>
     <description>The number of threads used to handle applications manager requests.</description>
     <name>yarn.resourcemanager.client.thread-count</name>
@@ -635,6 +646,17 @@
     <value>${yarn.nodemanager.hostname}:0</value>
   </property>
 
+  <property>
+    <description>
+      The actual address the server will bind to. If this optional address is
+      set, the RPC and webapp servers will bind to this address and the port specified in
+      yarn.nodemanager.address and yarn.nodemanager.webapp.address, respectively. This is
+      most useful for making the NM listen on all interfaces by setting it to 0.0.0.0.
+    </description>
+    <name>yarn.nodemanager.bind-host</name>
+    <value></value>
+  </property>
+
   <property>
     <description>Environment variables that should be forwarded from the NodeManager's environment to the container's.</description>
     <name>yarn.nodemanager.admin-env</name>
@@ -1172,6 +1194,18 @@
     <value>${yarn.timeline-service.hostname}:8190</value>
   </property>
 
+  <property>
+    <description>
+      The actual address the server will bind to. If this optional address is
+      set, the RPC and webapp servers will bind to this address and the port specified in
+      yarn.timeline-service.address and yarn.timeline-service.webapp.address, respectively.
+      This is most useful for making the service listen on all interfaces by setting it to
+      0.0.0.0.
+    </description>
+    <name>yarn.timeline-service.bind-host</name>
+    <value></value>
+  </property>
+
   <property>
     <description>Store class name for timeline store.</description>
     <name>yarn.timeline-service.store-class</name>

+ 128 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfiguration.java

@@ -28,6 +28,7 @@ import java.net.SocketAddress;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
 
 public class TestYarnConfiguration {
 
@@ -75,4 +76,131 @@ public class TestYarnConfiguration {
         YarnConfiguration.DEFAULT_NM_PORT);
     assertEquals(1234, addr.getPort());
   }
+
+  @Test
+  public void testGetSocketAddr() throws Exception {
+
+    YarnConfiguration conf;
+    InetSocketAddress resourceTrackerAddress;
+
+    //all default
+    conf = new YarnConfiguration();
+    resourceTrackerAddress = conf.getSocketAddr(
+        YarnConfiguration.RM_BIND_HOST,
+        YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
+    assertEquals(
+      new InetSocketAddress(
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
+        resourceTrackerAddress);
+
+    //with address
+    conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.1");
+    resourceTrackerAddress = conf.getSocketAddr(
+        YarnConfiguration.RM_BIND_HOST,
+        YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
+    assertEquals(
+      new InetSocketAddress(
+        "10.0.0.1",
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
+        resourceTrackerAddress);
+
+    //address and socket
+    conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.2:5001");
+    resourceTrackerAddress = conf.getSocketAddr(
+        YarnConfiguration.RM_BIND_HOST,
+        YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
+    assertEquals(
+      new InetSocketAddress(
+        "10.0.0.2",
+        5001),
+        resourceTrackerAddress);
+
+    //bind host only
+    conf = new YarnConfiguration();
+    conf.set(YarnConfiguration.RM_BIND_HOST, "10.0.0.3");
+    resourceTrackerAddress = conf.getSocketAddr(
+        YarnConfiguration.RM_BIND_HOST,
+        YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
+    assertEquals(
+      new InetSocketAddress(
+        "10.0.0.3",
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
+        resourceTrackerAddress);
+
+    //bind host and address no port
+    conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0");
+    conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.2");
+    resourceTrackerAddress = conf.getSocketAddr(
+        YarnConfiguration.RM_BIND_HOST,
+        YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
+    assertEquals(
+      new InetSocketAddress(
+        "0.0.0.0",
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
+        resourceTrackerAddress);
+
+    //bind host and address with port
+    conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0");
+    conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "10.0.0.2:5003");
+    resourceTrackerAddress = conf.getSocketAddr(
+        YarnConfiguration.RM_BIND_HOST,
+        YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
+    assertEquals(
+      new InetSocketAddress(
+        "0.0.0.0",
+        5003),
+        resourceTrackerAddress);
+
+  }
+
+  @Test
+  public void testUpdateConnectAddr() throws Exception {
+    YarnConfiguration conf;
+    InetSocketAddress resourceTrackerConnectAddress;
+    InetSocketAddress serverAddress;
+
+    //no override, old behavior.  Won't work on a host named "yo.yo.yo"
+    conf = new YarnConfiguration();
+    conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo");
+    serverAddress = new InetSocketAddress(
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
+        Integer.valueOf(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1]));
+
+    resourceTrackerConnectAddress = conf.updateConnectAddr(
+        YarnConfiguration.RM_BIND_HOST,
+        YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
+        serverAddress);
+
+    assertFalse(resourceTrackerConnectAddress.toString().startsWith("yo.yo.yo"));
+
+    //cause override with address
+    conf = new YarnConfiguration();
+    conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo");
+    conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0");
+    serverAddress = new InetSocketAddress(
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
+        Integer.valueOf(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1]));
+
+    resourceTrackerConnectAddress = conf.updateConnectAddr(
+        YarnConfiguration.RM_BIND_HOST,
+        YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
+        serverAddress);
+
+    assertTrue(resourceTrackerConnectAddress.toString().startsWith("yo.yo.yo"));
+  }
 }

+ 10 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java

@@ -56,6 +56,7 @@ import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
 import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 
 public class ApplicationHistoryClientService extends AbstractService {
@@ -75,10 +76,11 @@ public class ApplicationHistoryClientService extends AbstractService {
   protected void serviceStart() throws Exception {
     Configuration conf = getConfig();
     YarnRPC rpc = YarnRPC.create(conf);
-    InetSocketAddress address =
-        conf.getSocketAddr(YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
-          YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
-          YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT);
+    InetSocketAddress address = conf.getSocketAddr(
+        YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
+        YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
+        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
+        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_PORT);
 
     server =
         rpc.getServer(ApplicationHistoryProtocol.class, protocolHandler,
@@ -88,8 +90,10 @@ public class ApplicationHistoryClientService extends AbstractService {
 
     server.start();
     this.bindAddress =
-        conf.updateConnectAddr(YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
-          server.getListenerAddress());
+        conf.updateConnectAddr(YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
+                               YarnConfiguration.TIMELINE_SERVICE_ADDRESS,
+                               YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ADDRESS,
+                               server.getListenerAddress());
     LOG.info("Instantiated ApplicationHistoryClientService at "
         + this.bindAddress);
 

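The call-site change above depends on the bind-host resolution added to Configuration elsewhere in this change set. A hypothetical helper mirroring what the four-argument getSocketAddr is expected to do, inferred from the test behavior rather than copied from Configuration.java:

    // resolveBindAddr is a hypothetical name; the logic is inferred from the
    // tests earlier in this diff. If the bind-host property is set, its host
    // replaces the host of the configured address; the port still comes from
    // the address property (or the default).
    static InetSocketAddress resolveBindAddr(Configuration conf,
        String bindHostProperty, String addressProperty,
        String defaultAddress, int defaultPort) {
      InetSocketAddress addr =
          conf.getSocketAddr(addressProperty, defaultAddress, defaultPort);
      String bindHost = conf.getTrimmed(bindHostProperty);
      return (bindHost == null || bindHost.isEmpty())
          ? addr : new InetSocketAddress(bindHost, addr.getPort());
    }
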
+ 3 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java

@@ -192,7 +192,9 @@ public class ApplicationHistoryServer extends CompositeService {
           TimelineAuthenticationFilterInitializer.class.getName()
               + initializers);
     }
-    String bindAddress = WebAppUtils.getAHSWebAppURLWithoutScheme(conf);
+    String bindAddress = WebAppUtils.getWebAppBindURL(conf,
+                          YarnConfiguration.TIMELINE_SERVICE_BIND_HOST,
+                          WebAppUtils.getAHSWebAppURLWithoutScheme(conf));
     LOG.info("Instantiating AHSWebApp at " + bindAddress);
     try {
       AHSWebApp ahsWebApp = AHSWebApp.getInstance();

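WebAppUtils.getWebAppBindURL applies the same idea to the web endpoint. A hypothetical sketch of that substitution, inferred from the call site above rather than taken from WebAppUtils:

    // Hypothetical sketch (getBindURL is not the real method body): when the
    // bind-host property is set, its value replaces the host part of the
    // configured web app address while the original port is kept.
    static String getBindURL(Configuration conf, String bindHostProperty,
        String webAppURLWithoutScheme) {
      String bindHost = conf.getTrimmed(bindHostProperty);
      if (bindHost == null || bindHost.isEmpty()) {
        return webAppURLWithoutScheme;
      }
      int sep = webAppURLWithoutScheme.lastIndexOf(':');
      return sep < 0 ? bindHost : bindHost + webAppURLWithoutScheme.substring(sep);
    }
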
+ 14 - 10
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/records/NMDBSchemaVersion.java → hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/Version.java

@@ -15,21 +15,26 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.yarn.server.nodemanager.recovery.records;
 
-import org.apache.hadoop.classification.InterfaceAudience.Private;
+package org.apache.hadoop.yarn.server.records;
+
+import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
- * The version information of DB Schema for NM.
+ * The version information for state stored in YARN components,
+ * e.g. RMState and NMState, consisting of a majorVersion and a
+ * minorVersion.
+ * A major version update indicates an incompatible change, while a
+ * minor version update indicates a compatible change.
  */
-@Private
+@LimitedPrivate({"YARN", "MapReduce"})
 @Unstable
-public abstract class NMDBSchemaVersion {
+public abstract class Version {
 
-  public static NMDBSchemaVersion newInstance(int majorVersion, int minorVersion) {
-    NMDBSchemaVersion version = Records.newRecord(NMDBSchemaVersion.class);
+  public static Version newInstance(int majorVersion, int minorVersion) {
+    Version version = Records.newRecord(Version.class);
     version.setMajorVersion(majorVersion);
     version.setMinorVersion(minorVersion);
     return version;
@@ -47,7 +52,7 @@ public abstract class NMDBSchemaVersion {
     return getMajorVersion() + "." + getMinorVersion();
   }
 
-  public boolean isCompatibleTo(NMDBSchemaVersion version) {
+  public boolean isCompatibleTo(Version version) {
     return getMajorVersion() == version.getMajorVersion();
   }
 
@@ -68,7 +73,7 @@ public abstract class NMDBSchemaVersion {
       return false;
     if (getClass() != obj.getClass())
       return false;
-    NMDBSchemaVersion other = (NMDBSchemaVersion) obj;
+    Version other = (Version) obj;
     if (this.getMajorVersion() == other.getMajorVersion()
         && this.getMinorVersion() == other.getMinorVersion()) {
       return true;
@@ -76,5 +81,4 @@ public abstract class NMDBSchemaVersion {
       return false;
     }
   }
-
 }

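The compatibility contract above is major-version equality only; minor versions never break compatibility. For example, using the factory and comparison methods from this hunk (and assuming the record factory can resolve the PB implementation, as in a normal YARN runtime):

    // 1.x and 1.y are mutually compatible; any major bump is not.
    Version stored  = Version.newInstance(1, 0);
    Version current = Version.newInstance(1, 3);
    boolean sameMajor = current.isCompatibleTo(stored);                   // true
    boolean majorBump = Version.newInstance(2, 0).isCompatibleTo(stored); // false
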
+ 15 - 14
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/RMStateVersionPBImpl.java → hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/records/impl/pb/VersionPBImpl.java

@@ -16,28 +16,29 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb;
+package org.apache.hadoop.yarn.server.records.impl.pb;
 
-import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProto;
-import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RMStateVersionProtoOrBuilder;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.RMStateVersion;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProto;
+import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.VersionProtoOrBuilder;
 
-public class RMStateVersionPBImpl extends RMStateVersion {
+import org.apache.hadoop.yarn.server.records.Version;
 
-  RMStateVersionProto proto = RMStateVersionProto.getDefaultInstance();
-  RMStateVersionProto.Builder builder = null;
+public class VersionPBImpl extends Version {
+
+  VersionProto proto = VersionProto.getDefaultInstance();
+  VersionProto.Builder builder = null;
   boolean viaProto = false;
 
-  public RMStateVersionPBImpl() {
-    builder = RMStateVersionProto.newBuilder();
+  public VersionPBImpl() {
+    builder = VersionProto.newBuilder();
   }
 
-  public RMStateVersionPBImpl(RMStateVersionProto proto) {
+  public VersionPBImpl(VersionProto proto) {
     this.proto = proto;
     viaProto = true;
   }
 
-  public RMStateVersionProto getProto() {
+  public VersionProto getProto() {
     proto = viaProto ? proto : builder.build();
     viaProto = true;
     return proto;
@@ -45,14 +46,14 @@ public class RMStateVersionPBImpl extends RMStateVersion {
 
   private void maybeInitBuilder() {
     if (viaProto || builder == null) {
-      builder = RMStateVersionProto.newBuilder(proto);
+      builder = VersionProto.newBuilder(proto);
     }
     viaProto = false;
   }
 
   @Override
   public int getMajorVersion() {
-    RMStateVersionProtoOrBuilder p = viaProto ? proto : builder;
+    VersionProtoOrBuilder p = viaProto ? proto : builder;
     return p.getMajorVersion();
   }
 
@@ -64,7 +65,7 @@ public class RMStateVersionPBImpl extends RMStateVersion {
 
   @Override
   public int getMinorVersion() {
-    RMStateVersionProtoOrBuilder p = viaProto ? proto : builder;
+    VersionProtoOrBuilder p = viaProto ? proto : builder;
     return p.getMinorVersion();
   }
 

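The hunk shows the getters; the matching setters fall outside the diff context but presumably follow the same maybeInitBuilder pattern. A sketch under that assumption:

    // Assumed shape of the setter, mirroring the viaProto/builder discipline
    // visible in the getters above (the real method is outside this hunk).
    @Override
    public void setMajorVersion(int major) {
      maybeInitBuilder();            // drop back from immutable proto to builder
      builder.setMajorVersion(major);
    }
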
+ 7 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/proto/yarn_server_common_protos.proto

@@ -47,4 +47,10 @@ message NodeHealthStatusProto {
   optional bool is_node_healthy = 1;
   optional string health_report = 2;
   optional int64 last_health_report_time = 3;
-}
+}
+
+message VersionProto {
+  optional int32 major_version = 1;
+  optional int32 minor_version = 2;
+}
+

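Once compiled, the new message yields the usual protoc-generated builder, which is what VersionPBImpl wraps; for instance:

    // Building the message via the generated API and wrapping it in the PB
    // record from the previous file in this diff.
    VersionProto proto = VersionProto.newBuilder()
        .setMajorVersion(1)
        .setMinorVersion(2)
        .build();
    Version version = new VersionPBImpl(proto);   // version.getMajorVersion() == 1
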
+ 18 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java

@@ -275,6 +275,7 @@ public class ContainerManagerImpl extends CompositeService implements
     YarnRPC rpc = YarnRPC.create(conf);
 
     InetSocketAddress initialAddress = conf.getSocketAddr(
+        YarnConfiguration.NM_BIND_HOST,
         YarnConfiguration.NM_ADDRESS,
         YarnConfiguration.DEFAULT_NM_ADDRESS,
         YarnConfiguration.DEFAULT_NM_PORT);
@@ -296,7 +297,22 @@ public class ContainerManagerImpl extends CompositeService implements
     		" server is still starting.");
     this.setBlockNewContainerRequests(true);
     server.start();
-    InetSocketAddress connectAddress = NetUtils.getConnectAddress(server);
+    
+    InetSocketAddress connectAddress;
+    String bindHost = conf.get(YarnConfiguration.NM_BIND_HOST);
+    String nmAddress = conf.getTrimmed(YarnConfiguration.NM_ADDRESS);
+    if (bindHost == null || bindHost.isEmpty() ||
+        nmAddress == null || nmAddress.isEmpty()) {
+      connectAddress = NetUtils.getConnectAddress(server);
+    } else {
+      //bind-host case with an explicit address: to override the first hostname
+      //found when querying for our hostname, combine the host from the
+      //configured address with the port the server actually listens on
+      connectAddress = NetUtils.getConnectAddress(
+          new InetSocketAddress(nmAddress.split(":")[0],
+              server.getListenerAddress().getPort()));
+    }
+
     NodeId nodeId = NodeId.newInstance(
         connectAddress.getAddress().getCanonicalHostName(),
         connectAddress.getPort());
@@ -304,6 +320,7 @@ public class ContainerManagerImpl extends CompositeService implements
     this.context.getNMTokenSecretManager().setNodeId(nodeId);
     this.context.getContainerTokenSecretManager().setNodeId(nodeId);
     LOG.info("ContainerManager started at " + connectAddress);
+    LOG.info("ContainerManager bound to " + initialAddress);
     super.serviceStart();
   }
 

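Condensed, the connect-address decision added above amounts to the following (resolveConnectAddress is a hypothetical helper; the logic is lifted from the hunk):

    // Hypothetical helper: without both NM_BIND_HOST and NM_ADDRESS set, keep
    // the old behavior; otherwise pair the configured NM host with the port
    // the server actually bound.
    static InetSocketAddress resolveConnectAddress(Configuration conf,
        org.apache.hadoop.ipc.Server server) {
      String bindHost  = conf.get(YarnConfiguration.NM_BIND_HOST);
      String nmAddress = conf.getTrimmed(YarnConfiguration.NM_ADDRESS);
      if (bindHost == null || bindHost.isEmpty()
          || nmAddress == null || nmAddress.isEmpty()) {
        return NetUtils.getConnectAddress(server);
      }
      return NetUtils.getConnectAddress(new InetSocketAddress(
          nmAddress.split(":")[0], server.getListenerAddress().getPort()));
    }
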
Some files were not shown because too many files changed in this diff