
Merged changes from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5442@1601151 13f79535-47bb-0310-9956-ffa450edef68
Vinayakumar B · 11 years ago · commit e695b4e5a6
100 changed files with 2317 additions and 552 deletions
  1. 14 0
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. 3 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  3. 0 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
  4. 24 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
  5. 6 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  6. 34 10
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  7. 5 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
  8. 5 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
  9. 5 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
  10. 7 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  11. 1 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
  12. 116 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
  13. 91 52
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
  14. 33 72
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
  15. 26 26
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
  16. 10 11
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  17. 2 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
  18. 110 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGraphiteMetrics.java
  19. 5 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java
  20. 54 31
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
  21. 21 8
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
  22. 3 6
      hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSCacheKeyProvider.java
  23. 6 1
      hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
  24. 9 7
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
  25. 7 18
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
  26. 2 5
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
  27. 12 30
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java
  28. 3 6
      hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java
  29. 9 7
      hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java
  30. 5 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
  31. 99 0
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
  32. 57 0
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
  33. 54 0
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfiguration.java
  34. 3 3
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java
  35. 13 15
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
  36. 4 4
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
  37. 8 13
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java
  38. 2 1
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
  39. 6 6
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java
  40. 6 7
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java
  41. 28 25
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
  42. 13 12
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java
  43. 2 2
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java
  44. 5 4
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java
  45. 2 2
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java
  46. 6 6
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java
  47. 4 4
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java
  48. 20 20
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOpenFileCtxCache.java
  49. 42 0
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
  50. 6 6
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
  51. 2 2
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/resources/core-site.xml
  52. 37 3
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  53. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
  54. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
  55. 15 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  56. 2 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  57. 17 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  58. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java
  59. 21 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  60. 13 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  61. 14 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  62. 17 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  63. 6 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  64. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
  65. 14 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
  66. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
  67. 44 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  68. 10 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java
  69. 8 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
  70. 19 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java
  71. 8 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
  72. 27 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  73. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  74. 23 14
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
  75. 33 40
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
  76. 37 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
  77. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
  78. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
  79. 8 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto
  80. 51 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
  81. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
  82. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
  83. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
  84. 7 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
  85. 5 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java
  86. 111 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
  87. 116 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyBlockManagement.java
  88. 12 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
  89. 10 8
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
  90. 12 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
  91. 38 8
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
  92. 358 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
  93. 6 0
      hadoop-mapreduce-project/CHANGES.txt
  94. 1 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
  95. 41 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LineRecordReader.java
  96. 41 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java
  97. 37 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java
  98. 38 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java
  99. 2 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/resources/testBOM.txt
  100. 16 0
      hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java

+ 14 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -334,6 +334,11 @@ Trunk (Unreleased)
     HADOOP-10625. Trim configuration names when putting/getting them
     to properties. (Wangda Tan via xgong)
 
+    HADOOP-10645. TestKMS fails because race condition writing acl files. (tucu)
+
+    HADOOP-10611. KMS, keyVersion name should not be assumed to be 
+    keyName@versionNumber. (tucu)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -366,6 +371,8 @@ Release 2.5.0 - UNRELEASED
 
     HADOOP-10498. Add support for proxy server. (daryn)
 
+    HADOOP-9704. Write metrics sink plugin for Hadoop/Graphite (Chu Tong, Alex Newman and Babak Behzad via raviprak)
+
   IMPROVEMENTS
 
     HADOOP-10451. Remove unused field and imports from SaslRpcServer.
@@ -522,6 +529,13 @@ Release 2.5.0 - UNRELEASED
     HADOOP-10638. Updating hadoop-daemon.sh to work as expected when nfs is
     started as a privileged user. (Manikandan Narayanaswamy via atm)
 
+    HADOOP-10630. Possible race condition in RetryInvocationHandler. (jing9)
+
+    HADOOP-10658. SSLFactory expects truststores being configured. (tucu via atm)
+
+    HADOOP-10647. String Format Exception in SwiftNativeFileSystemStore.java.
+    (Gene Kim via stevel)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 3 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -423,7 +423,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       new DeprecationDelta("fs.default.name", 
         CommonConfigurationKeys.FS_DEFAULT_NAME_KEY),
       new DeprecationDelta("dfs.umaskmode",
-        CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY)
+        CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY),
+      new DeprecationDelta("dfs.nfs.exports.allowed.hosts",
+          CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY)
     };
 
   /**
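
For illustration, a minimal sketch of what the new DeprecationDelta buys (the value string is assumed, not from this patch): once "dfs.nfs.exports.allowed.hosts" is registered as deprecated, a value set under the old name is readable under CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY, and Configuration logs a deprecation warning on first use of the old key.

    Configuration conf = new Configuration();
    conf.set("dfs.nfs.exports.allowed.hosts", "192.168.0.0/22 rw");
    // Resolves through the deprecation mapping added above.
    String hosts = conf.get(CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY);
    // hosts == "192.168.0.0/22 rw"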

+ 0 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java

@@ -517,8 +517,4 @@ public class KMSClientProvider extends KeyProvider {
     // the server should not keep in memory state on behalf of clients either.
   }
 
-  @VisibleForTesting
-  public static String buildVersionName(String name, int version) {
-    return KeyProvider.buildVersionName(name, version);
-  }
 }

+ 24 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java

@@ -1168,6 +1168,30 @@ public abstract class AbstractFileSystem {
         + " doesn't support getXAttrs");
   }
 
+  /**
+   * Get all of the xattr names for a file or directory.
+   * Only the xattr names for which the logged-in user has permissions to view
+   * are returned.
+   * <p/>
+   * A regular user can only get xattr names for the "user" namespace.
+   * The super user can only get xattr names for the "user" and "trusted"
+   * namespaces.
+   * The xattr names in the "security" and "system" namespaces are only
+   * used/exposed internally by/to the FS impl.
+   * <p/>
+   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
+   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   *
+   * @param path Path to get extended attributes
+   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @throws IOException
+   */
+  public List<String> listXAttrs(Path path)
+          throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+            + " doesn't support listXAttrs");
+  }
+
   /**
    * Remove an xattr of a file or directory.
    * The name must be prefixed with user/trusted/security/system and

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -265,4 +265,10 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final boolean RPC_METRICS_QUANTILE_ENABLE_DEFAULT = false;
   public static final String  RPC_METRICS_PERCENTILES_INTERVALS_KEY =
       "rpc.metrics.percentiles.intervals";
+  
+  /** Allowed hosts for nfs exports */
+  public static final String NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
+  public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY = "nfs.exports.allowed.hosts";
+  public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
+
 }

+ 34 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -2403,8 +2403,8 @@ public abstract class FileSystem extends Configured implements Closeable {
    * <p/>
    * The access permissions of an xattr in the "user" namespace are
    * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
+   * An xattr can only be set if the logged-in user has the correct permissions.
+   * If the xattr exists, it is replaced.
    * <p/>
    * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
    * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
@@ -2422,7 +2422,7 @@ public abstract class FileSystem extends Configured implements Closeable {
   }
 
   /**
-   * Get an xattr for a file or directory.
+   * Get an xattr name and value for a file or directory.
    * The name must be prefixed with user/trusted/security/system and
    * followed by ".". For example, "user.attr".
    * <p/>
@@ -2432,7 +2432,8 @@ public abstract class FileSystem extends Configured implements Closeable {
    * The xattrs of the "security" and "system" namespaces are only used/exposed 
    * internally by/to the FS impl.
    * <p/>
-   * An xattr will only be returned when the logged-in user has the correct permissions.
+   * An xattr will only be returned if the logged-in user has the
+   * correct permissions.
    * <p/>
    * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
    * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
@@ -2448,13 +2449,13 @@ public abstract class FileSystem extends Configured implements Closeable {
   }
 
   /**
-   * Get all of the xattrs for a file or directory.
-   * Only those xattrs for which the logged-in user has permissions to view
+   * Get all of the xattr name/value pairs for a file or directory.
+   * Only those xattrs which the logged-in user has permissions to view
    * are returned.
    * <p/>
    * A regular user can only get xattrs for the "user" namespace.
    * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattr of "security" and "system" namespaces are only used/exposed 
+   * The xattrs of the "security" and "system" namespaces are only used/exposed
    * internally by/to the FS impl.
    * <p/>
    * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
@@ -2470,13 +2471,13 @@ public abstract class FileSystem extends Configured implements Closeable {
   }
 
   /**
-   * Get all of the xattrs for a file or directory.
-   * Only those xattrs for which the logged-in user has permissions to view
+   * Get all of the xattrs name/value pairs for a file or directory.
+   * Only those xattrs which the logged-in user has permissions to view
    * are returned.
    * <p/>
    * A regular user can only get xattrs for the "user" namespace.
    * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattr of "security" and "system" namespaces are only used/exposed 
+   * The xattrs of the "security" and "system" namespaces are only used/exposed
    * internally by/to the FS impl.
    * <p/>
    * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
@@ -2493,6 +2494,29 @@ public abstract class FileSystem extends Configured implements Closeable {
         + " doesn't support getXAttrs");
   }
 
+  /**
+   * Get all of the xattr names for a file or directory.
+   * Only those xattr names which the logged-in user has permissions to view
+   * are returned.
+   * <p/>
+   * A regular user can only get xattr names for the "user" namespace.
+   * The super user can only get xattr names for "user" and "trusted"
+   * namespaces.
+   * The xattrs of the "security" and "system" namespaces are only
+   * used/exposed internally by/to the FS impl.
+   * <p/>
+   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
+   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   *
+   * @param path Path to get extended attributes
+   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @throws IOException
+   */
+  public List<String> listXAttrs(Path path) throws IOException {
+    throw new UnsupportedOperationException(getClass().getSimpleName()
+            + " doesn't support listXAttrs");
+  }
+
   /**
    * Remove an xattr of a file or directory.
    * The name must be prefixed with user/trusted/security/system and
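
For illustration, a hedged usage sketch of the xattr API as extended here (the path, attribute name and value are assumptions): listXAttrs returns only the xattr names the logged-in user may see, and pairs naturally with getXAttr.

    FileSystem fs = FileSystem.get(new Configuration());
    Path file = new Path("/user/alice/data.txt");
    fs.setXAttr(file, "user.origin", "ingest-job".getBytes(StandardCharsets.UTF_8));
    for (String name : fs.listXAttrs(file)) {          // e.g. ["user.origin"]
      byte[] value = fs.getXAttr(file, name);
      System.out.println(name + " = " + new String(value, StandardCharsets.UTF_8));
    }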

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -572,6 +572,11 @@ public class FilterFileSystem extends FileSystem {
     return fs.getXAttrs(path, names);
   }
 
+  @Override
+  public List<String> listXAttrs(Path path) throws IOException {
+    return fs.listXAttrs(path);
+  }
+
   @Override
   public void removeXAttr(Path path, String name) throws IOException {
     fs.removeXAttr(path, name);

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java

@@ -346,6 +346,11 @@ public abstract class FilterFs extends AbstractFileSystem {
     return myFs.getXAttrs(path, names);
   }
 
+  @Override
+  public List<String> listXAttrs(Path path) throws IOException {
+    return myFs.listXAttrs(path);
+  }
+
   @Override
   public void removeXAttr(Path path, String name) throws IOException {
     myFs.removeXAttr(path, name);

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java

@@ -337,6 +337,11 @@ class ChRootedFileSystem extends FilterFileSystem {
     return super.getXAttrs(fullPath(path), names);
   }
 
+  @Override
+  public List<String> listXAttrs(Path path) throws IOException {
+    return super.listXAttrs(fullPath(path));
+  }
+
   @Override
   public void removeXAttr(Path path, String name) throws IOException {
     super.removeXAttr(fullPath(path), name);

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -551,6 +551,13 @@ public class ViewFileSystem extends FileSystem {
     return res.targetFileSystem.getXAttrs(res.remainingPath, names);
   }
 
+  @Override
+  public List<String> listXAttrs(Path path) throws IOException {
+    InodeTree.ResolveResult<FileSystem> res =
+      fsState.resolve(getUriPath(path), true);
+    return res.targetFileSystem.listXAttrs(res.remainingPath);
+  }
+
   @Override
   public void removeXAttr(Path path, String name) throws IOException {
     InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(path),

+ 1 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java

@@ -36,7 +36,6 @@ import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcConstants;
 import org.apache.hadoop.ipc.RpcInvocationHandler;
-import org.apache.hadoop.util.ThreadUtil;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -160,11 +159,11 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
               if (invocationAttemptFailoverCount == proxyProviderFailoverCount) {
                 proxyProvider.performFailover(currentProxy.proxy);
                 proxyProviderFailoverCount++;
-                currentProxy = proxyProvider.getProxy();
               } else {
                 LOG.warn("A failover has occurred since the start of this method"
                     + " invocation attempt.");
               }
+              currentProxy = proxyProvider.getProxy();
             }
             invocationFailoverCount++;
           }

+ 116 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java

@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.sink;
+
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.net.Socket;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsException;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.hadoop.metrics2.MetricsTag;
+
+/**
+ * A metrics sink that writes to a Graphite server
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class GraphiteSink implements MetricsSink {
+    private static final String SERVER_HOST_KEY = "server_host";
+    private static final String SERVER_PORT_KEY = "server_port";
+    private static final String METRICS_PREFIX = "metrics_prefix";
+    private Writer writer = null;
+    private String metricsPrefix = null;
+
+    public void setWriter(Writer writer) {
+        this.writer = writer;
+    }
+
+    @Override
+    public void init(SubsetConfiguration conf) {
+        // Get Graphite host configurations.
+        String serverHost = conf.getString(SERVER_HOST_KEY);
+        Integer serverPort = Integer.parseInt(conf.getString(SERVER_PORT_KEY));
+
+        // Get Graphite metrics graph prefix.
+        metricsPrefix = conf.getString(METRICS_PREFIX);
+        if (metricsPrefix == null)
+            metricsPrefix = "";
+
+        try {
+            // Open an connection to Graphite server.
+            Socket socket = new Socket(serverHost, serverPort);
+            setWriter(new OutputStreamWriter(socket.getOutputStream()));
+        } catch (Exception e) {
+            throw new MetricsException("Error creating connection, "
+                    + serverHost + ":" + serverPort, e);
+        }
+    }
+
+    @Override
+    public void putMetrics(MetricsRecord record) {
+        StringBuilder lines = new StringBuilder();
+        StringBuilder metricsPathPrefix = new StringBuilder();
+
+        // Configure the hierarchical place to display the graph.
+        metricsPathPrefix.append(metricsPrefix).append(".")
+                .append(record.context()).append(".").append(record.name());
+
+        for (MetricsTag tag : record.tags()) {
+            if (tag.value() != null) {
+                metricsPathPrefix.append(".");
+                metricsPathPrefix.append(tag.name());
+                metricsPathPrefix.append("=");
+                metricsPathPrefix.append(tag.value());
+            }
+        }
+
+        // Round the timestamp to second as Graphite accepts it in such format.
+        int timestamp = Math.round(record.timestamp() / 1000.0f);
+
+        // Collect datapoints.
+        for (AbstractMetric metric : record.metrics()) {
+            lines.append(
+                    metricsPathPrefix.toString() + "."
+                            + metric.name().replace(' ', '.')).append(" ")
+                    .append(metric.value()).append(" ").append(timestamp)
+                    .append("\n");
+        }
+
+        try {
+            writer.write(lines.toString());
+        } catch (Exception e) {
+            throw new MetricsException("Error sending metrics", e);
+        }
+    }
+
+    @Override
+    public void flush() {
+        try {
+            writer.flush();
+        } catch (Exception e) {
+            throw new MetricsException("Error flushing metrics", e);
+        }
+    }
+}
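
For context, a hedged sketch of how a daemon might enable this sink through hadoop-metrics2.properties (the sink instance name "graphite" and the host, port and prefix values are assumptions; only the option names server_host, server_port and metrics_prefix come from the class above):

    *.sink.graphite.class=org.apache.hadoop.metrics2.sink.GraphiteSink
    namenode.sink.graphite.server_host=graphite.example.com
    namenode.sink.graphite.server_port=2003
    namenode.sink.graphite.metrics_prefix=hadoop.cluster1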

+ 91 - 52
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java

@@ -19,8 +19,10 @@ package org.apache.hadoop.net;
 
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.Random;
+import java.util.TreeMap;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -32,6 +34,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.util.ReflectionUtils;
 
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+
 /** The class represents a cluster of computer with a tree hierarchical
  * network topology.
  * For example, a cluster may be consists of many data centers filled 
@@ -667,7 +672,23 @@ public class NetworkTopology {
     return node1.getParent()==node2.getParent();
   }
 
-  final protected static Random r = new Random();
+  private static final ThreadLocal<Random> r = new ThreadLocal<Random>();
+
+  /**
+   * Getter for thread-local Random, which provides better performance than
+   * a shared Random (even though Random is thread-safe).
+   *
+   * @return Thread-local Random.
+   */
+  protected Random getRandom() {
+    Random rand = r.get();
+    if (rand == null) {
+      rand = new Random();
+      r.set(rand);
+    }
+    return rand;
+  }
+
   /** randomly choose one node from <i>scope</i>
    * if scope starts with ~, choose one from the all nodes except for the
    * ones in <i>scope</i>; otherwise, choose one from <i>scope</i>
@@ -717,7 +738,7 @@ public class NetworkTopology {
           "Failed to find datanode (scope=\"" + String.valueOf(scope) +
           "\" excludedScope=\"" + String.valueOf(excludedScope) + "\").");
     }
-    int leaveIndex = r.nextInt(numOfDatanodes);
+    int leaveIndex = getRandom().nextInt(numOfDatanodes);
     return innerNode.getLeaf(leaveIndex, node);
   }
 
@@ -824,61 +845,79 @@ public class NetworkTopology {
     return networkLocation.substring(index);
   }
 
-  /** swap two array items */
-  static protected void swap(Node[] nodes, int i, int j) {
-    Node tempNode;
-    tempNode = nodes[j];
-    nodes[j] = nodes[i];
-    nodes[i] = tempNode;
-  }
-  
-  /** Sort nodes array by their distances to <i>reader</i>
-   * It linearly scans the array, if a local node is found, swap it with
-   * the first element of the array.
-   * If a local rack node is found, swap it with the first element following
-   * the local node.
-   * If neither local node or local rack node is found, put a random replica
-   * location at position 0.
-   * It leaves the rest nodes untouched.
-   * @param reader the node that wishes to read a block from one of the nodes
-   * @param nodes the list of nodes containing data for the reader
+  /**
+   * Returns an integer weight which specifies how far away {node} is away from
+   * {reader}. A lower value signifies that a node is closer.
+   * 
+   * @param reader Node where data will be read
+   * @param node Replica of data
+   * @return weight
    */
-  public void pseudoSortByDistance( Node reader, Node[] nodes ) {
-    int tempIndex = 0;
-    int localRackNode = -1;
-    if (reader != null ) {
-      //scan the array to find the local node & local rack node
-      for(int i=0; i<nodes.length; i++) {
-        if(tempIndex == 0 && reader == nodes[i]) { //local node
-          //swap the local node and the node at position 0
-          if( i != 0 ) {
-            swap(nodes, tempIndex, i);
-          }
-          tempIndex=1;
-          if(localRackNode != -1 ) {
-            if(localRackNode == 0) {
-              localRackNode = i;
-            }
-            break;
-          }
-        } else if(localRackNode == -1 && isOnSameRack(reader, nodes[i])) {
-          //local rack
-          localRackNode = i;
-          if(tempIndex != 0 ) break;
-        }
+  protected int getWeight(Node reader, Node node) {
+    // 0 is local, 1 is same rack, 2 is off rack
+    // Start off by initializing to off rack
+    int weight = 2;
+    if (reader != null) {
+      if (reader == node) {
+        weight = 0;
+      } else if (isOnSameRack(reader, node)) {
+        weight = 1;
       }
+    }
+    return weight;
+  }
 
-      // swap the local rack node and the node at position tempIndex
-      if(localRackNode != -1 && localRackNode != tempIndex ) {
-        swap(nodes, tempIndex, localRackNode);
-        tempIndex++;
+  /**
+   * Sort nodes array by network distance to <i>reader</i>.
+   * <p/>
+   * In a three-level topology, a node can be either local, on the same rack, or
+   * on a different rack from the reader. Sorting the nodes based on network
+   * distance from the reader reduces network traffic and improves performance.
+   * <p/>
+   * As an additional twist, we also randomize the nodes at each network
+   * distance using the provided random seed. This helps with load balancing
+   * when there is data skew.
+   * 
+   * @param reader Node where data will be read
+   * @param nodes Available replicas with the requested data
+   * @param seed Used to seed the pseudo-random generator that randomizes the
+   *          set of nodes at each network distance.
+   */
+  public void sortByDistance(Node reader, Node[] nodes, long seed) {
+    /** Sort weights for the nodes array */
+    int[] weights = new int[nodes.length];
+    for (int i=0; i<nodes.length; i++) {
+      weights[i] = getWeight(reader, nodes[i]);
+    }
+    // Add weight/node pairs to a TreeMap to sort
+    TreeMap<Integer, List<Node>> tree = new TreeMap<Integer, List<Node>>();
+    for (int i=0; i<nodes.length; i++) {
+      int weight = weights[i];
+      Node node = nodes[i];
+      List<Node> list = tree.get(weight);
+      if (list == null) {
+        list = Lists.newArrayListWithExpectedSize(1);
+        tree.put(weight, list);
       }
+      list.add(node);
     }
-    
-    // put a random node at position 0 if it is not a local/local-rack node
-    if(tempIndex == 0 && localRackNode == -1 && nodes.length != 0) {
-      swap(nodes, 0, r.nextInt(nodes.length));
+
+    // Seed is normally the block id
+    // This means we use the same pseudo-random order for each block, for
+    // potentially better page cache usage.
+    Random rand = getRandom();
+    rand.setSeed(seed);
+    int idx = 0;
+    for (List<Node> list: tree.values()) {
+      if (list != null) {
+        Collections.shuffle(list, rand);
+        for (Node n: list) {
+          nodes[idx] = n;
+          idx++;
+        }
+      }
     }
+    Preconditions.checkState(idx == nodes.length,
+        "Sorted the wrong number of nodes!");
   }
-  
 }
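
For illustration, a minimal sketch of the new contract (topology paths and hosts are made up): getWeight groups replicas as 0 = local, 1 = same rack, 2 = off rack, and sortByDistance shuffles ties with the given seed, which HDFS is expected to pass as the block id so every reader of a block sees the same order.

    NetworkTopology topo = new NetworkTopology();
    Node local = new NodeBase("/dc1/rack1/host1");
    Node rackLocal = new NodeBase("/dc1/rack1/host2");
    Node remote = new NodeBase("/dc1/rack2/host3");
    topo.add(local);
    topo.add(rackLocal);
    topo.add(remote);

    Node[] replicas = { remote, rackLocal, local };
    topo.sortByDistance(local, replicas, 0x1234L);  // seed stands in for a block id
    // replicas is now ordered local, rackLocal, remote; the same seed always
    // yields the same order within each weight class.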

+ 33 - 72
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java

@@ -248,25 +248,41 @@ public class NetworkTopologyWithNodeGroup extends NetworkTopology {
     }
   }
 
-  /** Sort nodes array by their distances to <i>reader</i>
-   * It linearly scans the array, if a local node is found, swap it with
-   * the first element of the array.
-   * If a local node group node is found, swap it with the first element 
-   * following the local node.
-   * If a local rack node is found, swap it with the first element following
-   * the local node group node.
-   * If neither local node, node group node or local rack node is found, put a 
-   * random replica location at position 0.
-   * It leaves the rest nodes untouched.
-   * @param reader the node that wishes to read a block from one of the nodes
-   * @param nodes the list of nodes containing data for the reader
-   */
   @Override
-  public void pseudoSortByDistance( Node reader, Node[] nodes ) {
+  protected int getWeight(Node reader, Node node) {
+    // 0 is local, 1 is same node group, 2 is same rack, 3 is off rack
+    // Start off by initializing to off rack
+    int weight = 3;
+    if (reader != null) {
+      if (reader == node) {
+        weight = 0;
+      } else if (isOnSameNodeGroup(reader, node)) {
+        weight = 1;
+      } else if (isOnSameRack(reader, node)) {
+        weight = 2;
+      }
+    }
+    return weight;
+  }
 
+  /**
+   * Sort nodes array by their distances to <i>reader</i>.
+   * <p/>
+   * This is the same as
+   * {@link NetworkTopology#sortByDistance(Node, Node[], long)} except with a
+   * four-level network topology which contains the additional network distance
+   * of a "node group" which is between local and same rack.
+   * 
+   * @param reader Node where data will be read
+   * @param nodes Available replicas with the requested data
+   * @param seed Used to seed the pseudo-random generator that randomizes the
+   *          set of nodes at each network distance.
+   */
+  @Override
+  public void sortByDistance( Node reader, Node[] nodes, long seed) {
+    // If reader is not a datanode (not in NetworkTopology tree), we need to
+    // replace this reader with a sibling leaf node in tree.
     if (reader != null && !this.contains(reader)) {
-      // if reader is not a datanode (not in NetworkTopology tree), we will 
-      // replace this reader with a sibling leaf node in tree.
       Node nodeGroup = getNode(reader.getNetworkLocation());
       if (nodeGroup != null && nodeGroup instanceof InnerNode) {
         InnerNode parentNode = (InnerNode) nodeGroup;
@@ -276,62 +292,7 @@ public class NetworkTopologyWithNodeGroup extends NetworkTopology {
         return;
       }
     }
-    int tempIndex = 0;
-    int localRackNode = -1;
-    int localNodeGroupNode = -1;
-    if (reader != null) {  
-      //scan the array to find the local node & local rack node
-      for (int i = 0; i < nodes.length; i++) {
-        if (tempIndex == 0 && reader == nodes[i]) { //local node
-          //swap the local node and the node at position 0
-          if (i != 0) {
-            swap(nodes, tempIndex, i);
-          }
-          tempIndex=1;
-
-          if (localRackNode != -1 && (localNodeGroupNode !=-1)) {
-            if (localRackNode == 0) {
-              localRackNode = i;
-            }
-            if (localNodeGroupNode == 0) {
-              localNodeGroupNode = i;
-            }
-            break;
-          }
-        } else if (localNodeGroupNode == -1 && isOnSameNodeGroup(reader, 
-            nodes[i])) {
-          //local node group
-          localNodeGroupNode = i;
-          // node local and rack local are already found
-          if(tempIndex != 0 && localRackNode != -1) break;
-        } else if (localRackNode == -1 && isOnSameRack(reader, nodes[i])) {
-          localRackNode = i;
-          if (tempIndex != 0 && localNodeGroupNode != -1) break;
-        }
-      }
-
-      // swap the local nodegroup node and the node at position tempIndex
-      if(localNodeGroupNode != -1 && localNodeGroupNode != tempIndex) {
-        swap(nodes, tempIndex, localNodeGroupNode);
-        if (localRackNode == tempIndex) {
-          localRackNode = localNodeGroupNode;
-        }
-        tempIndex++;
-      }
-
-      // swap the local rack node and the node at position tempIndex
-      if(localRackNode != -1 && localRackNode != tempIndex) {
-        swap(nodes, tempIndex, localRackNode);
-        tempIndex++;
-      }
-    }
-
-    // put a random node at position 0 if there is not a local/local-nodegroup/
-    // local-rack node
-    if (tempIndex == 0 && localNodeGroupNode == -1 && localRackNode == -1
-        && nodes.length != 0) {
-      swap(nodes, 0, r.nextInt(nodes.length));
-    }
+    super.sortByDistance(reader, nodes, seed);
   }
 
   /** InnerNodeWithNodeGroup represents a switch/router of a data center, rack

+ 26 - 26
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java

@@ -188,33 +188,33 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
     String locationProperty =
       resolvePropertyName(mode, SSL_TRUSTSTORE_LOCATION_TPL_KEY);
     String truststoreLocation = conf.get(locationProperty, "");
-    if (truststoreLocation.isEmpty()) {
-      throw new GeneralSecurityException("The property '" + locationProperty +
-        "' has not been set in the ssl configuration file.");
-    }
-
-    String passwordProperty = resolvePropertyName(mode,
-                                                  SSL_TRUSTSTORE_PASSWORD_TPL_KEY);
-    String truststorePassword = conf.get(passwordProperty, "");
-    if (truststorePassword.isEmpty()) {
-      throw new GeneralSecurityException("The property '" + passwordProperty +
-        "' has not been set in the ssl configuration file.");
+    if (!truststoreLocation.isEmpty()) {
+      String passwordProperty = resolvePropertyName(mode,
+          SSL_TRUSTSTORE_PASSWORD_TPL_KEY);
+      String truststorePassword = conf.get(passwordProperty, "");
+      if (truststorePassword.isEmpty()) {
+        throw new GeneralSecurityException("The property '" + passwordProperty +
+            "' has not been set in the ssl configuration file.");
+      }
+      long truststoreReloadInterval =
+          conf.getLong(
+              resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY),
+              DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL);
+
+      LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation);
+
+      trustManager = new ReloadingX509TrustManager(truststoreType,
+          truststoreLocation,
+          truststorePassword,
+          truststoreReloadInterval);
+      trustManager.init();
+      LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation);
+      trustManagers = new TrustManager[]{trustManager};
+    } else {
+      LOG.warn("The property '" + locationProperty + "' has not been set, " +
+          "no TrustStore will be loaded");
+      trustManagers = null;
     }
-    long truststoreReloadInterval =
-      conf.getLong(
-        resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY),
-        DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL);
-
-    LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation);
-
-    trustManager = new ReloadingX509TrustManager(truststoreType,
-                                                 truststoreLocation,
-                                                 truststorePassword,
-                                                 truststoreReloadInterval);
-    trustManager.init();
-    LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation);
-
-    trustManagers = new TrustManager[]{trustManager};
   }
 
   /**
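
For illustration, a hedged sketch of the behavioral change (configuration content is assumed): an SSLFactory backed by this factory can now initialize when ssl.client.truststore.location is absent; the new else-branch only logs a warning, leaves trustManagers null, and the JVM's default trust settings then apply.

    Configuration conf = new Configuration();
    // Assume the referenced ssl-client.xml defines a keystore but deliberately
    // omits ssl.client.truststore.location.
    SSLFactory factory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
    factory.init();     // used to throw GeneralSecurityException, now just warns
    // ... factory.createSSLSocketFactory() ...
    factory.destroy();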

+ 10 - 11
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -1309,18 +1309,17 @@
 </property>
 
 <property>
-  <name>nfs3.server.port</name>
-  <value>2049</value>
+  <name>nfs.exports.allowed.hosts</name>
+  <value>* rw</value>
   <description>
-      Specify the port number used by Hadoop NFS.
-  </description>
-</property>
-
-<property>
-  <name>nfs3.mountd.port</name>
-  <value>4242</value>
-  <description>
-      Specify the port number used by Hadoop mount daemon.
+    By default, the export can be mounted by any client. The value string 
+    contains machine name and access privilege, separated by whitespace 
+    characters. The machine name format can be a single host, a Java regular 
+    expression, or an IPv4 address. The access privilege uses rw or ro to 
+    specify read/write or read-only access of the machines to exports. If the 
+    access privilege is not provided, the default is read-only. Entries are separated by ";".
+    For example: "192.168.0.0/22 rw ; host.*\.example\.com ; host1.test.org ro;".
+    Only the NFS gateway needs to restart after this property is updated. 
   </description>
 </property>
 

+ 2 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java

@@ -196,6 +196,8 @@ public class TestHarFileSystem {
     public Map<String, byte[]> getXAttrs(Path path, List<String> names)
         throws IOException;
 
+    public List<String> listXAttrs(Path path) throws IOException;
+
     public void removeXAttr(Path path, String name) throws IOException;
 
     public AclStatus getAclStatus(Path path) throws IOException;

+ 110 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestGraphiteMetrics.java

@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.mockito.Mockito.*;
+
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.sink.GraphiteSink;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+
+public class TestGraphiteMetrics {
+    private AbstractMetric makeMetric(String name, Number value) {
+        AbstractMetric metric = mock(AbstractMetric.class);
+        when(metric.name()).thenReturn(name);
+        when(metric.value()).thenReturn(value);
+        return metric;
+    }
+
+    @Test
+    public void testPutMetrics() {
+        GraphiteSink sink = new GraphiteSink();
+        List<MetricsTag> tags = new ArrayList<MetricsTag>();
+        tags.add(new MetricsTag(MsInfo.Context, "all"));
+        tags.add(new MetricsTag(MsInfo.Hostname, "host"));
+        Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
+        metrics.add(makeMetric("foo1", 1.25));
+        metrics.add(makeMetric("foo2", 2.25));
+        MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics);
+
+        OutputStreamWriter writer = mock(OutputStreamWriter.class);
+        ArgumentCaptor<String> argument = ArgumentCaptor.forClass(String.class);
+
+        sink.setWriter(writer);
+        sink.putMetrics(record);
+
+        try {
+            verify(writer).write(argument.capture());
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+
+        String result = argument.getValue().toString();
+
+        assertEquals(true,
+            result.equals("null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n" +
+            "null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n") ||
+            result.equals("null.all.Context.Context=all.Hostname=host.foo2 2.25 10\n" + 
+            "null.all.Context.Context=all.Hostname=host.foo1 1.25 10\n"));
+    }
+
+    @Test
+    public void testPutMetrics2() {
+        GraphiteSink sink = new GraphiteSink();
+        List<MetricsTag> tags = new ArrayList<MetricsTag>();
+        tags.add(new MetricsTag(MsInfo.Context, "all"));
+        tags.add(new MetricsTag(MsInfo.Hostname, null));
+        Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
+        metrics.add(makeMetric("foo1", 1));
+        metrics.add(makeMetric("foo2", 2));
+        MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics);
+
+        OutputStreamWriter writer = mock(OutputStreamWriter.class);
+        ArgumentCaptor<String> argument = ArgumentCaptor.forClass(String.class);
+
+        sink.setWriter(writer);
+        sink.putMetrics(record);
+
+        try {
+            verify(writer).write(argument.capture());
+        } catch (IOException e) {
+            e.printStackTrace();
+        }
+
+        String result = argument.getValue().toString();
+
+        assertEquals(true,
+            result.equals("null.all.Context.Context=all.foo1 1 10\n" + 
+            "null.all.Context.Context=all.foo2 2 10\n") ||
+            result.equals("null.all.Context.Context=all.foo2 2 10\n" + 
+            "null.all.Context.Context=all.foo1 1 10\n"));
+    }
+}

+ 5 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetworkTopologyWithNodeGroup.java

@@ -96,7 +96,7 @@ public class TestNetworkTopologyWithNodeGroup {
   }
 
   @Test
-  public void testPseudoSortByDistance() throws Exception {
+  public void testSortByDistance() throws Exception {
     NodeBase[] testNodes = new NodeBase[4];
 
     // array contains both local node, local node group & local rack node
@@ -104,7 +104,7 @@ public class TestNetworkTopologyWithNodeGroup {
     testNodes[1] = dataNodes[2];
     testNodes[2] = dataNodes[3];
     testNodes[3] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[2]);
@@ -115,7 +115,7 @@ public class TestNetworkTopologyWithNodeGroup {
     testNodes[1] = dataNodes[4];
     testNodes[2] = dataNodes[1];
     testNodes[3] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
 
@@ -124,7 +124,7 @@ public class TestNetworkTopologyWithNodeGroup {
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[2];
     testNodes[3] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[2]);
 
@@ -133,7 +133,7 @@ public class TestNetworkTopologyWithNodeGroup {
     testNodes[1] = dataNodes[7];
     testNodes[2] = dataNodes[2];
     testNodes[3] = dataNodes[0];
-    cluster.pseudoSortByDistance(computeNode, testNodes );
+    cluster.sortByDistance(computeNode, testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[2]);
   }

+ 54 - 31
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java

@@ -76,8 +76,8 @@ public class KeyStoreTestUtil {
    * @throws GeneralSecurityException thrown if an Security error ocurred.
    */
   public static X509Certificate generateCertificate(String dn, KeyPair pair,
-                                                    int days, String algorithm)
-    throws GeneralSecurityException, IOException {
+      int days, String algorithm)
+      throws GeneralSecurityException, IOException {
     PrivateKey privkey = pair.getPrivate();
     X509CertInfo info = new X509CertInfo();
     Date from = new Date();
@@ -92,7 +92,7 @@ public class KeyStoreTestUtil {
     info.set(X509CertInfo.ISSUER, new CertificateIssuerName(owner));
     info.set(X509CertInfo.KEY, new CertificateX509Key(pair.getPublic()));
     info
-      .set(X509CertInfo.VERSION, new CertificateVersion(CertificateVersion.V3));
+        .set(X509CertInfo.VERSION, new CertificateVersion(CertificateVersion.V3));
     AlgorithmId algo = new AlgorithmId(AlgorithmId.md5WithRSAEncryption_oid);
     info.set(X509CertInfo.ALGORITHM_ID, new CertificateAlgorithmId(algo));
 
@@ -103,30 +103,30 @@ public class KeyStoreTestUtil {
     // Update the algorith, and resign.
     algo = (AlgorithmId) cert.get(X509CertImpl.SIG_ALG);
     info
-      .set(CertificateAlgorithmId.NAME + "." + CertificateAlgorithmId.ALGORITHM,
-           algo);
+        .set(CertificateAlgorithmId.NAME + "." + CertificateAlgorithmId.ALGORITHM,
+            algo);
     cert = new X509CertImpl(info);
     cert.sign(privkey, algorithm);
     return cert;
   }
 
   public static KeyPair generateKeyPair(String algorithm)
-    throws NoSuchAlgorithmException {
+      throws NoSuchAlgorithmException {
     KeyPairGenerator keyGen = KeyPairGenerator.getInstance(algorithm);
     keyGen.initialize(1024);
     return keyGen.genKeyPair();
   }
 
   private static KeyStore createEmptyKeyStore()
-    throws GeneralSecurityException, IOException {
+      throws GeneralSecurityException, IOException {
     KeyStore ks = KeyStore.getInstance("JKS");
     ks.load(null, null); // initialize
     return ks;
   }
 
   private static void saveKeyStore(KeyStore ks, String filename,
-                                   String password)
-    throws GeneralSecurityException, IOException {
+      String password)
+      throws GeneralSecurityException, IOException {
     FileOutputStream out = new FileOutputStream(filename);
     try {
       ks.store(out, password.toCharArray());
@@ -136,18 +136,18 @@ public class KeyStoreTestUtil {
   }
 
   public static void createKeyStore(String filename,
-                                    String password, String alias,
-                                    Key privateKey, Certificate cert)
-    throws GeneralSecurityException, IOException {
+      String password, String alias,
+      Key privateKey, Certificate cert)
+      throws GeneralSecurityException, IOException {
     KeyStore ks = createEmptyKeyStore();
     ks.setKeyEntry(alias, privateKey, password.toCharArray(),
-                   new Certificate[]{cert});
+        new Certificate[]{cert});
     saveKeyStore(ks, filename, password);
   }
 
   /**
    * Creates a keystore with a single key and saves it to a file.
-   * 
+   *
    * @param filename String file to save
    * @param password String store password to set on keystore
    * @param keyPassword String key password to set on key
@@ -158,27 +158,27 @@ public class KeyStoreTestUtil {
    * @throws IOException if there is an I/O error saving the file
    */
   public static void createKeyStore(String filename,
-                                    String password, String keyPassword, String alias,
-                                    Key privateKey, Certificate cert)
-    throws GeneralSecurityException, IOException {
+      String password, String keyPassword, String alias,
+      Key privateKey, Certificate cert)
+      throws GeneralSecurityException, IOException {
     KeyStore ks = createEmptyKeyStore();
     ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(),
-                   new Certificate[]{cert});
+        new Certificate[]{cert});
     saveKeyStore(ks, filename, password);
   }
 
   public static void createTrustStore(String filename,
-                                      String password, String alias,
-                                      Certificate cert)
-    throws GeneralSecurityException, IOException {
+      String password, String alias,
+      Certificate cert)
+      throws GeneralSecurityException, IOException {
     KeyStore ks = createEmptyKeyStore();
     ks.setCertificateEntry(alias, cert);
     saveKeyStore(ks, filename, password);
   }
 
   public static <T extends Certificate> void createTrustStore(
-    String filename, String password, Map<String, T> certs)
-    throws GeneralSecurityException, IOException {
+      String filename, String password, Map<String, T> certs)
+      throws GeneralSecurityException, IOException {
     KeyStore ks = createEmptyKeyStore();
     for (Map.Entry<String, T> cert : certs.entrySet()) {
       ks.setCertificateEntry(cert.getKey(), cert.getValue());
@@ -187,7 +187,7 @@ public class KeyStoreTestUtil {
   }
 
   public static void cleanupSSLConfig(String keystoresDir, String sslConfDir)
-    throws Exception {
+      throws Exception {
     File f = new File(keystoresDir + "/clientKS.jks");
     f.delete();
     f = new File(keystoresDir + "/serverKS.jks");
@@ -196,7 +196,7 @@ public class KeyStoreTestUtil {
     f.delete();
     f = new File(sslConfDir + "/ssl-client.xml");
     f.delete();
-    f = new File(sslConfDir +  "/ssl-server.xml");
+    f = new File(sslConfDir + "/ssl-server.xml");
     f.delete();
   }
 
@@ -205,22 +205,42 @@ public class KeyStoreTestUtil {
    * SSLFactory.  This includes keys, certs, keystores, truststores, the server
    * SSL configuration file, the client SSL configuration file, and the master
    * configuration file read by the SSLFactory.
-   * 
+   *
+   * @param keystoresDir String directory to save keystores
+   * @param sslConfDir String directory to save SSL configuration files
+   * @param conf Configuration master configuration to be used by an SSLFactory,
+   * which will be mutated by this method
+   * @param useClientCert boolean true to make the client present a cert in the
+   * SSL handshake
+   */
+  public static void setupSSLConfig(String keystoresDir, String sslConfDir,
+      Configuration conf, boolean useClientCert) throws Exception {
+    setupSSLConfig(keystoresDir, sslConfDir, conf, useClientCert, true);
+  }
+
+  /**
+   * Performs complete setup of SSL configuration in preparation for testing an
+   * SSLFactory.  This includes keys, certs, keystores, truststores, the server
+   * SSL configuration file, the client SSL configuration file, and the master
+   * configuration file read by the SSLFactory.
+   *
    * @param keystoresDir String directory to save keystores
    * @param sslConfDir String directory to save SSL configuration files
    * @param conf Configuration master configuration to be used by an SSLFactory,
-   *   which will be mutated by this method
+   * which will be mutated by this method
    * @param useClientCert boolean true to make the client present a cert in the
-   *   SSL handshake
+   * SSL handshake
+   * @param trustStore boolean true to create a truststore, false to skip creating it
    */
   public static void setupSSLConfig(String keystoresDir, String sslConfDir,
-                                    Configuration conf, boolean useClientCert)
+      Configuration conf, boolean useClientCert, boolean trustStore)
     throws Exception {
     String clientKS = keystoresDir + "/clientKS.jks";
     String clientPassword = "clientP";
     String serverKS = keystoresDir + "/serverKS.jks";
     String serverPassword = "serverP";
-    String trustKS = keystoresDir + "/trustKS.jks";
+    String trustKS = null;
     String trustPassword = "trustP";
 
     File sslClientConfFile = new File(sslConfDir + "/ssl-client.xml");
@@ -246,7 +266,10 @@ public class KeyStoreTestUtil {
                                     sKP.getPrivate(), sCert);
     certs.put("server", sCert);
 
-    KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs);
+    if (trustStore) {
+      trustKS = keystoresDir + "/trustKS.jks";
+      KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs);
+    }
 
     Configuration clientSSLConf = createClientSSLConfig(clientKS, clientPassword,
       clientPassword, trustKS);
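
A minimal sketch of driving the generic createTrustStore(Map) helper on its own, assuming this class's generateKeyPair/generateCertificate helpers and a keystoresDir test directory (uses java.security.KeyPair, java.security.cert.X509Certificate, java.util.HashMap/Map); entry names and the SHA1withRSA algorithm mirror what setupSSLConfig itself does:

    Map<String, X509Certificate> certs = new HashMap<String, X509Certificate>();
    KeyPair cKP = KeyStoreTestUtil.generateKeyPair("RSA");
    certs.put("client", KeyStoreTestUtil.generateCertificate(
        "CN=localhost, O=client", cKP, 30, "SHA1withRSA"));
    KeyPair sKP = KeyStoreTestUtil.generateKeyPair("RSA");
    certs.put("server", KeyStoreTestUtil.generateCertificate(
        "CN=localhost, O=server", sKP, 30, "SHA1withRSA"));
    // Both certificates end up as trusted entries in a single truststore file.
    KeyStoreTestUtil.createTrustStore(keystoresDir + "/trustKS.jks", "trustP", certs);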

+ 21 - 8
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java

@@ -50,11 +50,12 @@ public class TestSSLFactory {
     base.mkdirs();
   }
 
-  private Configuration createConfiguration(boolean clientCert)
+  private Configuration createConfiguration(boolean clientCert,
+      boolean trustStore)
     throws Exception {
     Configuration conf = new Configuration();
     KeyStoreTestUtil.setupSSLConfig(KEYSTORES_DIR, sslConfsDir, conf,
-      clientCert);
+      clientCert, trustStore);
     return conf;
   }
 
@@ -67,7 +68,7 @@ public class TestSSLFactory {
 
   @Test(expected = IllegalStateException.class)
   public void clientMode() throws Exception {
-    Configuration conf = createConfiguration(false);
+    Configuration conf = createConfiguration(false, true);
     SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
     try {
       sslFactory.init();
@@ -80,7 +81,7 @@ public class TestSSLFactory {
   }
 
   private void serverMode(boolean clientCert, boolean socket) throws Exception {
-    Configuration conf = createConfiguration(clientCert);
+    Configuration conf = createConfiguration(clientCert, true);
     SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
     try {
       sslFactory.init();
@@ -119,7 +120,7 @@ public class TestSSLFactory {
 
   @Test
   public void validHostnameVerifier() throws Exception {
-    Configuration conf = createConfiguration(false);
+    Configuration conf = createConfiguration(false, true);
     conf.unset(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY);
     SSLFactory sslFactory = new
       SSLFactory(SSLFactory.Mode.CLIENT, conf);
@@ -157,7 +158,7 @@ public class TestSSLFactory {
 
   @Test(expected = GeneralSecurityException.class)
   public void invalidHostnameVerifier() throws Exception {
-    Configuration conf = createConfiguration(false);
+    Configuration conf = createConfiguration(false, true);
     conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "foo");
     SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
     try {
@@ -169,7 +170,7 @@ public class TestSSLFactory {
 
   @Test
   public void testConnectionConfigurator() throws Exception {
-    Configuration conf = createConfiguration(false);
+    Configuration conf = createConfiguration(false, true);
     conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "STRICT_IE6");
     SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
     try {
@@ -275,7 +276,7 @@ public class TestSSLFactory {
 
   @Test
   public void testNoClientCertsInitialization() throws Exception {
-    Configuration conf = createConfiguration(false);
+    Configuration conf = createConfiguration(false, true);
     conf.unset(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY);
     SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
     try {
@@ -285,4 +286,16 @@ public class TestSSLFactory {
     }
   }
 
+  @Test
+  public void testNoTrustStore() throws Exception {
+    Configuration conf = createConfiguration(false, false);
+    conf.unset(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY);
+    SSLFactory sslFactory = new SSLFactory(SSLFactory.Mode.SERVER, conf);
+    try {
+      sslFactory.init();
+    } finally {
+      sslFactory.destroy();
+    }
+  }
+
 }

+ 3 - 6
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSCacheKeyProvider.java

@@ -135,14 +135,11 @@ public class KMSCacheKeyProvider extends KeyProvider {
 
   @Override
   public void deleteKey(String name) throws IOException {
-    Metadata metadata = provider.getMetadata(name);
-    List<String> versions = new ArrayList<String>(metadata.getVersions());
-    for (int i = 0; i < metadata.getVersions(); i++) {
-      versions.add(KeyProvider.buildVersionName(name, i));
-    }
     provider.deleteKey(name);
     currentKeyCache.invalidate(name);
-    keyVersionCache.invalidateAll(versions);
+    // Invalidate all cached key versions, since we cannot tell which ones
+    // belonged to the deleted key.
+    keyVersionCache.invalidateAll();
   }
 
   @Override
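
The coarse invalidateAll() above trades precision for simplicity. A hedged sketch of the assumed caching pattern, a Guava cache keyed by version name (the expiry value is illustrative, not the provider's actual setting; uses com.google.common.cache.Cache/CacheBuilder, java.util.concurrent.TimeUnit and KeyProvider.KeyVersion):

    Cache<String, KeyProvider.KeyVersion> keyVersionCache = CacheBuilder.newBuilder()
        .expireAfterAccess(10, TimeUnit.MINUTES)
        .build();

    // After provider.deleteKey(name): the deleted key's version names cannot be
    // enumerated cheaply, so drop everything; entries for surviving keys are
    // simply re-fetched from the underlying provider on the next lookup.
    keyVersionCache.invalidateAll();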

+ 6 - 1
hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java

@@ -577,7 +577,9 @@ public class TestKMS {
               Assert.fail(ex.toString());
             }
             try {
-              kp.getKeyVersion(KMSClientProvider.buildVersionName("k", 0));
+              // We are using JavaKeyStoreProvider for testing, so we know how
+              // the key version name is constructed.
+              kp.getKeyVersion("k@0");
               Assert.fail();
             } catch (AuthorizationException ex) {
               //NOP
@@ -717,6 +719,9 @@ public class TestKMS {
           }
         });
 
+        // Stop the reloader so it is not running while we write the new file.
+        KMSWebApp.getACLs().stopReloader();
+
         // test ACL reloading
         Thread.sleep(10); // to ensure the ACLs file modifiedTime is newer
         conf.set(KMSACLs.Type.CREATE.getConfigKey(), "foo");
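
The hard-coded "k@0" above relies on JavaKeyStoreProvider's version-name convention, assumed here to be the key name, an '@', and the version number:

    String keyName = "k";
    int version = 0;
    String versionName = keyName + "@" + version;  // "k@0", the first version of key "k"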

+ 9 - 7
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java

@@ -27,6 +27,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.commons.net.util.SubnetUtils;
 import org.apache.commons.net.util.SubnetUtils.SubnetInfo;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.util.LightWeightCache;
 import org.apache.hadoop.util.LightWeightGSet;
@@ -44,13 +45,14 @@ public class NfsExports {
   
   public static synchronized NfsExports getInstance(Configuration conf) {
     if (exports == null) {
-      String matchHosts = conf.get(Nfs3Constant.EXPORTS_ALLOWED_HOSTS_KEY,
-          Nfs3Constant.EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT);
-      int cacheSize = conf.getInt(Nfs3Constant.EXPORTS_CACHE_SIZE_KEY,
-          Nfs3Constant.EXPORTS_CACHE_SIZE_DEFAULT);
+      String matchHosts = conf.get(
+          CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY,
+          CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT);
+      int cacheSize = conf.getInt(Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_KEY,
+          Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_DEFAULT);
       long expirationPeriodNano = conf.getLong(
-          Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
-          Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000;
+          Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
+          Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000;
       exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
     }
     return exports;
@@ -140,7 +142,7 @@ public class NfsExports {
     accessCache = new LightWeightCache<AccessCacheEntry, AccessCacheEntry>(
         cacheSize, cacheSize, expirationPeriodNano, 0);        
     String[] matchStrings = matchHosts.split(
-        Nfs3Constant.EXPORTS_ALLOWED_HOSTS_SEPARATOR);
+        CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR);
     mMatches = new ArrayList<Match>(matchStrings.length);
     for(String mStr : matchStrings) {
       if (LOG.isDebugEnabled()) {
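
A hedged sketch of wiring the relocated keys together; the host list and address are examples, and it assumes the getAccessPrivilege(InetAddress) accessor and a caller that handles UnknownHostException (uses Configuration, CommonConfigurationKeys, Nfs3Constant, NfsExports, AccessPrivilege, java.net.InetAddress):

    Configuration conf = new Configuration();
    // ";"-separated "<host-or-subnet> <privilege>" entries; the default is "* rw".
    conf.set(CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY,
        "192.168.0.0/22 rw ; *.example.com ro");
    conf.setInt(Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_KEY, 256);

    NfsExports exports = NfsExports.getInstance(conf);
    AccessPrivilege priv =
        exports.getAccessPrivilege(InetAddress.getByName("192.168.1.10"));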

+ 7 - 18
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java

@@ -50,9 +50,6 @@ public class IdUserGroup {
   static final String MAC_GET_ALL_USERS_CMD = "dscl . -list /Users UniqueID";
   static final String MAC_GET_ALL_GROUPS_CMD = "dscl . -list /Groups PrimaryGroupID";
 
-  // Used for finding the configured static mapping file.
-  static final String NFS_STATIC_MAPPING_FILE_KEY = "dfs.nfs.static.mapping.file";
-  private static final String NFS_STATIC_MAPPING_FILE_DEFAULT = "/etc/nfs.map";
   private final File staticMappingFile;
 
   // Used for parsing the static mapping file.
@@ -61,11 +58,7 @@ public class IdUserGroup {
   private static final Pattern MAPPING_LINE =
       Pattern.compile("^(uid|gid)\\s+(\\d+)\\s+(\\d+)\\s*(#.*)?$");
 
-  // Do update every 15 minutes by default
-  final static long TIMEOUT_DEFAULT = 15 * 60 * 1000; // ms
-  final static long TIMEOUT_MIN = 1 * 60 * 1000; // ms
   final private long timeout;
-  final static String NFS_USERUPDATE_MILLY = "hadoop.nfs.userupdate.milly";
   
   // Maps for id to name map. Guarded by this object monitor lock
   private BiMap<Integer, String> uidNameMap = HashBiMap.create();
@@ -73,25 +66,21 @@ public class IdUserGroup {
 
   private long lastUpdateTime = 0; // Last time maps were updated
   
-  public IdUserGroup() throws IOException {
-    timeout = TIMEOUT_DEFAULT;
-    staticMappingFile = new File(NFS_STATIC_MAPPING_FILE_DEFAULT);
-    updateMaps();
-  }
-  
   public IdUserGroup(Configuration conf) throws IOException {
-    long updateTime = conf.getLong(NFS_USERUPDATE_MILLY, TIMEOUT_DEFAULT);
+    long updateTime = conf.getLong(
+        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY,
+        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT);
     // Minimal interval is 1 minute
-    if (updateTime < TIMEOUT_MIN) {
+    if (updateTime < Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_MIN) {
       LOG.info("User configured user account update time is less"
           + " than 1 minute. Use 1 minute instead.");
-      timeout = TIMEOUT_MIN;
+      timeout = Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_MIN;
     } else {
       timeout = updateTime;
     }
     
-    String staticFilePath = conf.get(NFS_STATIC_MAPPING_FILE_KEY,
-        NFS_STATIC_MAPPING_FILE_DEFAULT);
+    String staticFilePath = conf.get(Nfs3Constant.NFS_STATIC_MAPPING_FILE_KEY,
+        Nfs3Constant.NFS_STATIC_MAPPING_FILE_DEFAULT);
     staticMappingFile = new File(staticFilePath);
     
     updateMaps();
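
A minimal sketch of the remaining constructor's two knobs (values are illustrative; note that construction immediately runs updateMaps() against the local account databases and can throw IOException):

    Configuration conf = new Configuration();
    conf.setLong(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY, 5 * 60 * 1000L); // refresh every 5 min
    conf.set(Nfs3Constant.NFS_STATIC_MAPPING_FILE_KEY, "/etc/nfs.map");

    // Anything below NFS_USERGROUP_UPDATE_MILLIS_MIN (1 minute) is clamped up to 1 minute.
    IdUserGroup iug = new IdUserGroup(conf);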

+ 2 - 5
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java

@@ -33,7 +33,6 @@ import org.apache.hadoop.util.ShutdownHookManager;
 public abstract class Nfs3Base {
   public static final Log LOG = LogFactory.getLog(Nfs3Base.class);
   private final RpcProgram rpcProgram;
-  private final int nfsPort;
   private int nfsBoundPort; // Will set after server starts
     
   public RpcProgram getRpcProgram() {
@@ -42,9 +41,7 @@ public abstract class Nfs3Base {
 
   protected Nfs3Base(RpcProgram rpcProgram, Configuration conf) {
     this.rpcProgram = rpcProgram;
-    this.nfsPort = conf.getInt(Nfs3Constant.NFS3_SERVER_PORT,
-        Nfs3Constant.NFS3_SERVER_PORT_DEFAULT);
-    LOG.info("NFS server port set to: " + nfsPort);
+    LOG.info("NFS server port set to: " + rpcProgram.getPort());
   }
 
   public void start(boolean register) {
@@ -58,7 +55,7 @@ public abstract class Nfs3Base {
   }
 
   private void startTCPServer() {
-    SimpleTcpServer tcpServer = new SimpleTcpServer(nfsPort,
+    SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
         rpcProgram, 0);
     rpcProgram.startDaemons();
     tcpServer.run();

+ 12 - 30
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java

@@ -25,10 +25,6 @@ public class Nfs3Constant {
   // The local rpcbind/portmapper port.
   public final static int SUN_RPCBIND = 111;
 
-  // The IP port number for NFS.
-  public final static String NFS3_SERVER_PORT = "nfs3.server.port";
-  public final static int NFS3_SERVER_PORT_DEFAULT = 2049;
-
   // The RPC program number for NFS.
   public final static int PROGRAM = 100003;
 
@@ -191,36 +187,22 @@ public class Nfs3Constant {
   public final static int CREATE_GUARDED = 1;
   public final static int CREATE_EXCLUSIVE = 2;
   
-  public static final String EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
-  /** Allowed hosts for nfs exports */
-  public static final String EXPORTS_ALLOWED_HOSTS_KEY = "dfs.nfs.exports.allowed.hosts";
-  public static final String EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
   /** Size for nfs exports cache */
-  public static final String EXPORTS_CACHE_SIZE_KEY = "dfs.nfs.exports.cache.size";
-  public static final int EXPORTS_CACHE_SIZE_DEFAULT = 512;
+  public static final String NFS_EXPORTS_CACHE_SIZE_KEY = "nfs.exports.cache.size";
+  public static final int NFS_EXPORTS_CACHE_SIZE_DEFAULT = 512;
   /** Expiration time for nfs exports cache entry */
-  public static final String EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "dfs.nfs.exports.cache.expirytime.millis";
-  public static final long EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT = 15 * 60 * 1000; // 15 min
-
-  public static final String FILE_DUMP_DIR_KEY = "dfs.nfs3.dump.dir";
-  public static final String FILE_DUMP_DIR_DEFAULT = "/tmp/.hdfs-nfs";
-  public static final String ENABLE_FILE_DUMP_KEY = "dfs.nfs3.enableDump";
-  public static final boolean ENABLE_FILE_DUMP_DEFAULT = true;
-  public static final String MAX_READ_TRANSFER_SIZE_KEY = "dfs.nfs.rtmax";
-  public static final int MAX_READ_TRANSFER_SIZE_DEFAULT = 1024 * 1024;
-  public static final String MAX_WRITE_TRANSFER_SIZE_KEY = "dfs.nfs.wtmax";
-  public static final int MAX_WRITE_TRANSFER_SIZE_DEFAULT = 1024 * 1024;
-  public static final String MAX_READDIR_TRANSFER_SIZE_KEY = "dfs.nfs.dtmax";
-  public static final int MAX_READDIR_TRANSFER_SIZE_DEFAULT = 64 * 1024;
-  public static final String MAX_OPEN_FILES = "dfs.nfs3.max.open.files";
-  public static final int MAX_OPEN_FILES_DEFAULT = 256;
-  public static final String OUTPUT_STREAM_TIMEOUT = "dfs.nfs3.stream.timeout";
-  public static final long OUTPUT_STREAM_TIMEOUT_DEFAULT = 10 * 60 * 1000; // 10 minutes
-  public static final long OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT = 10 * 1000; //10 seconds
+  public static final String NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "nfs.exports.cache.expirytime.millis";
+  public static final long NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT = 15 * 60 * 1000; // 15 min
+
+  /** Do user/group update every 15 minutes by default, minimum 1 minute */
+  public final static String NFS_USERGROUP_UPDATE_MILLIS_KEY = "nfs.usergroup.update.millis";
+  public final static long NFS_USERGROUP_UPDATE_MILLIS_DEFAULT = 15 * 60 * 1000; // ms
+  final static long NFS_USERGROUP_UPDATE_MILLIS_MIN = 1 * 60 * 1000; // ms
   
   public final static String UNKNOWN_USER = "nobody";
   public final static String UNKNOWN_GROUP = "nobody";
   
-  public final static String EXPORT_POINT = "dfs.nfs3.export.point";
-  public final static String EXPORT_POINT_DEFAULT = "/";
+  // Used for finding the configured static mapping file.
+  public static final String NFS_STATIC_MAPPING_FILE_KEY = "nfs.static.mapping.file";
+  public static final String NFS_STATIC_MAPPING_FILE_DEFAULT = "/etc/nfs.map";
 }

+ 3 - 6
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java

@@ -17,11 +17,8 @@
  */
 package org.apache.hadoop.nfs;
 
-import org.junit.Assert;
-
-import org.apache.hadoop.nfs.AccessPrivilege;
-import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+import org.junit.Assert;
 import org.junit.Test;
 
 public class TestNfsExports {
@@ -32,9 +29,9 @@ public class TestNfsExports {
   private final String hostname2 = "a.b.org";
   
   private static final long ExpirationPeriod = 
-      Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT * 1000 * 1000;
+      Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT * 1000 * 1000;
   
-  private static final int CacheSize = Nfs3Constant.EXPORTS_CACHE_SIZE_DEFAULT;
+  private static final int CacheSize = Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_DEFAULT;
   private static final long NanosPerMillis = 1000000;
 
   @Test

+ 9 - 7
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java

@@ -199,17 +199,19 @@ public class TestIdUserGroup {
 
   @Test
   public void testUserUpdateSetting() throws IOException {
-    IdUserGroup iug = new IdUserGroup();
-    assertEquals(iug.getTimeout(), IdUserGroup.TIMEOUT_DEFAULT);
+    IdUserGroup iug = new IdUserGroup(new Configuration());
+    assertEquals(iug.getTimeout(),
+        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT);
 
     Configuration conf = new Configuration();
-    conf.setLong(IdUserGroup.NFS_USERUPDATE_MILLY, 0);
+    conf.setLong(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY, 0);
     iug = new IdUserGroup(conf);
-    assertEquals(iug.getTimeout(), IdUserGroup.TIMEOUT_MIN);
+    assertEquals(iug.getTimeout(), Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_MIN);
 
-    conf.setLong(IdUserGroup.NFS_USERUPDATE_MILLY,
-        IdUserGroup.TIMEOUT_DEFAULT * 2);
+    conf.setLong(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY,
+        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT * 2);
     iug = new IdUserGroup(conf);
-    assertEquals(iug.getTimeout(), IdUserGroup.TIMEOUT_DEFAULT * 2);
+    assertEquals(iug.getTimeout(),
+        Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_DEFAULT * 2);
   }
 }

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java

@@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.lib.server.BaseService;
 import org.apache.hadoop.lib.server.ServiceException;
 import org.apache.hadoop.lib.service.FileSystemAccess;
@@ -395,6 +396,10 @@ public class FileSystemAccessService extends BaseService implements FileSystemAc
     Configuration conf = new Configuration(true);
     ConfigurationUtils.copy(serviceHadoopConf, conf);
     conf.setBoolean(FILE_SYSTEM_SERVICE_CREATED, true);
+
+    // Force-clear server-side umask to make HttpFS match WebHDFS behavior
+    conf.set(FsPermission.UMASK_LABEL, "000");
+
     return conf;
   }
 

+ 99 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java

@@ -231,6 +231,105 @@ public class TestHttpFSServer extends HFSTestCase {
     reader.close();
   }
 
+  /**
+   * Talks to the http interface to create a file.
+   *
+   * @param filename The file to create
+   * @param perms The permission field, if any (may be null)
+   * @throws Exception
+   */
+  private void createWithHttp(String filename, String perms)
+          throws Exception {
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    String pathOps;
+    if (perms == null) {
+      pathOps = MessageFormat.format(
+              "/webhdfs/v1/{0}?user.name={1}&op=CREATE",
+              filename, user);
+    } else {
+      pathOps = MessageFormat.format(
+              "/webhdfs/v1/{0}?user.name={1}&permission={2}&op=CREATE",
+              filename, user, perms);
+    }
+    URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.addRequestProperty("Content-Type", "application/octet-stream");
+    conn.setRequestMethod("PUT");
+    conn.connect();
+    Assert.assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode());
+  }
+
+  /**
+   * Talks to the http interface to get the json output of the GETFILESTATUS
+   * command on the given file.
+   *
+   * @param filename The file to query.
+   * @return A string containing the JSON output describing the file.
+   * @throws Exception
+   */
+  private String getFileStatus(String filename) throws Exception {
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    String pathOps = MessageFormat.format(
+            "/webhdfs/v1/{0}?user.name={1}&op=GETFILESTATUS",
+            filename, user);
+    URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.connect();
+    Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+
+    BufferedReader reader =
+            new BufferedReader(new InputStreamReader(conn.getInputStream()));
+
+    return reader.readLine();
+  }
+
+  /**
+   * Given the JSON output from the GETFILESTATUS call, return the
+   * 'permission' value.
+   *
+   * @param statusJson JSON from GETFILESTATUS
+   * @return The value of 'permission' in statusJson
+   * @throws Exception
+   */
+  private String getPerms(String statusJson) throws Exception {
+    JSONParser parser = new JSONParser();
+    JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
+    JSONObject details = (JSONObject) jsonObject.get("FileStatus");
+    return (String) details.get("permission");
+  }
+
+  /**
+   * Validate that files are created with 755 permissions when no
+   * 'permission' parameter is specified, and that when 'permission'
+   * is specified, its value is honored.
+   */
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testPerms() throws Exception {
+    createHttpFSServer(false);
+
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    fs.mkdirs(new Path("/perm"));
+
+    createWithHttp("/perm/none", null);
+    String statusJson = getFileStatus("/perm/none");
+    Assert.assertTrue("755".equals(getPerms(statusJson)));
+
+    createWithHttp("/perm/p-777", "777");
+    statusJson = getFileStatus("/perm/p-777");
+    Assert.assertTrue("777".equals(getPerms(statusJson)));
+
+    createWithHttp("/perm/p-654", "654");
+    statusJson = getFileStatus("/perm/p-654");
+    Assert.assertTrue("654".equals(getPerms(statusJson)));
+
+    createWithHttp("/perm/p-321", "321");
+    statusJson = getFileStatus("/perm/p-321");
+    Assert.assertTrue("321".equals(getPerms(statusJson)));
+  }
+
   @Test
   @TestDir
   @TestJetty

+ 57 - 0
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java

@@ -0,0 +1,57 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.hdfs.nfs.conf;
+
+public class NfsConfigKeys {
+
+  // The IP port number for NFS and mountd.
+  public final static String DFS_NFS_SERVER_PORT_KEY = "nfs.server.port";
+  public final static int DFS_NFS_SERVER_PORT_DEFAULT = 2049;
+  public final static String DFS_NFS_MOUNTD_PORT_KEY = "nfs.mountd.port";
+  public final static int DFS_NFS_MOUNTD_PORT_DEFAULT = 4242;
+  
+  public static final String DFS_NFS_FILE_DUMP_KEY = "nfs.file.dump";
+  public static final boolean DFS_NFS_FILE_DUMP_DEFAULT = true;
+  public static final String DFS_NFS_FILE_DUMP_DIR_KEY = "nfs.file.dump.dir";
+  public static final String DFS_NFS_FILE_DUMP_DIR_DEFAULT = "/tmp/.hdfs-nfs";
+  
+  public static final String DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY = "nfs.rtmax";
+  public static final int DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT = 1024 * 1024;
+  public static final String DFS_NFS_MAX_WRITE_TRANSFER_SIZE_KEY = "nfs.wtmax";
+  public static final int DFS_NFS_MAX_WRITE_TRANSFER_SIZE_DEFAULT = 1024 * 1024;
+  public static final String DFS_NFS_MAX_READDIR_TRANSFER_SIZE_KEY = "nfs.dtmax";
+  public static final int DFS_NFS_MAX_READDIR_TRANSFER_SIZE_DEFAULT = 64 * 1024;
+
+  public static final String DFS_NFS_MAX_OPEN_FILES_KEY = "nfs.max.open.files";
+  public static final int DFS_NFS_MAX_OPEN_FILES_DEFAULT = 256;
+
+  public static final String DFS_NFS_STREAM_TIMEOUT_KEY = "nfs.stream.timeout";
+  public static final long DFS_NFS_STREAM_TIMEOUT_DEFAULT = 10 * 60 * 1000; // 10 minutes
+  public static final long DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT = 10 * 1000; // 10 seconds
+
+  public final static String DFS_NFS_EXPORT_POINT_KEY = "nfs.export.point";
+  public final static String DFS_NFS_EXPORT_POINT_DEFAULT = "/";
+  
+  public static final String DFS_NFS_KEYTAB_FILE_KEY = "nfs.keytab.file";
+  public static final String DFS_NFS_KERBEROS_PRINCIPAL_KEY = "nfs.kerberos.principal";
+  public static final String DFS_NFS_REGISTRATION_PORT_KEY = "nfs.registration.port";
+  public static final int    DFS_NFS_REGISTRATION_PORT_DEFAULT = 40; // Currently unassigned.
+  public static final String  DFS_NFS_ALLOW_INSECURE_PORTS_KEY = "nfs.allow.insecure.ports";
+  public static final boolean DFS_NFS_ALLOW_INSECURE_PORTS_DEFAULT = true;
+}

+ 54 - 0
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfiguration.java

@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.nfs.conf;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configuration.DeprecationDelta;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
+
+/**
+ * Adds deprecated keys into the configuration.
+ */
+public class NfsConfiguration extends HdfsConfiguration {
+  static {
+    addDeprecatedKeys();
+  }
+
+  private static void addDeprecatedKeys() {
+    Configuration.addDeprecations(new DeprecationDelta[] {
+        new DeprecationDelta("nfs3.server.port",
+            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY),
+        new DeprecationDelta("nfs3.mountd.port",
+            NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY),
+        new DeprecationDelta("dfs.nfs.exports.cache.expirytime.millis",
+            Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY),
+        new DeprecationDelta("hadoop.nfs.userupdate.milly",
+            Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY),
+        new DeprecationDelta("dfs.nfs3.enableDump",
+            NfsConfigKeys.DFS_NFS_FILE_DUMP_KEY),
+        new DeprecationDelta("dfs.nfs3.dump.dir",
+            NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY),
+        new DeprecationDelta("dfs.nfs3.max.open.files",
+            NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY),
+        new DeprecationDelta("dfs.nfs3.stream.timeout",
+            NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY),
+        new DeprecationDelta("dfs.nfs3.export.point",
+            NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY) });
+  }
+}
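
A hedged sketch of what the deprecation table buys: a configuration that still sets a legacy key remains readable through the new name (the value is illustrative):

    NfsConfiguration conf = new NfsConfiguration();
    conf.setInt("dfs.nfs3.max.open.files", 128);   // legacy key name
    int maxOpen = conf.getInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY,
        NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_DEFAULT);  // 128, resolved via the deprecation mapping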

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/Mountd.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.nfs.mount;
 import java.io.IOException;
 import java.net.DatagramSocket;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.mount.MountdBase;
 
 /**
@@ -32,13 +32,13 @@ import org.apache.hadoop.mount.MountdBase;
  */
 public class Mountd extends MountdBase {
 
-  public Mountd(Configuration config, DatagramSocket registrationSocket,
+  public Mountd(NfsConfiguration config, DatagramSocket registrationSocket,
       boolean allowInsecurePorts) throws IOException {
     super(new RpcProgramMountd(config, registrationSocket, allowInsecurePorts));
   }
   
   public static void main(String[] args) throws IOException {
-    Configuration config = new Configuration();
+    NfsConfiguration config = new NfsConfiguration();
     Mountd mountd = new Mountd(config, null, true);
     mountd.start(true);
   }

+ 13 - 15
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java

@@ -16,9 +16,6 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs.nfs.mount;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NFS_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY;
-
 import java.io.IOException;
 import java.net.DatagramSocket;
 import java.net.InetAddress;
@@ -29,8 +26,9 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.mount.MountEntry;
@@ -39,7 +37,6 @@ import org.apache.hadoop.mount.MountResponse;
 import org.apache.hadoop.nfs.AccessPrivilege;
 import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.RpcCall;
@@ -66,9 +63,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
   public static final int VERSION_1 = 1;
   public static final int VERSION_2 = 2;
   public static final int VERSION_3 = 3;
-  public static final int PORT = 4242;
 
-  // Need DFSClient for branch-1 to get ExtendedHdfsFileStatus
   private final DFSClient dfsClient;
   
   /** Synchronized list */
@@ -79,19 +74,22 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
   
   private final NfsExports hostsMatcher;
 
-  public RpcProgramMountd(Configuration config, DatagramSocket registrationSocket,
-      boolean allowInsecurePorts) throws IOException {
+  public RpcProgramMountd(NfsConfiguration config,
+      DatagramSocket registrationSocket, boolean allowInsecurePorts)
+      throws IOException {
     // Note that RPC cache is not enabled
-    super("mountd", "localhost", config.getInt("nfs3.mountd.port", PORT),
-        PROGRAM, VERSION_1, VERSION_3, registrationSocket, allowInsecurePorts);
+    super("mountd", "localhost", config.getInt(
+        NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY,
+        NfsConfigKeys.DFS_NFS_MOUNTD_PORT_DEFAULT), PROGRAM, VERSION_1,
+        VERSION_3, registrationSocket, allowInsecurePorts);
     exports = new ArrayList<String>();
-    exports.add(config.get(Nfs3Constant.EXPORT_POINT,
-        Nfs3Constant.EXPORT_POINT_DEFAULT));
+    exports.add(config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
+        NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT));
     this.hostsMatcher = NfsExports.getInstance(config);
     this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
     UserGroupInformation.setConfiguration(config);
-    SecurityUtil.login(config, DFS_NFS_KEYTAB_FILE_KEY,
-            DFS_NFS_KERBEROS_PRINCIPAL_KEY);
+    SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
+        NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
     this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
   }
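
A minimal sketch of standing up mountd with the relocated keys; port 0 requests an ephemeral port (the same trick the tests use), the export point is an example path, and a reachable NameNode per fs.defaultFS is assumed:

    NfsConfiguration conf = new NfsConfiguration();
    conf.setInt(NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY, 0);          // ephemeral port
    conf.set(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, "/myexport1"); // example export
    Mountd mountd = new Mountd(conf, null, true);                   // no registration socket, allow insecure ports
    mountd.start(false);                                            // skip portmap registration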
   

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java

@@ -30,10 +30,10 @@ import java.util.concurrent.TimeUnit;
 
 import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -72,7 +72,7 @@ class DFSClientCache {
   final static int DEFAULT_DFS_INPUTSTREAM_CACHE_SIZE = 1024;
   final static int DEFAULT_DFS_INPUTSTREAM_CACHE_TTL = 10 * 60;
 
-  private final Configuration config;
+  private final NfsConfiguration config;
 
   private static class DFSInputStreamCaheKey {
     final String userId;
@@ -99,11 +99,11 @@ class DFSClientCache {
     }
   }
 
-  DFSClientCache(Configuration config) {
+  DFSClientCache(NfsConfiguration config) {
     this(config, DEFAULT_DFS_CLIENT_CACHE_SIZE);
   }
   
-  DFSClientCache(Configuration config, int clientCache) {
+  DFSClientCache(NfsConfiguration config, int clientCache) {
     this.config = config;
     this.clientCache = CacheBuilder.newBuilder()
         .maximumSize(clientCache)

+ 8 - 13
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java

@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs.nfs.nfs3;
 import java.io.IOException;
 import java.net.DatagramSocket;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.mount.Mountd;
 import org.apache.hadoop.nfs.nfs3.Nfs3Base;
 import org.apache.hadoop.util.StringUtils;
@@ -36,16 +36,11 @@ import com.google.common.annotations.VisibleForTesting;
 public class Nfs3 extends Nfs3Base {
   private Mountd mountd;
   
-  static {
-    Configuration.addDefaultResource("hdfs-default.xml");
-    Configuration.addDefaultResource("hdfs-site.xml");
-  }
-  
-  public Nfs3(Configuration conf) throws IOException {
+  public Nfs3(NfsConfiguration conf) throws IOException {
     this(conf, null, true);
   }
   
-  public Nfs3(Configuration conf, DatagramSocket registrationSocket,
+  public Nfs3(NfsConfiguration conf, DatagramSocket registrationSocket,
       boolean allowInsecurePorts) throws IOException {
     super(new RpcProgramNfs3(conf, registrationSocket, allowInsecurePorts), conf);
     mountd = new Mountd(conf, registrationSocket, allowInsecurePorts);
@@ -64,11 +59,11 @@ public class Nfs3 extends Nfs3Base {
   static void startService(String[] args,
       DatagramSocket registrationSocket) throws IOException {
     StringUtils.startupShutdownMessage(Nfs3.class, args, LOG);
-    Configuration conf = new Configuration();
+    NfsConfiguration conf = new NfsConfiguration();
     boolean allowInsecurePorts = conf.getBoolean(
-        DFSConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_KEY,
-        DFSConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_DEFAULT);
-    final Nfs3 nfsServer = new Nfs3(new Configuration(), registrationSocket,
+        NfsConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_KEY,
+        NfsConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_DEFAULT);
+    final Nfs3 nfsServer = new Nfs3(conf, registrationSocket,
         allowInsecurePorts);
     nfsServer.startServiceInternal(true);
   }
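
A brief sketch of the one flag startService reads before constructing the gateway (default is true; when set to false, requests from non-privileged client ports are assumed here to be rejected by the RPC layer):

    NfsConfiguration conf = new NfsConfiguration();
    conf.setBoolean(NfsConfigKeys.DFS_NFS_ALLOW_INSECURE_PORTS_KEY, false);
    // A gateway built from this conf only serves clients whose source port is
    // privileged (< 1024).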

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.nfs3.WriteCtx.DataState;
 import org.apache.hadoop.io.BytesWritable.Comparator;
 import org.apache.hadoop.io.IOUtils;
@@ -823,7 +824,7 @@ class OpenFileCtx {
    */
   public synchronized boolean streamCleanup(long fileId, long streamTimeout) {
     Preconditions
-        .checkState(streamTimeout >= Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT);
+        .checkState(streamTimeout >= NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
     if (!activeState) {
       return true;
     }

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java

@@ -24,9 +24,9 @@ import java.util.concurrent.ConcurrentMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Time;
 
@@ -48,9 +48,9 @@ class OpenFileCtxCache {
   private final long streamTimeout;
   private final StreamMonitor streamMonitor;
 
-  OpenFileCtxCache(Configuration config, long streamTimeout) {
-    maxStreams = config.getInt(Nfs3Constant.MAX_OPEN_FILES,
-        Nfs3Constant.MAX_OPEN_FILES_DEFAULT);
+  OpenFileCtxCache(NfsConfiguration config, long streamTimeout) {
+    maxStreams = config.getInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY,
+        NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_DEFAULT);
     LOG.info("Maximum open streams is " + maxStreams);
     this.streamTimeout = streamTimeout;
     streamMonitor = new StreamMonitor();
@@ -102,7 +102,7 @@ class OpenFileCtxCache {
     } else {
       long idleTime = Time.monotonicNow()
           - idlest.getValue().getLastAccessTime();
-      if (idleTime < Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT) {
+      if (idleTime < NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("idlest stream's idle time:" + idleTime);
         }

+ 6 - 7
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/PrivilegedNfsGatewayStarter.java

@@ -21,9 +21,8 @@ import java.net.InetSocketAddress;
 
 import org.apache.commons.daemon.Daemon;
 import org.apache.commons.daemon.DaemonContext;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 
 /**
  * This class is used to allow the initial registration of the NFS gateway with
@@ -42,12 +41,12 @@ public class PrivilegedNfsGatewayStarter implements Daemon {
   @Override
   public void init(DaemonContext context) throws Exception {
     System.err.println("Initializing privileged NFS client socket...");
-    Configuration conf = new HdfsConfiguration();
-    int clientPort = conf.getInt(DFSConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY,
-        DFSConfigKeys.DFS_NFS_REGISTRATION_PORT_DEFAULT);
+    NfsConfiguration conf = new NfsConfiguration();
+    int clientPort = conf.getInt(NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY,
+        NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_DEFAULT);
     if (clientPort < 1 || clientPort > 1023) {
       throw new RuntimeException("Must start privileged NFS server with '" +
-          DFSConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY + "' configured to a " +
+          NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY + "' configured to a " +
           "privileged port.");
     }
     registrationSocket = new DatagramSocket(

+ 28 - 25
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java

@@ -28,7 +28,6 @@ import java.util.EnumSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
@@ -41,6 +40,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -131,9 +132,6 @@ import org.jboss.netty.channel.ChannelHandlerContext;
 
 import com.google.common.annotations.VisibleForTesting;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NFS_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY;
-
 /**
  * RPC program corresponding to nfs daemon. See {@link Nfs3}.
  */
@@ -144,7 +142,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   
   static final Log LOG = LogFactory.getLog(RpcProgramNfs3.class);
 
-  private final Configuration config = new Configuration();
+  private final NfsConfiguration config;
   private final WriteManager writeManager;
   private final IdUserGroup iug;
   private final DFSClientCache clientCache;
@@ -159,15 +157,17 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
   
   private final RpcCallCache rpcCallCache;
 
-  public RpcProgramNfs3(Configuration config, DatagramSocket registrationSocket,
+  public RpcProgramNfs3(NfsConfiguration config, DatagramSocket registrationSocket,
       boolean allowInsecurePorts) throws IOException {
-    super("NFS3", "localhost", config.getInt(Nfs3Constant.NFS3_SERVER_PORT,
-        Nfs3Constant.NFS3_SERVER_PORT_DEFAULT), Nfs3Constant.PROGRAM,
+    super("NFS3", "localhost", config.getInt(
+        NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
+        NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT), Nfs3Constant.PROGRAM,
         Nfs3Constant.VERSION, Nfs3Constant.VERSION, registrationSocket,
         allowInsecurePorts);
    
+    this.config = config;
     config.set(FsPermission.UMASK_LABEL, "000");
-    iug = new IdUserGroup();
+    iug = new IdUserGroup(config);
     
     exports = NfsExports.getInstance(config);
     writeManager = new WriteManager(iug, config);
@@ -180,13 +180,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
         CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
     
-    writeDumpDir = config.get(Nfs3Constant.FILE_DUMP_DIR_KEY,
-        Nfs3Constant.FILE_DUMP_DIR_DEFAULT);
-    boolean enableDump = config.getBoolean(Nfs3Constant.ENABLE_FILE_DUMP_KEY,
-        Nfs3Constant.ENABLE_FILE_DUMP_DEFAULT);
+    writeDumpDir = config.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY,
+        NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_DEFAULT);
+    boolean enableDump = config.getBoolean(NfsConfigKeys.DFS_NFS_FILE_DUMP_KEY,
+        NfsConfigKeys.DFS_NFS_FILE_DUMP_DEFAULT);
     UserGroupInformation.setConfiguration(config);
-    SecurityUtil.login(config, DFS_NFS_KEYTAB_FILE_KEY,
-            DFS_NFS_KERBEROS_PRINCIPAL_KEY);
+    SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
+        NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
 
     if (!enableDump) {
       writeDumpDir = null;
@@ -567,8 +567,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
             + handle.getFileId());
         return new READLINK3Response(Nfs3Status.NFS3ERR_SERVERFAULT);
       }
-      int rtmax = config.getInt(Nfs3Constant.MAX_READ_TRANSFER_SIZE_KEY,
-              Nfs3Constant.MAX_READ_TRANSFER_SIZE_DEFAULT);
+      int rtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY,
+          NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT);
       if (rtmax < target.getBytes().length) {
         LOG.error("Link size: " + target.getBytes().length
             + " is larger than max transfer size: " + rtmax);
@@ -665,8 +665,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     }
 
     try {
-      int rtmax = config.getInt(Nfs3Constant.MAX_READ_TRANSFER_SIZE_KEY,
-              Nfs3Constant.MAX_READ_TRANSFER_SIZE_DEFAULT);
+      int rtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY,
+          NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT);
       int buffSize = Math.min(rtmax, count);
       byte[] readbuffer = new byte[buffSize];
 
@@ -1740,12 +1740,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     }
 
     try {
-      int rtmax = config.getInt(Nfs3Constant.MAX_READ_TRANSFER_SIZE_KEY,
-              Nfs3Constant.MAX_READ_TRANSFER_SIZE_DEFAULT);
-      int wtmax = config.getInt(Nfs3Constant.MAX_WRITE_TRANSFER_SIZE_KEY,
-              Nfs3Constant.MAX_WRITE_TRANSFER_SIZE_DEFAULT);
-      int dtperf = config.getInt(Nfs3Constant.MAX_READDIR_TRANSFER_SIZE_KEY,
-              Nfs3Constant.MAX_READDIR_TRANSFER_SIZE_DEFAULT);
+      int rtmax = config.getInt(
+          NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY,
+          NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT);
+      int wtmax = config.getInt(
+          NfsConfigKeys.DFS_NFS_MAX_WRITE_TRANSFER_SIZE_KEY,
+          NfsConfigKeys.DFS_NFS_MAX_WRITE_TRANSFER_SIZE_DEFAULT);
+      int dtperf = config.getInt(
+          NfsConfigKeys.DFS_NFS_MAX_READDIR_TRANSFER_SIZE_KEY,
+          NfsConfigKeys.DFS_NFS_MAX_READDIR_TRANSFER_SIZE_DEFAULT);
 
       Nfs3FileAttributes attrs = Nfs3Utils.getFileAttr(dfsClient,
           Nfs3Utils.getFileIdPath(handle), iug);

+ 13 - 12
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java

@@ -21,10 +21,11 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.ipc.RemoteException;
@@ -50,7 +51,7 @@ import com.google.common.annotations.VisibleForTesting;
 public class WriteManager {
   public static final Log LOG = LogFactory.getLog(WriteManager.class);
 
-  private final Configuration config;
+  private final NfsConfiguration config;
   private final IdUserGroup iug;
  
   private AsyncDataService asyncDataService;
@@ -78,19 +79,19 @@ public class WriteManager {
     return fileContextCache.put(h, ctx);
   }
   
-  WriteManager(IdUserGroup iug, final Configuration config) {
+  WriteManager(IdUserGroup iug, final NfsConfiguration config) {
     this.iug = iug;
     this.config = config;
-    streamTimeout = config.getLong(Nfs3Constant.OUTPUT_STREAM_TIMEOUT,
-        Nfs3Constant.OUTPUT_STREAM_TIMEOUT_DEFAULT);
+    streamTimeout = config.getLong(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY,
+        NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_DEFAULT);
     LOG.info("Stream timeout is " + streamTimeout + "ms.");
-    if (streamTimeout < Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT) {
+    if (streamTimeout < NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT) {
       LOG.info("Reset stream timeout to minimum value "
-          + Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT + "ms.");
-      streamTimeout = Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT;
+          + NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT + "ms.");
+      streamTimeout = NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT;
     }
-    maxStreams = config.getInt(Nfs3Constant.MAX_OPEN_FILES,
-        Nfs3Constant.MAX_OPEN_FILES_DEFAULT);
+    maxStreams = config.getInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY,
+        NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_DEFAULT);
     LOG.info("Maximum open streams is "+ maxStreams);
     this.fileContextCache = new OpenFileCtxCache(config, streamTimeout);
   }
@@ -171,8 +172,8 @@ public class WriteManager {
       }
 
       // Add open stream
-      String writeDumpDir = config.get(Nfs3Constant.FILE_DUMP_DIR_KEY,
-          Nfs3Constant.FILE_DUMP_DIR_DEFAULT);
+      String writeDumpDir = config.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY,
+          NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_DEFAULT);
       openFileCtx = new OpenFileCtx(fos, latestAttr, writeDumpDir + "/"
           + fileHandle.getFileId(), dfsClient, iug);
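
A brief sketch of the two limits WriteManager reads, including the clamping noted in its constructor (values are illustrative):

    NfsConfiguration conf = new NfsConfiguration();
    // Below the 10-second floor: WriteManager resets it to DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT.
    conf.setLong(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY, 5 * 1000L);
    // Cap on concurrently open write streams managed by OpenFileCtxCache.
    conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 64);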
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java

@@ -23,8 +23,8 @@ import java.net.InetAddress;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd;
 import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3;
 import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3;
@@ -38,7 +38,7 @@ public class TestMountd {
   @Test
   public void testStart() throws IOException {
     // Start minicluster
-    Configuration config = new Configuration();
+    NfsConfiguration config = new NfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
         .build();
     cluster.waitActive();

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java

@@ -23,7 +23,8 @@ import java.util.Arrays;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3Utils;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
@@ -155,10 +156,10 @@ public class TestOutOfOrderWrite {
     Arrays.fill(data3, (byte) 9);
 
     // NFS3 Create request
-    Configuration conf = new Configuration();
+    NfsConfiguration conf = new NfsConfiguration();
     WriteClient client = new WriteClient("localhost", conf.getInt(
-        Nfs3Constant.NFS3_SERVER_PORT, Nfs3Constant.NFS3_SERVER_PORT_DEFAULT),
-        create(), false);
+        NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
+        NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT), create(), false);
     client.run();
 
     while (handle == null) {

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestReaddir.java

@@ -24,11 +24,11 @@ import java.io.IOException;
 import java.net.InetAddress;
 import java.util.List;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.nfs3.Nfs3;
 import org.apache.hadoop.hdfs.nfs.nfs3.RpcProgramNfs3;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -53,7 +53,7 @@ import org.mockito.Mockito;
  */
 public class TestReaddir {
 
-  static Configuration config = new Configuration();
+  static NfsConfiguration config = new NfsConfiguration();
   static MiniDFSCluster cluster = null;
   static DistributedFileSystem hdfs;
   static NameNode nn;

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestDFSClientCache.java

@@ -18,24 +18,24 @@
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
 import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
+import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertThat;
-import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Test;
 
 public class TestDFSClientCache {
   @Test
   public void testEviction() throws IOException {
-    Configuration conf = new Configuration();
+    NfsConfiguration conf = new NfsConfiguration();
     conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost");
 
     // Only one entry will be in the cache
@@ -59,7 +59,7 @@ public class TestDFSClientCache {
     String currentUser = "test-user";
 
 
-    Configuration conf = new Configuration();
+    NfsConfiguration conf = new NfsConfiguration();
     UserGroupInformation currentUserUgi
             = UserGroupInformation.createRemoteUser(currentUser);
     currentUserUgi.setAuthenticationMethod(KERBEROS);
@@ -83,7 +83,7 @@ public class TestDFSClientCache {
 
     UserGroupInformation currentUserUgi = UserGroupInformation
             .createUserForTesting(currentUser, new String[0]);
-    Configuration conf = new Configuration();
+    NfsConfiguration conf = new NfsConfiguration();
     conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "hdfs://localhost");
     DFSClientCache cache = new DFSClientCache(conf);
     UserGroupInformation ugiResult

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestExportsTable.java

@@ -21,22 +21,22 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.mount.Mountd;
 import org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.junit.Test;
 
 public class TestExportsTable {
  
   @Test
   public void testExportPoint() throws IOException {
-    Configuration config = new Configuration();
+    NfsConfiguration config = new NfsConfiguration();
     MiniDFSCluster cluster = null;
 
     String exportPoint = "/myexport1";
-    config.setStrings(Nfs3Constant.EXPORT_POINT, exportPoint);
+    config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPoint);
     // Use ephemeral port in case tests are running in parallel
     config.setInt("nfs3.mountd.port", 0);
     config.setInt("nfs3.server.port", 0);

+ 20 - 20
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestOpenFileCtxCache.java

@@ -22,13 +22,13 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.IdUserGroup;
-import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -38,10 +38,10 @@ public class TestOpenFileCtxCache {
 
   @Test
   public void testEviction() throws IOException, InterruptedException {
-    Configuration conf = new Configuration();
+    NfsConfiguration conf = new NfsConfiguration();
 
     // Only two entries will be in the cache
-    conf.setInt(Nfs3Constant.MAX_OPEN_FILES, 2);
+    conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 2);
 
     DFSClient dfsClient = Mockito.mock(DFSClient.class);
     Nfs3FileAttributes attr = new Nfs3FileAttributes();
@@ -49,15 +49,15 @@ public class TestOpenFileCtxCache {
     Mockito.when(fos.getPos()).thenReturn((long) 0);
 
     OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
+        dfsClient, new IdUserGroup(new NfsConfiguration()));
     OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
+        dfsClient, new IdUserGroup(new NfsConfiguration()));
     OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
+        dfsClient, new IdUserGroup(new NfsConfiguration()));
     OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
+        dfsClient, new IdUserGroup(new NfsConfiguration()));
     OpenFileCtx context5 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
+        dfsClient, new IdUserGroup(new NfsConfiguration()));
 
     OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100);
 
@@ -71,7 +71,7 @@ public class TestOpenFileCtxCache {
     assertTrue(cache.size() == 2);
 
     // Wait for the oldest stream to be evict-able, insert again
-    Thread.sleep(Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT);
+    Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
     assertTrue(cache.size() == 2);
 
     ret = cache.put(new FileHandle(3), context3);
@@ -90,17 +90,17 @@ public class TestOpenFileCtxCache {
         new WriteCtx(null, 0, 0, 0, null, null, null, 0, false, null));
     context4.getPendingCommitsForTest().put(new Long(100),
         new CommitCtx(0, null, 0, attr));
-    Thread.sleep(Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT);
+    Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
     ret = cache.put(new FileHandle(5), context5);
     assertFalse(ret);
   }
 
   @Test
   public void testScan() throws IOException, InterruptedException {
-    Configuration conf = new Configuration();
+    NfsConfiguration conf = new NfsConfiguration();
 
     // Only two entries will be in the cache
-    conf.setInt(Nfs3Constant.MAX_OPEN_FILES, 2);
+    conf.setInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 2);
 
     DFSClient dfsClient = Mockito.mock(DFSClient.class);
     Nfs3FileAttributes attr = new Nfs3FileAttributes();
@@ -108,13 +108,13 @@ public class TestOpenFileCtxCache {
     Mockito.when(fos.getPos()).thenReturn((long) 0);
 
     OpenFileCtx context1 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
+        dfsClient, new IdUserGroup(new NfsConfiguration()));
     OpenFileCtx context2 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
+        dfsClient, new IdUserGroup(new NfsConfiguration()));
     OpenFileCtx context3 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
+        dfsClient, new IdUserGroup(new NfsConfiguration()));
     OpenFileCtx context4 = new OpenFileCtx(fos, attr, "/dumpFilePath",
-        dfsClient, new IdUserGroup());
+        dfsClient, new IdUserGroup(new NfsConfiguration()));
 
     OpenFileCtxCache cache = new OpenFileCtxCache(conf, 10 * 60 * 100);
 
@@ -123,8 +123,8 @@ public class TestOpenFileCtxCache {
     assertTrue(ret);
     ret = cache.put(new FileHandle(2), context2);
     assertTrue(ret);
-    Thread.sleep(Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT + 1);
-    cache.scan(Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT);
+    Thread.sleep(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT + 1);
+    cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_MIN_DEFAULT);
     assertTrue(cache.size() == 0);
 
     // Test cleaning inactive entry
@@ -133,7 +133,7 @@ public class TestOpenFileCtxCache {
     ret = cache.put(new FileHandle(4), context4);
     assertTrue(ret);
     context3.setActiveStatusForTest(false);
-    cache.scan(Nfs3Constant.OUTPUT_STREAM_TIMEOUT_DEFAULT);
+    cache.scan(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_DEFAULT);
     assertTrue(cache.size() == 1);
     assertTrue(cache.get(new FileHandle(3)) == null);
     assertTrue(cache.get(new FileHandle(4)) != null);

+ 42 - 0
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java

@@ -17,6 +17,11 @@
  */
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.junit.Assert;
 import org.junit.Test;
@@ -63,4 +68,41 @@ public class TestRpcProgramNfs3 {
       }
     }
   }
+
+  @Test
+  public void testDeprecatedKeys() {
+    NfsConfiguration conf = new NfsConfiguration();
+    conf.setInt("nfs3.server.port", 998);
+    assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, 0) == 998);
+
+    conf.setInt("nfs3.mountd.port", 999);
+    assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY, 0) == 999);
+
+    conf.set("dfs.nfs.exports.allowed.hosts", "host1");
+    assertTrue(conf.get(CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY)
+        .equals("host1"));
+
+    conf.setInt("dfs.nfs.exports.cache.expirytime.millis", 1000);
+    assertTrue(conf.getInt(
+        Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY, 0) == 1000);
+
+    conf.setInt("hadoop.nfs.userupdate.milly", 10);
+    assertTrue(conf.getInt(Nfs3Constant.NFS_USERGROUP_UPDATE_MILLIS_KEY, 0) == 10);
+
+    conf.set("dfs.nfs3.dump.dir", "/nfs/tmp");
+    assertTrue(conf.get(NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY).equals(
+        "/nfs/tmp"));
+
+    conf.setBoolean("dfs.nfs3.enableDump", false);
+    assertTrue(conf.getBoolean(NfsConfigKeys.DFS_NFS_FILE_DUMP_KEY, true) == false);
+
+    conf.setInt("dfs.nfs3.max.open.files", 500);
+    assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY, 0) == 500);
+
+    conf.setInt("dfs.nfs3.stream.timeout", 6000);
+    assertTrue(conf.getInt(NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY, 0) == 6000);
+
+    conf.set("dfs.nfs3.export.point", "/dir1");
+    assertTrue(conf.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY).equals("/dir1"));
+  }
 }
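
Note: the assertions above hold only if the legacy nfs3.* / dfs.nfs3.* names are wired to the new nfs.* keys through Hadoop's configuration deprecation framework. The sketch below is illustrative, not the actual NfsConfiguration implementation: the key strings come from this test and from the new hdfs-default.xml entries, while the class name and registration site are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configuration.DeprecationDelta;

    // Illustrative sketch only -- the real mapping lives in the NFS gateway's
    // configuration class; this just shows the standard deprecation mechanism.
    public class NfsKeyDeprecationSketch {
      static {
        Configuration.addDeprecations(new DeprecationDelta[] {
            new DeprecationDelta("nfs3.server.port", "nfs.server.port"),
            new DeprecationDelta("nfs3.mountd.port", "nfs.mountd.port"),
            new DeprecationDelta("dfs.nfs3.dump.dir", "nfs.dump.dir")
        });
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInt("nfs3.server.port", 998);
        // The old name resolves through the deprecation table to the new key.
        System.out.println(conf.getInt("nfs.server.port", 0)); // prints 998
      }
    }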

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java

@@ -27,11 +27,10 @@ import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.concurrent.ConcurrentNavigableMap;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
 import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -139,7 +138,7 @@ public class TestWrites {
     Mockito.when(fos.getPos()).thenReturn((long) 0);
 
     OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
-        new IdUserGroup());
+        new IdUserGroup(new NfsConfiguration()));
 
     COMMIT_STATUS ret;
 
@@ -201,13 +200,14 @@ public class TestWrites {
     Nfs3FileAttributes attr = new Nfs3FileAttributes();
     HdfsDataOutputStream fos = Mockito.mock(HdfsDataOutputStream.class);
     Mockito.when(fos.getPos()).thenReturn((long) 0);
+    NfsConfiguration config = new NfsConfiguration();
 
     OpenFileCtx ctx = new OpenFileCtx(fos, attr, "/dumpFilePath", dfsClient,
-        new IdUserGroup());
+        new IdUserGroup(config));
 
     FileHandle h = new FileHandle(1); // fake handle for "/dumpFilePath"
     COMMIT_STATUS ret;
-    WriteManager wm = new WriteManager(new IdUserGroup(), new Configuration());
+    WriteManager wm = new WriteManager(new IdUserGroup(config), config);
     assertTrue(wm.addOpenFileStream(h, ctx));
     
     // Test inactive open file context
@@ -280,7 +280,7 @@ public class TestWrites {
 
   @Test
   public void testWriteStableHow() throws IOException, InterruptedException {
-    HdfsConfiguration config = new HdfsConfiguration();
+    NfsConfiguration config = new NfsConfiguration();
     DFSClient client = null;
     MiniDFSCluster cluster = null;
     RpcProgramNfs3 nfsd;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/resources/core-site.xml

@@ -18,12 +18,12 @@
 
 <configuration>
 <property>
-  <name>nfs3.server.port</name>
+  <name>nfs.server.port</name>
   <value>2079</value>
 </property>
 
 <property>
-  <name>nfs3.mountd.port</name>
+  <name>nfs.mountd.port</name>
   <value>4272</value>
 </property>
 </configuration>

+ 37 - 3
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -254,6 +254,9 @@ Trunk (Unreleased)
     HDFS-5794. Fix the inconsistency of layout version number of 
     ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)
 
+    HDFS-6464. Support multiple xattr.name parameters for WebHDFS getXAttrs.
+    (Yi Liu via umamahesh)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
@@ -317,8 +320,8 @@ Trunk (Unreleased)
     HDFS-6374. setXAttr should require the user to be the owner of the file
     or directory (Charles Lamb via wang)
 
-    HDFS-6110 adding more slow action log in critical write path
-    (Liang Xie via stack)
+    HDFS-6375. Listing extended attributes with the search permission.
+    (Charles Lamb via wang)
 
 Release 2.5.0 - UNRELEASED
 
@@ -452,11 +455,33 @@ Release 2.5.0 - UNRELEASED
     HDFS-6356. Fix typo in DatanodeLayoutVersion. (Tulasi G via wang)
 
     HDFS-6447. balancer should timestamp the completion message.
-    (Juan Yu via wang).
+    (Juan Yu via wang)
 
     HDFS-6463. Clarify behavior of AclStorage#createFsPermissionForExtendedAcl
     in comments. (cnauroth)
 
+    HDFS-6472. fix typo in webapps/hdfs/explorer.js. (Juan Yu via wang)
+
+    HDFS-6056. Clean up NFS config settings (brandonli)
+
+    HDFS-6110 adding more slow action log in critical write path
+    (Liang Xie via stack)
+
+    HDFS-6109 let sync_file_range() system call run in background
+    (Liang Xie via stack)
+
+    HDFS-6268. Better sorting in NetworkTopology#pseudoSortByDistance when
+    no local node is found. (wang)
+
+    HDFS-6369. Document that BlockReader#available() can return more bytes than
+    are remaining in the block (Ted Yu via Colin Patrick McCabe)
+
+    HDFS-6487. TestStandbyCheckpoint#testSBNCheckpoints is racy.
+    (Mit Desai via wang)
+
+    HDFS-6297. Add CLI testcases to reflect new features of dfs and dfsadmin
+    (Dasha Boudnik via cos)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
@@ -620,6 +645,15 @@ Release 2.5.0 - UNRELEASED
 
     HDFS-6462. NFS: fsstat request fails with the secure hdfs (brandonli)
 
+    HDFS-6404. HttpFS should use a 000 umask for mkdir and create 
+    operations. (yoderme via tucu)
+
+    HDFS-6424. blockReport doesn't need to invalidate blocks on SBN. (Ming Ma
+    via jing9)
+
+    HDFS-6497. Make TestAvailableSpaceVolumeChoosingPolicy deterministic
+    (cmccabe)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java

@@ -437,6 +437,11 @@ public class Hdfs extends AbstractFileSystem {
       throws IOException {
     return dfs.getXAttrs(getUriPath(path), names);
   }
+
+  @Override
+  public List<String> listXAttrs(Path path) throws IOException {
+    return dfs.listXAttrs(getUriPath(path));
+  }
   
   @Override
   public void removeXAttr(Path path, String name) throws IOException {

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java

@@ -51,6 +51,7 @@ public interface BlockReader extends ByteBufferReadable {
    * Returns an estimate of the number of bytes that can be read
    * (or skipped over) from this input stream without performing
    * network I/O.
+   * This may return more than what is actually present in the block.
    */
   int available() throws IOException;
 

+ 15 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -89,6 +89,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import javax.net.SocketFactory;
 
+import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -2827,6 +2828,20 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory {
     }
   }
   
+  public List<String> listXAttrs(String src)
+          throws IOException {
+    checkOpen();
+    try {
+      final Map<String, byte[]> xattrs =
+        XAttrHelper.buildXAttrMap(namenode.listXAttrs(src));
+      return Lists.newArrayList(xattrs.keySet());
+    } catch(RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+                                     FileNotFoundException.class,
+                                     UnresolvedPathException.class);
+    }
+  }
+
   public void removeXAttr(String src, String name) throws IOException {
     checkOpen();
     try {

+ 2 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -111,6 +111,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_DEFAULT = false;
   public static final String  DFS_DATANODE_SYNC_BEHIND_WRITES_KEY = "dfs.datanode.sync.behind.writes";
   public static final boolean DFS_DATANODE_SYNC_BEHIND_WRITES_DEFAULT = false;
+  public static final String  DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_KEY = "dfs.datanode.sync.behind.writes.in.background";
+  public static final boolean DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_DEFAULT = false;
   public static final String  DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY = "dfs.datanode.drop.cache.behind.reads";
   public static final boolean DFS_DATANODE_DROP_CACHE_BEHIND_READS_DEFAULT = false;
   public static final String  DFS_DATANODE_USE_DN_HOSTNAME = "dfs.datanode.use.datanode.hostname";
@@ -639,12 +641,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE =
       "dfs.client.hedged.read.threadpool.size";
   public static final int     DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE = 0;
-  public static final String  DFS_NFS_KEYTAB_FILE_KEY = "dfs.nfs.keytab.file";
-  public static final String  DFS_NFS_KERBEROS_PRINCIPAL_KEY = "dfs.nfs.kerberos.principal";
-  public static final String  DFS_NFS_REGISTRATION_PORT_KEY = "dfs.nfs.registration.port";
-  public static final int     DFS_NFS_REGISTRATION_PORT_DEFAULT = 40; // Currently unassigned.
-  public static final String  DFS_NFS_ALLOW_INSECURE_PORTS_KEY = "dfs.nfs.allow.insecure.ports";
-  public static final boolean DFS_NFS_ALLOW_INSECURE_PORTS_DEFAULT = true;
 
    // Slow io warning log threshold settings for dfsclient and datanode.
    public static final String DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =

+ 17 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -1864,6 +1864,23 @@ public class DistributedFileSystem extends FileSystem {
     }.resolve(this, absF);
   }
   
+  @Override
+  public List<String> listXAttrs(Path path)
+          throws IOException {
+    final Path absF = fixRelativePart(path);
+    return new FileSystemLinkResolver<List<String>>() {
+      @Override
+      public List<String> doCall(final Path p) throws IOException {
+        return dfs.listXAttrs(getPathName(p));
+      }
+      @Override
+      public List<String> next(final FileSystem fs, final Path p)
+              throws IOException, UnresolvedLinkException {
+        return fs.listXAttrs(p);
+      }
+    }.resolve(this, absF);
+  }
+
   @Override
   public void removeXAttr(Path path, final String name) throws IOException {
     Path absF = fixRelativePart(path);
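
Taken together with the DFSClient and FileSystem changes, listXAttrs completes the client-side xattr API. A hedged usage sketch follows; the path and attribute name are made up for illustration, and FileSystem.get() is assumed to resolve to an HDFS filesystem.

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListXAttrsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);      // expected to be a DistributedFileSystem
        Path file = new Path("/tmp/xattr-demo");   // hypothetical path

        fs.create(file).close();
        fs.setXAttr(file, "user.origin", "example".getBytes("UTF-8"));

        // Only names visible to the caller are returned: the "user" namespace
        // for regular users, "user" and "trusted" for superusers.
        List<String> names = fs.listXAttrs(file);
        for (String name : names) {
          byte[] value = fs.getXAttr(file, name);
          System.out.println(name + " = " + new String(value, "UTF-8"));
        }
      }
    }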

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java

@@ -152,7 +152,7 @@ public class XAttrHelper {
   public static List<XAttr> buildXAttrs(List<String> names) {
     if (names == null || names.isEmpty()) {
       throw new HadoopIllegalArgumentException("XAttr names can not be " +
-      		"null or empty.");
+          "null or empty.");
     }
     
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(names.size());

+ 21 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -1300,6 +1300,27 @@ public interface ClientProtocol {
   @Idempotent
   public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs) 
       throws IOException;
+
+  /**
+   * List the xattr names for a file or directory.
+   * Only the xattr names for which the logged-in user has permission to
+   * access will be returned.
+   * <p/>
+   * A regular user can only get xattr names from the "user" namespace.
+   * A super user can get xattr names of the "user" and "trusted" namespace.
+   * XAttr names of the "security" and "system" namespaces are only used/exposed
+   * internally by the file system impl.
+   * <p/>
+   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
+   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * @param src file or directory
+   * @param xAttrs xAttrs to get
+   * @return List<XAttr> <code>XAttr</code> list
+   * @throws IOException
+   */
+  @Idempotent
+  public List<XAttr> listXAttrs(String src)
+      throws IOException;
   
   /**
    * Remove xattr of a file or directory.Value in xAttr parameter is ignored.

+ 13 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -174,8 +174,11 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto;
@@ -1297,6 +1300,16 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       throw new ServiceException(e);
     }
   }
+
+  @Override
+  public ListXAttrsResponseProto listXAttrs(RpcController controller,
+    ListXAttrsRequestProto req) throws ServiceException {
+    try {
+      return PBHelper.convertListXAttrsResponse(server.listXAttrs(req.getSrc()));
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
   
   @Override
   public RemoveXAttrResponseProto removeXAttr(RpcController controller,

+ 14 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -144,6 +144,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTim
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.RemoveXAttrRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.SetXAttrRequestProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
@@ -1305,6 +1306,19 @@ public class ClientNamenodeProtocolTranslatorPB implements
     }
   }
   
+  @Override
+  public List<XAttr> listXAttrs(String src)
+      throws IOException {
+    ListXAttrsRequestProto.Builder builder = ListXAttrsRequestProto.newBuilder();
+    builder.setSrc(src);
+    ListXAttrsRequestProto req = builder.build();
+    try {
+      return PBHelper.convert(rpcProxy.listXAttrs(null, req));
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
   @Override
   public void removeXAttr(String src, XAttr xAttr) throws IOException {
     RemoveXAttrRequestProto req = RemoveXAttrRequestProto

+ 17 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.proto.HAServiceProtocolProtos;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -153,6 +154,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.XAttrNamespaceProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrSetFlagProto;
@@ -2179,6 +2181,21 @@ public class PBHelper {
     return builder.build();
   }
 
+  public static List<XAttr> convert(ListXAttrsResponseProto a) {
+    final List<XAttrProto> xAttrs = a.getXAttrsList();
+    return convertXAttrs(xAttrs);
+  }
+
+  public static ListXAttrsResponseProto convertListXAttrsResponse(
+    List<XAttr> names) {
+    ListXAttrsResponseProto.Builder builder =
+      ListXAttrsResponseProto.newBuilder();
+    if (names != null) {
+      builder.addAllXAttrs(convertXAttrProto(names));
+    }
+    return builder.build();
+  }
+
   public static ShortCircuitShmSlotProto convert(SlotId slotId) {
     return ShortCircuitShmSlotProto.newBuilder().
         setShmId(convert(slotId.getShmId())).

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -1050,6 +1050,9 @@ public class BlockManager {
    * datanode and log the operation
    */
   void addToInvalidates(final Block block, final DatanodeInfo datanode) {
+    if (!namesystem.isPopulatingReplQueues()) {
+      return;
+    }
     invalidateBlocks.add(block, datanode, true);
   }
 
@@ -1058,6 +1061,9 @@ public class BlockManager {
    * datanodes.
    */
   private void addToInvalidates(Block b) {
+    if (!namesystem.isPopulatingReplQueues()) {
+      return;
+    }
     StringBuilder datanodes = new StringBuilder();
     for(DatanodeStorageInfo storage : blocksMap.getStorages(b, State.NORMAL)) {
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -351,7 +351,8 @@ public class DatanodeManager {
         DFSUtil.DECOM_COMPARATOR;
         
     for (LocatedBlock b : locatedblocks) {
-      networktopology.pseudoSortByDistance(client, b.getLocations());
+      networktopology.sortByDistance(client, b.getLocations(), b
+          .getBlock().getBlockId());
       // Move decommissioned/stale datanodes to the bottom
       Arrays.sort(b.getLocations(), comparator);
     }

+ 14 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java

@@ -104,6 +104,7 @@ class BlockReceiver implements Closeable {
   private boolean dropCacheBehindWrites;
   private long lastCacheManagementOffset = 0;
   private boolean syncBehindWrites;
+  private boolean syncBehindWritesInBackground;
 
   /** The client name.  It is empty if a datanode is the client */
   private final String clientname;
@@ -207,6 +208,8 @@ class BlockReceiver implements Closeable {
         datanode.getDnConf().dropCacheBehindWrites :
           cachingStrategy.getDropBehind();
       this.syncBehindWrites = datanode.getDnConf().syncBehindWrites;
+      this.syncBehindWritesInBackground = datanode.getDnConf().
+          syncBehindWritesInBackground;
       
       final boolean isCreate = isDatanode || isTransfer 
           || stage == BlockConstructionStage.PIPELINE_SETUP_CREATE;
@@ -668,10 +671,17 @@ class BlockReceiver implements Closeable {
         // of file                 
         //
         if (syncBehindWrites) {
-          NativeIO.POSIX.syncFileRangeIfPossible(outFd,
-              lastCacheManagementOffset,
-              offsetInBlock - lastCacheManagementOffset,
-              NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
+          if (syncBehindWritesInBackground) {
+            this.datanode.getFSDataset().submitBackgroundSyncFileRangeRequest(
+                block, outFd, lastCacheManagementOffset,
+                offsetInBlock - lastCacheManagementOffset,
+                NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
+          } else {
+            NativeIO.POSIX.syncFileRangeIfPossible(outFd,
+                lastCacheManagementOffset, offsetInBlock
+                    - lastCacheManagementOffset,
+                NativeIO.POSIX.SYNC_FILE_RANGE_WRITE);
+          }
         }
         //
         // For POSIX_FADV_DONTNEED, we want to drop from the beginning 

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java

@@ -67,6 +67,7 @@ public class DNConf {
   final boolean transferToAllowed;
   final boolean dropCacheBehindWrites;
   final boolean syncBehindWrites;
+  final boolean syncBehindWritesInBackground;
   final boolean dropCacheBehindReads;
   final boolean syncOnClose;
   final boolean encryptDataTransfer;
@@ -119,6 +120,9 @@ public class DNConf {
     syncBehindWrites = conf.getBoolean(
         DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_KEY,
         DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_DEFAULT);
+    syncBehindWritesInBackground = conf.getBoolean(
+        DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_KEY,
+        DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_DEFAULT);
     dropCacheBehindReads = conf.getBoolean(
         DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY,
         DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_DEFAULT);
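
The new dfs.datanode.sync.behind.writes.in.background flag only matters when dfs.datanode.sync.behind.writes is itself enabled; with both on, BlockReceiver hands the sync_file_range() call to the async disk service instead of issuing it inline. A minimal sketch of enabling the pair programmatically (an equivalent hdfs-site.xml entry works the same way):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class SyncBehindWritesConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Enable sync-behind-writes, then push the sync_file_range() calls off
        // the datanode write path into the background disk service.
        conf.setBoolean(DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_KEY, true);
        conf.setBoolean(
            DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_KEY, true);
        System.out.println(conf.getBoolean(
            DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_IN_BACKGROUND_KEY, false));
      }
    }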

+ 44 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -231,6 +231,9 @@ public class DataNode extends Configured
   private boolean checkDiskErrorFlag = false;
   private Object checkDiskErrorMutex = new Object();
   private long lastDiskErrorCheck;
+  private String supergroup;
+  private boolean isPermissionEnabled;
+  private String dnUserName = null;
 
   /**
    * Create the DataNode given a configuration, an array of dataDirs,
@@ -252,6 +255,11 @@ public class DataNode extends Configured
     this.getHdfsBlockLocationsEnabled = conf.getBoolean(
         DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, 
         DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
+    this.supergroup = conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
+        DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
+    this.isPermissionEnabled = conf.getBoolean(
+        DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,
+        DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT);
 
     confVersion = "core-" +
         conf.get("hadoop.common.configuration.version", "UNSPECIFIED") +
@@ -432,6 +440,33 @@ public class DataNode extends Configured
       ipcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
     }
   }
+
+  /** Check whether the current user is in the superuser group. */
+  private void checkSuperuserPrivilege() throws IOException, AccessControlException {
+    if (!isPermissionEnabled) {
+      return;
+    }
+    // Try to get the ugi in the RPC call.
+    UserGroupInformation callerUgi = ipcServer.getRemoteUser();
+    if (callerUgi == null) {
+      // This is not from RPC.
+      callerUgi = UserGroupInformation.getCurrentUser();
+    }
+
+    // Is this by the DN user itself?
+    assert dnUserName != null;
+    if (callerUgi.getShortUserName().equals(dnUserName)) {
+      return;
+    }
+
+    // Is the user a member of the super group?
+    List<String> groups = Arrays.asList(callerUgi.getGroupNames());
+    if (groups.contains(supergroup)) {
+      return;
+    }
+    // Not a superuser.
+    throw new AccessControlException();
+  }
   
 /**
  * Initialize the datanode's periodic scanners:
@@ -735,6 +770,11 @@ public class DataNode extends Configured
   
     // BlockPoolTokenSecretManager is required to create ipc server.
     this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
+
+    // Login is done by now. Set the DN user name.
+    dnUserName = UserGroupInformation.getCurrentUser().getShortUserName();
+    LOG.info("dnUserName = " + dnUserName);
+    LOG.info("supergroup = " + supergroup);
     initIpcServer(conf);
 
     metrics = DataNodeMetrics.create(conf, getDisplayName());
@@ -2414,6 +2454,7 @@ public class DataNode extends Configured
 
   @Override // ClientDatanodeProtocol
   public void refreshNamenodes() throws IOException {
+    checkSuperuserPrivilege();
     conf = new Configuration();
     refreshNamenodes(conf);
   }
@@ -2421,6 +2462,7 @@ public class DataNode extends Configured
   @Override // ClientDatanodeProtocol
   public void deleteBlockPool(String blockPoolId, boolean force)
       throws IOException {
+    checkSuperuserPrivilege();
     LOG.info("deleteBlockPool command received for block pool " + blockPoolId
         + ", force=" + force);
     if (blockPoolManager.get(blockPoolId) != null) {
@@ -2436,6 +2478,7 @@ public class DataNode extends Configured
 
   @Override // ClientDatanodeProtocol
   public synchronized void shutdownDatanode(boolean forUpgrade) throws IOException {
+    checkSuperuserPrivilege();
     LOG.info("shutdownDatanode command received (upgrade=" + forUpgrade +
         "). Shutting down Datanode...");
 
@@ -2602,4 +2645,4 @@ public class DataNode extends Configured
       return lastDiskErrorCheck;
     }
   }
-}
+}

+ 10 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/AvailableSpaceVolumeChoosingPolicy.java

@@ -45,11 +45,19 @@ public class AvailableSpaceVolumeChoosingPolicy<V extends FsVolumeSpi>
   
   private static final Log LOG = LogFactory.getLog(AvailableSpaceVolumeChoosingPolicy.class);
   
-  private static final Random RAND = new Random();
+  private final Random random;
   
   private long balancedSpaceThreshold = DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_DEFAULT;
   private float balancedPreferencePercent = DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT;
 
+  AvailableSpaceVolumeChoosingPolicy(Random random) {
+    this.random = random;
+  }
+
+  public AvailableSpaceVolumeChoosingPolicy() {
+    this(new Random());
+  }
+
   @Override
   public synchronized void setConf(Configuration conf) {
     balancedSpaceThreshold = conf.getLong(
@@ -128,7 +136,7 @@ public class AvailableSpaceVolumeChoosingPolicy<V extends FsVolumeSpi>
           (highAvailableVolumes.size() * balancedPreferencePercent) /
           preferencePercentScaler;
       if (mostAvailableAmongLowVolumes < replicaSize ||
-          RAND.nextFloat() < scaledPreferencePercent) {
+          random.nextFloat() < scaledPreferencePercent) {
         volume = roundRobinPolicyHighAvailable.chooseVolume(
             highAvailableVolumes,
             replicaSize);

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset;
 
 
 import java.io.File;
+import java.io.FileDescriptor;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.List;
@@ -431,5 +432,12 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
    * @return true when trash is enabled
    */
   public boolean trashEnabled(String bpid);
+
+  /**
+   * submit a sync_file_range request to AsyncDiskService
+   */
+  public void submitBackgroundSyncFileRangeRequest(final ExtendedBlock block,
+      final FileDescriptor fd, final long offset, final long nbytes,
+      final int flags);
 }
 

+ 19 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import java.io.File;
+import java.io.FileDescriptor;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.LinkedBlockingQueue;
@@ -31,6 +32,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.io.nativeio.NativeIOException;
 
 /**
  * This class is a container of multiple thread pools, each for a volume,
@@ -42,6 +45,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
  * can be slow, and we don't want to use a single thread pool because that
  * is inefficient when we have more than 1 volume.  AsyncDiskService is the
  * solution for these.
+ * Another example of an async disk operation is a sync_file_range() request.
  * 
  * This class and {@link org.apache.hadoop.util.AsyncDiskService} are similar.
  * They should be combined.
@@ -148,6 +152,21 @@ class FsDatasetAsyncDiskService {
     }
   }
 
+  public void submitSyncFileRangeRequest(FsVolumeImpl volume,
+      final FileDescriptor fd, final long offset, final long nbytes,
+      final int flags) {
+    execute(volume.getCurrentDir(), new Runnable() {
+      @Override
+      public void run() {
+        try {
+          NativeIO.POSIX.syncFileRangeIfPossible(fd, offset, nbytes, flags);
+        } catch (NativeIOException e) {
+          LOG.warn("sync_file_range error", e);
+        }
+      }
+    });
+  }
+
   /**
    * Delete the block file and meta file from the disk asynchronously, adjust
    * dfsUsed statistics accordingly.

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -1907,5 +1907,13 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     }
     return new RollingLogsImpl(dir, prefix);
   }
+
+  @Override
+  public void submitBackgroundSyncFileRangeRequest(ExtendedBlock block,
+      FileDescriptor fd, long offset, long nbytes, int flags) {
+    FsVolumeImpl fsVolumeImpl = this.getVolume(block);
+    asyncDiskService.submitSyncFileRangeRequest(fsVolumeImpl, fd, offset,
+        nbytes, flags);
+  }
 }
 

+ 27 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -145,6 +145,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -1632,9 +1633,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       blockManager.getDatanodeManager().sortLocatedBlocks(
           clientMachine, blocks.getLocatedBlocks());
       
+      // lastBlock is not part of getLocatedBlocks(), might need to sort it too
       LocatedBlock lastBlock = blocks.getLastLocatedBlock();
       if (lastBlock != null) {
-        ArrayList<LocatedBlock> lastBlockList = new ArrayList<LocatedBlock>();
+        ArrayList<LocatedBlock> lastBlockList =
+            Lists.newArrayListWithCapacity(1);
         lastBlockList.add(lastBlock);
         blockManager.getDatanodeManager().sortLocatedBlocks(
                               clientMachine, lastBlockList);
@@ -7950,6 +7953,29 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       readUnlock();
     }
   }
+
+  List<XAttr> listXAttrs(String src) throws IOException {
+    nnConf.checkXAttrsConfigFlag();
+    final FSPermissionChecker pc = getPermissionChecker();
+    checkOperation(OperationCategory.READ);
+    readLock();
+    try {
+      checkOperation(OperationCategory.READ);
+      if (isPermissionEnabled) {
+        /* To access xattr names, you need EXECUTE in the owning directory. */
+        checkParentAccess(pc, src, FsAction.EXECUTE);
+      }
+      final List<XAttr> all = dir.getXAttrs(src);
+      final List<XAttr> filteredAll = XAttrPermissionFilter.
+        filterXAttrsForApi(pc, all);
+      return filteredAll;
+    } catch (AccessControlException e) {
+      logAuditEvent(false, "listXAttrs", src);
+      throw e;
+    } finally {
+      readUnlock();
+    }
+  }
   
   void removeXAttr(String src, XAttr xAttr) throws IOException {
     nnConf.checkXAttrsConfigFlag();

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -1395,6 +1395,11 @@ class NameNodeRpcServer implements NamenodeProtocols {
       throws IOException {
     return namesystem.getXAttrs(src, xAttrs);
   }
+
+  @Override
+  public List<XAttr> listXAttrs(String src) throws IOException {
+    return namesystem.listXAttrs(src);
+  }
   
   @Override
   public void removeXAttr(String src, XAttr xAttr) throws IOException {

+ 23 - 14
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

@@ -121,6 +121,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
+import com.google.common.collect.Lists;
 import com.sun.jersey.spi.container.ResourceFilters;
 
 /** Web-hdfs NameNode implementation. */
@@ -712,12 +713,12 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize,
       @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT) 
-          final XAttrNameParam xattrName,
+          final List<XAttrNameParam> xattrNames,
       @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) 
           final XAttrEncodingParam xattrEncoding
       ) throws IOException, InterruptedException {
     return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
-        renewer, bufferSize, xattrName, xattrEncoding);
+        renewer, bufferSize, xattrNames, xattrEncoding);
   }
 
   /** Handle HTTP GET request. */
@@ -744,13 +745,13 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize,
       @QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT) 
-          final XAttrNameParam xattrName,
+          final List<XAttrNameParam> xattrNames,
       @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) 
           final XAttrEncodingParam xattrEncoding
       ) throws IOException, InterruptedException {
 
     init(ugi, delegation, username, doAsUser, path, op, offset, length,
-        renewer, bufferSize, xattrName, xattrEncoding);
+        renewer, bufferSize, xattrEncoding);
 
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
@@ -758,7 +759,7 @@ public class NamenodeWebHdfsMethods {
         try {
           return get(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
-              xattrName, xattrEncoding);
+              xattrNames, xattrEncoding);
         } finally {
           reset();
         }
@@ -777,7 +778,7 @@ public class NamenodeWebHdfsMethods {
       final LengthParam length,
       final RenewerParam renewer,
       final BufferSizeParam bufferSize,
-      final XAttrNameParam xattrName,
+      final List<XAttrNameParam> xattrNames,
       final XAttrEncodingParam xattrEncoding
       ) throws IOException, URISyntaxException {
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
@@ -853,19 +854,27 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString(status);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
-    case GETXATTR: {
-      XAttr xAttr = XAttrHelper.getFirstXAttr(np.getXAttrs(fullpath,
-          XAttrHelper.buildXAttrAsList(xattrName.getXAttrName())));
-      final String js = JsonUtil.toJsonString(xAttr,
-          xattrEncoding.getEncoding());
-      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
-    }
     case GETXATTRS: {
-      List<XAttr> xAttrs = np.getXAttrs(fullpath, null);
+      List<String> names = null;
+      if (xattrNames != null) {
+        names = Lists.newArrayListWithCapacity(xattrNames.size());
+        for (XAttrNameParam xattrName : xattrNames) {
+          if (xattrName.getXAttrName() != null) {
+            names.add(xattrName.getXAttrName());
+          }
+        }
+      }
+      List<XAttr> xAttrs = np.getXAttrs(fullpath, (names != null && 
+          !names.isEmpty()) ? XAttrHelper.buildXAttrs(names) : null);
       final String js = JsonUtil.toJsonString(xAttrs,
           xattrEncoding.getEncoding());
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
+    case LISTXATTRS: {
+      final List<XAttr> xAttrs = np.listXAttrs(fullpath);
+      final String js = JsonUtil.toJsonString(xAttrs);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }

+ 33 - 40
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
 import org.mortbay.util.ajax.JSON;
 
+import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 
 import java.io.ByteArrayInputStream;
@@ -665,21 +666,6 @@ public class JsonUtil {
     return aclStatusBuilder.build();
   }
   
-  public static String toJsonString(final XAttr xAttr, 
-      final XAttrCodec encoding) throws IOException {
-    if (xAttr == null) {
-      return "{}";
-    }
-    final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("name", XAttrHelper.getPrefixName(xAttr));
-    m.put("value", xAttr.getValue() != null ? 
-        XAttrCodec.encodeValue(xAttr.getValue(), encoding) : null);
-    final Map<String, Map<String, Object>> finalMap =
-        new TreeMap<String, Map<String, Object>>();
-    finalMap.put(XAttr.class.getSimpleName(), m);
-    return JSON.toString(finalMap);
-  }
-  
   private static Map<String, Object> toJsonMap(final XAttr xAttr,
       final XAttrCodec encoding) throws IOException {
     if (xAttr == null) {
@@ -715,18 +701,30 @@ public class JsonUtil {
     return JSON.toString(finalMap);
   }
   
-  public static XAttr toXAttr(final Map<?, ?> json) throws IOException {
+  public static String toJsonString(final List<XAttr> xAttrs)
+    throws IOException {
+    final List<String> names = Lists.newArrayListWithCapacity(xAttrs.size());
+    for (XAttr xAttr : xAttrs) {
+      names.add(XAttrHelper.getPrefixName(xAttr));
+    }
+    String ret = JSON.toString(names);
+    final Map<String, Object> finalMap = new TreeMap<String, Object>();
+    finalMap.put("XAttrNames", ret);
+    return JSON.toString(finalMap);
+  }
+
+  public static byte[] getXAttr(final Map<?, ?> json, final String name) 
+      throws IOException {
     if (json == null) {
       return null;
     }
     
-    Map<?, ?> m = (Map<?, ?>) json.get(XAttr.class.getSimpleName());
-    if (m == null) {
-      return null;
+    Map<String, byte[]> xAttrs = toXAttrs(json);
+    if (xAttrs != null) {
+      return xAttrs.get(name);
     }
-    String name = (String) m.get("name");
-    String value = (String) m.get("value");
-    return XAttrHelper.buildXAttr(name, decodeXAttrValue(value));
+    
+    return null;
   }
   
   public static Map<String, byte[]> toXAttrs(final Map<?, ?> json) 
@@ -738,28 +736,23 @@ public class JsonUtil {
     return toXAttrMap((Object[])json.get("XAttrs"));
   }
   
-  public static Map<String, byte[]> toXAttrs(final Map<?, ?> json, 
-      List<String> names) throws IOException {
-    if (json == null || names == null) {
+  public static List<String> toXAttrNames(final Map<?, ?> json)
+      throws IOException {
+    if (json == null) {
       return null;
     }
-    if (names.isEmpty()) {
-      return Maps.newHashMap();
-    }
-    Map<String, byte[]> xAttrs = toXAttrs(json);
-    if (xAttrs == null || xAttrs.isEmpty()) {
-      return xAttrs;
-    }
-    
-    Map<String, byte[]> result = Maps.newHashMap();
-    for (String name : names) {
-      if (xAttrs.containsKey(name)) {
-        result.put(name, xAttrs.get(name));
-      }
+
+    final String namesInJson = (String) json.get("XAttrNames");
+    final Object[] xattrs = (Object[]) JSON.parse(namesInJson);
+    final List<String> names =
+      Lists.newArrayListWithCapacity(json.keySet().size());
+
+    for (int i = 0; i < xattrs.length; i++) {
+        names.add((String) (xattrs[i]));
     }
-    return result;
+    return names;
   }
-  
+
   private static Map<String, byte[]> toXAttrMap(final Object[] objects) 
       throws IOException {
     if (objects == null) {

+ 37 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -50,7 +50,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
@@ -80,6 +79,7 @@ import org.mortbay.util.ajax.JSON;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
 /** A FileSystem for HDFS over the web. */
@@ -601,6 +601,13 @@ public class WebHdfsFileSystem extends FileSystem
       this.parameters = parameters;
     }
     
+    AbstractFsPathRunner(final HttpOpParam.Op op, Param<?,?>[] parameters,
+        final Path fspath) {
+      super(op, false);
+      this.fspath = fspath;
+      this.parameters = parameters;
+    }
+    
     @Override
     protected URL getUrl() throws IOException {
       return toUrl(op, fspath, parameters);
@@ -630,6 +637,11 @@ public class WebHdfsFileSystem extends FileSystem
       super(op, fspath, parameters);
     }
     
+    FsPathResponseRunner(final HttpOpParam.Op op, Param<?,?>[] parameters,
+        final Path fspath) {
+      super(op, parameters, fspath);
+    }
+    
     @Override
     final T getResponse(HttpURLConnection conn) throws IOException {
       try {
@@ -834,14 +846,13 @@ public class WebHdfsFileSystem extends FileSystem
   }
   
   @Override
-  public byte[] getXAttr(Path p, String name) throws IOException {
-    final HttpOpParam.Op op = GetOpParam.Op.GETXATTR;
+  public byte[] getXAttr(Path p, final String name) throws IOException {
+    final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
     return new FsPathResponseRunner<byte[]>(op, p, new XAttrNameParam(name), 
         new XAttrEncodingParam(XAttrCodec.HEX)) {
       @Override
       byte[] decodeResponse(Map<?, ?> json) throws IOException {
-        XAttr xAttr = JsonUtil.toXAttr(json);
-        return xAttr != null ? xAttr.getValue() : null;
+        return JsonUtil.getXAttr(json, name);
       }
     }.run();
   }
@@ -861,16 +872,34 @@ public class WebHdfsFileSystem extends FileSystem
   @Override
   public Map<String, byte[]> getXAttrs(Path p, final List<String> names) 
       throws IOException {
+    Preconditions.checkArgument(names != null && !names.isEmpty(), 
+        "XAttr names cannot be null or empty.");
+    Param<?,?>[] parameters = new Param<?,?>[names.size() + 1];
+    for (int i = 0; i < parameters.length - 1; i++) {
+      parameters[i] = new XAttrNameParam(names.get(i));
+    }
+    parameters[parameters.length - 1] = new XAttrEncodingParam(XAttrCodec.HEX);
+    
     final HttpOpParam.Op op = GetOpParam.Op.GETXATTRS;
-    return new FsPathResponseRunner<Map<String, byte[]>>(op, p, 
-        new XAttrEncodingParam(XAttrCodec.HEX)) {
+    return new FsPathResponseRunner<Map<String, byte[]>>(op, parameters, p) {
       @Override
       Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
-        return JsonUtil.toXAttrs(json, names);
+        return JsonUtil.toXAttrs(json);
       }
     }.run();
   }
   
+  @Override
+  public List<String> listXAttrs(Path p) throws IOException {
+    final HttpOpParam.Op op = GetOpParam.Op.LISTXATTRS;
+    return new FsPathResponseRunner<List<String>>(op, p) {
+      @Override
+      List<String> decodeResponse(Map<?, ?> json) throws IOException {
+        return JsonUtil.toXAttrNames(json);
+      }
+    }.run();
+  }
+
   @Override
   public void removeXAttr(Path p, String name) throws IOException {
     statistics.incrementWriteOps(1);
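
From the client side the GETXATTRS/LISTXATTRS rework is transparent: pointing a FileSystem at a webhdfs:// URI exercises the batched xattr.name parameters and the new name listing. A hedged sketch with placeholder host, port, and path:

    import java.net.URI;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WebHdfsXAttrExample {
      public static void main(String[] args) throws Exception {
        // Placeholder endpoint; substitute the NameNode HTTP address.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode.example.com:50070"), new Configuration());
        Path p = new Path("/tmp/xattr-demo");

        // LISTXATTRS: names only, no values.
        List<String> names = fs.listXAttrs(p);

        if (!names.isEmpty()) {
          // GETXATTRS with explicit names: each name is sent as its own
          // xattr.name query parameter on a single request.
          Map<String, byte[]> values = fs.getXAttrs(p, names);
          System.out.println(names + " -> " + values.size() + " value(s)");
        }
      }
    }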

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java

@@ -36,8 +36,8 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     /** GET_BLOCK_LOCATIONS is a private unstable op. */
     GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
     GETACLSTATUS(false, HttpURLConnection.HTTP_OK),
-    GETXATTR(false, HttpURLConnection.HTTP_OK),
     GETXATTRS(false, HttpURLConnection.HTTP_OK),
+    LISTXATTRS(false, HttpURLConnection.HTTP_OK),
 
     NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto

@@ -764,6 +764,8 @@ service ClientNamenodeProtocol {
       returns(SetXAttrResponseProto);
   rpc getXAttrs(GetXAttrsRequestProto)
       returns(GetXAttrsResponseProto);
+  rpc listXAttrs(ListXAttrsRequestProto)
+      returns(ListXAttrsResponseProto);
   rpc removeXAttr(RemoveXAttrRequestProto)
       returns(RemoveXAttrResponseProto);
 }

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/xattr.proto

@@ -62,6 +62,14 @@ message GetXAttrsResponseProto {
   repeated XAttrProto xAttrs = 1;
 }
 
+message ListXAttrsRequestProto {
+  required string src = 1;
+}
+
+message ListXAttrsResponseProto {
+  repeated XAttrProto xAttrs = 1;
+}
+
 message RemoveXAttrRequestProto {
   required string src        = 1;
   optional XAttrProto xAttr  = 2;

+ 51 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -1296,8 +1296,56 @@
     non-zero integer.
   </description>
 </property>
+
+<property>
+  <name>nfs.server.port</name>
+  <value>2049</value>
+  <description>
+      Specify the port number used by Hadoop NFS.
+  </description>
+</property>
+
+<property>
+  <name>nfs.mountd.port</name>
+  <value>4242</value>
+  <description>
+      Specify the port number used by Hadoop mount daemon.
+  </description>
+</property>
+
+<property>    
+  <name>nfs.dump.dir</name>
+  <value>/tmp/.hdfs-nfs</value>
+  <description>
+    This directory is used to temporarily save out-of-order writes before
+    writing to HDFS. For each file, the out-of-order writes are dumped once
+    they accumulate in memory beyond a certain threshold (e.g., 1MB).
+    One needs to make sure the directory has enough space.
+  </description>
+</property>
+
+<property>
+  <name>nfs.rtmax</name>
+  <value>1048576</value>
+  <description>This is the maximum size in bytes of a READ request
+    supported by the NFS gateway. If you change this, make sure you
+    also update the nfs mount's rsize (add rsize= # of bytes to the
+    mount directive).
+  </description>
+</property>
+
+<property>
+  <name>nfs.wtmax</name>
+  <value>1048576</value>
+  <description>This is the maximum size in bytes of a WRITE request
+    supported by the NFS gateway. If you change this, make sure you
+    also update the nfs mount's wsize (add wsize= # of bytes to the
+    mount directive).
+  </description>
+</property>
+
 <property>
-  <name>dfs.nfs.keytab.file</name>
+  <name>nfs.keytab.file</name>
   <value></value>
   <description>
     *Note*: Advanced property. Change with caution.
@@ -1307,7 +1355,7 @@
 </property>
 
 <property>
-  <name>dfs.nfs.kerberos.principal</name>
+  <name>nfs.kerberos.principal</name>
   <value></value>
   <description>
     *Note*: Advanced property. Change with caution.
@@ -1318,7 +1366,7 @@
 </property>
 
 <property>
-  <name>dfs.nfs.allow.insecure.ports</name>
+  <name>nfs.allow.insecure.ports</name>
   <value>true</value>
   <description>
     When set to false, client connections originating from unprivileged ports
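
The renamed gateway settings behave like any other Hadoop properties. Below is a hedged sketch of reading the effective values through the gateway's NfsConfiguration; the key strings match the property names introduced above and the fallback values are the documented defaults.

    import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;

    public class NfsGatewaySettingsSketch {
      public static void main(String[] args) {
        NfsConfiguration conf = new NfsConfiguration();
        int serverPort = conf.getInt("nfs.server.port", 2049);
        int mountdPort = conf.getInt("nfs.mountd.port", 4242);
        int rtmax = conf.getInt("nfs.rtmax", 1048576);
        int wtmax = conf.getInt("nfs.wtmax", 1048576);
        // If rtmax/wtmax are raised or lowered, the NFS client's rsize/wsize
        // mount options should be changed to match, as noted above.
        System.out.println(serverPort + " " + mountdPort + " "
            + rtmax + " " + wtmax);
      }
    }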

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js

@@ -45,7 +45,7 @@
     return function (jqxhr, text, err) {
       switch(jqxhr.status) {
         case 401:
-          var msg = '<p>Authentication failed when trying to open ' + url + ': Unauthrozied.</p>';
+          var msg = '<p>Authentication failed when trying to open ' + url + ': Unauthorized.</p>';
           break;
         case 403:
           if(jqxhr.responseJSON !== undefined && jqxhr.responseJSON.RemoteException !== undefined) {

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm

@@ -76,14 +76,14 @@ HDFS NFS Gateway
 
 ----
   <property>
-    <name>dfs.nfs.keytab.file</name>
+    <name>nfs.keytab.file</name>
     <value>/etc/hadoop/conf/nfsserver.keytab</value> <!-- path to the nfs gateway keytab -->
   </property>
 ----
 
 ----
   <property>
-    <name>dfs.nfs.kerberos.principal</name>
+    <name>nfs.kerberos.principal</name>
     <value>nfsserver/_HOST@YOUR-REALM.COM</value>
   </property>
 ----
@@ -121,7 +121,7 @@ HDFS NFS Gateway
 
 ----
   <property>    
-    <name>dfs.nfs3.dump.dir</name>
+    <name>nfs.dump.dir</name>
     <value>/tmp/.hdfs-nfs</value>
   </property>
 ---- 
@@ -134,7 +134,7 @@ HDFS NFS Gateway
 
 ----
 <property>
-  <name>dfs.nfs.rtmax</name>
+  <name>nfs.rtmax</name>
   <value>1048576</value>
   <description>This is the maximum size in bytes of a READ request
     supported by the NFS gateway. If you change this, make sure you
@@ -146,7 +146,7 @@ HDFS NFS Gateway
 
 ----
 <property>
-  <name>dfs.nfs.wtmax</name>
+  <name>nfs.wtmax</name>
   <value>65536</value>
   <description>This is the maximum size in bytes of a WRITE request
     supported by the NFS gateway. If you change this, make sure you
@@ -167,7 +167,7 @@ HDFS NFS Gateway
 
 ----
 <property>
-  <name>dfs.nfs.exports.allowed.hosts</name>
+  <name>nfs.exports.allowed.hosts</name>
   <value>* rw</value>
 </property>
 ----
@@ -345,7 +345,7 @@ HDFS NFS Gateway
   file in the event one wishes to access the HDFS NFS Gateway from a system with
   a completely disparate set of UIDs/GIDs. By default this file is located at
   "/etc/nfs.map", but a custom location can be configured by setting the
-  "dfs.nfs.static.mapping.file" property to the path of the static mapping file.
+  "nfs.static.mapping.file" property to the path of the static mapping file.
   The format of the static mapping file is similar to what is described in the
   exports(5) manual page, but roughly it is:
 

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java

@@ -167,6 +167,9 @@ public class TestGetBlocks {
       if (stm != null) {
         stm.close();
       }
+      if (client != null) {
+        client.close();
+      }
       cluster.shutdown();
     }
   }

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.File;
+import java.io.FileDescriptor;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -1112,5 +1113,11 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   public FsVolumeSpi getVolume(ExtendedBlock b) {
     throw new UnsupportedOperationException();
   }
+
+  @Override
+  public void submitBackgroundSyncFileRangeRequest(ExtendedBlock block,
+      FileDescriptor fd, long offset, long nbytes, int flags) {
+    throw new UnsupportedOperationException();
+  }
 }
 

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/TestAvailableSpaceVolumeChoosingPolicy.java

@@ -22,6 +22,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_AVAILABLE_SPACE_
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Random;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
@@ -251,10 +252,10 @@ public class TestAvailableSpaceVolumeChoosingPolicy {
    */
   public void doRandomizedTest(float preferencePercent, int lowSpaceVolumes,
       int highSpaceVolumes) throws Exception {
-    @SuppressWarnings("unchecked")
-    final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy = 
-        ReflectionUtils.newInstance(AvailableSpaceVolumeChoosingPolicy.class, null);
-    
+    Random random = new Random(123L);
+    final AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi> policy =
+        new AvailableSpaceVolumeChoosingPolicy<FsVolumeSpi>(random);
+
     List<FsVolumeSpi> volumes = new ArrayList<FsVolumeSpi>();
     
     // Volumes with 1MB free space

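The test now constructs the policy with an explicitly seeded Random instead of relying on ReflectionUtils.newInstance, so the randomized run is reproducible. The underlying pattern in isolation (an illustrative sketch, not the policy itself):

import java.util.Random;

public class SeededRandomExample {
  public static void main(String[] args) {
    // Two Randoms with the same seed produce identical sequences, so a
    // "randomized" test can exercise varied inputs while staying deterministic.
    Random r1 = new Random(123L);
    Random r2 = new Random(123L);
    boolean identical = true;
    for (int i = 0; i < 5; i++) {
      identical &= (r1.nextInt(100) == r2.nextInt(100));
    }
    System.out.println("same seed, same sequence: " + identical); // true
  }
}
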
+ 111 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java

@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.FileNotFoundException;
+import java.security.PrivilegedExceptionAction;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
@@ -32,8 +35,11 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -381,6 +387,111 @@ public class FSXAttrBaseTest {
     
     fs.removeXAttr(path, name3);
   }
+
+  /**
+   * Test the listXAttrs api.
+   * listXAttrs on a path that doesn't exist.
+   * listXAttrs on a path with no XAttrs
+   * Check basic functionality.
+   * Check that read access to parent dir is not enough to get xattr names
+   * Check that write access to the parent dir is not enough to get names
+   * Check that execute/scan access to the parent dir is sufficient to get
+   *  xattr names.
+   */
+  @Test(timeout = 120000)
+  public void testListXAttrs() throws Exception {
+    final UserGroupInformation user = UserGroupInformation.
+      createUserForTesting("user", new String[] {"mygroup"});
+
+    /* listXAttrs in a path that doesn't exist. */
+    try {
+      fs.listXAttrs(path);
+      fail("expected FileNotFoundException");
+    } catch (FileNotFoundException e) {
+      GenericTestUtils.assertExceptionContains("cannot find", e);
+    }
+
+    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
+
+    /* listXAttrs on a path with no XAttrs.*/
+    final List<String> noXAttrs = fs.listXAttrs(path);
+    assertTrue("XAttrs were found?", noXAttrs.size() == 0);
+
+    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+
+    /** Check basic functionality. */
+    final List<String> xattrNames = fs.listXAttrs(path);
+    assertTrue(xattrNames.contains(name1));
+    assertTrue(xattrNames.contains(name2));
+    assertTrue(xattrNames.size() == 2);
+
+    /* Check that read access to parent dir is not enough to get xattr names. */
+    fs.setPermission(path, new FsPermission((short) 0704));
+    final Path childDir = new Path(path, "child" + pathCount);
+    FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short) 0700));
+    fs.setXAttr(childDir, name1, "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.listXAttrs(childDir);
+            return null;
+          }
+        });
+      fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that write access to the parent dir is not enough to get names.
+     */
+    fs.setPermission(path, new FsPermission((short) 0702));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.listXAttrs(childDir);
+            return null;
+          }
+        });
+      fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that execute/scan access to the parent dir is sufficient to get
+     * xattr names.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          userFs.listXAttrs(childDir);
+          return null;
+        }
+      });
+
+    /*
+     * Test that xattrs in the "trusted" namespace are filtered correctly.
+     */
+    fs.setXAttr(childDir, "trusted.myxattr", "1234".getBytes());
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          assertTrue(userFs.listXAttrs(childDir).size() == 1);
+          return null;
+        }
+      });
+
+    assertTrue(fs.listXAttrs(childDir).size() == 2);
+  }
   
   /**
    * Steps:

+ 116 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyBlockManagement.java

@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.ha;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.base.Supplier;
+
+/**
+ * Makes sure that standby doesn't do the unnecessary block management such as
+ * invalidate block, etc.
+ */
+public class TestStandbyBlockManagement {
+  protected static final Log LOG = LogFactory.getLog(
+      TestStandbyBlockManagement.class);
+  private static final String TEST_FILE_DATA = "hello world";
+  private static final String TEST_FILE = "/TestStandbyBlockManagement";
+  private static final Path TEST_FILE_PATH = new Path(TEST_FILE);
+
+  static {
+    ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LogFactory.getLog(BlockManager.class)).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+  }
+
+  @Test(timeout=60000)
+  public void testInvalidateBlock() throws Exception {
+    Configuration conf = new Configuration();
+    HAUtil.setAllowStandbyReads(conf, true);
+    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology())
+        .numDataNodes(3)
+        .build();
+    try {
+      cluster.waitActive();
+      cluster.transitionToActive(0);
+
+      NameNode nn1 = cluster.getNameNode(0);
+      NameNode nn2 = cluster.getNameNode(1);
+
+      FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
+
+      Thread.sleep(1000);
+      LOG.info("==================================");
+      DFSTestUtil.writeFile(fs, TEST_FILE_PATH, TEST_FILE_DATA);
+      // Have to force an edit log roll so that the standby catches up
+      nn1.getRpcServer().rollEditLog();
+      LOG.info("==================================");
+
+      // delete the file
+      fs.delete(TEST_FILE_PATH, false);
+      BlockManagerTestUtil.computeAllPendingWork(
+          nn1.getNamesystem().getBlockManager());
+
+      nn1.getRpcServer().rollEditLog();
+
+      // standby nn doesn't need to invalidate blocks.
+      assertEquals(0,
+          nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());
+
+      cluster.triggerHeartbeats();
+      cluster.triggerBlockReports();
+
+      // standby nn doesn't need to invalidate blocks.
+      assertEquals(0,
+          nn2.getNamesystem().getBlockManager().getPendingDeletionBlocksCount());
+
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+}

+ 12 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java

@@ -153,6 +153,18 @@ public class TestStandbyCheckpoints {
     // do a checkpoint and save one to its local directories.
     HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(12));
 
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        if(tmpOivImgDir.list().length > 0) {
+          return true;
+        }
+        else {
+          return false;
+        }
+      }
+    }, 1000, 60000);
+    
     // It should have saved the oiv image too.
     assertEquals("One file is expected", 1, tmpOivImgDir.list().length);
     

+ 10 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java

@@ -143,10 +143,10 @@ public class TestSnapshotFileLength {
 
     // Make sure we can read the entire file via its non-snapshot path.
     fileStatus = hdfs.getFileStatus(file1);
-    assertEquals(fileStatus.getLen(), BLOCKSIZE * 2);
+    assertEquals("Unexpected file length", BLOCKSIZE * 2, fileStatus.getLen());
     fis = hdfs.open(file1);
     bytesRead = fis.read(buffer, 0, buffer.length);
-    assertEquals(bytesRead, BLOCKSIZE * 2);
+    assertEquals("Unexpected # bytes read", BLOCKSIZE * 2, bytesRead);
     fis.close();
 
     Path file1snap1 =
@@ -156,21 +156,23 @@ public class TestSnapshotFileLength {
     assertEquals(fileStatus.getLen(), BLOCKSIZE);
     // Make sure we can only read up to the snapshot length.
     bytesRead = fis.read(buffer, 0, buffer.length);
-    assertEquals(bytesRead, BLOCKSIZE);
+    assertEquals("Unexpected # bytes read", BLOCKSIZE, bytesRead);
     fis.close();
 
-    PrintStream psBackup = System.out;
+    PrintStream outBackup = System.out;
+    PrintStream errBackup = System.err;
     ByteArrayOutputStream bao = new ByteArrayOutputStream();
     System.setOut(new PrintStream(bao));
     System.setErr(new PrintStream(bao));
     // Make sure we can cat the file upto to snapshot length
     FsShell shell = new FsShell();
-    try{
+    try {
       ToolRunner.run(conf, shell, new String[] { "-cat",
       "/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1" });
-      assertEquals(bao.size(), BLOCKSIZE);
-    }finally{
-      System.setOut(psBackup);
+      assertEquals("Unexpected # bytes from -cat", BLOCKSIZE, bao.size());
+    } finally {
+      System.setOut(outBackup);
+      System.setErr(errBackup);
     }
   }
 }

+ 12 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java

@@ -234,6 +234,18 @@ public class TestJsonUtil {
           parsedXAttrMap.get(entry.getKey()));
     }
   }
+  
+  @Test
+  public void testGetXAttrFromJson() throws IOException {
+    String jsonString = 
+        "{\"XAttrs\":[{\"name\":\"user.a1\",\"value\":\"0x313233\"}," +
+        "{\"name\":\"user.a2\",\"value\":\"0x313131\"}]}";
+    Map<?, ?> json = (Map<?, ?>) JSON.parse(jsonString);
+    
+    // Get xattr: user.a2
+    byte[] value = JsonUtil.getXAttr(json, "user.a2");
+    Assert.assertArrayEquals(XAttrCodec.decodeValue("0x313131"), value);
+  }
 
   private void checkDecodeFailure(Map<String, Object> map) {
     try {

+ 38 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java

@@ -54,7 +54,8 @@ public class TestNetworkTopology {
         DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
         DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d1/r2"),
         DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3"),
-        DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3")
+        DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3"),
+        DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r3")
     };
     for (int i = 0; i < dataNodes.length; i++) {
       cluster.add(dataNodes[i]);
@@ -117,14 +118,14 @@ public class TestNetworkTopology {
   }
 
   @Test
-  public void testPseudoSortByDistance() throws Exception {
+  public void testSortByDistance() throws Exception {
     DatanodeDescriptor[] testNodes = new DatanodeDescriptor[3];
     
     // array contains both local node & local rack node
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[2];
     testNodes[2] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[2]);
@@ -133,7 +134,7 @@ public class TestNetworkTopology {
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[0];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[0]);
     assertTrue(testNodes[1] == dataNodes[1]);
     assertTrue(testNodes[2] == dataNodes[3]);
@@ -142,21 +143,50 @@ public class TestNetworkTopology {
     testNodes[0] = dataNodes[5];
     testNodes[1] = dataNodes[3];
     testNodes[2] = dataNodes[1];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[3]);
     assertTrue(testNodes[2] == dataNodes[5]);
-    
+
     // array contains local rack node which happens to be in position 0
     testNodes[0] = dataNodes[1];
     testNodes[1] = dataNodes[5];
     testNodes[2] = dataNodes[3];
-    cluster.pseudoSortByDistance(dataNodes[0], testNodes );
-    // peudoSortByDistance does not take the "data center" layer into consideration 
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEADBEEF);
+    assertTrue(testNodes[0] == dataNodes[1]);
+    assertTrue(testNodes[1] == dataNodes[3]);
+    assertTrue(testNodes[2] == dataNodes[5]);
+
+    // Same as previous, but with a different random seed to test randomization
+    testNodes[0] = dataNodes[1];
+    testNodes[1] = dataNodes[5];
+    testNodes[2] = dataNodes[3];
+    cluster.sortByDistance(dataNodes[0], testNodes, 0xDEAD);
+    // sortByDistance does not take the "data center" layer into consideration
     // and it doesn't sort by getDistance, so 1, 5, 3 is also valid here
     assertTrue(testNodes[0] == dataNodes[1]);
     assertTrue(testNodes[1] == dataNodes[5]);
     assertTrue(testNodes[2] == dataNodes[3]);
+
+    // Array is just local rack nodes
+    // Expect a random first node depending on the seed (normally the block ID).
+    DatanodeDescriptor first = null;
+    boolean foundRandom = false;
+    for (int i=5; i<=7; i++) {
+      testNodes[0] = dataNodes[5];
+      testNodes[1] = dataNodes[6];
+      testNodes[2] = dataNodes[7];
+      cluster.sortByDistance(dataNodes[i], testNodes, 0xBEADED+i);
+      if (first == null) {
+        first = testNodes[0];
+      } else {
+        if (first != testNodes[0]) {
+          foundRandom = true;
+          break;
+        }
+      }
+    }
+    assertTrue("Expected to find a different first location", foundRandom);
   }
   
   @Test

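The renamed sortByDistance takes an extra seed argument (the test passes literals such as 0xDEADBEEF; as the comments above note, HDFS would normally pass something block-specific) so that nodes at equal distance are ordered consistently for the same block but differently across blocks. A rough standalone sketch of calling it, assuming the three-argument overload exercised by the test above:

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;

public class SortByDistanceExample {
  public static void main(String[] args) {
    NetworkTopology topology = new NetworkTopology();

    Node reader = new NodeBase("reader-host", "/d1/r1");
    Node n1 = new NodeBase("host1", "/d1/r1");
    Node n2 = new NodeBase("host2", "/d1/r2");
    Node n3 = new NodeBase("host3", "/d2/r3");
    topology.add(reader);
    topology.add(n1);
    topology.add(n2);
    topology.add(n3);

    // Candidates in arbitrary order; the seed stands in for a block ID.
    Node[] candidates = { n3, n2, n1 };
    topology.sortByDistance(reader, candidates, 0xDEADBEEF);

    for (Node n : candidates) {
      System.out.println(n.getName() + " @ " + n.getNetworkLocation());
    }
  }
}
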
+ 358 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml

@@ -16118,6 +16118,364 @@
       </comparators>
     </test>
 
+    <!-- DFS tests
+        Must come before moveFromLocal tests until HDFS-6471 is fixed -->
+    <test>
+      <description>appendToFile</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -touchz /user/USERNAME/dir1/file0</command>
+        <command>-fs NAMENODE -appendToFile CLITEST_DATA/data15bytes /user/USERNAME/dir1/file0</command>
+        <command>-fs NAMENODE -cat /user/USERNAME/dir1/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>text</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /user/USERNAME/dir1/file0</command>
+        <command>-fs NAMENODE -text /user/USERNAME/dir1/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME/dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>rmdir</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -rmdir /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -ls /user/USERNAME/</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>rmdir with ignore-fail-on-non-empty</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /user/USERNAME/dir1/file0</command>
+        <command>-fs NAMENODE -rmdir --ignore-fail-on-non-empty /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -ls /user/USERNAME/</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/USERNAME/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>df</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes  /user/USERNAME/dir1/file0</command>
+        <command>-fs NAMENODE -df /user/USERNAME</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Filesystem( )*Size( )*Used( )*Available( )*Use%.*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>expunge</description>
+      <test-commands>
+        <command>-fs NAMENODE -expunge</command>
+      </test-commands>
+      <cleanup-commands>
+        <command></command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>getmerge</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /user/USERNAME/dir1</command>
+        <command>-fs NAMENODE -getmerge /user/USERNAME/dir1 data</command>
+        <command>-cat data</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+        <command>rm data</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234.*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Tests for snapshots -->
+    <test>
+      <description>allowSnapshot</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -allowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -disallowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Allowing snaphot on /user/USERNAME/dir1 succeeded</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>disallowSnapshot</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -allowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -disallowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Disallowing snaphot on /user/USERNAME/dir1 succeeded</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>createSnapshot</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -allowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -createSnapshot /user/USERNAME/dir1 snapshot1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -deleteSnapshot /user/USERNAME/dir1 snapshot1</command>
+        <dfs-admin-command>-fs NAMENODE -disallowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Created snapshot /user/USERNAME/dir1/.snapshot/snapshot1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>renameSnapshot</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -allowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -createSnapshot /user/USERNAME/dir1 snapshot1</command>
+        <command>-fs NAMENODE -renameSnapshot /user/USERNAME/dir1 snapshot1 snapshot2</command>
+        <command>-fs NAMENODE -ls /user/USERNAME/dir1/.snapshot</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -deleteSnapshot /user/USERNAME/dir1 snapshot2</command>
+        <dfs-admin-command>-fs NAMENODE -disallowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*USERNAME( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/USERNAME/dir1/.snapshot/snapshot2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>deleteSnapshot</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -allowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -createSnapshot /user/USERNAME/dir1 snapshot1</command>
+        <command>-fs NAMENODE -deleteSnapshot /user/USERNAME/dir1 snapshot1</command>
+        <command>-fs NAMENODE -ls /user/USERNAME/dir1/.snapshot</command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -disallowSnapshot /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- DFSadmin tests -->
+    <test>
+      <description>refreshUserToGroupsMappings</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -refreshUserToGroupsMappings</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command></command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>refreshSuperUserGroupsConfiguration</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -refreshSuperUserGroupsConfiguration</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command></command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>setQuota</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 3 /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -count -q /user/USERNAME/dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( )*3.*/user/USERNAME/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>clrQuota</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 3 /user/USERNAME/dir1</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -clrQuota /user/USERNAME/dir1</dfs-admin-command>
+        <command>-fs NAMENODE -count -q /user/USERNAME/dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( )*none.*/user/USERNAME/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>setSpaceQuota</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1G /user/USERNAME/dir1</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user/USERNAME/dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>setBalancerBandwidth</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -setBalancerBandwidth 104857600</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command></command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>finalizeUpgrade</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -finalizeUpgrade</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command></command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
     <!-- Tests for moveFromLocal -->
     <test> <!-- TESTED -->
       <description>moveFromLocal: moving non existent file(absolute path)</description>

+ 6 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -142,6 +142,9 @@ Trunk (Unreleased)
     MAPREDUCE-5867. Fix NPE in KillAMPreemptionPolicy related to 
     ProportionalCapacityPreemptionPolicy (Sunil G via devaraj)
 
+    MAPREDUCE-5898. distcp to support preserving HDFS extended attributes(XAttrs)
+    (Yi Liu via umamahesh)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -247,6 +250,9 @@ Release 2.5.0 - UNRELEASED
     MAPREDUCE-5895. Close streams properly to avoid leakage in TaskLog. 
     (Kousuke Saruta via devaraj)
 
+    MAPREDUCE-5777. Support utf-8 text with Byte Order Marker.
+    (Zhihai Xu via kasha)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 1 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml

@@ -91,6 +91,7 @@
         <configuration>
           <excludes>
             <exclude>src/test/resources/recordSpanningMultipleSplits.txt</exclude>
+            <exclude>src/test/resources/testBOM.txt</exclude>
           </excludes>
         </configuration>
       </plugin>

+ 41 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/LineRecordReader.java

@@ -197,6 +197,39 @@ public class LineRecordReader implements RecordReader<LongWritable, Text> {
     return retVal;
   }
 
+  private int skipUtfByteOrderMark(Text value) throws IOException {
+    // Strip BOM(Byte Order Mark)
+    // Text only support UTF-8, we only need to check UTF-8 BOM
+    // (0xEF,0xBB,0xBF) at the start of the text stream.
+    int newMaxLineLength = (int) Math.min(3L + (long) maxLineLength,
+        Integer.MAX_VALUE);
+    int newSize = in.readLine(value, newMaxLineLength, maxBytesToConsume(pos));
+    // Even we read 3 extra bytes for the first line,
+    // we won't alter existing behavior (no backwards incompat issue).
+    // Because the newSize is less than maxLineLength and
+    // the number of bytes copied to Text is always no more than newSize.
+    // If the return size from readLine is not less than maxLineLength,
+    // we will discard the current line and read the next line.
+    pos += newSize;
+    int textLength = value.getLength();
+    byte[] textBytes = value.getBytes();
+    if ((textLength >= 3) && (textBytes[0] == (byte)0xEF) &&
+        (textBytes[1] == (byte)0xBB) && (textBytes[2] == (byte)0xBF)) {
+      // find UTF-8 BOM, strip it.
+      LOG.info("Found UTF-8 BOM and skipped it");
+      textLength -= 3;
+      newSize -= 3;
+      if (textLength > 0) {
+        // It may work to use the same buffer and not do the copyBytes
+        textBytes = value.copyBytes();
+        value.set(textBytes, 3, textLength);
+      } else {
+        value.clear();
+      }
+    }
+    return newSize;
+  }
+
   /** Read a line. */
   public synchronized boolean next(LongWritable key, Text value)
     throws IOException {
@@ -206,11 +239,17 @@ public class LineRecordReader implements RecordReader<LongWritable, Text> {
     while (getFilePosition() <= end || in.needAdditionalRecordAfterSplit()) {
       key.set(pos);
 
-      int newSize = in.readLine(value, maxLineLength, maxBytesToConsume(pos));
+      int newSize = 0;
+      if (pos == 0) {
+        newSize = skipUtfByteOrderMark(value);
+      } else {
+        newSize = in.readLine(value, maxLineLength, maxBytesToConsume(pos));
+        pos += newSize;
+      }
+
       if (newSize == 0) {
         return false;
       }
-      pos += newSize;
       if (newSize < maxLineLength) {
         return true;
       }

+ 41 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/LineRecordReader.java

@@ -134,6 +134,39 @@ public class LineRecordReader extends RecordReader<LongWritable, Text> {
     return retVal;
   }
 
+  private int skipUtfByteOrderMark() throws IOException {
+    // Strip BOM(Byte Order Mark)
+    // Text only support UTF-8, we only need to check UTF-8 BOM
+    // (0xEF,0xBB,0xBF) at the start of the text stream.
+    int newMaxLineLength = (int) Math.min(3L + (long) maxLineLength,
+        Integer.MAX_VALUE);
+    int newSize = in.readLine(value, newMaxLineLength, maxBytesToConsume(pos));
+    // Even we read 3 extra bytes for the first line,
+    // we won't alter existing behavior (no backwards incompat issue).
+    // Because the newSize is less than maxLineLength and
+    // the number of bytes copied to Text is always no more than newSize.
+    // If the return size from readLine is not less than maxLineLength,
+    // we will discard the current line and read the next line.
+    pos += newSize;
+    int textLength = value.getLength();
+    byte[] textBytes = value.getBytes();
+    if ((textLength >= 3) && (textBytes[0] == (byte)0xEF) &&
+        (textBytes[1] == (byte)0xBB) && (textBytes[2] == (byte)0xBF)) {
+      // find UTF-8 BOM, strip it.
+      LOG.info("Found UTF-8 BOM and skipped it");
+      textLength -= 3;
+      newSize -= 3;
+      if (textLength > 0) {
+        // It may work to use the same buffer and not do the copyBytes
+        textBytes = value.copyBytes();
+        value.set(textBytes, 3, textLength);
+      } else {
+        value.clear();
+      }
+    }
+    return newSize;
+  }
+
   public boolean nextKeyValue() throws IOException {
     if (key == null) {
       key = new LongWritable();
@@ -146,9 +179,14 @@ public class LineRecordReader extends RecordReader<LongWritable, Text> {
     // We always read one extra line, which lies outside the upper
     // split limit i.e. (end - 1)
     while (getFilePosition() <= end || in.needAdditionalRecordAfterSplit()) {
-      newSize = in.readLine(value, maxLineLength, maxBytesToConsume(pos));
-      pos += newSize;
-      if (newSize < maxLineLength) {
+      if (pos == 0) {
+        newSize = skipUtfByteOrderMark();
+      } else {
+        newSize = in.readLine(value, maxLineLength, maxBytesToConsume(pos));
+        pos += newSize;
+      }
+
+      if ((newSize == 0) || (newSize < maxLineLength)) {
         break;
       }
 

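Both record readers apply the same check: on the very first read (pos == 0) they look for the three-byte UTF-8 BOM 0xEF 0xBB 0xBF and drop it before handing the line to the caller. The core idea in isolation (a standalone sketch, independent of the Hadoop classes):

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class Utf8BomExample {
  /** Returns the input without a leading UTF-8 BOM (0xEF 0xBB 0xBF), if present. */
  static byte[] stripBom(byte[] bytes) {
    if (bytes.length >= 3
        && bytes[0] == (byte) 0xEF
        && bytes[1] == (byte) 0xBB
        && bytes[2] == (byte) 0xBF) {
      return Arrays.copyOfRange(bytes, 3, bytes.length);
    }
    return bytes;
  }

  public static void main(String[] args) {
    byte[] withBom = { (byte) 0xEF, (byte) 0xBB, (byte) 0xBF, 'h', 'i' };
    System.out.println(new String(stripBom(withBom), StandardCharsets.UTF_8)); // prints "hi"
  }
}
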
+ 37 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java

@@ -188,4 +188,41 @@ public class TestLineRecordReader {
     checkRecordSpanningMultipleSplits("recordSpanningMultipleSplits.txt.bz2",
         200 * 1000, true);
   }
+
+  @Test
+  public void testStripBOM() throws IOException {
+    // the test data contains a BOM at the start of the file
+    // confirm the BOM is skipped by LineRecordReader
+    String UTF8_BOM = "\uFEFF";
+    URL testFileUrl = getClass().getClassLoader().getResource("testBOM.txt");
+    assertNotNull("Cannot find testBOM.txt", testFileUrl);
+    File testFile = new File(testFileUrl.getFile());
+    Path testFilePath = new Path(testFile.getAbsolutePath());
+    long testFileSize = testFile.length();
+    Configuration conf = new Configuration();
+    conf.setInt(org.apache.hadoop.mapreduce.lib.input.
+        LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
+
+    // read the data and check whether BOM is skipped
+    FileSplit split = new FileSplit(testFilePath, 0, testFileSize,
+        (String[])null);
+    LineRecordReader reader = new LineRecordReader(conf, split);
+    LongWritable key = new LongWritable();
+    Text value = new Text();
+    int numRecords = 0;
+    boolean firstLine = true;
+    boolean skipBOM = true;
+    while (reader.next(key, value)) {
+      if (firstLine) {
+        firstLine = false;
+        if (value.toString().startsWith(UTF8_BOM)) {
+          skipBOM = false;
+        }
+      }
+      ++numRecords;
+    }
+    reader.close();
+
+    assertTrue("BOM is not skipped", skipBOM);
+  }
 }

+ 38 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java

@@ -193,4 +193,42 @@ public class TestLineRecordReader {
                                       200 * 1000,
                                       true);
   }
+
+  @Test
+  public void testStripBOM() throws IOException {
+    // the test data contains a BOM at the start of the file
+    // confirm the BOM is skipped by LineRecordReader
+    String UTF8_BOM = "\uFEFF";
+    URL testFileUrl = getClass().getClassLoader().getResource("testBOM.txt");
+    assertNotNull("Cannot find testBOM.txt", testFileUrl);
+    File testFile = new File(testFileUrl.getFile());
+    Path testFilePath = new Path(testFile.getAbsolutePath());
+    long testFileSize = testFile.length();
+    Configuration conf = new Configuration();
+    conf.setInt(org.apache.hadoop.mapreduce.lib.input.
+        LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);
+
+    TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
+
+    // read the data and check whether BOM is skipped
+    FileSplit split = new FileSplit(testFilePath, 0, testFileSize,
+        (String[])null);
+    LineRecordReader reader = new LineRecordReader();
+    reader.initialize(split, context);
+    int numRecords = 0;
+    boolean firstLine = true;
+    boolean skipBOM = true;
+    while (reader.nextKeyValue()) {
+      if (firstLine) {
+        firstLine = false;
+        if (reader.getCurrentValue().toString().startsWith(UTF8_BOM)) {
+          skipBOM = false;
+        }
+      }
+      ++numRecords;
+    }
+    reader.close();
+
+    assertTrue("BOM is not skipped", skipBOM);
+  }
 }

+ 2 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/resources/testBOM.txt

@@ -0,0 +1,2 @@
+BOM(Byte Order Mark) test file
+BOM(Byte Order Mark) test file

+ 16 - 0
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListing.java

@@ -129,6 +129,7 @@ public abstract class CopyListing extends Configured {
   /**
    * Validate the final resulting path listing.  Checks if there are duplicate
    * entries.  If preserving ACLs, checks that file system can support ACLs.
+   * If preserving XAttrs, checks that file system can support XAttrs.
    *
    * @param pathToListFile - path listing build by doBuildListing
    * @param options - Input options to distcp
@@ -151,6 +152,7 @@ public abstract class CopyListing extends Configured {
 
       Text currentKey = new Text();
       Set<URI> aclSupportCheckFsSet = Sets.newHashSet();
+      Set<URI> xAttrSupportCheckFsSet = Sets.newHashSet();
       while (reader.next(currentKey)) {
         if (currentKey.equals(lastKey)) {
           CopyListingFileStatus currentFileStatus = new CopyListingFileStatus();
@@ -167,6 +169,14 @@ public abstract class CopyListing extends Configured {
             aclSupportCheckFsSet.add(lastFsUri);
           }
         }
+        if (options.shouldPreserve(DistCpOptions.FileAttribute.XATTR)) {
+          FileSystem lastFs = lastFileStatus.getPath().getFileSystem(config);
+          URI lastFsUri = lastFs.getUri();
+          if (!xAttrSupportCheckFsSet.contains(lastFsUri)) {
+            DistCpUtils.checkFileSystemXAttrSupport(lastFs);
+            xAttrSupportCheckFsSet.add(lastFsUri);
+          }
+        }
         lastKey.set(currentKey);
       }
     } finally {
@@ -256,4 +266,10 @@ public abstract class CopyListing extends Configured {
       super(message);
     }
   }
+  
+  public static class XAttrsNotSupportedException extends RuntimeException {
+    public XAttrsNotSupportedException(String message) {
+      super(message);
+    }
+  }
 }

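The new XAttr check mirrors the existing ACL check: each distinct filesystem URI in the listing is probed once via DistCpUtils.checkFileSystemXAttrSupport, with an unsupported filesystem presumably surfacing as the XAttrsNotSupportedException added above. The DistCpUtils helper itself is not shown in this diff; one plausible shape for such a probe (an assumption, not the actual implementation):

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class XAttrSupportProbe {
  /**
   * Hypothetical probe: attempt an xattr read on the root path and treat any
   * failure as lack of xattr support. The real DistCpUtils helper may differ.
   */
  static boolean supportsXAttrs(FileSystem fs) {
    try {
      fs.getXAttrs(new Path(Path.SEPARATOR));
      return true;
    } catch (UnsupportedOperationException | IOException e) {
      return false;
    }
  }
}
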
Some files were not shown because too many files changed in this diff