
Merge branch 'trunk' into HDFS-7240

Anu Engineer, 8 years ago
commit 0951726233
100 changed files with 6525 additions and 1369 deletions
  1. .gitignore (+5 -8)
  2. BUILDING.txt (+37 -1)
  3. LICENSE.txt (+785 -539)
  4. NOTICE.txt (+126 -3)
  5. dev-support/bin/create-release (+4 -1)
  6. dev-support/bin/dist-copynativelibs (+13 -0)
  7. dev-support/bin/dist-layout-stitching (+7 -1)
  8. dev-support/bin/qbt (+0 -0)
  9. dev-support/docker/Dockerfile (+1 -0)
  10. hadoop-assemblies/pom.xml (+2 -3)
  11. hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml (+0 -14)
  12. hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml (+24 -1)
  13. hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml (+24 -1)
  14. hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml (+0 -14)
  15. hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml (+0 -14)
  16. hadoop-build-tools/pom.xml (+3 -1)
  17. hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml (+1 -3)
  18. hadoop-client-modules/hadoop-client-api/pom.xml (+254 -0)
  19. hadoop-client-modules/hadoop-client-check-invariants/pom.xml (+124 -0)
  20. hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml (+143 -0)
  21. hadoop-client-modules/hadoop-client-integration-tests/pom.xml (+159 -0)
  22. hadoop-client-modules/hadoop-client-integration-tests/src/test/java/org/apache/hadoop/example/ITUseMiniCluster.java (+113 -0)
  23. hadoop-client-modules/hadoop-client-integration-tests/src/test/resources/hdfs-site.xml (+18 -8)
  24. hadoop-client-modules/hadoop-client-integration-tests/src/test/resources/log4j.properties (+24 -0)
  25. hadoop-client-modules/hadoop-client-minicluster/pom.xml (+792 -0)
  26. hadoop-client-modules/hadoop-client-runtime/pom.xml (+359 -0)
  27. hadoop-client-modules/hadoop-client/pom.xml (+26 -13)
  28. hadoop-client-modules/pom.xml (+45 -0)
  29. hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml (+2 -7)
  30. hadoop-cloud-storage-project/pom.xml (+2 -3)
  31. hadoop-common-project/hadoop-annotations/pom.xml (+2 -3)
  32. hadoop-common-project/hadoop-auth-examples/pom.xml (+2 -3)
  33. hadoop-common-project/hadoop-auth/pom.xml (+2 -3)
  34. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java (+3 -3)
  35. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java (+177 -77)
  36. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java (+2 -2)
  37. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java (+1 -1)
  38. hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java (+22 -0)
  39. hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml (+6 -0)
  40. hadoop-common-project/hadoop-common/pom.xml (+40 -20)
  41. hadoop-common-project/hadoop-common/src/CMakeLists.txt (+29 -0)
  42. hadoop-common-project/hadoop-common/src/config.h.cmake (+1 -0)
  43. hadoop-common-project/hadoop-common/src/main/bin/hadoop (+12 -1)
  44. hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh (+240 -26)
  45. hadoop-common-project/hadoop-common/src/main/bin/start-all.sh (+18 -5)
  46. hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh (+16 -3)
  47. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java (+16 -4)
  48. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfigurationWithLogging.java (+113 -0)
  49. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/CachingKeyProvider.java (+11 -3)
  50. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java (+14 -17)
  51. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java (+40 -0)
  52. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java (+84 -10)
  53. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderExtension.java (+5 -0)
  54. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java (+66 -1)
  55. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java (+107 -46)
  56. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java (+2 -0)
  57. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java (+35 -3)
  58. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java (+102 -15)
  59. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java (+5 -2)
  60. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java (+2 -0)
  61. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java (+27 -0)
  62. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java (+42 -2)
  63. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java (+1 -1)
  64. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java (+3 -0)
  65. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java (+19 -2)
  66. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java (+337 -200)
  67. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java (+13 -6)
  68. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java (+7 -0)
  69. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java (+6 -1)
  70. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java (+1 -1)
  71. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java (+1 -1)
  72. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java (+4 -8)
  73. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java (+2 -3)
  74. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java (+70 -1)
  75. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsCreateModes.java (+1 -0)
  76. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java (+19 -3)
  77. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java (+12 -2)
  78. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java (+1 -1)
  79. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java (+1 -10)
  80. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java (+95 -29)
  81. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java (+2 -2)
  82. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java (+11 -0)
  83. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java (+112 -24)
  84. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java (+164 -0)
  85. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java (+6 -6)
  86. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java (+34 -3)
  87. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java (+43 -4)
  88. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java (+5 -2)
  89. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java (+219 -63)
  90. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java (+70 -24)
  91. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Decompressor.java (+1 -1)
  92. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java (+11 -67)
  93. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/ZStandardCodec.java (+242 -0)
  94. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java (+305 -0)
  95. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.java (+323 -0)
  96. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/package-info.java (+22 -0)
  97. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java (+14 -14)
  98. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java (+9 -3)
  99. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureDecoder.java (+1 -1)
  100. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureEncoder.java (+1 -1)

+ 5 - 8
.gitignore

@@ -15,6 +15,11 @@
 .settings
 target
 build
+dependency-reduced-pom.xml
+
+# Filesystem contract test options and credentials
+auth-keys.xml
+azure-auth-keys.xml
 
 # External tool builders
 */.externalToolBuilders
@@ -23,8 +28,6 @@ build
 hadoop-common-project/hadoop-kms/downloads/
 hadoop-hdfs-project/hadoop-hdfs/downloads
 hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads
-hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
-hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/dist
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tmp
@@ -40,10 +43,4 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/testem.log
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/dist
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tmp
 yarnregistry.pdf
-hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml
-hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
-hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml
-hadoop-tools/hadoop-openstack/src/test/resources/auth-keys.xml
 patchprocess/
-hadoop-tools/hadoop-aliyun/src/test/resources/auth-keys.xml
-hadoop-tools/hadoop-aliyun/src/test/resources/contract-test-options.xml

+ 37 - 1
BUILDING.txt

@@ -83,6 +83,8 @@ Optional packages:
   $ sudo apt-get install libjansson-dev
 * Linux FUSE
   $ sudo apt-get install fuse libfuse-dev
+* ZStandard compression
+    $ sudo apt-get install zstd
 
 ----------------------------------------------------------------------------------
 Maven main modules:
@@ -131,6 +133,8 @@ Maven build goals:
   * Use -Dtar to create a TAR with the distribution (using -Pdist)
   * Use -Preleasedocs to include the changelog and release docs (requires Internet connectivity)
   * Use -Pyarn-ui to build YARN UI v2. (Requires Internet connectivity)
+  * Use -DskipShade to disable client jar shading to speed up build times (in
+    development environments only, not to build release artifacts)
 
  Snappy build options:
 
@@ -155,6 +159,29 @@ Maven build goals:
     and it ignores the -Dsnappy.prefix option. If -Dsnappy.lib isn't given, the
     bundling and building will fail.
 
+
+ ZStandard build options:
+
+   ZStandard is a compression library that can be utilized by the native code.
+   It is currently an optional component, meaning that Hadoop can be built with
+   or without this dependency.
+
+  * Use -Drequire.zstd to fail the build if libzstd.so is not found.
+    If this option is not specified and the zstd library is missing,
+    we silently build a version of libhadoop.so that cannot make use of zstd.
+
+  * Use -Dzstd.prefix to specify a nonstandard location for the libzstd
+    header files and library files. You do not need this option if you have
+    installed zstandard using a package manager.
+
+  * Use -Dzstd.lib to specify a nonstandard location for the libzstd library
+    files.  Similarly to zstd.prefix, you do not need this option if you have
+    installed using a package manager.
+
+  * Use -Dbundle.zstd to copy the contents of the zstd.lib directory into
+    the final tar file. This option requires that -Dzstd.lib is also given,
+    and it ignores the -Dzstd.prefix option. If -Dzstd.lib isn't given, the
+    bundling and building will fail.
+
  OpenSSL build options:
 
    OpenSSL includes a crypto library that can be utilized by the native code.
@@ -390,7 +417,7 @@ http://www.zlib.net/
 ----------------------------------------------------------------------------------
 Building distributions:
 
- * Build distribution with native code    : mvn package [-Pdist][-Pdocs][-Psrc][-Dtar]
+ * Build distribution with native code    : mvn package [-Pdist][-Pdocs][-Psrc][-Dtar][-Dmaven.javadoc.skip=true]
 
 ----------------------------------------------------------------------------------
 Running compatibility checks with checkcompatibility.py
@@ -402,3 +429,12 @@ managers to compare the compatibility of a previous and current release.
 As an example, this invocation will check the compatibility of interfaces annotated as Public or LimitedPrivate:
 
 ./dev-support/bin/checkcompatibility.py --annotation org.apache.hadoop.classification.InterfaceAudience.Public --annotation org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate --include "hadoop.*" branch-2.7.2 trunk
+
+----------------------------------------------------------------------------------
+Changing the Hadoop version returned by VersionInfo
+
+If for compatibility reasons the version of Hadoop has to be declared as a 2.x release in the information returned by
+org.apache.hadoop.util.VersionInfo, set the property declared.hadoop.version to the desired version.
+For example: mvn package -Pdist -Ddeclared.hadoop.version=2.11
+
+If unset, the project version declared in the POM file is used.
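
As a quick sketch combining the build options documented in this hunk (the zstd library location /usr/local/lib below is only an illustrative assumption; substitute wherever libzstd is actually installed):

  # Native distribution tarball that requires zstd and bundles the library
  $ mvn package -Pdist,native -DskipTests -Dtar \
      -Drequire.zstd -Dzstd.lib=/usr/local/lib -Dbundle.zstd

  # Faster development build that skips client jar shading (not for release artifacts)
  $ mvn package -DskipTests -DskipShade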

+ 785 - 539
LICENSE.txt

@@ -289,7 +289,7 @@ For src/main/native/src/org/apache/hadoop/io/compress/lz4/{lz4.h,lz4.c,lz4hc.h,l
 */
 
 
-For hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/native/gtest
+For hadoop-common-project/hadoop-common/src/main/native/gtest
 ---------------------------------------------------------------------
 Copyright 2008, Google Inc.
 All rights reserved.
@@ -324,6 +324,7 @@ The binary distribution of this product bundles these dependencies under the
 following license:
 re2j 1.0
 ---------------------------------------------------------------------
+(GO license)
 This is a work derived from Russ Cox's RE2 in Go, whose license
 http://golang.org/LICENSE is as follows:
 
@@ -577,17 +578,62 @@ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 OTHER DEALINGS IN THE SOFTWARE.
 
-For:
+The binary distribution of this product bundles these dependencies under the
+following license:
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css
-And the binary distribution of this product bundles these dependencies under the
-following license:
-Mockito 1.8.5
-SLF4J 1.7.10
+bootstrap v3.3.6
+broccoli-asset-rev v2.4.2
+broccoli-funnel v1.0.1
+datatables v1.10.8
+em-helpers v0.5.13
+em-table v0.1.6
+ember v2.2.0
+ember-array-contains-helper v1.0.2
+ember-bootstrap v0.5.1
+ember-cli v1.13.13
+ember-cli-app-version v1.0.0
+ember-cli-babel v5.1.6
+ember-cli-content-security-policy v0.4.0
+ember-cli-dependency-checker v1.2.0
+ember-cli-htmlbars v1.0.2
+ember-cli-htmlbars-inline-precompile v0.3.1
+ember-cli-ic-ajax v0.2.1
+ember-cli-inject-live-reload v1.4.0
+ember-cli-jquery-ui v0.0.20
+ember-cli-qunit v1.2.1
+ember-cli-release v0.2.8
+ember-cli-shims v0.0.6
+ember-cli-sri v1.2.1
+ember-cli-test-loader v0.2.1
+ember-cli-uglify v1.2.0
+ember-d3 v0.1.0
+ember-data v2.1.0
+ember-disable-proxy-controllers v1.0.1
+ember-export-application-global v1.0.5
+ember-load-initializers v0.1.7
+ember-qunit v0.4.16
+ember-qunit-notifications v0.1.0
+ember-resolver v2.0.3
+ember-spin-spinner v0.2.3
+ember-truth-helpers v1.2.0
+jquery v2.1.4
+jquery-ui v1.11.4
+loader.js v3.3.0
+momentjs v2.10.6
+qunit v1.19.0
+select2 v4.0.0
+snippet-ss v1.11.0
+spin.js v2.3.2
+Azure Data Lake Store - Java client SDK 2.0.11
 JCodings 1.0.8
 Joni 2.1.2
+Mockito 1.8.5
+JUL to SLF4J bridge 1.7.10
+SLF4J API Module 1.7.10
+SLF4J LOG4J-12 Binding 1.7.10
 --------------------------------------------------------------------------------
 
 The MIT License (MIT)
@@ -614,9 +660,10 @@ For:
 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery
+Apache HBase - Server which contains JQuery minified javascript library version 1.8.3
 --------------------------------------------------------------------------------
 
-Copyright jQuery Foundation and other contributors, https://jquery.org/
+Copyright 2005, 2012, 2013 jQuery Foundation and other contributors, https://jquery.org/
 
 This software consists of voluntary contributions made by many
 individuals. For exact contribution history, see the revision history
@@ -654,7 +701,7 @@ own licenses; we recommend you read them, as their terms may differ from
 the terms above.
 
 For:
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js.gz
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js
 --------------------------------------------------------------------------------
 
 Copyright (c) 2014 Ivan Bozhanov
@@ -691,6 +738,7 @@ The binary distribution of this product bundles these dependencies under the
 following license:
 HSQLDB Database 2.0.0
 --------------------------------------------------------------------------------
+(HSQL License)
 "COPYRIGHTS AND LICENSES (based on BSD License)
 
 For work developed by the HSQL Development Group:
@@ -760,10 +808,14 @@ Hypersonic SQL Group."
 
 The binary distribution of this product bundles these dependencies under the
 following license:
+Java Servlet API 3.1.0
 servlet-api 2.5
 jsp-api 2.1
-Streaming API for XML 1.0
+jsr311-api 1.1.1
+Glassfish Jasper 6.1.14
+Servlet Specification 2.5 API 6.1.14
 --------------------------------------------------------------------------------
+(CDDL 1.0)
 COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0
 1. Definitions. 
 
@@ -1075,10 +1127,24 @@ of liability. 
 
 The binary distribution of this product bundles these dependencies under the
 following license:
-Jersey 1.9
-JAXB API bundle for GlassFish V3 2.2.2
+jersey-client 1.19
+jersey-core 1.19
+jersey-grizzly2 1.19
+jersey-grizzly2-servlet 1.19
+jersey-json 1.19
+jersey-server 1.19
+jersey-servlet 1.19
+jersey-guice 1.19
+Jersey Test Framework - Grizzly 2 Module 1.19
 JAXB RI 2.2.3
+Java Architecture for XML Binding 2.2.11
+grizzly-framework 2.2.21
+grizzly-http 2.2.21
+grizzly-http-server 2.2.21
+grizzly-http-servlet 2.2.21
+grizzly-rcm 2.2.21
 --------------------------------------------------------------------------------
+(CDDL 1.1)
 COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)Version 1.1
 
 1. Definitions.
@@ -1438,8 +1504,9 @@ derived.)
 The binary distribution of this product bundles these dependencies under the
 following license:
 JUnit 4.11
-ecj-4.3.1.jar
+Eclipse JDT Core 3.1.1
 --------------------------------------------------------------------------------
+(EPL v1.0)
 Eclipse Public License - v 1.0
 
 THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC
@@ -1636,20 +1703,17 @@ any resulting litigation.
 
 The binary distribution of this product bundles these dependencies under the
 following license:
-ASM Core 3.2
 JSch 0.1.51
 ParaNamer Core 2.3
 JLine 0.9.94
 leveldbjni-all 1.8
 Hamcrest Core 1.3
+ASM Core 5.0.4
+ASM Commons 5.0.2
+ASM Tree 5.0.2
 xmlenc Library 0.52
-StringTemplate 4 4.0.7
-ANTLR 3 Tool 3.5
-ANTLR 3 Runtime 3.5
-ANTLR StringTemplate 3.2.1
-ASM All 5.0.2
-sqlline 1.1.8
 --------------------------------------------------------------------------------
+(3-clause BSD)
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
     * Redistributions of source code must retain the above copyright
@@ -1676,6 +1740,7 @@ The binary distribution of this product bundles these dependencies under the
 following license:
 FindBugs-jsr305 3.0.0
 --------------------------------------------------------------------------------
+(2-clause BSD)
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
 
@@ -1702,8 +1767,9 @@ either expressed or implied, of the FreeBSD Project.
 
 The binary distribution of this product bundles these dependencies under the
 following license:
-Java Concurrency in Practice book annotations 1.0
+"Java Concurrency in Practice" book annotations 1.0
 --------------------------------------------------------------------------------
+(CCAL v2.5)
 THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS
 PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR
 OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS
@@ -1879,538 +1945,718 @@ the Licensor and You.
 
 The binary distribution of this product bundles these dependencies under the
 following license:
-jamon-runtime 2.3.1
+jamon-runtime 2.4.1
 --------------------------------------------------------------------------------
-                          MOZILLA PUBLIC LICENSE
-                                Version 1.1
+(MPL 2.0)
+                            Mozilla Public License
+                                Version 2.0
 
-                              ---------------
+1. Definitions
 
-1. Definitions.
+1.1. “Contributor”
+means each individual or legal entity that creates, contributes to the creation
+of, or owns Covered Software.
 
-     1.0.1. "Commercial Use" means distribution or otherwise making the
-     Covered Code available to a third party.
-
-     1.1. "Contributor" means each entity that creates or contributes to
-     the creation of Modifications.
-
-     1.2. "Contributor Version" means the combination of the Original
-     Code, prior Modifications used by a Contributor, and the Modifications
-     made by that particular Contributor.
-
-     1.3. "Covered Code" means the Original Code or Modifications or the
-     combination of the Original Code and Modifications, in each case
-     including portions thereof.
-
-     1.4. "Electronic Distribution Mechanism" means a mechanism generally
-     accepted in the software development community for the electronic
-     transfer of data.
-
-     1.5. "Executable" means Covered Code in any form other than Source
-     Code.
-
-     1.6. "Initial Developer" means the individual or entity identified
-     as the Initial Developer in the Source Code notice required by Exhibit
-     A.
-
-     1.7. "Larger Work" means a work which combines Covered Code or
-     portions thereof with code not governed by the terms of this License.
-
-     1.8. "License" means this document.
-
-     1.8.1. "Licensable" means having the right to grant, to the maximum
-     extent possible, whether at the time of the initial grant or
-     subsequently acquired, any and all of the rights conveyed herein.
-
-     1.9. "Modifications" means any addition to or deletion from the
-     substance or structure of either the Original Code or any previous
-     Modifications. When Covered Code is released as a series of files, a
-     Modification is:
-          A. Any addition to or deletion from the contents of a file
-          containing Original Code or previous Modifications.
-
-          B. Any new file that contains any part of the Original Code or
-          previous Modifications.
-
-     1.10. "Original Code" means Source Code of computer software code
-     which is described in the Source Code notice required by Exhibit A as
-     Original Code, and which, at the time of its release under this
-     License is not already Covered Code governed by this License.
-
-     1.10.1. "Patent Claims" means any patent claim(s), now owned or
-     hereafter acquired, including without limitation,  method, process,
-     and apparatus claims, in any patent Licensable by grantor.
-
-     1.11. "Source Code" means the preferred form of the Covered Code for
-     making modifications to it, including all modules it contains, plus
-     any associated interface definition files, scripts used to control
-     compilation and installation of an Executable, or source code
-     differential comparisons against either the Original Code or another
-     well known, available Covered Code of the Contributor's choice. The
-     Source Code can be in a compressed or archival form, provided the
-     appropriate decompression or de-archiving software is widely available
-     for no charge.
-
-     1.12. "You" (or "Your")  means an individual or a legal entity
-     exercising rights under, and complying with all of the terms of, this
-     License or a future version of this License issued under Section 6.1.
-     For legal entities, "You" includes any entity which controls, is
-     controlled by, or is under common control with You. For purposes of
-     this definition, "control" means (a) the power, direct or indirect,
-     to cause the direction or management of such entity, whether by
-     contract or otherwise, or (b) ownership of more than fifty percent
-     (50%) of the outstanding shares or beneficial ownership of such
-     entity.
-
-2. Source Code License.
-
-     2.1. The Initial Developer Grant.
-     The Initial Developer hereby grants You a world-wide, royalty-free,
-     non-exclusive license, subject to third party intellectual property
-     claims:
-          (a)  under intellectual property rights (other than patent or
-          trademark) Licensable by Initial Developer to use, reproduce,
-          modify, display, perform, sublicense and distribute the Original
-          Code (or portions thereof) with or without Modifications, and/or
-          as part of a Larger Work; and
-
-          (b) under Patents Claims infringed by the making, using or
-          selling of Original Code, to make, have made, use, practice,
-          sell, and offer for sale, and/or otherwise dispose of the
-          Original Code (or portions thereof).
-
-          (c) the licenses granted in this Section 2.1(a) and (b) are
-          effective on the date Initial Developer first distributes
-          Original Code under the terms of this License.
-
-          (d) Notwithstanding Section 2.1(b) above, no patent license is
-          granted: 1) for code that You delete from the Original Code; 2)
-          separate from the Original Code;  or 3) for infringements caused
-          by: i) the modification of the Original Code or ii) the
-          combination of the Original Code with other software or devices.
-
-     2.2. Contributor Grant.
-     Subject to third party intellectual property claims, each Contributor
-     hereby grants You a world-wide, royalty-free, non-exclusive license
-
-          (a)  under intellectual property rights (other than patent or
-          trademark) Licensable by Contributor, to use, reproduce, modify,
-          display, perform, sublicense and distribute the Modifications
-          created by such Contributor (or portions thereof) either on an
-          unmodified basis, with other Modifications, as Covered Code
-          and/or as part of a Larger Work; and
-
-          (b) under Patent Claims infringed by the making, using, or
-          selling of  Modifications made by that Contributor either alone
-          and/or in combination with its Contributor Version (or portions
-          of such combination), to make, use, sell, offer for sale, have
-          made, and/or otherwise dispose of: 1) Modifications made by that
-          Contributor (or portions thereof); and 2) the combination of
-          Modifications made by that Contributor with its Contributor
-          Version (or portions of such combination).
-
-          (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
-          effective on the date Contributor first makes Commercial Use of
-          the Covered Code.
-
-          (d)    Notwithstanding Section 2.2(b) above, no patent license is
-          granted: 1) for any code that Contributor has deleted from the
-          Contributor Version; 2)  separate from the Contributor Version;
-          3)  for infringements caused by: i) third party modifications of
-          Contributor Version or ii)  the combination of Modifications made
-          by that Contributor with other software  (except as part of the
-          Contributor Version) or other devices; or 4) under Patent Claims
-          infringed by Covered Code in the absence of Modifications made by
-          that Contributor.
+1.2. “Contributor Version”
+means the combination of the Contributions of others (if any) used by a
+Contributor and that particular Contributor’s Contribution.
 
-3. Distribution Obligations.
+1.3. “Contribution”
+means Covered Software of a particular Contributor.
 
-     3.1. Application of License.
-     The Modifications which You create or to which You contribute are
-     governed by the terms of this License, including without limitation
-     Section 2.2. The Source Code version of Covered Code may be
-     distributed only under the terms of this License or a future version
-     of this License released under Section 6.1, and You must include a
-     copy of this License with every copy of the Source Code You
-     distribute. You may not offer or impose any terms on any Source Code
-     version that alters or restricts the applicable version of this
-     License or the recipients' rights hereunder. However, You may include
-     an additional document offering the additional rights described in
-     Section 3.5.
-
-     3.2. Availability of Source Code.
-     Any Modification which You create or to which You contribute must be
-     made available in Source Code form under the terms of this License
-     either on the same media as an Executable version or via an accepted
-     Electronic Distribution Mechanism to anyone to whom you made an
-     Executable version available; and if made available via Electronic
-     Distribution Mechanism, must remain available for at least twelve (12)
-     months after the date it initially became available, or at least six
-     (6) months after a subsequent version of that particular Modification
-     has been made available to such recipients. You are responsible for
-     ensuring that the Source Code version remains available even if the
-     Electronic Distribution Mechanism is maintained by a third party.
-
-     3.3. Description of Modifications.
-     You must cause all Covered Code to which You contribute to contain a
-     file documenting the changes You made to create that Covered Code and
-     the date of any change. You must include a prominent statement that
-     the Modification is derived, directly or indirectly, from Original
-     Code provided by the Initial Developer and including the name of the
-     Initial Developer in (a) the Source Code, and (b) in any notice in an
-     Executable version or related documentation in which You describe the
-     origin or ownership of the Covered Code.
-
-     3.4. Intellectual Property Matters
-          (a) Third Party Claims.
-          If Contributor has knowledge that a license under a third party's
-          intellectual property rights is required to exercise the rights
-          granted by such Contributor under Sections 2.1 or 2.2,
-          Contributor must include a text file with the Source Code
-          distribution titled "LEGAL" which describes the claim and the
-          party making the claim in sufficient detail that a recipient will
-          know whom to contact. If Contributor obtains such knowledge after
-          the Modification is made available as described in Section 3.2,
-          Contributor shall promptly modify the LEGAL file in all copies
-          Contributor makes available thereafter and shall take other steps
-          (such as notifying appropriate mailing lists or newsgroups)
-          reasonably calculated to inform those who received the Covered
-          Code that new knowledge has been obtained.
-
-          (b) Contributor APIs.
-          If Contributor's Modifications include an application programming
-          interface and Contributor has knowledge of patent licenses which
-          are reasonably necessary to implement that API, Contributor must
-          also include this information in the LEGAL file.
-
-               (c)    Representations.
-          Contributor represents that, except as disclosed pursuant to
-          Section 3.4(a) above, Contributor believes that Contributor's
-          Modifications are Contributor's original creation(s) and/or
-          Contributor has sufficient rights to grant the rights conveyed by
-          this License.
-
-     3.5. Required Notices.
-     You must duplicate the notice in Exhibit A in each file of the Source
-     Code.  If it is not possible to put such notice in a particular Source
-     Code file due to its structure, then You must include such notice in a
-     location (such as a relevant directory) where a user would be likely
-     to look for such a notice.  If You created one or more Modification(s)
-     You may add your name as a Contributor to the notice described in
-     Exhibit A.  You must also duplicate this License in any documentation
-     for the Source Code where You describe recipients' rights or ownership
-     rights relating to Covered Code.  You may choose to offer, and to
-     charge a fee for, warranty, support, indemnity or liability
-     obligations to one or more recipients of Covered Code. However, You
-     may do so only on Your own behalf, and not on behalf of the Initial
-     Developer or any Contributor. You must make it absolutely clear than
-     any such warranty, support, indemnity or liability obligation is
-     offered by You alone, and You hereby agree to indemnify the Initial
-     Developer and every Contributor for any liability incurred by the
-     Initial Developer or such Contributor as a result of warranty,
-     support, indemnity or liability terms You offer.
-
-     3.6. Distribution of Executable Versions.
-     You may distribute Covered Code in Executable form only if the
-     requirements of Section 3.1-3.5 have been met for that Covered Code,
-     and if You include a notice stating that the Source Code version of
-     the Covered Code is available under the terms of this License,
-     including a description of how and where You have fulfilled the
-     obligations of Section 3.2. The notice must be conspicuously included
-     in any notice in an Executable version, related documentation or
-     collateral in which You describe recipients' rights relating to the
-     Covered Code. You may distribute the Executable version of Covered
-     Code or ownership rights under a license of Your choice, which may
-     contain terms different from this License, provided that You are in
-     compliance with the terms of this License and that the license for the
-     Executable version does not attempt to limit or alter the recipient's
-     rights in the Source Code version from the rights set forth in this
-     License. If You distribute the Executable version under a different
-     license You must make it absolutely clear that any terms which differ
-     from this License are offered by You alone, not by the Initial
-     Developer or any Contributor. You hereby agree to indemnify the
-     Initial Developer and every Contributor for any liability incurred by
-     the Initial Developer or such Contributor as a result of any such
-     terms You offer.
-
-     3.7. Larger Works.
-     You may create a Larger Work by combining Covered Code with other code
-     not governed by the terms of this License and distribute the Larger
-     Work as a single product. In such a case, You must make sure the
-     requirements of this License are fulfilled for the Covered Code.
-
-4. Inability to Comply Due to Statute or Regulation.
-
-     If it is impossible for You to comply with any of the terms of this
-     License with respect to some or all of the Covered Code due to
-     statute, judicial order, or regulation then You must: (a) comply with
-     the terms of this License to the maximum extent possible; and (b)
-     describe the limitations and the code they affect. Such description
-     must be included in the LEGAL file described in Section 3.4 and must
-     be included with all distributions of the Source Code. Except to the
-     extent prohibited by statute or regulation, such description must be
-     sufficiently detailed for a recipient of ordinary skill to be able to
-     understand it.
-
-5. Application of this License.
-
-     This License applies to code to which the Initial Developer has
-     attached the notice in Exhibit A and to related Covered Code.
-
-6. Versions of the License.
-
-     6.1. New Versions.
-     Netscape Communications Corporation ("Netscape") may publish revised
-     and/or new versions of the License from time to time. Each version
-     will be given a distinguishing version number.
-
-     6.2. Effect of New Versions.
-     Once Covered Code has been published under a particular version of the
-     License, You may always continue to use it under the terms of that
-     version. You may also choose to use such Covered Code under the terms
-     of any subsequent version of the License published by Netscape. No one
-     other than Netscape has the right to modify the terms applicable to
-     Covered Code created under this License.
-
-     6.3. Derivative Works.
-     If You create or use a modified version of this License (which you may
-     only do in order to apply it to code which is not already Covered Code
-     governed by this License), You must (a) rename Your license so that
-     the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
-     "MPL", "NPL" or any confusingly similar phrase do not appear in your
-     license (except to note that your license differs from this License)
-     and (b) otherwise make it clear that Your version of the license
-     contains terms which differ from the Mozilla Public License and
-     Netscape Public License. (Filling in the name of the Initial
-     Developer, Original Code or Contributor in the notice described in
-     Exhibit A shall not of themselves be deemed to be modifications of
-     this License.)
-
-7. DISCLAIMER OF WARRANTY.
-
-     COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
-     WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
-     WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
-     DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
-     THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
-     IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
-     YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
-     COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
-     OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
-     ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-8. TERMINATION.
-
-     8.1.  This License and the rights granted hereunder will terminate
-     automatically if You fail to comply with terms herein and fail to cure
-     such breach within 30 days of becoming aware of the breach. All
-     sublicenses to the Covered Code which are properly granted shall
-     survive any termination of this License. Provisions which, by their
-     nature, must remain in effect beyond the termination of this License
-     shall survive.
-
-     8.2.  If You initiate litigation by asserting a patent infringement
-     claim (excluding declatory judgment actions) against Initial Developer
-     or a Contributor (the Initial Developer or Contributor against whom
-     You file such action is referred to as "Participant")  alleging that:
-
-     (a)  such Participant's Contributor Version directly or indirectly
-     infringes any patent, then any and all rights granted by such
-     Participant to You under Sections 2.1 and/or 2.2 of this License
-     shall, upon 60 days notice from Participant terminate prospectively,
-     unless if within 60 days after receipt of notice You either: (i)
-     agree in writing to pay Participant a mutually agreeable reasonable
-     royalty for Your past and future use of Modifications made by such
-     Participant, or (ii) withdraw Your litigation claim with respect to
-     the Contributor Version against such Participant.  If within 60 days
-     of notice, a reasonable royalty and payment arrangement are not
-     mutually agreed upon in writing by the parties or the litigation claim
-     is not withdrawn, the rights granted by Participant to You under
-     Sections 2.1 and/or 2.2 automatically terminate at the expiration of
-     the 60 day notice period specified above.
-
-     (b)  any software, hardware, or device, other than such Participant's
-     Contributor Version, directly or indirectly infringes any patent, then
-     any rights granted to You by such Participant under Sections 2.1(b)
-     and 2.2(b) are revoked effective as of the date You first made, used,
-     sold, distributed, or had made, Modifications made by that
-     Participant.
-
-     8.3.  If You assert a patent infringement claim against Participant
-     alleging that such Participant's Contributor Version directly or
-     indirectly infringes any patent where such claim is resolved (such as
-     by license or settlement) prior to the initiation of patent
-     infringement litigation, then the reasonable value of the licenses
-     granted by such Participant under Sections 2.1 or 2.2 shall be taken
-     into account in determining the amount or value of any payment or
-     license.
-
-     8.4.  In the event of termination under Sections 8.1 or 8.2 above,
-     all end user license agreements (excluding distributors and resellers)
-     which have been validly granted by You or any distributor hereunder
-     prior to termination shall survive termination.
-
-9. LIMITATION OF LIABILITY.
-
-     UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
-     (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
-     DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
-     OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
-     ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
-     CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
-     WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
-     COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
-     INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
-     LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
-     RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
-     PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
-     EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
-     THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-10. U.S. GOVERNMENT END USERS.
-
-     The Covered Code is a "commercial item," as that term is defined in
-     48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
-     software" and "commercial computer software documentation," as such
-     terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
-     C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
-     all U.S. Government End Users acquire Covered Code with only those
-     rights set forth herein.
-
-11. MISCELLANEOUS.
-
-     This License represents the complete agreement concerning subject
-     matter hereof. If any provision of this License is held to be
-     unenforceable, such provision shall be reformed only to the extent
-     necessary to make it enforceable. This License shall be governed by
-     California law provisions (except to the extent applicable law, if
-     any, provides otherwise), excluding its conflict-of-law provisions.
-     With respect to disputes in which at least one party is a citizen of,
-     or an entity chartered or registered to do business in the United
-     States of America, any litigation relating to this License shall be
-     subject to the jurisdiction of the Federal Courts of the Northern
-     District of California, with venue lying in Santa Clara County,
-     California, with the losing party responsible for costs, including
-     without limitation, court costs and reasonable attorneys' fees and
-     expenses. The application of the United Nations Convention on
-     Contracts for the International Sale of Goods is expressly excluded.
-     Any law or regulation which provides that the language of a contract
-     shall be construed against the drafter shall not apply to this
-     License.
-
-12. RESPONSIBILITY FOR CLAIMS.
-
-     As between Initial Developer and the Contributors, each party is
-     responsible for claims and damages arising, directly or indirectly,
-     out of its utilization of rights under this License and You agree to
-     work with Initial Developer and Contributors to distribute such
-     responsibility on an equitable basis. Nothing herein is intended or
-     shall be deemed to constitute any admission of liability.
-
-13. MULTIPLE-LICENSED CODE.
-
-     Initial Developer may designate portions of the Covered Code as
-     "Multiple-Licensed".  "Multiple-Licensed" means that the Initial
-     Developer permits you to utilize portions of the Covered Code under
-     Your choice of the MPL or the alternative licenses, if any, specified
-     by the Initial Developer in the file described in Exhibit A.
+1.4. “Covered Software”
+means Source Code Form to which the initial Contributor has attached the notice
+in Exhibit A, the Executable Form of such Source Code Form, and Modifications of
+such Source Code Form, in each case including portions thereof.
 
-The binary distribution of this product bundles these dependencies under the
-following license:
-bootstrap v3.3.6
-broccoli-asset-rev v2.4.2
-broccoli-funnel v1.0.1
-datatables v1.10.8
-em-helpers v0.5.13
-em-table v0.1.6
-ember v2.2.0
-ember-array-contains-helper v1.0.2
-ember-bootstrap v0.5.1
-ember-cli v1.13.13
-ember-cli-app-version v1.0.0
-ember-cli-babel v5.1.6
-ember-cli-content-security-policy v0.4.0
-ember-cli-dependency-checker v1.2.0
-ember-cli-htmlbars v1.0.2
-ember-cli-htmlbars-inline-precompile v0.3.1
-ember-cli-ic-ajax v0.2.1
-ember-cli-inject-live-reload v1.4.0
-ember-cli-jquery-ui v0.0.20
-ember-cli-qunit v1.2.1
-ember-cli-release v0.2.8
-ember-cli-shims v0.0.6
-ember-cli-sri v1.2.1
-ember-cli-test-loader v0.2.1
-ember-cli-uglify v1.2.0
-ember-d3 v0.1.0
-ember-data v2.1.0
-ember-disable-proxy-controllers v1.0.1
-ember-export-application-global v1.0.5
-ember-load-initializers v0.1.7
-ember-qunit v0.4.16
-ember-qunit-notifications v0.1.0
-ember-resolver v2.0.3
-ember-spin-spinner v0.2.3
-ember-truth-helpers v1.2.0
-jquery v2.1.4
-jquery-ui v1.11.4
-loader.js v3.3.0
-momentjs v2.10.6
-qunit v1.19.0
-select2 v4.0.0
-snippet-ss v1.11.0
-spin.js v2.3.2
--------------------------------------------------------------------------------
-The MIT License (MIT)
+1.5. “Incompatible With Secondary Licenses”
+means
 
-All rights reserved.
+that the initial Contributor has attached the notice described in Exhibit B to
+the Covered Software; or
 
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and assocated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
+that the Covered Software was made available under the terms of version 1.1 or
+earlier of the License, but not also under the terms of a Secondary License.
 
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
+1.6. “Executable Form”
+means any form of the work other than Source Code Form.
 
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
+1.7. “Larger Work”
+means a work that combines Covered Software with other material, in a separate
+file or files, that is not Covered Software.
+
+1.8. “License”
+means this document.
+
+1.9. “Licensable”
+means having the right to grant, to the maximum extent possible, whether at the
+time of the initial grant or subsequently, any and all of the rights conveyed by
+this License.
+
+1.10. “Modifications”
+means any of the following:
+
+any file in Source Code Form that results from an addition to, deletion from, or
+modification of the contents of Covered Software; or
+
+any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+means any patent claim(s), including without limitation, method, process, and
+apparatus claims, in any patent Licensable by such Contributor that would be
+infringed, but for the grant of the License, by the making, using, selling,
+offering for sale, having made, import, or transfer of either its Contributions
+or its Contributor Version.
+
+1.12. “Secondary License”
+means either the GNU General Public License, Version 2.0, the GNU Lesser General
+Public License, Version 2.1, the GNU Affero General Public License, Version 3.0,
+or any later versions of those licenses.
+
+1.13. “Source Code Form”
+means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+means an individual or a legal entity exercising rights under this License. For
+legal entities, “You” includes any entity that controls, is controlled by,
+or is under common control with You. For purposes of this definition,
+“control” means (a) the power, direct or indirect, to cause the direction or
+management of such entity, whether by contract or otherwise, or (b) ownership of
+more than fifty percent (50%) of the outstanding shares or beneficial ownership
+of such entity.
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive
+license:
+
+under intellectual property rights (other than patent or trademark) Licensable
+by such Contributor to use, reproduce, make available, modify, display, perform,
+distribute, and otherwise exploit its Contributions, either on an unmodified
+basis, with Modifications, or as part of a Larger Work; and
+
+under Patent Claims of such Contributor to make, use, sell, offer for sale, have
+made, import, and otherwise transfer either its Contributions or its Contributor
+Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution become
+effective for each Contribution on the date the Contributor first distributes
+such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under this
+License. No additional rights or licenses will be implied from the distribution
+or licensing of Covered Software under this License. Notwithstanding Section
+2.1(b) above, no patent license is granted by a Contributor:
+
+for any code that a Contributor has removed from Covered Software; or
+
+for infringements caused by: (i) Your and any other third party’s
+modifications of Covered Software, or (ii) the combination of its Contributions
+with other software (except as part of its Contributor Version); or
+
+under Patent Claims infringed by Covered Software in the absence of its
+Contributions.
+
+This License does not grant any rights in the trademarks, service marks, or
+logos of any Contributor (except as may be necessary to comply with the notice
+requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to distribute
+the Covered Software under a subsequent version of this License (see Section
+10.2) or under the terms of a Secondary License (if permitted under the terms of
+Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its Contributions are
+its original creation(s) or it has sufficient rights to grant the rights to its
+Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under applicable
+copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+Section 2.1.
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under the
+terms of this License. You must inform recipients that the Source Code Form of
+the Covered Software is governed by the terms of this License, and how they can
+obtain a copy of this License. You may not attempt to alter or restrict the
+recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+such Covered Software must also be made available in Source Code Form, as
+described in Section 3.1, and You must inform recipients of the Executable Form
+how they can obtain a copy of such Source Code Form by reasonable means in a
+timely manner, at a charge no more than the cost of distribution to the
+recipient; and
+
+You may distribute such Executable Form under the terms of this License, or
+sublicense it under different terms, provided that the license for the
+Executable Form does not attempt to limit or alter the recipients’ rights in
+the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice, provided
+that You also comply with the requirements of this License for the Covered
+Software. If the Larger Work is a combination of Covered Software with a work
+governed by one or more Secondary Licenses, and the Covered Software is not
+Incompatible With Secondary Licenses, this License permits You to additionally
+distribute such Covered Software under the terms of such Secondary License(s),
+so that the recipient of the Larger Work may, at their option, further
+distribute the Covered Software under the terms of either this License or such
+Secondary License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices (including
+copyright notices, patent notices, disclaimers of warranty, or limitations of
+liability) contained within the Source Code Form of the Covered Software, except
+that You may alter any license notices to the extent required to remedy known
+factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support, indemnity
+or liability obligations to one or more recipients of Covered Software. However,
+You may do so only on Your own behalf, and not on behalf of any Contributor. You
+must make it absolutely clear that any such warranty, support, indemnity, or
+liability obligation is offered by You alone, and You hereby agree to indemnify
+every Contributor for any liability incurred by such Contributor as a result of
+warranty, support, indemnity or liability terms You offer. You may include
+additional disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+If it is impossible for You to comply with any of the terms of this License with
+respect to some or all of the Covered Software due to statute, judicial order,
+or regulation then You must: (a) comply with the terms of this License to the
+maximum extent possible; and (b) describe the limitations and the code they
+affect. Such description must be placed in a text file included with all
+distributions of the Covered Software under this License. Except to the extent
+prohibited by statute or regulation, such description must be sufficiently
+detailed for a recipient of ordinary skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+fail to comply with any of its terms. However, if You become compliant, then the
+rights granted under this License from a particular Contributor are reinstated
+(a) provisionally, unless and until such Contributor explicitly and finally
+terminates Your grants, and (b) on an ongoing basis, if such Contributor fails
+to notify You of the non-compliance by some reasonable means prior to 60 days
+after You have come back into compliance. Moreover, Your grants from a
+particular Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the first
+time You have received notice of non-compliance with this License from such
+Contributor, and You become compliant prior to 30 days after Your receipt of the
+notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions, counter-claims, and
+cross-claims) alleging that a Contributor Version directly or indirectly
+infringes any patent, then the rights granted to You by any and all Contributors
+for the Covered Software under Section 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+license agreements (excluding distributors and resellers) which have been
+validly granted by You or Your distributors under this License prior to
+termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+Covered Software is provided under this License on an “as is” basis, without
+warranty of any kind, either expressed, implied, or statutory, including,
+without limitation, warranties that the Covered Software is free of defects,
+merchantable, fit for a particular purpose or non-infringing. The entire risk as
+to the quality and performance of the Covered Software is with You. Should any
+Covered Software prove defective in any respect, You (not any Contributor)
+assume the cost of any necessary servicing, repair, or correction. This
+disclaimer of warranty constitutes an essential part of this License. No use of
+any Covered Software is authorized under this License except under this
+disclaimer.
+
+7. Limitation of Liability
+
+Under no circumstances and under no legal theory, whether tort (including
+negligence), contract, or otherwise, shall any Contributor, or anyone who
+distributes Covered Software as permitted above, be liable to You for any
+direct, indirect, special, incidental, or consequential damages of any character
+including, without limitation, damages for lost profits, loss of goodwill, work
+stoppage, computer failure or malfunction, or any and all other commercial
+damages or losses, even if such party shall have been informed of the
+possibility of such damages. This limitation of liability shall not apply to
+liability for death or personal injury resulting from such party’s negligence
+to the extent applicable law prohibits such limitation. Some jurisdictions do
+not allow the exclusion or limitation of incidental or consequential damages, so
+this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+Any litigation relating to this License may be brought only in the courts of a
+jurisdiction where the defendant maintains its principal place of business and
+such litigation shall be governed by laws of that jurisdiction, without
+reference to its conflict-of-law provisions. Nothing in this Section shall
+prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+This License represents the complete agreement concerning the subject matter
+hereof. If any provision of this License is held to be unenforceable, such
+provision shall be reformed only to the extent necessary to make it enforceable.
+Any law or regulation which provides that the language of a contract shall be
+construed against the drafter shall not be used to construe this License against
+a Contributor.
+
+10. Versions of the License
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section 10.3,
+no one other than the license steward has the right to modify or publish new
+versions of this License. Each version will be given a distinguishing version
+number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version of the
+License under which You originally received the Covered Software, or under the
+terms of any subsequent version published by the license steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to create a
+new license for such software, you may create and use a modified version of this
+License if you rename the license and remove any references to the name of the
+license steward (except to note that such modified license differs from this
+License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With Secondary
+Licenses under the terms of this version of the License, the notice described in
+Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+This Source Code Form is subject to the terms of the Mozilla Public License, v.
+2.0. If a copy of the MPL was not distributed with this file, You can obtain one
+at https://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+This Source Code Form is “Incompatible With Secondary Licenses”, as defined
+by the Mozilla Public License, v. 2.0.
 
 The binary distribution of this product bundles these dependencies under the
 following license:
-D3 v3.5.6
+JDOM 1.1
 --------------------------------------------------------------------------------
-(3-clause BSD license)
-All rights reserved.
+/*--
+
+ Copyright (C) 2000-2004 Jason Hunter & Brett McLaughlin.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+    notice, this list of conditions, and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions, and the disclaimer that follows
+    these conditions in the documentation and/or other materials
+    provided with the distribution.
+
+ 3. The name "JDOM" must not be used to endorse or promote products
+    derived from this software without prior written permission.  For
+    written permission, please contact <request_AT_jdom_DOT_org>.
+
+ 4. Products derived from this software may not be called "JDOM", nor
+    may "JDOM" appear in their name, without prior written permission
+    from the JDOM Project Management <request_AT_jdom_DOT_org>.
+
+ In addition, we request (but do not require) that you include in the
+ end-user documentation provided with the redistribution and/or in the
+ software itself an acknowledgement equivalent to the following:
+     "This product includes software developed by the
+      JDOM Project (http://www.jdom.org/)."
+ Alternatively, the acknowledgment may be graphical using the logos
+ available at http://www.jdom.org/images/logos.
+
+ THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESSED OR IMPLIED
+ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED.  IN NO EVENT SHALL THE JDOM AUTHORS OR THE PROJECT
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGE.
+
+ This software consists of voluntary contributions made by many
+ individuals on behalf of the JDOM Project and was originally
+ created by Jason Hunter <jhunter_AT_jdom_DOT_org> and
+ Brett McLaughlin <brett_AT_jdom_DOT_org>.  For more information
+ on the JDOM Project, please see <http://www.jdom.org/>.
 
-Redistribution and use in source and binary forms, with or without modification,
-are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this list
-   of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice, this
-   list of conditions and the following disclaimer in the documentation and/or
-   other materials provided with the distribution.
-
-3. Neither the name of the copyright holder nor the names of its contributors may
-   be used to endorse or promote products derived from this software without
-   specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
-INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
+ */
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+Hbase Server 1.2.4
+--------------------------------------------------------------------------------
+This project bundles a derivative image for our Orca Logo. This image is
+available under the Creative Commons By Attribution 3.0 License.
+
+    Creative Commons Legal Code
+
+    Attribution 3.0 Unported
+
+        CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
+        LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN
+        ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
+        INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
+        REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR
+        DAMAGES RESULTING FROM ITS USE.
+
+    License
+
+    THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE
+    COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY
+    COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS
+    AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+
+    BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE
+    TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY
+    BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS
+    CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND
+    CONDITIONS.
+
+    1. Definitions
+
+     a. "Adaptation" means a work based upon the Work, or upon the Work and
+        other pre-existing works, such as a translation, adaptation,
+        derivative work, arrangement of music or other alterations of a
+        literary or artistic work, or phonogram or performance and includes
+        cinematographic adaptations or any other form in which the Work may be
+        recast, transformed, or adapted including in any form recognizably
+        derived from the original, except that a work that constitutes a
+        Collection will not be considered an Adaptation for the purpose of
+        this License. For the avoidance of doubt, where the Work is a musical
+        work, performance or phonogram, the synchronization of the Work in
+        timed-relation with a moving image ("synching") will be considered an
+        Adaptation for the purpose of this License.
+     b. "Collection" means a collection of literary or artistic works, such as
+        encyclopedias and anthologies, or performances, phonograms or
+        broadcasts, or other works or subject matter other than works listed
+        in Section 1(f) below, which, by reason of the selection and
+        arrangement of their contents, constitute intellectual creations, in
+        which the Work is included in its entirety in unmodified form along
+        with one or more other contributions, each constituting separate and
+        independent works in themselves, which together are assembled into a
+        collective whole. A work that constitutes a Collection will not be
+        considered an Adaptation (as defined above) for the purposes of this
+        License.
+     c. "Distribute" means to make available to the public the original and
+        copies of the Work or Adaptation, as appropriate, through sale or
+        other transfer of ownership.
+     d. "Licensor" means the individual, individuals, entity or entities that
+        offer(s) the Work under the terms of this License.
+     e. "Original Author" means, in the case of a literary or artistic work,
+        the individual, individuals, entity or entities who created the Work
+        or if no individual or entity can be identified, the publisher; and in
+        addition (i) in the case of a performance the actors, singers,
+        musicians, dancers, and other persons who act, sing, deliver, declaim,
+        play in, interpret or otherwise perform literary or artistic works or
+        expressions of folklore; (ii) in the case of a phonogram the producer
+        being the person or legal entity who first fixes the sounds of a
+        performance or other sounds; and, (iii) in the case of broadcasts, the
+        organization that transmits the broadcast.
+     f. "Work" means the literary and/or artistic work offered under the terms
+        of this License including without limitation any production in the
+        literary, scientific and artistic domain, whatever may be the mode or
+        form of its expression including digital form, such as a book,
+        pamphlet and other writing; a lecture, address, sermon or other work
+        of the same nature; a dramatic or dramatico-musical work; a
+        choreographic work or entertainment in dumb show; a musical
+        composition with or without words; a cinematographic work to which are
+        assimilated works expressed by a process analogous to cinematography;
+        a work of drawing, painting, architecture, sculpture, engraving or
+        lithography; a photographic work to which are assimilated works
+        expressed by a process analogous to photography; a work of applied
+        art; an illustration, map, plan, sketch or three-dimensional work
+        relative to geography, topography, architecture or science; a
+        performance; a broadcast; a phonogram; a compilation of data to the
+        extent it is protected as a copyrightable work; or a work performed by
+        a variety or circus performer to the extent it is not otherwise
+        considered a literary or artistic work.
+     g. "You" means an individual or entity exercising rights under this
+        License who has not previously violated the terms of this License with
+        respect to the Work, or who has received express permission from the
+        Licensor to exercise rights under this License despite a previous
+        violation.
+     h. "Publicly Perform" means to perform public recitations of the Work and
+        to communicate to the public those public recitations, by any means or
+        process, including by wire or wireless means or public digital
+        performances; to make available to the public Works in such a way that
+        members of the public may access these Works from a place and at a
+        place individually chosen by them; to perform the Work to the public
+        by any means or process and the communication to the public of the
+        performances of the Work, including by public digital performance; to
+        broadcast and rebroadcast the Work by any means including signs,
+        sounds or images.
+     i. "Reproduce" means to make copies of the Work by any means including
+        without limitation by sound or visual recordings and the right of
+        fixation and reproducing fixations of the Work, including storage of a
+        protected performance or phonogram in digital form or other electronic
+        medium.
+
+    2. Fair Dealing Rights. Nothing in this License is intended to reduce,
+    limit, or restrict any uses free from copyright or rights arising from
+    limitations or exceptions that are provided for in connection with the
+    copyright protection under copyright law or other applicable laws.
+
+    3. License Grant. Subject to the terms and conditions of this License,
+    Licensor hereby grants You a worldwide, royalty-free, non-exclusive,
+    perpetual (for the duration of the applicable copyright) license to
+    exercise the rights in the Work as stated below:
+
+     a. to Reproduce the Work, to incorporate the Work into one or more
+        Collections, and to Reproduce the Work as incorporated in the
+        Collections;
+     b. to create and Reproduce Adaptations provided that any such Adaptation,
+        including any translation in any medium, takes reasonable steps to
+        clearly label, demarcate or otherwise identify that changes were made
+        to the original Work. For example, a translation could be marked "The
+        original work was translated from English to Spanish," or a
+        modification could indicate "The original work has been modified.";
+     c. to Distribute and Publicly Perform the Work including as incorporated
+        in Collections; and,
+     d. to Distribute and Publicly Perform Adaptations.
+     e. For the avoidance of doubt:
+
+         i. Non-waivable Compulsory License Schemes. In those jurisdictions in
+            which the right to collect royalties through any statutory or
+            compulsory licensing scheme cannot be waived, the Licensor
+            reserves the exclusive right to collect such royalties for any
+            exercise by You of the rights granted under this License;
+        ii. Waivable Compulsory License Schemes. In those jurisdictions in
+            which the right to collect royalties through any statutory or
+            compulsory licensing scheme can be waived, the Licensor waives the
+            exclusive right to collect such royalties for any exercise by You
+            of the rights granted under this License; and,
+       iii. Voluntary License Schemes. The Licensor waives the right to
+            collect royalties, whether individually or, in the event that the
+            Licensor is a member of a collecting society that administers
+            voluntary licensing schemes, via that society, from any exercise
+            by You of the rights granted under this License.
+
+    The above rights may be exercised in all media and formats whether now
+    known or hereafter devised. The above rights include the right to make
+    such modifications as are technically necessary to exercise the rights in
+    other media and formats. Subject to Section 8(f), all rights not expressly
+    granted by Licensor are hereby reserved.
+
+    4. Restrictions. The license granted in Section 3 above is expressly made
+    subject to and limited by the following restrictions:
+
+     a. You may Distribute or Publicly Perform the Work only under the terms
+        of this License. You must include a copy of, or the Uniform Resource
+        Identifier (URI) for, this License with every copy of the Work You
+        Distribute or Publicly Perform. You may not offer or impose any terms
+        on the Work that restrict the terms of this License or the ability of
+        the recipient of the Work to exercise the rights granted to that
+        recipient under the terms of the License. You may not sublicense the
+        Work. You must keep intact all notices that refer to this License and
+        to the disclaimer of warranties with every copy of the Work You
+        Distribute or Publicly Perform. When You Distribute or Publicly
+        Perform the Work, You may not impose any effective technological
+        measures on the Work that restrict the ability of a recipient of the
+        Work from You to exercise the rights granted to that recipient under
+        the terms of the License. This Section 4(a) applies to the Work as
+        incorporated in a Collection, but this does not require the Collection
+        apart from the Work itself to be made subject to the terms of this
+        License. If You create a Collection, upon notice from any Licensor You
+        must, to the extent practicable, remove from the Collection any credit
+        as required by Section 4(b), as requested. If You create an
+        Adaptation, upon notice from any Licensor You must, to the extent
+        practicable, remove from the Adaptation any credit as required by
+        Section 4(b), as requested.
+     b. If You Distribute, or Publicly Perform the Work or any Adaptations or
+        Collections, You must, unless a request has been made pursuant to
+        Section 4(a), keep intact all copyright notices for the Work and
+        provide, reasonable to the medium or means You are utilizing: (i) the
+        name of the Original Author (or pseudonym, if applicable) if supplied,
+        and/or if the Original Author and/or Licensor designate another party
+        or parties (e.g., a sponsor institute, publishing entity, journal) for
+        attribution ("Attribution Parties") in Licensor's copyright notice,
+        terms of service or by other reasonable means, the name of such party
+        or parties; (ii) the title of the Work if supplied; (iii) to the
+        extent reasonably practicable, the URI, if any, that Licensor
+        specifies to be associated with the Work, unless such URI does not
+        refer to the copyright notice or licensing information for the Work;
+        and (iv) , consistent with Section 3(b), in the case of an Adaptation,
+        a credit identifying the use of the Work in the Adaptation (e.g.,
+        "French translation of the Work by Original Author," or "Screenplay
+        based on original Work by Original Author"). The credit required by
+        this Section 4 (b) may be implemented in any reasonable manner;
+        provided, however, that in the case of a Adaptation or Collection, at
+        a minimum such credit will appear, if a credit for all contributing
+        authors of the Adaptation or Collection appears, then as part of these
+        credits and in a manner at least as prominent as the credits for the
+        other contributing authors. For the avoidance of doubt, You may only
+        use the credit required by this Section for the purpose of attribution
+        in the manner set out above and, by exercising Your rights under this
+        License, You may not implicitly or explicitly assert or imply any
+        connection with, sponsorship or endorsement by the Original Author,
+        Licensor and/or Attribution Parties, as appropriate, of You or Your
+        use of the Work, without the separate, express prior written
+        permission of the Original Author, Licensor and/or Attribution
+        Parties.
+     c. Except as otherwise agreed in writing by the Licensor or as may be
+        otherwise permitted by applicable law, if You Reproduce, Distribute or
+        Publicly Perform the Work either by itself or as part of any
+        Adaptations or Collections, You must not distort, mutilate, modify or
+        take other derogatory action in relation to the Work which would be
+        prejudicial to the Original Author's honor or reputation. Licensor
+        agrees that in those jurisdictions (e.g. Japan), in which any exercise
+        of the right granted in Section 3(b) of this License (the right to
+        make Adaptations) would be deemed to be a distortion, mutilation,
+        modification or other derogatory action prejudicial to the Original
+        Author's honor and reputation, the Licensor will waive or not assert,
+        as appropriate, this Section, to the fullest extent permitted by the
+        applicable national law, to enable You to reasonably exercise Your
+        right under Section 3(b) of this License (right to make Adaptations)
+        but not otherwise.
+
+    5. Representations, Warranties and Disclaimer
+
+    UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR
+    OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY
+    KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE,
+    INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY,
+    FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF
+    LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS,
+    WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION
+    OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.
+
+    6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE
+    LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR
+    ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES
+    ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS
+    BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+    7. Termination
+
+     a. This License and the rights granted hereunder will terminate
+        automatically upon any breach by You of the terms of this License.
+        Individuals or entities who have received Adaptations or Collections
+        from You under this License, however, will not have their licenses
+        terminated provided such individuals or entities remain in full
+        compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will
+        survive any termination of this License.
+     b. Subject to the above terms and conditions, the license granted here is
+        perpetual (for the duration of the applicable copyright in the Work).
+        Notwithstanding the above, Licensor reserves the right to release the
+        Work under different license terms or to stop distributing the Work at
+        any time; provided, however that any such election will not serve to
+        withdraw this License (or any other license that has been, or is
+        required to be, granted under the terms of this License), and this
+        License will continue in full force and effect unless terminated as
+        stated above.
+
+    8. Miscellaneous
+
+     a. Each time You Distribute or Publicly Perform the Work or a Collection,
+        the Licensor offers to the recipient a license to the Work on the same
+        terms and conditions as the license granted to You under this License.
+     b. Each time You Distribute or Publicly Perform an Adaptation, Licensor
+        offers to the recipient a license to the original Work on the same
+        terms and conditions as the license granted to You under this License.
+     c. If any provision of this License is invalid or unenforceable under
+        applicable law, it shall not affect the validity or enforceability of
+        the remainder of the terms of this License, and without further action
+        by the parties to this agreement, such provision shall be reformed to
+        the minimum extent necessary to make such provision valid and
+        enforceable.
+     d. No term or provision of this License shall be deemed waived and no
+        breach consented to unless such waiver or consent shall be in writing
+        and signed by the party to be charged with such waiver or consent.
+     e. This License constitutes the entire agreement between the parties with
+        respect to the Work licensed here. There are no understandings,
+        agreements or representations with respect to the Work not specified
+        here. Licensor shall not be bound by any additional provisions that
+        may appear in any communication from You. This License may not be
+        modified without the mutual written agreement of the Licensor and You.
+     f. The rights granted under, and the subject matter referenced, in this
+        License were drafted utilizing the terminology of the Berne Convention
+        for the Protection of Literary and Artistic Works (as amended on
+        September 28, 1979), the Rome Convention of 1961, the WIPO Copyright
+        Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996
+        and the Universal Copyright Convention (as revised on July 24, 1971).
+        These rights and subject matter take effect in the relevant
+        jurisdiction in which the License terms are sought to be enforced
+        according to the corresponding provisions of the implementation of
+        those treaty provisions in the applicable national law. If the
+        standard suite of rights granted under applicable copyright law
+        includes additional rights not granted under this License, such
+        additional rights are deemed to be included in the License; this
+        License is not intended to restrict the license of any rights under
+        applicable law.
+
+
+    Creative Commons Notice
+
+        Creative Commons is not a party to this License, and makes no warranty
+        whatsoever in connection with the Work. Creative Commons will not be
+        liable to You or any party on any legal theory for any damages
+        whatsoever, including without limitation any general, special,
+        incidental or consequential damages arising in connection to this
+        license. Notwithstanding the foregoing two (2) sentences, if Creative
+        Commons has expressly identified itself as the Licensor hereunder, it
+        shall have all rights and obligations of Licensor.
+
+        Except for the limited purpose of indicating to the public that the
+        Work is licensed under the CCPL, Creative Commons does not authorize
+        the use by either party of the trademark "Creative Commons" or any
+        related trademark or logo of Creative Commons without the prior
+        written consent of Creative Commons. Any permitted use will be in
+        compliance with Creative Commons' then-current trademark usage
+        guidelines, as may be published on its website or otherwise made
+        available upon request from time to time. For the avoidance of doubt,
+        this trademark restriction does not form part of this License.
+
+        Creative Commons may be contacted at https://creativecommons.org/.

+ 126 - 3
NOTICE.txt

@@ -17,7 +17,11 @@ which has the following notices:
 * This product includes software developed by IBM Corporation and others.
 
 The binary distribution of this product bundles binaries of
-AWS Java SDK 1.10.6,
+AWS SDK for Java - Core 1.11.45,
+AWS Java SDK for AWS KMS 1.11.45,
+AWS Java SDK for Amazon S3 1.11.45,
+AWS Java SDK for AWS STS 1.11.45,
+JMES Path Query library 1.0,
 which has the following notices:
  * This software includes third party software subject to the following
  copyrights: - XML parsing and utility functions from JetS3t - Copyright
@@ -257,6 +261,13 @@ provides utilities for the java.lang API, which can be obtained at:
   * HOMEPAGE:
     * https://commons.apache.org/proper/commons-lang/
 
+This product contains a modified portion of 'JDOM 1.1', which can be obtained at:
+
+  * LICENSE:
+    * https://github.com/hunterhacker/jdom/blob/jdom-1.1/core/LICENSE.txt
+  * HOMEPAGE:
+    * http://www.jdom.org/
+
 The binary distribution of this product bundles binaries of
 Commons Codec 1.4,
 which has the following notices:
@@ -283,7 +294,7 @@ which has the following notices:
     Copyright 2002-2012 Ramnivas Laddad, Juergen Hoeller, Chris Beams
 
 The binary distribution of this product bundles binaries of
-Java Concurrency in Practice book annotations 1.0,
+"Java Concurrency in Practice" book annotations 1.0,
 which has the following notices:
  * Copyright (c) 2005 Brian Goetz and Tim Peierls Released under the Creative
   Commons Attribution License (http://creativecommons.org/licenses/by/2.5)
@@ -292,7 +303,15 @@ which has the following notices:
   notice.
 
 The binary distribution of this product bundles binaries of
-Jetty 6.1.26,
+Jetty :: Http Utility 9.3.11.,
+Jetty :: IO Utility 9.3.11.,
+Jetty :: Security 9.3.11.,
+Jetty :: Server Core 9.3.11.,
+Jetty :: Servlet Handling 9.3.11.,
+Jetty :: Utilities 9.3.11.,
+Jetty :: Utilities :: Ajax,
+Jetty :: Webapp Application Support 9.3.11.,
+Jetty :: XML utilities 9.3.11.,
 which has the following notices:
  * ==============================================================
     Jetty Web Container
@@ -453,3 +472,107 @@ which has the following notices:
      - voluntary contributions made by Paul Eng on behalf of the
        Apache Software Foundation that were originally developed at iClick, Inc.,
        software copyright (c) 1999.
+
+The binary distribution of this product bundles binaries of
+Logback Classic Module 1.1.2,
+Logback Core Module 1.1.2,
+which has the following notices:
+ * Logback: the reliable, generic, fast and flexible logging framework.
+   Copyright (C) 1999-2012, QOS.ch. All rights reserved.
+
+The binary distribution of this product bundles binaries of
+Apache HBase - Annotations 1.2.4,
+Apache HBase - Client 1.2.4,
+Apache HBase - Common 1.2.4,
+Apache HBase - Hadoop Compatibility 1.2.4,
+Apache HBase - Hadoop Two Compatibility 1.2.4,
+Apache HBase - Prefix Tree 1.2.4,
+Apache HBase - Procedure 1.2.4,
+Apache HBase - Protocol 1.2.4,
+Apache HBase - Server 1.2.4,
+which has the following notices:
+ * Apache HBase
+   Copyright 2007-2015 The Apache Software Foundation
+
+   This product includes software developed at
+   The Apache Software Foundation (http://www.apache.org/).
+
+   --
+   This product incorporates portions of the 'Hadoop' project
+
+   Copyright 2007-2009 The Apache Software Foundation
+
+   Licensed under the Apache License v2.0
+   --
+   Our Orca logo we got here: http://www.vectorfree.com/jumping-orca
+   It is licensed Creative Commons Attribution 3.0.
+   See https://creativecommons.org/licenses/by/3.0/us/
+   We changed the logo by stripping the colored background, inverting
+   it and then rotating it some.
+
+   Later we found that vectorfree.com image is not properly licensed.
+   The original is owned by vectorportal.com. The original was
+   relicensed so we could use it as Creative Commons Attribution 3.0.
+   The license is bundled with the download available here:
+   http://www.vectorportal.com/subcategory/205/KILLER-WHALE-FREE-VECTOR.eps/ifile/9136/detailtest.asp
+   --
+   This product includes portions of the Bootstrap project v3.0.0
+
+   Copyright 2013 Twitter, Inc.
+
+   Licensed under the Apache License v2.0
+
+   This product uses the Glyphicons Halflings icon set.
+
+   http://glyphicons.com/
+
+   Copyright Jan Kovařík
+
+   Licensed under the Apache License v2.0 as a part of the Bootstrap project.
+
+   --
+   This product includes portions of the Guava project v14, specifically
+   'hbase-common/src/main/java/org/apache/hadoop/hbase/io/LimitInputStream.java'
+
+   Copyright (C) 2007 The Guava Authors
+
+   Licensed under the Apache License, Version 2.0
+
+The binary distribution of this product bundles binaries of
+Phoenix Core 4.7.0,
+which has the following notices:
+   Apache Phoenix
+   Copyright 2013-2016 The Apache Software Foundation
+
+   This product includes software developed by The Apache Software
+   Foundation (http://www.apache.org/).
+
+   This also includes:
+
+   The phoenix-spark module has been adapted from the phoenix-spark library
+   distributed under the terms of the Apache 2 license. Original source copyright:
+   Copyright 2014 Simply Measured, Inc.
+   Copyright 2015 Interset Software Inc.
+
+   The file bin/daemon.py is based on the file of the same name in python-daemon 2.0.5
+   (https://pypi.python.org/pypi/python-daemon/). Original source copyright:
+   # Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
+   # Copyright © 2007–2008 Robert Niederreiter, Jens Klein
+   # Copyright © 2004–2005 Chad J. Schroeder
+   # Copyright © 2003 Clark Evans
+   # Copyright © 2002 Noah Spurrier
+   # Copyright © 2001 Jürgen Hermann
+
+The binary distribution of this product bundles binaries of
+Plexus Cipher: encryption/decryption Component 1.4,
+which has the following notices:
+ * The code in this component contains a class - Base64 taken from http://juliusdavies.ca/svn/not-yet-commons-ssl/tags/commons-ssl-0.3.10/src/java/org/apache/commons/ssl/Base64.java
+   which is Apache license: http://www.apache.org/licenses/LICENSE-2.0
+
+   The PBE key processing routine PBECipher.createCipher() is adopted from http://juliusdavies.ca/svn/not-yet-commons-ssl/tags/commons-ssl-0.3.10/src/java/org/apache/commons/ssl/OpenSSL.java
+    which is also Apache APL-2.0 license: http://www.apache.org/licenses/LICENSE-2.0
+
+The binary distribution of this product bundles binaries of
+software.amazon.ion:ion-java 1.0.1,
+which has the following notices:
+ * Amazon Ion Java Copyright 2007-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.

+ 4 - 1
dev-support/bin/create-release

@@ -249,7 +249,7 @@ function startgpgagent
   if [[ "${SIGN}" = true ]]; then
     if [[ -n "${GPGAGENT}" && -z "${GPG_AGENT_INFO}" ]]; then
       echo "starting gpg agent"
-      echo "default-cache-ttl 7200" > "${LOGDIR}/gpgagent.conf"
+      echo "default-cache-ttl 14400" > "${LOGDIR}/gpgagent.conf"
       # shellcheck disable=2046
       eval $("${GPGAGENT}" --daemon \
         --options "${LOGDIR}/gpgagent.conf" \
@@ -506,6 +506,9 @@ function makearelease
 
   mkdir -p "${LOGDIR}"
 
+  # Install the Hadoop maven plugins first
+  run_and_redirect "${LOGDIR}/mvn_install_maven_plugins.log" "${MVN}" "${MVN_ARGS[@]}" -pl hadoop-maven-plugins -am clean install
+
   # mvn clean for sanity
   run_and_redirect "${LOGDIR}/mvn_clean.log" "${MVN}" "${MVN_ARGS[@]}" clean
 

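The pre-build step added above relies on two standard Maven flags: -pl (--projects) limits the reactor to the named module, hadoop-maven-plugins, and -am (--also-make) additionally builds the modules it depends on, so clean install publishes the plugin to the local repository before the rest of the release build runs. That ordering matters because the shaded client modules list hadoop-maven-plugins as a dependency of maven-shade-plugin (see the hadoop-client-api pom further down in this diff). A minimal standalone sketch of the same invocation, assuming an ordinary Maven install and a Hadoop source checkout (the path and log file name are illustrative, not taken from the script):

    # Install only hadoop-maven-plugins (plus the modules it needs) into the
    # local Maven repository, capturing the build output to a log file.
    cd /path/to/hadoop-source            # hypothetical checkout location
    mvn -pl hadoop-maven-plugins -am clean install \
        > mvn_install_maven_plugins.log 2>&1
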
+ 13 - 0
dev-support/bin/dist-copynativelibs

@@ -114,6 +114,15 @@ for i in "$@"; do
     --snappylibbundle=*)
       SNAPPYLIBBUNDLE=${i#*=}
     ;;
+    --zstdbinbundle=*)
+      ZSTDBINBUNDLE=${i#*=}
+    ;;
+     --zstdlib=*)
+      ZSTDLIB=${i#*=}
+    ;;
+    --zstdlibbundle=*)
+      ZSTDLIBBUNDLE=${i#*=}
+    ;;
 
   esac
 done
@@ -139,6 +148,8 @@ if [[ -d "${LIB_DIR}" ]]; then
 
   bundle_native_lib "${SNAPPYLIBBUNDLE}" "snappy.lib" "snappy" "${SNAPPYLIB}"
 
+  bundle_native_lib "${ZSTDLIBBUNDLE}" "zstd.lib" "zstd" "${ZSTDLIB}"
+
   bundle_native_lib "${OPENSSLLIBBUNDLE}" "openssl.lib" "crypto" "${OPENSSLLIB}"
 
   bundle_native_lib "${ISALBUNDLE}" "isal.lib" "isa" "${ISALLIB}"
@@ -159,6 +170,8 @@ if [[ -d "${BIN_DIR}" ]] ; then
 
   bundle_native_bin "${SNAPPYBINBUNDLE}" "${SNAPPYLIBBUNDLE}" "snappy.lib" "snappy" "${SNAPPYLIB}"
 
+  bundle_native_bin "${ZSTDBINBUNDLE}" "${ZSTDLIBBUNDLE}" "zstd.lib" "zstd" "${ZSTDLIB}"
+
   bundle_native_bin "${OPENSSLBINBUNDLE}" "${OPENSSLLIBBUNDLE}" "openssl.lib" "crypto" "${OPENSSLLIB}"
 
 fi

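The new zstd options follow the parsing convention the script already uses for snappy and openssl: each --name=value argument is matched by a case pattern and its value is recovered with the ${i#*=} parameter expansion, which deletes the shortest prefix up to and including the first '='. A small illustrative sketch of that idiom on its own (not part of the script; the variable name simply mirrors the one above):

    #!/usr/bin/env bash
    # Parse a --zstdlib=/some/path style argument the same way the
    # flag loop in dist-copynativelibs does.
    for i in "$@"; do
      case ${i} in
        --zstdlib=*)
          ZSTDLIB=${i#*=}    # strip everything through the first '=' to keep the value
        ;;
      esac
    done
    echo "zstd library: ${ZSTDLIB:-<not given>}"
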
+ 7 - 1
dev-support/bin/dist-layout-stitching

@@ -137,6 +137,12 @@ run copy "${ROOT}/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${VERS
 run cp -pr "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-httpfs/target/hadoop-hdfs-httpfs-${VERSION}"/* .
 run cp -pr "${ROOT}/hadoop-common-project/hadoop-kms/target/hadoop-kms-${VERSION}"/* .
 
+# copy client jars as-is
+run mkdir -p "share/hadoop/client"
+run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-api/target/hadoop-client-api-${VERSION}.jar" share/hadoop/client/
+run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-runtime/target/hadoop-client-runtime-${VERSION}.jar" share/hadoop/client/
+run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-minicluster/target/hadoop-client-minicluster-${VERSION}.jar" share/hadoop/client/
+
 echo
 echo "Hadoop dist layout available at: ${BASEDIR}/hadoop-${VERSION}"
-echo
+echo

+ 0 - 0
dev-support/bin/qbt


+ 1 - 0
dev-support/docker/Dockerfile

@@ -138,6 +138,7 @@ ENV MAVEN_OPTS -Xms256m -Xmx512m
 RUN apt-get -y install nodejs && \
     ln -s /usr/bin/nodejs /usr/bin/node && \
     apt-get -y install npm && \
+    npm install npm@latest -g && \
     npm install -g bower && \
     npm install -g ember-cli
 

+ 2 - 3
hadoop-assemblies/pom.xml

@@ -23,12 +23,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha2-SNAPSHOT</version>
+    <version>3.0.0-alpha3-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
-  <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-assemblies</artifactId>
-  <version>3.0.0-alpha2-SNAPSHOT</version>
+  <version>3.0.0-alpha3-SNAPSHOT</version>
   <name>Apache Hadoop Assemblies</name>
   <description>Apache Hadoop Assemblies</description>
 

+ 0 - 14
hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml

@@ -93,20 +93,6 @@
       <directory>${project.build.directory}/webapps</directory>
       <outputDirectory>/share/hadoop/${hadoop.component}/webapps</outputDirectory>
     </fileSet>
-    <fileSet>
-      <directory>${basedir}/src/main/conf</directory>
-      <outputDirectory>/share/hadoop/${hadoop.component}/templates</outputDirectory>
-      <includes>
-        <include>*-site.xml</include>
-      </includes>
-    </fileSet>
-    <fileSet>
-      <directory>${basedir}/src/main/packages/templates/conf</directory>
-      <outputDirectory>/share/hadoop/${hadoop.component}/templates/conf</outputDirectory>
-      <includes>
-        <include>*</include>
-      </includes>
-    </fileSet>
     <fileSet>
       <directory>${project.build.directory}</directory>
       <outputDirectory>/share/hadoop/${hadoop.component}</outputDirectory>

+ 24 - 1
hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml

@@ -21,6 +21,14 @@
   </formats>
   <includeBaseDirectory>false</includeBaseDirectory>
   <fileSets>
+    <!-- Jar file -->
+    <fileSet>
+      <directory>target</directory>
+      <outputDirectory>/share/hadoop/hdfs</outputDirectory>
+      <includes>
+        <include>${project.artifactId}-${project.version}.jar</include>
+      </includes>
+    </fileSet>
     <!-- Configuration files -->
     <fileSet>
       <directory>${basedir}/src/main/conf</directory>
@@ -41,7 +49,7 @@
       <directory>${basedir}/src/main/libexec</directory>
       <outputDirectory>/libexec</outputDirectory>
       <includes>
-        <include>*</include>
+        <include>**/*</include>
       </includes>
       <fileMode>0755</fileMode>
     </fileSet>
@@ -51,4 +59,19 @@
       <outputDirectory>/share/doc/hadoop/httpfs</outputDirectory>
     </fileSet>
   </fileSets>
+  <dependencySets>
+    <dependencySet>
+      <useProjectArtifact>false</useProjectArtifact>
+      <outputDirectory>/share/hadoop/hdfs/lib</outputDirectory>
+      <!-- Exclude hadoop artifacts. They will be found via HADOOP* env -->
+      <excludes>
+        <exclude>org.apache.hadoop:hadoop-common</exclude>
+        <exclude>org.apache.hadoop:hadoop-hdfs</exclude>
+        <!-- use slf4j from common to avoid multiple binding warnings -->
+        <exclude>org.slf4j:slf4j-api</exclude>
+        <exclude>org.slf4j:slf4j-log4j12</exclude>
+        <exclude>org.hsqldb:hsqldb</exclude>
+      </excludes>
+    </dependencySet>
+  </dependencySets>
 </assembly>

+ 24 - 1
hadoop-assemblies/src/main/resources/assemblies/hadoop-kms-dist.xml

@@ -21,6 +21,14 @@
   </formats>
   <includeBaseDirectory>false</includeBaseDirectory>
   <fileSets>
+    <!-- Jar file -->
+    <fileSet>
+      <directory>target</directory>
+      <outputDirectory>/share/hadoop/common</outputDirectory>
+      <includes>
+        <include>${project.artifactId}-${project.version}.jar</include>
+      </includes>
+    </fileSet>
     <!-- Configuration files -->
     <fileSet>
       <directory>${basedir}/src/main/conf</directory>
@@ -41,7 +49,7 @@
       <directory>${basedir}/src/main/libexec</directory>
       <outputDirectory>/libexec</outputDirectory>
       <includes>
-        <include>*</include>
+        <include>**/*</include>
       </includes>
       <fileMode>0755</fileMode>
     </fileSet>
@@ -51,4 +59,19 @@
       <outputDirectory>/share/doc/hadoop/kms</outputDirectory>
     </fileSet>
   </fileSets>
+  <dependencySets>
+    <dependencySet>
+      <useProjectArtifact>false</useProjectArtifact>
+      <outputDirectory>/share/hadoop/common/lib</outputDirectory>
+      <!-- Exclude hadoop artifacts. They will be found via HADOOP* env -->
+      <excludes>
+        <exclude>org.apache.hadoop:hadoop-common</exclude>
+        <exclude>org.apache.hadoop:hadoop-hdfs</exclude>
+        <!-- use slf4j from common to avoid multiple binding warnings -->
+        <exclude>org.slf4j:slf4j-api</exclude>
+        <exclude>org.slf4j:slf4j-log4j12</exclude>
+        <exclude>org.hsqldb:hsqldb</exclude>
+      </excludes>
+    </dependencySet>
+  </dependencySets>
 </assembly>

+ 0 - 14
hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml

@@ -72,20 +72,6 @@
       <directory>${project.build.directory}/webapps</directory>
       <outputDirectory>/share/hadoop/${hadoop.component}/webapps</outputDirectory>
     </fileSet>
-    <fileSet>
-      <directory>${basedir}/src/main/conf</directory>
-      <outputDirectory>/share/hadoop/${hadoop.component}/templates</outputDirectory>
-      <includes>
-        <include>*-site.xml</include>
-      </includes>
-    </fileSet>
-    <fileSet>
-      <directory>${basedir}/src/main/packages/templates/conf</directory>
-      <outputDirectory>/share/hadoop/${hadoop.component}/templates/conf</outputDirectory>
-      <includes>
-        <include>*</include>
-      </includes>
-    </fileSet>
     <fileSet>
       <directory>${basedir}/dev-support/jdiff</directory>
       <outputDirectory>/share/hadoop/${hadoop.component}/jdiff</outputDirectory>

+ 0 - 14
hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml

@@ -166,20 +166,6 @@
       <directory>${project.build.directory}/webapps</directory>
       <outputDirectory>/share/hadoop/${hadoop.component}/webapps</outputDirectory>
     </fileSet>
-    <fileSet>
-      <directory>${basedir}/src/main/conf</directory>
-      <outputDirectory>/share/hadoop/${hadoop.component}/templates</outputDirectory>
-      <includes>
-        <include>*-site.xml</include>
-      </includes>
-    </fileSet>
-    <fileSet>
-      <directory>${basedir}/src/main/packages/templates/conf</directory>
-      <outputDirectory>/share/hadoop/${hadoop.component}/templates/conf</outputDirectory>
-      <includes>
-        <include>*</include>
-      </includes>
-    </fileSet>
     <fileSet>
       <directory>${basedir}/dev-support/jdiff</directory>
       <outputDirectory>/share/hadoop/${hadoop.component}/jdiff</outputDirectory>

+ 3 - 1
hadoop-build-tools/pom.xml

@@ -18,7 +18,7 @@
   <parent>
     <artifactId>hadoop-main</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha2-SNAPSHOT</version>
+    <version>3.0.0-alpha3-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-build-tools</artifactId>
@@ -54,6 +54,7 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-resources-plugin</artifactId>
+        <version>${maven-resources-plugin.version}</version>
         <executions>
           <execution>
             <id>copy-resources</id>
@@ -80,6 +81,7 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-remote-resources-plugin</artifactId>
+        <version>${maven-remote-resources-plugin.version}</version>
         <executions>
           <execution>
             <phase>process-resources</phase>

+ 1 - 3
hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml

@@ -123,9 +123,7 @@
 
         <!-- Checks for Size Violations.                    -->
         <!-- See http://checkstyle.sf.net/config_sizes.html -->
-        <module name="LineLength">
-          <property name="ignorePattern" value="^(package|import) .*"/>
-        </module>
+        <module name="LineLength"/>
         <module name="MethodLength"/>
         <module name="ParameterNumber"/>
 

+ 254 - 0
hadoop-client-modules/hadoop-client-api/pom.xml

@@ -0,0 +1,254 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+<parent>
+   <groupId>org.apache.hadoop</groupId>
+   <artifactId>hadoop-project</artifactId>
+   <version>3.0.0-alpha3-SNAPSHOT</version>
+   <relativePath>../../hadoop-project</relativePath>
+</parent>
+  <artifactId>hadoop-client-api</artifactId>
+  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <packaging>jar</packaging>
+
+  <description>Apache Hadoop Client</description>
+  <name>Apache Hadoop Client API</name>
+
+  <properties>
+    <shaded.dependency.prefix>org.apache.hadoop.shaded</shaded.dependency.prefix>
+    <!-- We contain no source -->
+    <maven.javadoc.skip>true</maven.javadoc.skip>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client</artifactId>
+      <!-- We list this as optional because as a type-pom it won't get included in the shading.
+           Marking it optional means it doesn't count as a transitive dependency of this artifact.
+        -->
+      <optional>true</optional>
+      <exclusions>
+        <!-- these APIs are a part of the SE JDK -->
+        <exclusion>
+          <groupId>javax.xml.bind</groupId>
+          <artifactId>jaxb-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>xml-apis</groupId>
+          <artifactId>xml-apis</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <!-- This comes from our parent pom. If we don't expressly change it here to get included,
+         downstream will get warnings at compile time. -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+      <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>jdk.tools</groupId>
+          <artifactId>jdk.tools</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+  </dependencies>
+  <profiles>
+    <profile>
+      <id>shade</id>
+      <activation>
+        <property><name>!skipShade</name></property>
+      </activation>
+      <build>
+        <plugins>
+          <!-- We contain no source -->
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-source-plugin</artifactId>
+              <configuration>
+                <skipSource>true</skipSource>
+              </configuration>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-shade-plugin</artifactId>
+            <dependencies>
+              <dependency>
+                <groupId>org.apache.hadoop</groupId>
+                <artifactId>hadoop-maven-plugins</artifactId>
+                <version>${project.version}</version>
+              </dependency>
+            </dependencies>
+            <executions>
+              <execution>
+                <phase>package</phase>
+                <goals>
+                  <goal>shade</goal>
+                </goals>
+                <configuration>
+                  <artifactSet>
+                    <includes>
+                      <include>org.apache.hadoop:*</include>
+                    </includes>
+                  </artifactSet>
+                  <filters>
+                    <!-- We get these package level classes from various yarn api jars -->
+                    <filter>
+                      <artifact>org.apache.hadoop:hadoop-yarn-common</artifact>
+                      <excludes>
+                        <exclude>org/apache/hadoop/yarn/factories/package-info.class</exclude>
+                        <exclude>org/apache/hadoop/yarn/util/package-info.class</exclude>
+                        <exclude>org/apache/hadoop/yarn/factory/providers/package-info.class</exclude>
+                        <exclude>org/apache/hadoop/yarn/client/api/impl/package-info.class</exclude>
+                        <exclude>org/apache/hadoop/yarn/client/api/package-info.class</exclude>
+                      </excludes>
+                    </filter>
+                  </filters>
+                  <relocations>
+                    <relocation>
+                      <pattern>org/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.org.</shadedPattern>
+                      <excludes>
+                        <exclude>org/apache/hadoop/*</exclude>
+                        <exclude>org/apache/hadoop/**/*</exclude>
+                        <!-- Our non-shaded htrace and logging libraries -->
+                        <exclude>org/apache/htrace/*</exclude>
+                        <exclude>org/apache/htrace/**/*</exclude>
+                        <exclude>org/slf4j/*</exclude>
+                        <exclude>org/slf4j/**/*</exclude>
+                        <exclude>org/apache/commons/logging/*</exclude>
+                        <exclude>org/apache/commons/logging/**/*</exclude>
+                        <exclude>org/apache/log4j/*</exclude>
+                        <exclude>org/apache/log4j/**/*</exclude>
+                        <exclude>**/pom.xml</exclude>
+                        <!-- Not the org/ packages that are a part of the jdk -->
+                        <exclude>org/ietf/jgss/*</exclude>
+                        <exclude>org/omg/**/*</exclude>
+                        <exclude>org/w3c/dom/*</exclude>
+                        <exclude>org/w3c/dom/**/*</exclude>
+                        <exclude>org/xml/sax/*</exclude>
+                        <exclude>org/xml/sax/**/*</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
+                      <pattern>com/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.com.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                        <!-- Not the com/ packages that are a part of particular jdk implementations -->
+                        <exclude>com/sun/tools/*</exclude>
+                        <exclude>com/sun/javadoc/*</exclude>
+                        <exclude>com/sun/security/*</exclude>
+                        <exclude>com/sun/jndi/*</exclude>
+                        <exclude>com/sun/management/*</exclude>
+                        <exclude>com/sun/tools/**/*</exclude>
+                        <exclude>com/sun/javadoc/**/*</exclude>
+                        <exclude>com/sun/security/**/*</exclude>
+                        <exclude>com/sun/jndi/**/*</exclude>
+                        <exclude>com/sun/management/**/*</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
+                      <pattern>io/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.io.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                        <!-- Exclude config keys for Hadoop that look like package names -->
+                        <exclude>io/compression/*</exclude>
+                        <exclude>io/compression/**/*</exclude>
+                        <exclude>io/mapfile/*</exclude>
+                        <exclude>io/mapfile/**/*</exclude>
+                        <exclude>io/map/index/*</exclude>
+                        <exclude>io/seqfile/*</exclude>
+                        <exclude>io/seqfile/**/*</exclude>
+                        <exclude>io/file/buffer/size</exclude>
+                        <exclude>io/skip/checksum/errors</exclude>
+                        <exclude>io/sort/*</exclude>
+                        <exclude>io/serializations</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
+                      <pattern>javax/servlet/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.javax.servlet.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
+                      <pattern>net/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.net.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                        <!-- Exclude config keys for Hadoop that look like package names -->
+                        <exclude>net/topology/*</exclude>
+                        <exclude>net/topology/**/*</exclude>
+                      </excludes>
+                    </relocation>
+                  </relocations>
+                  <transformers>
+                    <!-- Needed until MSHADE-182 -->
+                    <transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
+                      <resource>NOTICE.txt</resource>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/LICENSE.txt</resource>
+                      <file>${basedir}/../../LICENSE.txt</file>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/NOTICE.txt</resource>
+                      <file>${basedir}/../../NOTICE.txt</file>
+                    </transformer>
+                  </transformers>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>license-maven-plugin</artifactId>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <profile>
+      <id>noshade</id>
+      <activation>
+        <property><name>skipShade</name></property>
+      </activation>
+      <build>
+        <plugins>
+          <!-- We contain no source -->
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-source-plugin</artifactId>
+            <configuration>
+              <skipSource>true</skipSource>
+            </configuration>
+          </plugin>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>license-maven-plugin</artifactId>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+</project>
+
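The relocation list above rewrites references to bundled third-party packages under ${shaded.dependency.prefix} (set to org.apache.hadoop.shaded in these modules), while leaving Hadoop's own packages, the deliberately unshaded htrace/slf4j/commons-logging/log4j packages, and JDK-owned namespaces untouched. A minimal sketch of what that means on a downstream classpath, assuming both the hadoop-client-api and hadoop-client-runtime jars are present; the class names are illustrative, not part of this patch:

    // Sketch only: Hadoop's public classes keep their package, while a bundled
    // dependency (Guava here, as an illustrative example) is expected to resolve
    // under the shaded prefix provided by hadoop-client-runtime.
    public class ShadedPrefixSmokeTest {
      public static void main(String[] args) throws ClassNotFoundException {
        // Excluded from relocation above, so it stays where downstream code expects it.
        Class.forName("org.apache.hadoop.conf.Configuration");
        // Bundled third-party classes live under the shaded prefix instead.
        Class.forName("org.apache.hadoop.shaded.com.google.common.base.Preconditions");
        System.out.println("shaded prefix looks correct");
      }
    }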

+ 124 - 0
hadoop-client-modules/hadoop-client-check-invariants/pom.xml

@@ -0,0 +1,124 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <artifactId>hadoop-client-check-invariants</artifactId>
+  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <packaging>pom</packaging>
+
+  <description>Enforces our invariants for the api and runtime client modules.</description>
+  <name>Apache Hadoop Client Packaging Invariants</name>
+
+  <properties>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client-runtime</artifactId>
+    </dependency>
+  </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <version>1.4</version>
+        <dependencies>
+          <dependency>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>extra-enforcer-rules</artifactId>
+            <version>1.0-beta-3</version>
+          </dependency>
+        </dependencies>
+        <executions>
+          <execution>
+            <id>enforce-banned-dependencies</id>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <rules>
+                <banTransitiveDependencies>
+<!--
+                  <message>
+    Our client-facing artifacts are not supposed to have additional dependencies
+    and one or more of them do. The output from the enforcer plugin should give
+    specifics.
+                  </message>
+-->
+                  <excludes>
+                    <!-- hadoop-annotations is provided, and both artifacts exclude its jdk.tools
+                         transitive dependency, but the enforcer still sees it.
+                    -->
+                    <exclude>org.apache.hadoop:hadoop-annotations</exclude>
+                    <!-- We leave HTrace as an unshaded dependency on purpose so that tracing within a JVM will work -->
+                    <exclude>org.apache.htrace:htrace-core4</exclude>
+                    <!-- Leave slf4j unshaded so downstream users can configure logging. -->
+                    <exclude>org.slf4j:slf4j-api</exclude>
+                    <!-- Leave commons-logging unshaded so downstream users can configure logging. -->
+                    <exclude>commons-logging:commons-logging</exclude>
+                    <!-- Leave log4j unshaded so downstream users can configure logging. -->
+                    <exclude>log4j:log4j</exclude>
+                  </excludes>
+                </banTransitiveDependencies>
+                <banDuplicateClasses>
+                  <findAllDuplicates>true</findAllDuplicates>
+                  <dependencies>
+                    <dependency>
+                      <groupId>org.apache.hadoop</groupId>
+                      <artifactId>hadoop-annotations</artifactId>
+                      <ignoreClasses>
+                        <ignoreClass>*</ignoreClass>
+                      </ignoreClasses>
+                    </dependency>
+                  </dependencies>
+                </banDuplicateClasses>
+              </rules>
+<!-- TODO we need a rule for "we don't have classes that are outside of the org.apache.hadoop package" -->
+<!-- TODO we need a rule for "the constants in this set of classes haven't been shaded / don't have this prefix"
+     Manually checking the set of Keys that look like packages we relocate:
+
+      cat `find . \( -name '*Keys.java' -o -name '*KeysPublic.java' \) -a -path '*/src/main/*'`  | grep -E "\"(io\.|org\.|com\.|net\.)" | grep -v "^package" | grep -v "^import" | grep -v "\"org.apache.hadoop"
+
+     Manually check the set of shaded artifacts to see if the Keys constants have been relocated:
+
+     for clazz in `find . \( -name '*Keys.java' -o -name '*KeysPublic.java' \) -a -path '*/src/main/*'`; do
+       clazz=${clazz#*src/main/java/}
+       clazz="${clazz%.java}"
+       javap -cp hadoop-client-modules/hadoop-client-api/target/hadoop-client-api-3.0.0-alpha2-SNAPSHOT.jar:hadoop-client-modules/hadoop-client-runtime/target/hadoop-client-runtime-3.0.0-alpha2-SNAPSHOT.jar:hadoop-client-modules/hadoop-client-minicluster/target/hadoop-client-minicluster-3.0.0-alpha2-SNAPSHOT.jar \
+           -constants "${clazz//\//.}" | grep "org.apache.hadoop.shaded"
+     done
+-->
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+</project>
+
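The enforcer rules above pin down the public surface of the shaded client: the only dependencies allowed to leak through hadoop-client-api and hadoop-client-runtime are the deliberately unshaded ones (htrace-core4, slf4j-api, commons-logging, log4j) plus the provided hadoop-annotations. A hedged sketch of the same invariant expressed as a runtime probe a downstream project could run against that classpath; the class names are illustrative and the probe is not part of this patch:

    // Sketch only: with just hadoop-client-api and hadoop-client-runtime on the
    // classpath, unshaded third-party classes should not be visible, while the
    // intentionally unshaded logging API should be.
    public class ClientInvariantProbe {
      public static void main(String[] args) {
        expectPresent("org.slf4j.Logger");                     // left unshaded on purpose
        expectAbsent("com.google.common.base.Preconditions");  // should exist only shaded
        expectAbsent("com.google.protobuf.Message");           // likewise
      }

      private static void expectPresent(String name) {
        try {
          Class.forName(name);
          System.out.println("ok, present: " + name);
        } catch (ClassNotFoundException e) {
          System.err.println("MISSING (expected present): " + name);
        }
      }

      private static void expectAbsent(String name) {
        try {
          Class.forName(name);
          System.err.println("LEAK (expected absent): " + name);
        } catch (ClassNotFoundException expected) {
          System.out.println("ok, not exposed: " + name);
        }
      }
    }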

+ 143 - 0
hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml

@@ -0,0 +1,143 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <artifactId>hadoop-client-check-test-invariants</artifactId>
+  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <packaging>pom</packaging>
+
+  <description>Enforces our invariants for the testing client modules.</description>
+  <name>Apache Hadoop Client Packaging Invariants for Test</name>
+
+  <properties>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client-runtime</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client-minicluster</artifactId>
+    </dependency>
+  </dependencies>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <version>1.4</version>
+        <dependencies>
+          <dependency>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>extra-enforcer-rules</artifactId>
+            <version>1.0-beta-3</version>
+          </dependency>
+        </dependencies>
+        <executions>
+          <execution>
+            <id>enforce-banned-dependencies</id>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <rules>
+                <banTransitiveDependencies>
+<!--
+                  <message>
+    Our client-facing artifacts are not supposed to have additional dependencies
+    and one or more of them do. The output from the enforcer plugin should give
+    specifics.
+                  </message>
+-->
+                  <excludes>
+                    <!-- hadoop-annotations is provided, and both artifacts exclude its jdk.tools
+                         transitive dependency, but the enforcer still sees it.
+                    -->
+                    <exclude>org.apache.hadoop:hadoop-annotations</exclude>
+                    <!-- We leave HTrace as an unshaded dependency on purpose so that tracing within a JVM will work -->
+                    <exclude>org.apache.htrace:htrace-core4</exclude>
+                    <!-- Leave slf4j unshaded so downstream users can configure logging. -->
+                    <exclude>org.slf4j:slf4j-api</exclude>
+                    <!-- Leave commons-logging unshaded so downstream users can configure logging. -->
+                    <exclude>commons-logging:commons-logging</exclude>
+                    <!-- Leave log4j unshaded so downstream users can configure logging. -->
+                    <exclude>log4j:log4j</exclude>
+                    <!-- Leave JUnit unshaded so downstream can use our test helper classes -->
+                    <exclude>junit:junit</exclude>
+                    <!-- JUnit brings in hamcrest -->
+                    <exclude>org.hamcrest:hamcrest-core</exclude>
+                  </excludes>
+                </banTransitiveDependencies>
+                <banDuplicateClasses>
+                  <findAllDuplicates>true</findAllDuplicates>
+                  <dependencies>
+                    <dependency>
+                      <groupId>org.apache.hadoop</groupId>
+                      <artifactId>hadoop-annotations</artifactId>
+                      <ignoreClasses>
+                        <ignoreClass>*</ignoreClass>
+                      </ignoreClasses>
+                    </dependency>
+                    <dependency>
+                      <!--Duplicate classes found:-->
+                      <!--Found in:-->
+                      <!--org.apache.hadoop:hadoop-client-runtime:jar:3.0.0-alpha3-SNAPSHOT:compile-->
+                      <!--org.apache.hadoop:hadoop-client-minicluster:jar:3.0.0-alpha3-SNAPSHOT:compile-->
+                      <groupId>io.netty</groupId>
+                      <artifactId>netty</artifactId>
+                      <ignoreClasses>
+                        <ignoreClass>*</ignoreClass>
+                      </ignoreClasses>
+                    </dependency>
+                  </dependencies>
+                </banDuplicateClasses>
+              </rules>
+<!-- TODO we need a rule for "we don't have classes that are outside of the org.apache.hadoop package" -->
+<!-- TODO we need a rule for "the constants in this set of classes haven't been shaded / don't have this prefix"
+     Manually checking the set of Keys that look like packages we relocate:
+
+      cat `find . \( -name '*Keys.java' -o -name '*KeysPublic.java' \) -a -path '*/src/main/*'`  | grep -E "\"(io\.|org\.|com\.|net\.)" | grep -v "^package" | grep -v "^import" | grep -v "\"org.apache.hadoop"
+
+     Manually check the set of shaded artifacts to see if the Keys constants have been relocated:
+
+     for clazz in `find . \( -name '*Keys.java' -o -name '*KeysPublic.java' \) -a -path '*/src/main/*'`; do
+       clazz=${clazz#*src/main/java/}
+       clazz="${clazz%.java}"
+       javap -cp hadoop-client-modules/hadoop-client-api/target/hadoop-client-api-3.0.0-alpha2-SNAPSHOT.jar:hadoop-client-modules/hadoop-client-runtime/target/hadoop-client-runtime-3.0.0-alpha2-SNAPSHOT.jar:hadoop-client-modules/hadoop-client-minicluster/target/hadoop-client-minicluster-3.0.0-alpha2-SNAPSHOT.jar \
+           -constants "${clazz//\//.}" | grep "org.apache.hadoop.shaded"
+     done
+-->
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+</project>
+

+ 159 - 0
hadoop-client-modules/hadoop-client-integration-tests/pom.xml

@@ -0,0 +1,159 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <artifactId>hadoop-client-integration-tests</artifactId>
+  <version>3.0.0-alpha3-SNAPSHOT</version>
+
+  <description>Checks that we can use the generated artifacts</description>
+  <name>Apache Hadoop Client Packaging Integration Tests</name>
+
+  <properties>
+    <failsafe.timeout>400</failsafe.timeout>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+  <profiles>
+    <profile>
+      <id>shade</id>
+      <activation>
+        <property><name>!skipShade</name></property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-client-api</artifactId>
+          <scope>test</scope>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-client-runtime</artifactId>
+          <scope>test</scope>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-client-minicluster</artifactId>
+          <scope>test</scope>
+        </dependency>
+      </dependencies>
+      <build>
+        <plugins>
+          <!-- Because our tests rely on our shaded artifacts, we can't compile
+               them until after the package phase has run.
+            -->
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-compiler-plugin</artifactId>
+            <executions>
+            <!--
+               First, let's make sure the normal test-compile doesn't try to
+               compile our integration tests.
+            -->
+              <execution>
+                <id>default-testCompile</id>
+                <phase>test-compile</phase>
+                <configuration>
+                  <testExcludes>
+                    <testExclude>**/IT*</testExclude>
+                    <testExclude>**/*IT</testExclude>
+                  </testExcludes>
+                </configuration>
+              </execution>
+            <!--
+               Finally, let's make a 'just for integration tests'-compile that
+               fires off prior to our integration tests but after the package
+               phase has created our shaded artifacts.
+            -->
+              <execution>
+                <id>compile-integration-tests</id>
+                <phase>pre-integration-test</phase>
+                <goals>
+                  <goal>testCompile</goal>
+                </goals>
+                <configuration>
+                  <testIncludes>
+                    <testInclude>**/IT*</testInclude>
+                    <testInclude>**/*IT</testInclude>
+                  </testIncludes>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-failsafe-plugin</artifactId>
+            <executions>
+              <execution>
+                <goals>
+                  <goal>integration-test</goal>
+                  <goal>verify</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <profile>
+      <id>noshade</id>
+      <activation>
+        <property><name>skipShade</name></property>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+          <scope>test</scope>
+        </dependency>
+        <dependency>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs</artifactId>
+          <scope>test</scope>
+          <type>test-jar</type>
+        </dependency>
+      </dependencies>
+    </profile>
+  </profiles>
+
+</project>
+
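Reading the two profiles together (an inference from the activation properties and plugin bindings above, not something stated elsewhere in the patch): under the default shade profile, mvn verify packages the shaded client artifacts first, compiles the IT* classes only at pre-integration-test, and then lets failsafe run them against those jars; passing -DskipShade instead activates the noshade profile, which swaps in the plain hadoop-common and hadoop-hdfs test jars so the module still builds when shading is skipped.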

+ 113 - 0
hadoop-client-modules/hadoop-client-integration-tests/src/test/java/org/apache/hadoop/example/ITUseMiniCluster.java

@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+package org.apache.hadoop.example;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+
+import org.apache.hadoop.conf.Configuration;
+
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
+
+/**
+ * Ensure that we can perform operations against the shaded minicluster
+ * given the API and runtime jars by performing some simple smoke tests.
+ */
+public class ITUseMiniCluster {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ITUseMiniCluster.class);
+
+  private MiniDFSCluster cluster;
+
+  private static final String TEST_PATH = "/foo/bar/cats/dee";
+  private static final String FILENAME = "test.file";
+
+  private static final String TEXT = "Lorem ipsum dolor sit amet, consectetur "
+      + "adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore "
+      + "magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation "
+      + "ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute "
+      + "irure dolor in reprehenderit in voluptate velit esse cillum dolore eu "
+      + "fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident,"
+      + " sunt in culpa qui officia deserunt mollit anim id est laborum.";
+
+  @Before
+  public void clusterUp() throws IOException {
+    final Configuration conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3)
+        .build();
+    cluster.waitActive();
+  }
+
+  @After
+  public void clusterDown() {
+    if (cluster != null) {
+      cluster.close();
+    }
+  }
+
+  @Test
+  public void useHdfsFileSystem() throws IOException {
+    try (final FileSystem fs = cluster.getFileSystem()) {
+      simpleReadAfterWrite(fs);
+    }
+  }
+
+  public void simpleReadAfterWrite(final FileSystem fs) throws IOException {
+    LOG.info("Testing read-after-write with FS implementation: {}", fs);
+    final Path path = new Path(TEST_PATH, FILENAME);
+    if (!fs.mkdirs(path.getParent())) {
+      throw new IOException("Mkdirs failed to create " +
+          TEST_PATH);
+    }
+    try (final FSDataOutputStream out = fs.create(path)) {
+      out.writeUTF(TEXT);
+    }
+    try (final FSDataInputStream in = fs.open(path)) {
+      final String result = in.readUTF();
+      Assert.assertEquals("Didn't read back text we wrote.", TEXT, result);
+    }
+  }
+
+  @Test
+  public void useWebHDFS() throws IOException, URISyntaxException {
+    try (final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(
+        cluster.getConfiguration(0), WebHdfsConstants.WEBHDFS_SCHEME)) {
+      simpleReadAfterWrite(fs);
+    }
+  }
+}

+ 18 - 8
hadoop-tools/hadoop-azure-datalake/dev-support/findbugs-exclude.xml → hadoop-client-modules/hadoop-client-integration-tests/src/test/resources/hdfs-site.xml

@@ -1,3 +1,5 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -14,11 +16,19 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<FindBugsFilter>
-    <!-- Buffer object is accessed withing trusted code and intentionally assigned instead of array copy -->
-    <Match>
-        <Class name="org.apache.hadoop.hdfs.web.PrivateAzureDataLakeFileSystem$BatchAppendOutputStream$CommitTask"/>
-        <Bug pattern="EI_EXPOSE_REP2"/>
-        <Priority value="2"/>
-    </Match>
-</FindBugsFilter>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <!-- Turn security off for tests by default -->
+  <property>
+    <name>hadoop.security.authentication</name>
+    <value>simple</value>
+  </property>
+  <!-- Disable min block size since most tests use tiny blocks -->
+  <property>
+    <name>dfs.namenode.fs-limits.min-block-size</name>
+    <value>0</value>
+  </property>
+
+</configuration>
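The min-block-size override exists because, as the comment above notes, most of these tests use tiny blocks; at its 1 MB default the NameNode would reject such block sizes. A brief sketch of the kind of write this enables (a hypothetical helper, not part of this patch; the path and sizes are illustrative):

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch only: with dfs.namenode.fs-limits.min-block-size set to 0, the
    // NameNode accepts block sizes far below the default, keeping tests small.
    final class TinyBlockWriter {
      static void writeTinyBlockFile(FileSystem fs) throws java.io.IOException {
        try (FSDataOutputStream out = fs.create(
            new Path("/tmp/tiny-block.file"),
            true,          // overwrite if present
            4096,          // io buffer size
            (short) 1,     // replication factor
            1024L)) {      // 1 KB block size, well below the default minimum
          out.writeUTF("small blocks are fine in tests");
        }
      }
    }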

+ 24 - 0
hadoop-client-modules/hadoop-client-integration-tests/src/test/resources/log4j.properties

@@ -0,0 +1,24 @@
+#
+#   Licensed to the Apache Software Foundation (ASF) under one or more
+#   contributor license agreements.  See the NOTICE file distributed with
+#   this work for additional information regarding copyright ownership.
+#   The ASF licenses this file to You under the Apache License, Version 2.0
+#   (the "License"); you may not use this file except in compliance with
+#   the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=info,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n
+

+ 792 - 0
hadoop-client-modules/hadoop-client-minicluster/pom.xml

@@ -0,0 +1,792 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <artifactId>hadoop-client-minicluster</artifactId>
+  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <packaging>jar</packaging>
+
+  <description>Apache Hadoop Minicluster for Clients</description>
+  <name>Apache Hadoop Client Test Minicluster</name>
+
+  <properties>
+    <shaded.dependency.prefix>org.apache.hadoop.shaded</shaded.dependency.prefix>
+    <!-- We contain no source -->
+    <maven.javadoc.skip>true</maven.javadoc.skip>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client-api</artifactId>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client-runtime</artifactId>
+      <scope>runtime</scope>
+    </dependency>
+    <!-- Leave JUnit as a direct dependency -->
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>runtime</scope>
+    </dependency>
+    <!-- Add hadoop-annotations explicitly and mark it optional so it is removed from our transitive tree -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+      <scope>compile</scope>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>jdk.tools</groupId>
+          <artifactId>jdk.tools</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <!-- uncomment this dependency if you need to use
+         `mvn dependency:tree -Dverbose` to determine if a dependency shows up
+         in both the hadoop-client-* artifacts and something under minicluster.
+    -->
+<!--
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client</artifactId>
+      <scope>provided</scope>
+    </dependency>
+ -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minicluster</artifactId>
+      <optional>true</optional>
+      <exclusions>
+        <!-- Exclude the in-development timeline service and
+             add it as an optional runtime dependency
+          -->
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-server-timelineservice</artifactId>
+        </exclusion>
+        <!-- exclude everything that comes in via the shaded runtime and api  TODO remove once we have a filter for "is in these artifacts" -->
+        <!-- Skip jersey, since we need it again here. -->
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-hdfs-client</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-app</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+        </exclusion>
+        <!-- exclude things that came in via transitive in shaded runtime and api -->
+        <exclusion>
+          <groupId>io.netty</groupId>
+          <artifactId>netty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>javax.servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.xml.bind</groupId>
+          <artifactId>jaxb-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.avro</groupId>
+          <artifactId>avro</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.curator</groupId>
+          <artifactId>curator-client</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-mapreduce-client-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-server-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.zookeeper</groupId>
+          <artifactId>zookeeper</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.fusesource.leveldbjni</groupId>
+          <artifactId>leveldbjni-all</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.eclipse.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.google.guava</groupId>
+          <artifactId>guava</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.google.protobuf</groupId>
+          <artifactId>protobuf-java</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-collections</groupId>
+          <artifactId>commons-collections</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-io</groupId>
+          <artifactId>commons-io</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-lang</groupId>
+          <artifactId>commons-lang</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>log4j</groupId>
+          <artifactId>log4j</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-annotations</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-databind</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-cli</groupId>
+          <artifactId>commons-cli</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-codec</groupId>
+          <artifactId>commons-codec</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>javax.servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>xmlenc</groupId>
+          <artifactId>xmlenc</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <!-- Add optional runtime dependency on the in-development timeline server module
+         to indicate that downstream folks interested in turning it on need that dep.
+      -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-timelineservice</artifactId>
+      <scope>runtime</scope>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>*</groupId>
+          <artifactId>*</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <!-- Add back in transitive dependencies of hadoop-minicluster that are test-jar artifacts, excluded as a side effect of excluding the jar.
+         Note that all of these must be marked "optional" because they won't be removed from the reduced-dependencies pom after they're included.
+      -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>compile</scope>
+      <type>test-jar</type>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>*</groupId>
+          <artifactId>*</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>compile</scope>
+      <type>test-jar</type>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>*</groupId>
+          <artifactId>*</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+      <scope>compile</scope>
+      <type>test-jar</type>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>*</groupId>
+          <artifactId>*</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <!-- Add back in the transitive dependencies excluded from hadoop-common in client TODO remove once we have a filter for "is in these artifacts" -->
+    <!-- skip javax.servlet:servlet-api because it's in client -->
+    <!-- Skip commons-logging:commons-logging-api because it looks like nothing actually included it -->
+    <!-- Skip jetty-util because it's in client -->
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-core</artifactId>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.ws.rs</groupId>
+          <artifactId>jsr311-api</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-client</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-json</artifactId>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.xml.bind</groupId>
+          <artifactId>jaxb-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.codehaus.jackson</groupId>
+          <artifactId>jackson-core-asl</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.codehaus.jackson</groupId>
+          <artifactId>jackson-mapper-asl</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.codehaus.jackson</groupId>
+          <artifactId>jackson-jaxrs</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.codehaus.jackson</groupId>
+          <artifactId>jackson-xc</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-server</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-servlet</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jdt</groupId>
+      <artifactId>core</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <!-- skip org.apache.avro:avro-ipc because it doesn't look like hadoop-common actually uses it -->
+    <dependency>
+      <groupId>net.sf.kosmosfs</groupId>
+      <artifactId>kfs</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>net.java.dev.jets3t</groupId>
+      <artifactId>jets3t</artifactId>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>commons-codec</groupId>
+          <artifactId>commons-codec</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.httpcomponents</groupId>
+          <artifactId>httpclient</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.httpcomponents</groupId>
+          <artifactId>httpcore</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>com.jcraft</groupId>
+      <artifactId>jsch</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <!-- add back in transitive dependencies of hadoop-mapreduce-client-app removed in client -->
+    <!-- Skipping javax.servlet:servlet-api because it's in client -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-nodemanager</artifactId>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>javax.servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-server-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.fusesource.leveldbjni</groupId>
+          <artifactId>leveldbjni-all</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.eclipse.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.google.guava</groupId>
+          <artifactId>guava</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.google.protobuf</groupId>
+          <artifactId>protobuf-java</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-codec</groupId>
+          <artifactId>commons-codec</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-lang</groupId>
+          <artifactId>commons-lang</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>javax.xml.bind</groupId>
+          <artifactId>jaxb-api</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-web-proxy</artifactId>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>javax.servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-yarn-server-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.google.guava</groupId>
+          <artifactId>guava</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-httpclient</groupId>
+          <artifactId>commons-httpclient</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.eclipse.jetty</groupId>
+          <artifactId>jetty-util</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <!-- skipping hadoop-annotations -->
+    <dependency>
+      <groupId>com.google.inject.extensions</groupId>
+      <artifactId>guice-servlet</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <!-- skipping junit:junit because it is test scope -->
+    <!-- skipping avro because it is in client via hadoop-common -->
+    <!-- skipping jline:jline because it is only present at test scope in the original -->
+    <!-- skipping io.netty:netty because it's in client -->
+    <!-- add back in transitive dependencies of hadoop-yarn-api removed in client -->
+    <!-- skipping hadoop-annotations -->
+    <dependency>
+      <groupId>com.google.inject</groupId>
+      <artifactId>guice</artifactId>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>com.google.guava</groupId>
+          <artifactId>guava</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey.jersey-test-framework</groupId>
+      <artifactId>jersey-test-framework-grizzly2</artifactId>
+      <optional>true</optional>
+      <exclusions>
+        <!-- excluding because client already has the tomcat version -->
+        <exclusion>
+          <groupId>org.glassfish</groupId>
+          <artifactId>javax.servlet</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <!-- skipping jersey-server because it's above -->
+    <dependency>
+      <groupId>com.sun.jersey.contribs</groupId>
+      <artifactId>jersey-guice</artifactId>
+      <optional>true</optional>
+    </dependency>
+    <!-- skipping guice-servlet because it's above -->
+    <!-- skipping avro because it is in client via hadoop-common -->
+    <!-- skipping jersey-core because it's above -->
+    <!-- skipping jersey-json because it's above. -->
+    <!-- skipping io.netty:netty because it's in client -->
+    <!-- Add back in transitive dependencies from hadoop-mapreduce-client-core that were excluded by client -->
+    <!-- skipping junit:junit because it is test scope -->
+    <!-- skipping guice because it's above -->
+    <!-- skipping jersey-test-framework-grizzly2 because it's above -->
+    <!-- skipping jersey-server because it's above -->
+    <!-- skipping jersey-guice because it's above -->
+    <!-- skipping avro because it is in client via hadoop-common -->
+    <!-- skipping hadoop-annotations -->
+    <!-- skipping guice-servlet because it's above -->
+    <!-- skipping jersey-json because it's above. -->
+    <!-- skipping io.netty:netty because it's in client -->
+    <!-- add back in transitive dependencies of hadoop-mapreduce-client-jobclient that were excluded from client -->
+    <!-- skipping junit:junit because it is test scope -->
+    <!-- skipping avro because it is in client via hadoop-common -->
+    <!-- skipping hadoop-annotations -->
+    <!-- skipping guice-servlet because it's above -->
+    <!-- skipping io.netty:netty because it's in client -->
+  </dependencies>
+  <profiles>
+    <profile>
+      <id>shade</id>
+      <activation>
+        <property><name>!skipShade</name></property>
+      </activation>
+      <build>
+        <plugins>
+          <!-- We contain no source -->
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-source-plugin</artifactId>
+            <configuration>
+              <skipSource>true</skipSource>
+            </configuration>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-shade-plugin</artifactId>
+            <dependencies>
+              <dependency>
+                <groupId>org.apache.hadoop</groupId>
+                <artifactId>hadoop-maven-plugins</artifactId>
+                <version>${project.version}</version>
+              </dependency>
+            </dependencies>
+            <executions>
+              <execution>
+                <phase>package</phase>
+                <goals>
+                  <goal>shade</goal>
+                </goals>
+                <configuration>
+                  <artifactSet>
+                    <excludes>
+                      <!-- Fine to expose our already-shaded deps as dependencies -->
+                      <exclude>org.apache.hadoop:hadoop-annotations</exclude>
+                      <exclude>org.apache.hadoop:hadoop-client-api</exclude>
+                      <exclude>org.apache.hadoop:hadoop-client-runtime</exclude>
+                      <!-- Fine to expose our purposefully not-shaded deps as dependencies -->
+                      <exclude>org.apache.htrace:htrace-core4</exclude>
+                      <exclude>org.slf4j:slf4j-api</exclude>
+                      <exclude>commons-logging:commons-logging</exclude>
+                      <exclude>junit:junit</exclude>
+                      <!-- Keep optional runtime deps out of the shading -->
+                      <exclude>org.apache.hadoop:hadoop-yarn-server-timelineservice</exclude>
+                      <exclude>log4j:log4j</exclude>
+                      <!-- We need a filter that matches just those things that are included in the above artifacts -->
+                    </excludes>
+                  </artifactSet>
+                  <filters>
+                    <!-- Some of our dependencies include source, so remove it. -->
+                    <filter>
+                      <artifact>*:*</artifact>
+                      <excludes>
+                        <exclude>**/*.java</exclude>
+                      </excludes>
+                    </filter>
+                    <!-- We pull in several test jars; keep out the actual test classes -->
+                    <filter>
+                      <artifact>*:*</artifact>
+                      <excludes>
+                        <exclude>**/Test*.class</exclude>
+                      </excludes>
+                    </filter>
+                    <!-- Since runtime has classes for these jars, we exclude them.
+                         We still want the java services api files, since those were excluded in runtime
+                      -->
+                    <filter>
+                      <artifact>com.sun.jersey:jersey-client</artifact>
+                      <excludes>
+                        <exclude>**/*.class</exclude>
+                      </excludes>
+                    </filter>
+                    <filter>
+                      <artifact>com.sun.jersey:jersey-core</artifact>
+                      <excludes>
+                        <exclude>**/*.class</exclude>
+                      </excludes>
+                    </filter>
+                    <filter>
+                      <artifact>com.sun.jersey:jersey-servlet</artifact>
+                      <excludes>
+                        <exclude>**/*.class</exclude>
+                      </excludes>
+                    </filter>
+                  </filters>
+                  <relocations>
+                    <relocation>
+                      <pattern>org/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.org.</shadedPattern>
+                      <excludes>
+                        <exclude>org/apache/hadoop/*</exclude>
+                        <exclude>org/apache/hadoop/**/*</exclude>
+                        <!-- Our non-shaded htrace and logging libraries -->
+                        <exclude>org/apache/htrace/*</exclude>
+                        <exclude>org/apache/htrace/**/*</exclude>
+                        <exclude>org/slf4j/*</exclude>
+                        <exclude>org/slf4j/**/*</exclude>
+                        <exclude>org/apache/commons/logging/*</exclude>
+                        <exclude>org/apache/commons/logging/**/*</exclude>
+                        <exclude>org/apache/log4j/*</exclude>
+                        <exclude>org/apache/log4j/**/*</exclude>
+                        <exclude>**/pom.xml</exclude>
+                        <!-- Our non-shaded JUnit library -->
+                        <exclude>org/junit/*</exclude>
+                        <exclude>org/junit/**/*</exclude>
+                        <!-- Not the org/ packages that are a part of the jdk -->
+                        <exclude>org/ietf/jgss/*</exclude>
+                        <exclude>org/omg/**/*</exclude>
+                        <exclude>org/w3c/dom/*</exclude>
+                        <exclude>org/w3c/dom/**/*</exclude>
+                        <exclude>org/xml/sax/*</exclude>
+                        <exclude>org/xml/sax/**/*</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
+                      <pattern>com/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.com.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                        <!-- But not the com/ packages that are part of particular JDK implementations -->
+                        <exclude>com/sun/tools/*</exclude>
+                        <exclude>com/sun/javadoc/*</exclude>
+                        <exclude>com/sun/security/*</exclude>
+                        <exclude>com/sun/jndi/*</exclude>
+                        <exclude>com/sun/management/*</exclude>
+                        <exclude>com/sun/tools/**/*</exclude>
+                        <exclude>com/sun/javadoc/**/*</exclude>
+                        <exclude>com/sun/security/**/*</exclude>
+                        <exclude>com/sun/jndi/**/*</exclude>
+                        <exclude>com/sun/management/**/*</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
+                      <pattern>io/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.io.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                        <!-- Exclude config keys for Hadoop that look like package names -->
+                        <exclude>io/compression/*</exclude>
+                        <exclude>io/compression/**/*</exclude>
+                        <exclude>io/mapfile/*</exclude>
+                        <exclude>io/mapfile/**/*</exclude>
+                        <exclude>io/map/index/*</exclude>
+                        <exclude>io/seqfile/*</exclude>
+                        <exclude>io/seqfile/**/*</exclude>
+                        <exclude>io/file/buffer/size</exclude>
+                        <exclude>io/skip/checksum/errors</exclude>
+                        <exclude>io/sort/*</exclude>
+                        <exclude>io/serializations</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
+                      <pattern>javax/el/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.javax.el.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
+                      <pattern>javax/inject/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.javax.inject.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
+                      <pattern>javax/servlet/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.javax.servlet.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
+                      <pattern>net/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.net.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                        <!-- Exclude config keys for Hadoop that look like package names -->
+                        <exclude>net/topology/*</exclude>
+                        <exclude>net/topology/**/*</exclude>
+                      </excludes>
+                    </relocation>
+                  </relocations>
+                  <transformers>
+                    <!-- Needed until MSHADE-182 -->
+                    <transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
+                      <resources>
+                        <resource>LICENSE</resource>
+                        <resource>LICENSE.txt</resource>
+                        <resource>NOTICE</resource>
+                        <resource>NOTICE.txt</resource>
+                        <resource>Grizzly_THIRDPARTYLICENSEREADME.txt</resource>
+                        <resource>LICENSE.dom-documentation.txt</resource>
+                        <resource>LICENSE.dom-software.txt</resource>
+                        <resource>LICENSE.sax.txt</resource>
+                      </resources>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/LICENSE.txt</resource>
+                      <file>${basedir}/../../LICENSE.txt</file>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/NOTICE.txt</resource>
+                      <file>${basedir}/../../NOTICE.txt</file>
+                    </transformer>
+                  </transformers>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>license-maven-plugin</artifactId>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <profile>
+      <id>noshade</id>
+      <activation>
+        <property><name>skipShade</name></property>
+      </activation>
+      <build>
+        <plugins>
+          <!-- We contain no source -->
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-source-plugin</artifactId>
+            <configuration>
+              <skipSource>true</skipSource>
+            </configuration>
+          </plugin>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>license-maven-plugin</artifactId>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+
+</project>
+

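For illustration only (not part of the change set): the io/ and net/ relocation excludes above keep Hadoop configuration keys such as io.file.buffer.size and net.topology.* out of the rewrite, since relocation can also rewrite matching string constants. A minimal sketch, assuming the shaded client jars are on the classpath, of the kind of lookup those excludes protect:

    import org.apache.hadoop.conf.Configuration;

    public class ConfigKeySketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // This key must survive shading verbatim; the io/ excludes above are
        // what keep it from being rewritten under the shaded prefix.
        int bufferSize = conf.getInt("io.file.buffer.size", 4096);
        System.out.println("io.file.buffer.size = " + bufferSize);
      }
    }
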
+ 359 - 0
hadoop-client-modules/hadoop-client-runtime/pom.xml

@@ -0,0 +1,359 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <artifactId>hadoop-client-runtime</artifactId>
+  <version>3.0.0-alpha3-SNAPSHOT</version>
+  <packaging>jar</packaging>
+
+  <description>Apache Hadoop Client Runtime</description>
+  <name>Apache Hadoop Client Runtime</name>
+
+  <properties>
+    <shaded.dependency.prefix>org.apache.hadoop.shaded</shaded.dependency.prefix>
+    <!-- We contain no source -->
+    <maven.javadoc.skip>true</maven.javadoc.skip>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client</artifactId>
+      <!-- We list this as optional because as a type-pom it won't get included in the shading.
+           Marking it optional means it doesn't count as a transitive dependency of this artifact.
+        -->
+      <optional>true</optional>
+      <exclusions>
+        <!-- these APIs are a part of the SE JDK -->
+        <exclusion>
+          <groupId>javax.xml.bind</groupId>
+          <artifactId>jaxb-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>xml-apis</groupId>
+          <artifactId>xml-apis</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <!-- At runtime anyone using us must have the api present -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client-api</artifactId>
+      <scope>runtime</scope>
+    </dependency>
+    <!-- This comes from our parent pom. If we don't explicitly redeclare it here so it gets
+         included, downstream builds will get warnings at compile time. -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+      <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>jdk.tools</groupId>
+          <artifactId>jdk.tools</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <!-- Since hadoop-client is listed as optional, we have to list transitive
+         dependencies that we still want to show up.
+         * HTrace
+         * Slf4j API
+         * commons-logging
+      -->
+    <dependency>
+      <groupId>org.apache.htrace</groupId>
+      <artifactId>htrace-core4</artifactId>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+      <scope>runtime</scope>
+    </dependency>
+    <!-- Move log4j to optional, since it is needed for some pieces folks might not use:
+         * one of the three custom log4j appenders we have
+         * JobConf (?!) (so essentially any user of MapReduce)
+      -->
+    <dependency>
+      <groupId>log4j</groupId>
+      <artifactId>log4j</artifactId>
+      <scope>runtime</scope>
+      <optional>true</optional>
+    </dependency>
+  </dependencies>
+  <profiles>
+    <profile>
+      <id>shade</id>
+      <activation>
+        <property><name>!skipShade</name></property>
+      </activation>
+      <build>
+        <plugins>
+          <!-- We contain no source -->
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-source-plugin</artifactId>
+            <configuration>
+              <skipSource>true</skipSource>
+            </configuration>
+          </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-shade-plugin</artifactId>
+            <dependencies>
+              <dependency>
+                <groupId>org.apache.hadoop</groupId>
+                <artifactId>hadoop-maven-plugins</artifactId>
+                <version>${project.version}</version>
+              </dependency>
+            </dependencies>
+            <executions>
+              <execution>
+                <phase>package</phase>
+                <goals>
+                  <goal>shade</goal>
+                </goals>
+                <configuration>
+                  <artifactSet>
+                    <excludes>
+                      <!-- We need a filter that matches just those things that are included in the api jar -->
+                      <exclude>org.apache.hadoop:hadoop-client-api</exclude>
+                      <!-- Leave HTrace as an unshaded dependency on purpose, since a static class member is used to trace within a given JVM instance -->
+                      <exclude>org.apache.htrace:htrace-core4</exclude>
+                      <!-- Leave slf4j unshaded so downstream users can configure logging. -->
+                      <exclude>org.slf4j:slf4j-api</exclude>
+                      <!-- Leave commons-logging unshaded so downstream users can configure logging. -->
+                      <exclude>commons-logging:commons-logging</exclude>
+                      <!-- Leave log4j unshaded so downstream users can configure logging. -->
+                      <exclude>log4j:log4j</exclude>
+                    </excludes>
+                  </artifactSet>
+                  <filters>
+                    <!-- We need a filter that matches just those things that are included in the api jar -->
+                    <filter>
+                      <artifact>org.apache.hadoop:*</artifact>
+                      <excludes>
+                        <exclude>**/*</exclude>
+                        <exclude>*</exclude>
+                      </excludes>
+                    </filter>
+                    <!-- Some of our dependencies include source, so remove it. -->
+                    <filter>
+                      <artifact>*:*</artifact>
+                      <excludes>
+                        <exclude>**/*.java</exclude>
+                      </excludes>
+                    </filter>
+                    <!-- We only want one copy of the Localizer class. As long as jasper-compiler and jasper-runtime stay on the same version, which copy wins doesn't matter -->
+                    <filter>
+                      <artifact>tomcat:jasper-compiler</artifact>
+                      <excludes>
+                        <exclude>org/apache/jasper/compiler/Localizer.class</exclude>
+                      </excludes>
+                    </filter>
+                    <!-- We only have xerces as a dependency for XML output of the fsimage edits; we don't need anything xerces-specific for javax.xml support -->
+                    <filter>
+                      <artifact>xerces:xercesImpl</artifact>
+                      <excludes>
+                        <exclude>META-INF/services/*</exclude>
+                      </excludes>
+                    </filter>
+                    <!-- We rely on jersey for our web interfaces. We want its java services files used only internally to jersey -->
+                    <filter>
+                      <artifact>com.sun.jersey:*</artifact>
+                      <excludes>
+                        <exclude>META-INF/services/javax.*</exclude>
+                      </excludes>
+                    </filter>
+                  </filters>
+                  <relocations>
+                    <relocation>
+                      <pattern>org/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.org.</shadedPattern>
+                      <excludes>
+                        <exclude>org/apache/hadoop/*</exclude>
+                        <exclude>org/apache/hadoop/**/*</exclude>
+                        <!-- Our non-shaded htrace and logging libraries -->
+                        <exclude>org/apache/htrace/*</exclude>
+                        <exclude>org/apache/htrace/**/*</exclude>
+                        <exclude>org/slf4j/*</exclude>
+                        <exclude>org/slf4j/**/*</exclude>
+                        <exclude>org/apache/commons/logging/*</exclude>
+                        <exclude>org/apache/commons/logging/**/*</exclude>
+                        <exclude>org/apache/log4j/*</exclude>
+                        <exclude>org/apache/log4j/**/*</exclude>
+                        <exclude>**/pom.xml</exclude>
+                        <!-- But not the org/ packages that are part of the JDK -->
+                        <exclude>org/ietf/jgss/*</exclude>
+                        <exclude>org/omg/**/*</exclude>
+                        <exclude>org/w3c/dom/*</exclude>
+                        <exclude>org/w3c/dom/**/*</exclude>
+                        <exclude>org/xml/sax/*</exclude>
+                        <exclude>org/xml/sax/**/*</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
+                      <pattern>com/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.com.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                        <!-- But not the com/ packages that are part of particular JDK implementations -->
+                        <exclude>com/sun/tools/*</exclude>
+                        <exclude>com/sun/javadoc/*</exclude>
+                        <exclude>com/sun/security/*</exclude>
+                        <exclude>com/sun/jndi/*</exclude>
+                        <exclude>com/sun/management/*</exclude>
+                        <exclude>com/sun/tools/**/*</exclude>
+                        <exclude>com/sun/javadoc/**/*</exclude>
+                        <exclude>com/sun/security/**/*</exclude>
+                        <exclude>com/sun/jndi/**/*</exclude>
+                        <exclude>com/sun/management/**/*</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
+                      <pattern>io/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.io.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                        <!-- Exclude config keys for Hadoop that look like package names -->
+                        <exclude>io/compression/*</exclude>
+                        <exclude>io/compression/**/*</exclude>
+                        <exclude>io/mapfile/*</exclude>
+                        <exclude>io/mapfile/**/*</exclude>
+                        <exclude>io/map/index/*</exclude>
+                        <exclude>io/seqfile/*</exclude>
+                        <exclude>io/seqfile/**/*</exclude>
+                        <exclude>io/file/buffer/size</exclude>
+                        <exclude>io/skip/checksum/errors</exclude>
+                        <exclude>io/sort/*</exclude>
+                        <exclude>io/serializations</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
+                      <pattern>javax/el/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.javax.el.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
+                      <pattern>javax/servlet/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.javax.servlet.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                      </excludes>
+                    </relocation>
+                    <relocation>
+                      <pattern>net/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.net.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                        <!-- Exclude config keys for Hadoop that look like package names -->
+                        <exclude>net/topology/*</exclude>
+                        <exclude>net/topology/**/*</exclude>
+                      </excludes>
+                    </relocation>
+                    <!-- Relocating all of javax/ is probably not safe, so this stays commented out. -->
+    <!--
+                    <relocation>
+                      <pattern>javax/</pattern>
+                      <shadedPattern>${shaded.dependency.prefix}.javax.</shadedPattern>
+                      <excludes>
+                        <exclude>**/pom.xml</exclude>
+                      </excludes>
+                    </relocation>
+    -->
+                  </relocations>
+                  <transformers>
+                    <!-- Needed until MSHADE-182 -->
+                    <transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
+                      <resources>
+                        <resource>NOTICE.txt</resource>
+                        <resource>NOTICE</resource>
+                        <resource>LICENSE</resource>
+                      </resources>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/LICENSE.txt</resource>
+                      <file>${basedir}/../../LICENSE.txt</file>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
+                      <resource>META-INF/NOTICE.txt</resource>
+                      <file>${basedir}/../../NOTICE.txt</file>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.XmlAppendingTransformer">
+                      <resource>META-INF/jboss-beans.xml</resource>
+                      <!-- Add this to enable loading of DTDs
+                      <ignoreDtd>false</ignoreDtd>
+                      -->
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
+                      <resource>META-INF/mailcap.default</resource>
+                    </transformer>
+                    <transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
+                      <resource>META-INF/mimetypes.default</resource>
+                    </transformer>
+                  </transformers>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>license-maven-plugin</artifactId>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <profile>
+      <id>noshade</id>
+      <activation>
+        <property><name>skipShade</name></property>
+      </activation>
+      <build>
+        <plugins>
+          <!-- We contain no source -->
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-source-plugin</artifactId>
+            <configuration>
+              <skipSource>true</skipSource>
+            </configuration>
+          </plugin>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>license-maven-plugin</artifactId>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+
+</project>
+

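For illustration only (not part of the change set): a rough sketch of what the shading above should yield on a downstream classpath. It assumes Guava is among the third-party dependencies relocated under the org.apache.hadoop.shaded prefix; the logging artifacts listed in the artifactSet excludes keep their original packages.

    public class ShadingCheck {
      public static void main(String[] args) throws ClassNotFoundException {
        // Relocated third-party code resolves only under the shaded prefix.
        Class.forName(
            "org.apache.hadoop.shaded.com.google.common.collect.ImmutableList");
        // The logging APIs are deliberately left unshaded so downstream users
        // can choose and configure their own backend.
        Class.forName("org.slf4j.LoggerFactory");
        System.out.println("shaded and unshaded classes resolved as expected");
      }
    }
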
+ 26 - 13
hadoop-client/pom.xml → hadoop-client-modules/hadoop-client/pom.xml

@@ -18,16 +18,14 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-   <version>3.0.0-alpha2-SNAPSHOT</version>
-   <relativePath>../hadoop-project-dist</relativePath>
- </parent>
-  <groupId>org.apache.hadoop</groupId>
+   <version>3.0.0-alpha3-SNAPSHOT</version>
+   <relativePath>../../hadoop-project-dist</relativePath>
+</parent>
   <artifactId>hadoop-client</artifactId>
-  <version>3.0.0-alpha2-SNAPSHOT</version>
-  <packaging>jar</packaging>
+  <version>3.0.0-alpha3-SNAPSHOT</version>
 
-  <description>Apache Hadoop Client</description>
-  <name>Apache Hadoop Client</name>
+  <description>Apache Hadoop Client aggregation pom with dependencies exposed</description>
+  <name>Apache Hadoop Client Aggregator</name>
 
 <properties>
    <hadoop.component>client</hadoop.component>
@@ -87,10 +85,6 @@
           <groupId>net.sf.kosmosfs</groupId>
           <artifactId>kfs</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>net.java.dev.jets3t</groupId>
-          <artifactId>jets3t</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>com.jcraft</groupId>
           <artifactId>jsch</artifactId>
@@ -99,6 +93,11 @@
           <groupId>org.apache.zookeeper</groupId>
           <artifactId>zookeeper</artifactId>
         </exclusion>
+        <!-- No slf4j backends for downstream clients -->
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
 
@@ -171,6 +170,11 @@
           <groupId>io.netty</groupId>
           <artifactId>netty</artifactId>
         </exclusion>
+        <!-- No slf4j backends for downstream clients -->
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>org.apache.zookeeper</groupId>
           <artifactId>zookeeper</artifactId>
@@ -271,6 +275,11 @@
           <groupId>io.netty</groupId>
           <artifactId>netty</artifactId>
         </exclusion>
+        <!-- No slf4j backends for downstream clients -->
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
 
@@ -299,6 +308,11 @@
           <groupId>io.netty</groupId>
           <artifactId>netty</artifactId>
         </exclusion>
+        <!-- No slf4j backends for downstream clients -->
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
       </exclusions>
     </dependency>
 
@@ -313,7 +327,6 @@
         </exclusion>
       </exclusions>
     </dependency>
-    
   </dependencies>
 
 </project>

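For illustration only (not part of the change set): with slf4j-log4j12 excluded above, the slf4j binding on a downstream classpath is whatever the application ships itself; if none is present, slf4j warns and falls back to its no-op logger. A minimal sketch of downstream code that stays backend-agnostic:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class DownstreamLogging {
      private static final Logger LOG =
          LoggerFactory.getLogger(DownstreamLogging.class);

      public static void main(String[] args) {
        // Routed to log4j, logback, or whichever binding the application
        // provides; hadoop-client no longer forces slf4j-log4j12 onto the
        // classpath.
        LOG.info("logging through the application's chosen slf4j backend");
      }
    }
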
+ 45 - 0
hadoop-client-modules/pom.xml

@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-alpha3-SNAPSHOT</version>
+    <relativePath>../hadoop-project</relativePath>
+  </parent>
+  <artifactId>hadoop-client-modules</artifactId>
+  <packaging>pom</packaging>
+  <description>multi-module for Apache Hadoop client artifacts</description>
+  <name>Apache Hadoop Client Modules</name>
+
+  <modules>
+    <!-- Left as an empty artifact with its dependencies, for compatibility -->
+    <module>hadoop-client</module>
+    <!-- Should be used at compile scope for access to IA.Public classes -->
+    <module>hadoop-client-api</module>
+    <!-- Should be used at runtime scope for the remaining classes necessary for hadoop-client-api to function -->
+    <module>hadoop-client-runtime</module>
+    <!-- Should be used at test scope by those that need access to a mini cluster that works with the above api and runtime -->
+    <module>hadoop-client-minicluster</module>
+    <!-- Checks invariants above -->
+    <module>hadoop-client-check-invariants</module>
+    <module>hadoop-client-check-test-invariants</module>
+    <!-- Attempt to use the created libraries -->
+    <module>hadoop-client-integration-tests</module>
+  </modules>
+
+</project>

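For illustration only (not part of the change set): a minimal sketch of the intended downstream layout, compiling against hadoop-client-api and adding hadoop-client-runtime at run time as the module comments above describe. Configuration, FileSystem and Path are assumed here as typical IA.Public entry points.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClientApiSketch {
      public static void main(String[] args) throws IOException {
        // Compiles against hadoop-client-api only; hadoop-client-runtime
        // supplies the shaded third-party classes when this actually runs.
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.getLocal(conf)) {
          System.out.println("working dir: " + fs.getWorkingDirectory());
          System.out.println("/tmp exists: " + fs.exists(new Path("/tmp")));
        }
      }
    }
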
+ 2 - 7
hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml

@@ -18,12 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha2-SNAPSHOT</version>
+    <version>3.0.0-alpha3-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
-  <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-cloud-storage</artifactId>
-  <version>3.0.0-alpha2-SNAPSHOT</version>
+  <version>3.0.0-alpha3-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Cloud Storage</description>
@@ -94,10 +93,6 @@
           <groupId>net.sf.kosmosfs</groupId>
           <artifactId>kfs</artifactId>
         </exclusion>
-        <exclusion>
-          <groupId>net.java.dev.jets3t</groupId>
-          <artifactId>jets3t</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>com.jcraft</groupId>
           <artifactId>jsch</artifactId>

+ 2 - 3
hadoop-cloud-storage-project/pom.xml

@@ -20,12 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha2-SNAPSHOT</version>
+    <version>3.0.0-alpha3-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
-  <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-cloud-storage-project</artifactId>
-  <version>3.0.0-alpha2-SNAPSHOT</version>
+  <version>3.0.0-alpha3-SNAPSHOT</version>
   <description>Apache Hadoop Cloud Storage Project</description>
   <name>Apache Hadoop Cloud Storage Project</name>
   <packaging>pom</packaging>

+ 2 - 3
hadoop-common-project/hadoop-annotations/pom.xml

@@ -20,12 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha2-SNAPSHOT</version>
+    <version>3.0.0-alpha3-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
-  <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-annotations</artifactId>
-  <version>3.0.0-alpha2-SNAPSHOT</version>
+  <version>3.0.0-alpha3-SNAPSHOT</version>
   <description>Apache Hadoop Annotations</description>
   <name>Apache Hadoop Annotations</name>
   <packaging>jar</packaging>

+ 2 - 3
hadoop-common-project/hadoop-auth-examples/pom.xml

@@ -20,12 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha2-SNAPSHOT</version>
+    <version>3.0.0-alpha3-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
-  <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth-examples</artifactId>
-  <version>3.0.0-alpha2-SNAPSHOT</version>
+  <version>3.0.0-alpha3-SNAPSHOT</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop Auth Examples</name>

+ 2 - 3
hadoop-common-project/hadoop-auth/pom.xml

@@ -20,12 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha2-SNAPSHOT</version>
+    <version>3.0.0-alpha3-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
-  <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth</artifactId>
-  <version>3.0.0-alpha2-SNAPSHOT</version>
+  <version>3.0.0-alpha3-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop Auth</name>

+ 3 - 3
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java

@@ -142,7 +142,7 @@ public class AuthenticationFilter implements Filter {
   private String cookieDomain;
   private String cookiePath;
   private boolean isCookiePersistent;
-  private boolean isInitializedByTomcat;
+  private boolean destroySecretProvider;
 
   /**
    * <p>Initializes the authentication filter and signer secret provider.</p>
@@ -209,7 +209,7 @@ public class AuthenticationFilter implements Filter {
         secretProvider = constructSecretProvider(
             filterConfig.getServletContext(),
             config, false);
-        isInitializedByTomcat = true;
+        destroySecretProvider = true;
       } catch (Exception ex) {
         throw new ServletException(ex);
       }
@@ -356,7 +356,7 @@ public class AuthenticationFilter implements Filter {
       authHandler.destroy();
       authHandler = null;
     }
-    if (secretProvider != null && isInitializedByTomcat) {
+    if (secretProvider != null && destroySecretProvider) {
       secretProvider.destroy();
       secretProvider = null;
     }

+ 177 - 77
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java

@@ -18,6 +18,7 @@ import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.ietf.jgss.GSSException;
 import org.ietf.jgss.GSSContext;
 import org.ietf.jgss.GSSCredential;
 import org.ietf.jgss.GSSManager;
@@ -48,25 +49,32 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.regex.Pattern;
 
+import com.google.common.collect.HashMultimap;
+
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
 /**
- * The {@link KerberosAuthenticationHandler} implements the Kerberos SPNEGO authentication mechanism for HTTP.
+ * The {@link KerberosAuthenticationHandler} implements the Kerberos SPNEGO
+ * authentication mechanism for HTTP.
  * <p>
  * The supported configuration properties are:
  * <ul>
- * <li>kerberos.principal: the Kerberos principal to used by the server. As stated by the Kerberos SPNEGO
- * specification, it should be <code>HTTP/${HOSTNAME}@{REALM}</code>. The realm can be omitted from the
- * principal as the JDK GSS libraries will use the realm name of the configured default realm.
+ * <li>kerberos.principal: the Kerberos principal to be used by the server. As
+ * stated by the Kerberos SPNEGO specification, it should be
+ * <code>HTTP/${HOSTNAME}@{REALM}</code>. The realm can be omitted from the
+ * principal as the JDK GSS libraries will use the realm name of the configured
+ * default realm.
  * It does not have a default value.</li>
- * <li>kerberos.keytab: the keytab file containing the credentials for the Kerberos principal.
+ * <li>kerberos.keytab: the keytab file containing the credentials for the
+ * Kerberos principal.
  * It does not have a default value.</li>
- * <li>kerberos.name.rules: kerberos names rules to resolve principal names, see 
+ * <li>kerberos.name.rules: kerberos names rules to resolve principal names, see
  * {@link KerberosName#setRules(String)}</li>
  * </ul>
  */
 public class KerberosAuthenticationHandler implements AuthenticationHandler {
-  private static Logger LOG = LoggerFactory.getLogger(KerberosAuthenticationHandler.class);
+  public static final Logger LOG = LoggerFactory.getLogger(
+      KerberosAuthenticationHandler.class);
 
   /**
    * Kerberos context configuration for the JDK GSS library.
@@ -117,8 +125,8 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
 
       return new AppConfigurationEntry[]{
           new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
-                                  AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
-                                  options),};
+              AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+              options), };
     }
   }
 
@@ -128,12 +136,14 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
   public static final String TYPE = "kerberos";
 
   /**
-   * Constant for the configuration property that indicates the kerberos principal.
+   * Constant for the configuration property that indicates the kerberos
+   * principal.
    */
   public static final String PRINCIPAL = TYPE + ".principal";
 
   /**
-   * Constant for the configuration property that indicates the keytab file path.
+   * Constant for the configuration property that indicates the keytab
+   * file path.
    */
   public static final String KEYTAB = TYPE + ".keytab";
 
@@ -148,6 +158,42 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
   private GSSManager gssManager;
   private Subject serverSubject = new Subject();
   private List<LoginContext> loginContexts = new ArrayList<LoginContext>();
+  /**
+   * HADOOP-10158 added support for running HTTP with multiple SPNs,
+   * but the implicit requirement is that they must come from the SAME
+   * local realm.
+   *
+   * This is a regression for use cases where the HTTP service needs to run
+   * with an SPN from a foreign realm, which is not supported after
+   * HADOOP-10158.
+   *
+   * HADOOP-13565 brings back support for SPNs from foreign realms without
+   * depending on any specific Kerberos domain_realm mapping mechanism.
+   *
+   * There are several reasons for not using native Kerberos domain_realm
+   * mapping:
+   * 1. As commented in KerberosUtil#getDomainRealm(), the JDK's
+   * domain_realm mapping routines are private to the security.krb5
+   * package. As a result, KerberosUtil#getDomainRealm() always returns the
+   * local realm.
+   *
+   * 2. The server's krb5.conf is not the only place that contains the
+   * domain_realm mapping in a real deployment. Based on the MIT KDC document
+   * at https://web.mit.edu/kerberos/krb5-1.13/doc/admin/realm_config.html,
+   * the Kerberos domain_realm mapping can be implemented with one of three
+   * mechanisms:
+   * 1) host-based krb5.conf on the HTTP server
+   * 2) KDC-based krb5.conf on the KDC server
+   * 3) DNS-based TXT records with a _kerberos prefix on the hostname.
+   *
+   * We choose to maintain the domain_realm mapping based on the HTTP
+   * principals from the keytab. The mapping is built at login time, keyed by
+   * server name, and is used later to look up SPNs by the server name taken
+   * from the request for authentication. The multimap implementation allows
+   * SPNs for the same server from different realms.
+   *
+   */
+  private HashMultimap<String, String> serverPrincipalMap =
+      HashMultimap.create();
 
   /**
    * Creates a Kerberos SPNEGO authentication handler with the default
@@ -170,7 +216,8 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
   /**
    * Initializes the authentication handler instance.
    * <p>
-   * It creates a Kerberos context using the principal and keytab specified in the configuration.
+   * It creates a Kerberos context using the principal and keytab specified in
+   * the configuration.
    * <p>
    * This method is invoked by the {@link AuthenticationFilter#init} method.
    *
@@ -225,15 +272,27 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
           throw new AuthenticationException(le);          
         }
         loginContexts.add(loginContext);
+        KerberosName kerbName = new KerberosName(spnegoPrincipal);
+        if (kerbName.getHostName() != null
+            && kerbName.getServiceName() != null
+            && kerbName.getServiceName().equals("HTTP")) {
+          boolean added = serverPrincipalMap.put(kerbName.getHostName(),
+              spnegoPrincipal);
+          LOG.info("Map server: {} to principal: [{}], added = {}",
+              kerbName.getHostName(), spnegoPrincipal, added);
+        } else {
+          LOG.warn("HTTP principal: [{}] is invalid for SPNEGO!",
+              spnegoPrincipal);
+        }
       }
       try {
-        gssManager = Subject.doAs(serverSubject, new PrivilegedExceptionAction<GSSManager>() {
-
-          @Override
-          public GSSManager run() throws Exception {
-            return GSSManager.getInstance();
-          }
-        });
+        gssManager = Subject.doAs(serverSubject,
+            new PrivilegedExceptionAction<GSSManager>() {
+              @Override
+              public GSSManager run() throws Exception {
+                return GSSManager.getInstance();
+              }
+            });
       } catch (PrivilegedActionException ex) {
         throw ex.getException();
       }
@@ -312,91 +371,84 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
   }
 
   /**
-   * It enforces the the Kerberos SPNEGO authentication sequence returning an {@link AuthenticationToken} only
-   * after the Kerberos SPNEGO sequence has completed successfully.
+   * It enforces the Kerberos SPNEGO authentication sequence, returning an
+   * {@link AuthenticationToken} only after the Kerberos SPNEGO sequence has
+   * completed successfully.
    *
    * @param request the HTTP client request.
    * @param response the HTTP client response.
    *
-   * @return an authentication token if the Kerberos SPNEGO sequence is complete and valid,
-   *         <code>null</code> if it is in progress (in this case the handler handles the response to the client).
+   * @return an authentication token if the Kerberos SPNEGO sequence is complete
+   * and valid, <code>null</code> if it is in progress (in this case the handler
+   * handles the response to the client).
    *
    * @throws IOException thrown if an IO error occurred.
    * @throws AuthenticationException thrown if Kerberos SPNEGO sequence failed.
    */
   @Override
-  public AuthenticationToken authenticate(HttpServletRequest request, final HttpServletResponse response)
-    throws IOException, AuthenticationException {
+  public AuthenticationToken authenticate(HttpServletRequest request,
+      final HttpServletResponse response)
+      throws IOException, AuthenticationException {
     AuthenticationToken token = null;
-    String authorization = request.getHeader(KerberosAuthenticator.AUTHORIZATION);
+    String authorization = request.getHeader(
+        KerberosAuthenticator.AUTHORIZATION);
 
-    if (authorization == null || !authorization.startsWith(KerberosAuthenticator.NEGOTIATE)) {
+    if (authorization == null
+        || !authorization.startsWith(KerberosAuthenticator.NEGOTIATE)) {
       response.setHeader(WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE);
       response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
       if (authorization == null) {
-        LOG.trace("SPNEGO starting");
+        LOG.trace("SPNEGO starting for url: {}", request.getRequestURL());
       } else {
-        LOG.warn("'" + KerberosAuthenticator.AUTHORIZATION + "' does not start with '" +
+        LOG.warn("'" + KerberosAuthenticator.AUTHORIZATION +
+            "' does not start with '" +
             KerberosAuthenticator.NEGOTIATE + "' :  {}", authorization);
       }
     } else {
-      authorization = authorization.substring(KerberosAuthenticator.NEGOTIATE.length()).trim();
+      authorization = authorization.substring(
+          KerberosAuthenticator.NEGOTIATE.length()).trim();
       final Base64 base64 = new Base64(0);
       final byte[] clientToken = base64.decode(authorization);
       final String serverName = InetAddress.getByName(request.getServerName())
                                            .getCanonicalHostName();
       try {
-        token = Subject.doAs(serverSubject, new PrivilegedExceptionAction<AuthenticationToken>() {
-
-          @Override
-          public AuthenticationToken run() throws Exception {
-            AuthenticationToken token = null;
-            GSSContext gssContext = null;
-            GSSCredential gssCreds = null;
-            try {
-              gssCreds = gssManager.createCredential(
-                  gssManager.createName(
-                      KerberosUtil.getServicePrincipal("HTTP", serverName),
-                      KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL")),
-                  GSSCredential.INDEFINITE_LIFETIME,
-                  new Oid[]{
-                    KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),
-                    KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID")},
-                  GSSCredential.ACCEPT_ONLY);
-              gssContext = gssManager.createContext(gssCreds);
-              byte[] serverToken = gssContext.acceptSecContext(clientToken, 0, clientToken.length);
-              if (serverToken != null && serverToken.length > 0) {
-                String authenticate = base64.encodeToString(serverToken);
-                response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE,
-                                   KerberosAuthenticator.NEGOTIATE + " " + authenticate);
+        token = Subject.doAs(serverSubject,
+            new PrivilegedExceptionAction<AuthenticationToken>() {
+              private Set<String> serverPrincipals =
+                  serverPrincipalMap.get(serverName);
+              @Override
+              public AuthenticationToken run() throws Exception {
+                if (LOG.isTraceEnabled()) {
+                  LOG.trace("SPNEGO with server principals: {} for {}",
+                      serverPrincipals.toString(), serverName);
+                }
+                AuthenticationToken token = null;
+                Exception lastException = null;
+                for (String serverPrincipal : serverPrincipals) {
+                  try {
+                    token = runWithPrincipal(serverPrincipal, clientToken,
+                        base64, response);
+                  } catch (Exception ex) {
+                    lastException = ex;
+                    LOG.trace("Auth {} failed with {}", serverPrincipal, ex);
+                  } finally {
+                    if (token != null) {
+                      LOG.trace("Auth {} succeeded", serverPrincipal);
+                      break;
+                    }
+                  }
+                }
+                if (token != null) {
+                  return token;
+                } else {
+                  throw new AuthenticationException(lastException);
+                }
               }
-              if (!gssContext.isEstablished()) {
-                response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
-                LOG.trace("SPNEGO in progress");
-              } else {
-                String clientPrincipal = gssContext.getSrcName().toString();
-                KerberosName kerberosName = new KerberosName(clientPrincipal);
-                String userName = kerberosName.getShortName();
-                token = new AuthenticationToken(userName, clientPrincipal, getType());
-                response.setStatus(HttpServletResponse.SC_OK);
-                LOG.trace("SPNEGO completed for principal [{}]", clientPrincipal);
-              }
-            } finally {
-              if (gssContext != null) {
-                gssContext.dispose();
-              }
-              if (gssCreds != null) {
-                gssCreds.dispose();
-              }
-            }
-            return token;
-          }
-        });
+            });
       } catch (PrivilegedActionException ex) {
         if (ex.getException() instanceof IOException) {
           throw (IOException) ex.getException();
-        }
-        else {
+        } else {
           throw new AuthenticationException(ex.getException());
         }
       }
@@ -404,4 +456,52 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
     return token;
   }
 
+  private AuthenticationToken runWithPrincipal(String serverPrincipal,
+      byte[] clientToken, Base64 base64, HttpServletResponse response) throws
+      IOException, AuthenticationException, ClassNotFoundException,
+      GSSException, IllegalAccessException, NoSuchFieldException {
+    GSSContext gssContext = null;
+    GSSCredential gssCreds = null;
+    AuthenticationToken token = null;
+    try {
+      LOG.trace("SPNEGO initiated with server principal [{}]", serverPrincipal);
+      gssCreds = this.gssManager.createCredential(
+          this.gssManager.createName(serverPrincipal,
+              KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL")),
+          GSSCredential.INDEFINITE_LIFETIME,
+          new Oid[]{
+              KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),
+              KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID")},
+          GSSCredential.ACCEPT_ONLY);
+      gssContext = this.gssManager.createContext(gssCreds);
+      byte[] serverToken = gssContext.acceptSecContext(clientToken, 0,
+          clientToken.length);
+      if (serverToken != null && serverToken.length > 0) {
+        String authenticate = base64.encodeToString(serverToken);
+        response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE,
+                           KerberosAuthenticator.NEGOTIATE + " " +
+                           authenticate);
+      }
+      if (!gssContext.isEstablished()) {
+        response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
+        LOG.trace("SPNEGO in progress");
+      } else {
+        String clientPrincipal = gssContext.getSrcName().toString();
+        KerberosName kerberosName = new KerberosName(clientPrincipal);
+        String userName = kerberosName.getShortName();
+        token = new AuthenticationToken(userName, clientPrincipal, getType());
+        response.setStatus(HttpServletResponse.SC_OK);
+        LOG.trace("SPNEGO completed for client principal [{}]",
+            clientPrincipal);
+      }
+    } finally {
+      if (gssContext != null) {
+        gssContext.dispose();
+      }
+      if (gssCreds != null) {
+        gssCreds.dispose();
+      }
+    }
+    return token;
+  }
 }

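For illustration only (not part of the change set): a simplified sketch of the server-name-to-SPN multimap described in the comment above. The principal strings are made up; the real map is populated from the HTTP principals found in the keytab at login time.

    import java.util.Set;

    import com.google.common.collect.HashMultimap;

    public class ServerPrincipalMapSketch {
      public static void main(String[] args) {
        HashMultimap<String, String> serverPrincipalMap = HashMultimap.create();
        // One host can carry SPNs from different realms; a multimap keeps both.
        serverPrincipalMap.put("web.example.com",
            "HTTP/web.example.com@EXAMPLE.COM");
        serverPrincipalMap.put("web.example.com",
            "HTTP/web.example.com@PARTNER.COM");

        // At request time the handler looks up every candidate SPN for the
        // canonical server name and tries them until one authenticates.
        Set<String> candidates = serverPrincipalMap.get("web.example.com");
        System.out.println("candidate SPNs: " + candidates);
      }
    }
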
+ 2 - 2
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java

@@ -54,7 +54,7 @@ public class KerberosName {
    * A pattern that matches a Kerberos name with at most 2 components.
    */
   private static final Pattern nameParser =
-    Pattern.compile("([^/@]*)(/([^/@]*))?@([^/@]*)");
+      Pattern.compile("([^/@]+)(/([^/@]+))?(@([^/@]+))?");
 
   /**
    * A pattern that matches a string with out '$' and then a single
@@ -109,7 +109,7 @@ public class KerberosName {
     } else {
       serviceName = match.group(1);
       hostName = match.group(3);
-      realm = match.group(4);
+      realm = match.group(5);
     }
   }
 

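For illustration only (not part of the change set): a standalone sketch of the revised name pattern. Wrapping the realm in its own optional group is what moves it from group(4) to group(5) and lets names without a host or a realm still parse, matching the new testParsing() cases further below.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class KerberosNameRegexSketch {
      private static final Pattern NAME_PARSER =
          Pattern.compile("([^/@]+)(/([^/@]+))?(@([^/@]+))?");

      public static void main(String[] args) {
        String[] names = {
            "HTTP/abc.com@EXAMPLE.COM", "HTTP/abc.com", "HTTP@EXAMPLE.COM"};
        for (String name : names) {
          Matcher m = NAME_PARSER.matcher(name);
          if (m.matches()) {
            // group(3) is the host (or null), group(5) is the realm (or null).
            System.out.printf("service=%s host=%s realm=%s%n",
                m.group(1), m.group(3), m.group(5));
          }
        }
      }
    }
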
+ 1 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java

@@ -258,7 +258,7 @@ public class ZKSignerSecretProvider extends RolloverSignerSecretProvider {
     } catch (KeeperException.BadVersionException bve) {
       LOG.debug("Unable to push to znode; another server already did it");
     } catch (Exception ex) {
-      LOG.error("An unexpected exception occured pushing data to ZooKeeper",
+      LOG.error("An unexpected exception occurred pushing data to ZooKeeper",
               ex);
     }
   }

+ 22 - 0
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java

@@ -82,6 +82,28 @@ public class TestKerberosName {
     checkTranslation("root/joe@FOO.COM", "root/joe@FOO.COM");
   }
 
+  @Test
+  public void testParsing() throws Exception {
+    final String principalNameFull = "HTTP/abc.com@EXAMPLE.COM";
+    final String principalNameWoRealm = "HTTP/abc.com";
+    final String principalNameWoHost = "HTTP@EXAMPLE.COM";
+
+    final KerberosName kerbNameFull = new KerberosName(principalNameFull);
+    Assert.assertEquals("HTTP", kerbNameFull.getServiceName());
+    Assert.assertEquals("abc.com", kerbNameFull.getHostName());
+    Assert.assertEquals("EXAMPLE.COM", kerbNameFull.getRealm());
+
+    final KerberosName kerbNamewoRealm = new KerberosName(principalNameWoRealm);
+    Assert.assertEquals("HTTP", kerbNamewoRealm.getServiceName());
+    Assert.assertEquals("abc.com", kerbNamewoRealm.getHostName());
+    Assert.assertEquals(null, kerbNamewoRealm.getRealm());
+
+    final KerberosName kerbNameWoHost = new KerberosName(principalNameWoHost);
+    Assert.assertEquals("HTTP", kerbNameWoHost.getServiceName());
+    Assert.assertEquals(null, kerbNameWoHost.getHostName());
+    Assert.assertEquals("EXAMPLE.COM", kerbNameWoHost.getRealm());
+  }
+
   @Test
   public void testToLowerCase() throws Exception {
     String rules =

+ 6 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -410,4 +410,10 @@
    <Field name="done"/>
     <Bug pattern="JLM_JSR166_UTILCONCURRENT_MONITORENTER"/>
   </Match>
+
+  <Match>
+    <Class name="org.apache.hadoop.metrics2.impl.MetricsConfig"/>
+    <Method name="toString"/>
+    <Bug pattern="DM_DEFAULT_ENCODING"/>
+  </Match>
 </FindBugsFilter>

+ 40 - 20
hadoop-common-project/hadoop-common/pom.xml

@@ -20,12 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha2-SNAPSHOT</version>
+    <version>3.0.0-alpha3-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
-  <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-common</artifactId>
-  <version>3.0.0-alpha2-SNAPSHOT</version>
+  <version>3.0.0-alpha3-SNAPSHOT</version>
   <description>Apache Hadoop Common</description>
   <name>Apache Hadoop Common</name>
   <packaging>jar</packaging>
@@ -46,6 +45,11 @@
       <artifactId>hadoop-annotations</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
@@ -157,11 +161,6 @@
       <artifactId>log4j</artifactId>
       <scope>compile</scope>
     </dependency>
-    <dependency>
-      <groupId>net.java.dev.jets3t</groupId>
-      <artifactId>jets3t</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
@@ -173,8 +172,13 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>commons-configuration</groupId>
-      <artifactId>commons-configuration</artifactId>
+      <groupId>commons-beanutils</groupId>
+      <artifactId>commons-beanutils</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-configuration2</artifactId>
       <scope>compile</scope>
     </dependency>
     <dependency>
@@ -187,16 +191,6 @@
       <artifactId>slf4j-log4j12</artifactId>
       <scope>runtime</scope>
     </dependency>
-    <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-core-asl</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.codehaus.jackson</groupId>
-      <artifactId>jackson-mapper-asl</artifactId>
-      <scope>compile</scope>
-    </dependency>
     <dependency>
       <groupId>org.mockito</groupId>
       <artifactId>mockito-all</artifactId>
@@ -320,6 +314,10 @@
       <groupId>org.apache.kerby</groupId>
       <artifactId>kerb-simplekdc</artifactId>
     </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+    </dependency>
   </dependencies>
 
   <build>
@@ -525,6 +523,7 @@
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h</exclude>
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c</exclude>
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h</exclude>
+            <exclude>src/main/native/gtest/**/*</exclude>
             <exclude>src/test/resources/test-untar.tgz</exclude>
             <exclude>src/test/resources/test.har/_SUCCESS</exclude>
             <exclude>src/test/resources/test.har/_index</exclude>
@@ -596,6 +595,10 @@
         <snappy.lib></snappy.lib>
         <snappy.include></snappy.include>
         <require.snappy>false</require.snappy>
+        <zstd.prefix></zstd.prefix>
+        <zstd.lib></zstd.lib>
+        <zstd.include></zstd.include>
+        <require.zstd>false</require.zstd>
         <openssl.prefix></openssl.prefix>
         <openssl.lib></openssl.lib>
         <openssl.include></openssl.include>
@@ -653,6 +656,8 @@
                     <javahClassName>org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMapping</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.snappy.SnappyCompressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.snappy.SnappyDecompressor</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.compress.zstd.ZStandardCompressor</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.compress.zstd.ZStandardDecompressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Compressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Decompressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.erasurecode.ErasureCodeNative</javahClassName>
@@ -686,9 +691,13 @@
                     <JVM_ARCH_DATA_MODEL>${sun.arch.data.model}</JVM_ARCH_DATA_MODEL>
                     <REQUIRE_BZIP2>${require.bzip2}</REQUIRE_BZIP2>
                     <REQUIRE_SNAPPY>${require.snappy}</REQUIRE_SNAPPY>
+                    <REQUIRE_ZSTD>${require.zstd}</REQUIRE_ZSTD>
                     <CUSTOM_SNAPPY_PREFIX>${snappy.prefix}</CUSTOM_SNAPPY_PREFIX>
                     <CUSTOM_SNAPPY_LIB>${snappy.lib} </CUSTOM_SNAPPY_LIB>
                     <CUSTOM_SNAPPY_INCLUDE>${snappy.include} </CUSTOM_SNAPPY_INCLUDE>
+                    <CUSTOM_ZSTD_PREFIX>${zstd.prefix}</CUSTOM_ZSTD_PREFIX>
+                    <CUSTOM_ZSTD_LIB>${zstd.lib} </CUSTOM_ZSTD_LIB>
+                    <CUSTOM_ZSTD_INCLUDE>${zstd.include} </CUSTOM_ZSTD_INCLUDE>
                     <REQUIRE_ISAL>${require.isal} </REQUIRE_ISAL>
                     <CUSTOM_ISAL_PREFIX>${isal.prefix} </CUSTOM_ISAL_PREFIX>
                     <CUSTOM_ISAL_LIB>${isal.lib} </CUSTOM_ISAL_LIB>
@@ -746,6 +755,11 @@
         <isal.lib></isal.lib>
         <require.snappy>false</require.snappy>
         <bundle.snappy.in.bin>true</bundle.snappy.in.bin>
+        <zstd.prefix></zstd.prefix>
+        <zstd.lib></zstd.lib>
+        <zstd.include></zstd.include>
+        <require.zstd>false</require.zstd>
+        <bundle.zstd.in.bin>true</bundle.zstd.in.bin>
         <openssl.prefix></openssl.prefix>
         <openssl.lib></openssl.lib>
         <openssl.include></openssl.include>
@@ -795,6 +809,8 @@
                     <javahClassName>org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMapping</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.snappy.SnappyCompressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.snappy.SnappyDecompressor</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.compress.zstd.ZStandardCompressor</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.compress.zstd.ZStandardDecompressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Compressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Decompressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.erasurecode.ErasureCodeNative</javahClassName>
@@ -851,6 +867,10 @@
                     <argument>/p:CustomSnappyLib=${snappy.lib}</argument>
                     <argument>/p:CustomSnappyInclude=${snappy.include}</argument>
                     <argument>/p:RequireSnappy=${require.snappy}</argument>
+                    <argument>/p:CustomZstdPrefix=${zstd.prefix}</argument>
+                    <argument>/p:CustomZstdLib=${zstd.lib}</argument>
+                    <argument>/p:CustomZstdInclude=${zstd.include}</argument>
+                    <argument>/p:RequireZstd=${require.zstd}</argument>
                     <argument>/p:CustomOpensslPrefix=${openssl.prefix}</argument>
                     <argument>/p:CustomOpensslLib=${openssl.lib}</argument>
                     <argument>/p:CustomOpensslInclude=${openssl.include}</argument>
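
The pom and CMake changes in this commit wire a ZStandard (zstd) native compressor into libhadoop. As a rough illustration of what the new bindings enable, here is a minimal Java sketch that routes a byte stream through Hadoop's CompressionCodec API. The codec class name org.apache.hadoop.io.compress.ZStandardCodec is an assumption inferred from the ZStandardCompressor/ZStandardDecompressor javah entries above, and the sketch presumes libhadoop was built with the zstd profile properties shown here.

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.io.compress.CompressionOutputStream;
    import org.apache.hadoop.util.ReflectionUtils;

    public class ZstdCodecSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Assumed codec class; requires a native build with zstd enabled
        // (the require.zstd / zstd.prefix properties added in the pom above).
        Class<?> clazz = conf.getClassByNameOrNull(
            "org.apache.hadoop.io.compress.ZStandardCodec");
        if (clazz == null) {
          throw new IllegalStateException("ZStandardCodec not on the classpath");
        }
        CompressionCodec codec =
            (CompressionCodec) ReflectionUtils.newInstance(clazz, conf);
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (CompressionOutputStream out = codec.createOutputStream(bytes)) {
          out.write("hello zstd".getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("compressed size: " + bytes.size());
      }
    }
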

+ 29 - 0
hadoop-common-project/hadoop-common/src/CMakeLists.txt

@@ -94,6 +94,33 @@ else()
     endif()
 endif()
 
+# Require zstandard
+SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
+hadoop_set_find_shared_library_version("1")
+find_library(ZSTD_LIBRARY
+    NAMES zstd
+    PATHS ${CUSTOM_ZSTD_PREFIX} ${CUSTOM_ZSTD_PREFIX}/lib
+          ${CUSTOM_ZSTD_PREFIX}/lib64 ${CUSTOM_ZSTD_LIB})
+SET(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})
+find_path(ZSTD_INCLUDE_DIR
+    NAMES zstd.h
+    PATHS ${CUSTOM_ZSTD_PREFIX} ${CUSTOM_ZSTD_PREFIX}/include
+          ${CUSTOM_ZSTD_INCLUDE})
+if (ZSTD_LIBRARY AND ZSTD_INCLUDE_DIR)
+    GET_FILENAME_COMPONENT(HADOOP_ZSTD_LIBRARY ${ZSTD_LIBRARY} NAME)
+    set(ZSTD_SOURCE_FILES
+        "${SRC}/io/compress/zstd/ZStandardCompressor.c"
+        "${SRC}/io/compress/zstd/ZStandardDecompressor.c")
+    set(REQUIRE_ZSTD ${REQUIRE_ZSTD}) # Stop warning about unused variable.
+    message(STATUS "Found ZStandard: ${ZSTD_LIBRARY}")
+else ()
+    set(ZSTD_INCLUDE_DIR "")
+    set(ZSTD_SOURCE_FILES "")
+    IF(REQUIRE_ZSTD)
+        MESSAGE(FATAL_ERROR "Required zstandard library could not be found.  ZSTD_LIBRARY=${ZSTD_LIBRARY}, ZSTD_INCLUDE_DIR=${ZSTD_INCLUDE_DIR}, CUSTOM_ZSTD_PREFIX=${CUSTOM_ZSTD_PREFIX}, CUSTOM_ZSTD_INCLUDE=${CUSTOM_ZSTD_INCLUDE}")
+    ENDIF(REQUIRE_ZSTD)
+endif ()
+
 set(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
 hadoop_set_find_shared_library_version("2")
 find_library(ISAL_LIBRARY
@@ -208,6 +235,7 @@ include_directories(
     ${BZIP2_INCLUDE_DIR}
     ${SNAPPY_INCLUDE_DIR}
     ${ISAL_INCLUDE_DIR}
+    ${ZSTD_INCLUDE_DIR}
     ${OPENSSL_INCLUDE_DIR}
     ${SRC}/util
 )
@@ -222,6 +250,7 @@ hadoop_add_dual_library(hadoop
     ${SRC}/io/compress/lz4/lz4hc.c
     ${ISAL_SOURCE_FILES}
     ${SNAPPY_SOURCE_FILES}
+    ${ZSTD_SOURCE_FILES}
     ${OPENSSL_SOURCE_FILES}
     ${SRC}/io/compress/zlib/ZlibCompressor.c
     ${SRC}/io/compress/zlib/ZlibDecompressor.c

+ 1 - 0
hadoop-common-project/hadoop-common/src/config.h.cmake

@@ -21,6 +21,7 @@
 #cmakedefine HADOOP_ZLIB_LIBRARY "@HADOOP_ZLIB_LIBRARY@"
 #cmakedefine HADOOP_BZIP2_LIBRARY "@HADOOP_BZIP2_LIBRARY@"
 #cmakedefine HADOOP_SNAPPY_LIBRARY "@HADOOP_SNAPPY_LIBRARY@"
+#cmakedefine HADOOP_ZSTD_LIBRARY "@HADOOP_ZSTD_LIBRARY@"
 #cmakedefine HADOOP_OPENSSL_LIBRARY "@HADOOP_OPENSSL_LIBRARY@"
 #cmakedefine HADOOP_ISAL_LIBRARY "@HADOOP_ISAL_LIBRARY@"
 #cmakedefine HAVE_SYNC_FILE_RANGE

+ 12 - 1
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -183,13 +183,24 @@ else
   exit 1
 fi
 
-if [ $# = 0 ]; then
+# now that we have support code, let's abs MYNAME so we can use it later
+MYNAME=$(hadoop_abs "${MYNAME}")
+
+if [[ $# = 0 ]]; then
   hadoop_exit_with_usage 1
 fi
 
 HADOOP_SUBCMD=$1
 shift
 
+if hadoop_need_reexec hadoop "${HADOOP_SUBCMD}"; then
+  hadoop_uservar_su hadoop "${HADOOP_SUBCMD}" \
+    "${MYNAME}" \
+    "--reexec" \
+    "${HADOOP_USER_PARAMS[@]}"
+  exit $?
+fi
+
 hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
 
 HADOOP_SUBCMD_ARGS=("$@")

+ 240 - 26
hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh

@@ -41,6 +41,42 @@ function hadoop_debug
   fi
 }
 
+## @description  Given a filename or dir, return the absolute version of it
+## @description  This works as an alternative to readlink, which isn't
+## @description  portable.
+## @audience     public
+## @stability    stable
+## @param        fsobj
+## @replaceable  no
+## @return       0 success
+## @return       1 failure
+## @return       stdout abspath
+function hadoop_abs
+{
+  declare obj=$1
+  declare dir
+  declare fn
+  declare dirret
+
+  if [[ ! -e ${obj} ]]; then
+    return 1
+  elif [[ -d ${obj} ]]; then
+    dir=${obj}
+  else
+    dir=$(dirname -- "${obj}")
+    fn=$(basename -- "${obj}")
+    fn="/${fn}"
+  fi
+
+  dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P)
+  dirret=$?
+  if [[ ${dirret} = 0 ]]; then
+    echo "${dir}${fn}"
+    return 0
+  fi
+  return 1
+}
+
 ## @description  Given variable $1 delete $2 from it
 ## @audience     public
 ## @stability    stable
@@ -79,6 +115,101 @@ function hadoop_verify_entry
   [[ ${!1} =~ \ ${2}\  ]]
 }
 
+## @description  Check if we are running with privilege.
+## @description  By default, this implementation looks for
+## @description  EUID=0.  For OSes that have true privilege
+## @description  separation, this should be something more complex.
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @return       1 = no priv
+## @return       0 = priv
+function hadoop_privilege_check
+{
+  [[ "${EUID}" = 0 ]]
+}
+
+## @description  Execute a command via su when running as root
+## @description  if the given user is found, or fail if not.
+## @description  Otherwise just run the command.
+## @description  (This is intended to be used by the
+## @description  start-*/stop-* scripts.)
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @param        user
+## @param        commandstring
+## @return       exitstatus
+function hadoop_su
+{
+  declare user=$1
+  shift
+  declare idret
+
+  if hadoop_privilege_check; then
+    id -u "${user}" >/dev/null 2>&1
+    idret=$?
+    if [[ ${idret} != 0 ]]; then
+      hadoop_error "ERROR: Refusing to run as root: ${user} account is not found. Aborting."
+      return 1
+    else
+      su -l "${user}" -- "$@"
+    fi
+  else
+    "$@"
+  fi
+}
+
+## @description  Execute a command via su when running as root
+## @description  with extra support for commands that might
+## @description  legitimately start as root (e.g., datanode)
+## @description  (This is intended to
+## @description  be used by the start-*/stop-* scripts.)
+## @audience     private
+## @stability    evolving
+## @replaceable  no
+## @param        user
+## @param        commandstring
+## @return       exitstatus
+function hadoop_uservar_su
+{
+
+  ## startup matrix:
+  #
+  # if $EUID != 0, then exec
+  # if $EUID = 0, then
+  #    if (program)_(command)_USER is defined, call hadoop_su to exec
+  #    if (program)_(command)_USER is not defined, error
+  #
+  # For secure daemons, this means both the secure and insecure env vars need to be
+  # defined.  e.g., HDFS_DATANODE_USER=root HDFS_DATANODE_SECURE_USER=hdfs
+  # This function will pick up the "normal" var, switch to that user, then
+  # execute the command which will then pick up the "secure" version.
+  #
+
+  declare program=$1
+  declare command=$2
+  shift 2
+
+  declare uprogram
+  declare ucommand
+  declare uvar
+
+  if hadoop_privilege_check; then
+    uvar=$(hadoop_get_verify_uservar "${program}" "${command}")
+
+    if [[ -n "${!uvar}" ]]; then
+      hadoop_su "${!uvar}" "$@"
+    else
+      hadoop_error "ERROR: Attempting to launch ${program} ${command} as root"
+      hadoop_error "ERROR: but there is no ${uvar} defined. Aborting launch."
+      return 1
+    fi
+  else
+    "$@"
+  fi
+}
+
 ## @description  Add a subcommand to the usage output
 ## @audience     private
 ## @stability    evolving
@@ -262,6 +393,39 @@ function hadoop_deprecate_envvar
   fi
 }
 
+## @description  Declare that `var` is being used and print its value.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        var
+function hadoop_using_envvar
+{
+  local var=$1
+  local val=${!var}
+
+  if [[ -n "${val}" ]]; then
+    hadoop_debug "${var} = ${val}"
+  fi
+}
+
+## @description  Create the directory 'dir'.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        dir
+function hadoop_mkdir
+{
+  local dir=$1
+
+  if [[ ! -w "${dir}" ]] && [[ ! -d "${dir}" ]]; then
+    hadoop_error "WARNING: ${dir} does not exist. Creating."
+    if ! mkdir -p "${dir}"; then
+      hadoop_error "ERROR: Unable to create ${dir}. Aborting."
+      exit 1
+    fi
+  fi
+}
+
 ## @description  Bootstraps the Hadoop shell environment
 ## @audience     private
 ## @stability    evolving
@@ -310,6 +474,9 @@ function hadoop_bootstrap
   # daemonization
   HADOOP_SUBCMD_SUPPORTDAEMONIZATION=false
 
+  # by default, we have not been self-re-execed
+  HADOOP_REEXECED_CMD=false
+
   # shellcheck disable=SC2034
   HADOOP_SUBCMD_SECURESERVICE=false
 
@@ -591,9 +758,10 @@ function hadoop_basic_init
   fi
 
   # if for some reason the shell doesn't have $USER defined
-  # let's define it as 'hadoop'
+  # (e.g., ssh'd in to execute a command)
+  # let's get the effective username and use that
+  USER=${USER:-$(id -nu)}
   HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
-  HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-hadoop}
   HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_HOME}/logs"}
   HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log}
   HADOOP_LOGLEVEL=${HADOOP_LOGLEVEL:-INFO}
@@ -1367,8 +1535,7 @@ function hadoop_verify_secure_prereq
   # and you are using pfexec, you'll probably want to change
   # this.
 
-  # ${EUID} comes from the shell itself!
-  if [[ "${EUID}" -ne 0 ]] && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
+  if ! hadoop_privilege_check && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
     hadoop_error "ERROR: You must be a privileged user in order to run a secure service."
     exit 1
   else
@@ -1396,14 +1563,7 @@ function hadoop_verify_piddir
     hadoop_error "No pid directory defined."
     exit 1
   fi
-  if [[ ! -w "${HADOOP_PID_DIR}" ]] && [[ ! -d "${HADOOP_PID_DIR}" ]]; then
-    hadoop_error "WARNING: ${HADOOP_PID_DIR} does not exist. Creating."
-    mkdir -p "${HADOOP_PID_DIR}" > /dev/null 2>&1
-    if [[ $? -gt 0 ]]; then
-      hadoop_error "ERROR: Unable to create ${HADOOP_PID_DIR}. Aborting."
-      exit 1
-    fi
-  fi
+  hadoop_mkdir "${HADOOP_PID_DIR}"
   touch "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1
   if [[ $? -gt 0 ]]; then
     hadoop_error "ERROR: Unable to write in ${HADOOP_PID_DIR}. Aborting."
@@ -1421,14 +1581,7 @@ function hadoop_verify_logdir
     hadoop_error "No log directory defined."
     exit 1
   fi
-  if [[ ! -w "${HADOOP_LOG_DIR}" ]] && [[ ! -d "${HADOOP_LOG_DIR}" ]]; then
-    hadoop_error "WARNING: ${HADOOP_LOG_DIR} does not exist. Creating."
-    mkdir -p "${HADOOP_LOG_DIR}" > /dev/null 2>&1
-    if [[ $? -gt 0 ]]; then
-      hadoop_error "ERROR: Unable to create ${HADOOP_LOG_DIR}. Aborting."
-      exit 1
-    fi
-  fi
+  hadoop_mkdir "${HADOOP_LOG_DIR}"
   touch "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
   if [[ $? -gt 0 ]]; then
     hadoop_error "ERROR: Unable to write in ${HADOOP_LOG_DIR}. Aborting."
@@ -1975,20 +2128,18 @@ function hadoop_secure_daemon_handler
   esac
 }
 
-## @description  Verify that ${USER} is allowed to execute the
-## @description  given subcommand.
+## @description  Get the environment variable used to validate users
 ## @audience     public
 ## @stability    stable
 ## @replaceable  yes
 ## @param        subcommand
-## @return       will exit on failure conditions
-function hadoop_verify_user
+## @return       string
+function hadoop_get_verify_uservar
 {
   declare program=$1
   declare command=$2
   declare uprogram
   declare ucommand
-  declare uvar
 
   if [[ -z "${BASH_VERSINFO[0]}" ]] \
      || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
@@ -1999,7 +2150,25 @@ function hadoop_verify_user
     ucommand=${command^^}
   fi
 
-  uvar="${uprogram}_${ucommand}_USER"
+  echo "${uprogram}_${ucommand}_USER"
+}
+
+## @description  Verify that ${USER} is allowed to execute the
+## @description  given subcommand.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        command
+## @param        subcommand
+## @return       return 0 on success
+## @return       exit 1 on failure
+function hadoop_verify_user
+{
+  declare program=$1
+  declare command=$2
+  declare uvar
+
+  uvar=$(hadoop_get_verify_uservar "${program}" "${command}")
 
   if [[ -n ${!uvar} ]]; then
     if [[ ${!uvar} !=  "${USER}" ]]; then
@@ -2007,6 +2176,42 @@ function hadoop_verify_user
       exit 1
     fi
   fi
+  return 0
+}
+
+## @description  Check whether the given subcommand needs to be
+## @description  re-executed as a different user.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        subcommand
+## @return       1 on no re-exec needed
+## @return       0 on need to re-exec
+function hadoop_need_reexec
+{
+  declare program=$1
+  declare command=$2
+  declare uvar
+
+  # we've already been re-execed, bail
+
+  if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then
+    return 1
+  fi
+
+  # if we have privilege, and the _USER is defined, and _USER is
+  # set to someone who isn't us, then yes, we should re-exec.
+  # otherwise no, don't re-exec and let the system deal with it.
+
+  if hadoop_privilege_check; then
+    uvar=$(hadoop_get_verify_uservar "${program}" "${command}")
+    if [[ -n ${!uvar} ]]; then
+      if [[ ${!uvar} !=  "${USER}" ]]; then
+        return 0
+      fi
+    fi
+  fi
+  return 1
 }
 
 ## @description  Add custom (program)_(command)_OPTS to HADOOP_OPTS.
@@ -2209,6 +2414,15 @@ function hadoop_parse_args
         shift
         ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
       ;;
+      --reexec)
+        shift
+        if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then
+          hadoop_error "ERROR: re-exec fork bomb prevention: --reexec already called"
+          exit 1
+        fi
+        HADOOP_REEXECED_CMD=true
+        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
+      ;;
       --workers)
         shift
         # shellcheck disable=SC2034

+ 18 - 5
hadoop-common-project/hadoop-common/src/main/bin/start-all.sh

@@ -15,10 +15,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-echo "This script is deprecated. Use start-dfs.sh and start-yarn.sh instead."
-exit 1
-
-
+## @description  catch the ctrl-c
+## @audience     private
+## @stability    evolving
+## @replaceable  no
+function hadoop_abort_startall()
+{
+  exit 1
+}
 
 # let's locate libexec...
 if [[ -n "${HADOOP_HOME}" ]]; then
@@ -38,6 +42,16 @@ else
   echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
   exit 1
 fi
+
+if ! hadoop_privilege_check; then
+  trap hadoop_abort_startall INT
+  hadoop_error "WARNING: Attempting to start all Apache Hadoop daemons as ${USER} in 10 seconds."
+  hadoop_error "WARNING: This is not a recommended production deployment configuration."
+  hadoop_error "WARNING: Use CTRL-C to abort."
+  sleep 10
+  trap - INT
+fi
+
 # start hdfs daemons if hdfs is present
 if [[ -f "${HADOOP_HDFS_HOME}/sbin/start-dfs.sh" ]]; then
   "${HADOOP_HDFS_HOME}/sbin/start-dfs.sh" --config "${HADOOP_CONF_DIR}"
@@ -49,4 +63,3 @@ if [[ -f "${HADOOP_YARN_HOME}/sbin/start-yarn.sh" ]]; then
 fi
 
 
-

+ 16 - 3
hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh

@@ -15,12 +15,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+## @description  catch the ctrl-c
+## @audience     private
+## @stability    evolving
+## @replaceable  no
+function hadoop_abort_stopall()
+{
+  exit 1
+}
 
 # Stop all hadoop daemons.  Run this on master node.
 
-echo "This script is deprecated. Use stop-dfs.sh and stop-yarn.sh instead."
-exit 1
-
 # let's locate libexec...
 if [[ -n "${HADOOP_HOME}" ]]; then
   HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
@@ -40,6 +45,14 @@ else
   exit 1
 fi
 
+if ! hadoop_privilege_check; then
+  trap hadoop_abort_stopall INT
+  hadoop_error "WARNING: Stopping all Apache Hadoop daemons as ${USER} in 10 seconds."
+  hadoop_error "WARNING: Use CTRL-C to abort."
+  sleep 10
+  trap - INT
+fi
+
 # stop hdfs daemons if hdfs is present
 if [[ -f "${HADOOP_HDFS_HOME}/sbin/stop-dfs.sh" ]]; then
   "${HADOOP_HDFS_HOME}/sbin/stop-dfs.sh" --config "${HADOOP_CONF_DIR}"

+ 16 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.conf;
 
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
 import com.google.common.annotations.VisibleForTesting;
 
 import java.io.BufferedInputStream;
@@ -91,8 +93,6 @@ import org.apache.hadoop.security.alias.CredentialProviderFactory;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringInterner;
 import org.apache.hadoop.util.StringUtils;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.JsonGenerator;
 import org.w3c.dom.Attr;
 import org.w3c.dom.DOMException;
 import org.w3c.dom.Document;
@@ -1887,6 +1887,18 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       return result.toString();
     }
 
+    /**
+     * Get range start for the first integer range.
+     * @return range start.
+     */
+    public int getRangeStart() {
+      if (ranges == null || ranges.isEmpty()) {
+        return -1;
+      }
+      Range r = ranges.get(0);
+      return r.start;
+    }
+
     @Override
     public Iterator<Integer> iterator() {
       return new RangeNumberIterator(ranges);
@@ -3028,7 +3040,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
           propertyName + " not found");
     } else {
       JsonFactory dumpFactory = new JsonFactory();
-      JsonGenerator dumpGenerator = dumpFactory.createJsonGenerator(out);
+      JsonGenerator dumpGenerator = dumpFactory.createGenerator(out);
       dumpGenerator.writeStartObject();
       dumpGenerator.writeFieldName("property");
       appendJSONProperty(dumpGenerator, config, propertyName);
@@ -3066,7 +3078,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   public static void dumpConfiguration(Configuration config,
       Writer out) throws IOException {
     JsonFactory dumpFactory = new JsonFactory();
-    JsonGenerator dumpGenerator = dumpFactory.createJsonGenerator(out);
+    JsonGenerator dumpGenerator = dumpFactory.createGenerator(out);
     dumpGenerator.writeStartObject();
     dumpGenerator.writeFieldName("properties");
     dumpGenerator.writeStartArray();
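
The hunks above move Configuration from the retired org.codehaus.jackson (Jackson 1) packages to com.fasterxml.jackson, where createJsonGenerator(Writer) becomes createGenerator(Writer). A minimal, self-contained sketch of the Jackson 2 call pattern that dumpConfiguration now relies on:

    import java.io.IOException;
    import java.io.StringWriter;

    import com.fasterxml.jackson.core.JsonFactory;
    import com.fasterxml.jackson.core.JsonGenerator;

    public class JacksonTwoSketch {
      public static void main(String[] args) throws IOException {
        StringWriter out = new StringWriter();
        JsonFactory factory = new JsonFactory();
        // Jackson 2 uses createGenerator(); Jackson 1's createJsonGenerator() is gone.
        JsonGenerator gen = factory.createGenerator(out);
        gen.writeStartObject();
        gen.writeStringField("property", "value");
        gen.writeEndObject();
        gen.close();
        System.out.println(out); // {"property":"value"}
      }
    }
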

+ 113 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfigurationWithLogging.java

@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.conf;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Logs access to {@link Configuration}.
+ * Sensitive data will be redacted.
+ */
+@InterfaceAudience.Private
+public class ConfigurationWithLogging extends Configuration {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ConfigurationWithLogging.class);
+
+  private final Logger log;
+  private final ConfigRedactor redactor;
+
+  public ConfigurationWithLogging(Configuration conf) {
+    super(conf);
+    log = LOG;
+    redactor = new ConfigRedactor(conf);
+  }
+
+  /**
+   * @see Configuration#get(String).
+   */
+  @Override
+  public String get(String name) {
+    String value = super.get(name);
+    log.info("Got {} = '{}'", name, redactor.redact(name, value));
+    return value;
+  }
+
+  /**
+   * @see Configuration#get(String, String).
+   */
+  @Override
+  public String get(String name, String defaultValue) {
+    String value = super.get(name, defaultValue);
+    log.info("Got {} = '{}' (default '{}')", name,
+        redactor.redact(name, value), redactor.redact(name, defaultValue));
+    return value;
+  }
+
+  /**
+   * @see Configuration#getBoolean(String, boolean).
+   */
+  @Override
+  public boolean getBoolean(String name, boolean defaultValue) {
+    boolean value = super.getBoolean(name, defaultValue);
+    log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
+    return value;
+  }
+
+  /**
+   * @see Configuration#getFloat(String, float).
+   */
+  @Override
+  public float getFloat(String name, float defaultValue) {
+    float value = super.getFloat(name, defaultValue);
+    log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
+    return value;
+  }
+
+  /**
+   * @see Configuration#getInt(String, int).
+   */
+  @Override
+  public int getInt(String name, int defaultValue) {
+    int value = super.getInt(name, defaultValue);
+    log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
+    return value;
+  }
+
+  /**
+   * @see Configuration#getLong(String, long).
+   */
+  @Override
+  public long getLong(String name, long defaultValue) {
+    long value = super.getLong(name, defaultValue);
+    log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
+    return value;
+  }
+
+  /**
+   * @see Configuration#set(String, String, String).
+   */
+  @Override
+  public void set(String name, String value, String source) {
+    log.info("Set {} to '{}'{}", name, redactor.redact(name, value),
+        source == null ? "" : " from " + source);
+    super.set(name, value, source);
+  }
+}
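
ConfigurationWithLogging above simply wraps an existing Configuration so every read is logged with sensitive values passed through ConfigRedactor. A short usage sketch; the property key is only an illustrative placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.ConfigurationWithLogging;

    public class LoggedConfSketch {
      public static void main(String[] args) {
        Configuration base = new Configuration();
        // Wrap an existing Configuration; every get() is logged at INFO,
        // with values run through ConfigRedactor first.
        Configuration conf = new ConfigurationWithLogging(base);
        String fsUri = conf.get("fs.defaultFS", "file:///"); // illustrative key
        System.out.println(fsUri);
      }
    }
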

+ 11 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/CachingKeyProvider.java

@@ -141,8 +141,7 @@ public class CachingKeyProvider extends
   public KeyVersion rollNewVersion(String name, byte[] material)
       throws IOException {
     KeyVersion key = getKeyProvider().rollNewVersion(name, material);
-    getExtension().currentKeyCache.invalidate(name);
-    getExtension().keyMetadataCache.invalidate(name);
+    invalidateCache(name);
     return key;
   }
 
@@ -150,9 +149,18 @@ public class CachingKeyProvider extends
   public KeyVersion rollNewVersion(String name)
       throws NoSuchAlgorithmException, IOException {
     KeyVersion key = getKeyProvider().rollNewVersion(name);
+    invalidateCache(name);
+    return key;
+  }
+
+  @Override
+  public void invalidateCache(String name) throws IOException {
+    getKeyProvider().invalidateCache(name);
     getExtension().currentKeyCache.invalidate(name);
     getExtension().keyMetadataCache.invalidate(name);
-    return key;
+    // invalidating all key versions as we don't know
+    // which ones belonged to the rolled or deleted key
+    getExtension().keyVersionCache.invalidateAll();
   }
 
   @Override
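
With the change above, rolling a key through CachingKeyProvider now funnels into invalidateCache(), which also clears the whole key-version cache because the individual version names of the rolled key are unknown. A hedged sketch of relying on that behavior; the CachingKeyProvider constructor signature (provider plus two cache-timeout values in milliseconds) is assumed here and may differ:

    import java.io.IOException;
    import java.security.NoSuchAlgorithmException;

    import org.apache.hadoop.crypto.key.CachingKeyProvider;
    import org.apache.hadoop.crypto.key.KeyProvider;

    public class CacheInvalidationSketch {
      // Roll a key and read it back through the cache.
      static KeyProvider.KeyVersion rollAndRead(KeyProvider backing, String name)
          throws IOException, NoSuchAlgorithmException {
        // Assumed constructor: (provider, keyTimeoutMillis, currentKeyTimeoutMillis).
        KeyProvider cached =
            new CachingKeyProvider(backing, 10 * 60 * 1000, 10 * 60 * 1000);
        cached.rollNewVersion(name);       // now also calls invalidateCache(name)
        return cached.getCurrentKey(name); // sees the new version, not a stale entry
      }
    }
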

+ 14 - 17
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java

@@ -36,6 +36,7 @@ import com.google.common.annotations.VisibleForTesting;
 
 import javax.crypto.spec.SecretKeySpec;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
@@ -167,9 +168,9 @@ public class JavaKeyStoreProvider extends KeyProvider {
       // rewrite the keystore in flush()
       permissions = perm;
     } catch (KeyStoreException e) {
-      throw new IOException("Can't create keystore", e);
+      throw new IOException("Can't create keystore: " + e, e);
     } catch (GeneralSecurityException e) {
-      throw new IOException("Can't load keystore " + path, e);
+      throw new IOException("Can't load keystore " + path + " : " + e , e);
     }
   }
 
@@ -190,9 +191,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
     try {
       perm = loadFromPath(path, password);
       // Remove _OLD if exists
-      if (fs.exists(backupPath)) {
-        fs.delete(backupPath, true);
-      }
+      fs.delete(backupPath, true);
       LOG.debug("KeyStore loaded successfully !!");
     } catch (IOException ioe) {
       // If file is corrupted for some reason other than
@@ -260,9 +259,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
         LOG.debug(String.format("KeyStore loaded successfully from '%s'!!",
             pathToLoad));
       }
-      if (fs.exists(pathToDelete)) {
-        fs.delete(pathToDelete, true);
-      }
+      fs.delete(pathToDelete, true);
     } catch (IOException e) {
       // Check for password issue : don't want to trash file due
       // to wrong password
@@ -539,13 +536,15 @@ public class JavaKeyStoreProvider extends KeyProvider {
         return;
       }
       // Might exist if a backup has been restored etc.
-      if (fs.exists(newPath)) {
+      try {
         renameOrFail(newPath, new Path(newPath.toString()
             + "_ORPHANED_" + System.currentTimeMillis()));
+      } catch (FileNotFoundException ignored) {
       }
-      if (fs.exists(oldPath)) {
+      try {
         renameOrFail(oldPath, new Path(oldPath.toString()
             + "_ORPHANED_" + System.currentTimeMillis()));
+      } catch (FileNotFoundException ignored) {
       }
       // put all of the updates into the keystore
       for(Map.Entry<String, Metadata> entry: cache.entrySet()) {
@@ -601,9 +600,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
     // Rename _NEW to CURRENT
     renameOrFail(newPath, path);
     // Delete _OLD
-    if (fs.exists(oldPath)) {
-      fs.delete(oldPath, true);
-    }
+    fs.delete(oldPath, true);
   }
 
   protected void writeToNew(Path newPath) throws IOException {
@@ -623,12 +620,12 @@ public class JavaKeyStoreProvider extends KeyProvider {
 
   protected boolean backupToOld(Path oldPath)
       throws IOException {
-    boolean fileExisted = false;
-    if (fs.exists(path)) {
+    try {
       renameOrFail(path, oldPath);
-      fileExisted = true;
+      return true;
+    } catch (FileNotFoundException e) {
+      return false;
     }
-    return fileExisted;
   }
 
   private void revertFromOld(Path oldPath, boolean fileExisted)

+ 40 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java

@@ -33,6 +33,8 @@ import java.util.Map;
 
 import com.google.gson.stream.JsonReader;
 import com.google.gson.stream.JsonWriter;
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -86,6 +88,7 @@ public abstract class KeyProvider {
       return material;
     }
 
+    @Override
     public String toString() {
       StringBuilder buf = new StringBuilder();
       buf.append("key(");
@@ -105,6 +108,31 @@ public abstract class KeyProvider {
       }
       return buf.toString();
     }
+
+    @Override
+    public boolean equals(Object rhs) {
+      if (this == rhs) {
+        return true;
+      }
+      if (rhs == null || getClass() != rhs.getClass()) {
+        return false;
+      }
+      final KeyVersion kv = (KeyVersion) rhs;
+      return new EqualsBuilder().
+          append(name, kv.name).
+          append(versionName, kv.versionName).
+          append(material, kv.material).
+          isEquals();
+    }
+
+    @Override
+    public int hashCode() {
+      return new HashCodeBuilder().
+          append(name).
+          append(versionName).
+          append(material).
+          toHashCode();
+    }
   }
 
   /**
@@ -565,6 +593,18 @@ public abstract class KeyProvider {
     return rollNewVersion(name, material);
   }
 
+  /**
+   * Can be used by implementing classes to invalidate their caches. This can
+   * be called after rollNewVersion to guarantee that subsequent reads return
+   * the new version of the given key.
+   *
+   * @param name the basename of the key
+   * @throws IOException
+   */
+  public void invalidateCache(String name) throws IOException {
+    // NOP
+  }
+
   /**
    * Ensures that any changes to the keys are written to persistent store.
    * @throws IOException
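
invalidateCache(String) added above is a no-op on the base KeyProvider and is overridden by the caching layers elsewhere in this commit. A minimal sketch of pairing it explicitly with a roll, using only the methods shown in this class:

    import java.io.IOException;
    import java.security.NoSuchAlgorithmException;

    import org.apache.hadoop.crypto.key.KeyProvider;

    public final class KeyRollSketch {
      private KeyRollSketch() {}

      // Roll a key, explicitly invalidate any caches in front of it, then flush.
      // For the base KeyProvider, invalidateCache() is a no-op; caching providers
      // override it to drop stale versions.
      public static void rollAndInvalidate(KeyProvider provider, String name)
          throws IOException, NoSuchAlgorithmException {
        provider.rollNewVersion(name);
        provider.invalidateCache(name);
        provider.flush();
      }
    }
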

+ 84 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java

@@ -188,8 +188,8 @@ public class KeyProviderCryptoExtension extends
     public void drain(String keyName);
 
     /**
-     * Generates a key material and encrypts it using the given key version name
-     * and initialization vector. The generated key material is of the same
+     * Generates key material and encrypts it using the given key name.
+     * The generated key material is of the same
      * length as the <code>KeyVersion</code> material of the latest key version
      * of the key and is encrypted using the same cipher.
      * <p/>
@@ -210,7 +210,7 @@ public class KeyProviderCryptoExtension extends
         GeneralSecurityException;
 
     /**
-     * Decrypts an encrypted byte[] key material using the given a key version
+     * Decrypts an encrypted byte[] key material using the given key version
      * name and initialization vector.
      * 
      * @param encryptedKeyVersion
@@ -227,6 +227,26 @@ public class KeyProviderCryptoExtension extends
     public KeyVersion decryptEncryptedKey(
         EncryptedKeyVersion encryptedKeyVersion) throws IOException,
         GeneralSecurityException;
+
+    /**
+     * Re-encrypts an encrypted key version, using its initialization vector
+     * and key material, but with the latest key version name of its key name
+     * in the key provider.
+     * <p>
+     * If the latest key version name in the provider is the
+     * same as the one that encrypted the passed-in encrypted key version, the same
+     * encrypted key version is returned.
+     * <p>
+     * NOTE: The generated key is not stored by the <code>KeyProvider</code>
+     *
+     * @param  ekv The EncryptedKeyVersion containing keyVersionName and IV.
+     * @return     The re-encrypted EncryptedKeyVersion.
+     * @throws IOException If the key material could not be re-encrypted.
+     * @throws GeneralSecurityException If the key material could not be
+     *                            re-encrypted because of a cryptographic issue.
+     */
+    EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion ekv)
+        throws IOException, GeneralSecurityException;
   }
 
   private static class DefaultCryptoExtension implements CryptoExtension {
@@ -258,24 +278,55 @@ public class KeyProviderCryptoExtension extends
       cc.generateSecureRandom(newKey);
       final byte[] iv = new byte[cc.getCipherSuite().getAlgorithmBlockSize()];
       cc.generateSecureRandom(iv);
+      Encryptor encryptor = cc.createEncryptor();
+      return generateEncryptedKey(encryptor, encryptionKey, newKey, iv);
+    }
+
+    private EncryptedKeyVersion generateEncryptedKey(final Encryptor encryptor,
+        final KeyVersion encryptionKey, final byte[] key, final byte[] iv)
+        throws IOException, GeneralSecurityException {
       // Encryption key IV is derived from new key's IV
       final byte[] encryptionIV = EncryptedKeyVersion.deriveIV(iv);
-      Encryptor encryptor = cc.createEncryptor();
       encryptor.init(encryptionKey.getMaterial(), encryptionIV);
-      int keyLen = newKey.length;
+      final int keyLen = key.length;
       ByteBuffer bbIn = ByteBuffer.allocateDirect(keyLen);
       ByteBuffer bbOut = ByteBuffer.allocateDirect(keyLen);
-      bbIn.put(newKey);
+      bbIn.put(key);
       bbIn.flip();
       encryptor.encrypt(bbIn, bbOut);
       bbOut.flip();
       byte[] encryptedKey = new byte[keyLen];
-      bbOut.get(encryptedKey);    
-      return new EncryptedKeyVersion(encryptionKeyName,
+      bbOut.get(encryptedKey);
+      return new EncryptedKeyVersion(encryptionKey.getName(),
           encryptionKey.getVersionName(), iv,
           new KeyVersion(encryptionKey.getName(), EEK, encryptedKey));
     }
 
+    @Override
+    public EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion ekv)
+        throws IOException, GeneralSecurityException {
+      final String ekName = ekv.getEncryptionKeyName();
+      final KeyVersion ekNow = keyProvider.getCurrentKey(ekName);
+      Preconditions
+          .checkNotNull(ekNow, "KeyVersion name '%s' does not exist", ekName);
+      Preconditions.checkArgument(ekv.getEncryptedKeyVersion().getVersionName()
+              .equals(KeyProviderCryptoExtension.EEK),
+          "encryptedKey version name must be '%s', is '%s'",
+          KeyProviderCryptoExtension.EEK,
+          ekv.getEncryptedKeyVersion().getVersionName());
+
+      if (ekv.getEncryptedKeyVersion().equals(ekNow)) {
+        // no-op if same key version
+        return ekv;
+      }
+
+      final KeyVersion dek = decryptEncryptedKey(ekv);
+      final CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf());
+      final Encryptor encryptor = cc.createEncryptor();
+      return generateEncryptedKey(encryptor, ekNow, dek.getMaterial(),
+          ekv.getEncryptedKeyIv());
+    }
+
     @Override
     public KeyVersion decryptEncryptedKey(
         EncryptedKeyVersion encryptedKeyVersion) throws IOException,
@@ -388,6 +439,28 @@ public class KeyProviderCryptoExtension extends
     return getExtension().decryptEncryptedKey(encryptedKey);
   }
 
+  /**
+   * Re-encrypts an encrypted key version, using its initialization vector
+   * and key material, but with the latest key version name of its key name
+   * in the key provider.
+   * <p>
+   * If the latest key version name in the provider is the
+   * same as the one that encrypted the passed-in encrypted key version, the same
+   * encrypted key version is returned.
+   * <p>
+   * NOTE: The generated key is not stored by the <code>KeyProvider</code>
+   *
+   * @param  ekv The EncryptedKeyVersion containing keyVersionName and IV.
+   * @return     The re-encrypted EncryptedKeyVersion.
+   * @throws IOException If the key material could not be re-encrypted
+   * @throws GeneralSecurityException If the key material could not be
+   *                            re-encrypted because of a cryptographic issue.
+   */
+  public EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion ekv)
+      throws IOException, GeneralSecurityException {
+    return getExtension().reencryptEncryptedKey(ekv);
+  }
+
   /**
    * Creates a <code>KeyProviderCryptoExtension</code> using a given
    * {@link KeyProvider}.
@@ -427,8 +500,9 @@ public class KeyProviderCryptoExtension extends
 
   @Override
   public void close() throws IOException {
-    if (getKeyProvider() != null) {
-      getKeyProvider().close();
+    KeyProvider provider = getKeyProvider();
+    if (provider != null && provider != this) {
+      provider.close();
     }
   }
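
reencryptEncryptedKey re-wraps an EDEK with the current version of its encryption key, short-circuiting when the EDEK is already up to date. A hedged sketch of the roll-then-re-encrypt flow from a caller's point of view; the method and helper names used are those visible in this diff, while the overall flow is illustrative rather than prescribed:

    import java.io.IOException;
    import java.security.GeneralSecurityException;

    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;

    public class ReencryptSketch {
      // Re-wrap an existing EDEK with the latest version of its encryption key.
      static EncryptedKeyVersion rollAndReencrypt(KeyProvider provider,
          String keyName, EncryptedKeyVersion edek)
          throws IOException, GeneralSecurityException {
        KeyProviderCryptoExtension kpce =
            KeyProviderCryptoExtension.createKeyProviderCryptoExtension(provider);
        kpce.rollNewVersion(keyName);   // create a new key version
        kpce.invalidateCache(keyName);  // make the roll visible through any caches
        // Returns the same object if the EDEK was already encrypted with the
        // latest key version; otherwise decrypts and re-encrypts the DEK.
        return kpce.reencryptEncryptedKey(edek);
      }
    }
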
 

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderExtension.java

@@ -117,6 +117,11 @@ public abstract class KeyProviderExtension
     return keyProvider.rollNewVersion(name, material);
   }
 
+  @Override
+  public void invalidateCache(String name) throws IOException {
+    keyProvider.invalidateCache(name);
+  }
+
   @Override
   public void flush() throws IOException {
     keyProvider.flush();

+ 66 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java

@@ -46,7 +46,8 @@ public class KeyShell extends CommandShell {
       "   [" + CreateCommand.USAGE + "]\n" +
       "   [" + RollCommand.USAGE + "]\n" +
       "   [" + DeleteCommand.USAGE + "]\n" +
-      "   [" + ListCommand.USAGE + "]\n";
+      "   [" + ListCommand.USAGE + "]\n" +
+      "   [" + InvalidateCacheCommand.USAGE + "]\n";
   private static final String LIST_METADATA = "keyShell.list.metadata";
   @VisibleForTesting
   public static final String NO_VALID_PROVIDERS =
@@ -70,6 +71,7 @@ public class KeyShell extends CommandShell {
    * % hadoop key roll keyName [-provider providerPath]
    * % hadoop key list [-provider providerPath]
    * % hadoop key delete keyName [-provider providerPath] [-i]
+   * % hadoop key invalidateCache keyName [-provider providerPath]
    * </pre>
    * @param args Command line arguments.
    * @return 0 on success, 1 on failure.
@@ -111,6 +113,15 @@ public class KeyShell extends CommandShell {
         }
       } else if ("list".equals(args[i])) {
         setSubCommand(new ListCommand());
+      } else if ("invalidateCache".equals(args[i])) {
+        String keyName = "-help";
+        if (moreTokens) {
+          keyName = args[++i];
+        }
+        setSubCommand(new InvalidateCacheCommand(keyName));
+        if ("-help".equals(keyName)) {
+          return 1;
+        }
       } else if ("-size".equals(args[i]) && moreTokens) {
         options.setBitLength(Integer.parseInt(args[++i]));
       } else if ("-cipher".equals(args[i]) && moreTokens) {
@@ -168,6 +179,9 @@ public class KeyShell extends CommandShell {
     sbuf.append(DeleteCommand.USAGE + ":\n\n" + DeleteCommand.DESC + "\n");
     sbuf.append(banner + "\n");
     sbuf.append(ListCommand.USAGE + ":\n\n" + ListCommand.DESC + "\n");
+    sbuf.append(banner + "\n");
+    sbuf.append(InvalidateCacheCommand.USAGE + ":\n\n"
+        + InvalidateCacheCommand.DESC + "\n");
     return sbuf.toString();
   }
 
@@ -466,6 +480,57 @@ public class KeyShell extends CommandShell {
     }
   }
 
+  private class InvalidateCacheCommand extends Command {
+    public static final String USAGE =
+        "invalidateCache <keyname> [-provider <provider>] [-help]";
+    public static final String DESC =
+        "The invalidateCache subcommand invalidates the cached key versions\n"
+            + "of the specified key, on the provider indicated using the"
+            + " -provider argument.\n";
+
+    private String keyName = null;
+
+    InvalidateCacheCommand(String keyName) {
+      this.keyName = keyName;
+    }
+
+    public boolean validate() {
+      boolean rc = true;
+      provider = getKeyProvider();
+      if (provider == null) {
+        getOut().println("Invalid provider.");
+        rc = false;
+      }
+      if (keyName == null) {
+        getOut().println("Please provide a <keyname>.\n" +
+            "See the usage description by using -help.");
+        rc = false;
+      }
+      return rc;
+    }
+
+    public void execute() throws NoSuchAlgorithmException, IOException {
+      try {
+        warnIfTransientProvider();
+        getOut().println("Invalidating cache on KeyProvider: "
+            + provider + "\n  for key name: " + keyName);
+        provider.invalidateCache(keyName);
+        getOut().println("Cached keyversions of " + keyName
+            + " has been successfully invalidated.");
+        printProviderWritten();
+      } catch (IOException e) {
+        getOut().println("Cannot invalidate cache for key: " + keyName +
+            " within KeyProvider: " + provider + ". " + e.toString());
+        throw e;
+      }
+    }
+
+    @Override
+    public String getUsage() {
+      return USAGE + ":\n\n" + DESC;
+    }
+  }
+
   /**
    * main() entry point for the KeyShell.  While strictly speaking the
    * return is void, it will System.exit() with a return code: 0 is for
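
The new invalidateCache subcommand goes through the usual KeyShell/ToolRunner plumbing (KeyShell extends CommandShell, a Tool). A sketch of driving it programmatically; the KMS provider URI is a placeholder:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyShell;
    import org.apache.hadoop.util.ToolRunner;

    public class InvalidateCacheSketch {
      public static void main(String[] args) throws Exception {
        // Equivalent to: hadoop key invalidateCache mykey -provider <provider>
        // The KMS URI below is a placeholder; substitute a real provider path.
        int rc = ToolRunner.run(new Configuration(), new KeyShell(), new String[] {
            "invalidateCache", "mykey",
            "-provider", "kms://http@kms.example.com:9600/kms"});
        System.exit(rc);
      }
    }
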

+ 107 - 46
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java

@@ -44,7 +44,6 @@ import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthentica
 import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.hadoop.util.KMSUtil;
 import org.apache.http.client.utils.URIBuilder;
-import org.codehaus.jackson.map.ObjectMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -80,6 +79,7 @@ import java.util.concurrent.ExecutionException;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
 
+import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
@@ -146,7 +146,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       List response = call(conn, null,
           HttpURLConnection.HTTP_OK, List.class);
       List<EncryptedKeyVersion> ekvs =
-          parseJSONEncKeyVersion(keyName, response);
+          parseJSONEncKeyVersions(keyName, response);
       keyQueue.addAll(ekvs);
     }
   }
@@ -173,14 +173,20 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       LOG.debug("Renewing delegation token {}", token);
       KeyProvider keyProvider = KMSUtil.createKeyProvider(conf,
           KeyProviderFactory.KEY_PROVIDER_PATH);
-      if (!(keyProvider instanceof
-          KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
-        LOG.warn("keyProvider {} cannot renew dt.", keyProvider == null ?
-            "null" : keyProvider.getClass());
-        return 0;
+      try {
+        if (!(keyProvider instanceof
+            KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
+          LOG.warn("keyProvider {} cannot renew dt.", keyProvider == null ?
+              "null" : keyProvider.getClass());
+          return 0;
+        }
+        return ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
+            keyProvider).renewDelegationToken(token);
+      } finally {
+        if (keyProvider != null) {
+          keyProvider.close();
+        }
       }
-      return ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
-          keyProvider).renewDelegationToken(token);
     }
 
     @Override
@@ -188,14 +194,20 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       LOG.debug("Canceling delegation token {}", token);
       KeyProvider keyProvider = KMSUtil.createKeyProvider(conf,
           KeyProviderFactory.KEY_PROVIDER_PATH);
-      if (!(keyProvider instanceof
-          KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
-        LOG.warn("keyProvider {} cannot cancel dt.", keyProvider == null ?
-            "null" : keyProvider.getClass());
-        return;
+      try {
+        if (!(keyProvider instanceof
+            KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
+          LOG.warn("keyProvider {} cannot cancel dt.", keyProvider == null ?
+              "null" : keyProvider.getClass());
+          return;
+        }
+        ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
+            keyProvider).cancelDelegationToken(token);
+      } finally {
+        if (keyProvider != null) {
+          keyProvider.close();
+        }
       }
-      ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
-          keyProvider).cancelDelegationToken(token);
     }
   }
 
@@ -209,37 +221,41 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
 
   @SuppressWarnings("rawtypes")
   private static List<EncryptedKeyVersion>
-      parseJSONEncKeyVersion(String keyName, List valueList) {
+      parseJSONEncKeyVersions(String keyName, List valueList) {
     List<EncryptedKeyVersion> ekvs = new LinkedList<EncryptedKeyVersion>();
     if (!valueList.isEmpty()) {
       for (Object values : valueList) {
         Map valueMap = (Map) values;
+        ekvs.add(parseJSONEncKeyVersion(keyName, valueMap));
+      }
+    }
+    return ekvs;
+  }
 
-        String versionName = checkNotNull(
-                (String) valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
-                KMSRESTConstants.VERSION_NAME_FIELD);
+  private static EncryptedKeyVersion parseJSONEncKeyVersion(String keyName,
+      Map valueMap) {
+    String versionName = checkNotNull(
+        (String) valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
+        KMSRESTConstants.VERSION_NAME_FIELD);
 
-        byte[] iv = Base64.decodeBase64(checkNotNull(
-                (String) valueMap.get(KMSRESTConstants.IV_FIELD),
-                KMSRESTConstants.IV_FIELD));
+    byte[] iv = Base64.decodeBase64(checkNotNull(
+        (String) valueMap.get(KMSRESTConstants.IV_FIELD),
+        KMSRESTConstants.IV_FIELD));
 
-        Map encValueMap = checkNotNull((Map)
-                valueMap.get(KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD),
-                KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD);
+    Map encValueMap = checkNotNull((Map)
+            valueMap.get(KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD),
+        KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD);
 
-        String encVersionName = checkNotNull((String)
-                encValueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
-                KMSRESTConstants.VERSION_NAME_FIELD);
+    String encVersionName = checkNotNull((String)
+            encValueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
+        KMSRESTConstants.VERSION_NAME_FIELD);
 
-        byte[] encKeyMaterial = Base64.decodeBase64(checkNotNull((String)
-                encValueMap.get(KMSRESTConstants.MATERIAL_FIELD),
-                KMSRESTConstants.MATERIAL_FIELD));
+    byte[] encKeyMaterial = Base64.decodeBase64(checkNotNull((String)
+            encValueMap.get(KMSRESTConstants.MATERIAL_FIELD),
+        KMSRESTConstants.MATERIAL_FIELD));
 
-        ekvs.add(new KMSEncryptedKeyVersion(keyName, versionName, iv,
-            encVersionName, encKeyMaterial));
-      }
-    }
-    return ekvs;
+    return new KMSEncryptedKeyVersion(keyName, versionName, iv,
+        encVersionName, encKeyMaterial);
   }
 
   private static KeyVersion parseJSONKeyVersion(Map valueMap) {
@@ -741,6 +757,17 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     }
   }
 
+  @Override
+  public void invalidateCache(String name) throws IOException {
+    checkNotEmpty(name, "name");
+    final URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name,
+        KMSRESTConstants.INVALIDATECACHE_RESOURCE, null);
+    final HttpURLConnection conn = createConnection(url, HTTP_POST);
+    // invalidate the server cache first, then drain local cache.
+    call(conn, null, HttpURLConnection.HTTP_OK, null);
+    drain(name);
+  }
+
   private KeyVersion rollNewVersionInternal(String name, byte[] material)
       throws NoSuchAlgorithmException, IOException {
     checkNotEmpty(name, "name");
@@ -755,7 +782,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     Map response = call(conn, jsonMaterial,
         HttpURLConnection.HTTP_OK, Map.class);
     KeyVersion keyVersion = parseJSONKeyVersion(response);
-    encKeyVersionQueue.drain(name);
+    invalidateCache(name);
     return keyVersion;
   }
 
@@ -825,6 +852,35 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     return parseJSONKeyVersion(response);
   }
 
+  @Override
+  public EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion ekv)
+      throws IOException, GeneralSecurityException {
+    checkNotNull(ekv.getEncryptionKeyVersionName(), "versionName");
+    checkNotNull(ekv.getEncryptedKeyIv(), "iv");
+    checkNotNull(ekv.getEncryptedKeyVersion(), "encryptedKey");
+    Preconditions.checkArgument(ekv.getEncryptedKeyVersion().getVersionName()
+            .equals(KeyProviderCryptoExtension.EEK),
+        "encryptedKey version name must be '%s', is '%s'",
+        KeyProviderCryptoExtension.EEK,
+        ekv.getEncryptedKeyVersion().getVersionName());
+    final Map<String, String> params = new HashMap<>();
+    params.put(KMSRESTConstants.EEK_OP, KMSRESTConstants.EEK_REENCRYPT);
+    final Map<String, Object> jsonPayload = new HashMap<>();
+    jsonPayload.put(KMSRESTConstants.NAME_FIELD, ekv.getEncryptionKeyName());
+    jsonPayload.put(KMSRESTConstants.IV_FIELD,
+        Base64.encodeBase64String(ekv.getEncryptedKeyIv()));
+    jsonPayload.put(KMSRESTConstants.MATERIAL_FIELD,
+        Base64.encodeBase64String(ekv.getEncryptedKeyVersion().getMaterial()));
+    final URL url = createURL(KMSRESTConstants.KEY_VERSION_RESOURCE,
+        ekv.getEncryptionKeyVersionName(), KMSRESTConstants.EEK_SUB_RESOURCE,
+        params);
+    final HttpURLConnection conn = createConnection(url, HTTP_POST);
+    conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
+    final Map response =
+        call(conn, jsonPayload, HttpURLConnection.HTTP_OK, Map.class);
+    return parseJSONEncKeyVersion(ekv.getEncryptionKeyName(), response);
+  }
+
   @Override
   public List<KeyVersion> getKeyVersions(String name) throws IOException {
     checkNotEmpty(name, "name");
@@ -1026,10 +1082,9 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     return dtService;
   }
 
-  private boolean currentUgiContainsKmsDt() throws IOException {
-    // Add existing credentials from current UGI, since provider is cached.
-    Credentials creds = UserGroupInformation.getCurrentUser().
-        getCredentials();
+  private boolean containsKmsDt(UserGroupInformation ugi) throws IOException {
+    // Add existing credentials from the UGI, since provider is cached.
+    Credentials creds = ugi.getCredentials();
     if (!creds.getAllTokens().isEmpty()) {
       org.apache.hadoop.security.token.Token<? extends TokenIdentifier>
           dToken = creds.getToken(getDelegationTokenService());
@@ -1051,11 +1106,16 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     if (currentUgi.getRealUser() != null) {
       // Use real user for proxy user
       actualUgi = currentUgi.getRealUser();
-    } else if (!currentUgiContainsKmsDt() &&
-        !currentUgi.hasKerberosCredentials()) {
-      // Use login user for user that does not have either
+    }
+    if (UserGroupInformation.isSecurityEnabled() &&
+        !containsKmsDt(actualUgi) &&
+        !actualUgi.hasKerberosCredentials()) {
+      // Using the login user is only necessary when Kerberos is enabled
+      // but the actual user does not have either
       // Kerberos credential or KMS delegation token for KMS operations
-      actualUgi = currentUgi.getLoginUser();
+      LOG.debug("Using loginUser when Kerberos is enabled but the actual user" +
+          " does not have either KMS Delegation Token or Kerberos Credentials");
+      actualUgi = UserGroupInformation.getLoginUser();
     }
     return actualUgi;
   }
@@ -1072,6 +1132,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     } finally {
       if (sslFactory != null) {
         sslFactory.destroy();
+        sslFactory = null;
       }
     }
   }

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java

@@ -36,12 +36,14 @@ public class KMSRESTConstants {
   public static final String VERSIONS_SUB_RESOURCE = "_versions";
   public static final String EEK_SUB_RESOURCE = "_eek";
   public static final String CURRENT_VERSION_SUB_RESOURCE = "_currentversion";
+  public static final String INVALIDATECACHE_RESOURCE = "_invalidatecache";
 
   public static final String KEY = "key";
   public static final String EEK_OP = "eek_op";
   public static final String EEK_GENERATE = "generate";
   public static final String EEK_DECRYPT = "decrypt";
   public static final String EEK_NUM_KEYS = "num_keys";
+  public static final String EEK_REENCRYPT = "reencrypt";
 
   public static final String IV_FIELD = "iv";
   public static final String NAME_FIELD = "name";

+ 35 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java

@@ -178,6 +178,14 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
     }
   }
 
+  // This request is sent to all providers in the load-balancing group
+  @Override
+  public void invalidateCache(String keyName) throws IOException {
+    for (KMSClientProvider provider : providers) {
+      provider.invalidateCache(keyName);
+    }
+  }
+
   @Override
   public EncryptedKeyVersion
       generateEncryptedKey(final String encryptionKeyName)
@@ -218,6 +226,24 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
     }
   }
 
+  public EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion ekv)
+      throws IOException, GeneralSecurityException {
+    try {
+      return doOp(new ProviderCallable<EncryptedKeyVersion>() {
+        @Override
+        public EncryptedKeyVersion call(KMSClientProvider provider)
+            throws IOException, GeneralSecurityException {
+          return provider.reencryptEncryptedKey(ekv);
+        }
+      }, nextIdx());
+    } catch (WrapperException we) {
+      if (we.getCause() instanceof GeneralSecurityException) {
+        throw (GeneralSecurityException) we.getCause();
+      }
+      throw new IOException(we.getCause());
+    }
+  }
+
   @Override
   public KeyVersion getKeyVersion(final String versionName) throws IOException {
     return doOp(new ProviderCallable<KeyVersion>() {
@@ -307,6 +333,7 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
       throw new IOException(e.getCause());
     }
   }
+
   @Override
   public void deleteKey(final String name) throws IOException {
     doOp(new ProviderCallable<Void>() {
@@ -317,28 +344,33 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
       }
     }, nextIdx());
   }
+
   @Override
   public KeyVersion rollNewVersion(final String name, final byte[] material)
       throws IOException {
-    return doOp(new ProviderCallable<KeyVersion>() {
+    final KeyVersion newVersion = doOp(new ProviderCallable<KeyVersion>() {
       @Override
       public KeyVersion call(KMSClientProvider provider) throws IOException {
         return provider.rollNewVersion(name, material);
       }
     }, nextIdx());
+    invalidateCache(name);
+    return newVersion;
   }
 
   @Override
   public KeyVersion rollNewVersion(final String name)
       throws NoSuchAlgorithmException, IOException {
     try {
-      return doOp(new ProviderCallable<KeyVersion>() {
+      final KeyVersion newVersion = doOp(new ProviderCallable<KeyVersion>() {
         @Override
         public KeyVersion call(KMSClientProvider provider) throws IOException,
-        NoSuchAlgorithmException {
+            NoSuchAlgorithmException {
           return provider.rollNewVersion(name);
         }
       }, nextIdx());
+      invalidateCache(name);
+      return newVersion;
     } catch (WrapperException e) {
       if (e.getCause() instanceof GeneralSecurityException) {
         throw (NoSuchAlgorithmException) e.getCause();
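
The load-balancing provider changes above use two dispatch patterns: invalidateCache() fans out to every provider, while rollNewVersion() goes to a single provider via nextIdx() and then invalidates caches so stale EEKs of the old key version cannot be served. A simplified sketch of those two patterns, with Backend and RoundRobinKms as hypothetical stand-ins for the Hadoop classes:

import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

// Simplified sketch: fan-out for cache invalidation, round-robin for
// ordinary operations. "Backend" stands in for KMSClientProvider.
interface Backend {
  void invalidateCache(String keyName) throws IOException;
  String rollNewVersion(String keyName) throws IOException;
}

class RoundRobinKms {
  private final List<Backend> backends;
  private final AtomicInteger counter = new AtomicInteger(0);

  RoundRobinKms(List<Backend> backends) {
    this.backends = backends;
  }

  private int nextIdx() {
    return Math.abs(counter.getAndIncrement() % backends.size());
  }

  // Sent to every backend so no stale key version survives anywhere.
  void invalidateCache(String keyName) throws IOException {
    for (Backend b : backends) {
      b.invalidateCache(keyName);
    }
  }

  // Ordinary operation: pick one backend, then invalidate caches so a later
  // generateEncryptedKey cannot hand out EEKs of the old key version.
  String rollNewVersion(String keyName) throws IOException {
    String newVersion = backends.get(nextIdx()).rollNewVersion(keyName);
    invalidateCache(keyName);
    return newVersion;
  }
}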

+ 102 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java

@@ -18,8 +18,9 @@
 package org.apache.hadoop.crypto.key.kms;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.HashSet;
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -28,6 +29,9 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import com.google.common.base.Preconditions;
 import com.google.common.cache.CacheBuilder;
@@ -67,8 +71,17 @@ public class ValueQueue <E> {
 
   private static final String REFILL_THREAD =
       ValueQueue.class.getName() + "_thread";
+  private static final int LOCK_ARRAY_SIZE = 16;
+  // Use a mask, assuming the array size is a power of 2 or Integer.MAX_VALUE.
+  private static final int MASK = LOCK_ARRAY_SIZE == Integer.MAX_VALUE ?
+      LOCK_ARRAY_SIZE :
+      LOCK_ARRAY_SIZE - 1;
 
   private final LoadingCache<String, LinkedBlockingQueue<E>> keyQueues;
+  // Striped read-write locks keyed by key name, used to synchronize the
+  // queue between the synchronous read/write path and the background
+  // async refill thread.
+  private final List<ReadWriteLock> lockArray =
+      new ArrayList<>(LOCK_ARRAY_SIZE);
   private final ThreadPoolExecutor executor;
   private final UniqueKeyBlockingQueue queue = new UniqueKeyBlockingQueue();
   private final QueueRefiller<E> refiller;
@@ -84,9 +97,47 @@ public class ValueQueue <E> {
    */
   private abstract static class NamedRunnable implements Runnable {
     final String name;
+    private AtomicBoolean canceled = new AtomicBoolean(false);
     private NamedRunnable(String keyName) {
       this.name = keyName;
     }
+
+    public void cancel() {
+      canceled.set(true);
+    }
+
+    public boolean isCanceled() {
+      return canceled.get();
+    }
+  }
+
+  private void readLock(String keyName) {
+    getLock(keyName).readLock().lock();
+  }
+
+  private void readUnlock(String keyName) {
+    getLock(keyName).readLock().unlock();
+  }
+
+  private void writeUnlock(String keyName) {
+    getLock(keyName).writeLock().unlock();
+  }
+
+  private void writeLock(String keyName) {
+    getLock(keyName).writeLock().lock();
+  }
+
+  /**
+   * Get the striped lock for the given key name.
+   *
+   * @param keyName The key name.
+   */
+  private ReadWriteLock getLock(String keyName) {
+    return lockArray.get(indexFor(keyName));
+  }
+
+  private static int indexFor(String keyName) {
+    return keyName.hashCode() & MASK;
   }
 
   /**
@@ -103,11 +154,12 @@ public class ValueQueue <E> {
       LinkedBlockingQueue<Runnable> {
 
     private static final long serialVersionUID = -2152747693695890371L;
-    private HashSet<String> keysInProgress = new HashSet<String>();
+    private HashMap<String, Runnable> keysInProgress = new HashMap<>();
 
     @Override
     public synchronized void put(Runnable e) throws InterruptedException {
-      if (keysInProgress.add(((NamedRunnable)e).name)) {
+      if (!keysInProgress.containsKey(((NamedRunnable)e).name)) {
+        keysInProgress.put(((NamedRunnable)e).name, e);
         super.put(e);
       }
     }
@@ -131,6 +183,14 @@ public class ValueQueue <E> {
       return k;
     }
 
+    public Runnable deleteByName(String name) {
+      NamedRunnable e = (NamedRunnable) keysInProgress.remove(name);
+      if (e != null) {
+        e.cancel();
+        super.remove(e);
+      }
+      return e;
+    }
   }
 
   /**
@@ -172,6 +232,9 @@ public class ValueQueue <E> {
     this.policy = policy;
     this.numValues = numValues;
     this.lowWatermark = lowWatermark;
+    for (int i = 0; i < LOCK_ARRAY_SIZE; ++i) {
+      lockArray.add(i, new ReentrantReadWriteLock());
+    }
     keyQueues = CacheBuilder.newBuilder()
             .expireAfterAccess(expiry, TimeUnit.MILLISECONDS)
             .build(new CacheLoader<String, LinkedBlockingQueue<E>>() {
@@ -233,9 +296,18 @@ public class ValueQueue <E> {
    *
    * @param keyName the key to drain the Queue for
    */
-  public void drain(String keyName ) {
+  public void drain(String keyName) {
     try {
-      keyQueues.get(keyName).clear();
+      Runnable e;
+      while ((e = queue.deleteByName(keyName)) != null) {
+        executor.remove(e);
+      }
+      writeLock(keyName);
+      try {
+        keyQueues.get(keyName).clear();
+      } finally {
+        writeUnlock(keyName);
+      }
     } catch (ExecutionException ex) {
       //NOP
     }
@@ -247,14 +319,19 @@ public class ValueQueue <E> {
    * @return int queue size
    */
   public int getSize(String keyName) {
-    // We can't do keyQueues.get(keyName).size() here,
-    // since that will have the side effect of populating the cache.
-    Map<String, LinkedBlockingQueue<E>> map =
-        keyQueues.getAllPresent(Arrays.asList(keyName));
-    if (map.get(keyName) == null) {
-      return 0;
+    readLock(keyName);
+    try {
+      // We can't do keyQueues.get(keyName).size() here,
+      // since that will have the side effect of populating the cache.
+      Map<String, LinkedBlockingQueue<E>> map =
+          keyQueues.getAllPresent(Arrays.asList(keyName));
+      if (map.get(keyName) == null) {
+        return 0;
+      }
+      return map.get(keyName).size();
+    } finally {
+      readUnlock(keyName);
     }
-    return map.get(keyName).size();
   }
 
   /**
@@ -276,7 +353,9 @@ public class ValueQueue <E> {
     LinkedList<E> ekvs = new LinkedList<E>();
     try {
       for (int i = 0; i < num; i++) {
+        readLock(keyName);
         E val = keyQueue.poll();
+        readUnlock(keyName);
         // If queue is empty now, Based on the provided SyncGenerationPolicy,
         // figure out how many new values need to be generated synchronously
         if (val == null) {
@@ -336,9 +415,17 @@ public class ValueQueue <E> {
             int threshold = (int) (lowWatermark * (float) cacheSize);
             // Need to ensure that only one refill task per key is executed
             try {
-              if (keyQueue.size() < threshold) {
-                refiller.fillQueueForKey(name, keyQueue,
-                    cacheSize - keyQueue.size());
+              writeLock(keyName);
+              try {
+                if (keyQueue.size() < threshold && !isCanceled()) {
+                  refiller.fillQueueForKey(name, keyQueue,
+                      cacheSize - keyQueue.size());
+                }
+                if (isCanceled()) {
+                  keyQueue.clear();
+                }
+              } finally {
+                writeUnlock(keyName);
               }
             } catch (final Exception e) {
               throw new RuntimeException(e);
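
The ValueQueue changes coordinate drain() and the async refill task through a fixed array of striped ReentrantReadWriteLocks indexed by keyName.hashCode() & MASK. A self-contained sketch of that striping scheme (class and method names are illustrative, not the ValueQueue fields):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Supplier;

// Minimal illustration of lock striping keyed by a string, as used to
// serialize queue draining against the async refill task.
class StripedLocks {
  // Must be a power of two so that (hash & MASK) is a valid index.
  private static final int LOCK_ARRAY_SIZE = 16;
  private static final int MASK = LOCK_ARRAY_SIZE - 1;

  private final List<ReadWriteLock> locks = new ArrayList<>(LOCK_ARRAY_SIZE);

  StripedLocks() {
    for (int i = 0; i < LOCK_ARRAY_SIZE; i++) {
      locks.add(new ReentrantReadWriteLock());
    }
  }

  private ReadWriteLock lockFor(String keyName) {
    // hashCode() can be negative; masking keeps the index in [0, size).
    return locks.get(keyName.hashCode() & MASK);
  }

  <T> T withReadLock(String keyName, Supplier<T> body) {
    ReadWriteLock lock = lockFor(keyName);
    lock.readLock().lock();
    try {
      return body.get();
    } finally {
      lock.readLock().unlock();
    }
  }

  void withWriteLock(String keyName, Runnable body) {
    ReadWriteLock lock = lockFor(keyName);
    lock.writeLock().lock();
    try {
      body.run();
    } finally {
      lock.writeLock().unlock();
    }
  }
}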

+ 5 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs;
 
 import java.io.IOException;
+import java.io.Serializable;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -29,7 +30,9 @@ import org.apache.hadoop.classification.InterfaceStability;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class BlockLocation {
+public class BlockLocation implements Serializable {
+  private static final long serialVersionUID = 0x22986f6d;
+
   private String[] hosts; // Datanode hostnames
   private String[] cachedHosts; // Datanode hostnames with a cached replica
   private String[] names; // Datanode IP:xferPort for accessing the block
@@ -303,4 +306,4 @@ public class BlockLocation {
     }
     return result.toString();
   }
-}
+}

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java

@@ -605,6 +605,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    * Rename files/dirs
    */
   @Override
+  @SuppressWarnings("deprecation")
   public boolean rename(Path src, Path dst) throws IOException {
     if (fs.isDirectory(src)) {
       return fs.rename(src, dst);
@@ -721,6 +722,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    * If src and dst are directories, the copyCrc parameter
    * determines whether to copy CRC files.
    */
+  @SuppressWarnings("deprecation")
   public void copyToLocalFile(Path src, Path dst, boolean copyCrc)
     throws IOException {
     if (!fs.isDirectory(src)) { // source is a file

+ 27 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -141,6 +141,22 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final int IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT =
       256 * 1024;
 
+  /** ZStandard compression level. */
+  public static final String IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY =
+      "io.compression.codec.zstd.level";
+
+  /** Default value for IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY. */
+  public static final int IO_COMPRESSION_CODEC_ZSTD_LEVEL_DEFAULT = 3;
+
+  /** ZStandard buffer size. */
+  public static final String IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_KEY =
+      "io.compression.codec.zstd.buffersize";
+
+  /** ZStandard buffer size. A value of 0 means use the buffer size
+   * recommended by the zstd library. */
+  public static final int
+      IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_DEFAULT = 0;
+
   /** Internal buffer size for Lz4 compressor/decompressors */
   public static final String IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY =
       "io.compression.codec.lz4.buffersize";
@@ -337,6 +353,17 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS =
     "hadoop.user.group.metrics.percentiles.intervals";
 
+  /* When creating a UGI with UserGroupInformation(Subject), treat the passed
+   * subject as external if set to true, and assume the owner of the subject
+   * is responsible for credential renewal.
+   *
+   * This is a temporary config to solve the compatibility issue with
+   * HADOOP-13558 and HADOOP-13805 fix, see the jiras for discussions.
+   */
+  public static final String HADOOP_TREAT_SUBJECT_EXTERNAL_KEY =
+      "hadoop.treat.subject.external";
+  public static final boolean HADOOP_TREAT_SUBJECT_EXTERNAL_DEFAULT = false;
+
   public static final String RPC_METRICS_QUANTILE_ENABLE =
       "rpc.metrics.quantile.enable";
   public static final boolean RPC_METRICS_QUANTILE_ENABLE_DEFAULT = false;
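
The new zstd codec keys above can be set like any other Hadoop configuration, either in core-site.xml or programmatically. A minimal sketch using org.apache.hadoop.conf.Configuration (the values shown are arbitrary):

import org.apache.hadoop.conf.Configuration;

public class ZstdCodecSettings {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Compression level; the key defaults to 3 per the constants above.
    conf.setInt("io.compression.codec.zstd.level", 6);
    // 0 lets the zstd library pick its recommended buffer size.
    conf.setInt("io.compression.codec.zstd.buffersize", 0);
    System.out.println(conf.getInt("io.compression.codec.zstd.level", 3));
  }
}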

+ 42 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -250,18 +250,43 @@ public class CommonConfigurationKeysPublic {
    * @deprecated Moved to mapreduce, see mapreduce.task.io.sort.mb
    * in mapred-default.xml
    * See https://issues.apache.org/jira/browse/HADOOP-6801
+   *
+   * For {@link org.apache.hadoop.io.SequenceFile.Sorter} control
+   * instead, see {@link #SEQ_IO_SORT_MB_KEY}.
    */
   public static final String  IO_SORT_MB_KEY = "io.sort.mb";
-  /** Default value for IO_SORT_MB_DEFAULT */
+  /** Default value for {@link #IO_SORT_MB_KEY}. */
   public static final int     IO_SORT_MB_DEFAULT = 100;
   /**
    * @deprecated Moved to mapreduce, see mapreduce.task.io.sort.factor
    * in mapred-default.xml
    * See https://issues.apache.org/jira/browse/HADOOP-6801
+   *
+   * For {@link org.apache.hadoop.io.SequenceFile.Sorter} control
+   * instead, see {@link #SEQ_IO_SORT_FACTOR_KEY}.
    */
   public static final String  IO_SORT_FACTOR_KEY = "io.sort.factor";
-  /** Default value for IO_SORT_FACTOR_DEFAULT */
+  /** Default value for {@link #IO_SORT_FACTOR_KEY}. */
   public static final int     IO_SORT_FACTOR_DEFAULT = 100;
+
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final String  SEQ_IO_SORT_MB_KEY = "seq.io.sort.mb";
+  /** Default value for {@link #SEQ_IO_SORT_MB_KEY}. */
+  public static final int     SEQ_IO_SORT_MB_DEFAULT = 100;
+
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final String  SEQ_IO_SORT_FACTOR_KEY = "seq.io.sort.factor";
+  /** Default value for {@link #SEQ_IO_SORT_FACTOR_KEY}. */
+  public static final int     SEQ_IO_SORT_FACTOR_DEFAULT = 100;
+
   /**
    * @see
    * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
@@ -517,6 +542,21 @@ public class CommonConfigurationKeysPublic {
    * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
    * core-default.xml</a>
    */
+  public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS =
+      "hadoop.security.groups.shell.command.timeout";
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final long
+          HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT =
+          0L;
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  HADOOP_SECURITY_AUTHENTICATION =
     "hadoop.security.authentication";
   /**

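Likewise, the new SequenceFile.Sorter keys and the shell group-lookup timeout above are plain configuration entries. A short sketch, assuming the timeout value is in seconds as the _SECS suffix of the constant suggests:

import org.apache.hadoop.conf.Configuration;

public class SorterAndGroupTimeoutSettings {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // SequenceFile.Sorter buffer and merge factor, decoupled from the
    // deprecated MapReduce io.sort.* keys.
    conf.setInt("seq.io.sort.mb", 200);
    conf.setInt("seq.io.sort.factor", 100);
    // Timeout for shell-based group lookups; 0 keeps the old no-timeout
    // behavior.
    conf.setLong("hadoop.security.groups.shell.command.timeout", 30L);
  }
}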
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java

@@ -160,7 +160,7 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
 
   @Override
   public int getUriDefaultPort() {
-    return DELEGATE_TO_FS_DEFAULT_PORT;
+    return getDefaultPortIfDefined(fsImpl);
   }
 
   @Override

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java

@@ -48,4 +48,7 @@ public class FSExceptionMessages {
       = "Requested more bytes than destination buffer size";
 
   public static final String PERMISSION_DENIED = "Permission denied";
+
+  public static final String PERMISSION_DENIED_BY_STICKY_BIT =
+      "Permission denied by sticky bit";
 }

+ 19 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java

@@ -20,6 +20,9 @@ package org.apache.hadoop.fs;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.io.InvalidObjectException;
+import java.io.ObjectInputValidation;
+import java.io.Serializable;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -31,11 +34,14 @@ import org.apache.hadoop.io.Writable;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class FileStatus implements Writable, Comparable<FileStatus> {
+public class FileStatus implements Writable, Comparable<FileStatus>,
+    Serializable, ObjectInputValidation {
+
+  private static final long serialVersionUID = 0x13caeae8;
 
   private Path path;
   private long length;
-  private boolean isdir;
+  private Boolean isdir;
   private short block_replication;
   private long blocksize;
   private long modification_time;
@@ -387,4 +393,15 @@ public class FileStatus implements Writable, Comparable<FileStatus> {
     sb.append("}");
     return sb.toString();
   }
+
+  @Override
+  public void validateObject() throws InvalidObjectException {
+    if (null == path) {
+      throw new InvalidObjectException("No Path in deserialized FileStatus");
+    }
+    if (null == isdir) {
+      throw new InvalidObjectException("No type in deserialized FileStatus");
+    }
+  }
+
 }
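
FileStatus (and FsPermission further down) now implement Serializable and ObjectInputValidation, with validateObject() rejecting instances that deserialize without a path or type. Note that validateObject() only runs if something registers it with the stream; the following self-contained sketch shows that contract with an illustrative class, not the Hadoop types:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.ObjectInputStream;
import java.io.ObjectInputValidation;
import java.io.ObjectOutputStream;
import java.io.Serializable;

// Generic illustration of the Serializable + ObjectInputValidation pattern:
// validateObject() rejects half-built instances after deserialization.
class ValidatedRecord implements Serializable, ObjectInputValidation {
  private static final long serialVersionUID = 1L;

  private final String path; // must be present after deserialization

  ValidatedRecord(String path) {
    this.path = path;
  }

  private void readObject(ObjectInputStream in)
      throws IOException, ClassNotFoundException {
    // Ask the stream to call validateObject() once the graph is read.
    in.registerValidation(this, 0);
    in.defaultReadObject();
  }

  @Override
  public void validateObject() throws InvalidObjectException {
    if (path == null) {
      throw new InvalidObjectException("No path in deserialized record");
    }
  }

  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
      out.writeObject(new ValidatedRecord("/tmp/file"));
    }
    try (ObjectInputStream in =
        new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
      System.out.println(((ValidatedRecord) in.readObject()).path != null);
    }
  }
}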

File diff suppressed because it is too large
+ 337 - 200
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java


+ 13 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java

@@ -22,6 +22,7 @@ import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -326,14 +327,15 @@ public class FileUtil {
       return copy(srcFS, srcs[0], dstFS, dst, deleteSource, overwrite, conf);
 
     // Check if dest is directory
-    if (!dstFS.exists(dst)) {
-      throw new IOException("`" + dst +"': specified destination directory " +
-                            "does not exist");
-    } else {
+    try {
       FileStatus sdst = dstFS.getFileStatus(dst);
       if (!sdst.isDirectory())
         throw new IOException("copying multiple files, but last argument `" +
                               dst + "' is not a directory");
+    } catch (FileNotFoundException e) {
+      throw new IOException(
+          "`" + dst + "': specified destination directory " +
+              "does not exist", e);
     }
 
     for (Path src : srcs) {
@@ -481,8 +483,13 @@ public class FileUtil {
 
   private static Path checkDest(String srcName, FileSystem dstFS, Path dst,
       boolean overwrite) throws IOException {
-    if (dstFS.exists(dst)) {
-      FileStatus sdst = dstFS.getFileStatus(dst);
+    FileStatus sdst;
+    try {
+      sdst = dstFS.getFileStatus(dst);
+    } catch (FileNotFoundException e) {
+      sdst = null;
+    }
+    if (null != sdst) {
       if (sdst.isDirectory()) {
         if (null == srcName) {
           throw new IOException("Target " + dst + " is a directory");
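
Several hunks in this change (FileUtil above, RawLocalFileSystem and TrashPolicyDefault below) replace the exists()-then-getFileStatus() double lookup with a single getFileStatus() call guarded by FileNotFoundException. A minimal sketch of the pattern against the local filesystem; the destination path is hypothetical:

import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SingleLookup {
  // Returns the status if dst exists, or null, using one metadata call
  // instead of the exists() + getFileStatus() pair.
  static FileStatus statusOrNull(FileSystem fs, Path dst) throws IOException {
    try {
      return fs.getFileStatus(dst);
    } catch (FileNotFoundException e) {
      return null;
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    // Hypothetical path, for illustration only.
    FileStatus status = statusOrNull(fs, new Path("/tmp/some-destination"));
    System.out.println(status == null ? "absent" : status.isDirectory());
  }
}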

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
+import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
 
@@ -234,6 +235,12 @@ public class FilterFileSystem extends FileSystem {
     return fs.rename(src, dst);
   }
 
+  @Override
+  protected void rename(Path src, Path dst, Rename... options)
+      throws IOException {
+    fs.rename(src, dst, options);
+  }
+
   @Override
   public boolean truncate(Path f, final long newLength) throws IOException {
     return fs.truncate(f, newLength);

+ 6 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java

@@ -328,7 +328,12 @@ public class FsShell extends Configured implements Tool {
           scope.close();
         }
       } catch (IllegalArgumentException e) {
-        displayError(cmd, e.getLocalizedMessage());
+        if (e.getMessage() == null) {
+          displayError(cmd, "Null exception message");
+          e.printStackTrace(System.err);
+        } else {
+          displayError(cmd, e.getLocalizedMessage());
+        }
         printUsage(System.err);
         if (instance != null) {
           printInstanceUsage(System.err, instance);

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobPattern.java

@@ -153,7 +153,7 @@ public class GlobPattern {
     if (curlyOpen > 0) {
       error("Unclosed group", glob, len);
     }
-    compiled = Pattern.compile(regex.toString());
+    compiled = Pattern.compile(regex.toString(), Pattern.DOTALL);
   }
 
   /**

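The GlobPattern fix compiles the generated regex with Pattern.DOTALL so that wildcards also match names containing line separators. A plain-JDK illustration of the difference:

import java.util.regex.Pattern;

public class DotAllDemo {
  public static void main(String[] args) {
    String nameWithNewline = "part\n0001";
    // Without DOTALL, '.' does not match the embedded newline.
    System.out.println(
        Pattern.compile("part.*").matcher(nameWithNewline).matches()); // false
    // With DOTALL, the same pattern matches the whole string.
    System.out.println(
        Pattern.compile("part.*", Pattern.DOTALL)
            .matcher(nameWithNewline).matches()); // true
  }
}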
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java

@@ -525,7 +525,7 @@ public class LocalDirAllocator {
         try {
           advance();
         } catch (IOException ie) {
-          throw new RuntimeException("Can't check existance of " + next, ie);
+          throw new RuntimeException("Can't check existence of " + next, ie);
         }
         if (result == null) {
           throw new NoSuchElementException();

+ 4 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -208,9 +208,7 @@ public class RawLocalFileSystem extends FileSystem {
   
   @Override
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
-    if (!exists(f)) {
-      throw new FileNotFoundException(f.toString());
-    }
+    getFileStatus(f);
     return new FSDataInputStream(new BufferedFSInputStream(
         new LocalFSFileInputStream(f), bufferSize));
   }
@@ -278,9 +276,6 @@ public class RawLocalFileSystem extends FileSystem {
   @Override
   public FSDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
-    if (!exists(f)) {
-      throw new FileNotFoundException("File " + f + " not found");
-    }
     FileStatus status = getFileStatus(f);
     if (status.isDirectory()) {
       throw new IOException("Cannot append to a diretory (=" + f + " )");
@@ -387,17 +382,18 @@ public class RawLocalFileSystem extends FileSystem {
     // platforms (notably Windows) do not provide this behavior, so the Java API
     // call renameTo(dstFile) fails. Delete destination and attempt rename
     // again.
-    if (this.exists(dst)) {
+    try {
       FileStatus sdst = this.getFileStatus(dst);
       if (sdst.isDirectory() && dstFile.list().length == 0) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Deleting empty destination and renaming " + src + " to " +
-            dst);
+              dst);
         }
         if (this.delete(dst, false) && srcFile.renameTo(dstFile)) {
           return true;
         }
       }
+    } catch (FileNotFoundException ignored) {
     }
     return false;
   }

+ 2 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java

@@ -121,9 +121,8 @@ public class TrashPolicyDefault extends TrashPolicy {
     if (!path.isAbsolute())                       // make path absolute
       path = new Path(fs.getWorkingDirectory(), path);
 
-    if (!fs.exists(path))                         // check that path exists
-      throw new FileNotFoundException(path.toString());
-
+    // check that path exists
+    fs.getFileStatus(path);
     String qpath = fs.makeQualified(path).toString();
 
     Path trashRoot = fs.getTrashRoot(path);

+ 70 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java

@@ -23,6 +23,7 @@ import java.io.InputStream;
 import java.net.ConnectException;
 import java.net.URI;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -65,6 +66,9 @@ public class FTPFileSystem extends FileSystem {
   public static final String FS_FTP_HOST = "fs.ftp.host";
   public static final String FS_FTP_HOST_PORT = "fs.ftp.host.port";
   public static final String FS_FTP_PASSWORD_PREFIX = "fs.ftp.password.";
+  public static final String FS_FTP_DATA_CONNECTION_MODE =
+      "fs.ftp.data.connection.mode";
+  public static final String FS_FTP_TRANSFER_MODE = "fs.ftp.transfer.mode";
   public static final String E_SAME_DIRECTORY_ONLY =
       "only same directory renames are supported";
 
@@ -143,9 +147,10 @@ public class FTPFileSystem extends FileSystem {
                    NetUtils.UNKNOWN_HOST, 0,
                    new ConnectException("Server response " + reply));
     } else if (client.login(user, password)) {
-      client.setFileTransferMode(FTP.BLOCK_TRANSFER_MODE);
+      client.setFileTransferMode(getTransferMode(conf));
       client.setFileType(FTP.BINARY_FILE_TYPE);
       client.setBufferSize(DEFAULT_BUFFER_SIZE);
+      setDataConnectionMode(client, conf);
     } else {
       throw new IOException("Login failed on server - " + host + ", port - "
           + port + " as user '" + user + "'");
@@ -154,6 +159,69 @@ public class FTPFileSystem extends FileSystem {
     return client;
   }
 
+  /**
+   * Set FTP's transfer mode based on configuration. Valid values are
+   * STREAM_TRANSFER_MODE, BLOCK_TRANSFER_MODE and COMPRESSED_TRANSFER_MODE.
+   * <p/>
+   * Defaults to BLOCK_TRANSFER_MODE.
+   *
+   * @param conf the configuration to read the transfer mode from
+   * @return the FTP transfer mode constant to use
+   */
+  @VisibleForTesting
+  int getTransferMode(Configuration conf) {
+    final String mode = conf.get(FS_FTP_TRANSFER_MODE);
+    // FTP default is STREAM_TRANSFER_MODE, but Hadoop FTPFS's default is
+    // FTP.BLOCK_TRANSFER_MODE historically.
+    int ret = FTP.BLOCK_TRANSFER_MODE;
+    if (mode == null) {
+      return ret;
+    }
+    final String upper = mode.toUpperCase();
+    if (upper.equals("STREAM_TRANSFER_MODE")) {
+      ret = FTP.STREAM_TRANSFER_MODE;
+    } else if (upper.equals("COMPRESSED_TRANSFER_MODE")) {
+      ret = FTP.COMPRESSED_TRANSFER_MODE;
+    } else {
+      if (!upper.equals("BLOCK_TRANSFER_MODE")) {
+        LOG.warn("Cannot parse the value for " + FS_FTP_TRANSFER_MODE + ": "
+            + mode + ". Using default.");
+      }
+    }
+    return ret;
+  }
+
+  /**
+   * Set the FTPClient's data connection mode based on configuration. Valid
+   * values are ACTIVE_LOCAL_DATA_CONNECTION_MODE,
+   * PASSIVE_LOCAL_DATA_CONNECTION_MODE and PASSIVE_REMOTE_DATA_CONNECTION_MODE.
+   * <p/>
+   * Defaults to ACTIVE_LOCAL_DATA_CONNECTION_MODE.
+   *
+   * @param client the FTPClient to configure
+   * @param conf the configuration to read the data connection mode from
+   * @throws IOException if the data connection mode cannot be set
+   */
+  @VisibleForTesting
+  void setDataConnectionMode(FTPClient client, Configuration conf)
+      throws IOException {
+    final String mode = conf.get(FS_FTP_DATA_CONNECTION_MODE);
+    if (mode == null) {
+      return;
+    }
+    final String upper = mode.toUpperCase();
+    if (upper.equals("PASSIVE_LOCAL_DATA_CONNECTION_MODE")) {
+      client.enterLocalPassiveMode();
+    } else if (upper.equals("PASSIVE_REMOTE_DATA_CONNECTION_MODE")) {
+      client.enterRemotePassiveMode();
+    } else {
+      if (!upper.equals("ACTIVE_LOCAL_DATA_CONNECTION_MODE")) {
+        LOG.warn("Cannot parse the value for " + FS_FTP_DATA_CONNECTION_MODE
+            + ": " + mode + ". Using default.");
+      }
+    }
+  }
+
   /**
    * Logout and disconnect the given FTPClient. *
    * 
@@ -576,6 +644,7 @@ public class FTPFileSystem extends FileSystem {
    * @return
    * @throws IOException
    */
+  @SuppressWarnings("deprecation")
   private boolean rename(FTPClient client, Path src, Path dst)
       throws IOException {
     Path workDir = new Path(client.printWorkingDirectory());
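
The new FTP keys accept the Apache Commons Net constant names, matched case-insensitively, and fall back to the historical defaults (with a warning) for unrecognized values. A minimal configuration sketch:

import org.apache.hadoop.conf.Configuration;

public class FtpFsSettings {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Transfer mode; unrecognized values fall back to BLOCK_TRANSFER_MODE.
    conf.set("fs.ftp.transfer.mode", "STREAM_TRANSFER_MODE");
    // Data connection mode; default is ACTIVE_LOCAL_DATA_CONNECTION_MODE.
    conf.set("fs.ftp.data.connection.mode",
        "PASSIVE_LOCAL_DATA_CONNECTION_MODE");
  }
}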

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsCreateModes.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public final class FsCreateModes extends FsPermission {
+  private static final long serialVersionUID = 0x22986f6d;
   private final FsPermission unmasked;
 
   /**

+ 19 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/FsPermission.java

@@ -20,6 +20,9 @@ package org.apache.hadoop.fs.permission;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.io.InvalidObjectException;
+import java.io.ObjectInputValidation;
+import java.io.Serializable;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -36,8 +39,10 @@ import org.apache.hadoop.io.WritableFactory;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class FsPermission implements Writable {
+public class FsPermission implements Writable, Serializable,
+    ObjectInputValidation {
   private static final Log LOG = LogFactory.getLog(FsPermission.class);
+  private static final long serialVersionUID = 0x2fe08564;
 
   static final WritableFactory FACTORY = new WritableFactory() {
     @Override
@@ -60,7 +65,7 @@ public class FsPermission implements Writable {
   private FsAction useraction = null;
   private FsAction groupaction = null;
   private FsAction otheraction = null;
-  private boolean stickyBit = false;
+  private Boolean stickyBit = false;
 
   private FsPermission() {}
 
@@ -202,7 +207,7 @@ public class FsPermission implements Writable {
       return this.useraction == that.useraction
           && this.groupaction == that.groupaction
           && this.otheraction == that.otheraction
-          && this.stickyBit == that.stickyBit;
+          && this.stickyBit.booleanValue() == that.stickyBit.booleanValue();
     }
     return false;
   }
@@ -377,6 +382,7 @@ public class FsPermission implements Writable {
   }
   
   private static class ImmutableFsPermission extends FsPermission {
+    private static final long serialVersionUID = 0x1bab54bd;
     public ImmutableFsPermission(short permission) {
       super(permission);
     }
@@ -386,4 +392,14 @@ public class FsPermission implements Writable {
       throw new UnsupportedOperationException();
     }
   }
+
+  @Override
+  public void validateObject() throws InvalidObjectException {
+    if (null == useraction || null == groupaction || null == otheraction) {
+      throw new InvalidObjectException("Invalid mode in FsPermission");
+    }
+    if (null == stickyBit) {
+      throw new InvalidObjectException("No sticky bit in FsPermission");
+    }
+  }
 }

+ 12 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java

@@ -101,7 +101,17 @@ abstract public class Command extends Configured {
    * @throws IOException if any error occurs
    */
   abstract protected void run(Path path) throws IOException;
-  
+
+  /**
+   * Execute the command on the input path data. Commands can override to make
+   * use of the resolved filesystem.
+   * @param pathData The input path with resolved filesystem
+   * @throws IOException
+   */
+  protected void run(PathData pathData) throws IOException {
+    run(pathData.path);
+  }
+
   /** 
    * For each source path, execute the command
    * 
@@ -113,7 +123,7 @@ abstract public class Command extends Configured {
       try {
         PathData[] srcs = PathData.expandAsGlob(src, getConf());
         for (PathData s : srcs) {
-          run(s.path);
+          run(s);
         }
       } catch (IOException e) {
         exitCode = -1;
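
With the new run(PathData) hook above, a command can work against the already-resolved FileSystem instead of re-resolving the Path. A hypothetical subclass sketch (not an actual Hadoop command); it assumes PathData's public path and fs fields:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.shell.Command;
import org.apache.hadoop.fs.shell.PathData;

// Hypothetical command that overrides the new run(PathData) hook so it can
// use the already-resolved FileSystem instead of re-resolving the Path.
public class ShowFsCommand extends Command {
  @Override
  protected void run(PathData pathData) throws IOException {
    System.out.println(pathData.path + " -> " + pathData.fs.getUri());
  }

  @Override
  protected void run(Path path) throws IOException {
    // Kept only to satisfy the abstract base method; the PathData overload
    // above is the one dispatched by the base class for each expanded path.
    throw new IOException("unexpected call for " + path);
  }
}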

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFormat.java

@@ -248,7 +248,7 @@ public class CommandFormat {
     private static final long serialVersionUID = 0L;
 
     public DuplicatedOptionException(String duplicatedOption) {
-      super("option " + duplicatedOption + " already exsits!");
+      super("option " + duplicatedOption + " already exists!");
     }
   }
 }

+ 1 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java

@@ -52,10 +52,6 @@ import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.codehaus.jackson.JsonEncoding;
-import org.codehaus.jackson.JsonFactory;
-import org.codehaus.jackson.JsonGenerator;
-import org.codehaus.jackson.util.MinimalPrettyPrinter;
 
 /**
  * Display contents or checksums of files 
@@ -277,12 +273,7 @@ class Display extends FsCommand {
       Schema schema = fileReader.getSchema();
       writer = new GenericDatumWriter<Object>(schema);
       output = new ByteArrayOutputStream();
-      JsonGenerator generator =
-        new JsonFactory().createJsonGenerator(output, JsonEncoding.UTF8);
-      MinimalPrettyPrinter prettyPrinter = new MinimalPrettyPrinter();
-      prettyPrinter.setRootValueSeparator(System.getProperty("line.separator"));
-      generator.setPrettyPrinter(prettyPrinter);
-      encoder = EncoderFactory.get().jsonEncoder(schema, generator);
+      encoder = EncoderFactory.get().jsonEncoder(schema, output);
     }
 
     /**

+ 95 - 29
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java

@@ -20,19 +20,24 @@ package org.apache.hadoop.fs.shell;
 
 import java.io.IOException;
 import java.io.PrintStream;
+import java.net.URI;
 import java.util.ArrayList;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.viewfs.ViewFileSystem;
+import org.apache.hadoop.fs.viewfs.ViewFileSystemUtil;
 import org.apache.hadoop.util.StringUtils;
 
-/** Base class for commands related to viewing filesystem usage, such as
- * du and df
+/**
+ * Base class for commands related to viewing filesystem usage,
+ * such as du and df.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -44,15 +49,27 @@ class FsUsage extends FsCommand {
     factory.addClass(Dus.class, "-dus");
   }
 
-  protected boolean humanReadable = false;
-  protected TableBuilder usagesTable;
-  
+  private boolean humanReadable = false;
+  private TableBuilder usagesTable;
+
   protected String formatSize(long size) {
     return humanReadable
         ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
         : String.valueOf(size);
   }
 
+  public TableBuilder getUsagesTable() {
+    return usagesTable;
+  }
+
+  public void setUsagesTable(TableBuilder usagesTable) {
+    this.usagesTable = usagesTable;
+  }
+
+  public void setHumanReadable(boolean humanReadable) {
+    this.humanReadable = humanReadable;
+  }
+
   /** Show the size of a partition in the filesystem */
   public static class Df extends FsUsage {
     public static final String NAME = "df";
@@ -70,38 +87,74 @@ class FsUsage extends FsCommand {
     throws IOException {
       CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "h");
       cf.parse(args);
-      humanReadable = cf.getOpt("h");
+      setHumanReadable(cf.getOpt("h"));
       if (args.isEmpty()) args.add(Path.SEPARATOR);
     }
 
     @Override
     protected void processArguments(LinkedList<PathData> args)
     throws IOException {
-      usagesTable = new TableBuilder(
-          "Filesystem", "Size", "Used", "Available", "Use%");
-      usagesTable.setRightAlign(1, 2, 3, 4);
-      
+      setUsagesTable(new TableBuilder(
+          "Filesystem", "Size", "Used", "Available", "Use%", "Mounted on"));
+      getUsagesTable().setRightAlign(1, 2, 3, 4);
+
       super.processArguments(args);
-      if (!usagesTable.isEmpty()) {
-        usagesTable.printToStream(out);
+      if (!getUsagesTable().isEmpty()) {
+        getUsagesTable().printToStream(out);
       }
     }
 
-    @Override
-    protected void processPath(PathData item) throws IOException {
-      FsStatus fsStats = item.fs.getStatus(item.path);
-      long size = fsStats.getCapacity();
-      long used = fsStats.getUsed();
-      long free = fsStats.getRemaining();
-
-      usagesTable.addRow(
-          item.fs.getUri(),
+    /**
+     * Add a new row to the usages table for the given FileSystem URI.
+     *
+     * @param uri - FileSystem URI
+     * @param fsStatus - FileSystem status
+     * @param mountedOnPath - FileSystem mounted on path
+     */
+    private void addToUsagesTable(URI uri, FsStatus fsStatus,
+        String mountedOnPath) {
+      long size = fsStatus.getCapacity();
+      long used = fsStatus.getUsed();
+      long free = fsStatus.getRemaining();
+      getUsagesTable().addRow(
+          uri,
           formatSize(size),
           formatSize(used),
           formatSize(free),
-          StringUtils.formatPercent((double)used/(double)size, 0)
+          StringUtils.formatPercent((double) used / (double) size, 0),
+          mountedOnPath
       );
     }
+
+    @Override
+    protected void processPath(PathData item) throws IOException {
+      if (ViewFileSystemUtil.isViewFileSystem(item.fs)) {
+        ViewFileSystem viewFileSystem = (ViewFileSystem) item.fs;
+        Map<ViewFileSystem.MountPoint, FsStatus>  fsStatusMap =
+            ViewFileSystemUtil.getStatus(viewFileSystem, item.path);
+
+        for (Map.Entry<ViewFileSystem.MountPoint, FsStatus> entry :
+            fsStatusMap.entrySet()) {
+          ViewFileSystem.MountPoint viewFsMountPoint = entry.getKey();
+          FsStatus fsStatus = entry.getValue();
+
+          // Add the viewfs mount point status to report
+          URI[] mountPointFileSystemURIs =
+              viewFsMountPoint.getTargetFileSystemURIs();
+          // Since LinkMerge is not supported yet, we
+          // should ideally see mountPointFileSystemURIs
+          // array with only one element.
+          addToUsagesTable(mountPointFileSystemURIs[0],
+              fsStatus, viewFsMountPoint.getMountedOnPath().toString());
+        }
+      } else {
+        // Hide the columns specific to ViewFileSystem
+        getUsagesTable().setColumnHide(5, true);
+        FsStatus fsStatus = item.fs.getStatus(item.path);
+        addToUsagesTable(item.fs.getUri(), fsStatus, "/");
+      }
+    }
+
   }
 
   /** show disk usage */
@@ -128,7 +181,7 @@ class FsUsage extends FsCommand {
     protected void processOptions(LinkedList<String> args) throws IOException {
       CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "h", "s", "x");
       cf.parse(args);
-      humanReadable = cf.getOpt("h");
+      setHumanReadable(cf.getOpt("h"));
       summary = cf.getOpt("s");
       excludeSnapshots = cf.getOpt("x");
       if (args.isEmpty()) args.add(Path.CUR_DIR);
@@ -137,10 +190,10 @@ class FsUsage extends FsCommand {
     @Override
     protected void processArguments(LinkedList<PathData> args)
         throws IOException {
-      usagesTable = new TableBuilder(3);
+      setUsagesTable(new TableBuilder(3));
       super.processArguments(args);
-      if (!usagesTable.isEmpty()) {
-        usagesTable.printToStream(out);
+      if (!getUsagesTable().isEmpty()) {
+        getUsagesTable().printToStream(out);
       }
     }
 
@@ -163,7 +216,8 @@ class FsUsage extends FsCommand {
         length -= contentSummary.getSnapshotLength();
         spaceConsumed -= contentSummary.getSnapshotSpaceConsumed();
       }
-      usagesTable.addRow(formatSize(length), formatSize(spaceConsumed), item);
+      getUsagesTable().addRow(formatSize(length),
+          formatSize(spaceConsumed), item);
     }
   }
   /** show disk usage summary */
@@ -191,6 +245,7 @@ class FsUsage extends FsCommand {
     protected List<String[]> rows;
     protected int[] widths;
     protected boolean[] rightAlign;
+    private boolean[] hide;
     
     /**
      * Create a table w/o headers
@@ -200,6 +255,7 @@ class FsUsage extends FsCommand {
       rows = new ArrayList<String[]>();
       widths = new int[columns];
       rightAlign = new boolean[columns];
+      hide = new boolean[columns];
     }
 
     /**
@@ -219,7 +275,14 @@ class FsUsage extends FsCommand {
     public void setRightAlign(int ... indexes) {
       for (int i : indexes) rightAlign[i] = true;
     }
-    
+
+    /**
+     * Hide the column at the given index.
+     */
+    public void setColumnHide(int columnIndex, boolean hideCol) {
+      hide[columnIndex] = hideCol;
+    }
+
     /**
      * Add a row of objects to the table
      * @param objects the values
@@ -234,7 +297,7 @@ class FsUsage extends FsCommand {
     }
 
     /**
-     * Render the table to a stream 
+     * Render the table to a stream.
      * @param out PrintStream for output
      */
     public void printToStream(PrintStream out) {
@@ -242,6 +305,9 @@ class FsUsage extends FsCommand {
 
       StringBuilder fmt = new StringBuilder();      
       for (int i=0; i < widths.length; i++) {
+        if (hide[i]) {
+          continue;
+        }
         if (fmt.length() != 0) fmt.append("  ");
         if (rightAlign[i]) {
           fmt.append("%"+widths[i]+"s");

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java

@@ -33,7 +33,7 @@ import org.apache.hadoop.fs.FileStatus;
  * Format sequences:<br>
  *   %a: Permissions in octal<br>
  *   %A: Permissions in symbolic style<br>
- *   %b: Size of file in blocks<br>
+ *   %b: Size of file in bytes<br>
  *   %F: Type<br>
  *   %g: Group name of owner<br>
  *   %n: Filename<br>
@@ -60,7 +60,7 @@ class Stat extends FsCommand {
     "Print statistics about the file/directory at <path>" + NEWLINE +
     "in the specified format. Format accepts permissions in" + NEWLINE +
     "octal (%a) and symbolic (%A), filesize in" + NEWLINE +
-    "blocks (%b), type (%F), group name of owner (%g)," + NEWLINE +
+    "bytes (%b), type (%F), group name of owner (%g)," + NEWLINE +
     "name (%n), block size (%o), replication (%r), user name" + NEWLINE +
     "of owner (%u), modification date (%y, %Y)." + NEWLINE +
     "%y shows UTC date as \"yyyy-MM-dd HH:mm:ss\" and" + NEWLINE +

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java

@@ -221,12 +221,23 @@ class ChRootedFileSystem extends FilterFileSystem {
     return super.getFileChecksum(fullPath(f));
   }
 
+  @Override
+  public FileChecksum getFileChecksum(final Path f, final long length)
+      throws IOException {
+    return super.getFileChecksum(fullPath(f), length);
+  }
+
   @Override
   public FileStatus getFileStatus(final Path f) 
       throws IOException {
     return super.getFileStatus(fullPath(f));
   }
 
+  @Override
+  public Path getLinkTarget(Path f) throws IOException {
+    return super.getLinkTarget(fullPath(f));
+  }
+
   @Override
   public void access(Path path, FsAction mode) throws AccessControlException,
       FileNotFoundException, IOException {

+ 112 - 24
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -23,6 +23,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
@@ -32,7 +33,6 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Map.Entry;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -48,6 +48,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
@@ -89,34 +90,35 @@ public class ViewFileSystem extends FileSystem {
     return readOnlyMountTable(operation, p.toString());
   }
 
-  static public class MountPoint {
+  /**
+   * MountPoint representation built from the configuration.
+   */
+  public static class MountPoint {
+
     /**
-     *  The source of the mount.
+     * The mounted on path location.
      */
-    private Path src;
+    private final Path mountedOnPath;
 
     /**
-     * One or more targets of the mount.
-     * Multiple targets imply MergeMount.
+     * Array of target FileSystem URIs.
      */
-    private URI[] targets;
+    private final URI[] targetFileSystemURIs;
 
-    MountPoint(Path srcPath, URI[] targetURIs) {
-      src = srcPath;
-      targets = targetURIs;
+    MountPoint(Path srcPath, URI[] targetFs) {
+      mountedOnPath = srcPath;
+      targetFileSystemURIs = targetFs;
     }
 
-    @VisibleForTesting
-    Path getSrc() {
-      return src;
+    public Path getMountedOnPath() {
+      return mountedOnPath;
     }
 
-    @VisibleForTesting
-    URI[] getTargets() {
-      return targets;
+    public URI[] getTargetFileSystemURIs() {
+      return targetFileSystemURIs;
     }
   }
-  
+
   final long creationTime; // of the the mount table
   final UserGroupInformation ugi; // the user/group of user who created mtable
   URI myUri;
@@ -133,7 +135,7 @@ public class ViewFileSystem extends FileSystem {
    * @param p path
    * @return path-part of the Path p
    */
-  private String getUriPath(final Path p) {
+  String getUriPath(final Path p) {
     checkPath(p);
     return makeAbsolute(p).toUri().getPath();
   }
@@ -348,6 +350,15 @@ public class ViewFileSystem extends FileSystem {
     return res.targetFileSystem.getFileChecksum(res.remainingPath);
   }
 
+  @Override
+  public FileChecksum getFileChecksum(final Path f, final long length)
+      throws AccessControlException, FileNotFoundException,
+      IOException {
+    InodeTree.ResolveResult<FileSystem> res =
+        fsState.resolve(getUriPath(f), true);
+    return res.targetFileSystem.getFileChecksum(res.remainingPath, length);
+  }
+
   private static FileStatus fixFileStatus(FileStatus orig,
       Path qualified) throws IOException {
     // FileStatus#getPath is a fully qualified path relative to the root of
@@ -731,8 +742,8 @@ public class ViewFileSystem extends FileSystem {
     
     MountPoint[] result = new MountPoint[mountPoints.size()];
     for ( int i = 0; i < mountPoints.size(); ++i ) {
-      result[i] = new MountPoint(new Path(mountPoints.get(i).src), 
-                              mountPoints.get(i).target.targetDirLinkList);
+      result[i] = new MountPoint(new Path(mountPoints.get(i).src),
+          mountPoints.get(i).target.targetDirLinkList);
     }
     return result;
   }
@@ -799,6 +810,83 @@ public class ViewFileSystem extends FileSystem {
     return allPolicies;
   }
 
+  /**
+   * Get the trash root directory for current user when the path
+   * specified is deleted.
+   *
+   * @param path the trash root of the path to be determined.
+   * @return the trash root path.
+   */
+  @Override
+  public Path getTrashRoot(Path path) {
+    try {
+      InodeTree.ResolveResult<FileSystem> res =
+          fsState.resolve(getUriPath(path), true);
+      return res.targetFileSystem.getTrashRoot(res.remainingPath);
+    } catch (Exception e) {
+      throw new NotInMountpointException(path, "getTrashRoot");
+    }
+  }
+
+  /**
+   * Get all the trash roots for current user or all users.
+   *
+   * @param allUsers return trash roots for all users if true.
+   * @return all Trash root directories.
+   */
+  @Override
+  public Collection<FileStatus> getTrashRoots(boolean allUsers) {
+    List<FileStatus> trashRoots = new ArrayList<>();
+    for (FileSystem fs : getChildFileSystems()) {
+      trashRoots.addAll(fs.getTrashRoots(allUsers));
+    }
+    return trashRoots;
+  }
+
+  @Override
+  public FsStatus getStatus() throws IOException {
+    return getStatus(null);
+  }
+
+  @Override
+  public FsStatus getStatus(Path p) throws IOException {
+    if (p == null) {
+      p = InodeTree.SlashPath;
+    }
+    InodeTree.ResolveResult<FileSystem> res = fsState.resolve(
+        getUriPath(p), true);
+    return res.targetFileSystem.getStatus(p);
+  }
+
+  /**
+   * Return the total size of all files under "/", if {@link
+   * Constants#CONFIG_VIEWFS_LINK_MERGE_SLASH} is supported and is a valid
+   * mount point. Else, throw NotInMountpointException.
+   *
+   * @throws IOException
+   */
+  @Override
+  public long getUsed() throws IOException {
+    InodeTree.ResolveResult<FileSystem> res = fsState.resolve(
+        getUriPath(InodeTree.SlashPath), true);
+    if (res.isInternalDir()) {
+      throw new NotInMountpointException(InodeTree.SlashPath, "getUsed");
+    } else {
+      return res.targetFileSystem.getUsed();
+    }
+  }
+
+  @Override
+  public Path getLinkTarget(Path path) throws IOException {
+    InodeTree.ResolveResult<FileSystem> res;
+    try {
+      res = fsState.resolve(getUriPath(path), true);
+    } catch (FileNotFoundException e) {
+      throw new NotInMountpointException(path, "getLinkTarget");
+    }
+    return res.targetFileSystem.getLinkTarget(res.remainingPath);
+  }
+
   /**
    * An instance of this class represents an internal dir of the viewFs
    * that is internal dir of the mount table.
@@ -901,7 +989,7 @@ public class ViewFileSystem extends FileSystem {
     public FileStatus getFileStatus(Path f) throws IOException {
       checkPathIsSlash(f);
       return new FileStatus(0, true, 0, 0, creationTime, creationTime,
-          PERMISSION_555, ugi.getUserName(), ugi.getPrimaryGroupName(),
+          PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(),
 
           new Path(theInternalDir.fullPath).makeQualified(
               myUri, ROOT_PATH));
@@ -922,14 +1010,14 @@ public class ViewFileSystem extends FileSystem {
 
           result[i++] = new FileStatus(0, false, 0, 0,
             creationTime, creationTime, PERMISSION_555,
-            ugi.getUserName(), ugi.getPrimaryGroupName(),
+            ugi.getShortUserName(), ugi.getPrimaryGroupName(),
             link.getTargetLink(),
             new Path(inode.fullPath).makeQualified(
                 myUri, null));
         } else {
           result[i++] = new FileStatus(0, true, 0, 0,
             creationTime, creationTime, PERMISSION_555,
-            ugi.getUserName(), ugi.getGroupNames()[0],
+            ugi.getShortUserName(), ugi.getGroupNames()[0],
             new Path(inode.fullPath).makeQualified(
                 myUri, null));
         }
@@ -1053,7 +1141,7 @@ public class ViewFileSystem extends FileSystem {
     @Override
     public AclStatus getAclStatus(Path path) throws IOException {
       checkPathIsSlash(path);
-      return new AclStatus.Builder().owner(ugi.getUserName())
+      return new AclStatus.Builder().owner(ugi.getShortUserName())
           .group(ugi.getPrimaryGroupName())
           .addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
           .stickyBit(false).build();

+ 164 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystemUtil.java

@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsConstants;
+import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.fs.viewfs.ViewFileSystem.MountPoint;
+
+/**
+ * Utility APIs for ViewFileSystem.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class ViewFileSystemUtil {
+
+  private ViewFileSystemUtil() {
+    // Private Constructor
+  }
+
+  /**
+   * Check if the FileSystem is a ViewFileSystem.
+   *
+   * @param fileSystem the FileSystem to check
+   * @return true if the fileSystem is ViewFileSystem
+   */
+  public static boolean isViewFileSystem(final FileSystem fileSystem) {
+    return fileSystem.getScheme().equals(FsConstants.VIEWFS_SCHEME);
+  }
+
+  /**
+   * Get FsStatus for all ViewFsMountPoints matching path for the given
+   * ViewFileSystem.
+   *
+   * Say ViewFileSystem has following mount points configured
+   *  (1) hdfs://NN0_host:port/sales mounted on /dept/sales
+   *  (2) hdfs://NN1_host:port/marketing mounted on /dept/marketing
+   *  (3) hdfs://NN2_host:port/eng_usa mounted on /dept/eng/usa
+   *  (4) hdfs://NN3_host:port/eng_asia mounted on /dept/eng/asia
+   *
+   * For the above config, here is a sample list of paths and their matching
+   * mount points while getting FsStatus
+   *
+   *  Path                  Description                      Matching MountPoint
+   *
+   *  "/"                   Root ViewFileSystem lists all    (1), (2), (3), (4)
+   *                         mount points.
+   *
+   *  "/dept"               Not a mount point, but a valid   (1), (2), (3), (4)
+   *                         internal dir in the mount tree
+   *                         and resolved down to "/" path.
+   *
+   *  "/dept/sales"         Matches a mount point            (1)
+   *
+   *  "/dept/sales/india"   Path is over a valid mount point (1)
+   *                         and resolved down to
+   *                         "/dept/sales"
+   *
+   *  "/dept/eng"           Not a mount point, but a valid   (1), (2), (3), (4)
+   *                         internal dir in the mount tree
+   *                         and resolved down to "/" path.
+   *
+   *  "/erp"                Doesn't match or leads to or
+   *                         over any valid mount points     None
+   *
+   *
+   * @param fileSystem - ViewFileSystem on which mount point exists
+   * @param path - URI for which FsStatus is requested
+   * @return Map of ViewFsMountPoint and FsStatus
+   */
+  public static Map<MountPoint, FsStatus> getStatus(
+      FileSystem fileSystem, Path path) throws IOException {
+    if (!isViewFileSystem(fileSystem)) {
+      throw new UnsupportedFileSystemException("FileSystem '"
+          + fileSystem.getUri() + "' is not a ViewFileSystem.");
+    }
+    ViewFileSystem viewFileSystem = (ViewFileSystem) fileSystem;
+    String viewFsUriPath = viewFileSystem.getUriPath(path);
+    boolean isPathOverMountPoint = false;
+    boolean isPathLeadingToMountPoint = false;
+    boolean isPathIncludesAllMountPoint = false;
+    Map<MountPoint, FsStatus> mountPointMap = new HashMap<>();
+    for (MountPoint mountPoint : viewFileSystem.getMountPoints()) {
+      String[] mountPointPathComponents = InodeTree.breakIntoPathComponents(
+          mountPoint.getMountedOnPath().toString());
+      String[] incomingPathComponents =
+          InodeTree.breakIntoPathComponents(viewFsUriPath);
+
+      int pathCompIndex;
+      for (pathCompIndex = 0; pathCompIndex < mountPointPathComponents.length &&
+          pathCompIndex < incomingPathComponents.length; pathCompIndex++) {
+        if (!mountPointPathComponents[pathCompIndex].equals(
+            incomingPathComponents[pathCompIndex])) {
+          break;
+        }
+      }
+
+      if (pathCompIndex >= mountPointPathComponents.length) {
+        // Path matches or is over a valid mount point
+        isPathOverMountPoint = true;
+        mountPointMap.clear();
+        updateMountPointFsStatus(viewFileSystem, mountPointMap, mountPoint,
+            new Path(viewFsUriPath));
+        break;
+      } else {
+        if (pathCompIndex > 1) {
+          // Path is in the mount tree
+          isPathLeadingToMountPoint = true;
+        } else if (incomingPathComponents.length <= 1) {
+          // Special case of "/" path
+          isPathIncludesAllMountPoint = true;
+        }
+        updateMountPointFsStatus(viewFileSystem, mountPointMap, mountPoint,
+            mountPoint.getMountedOnPath());
+      }
+    }
+
+    if (!isPathOverMountPoint && !isPathLeadingToMountPoint &&
+        !isPathIncludesAllMountPoint) {
+      throw new NotInMountpointException(path, "getStatus");
+    }
+    return mountPointMap;
+  }
+
+  /**
+   * Update FsStatus for the given mount point.
+   *
+   * @param viewFileSystem the ViewFileSystem to query
+   * @param mountPointMap map from MountPoint to FsStatus being populated
+   * @param mountPoint the mount point whose status is being recorded
+   * @param path the path on which getStatus is invoked
+   * @throws IOException if the underlying getStatus call fails
+   */
+  private static void updateMountPointFsStatus(
+      final ViewFileSystem viewFileSystem,
+      final Map<MountPoint, FsStatus> mountPointMap,
+      final MountPoint mountPoint, final Path path) throws IOException {
+    FsStatus fsStatus = viewFileSystem.getStatus(path);
+    mountPointMap.put(mountPoint, fsStatus);
+  }
+
+}
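
For illustration, a minimal sketch of how a client could call the new helper. This is a hedged example, not part of the patch: it assumes fs.defaultFS points at a viewfs:// mount table like the one in the Javadoc above, "/dept/sales" is the hypothetical path from that example, and the accessor names (getMountedOnPath, getUsed, getRemaining) follow those used in the patch.

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ViewFileSystem.MountPoint;
import org.apache.hadoop.fs.viewfs.ViewFileSystemUtil;

public class ViewFsStatusExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (!ViewFileSystemUtil.isViewFileSystem(fs)) {
      System.err.println("Not a ViewFileSystem: " + fs.getUri());
      return;
    }
    // "/dept/sales" resolves to mount point (1) in the Javadoc example above.
    Map<MountPoint, FsStatus> statuses =
        ViewFileSystemUtil.getStatus(fs, new Path("/dept/sales"));
    for (Map.Entry<MountPoint, FsStatus> e : statuses.entrySet()) {
      FsStatus st = e.getValue();
      System.out.println(e.getKey().getMountedOnPath() + ": used="
          + st.getUsed() + " remaining=" + st.getRemaining());
    }
  }
}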

+ 6 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -845,7 +845,7 @@ public class ViewFs extends AbstractFileSystem {
     public FileStatus getFileStatus(final Path f) throws IOException {
       checkPathIsSlash(f);
       return new FileStatus(0, true, 0, 0, creationTime, creationTime,
-          PERMISSION_555, ugi.getUserName(), ugi.getPrimaryGroupName(),
+          PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(),
           new Path(theInternalDir.fullPath).makeQualified(
               myUri, null));
     }
@@ -865,13 +865,13 @@ public class ViewFs extends AbstractFileSystem {
         INodeLink<AbstractFileSystem> inodelink = 
           (INodeLink<AbstractFileSystem>) inode;
         result = new FileStatus(0, false, 0, 0, creationTime, creationTime,
-            PERMISSION_555, ugi.getUserName(), ugi.getPrimaryGroupName(),
+            PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(),
             inodelink.getTargetLink(),
             new Path(inode.fullPath).makeQualified(
                 myUri, null));
       } else {
         result = new FileStatus(0, true, 0, 0, creationTime, creationTime,
-          PERMISSION_555, ugi.getUserName(), ugi.getPrimaryGroupName(),
+          PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(),
           new Path(inode.fullPath).makeQualified(
               myUri, null));
       }
@@ -910,14 +910,14 @@ public class ViewFs extends AbstractFileSystem {
 
           result[i++] = new FileStatus(0, false, 0, 0,
             creationTime, creationTime,
-            PERMISSION_555, ugi.getUserName(), ugi.getPrimaryGroupName(),
+            PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(),
             link.getTargetLink(),
             new Path(inode.fullPath).makeQualified(
                 myUri, null));
         } else {
           result[i++] = new FileStatus(0, true, 0, 0,
             creationTime, creationTime,
-            PERMISSION_555, ugi.getUserName(), ugi.getGroupNames()[0],
+            PERMISSION_555, ugi.getShortUserName(), ugi.getGroupNames()[0],
             new Path(inode.fullPath).makeQualified(
                 myUri, null));
         }
@@ -1043,7 +1043,7 @@ public class ViewFs extends AbstractFileSystem {
     @Override
     public AclStatus getAclStatus(Path path) throws IOException {
       checkPathIsSlash(path);
-      return new AclStatus.Builder().owner(ugi.getUserName())
+      return new AclStatus.Builder().owner(ugi.getShortUserName())
           .group(ugi.getPrimaryGroupName())
           .addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
           .stickyBit(false).build();

+ 34 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -346,8 +346,13 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
         createWithRetries(prefixPath, new byte[]{}, zkAcl, CreateMode.PERSISTENT);
       } catch (KeeperException e) {
         if (isNodeExists(e.code())) {
-          // This is OK - just ensuring existence.
-          continue;
+          // Set ACLs for parent node, if they do not exist or are different
+          try {
+            setAclsWithRetries(prefixPath);
+          } catch (KeeperException e1) {
+            throw new IOException("Couldn't set ACLs on parent ZNode: " +
+                prefixPath, e1);
+          }
         } else {
           throw new IOException("Couldn't create " + prefixPath, e);
         }
@@ -1066,14 +1071,36 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     });
   }
 
+  private void setAclsWithRetries(final String path)
+      throws KeeperException, InterruptedException {
+    Stat stat = new Stat();
+    zkDoWithRetries(new ZKAction<Void>() {
+      @Override
+      public Void run() throws KeeperException, InterruptedException {
+        List<ACL> acl = zkClient.getACL(path, stat);
+        if (acl == null || !acl.containsAll(zkAcl) ||
+            !zkAcl.containsAll(acl)) {
+          zkClient.setACL(path, zkAcl, stat.getVersion());
+        }
+        return null;
+      }
+    }, Code.BADVERSION);
+  }
+
   private <T> T zkDoWithRetries(ZKAction<T> action) throws KeeperException,
       InterruptedException {
+    return zkDoWithRetries(action, null);
+  }
+
+  private <T> T zkDoWithRetries(ZKAction<T> action, Code retryCode)
+      throws KeeperException, InterruptedException {
     int retry = 0;
     while (true) {
       try {
         return action.run();
       } catch (KeeperException ke) {
-        if (shouldRetry(ke.code()) && ++retry < maxRetryNum) {
+        if ((shouldRetry(ke.code()) || shouldRetry(ke.code(), retryCode))
+            && ++retry < maxRetryNum) {
           continue;
         }
         throw ke;
@@ -1189,6 +1216,10 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   private static boolean shouldRetry(Code code) {
     return code == Code.CONNECTIONLOSS || code == Code.OPERATIONTIMEOUT;
   }
+
+  private static boolean shouldRetry(Code code, Code retryIfCode) {
+    return (retryIfCode == null ? false : retryIfCode == code);
+  }
   
   @Override
   public String toString() {

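The new setAclsWithRetries follows a common ZooKeeper pattern: compare the node's current ACL with the desired one, reset it only on mismatch, and retry on BADVERSION. Below is a standalone sketch of that pattern against a raw ZooKeeper client; the elector's own fields, retry bookkeeping, and connection-loss handling are omitted, and all names here are illustrative.

import java.util.List;

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.Code;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;

public final class EnsureZnodeAclExample {
  /** Reset the ACL on path if it differs from desiredAcl, retrying on BADVERSION. */
  static void ensureAcl(ZooKeeper zk, String path, List<ACL> desiredAcl,
      int maxRetries) throws KeeperException, InterruptedException {
    for (int attempt = 0; ; attempt++) {
      Stat stat = new Stat();
      List<ACL> current = zk.getACL(path, stat);
      if (current != null && current.containsAll(desiredAcl)
          && desiredAcl.containsAll(current)) {
        return;                                // ACLs already match
      }
      try {
        // setACL expects the node's ACL version (aversion).
        zk.setACL(path, desiredAcl, stat.getAversion());
        return;
      } catch (KeeperException e) {
        // Another writer changed the ACL between getACL and setACL.
        if (e.code() != Code.BADVERSION || attempt >= maxRetries) {
          throw e;
        }
      }
    }
  }
}
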
+ 43 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java

@@ -80,6 +80,8 @@ public abstract class HAAdmin extends Configured implements Tool {
             "--" + FORCEACTIVE + " option is used."))
     .put("-getServiceState",
         new UsageInfo("<serviceId>", "Returns the state of the service"))
+      .put("-getAllServiceState",
+          new UsageInfo(null, "Returns the state of all the services"))
     .put("-checkHealth",
         new UsageInfo("<serviceId>",
             "Requests that the service perform a health check.\n" + 
@@ -119,7 +121,11 @@ public abstract class HAAdmin extends Configured implements Tool {
       String cmd = e.getKey();
       UsageInfo usage = e.getValue();
       
-      errOut.println("    [" + cmd + " " + usage.args + "]"); 
+      if (usage.args == null) {
+        errOut.println("    [" + cmd + "]");
+      } else {
+        errOut.println("    [" + cmd + " " + usage.args + "]");
+      }
     }
     errOut.println();
     ToolRunner.printGenericCommandUsage(errOut);    
@@ -130,7 +136,11 @@ public abstract class HAAdmin extends Configured implements Tool {
     if (usage == null) {
       throw new RuntimeException("No usage for cmd " + cmd);
     }
-    errOut.println(getUsageString() + " [" + cmd + " " + usage.args + "]");
+    if (usage.args == null) {
+      errOut.println(getUsageString() + " [" + cmd + "]");
+    } else {
+      errOut.println(getUsageString() + " [" + cmd + " " + usage.args + "]");
+    }
   }
 
   private int transitionToActive(final CommandLine cmd)
@@ -455,6 +465,8 @@ public abstract class HAAdmin extends Configured implements Tool {
       return failover(cmdLine);
     } else if ("-getServiceState".equals(cmd)) {
       return getServiceState(cmdLine);
+    } else if ("-getAllServiceState".equals(cmd)) {
+      return getAllServiceState();
     } else if ("-checkHealth".equals(cmd)) {
       return checkHealth(cmdLine);
     } else if ("-help".equals(cmd)) {
@@ -465,7 +477,30 @@ public abstract class HAAdmin extends Configured implements Tool {
       throw new AssertionError("Should not get here, command: " + cmd);
     } 
   }
-  
+
+  protected int getAllServiceState() {
+    Collection<String> targetIds = getTargetIds(null);
+    if (targetIds.isEmpty()) {
+      errOut.println("Failed to get service IDs");
+      return -1;
+    }
+    for (String targetId : targetIds) {
+      HAServiceTarget target = resolveTarget(targetId);
+      String address = target.getAddress().getHostName() + ":"
+          + target.getAddress().getPort();
+      try {
+        HAServiceProtocol proto = target.getProxy(getConf(),
+            rpcTimeoutForChecks);
+        out.println(String.format("%-50s %-10s", address, proto
+            .getServiceStatus().getState()));
+      } catch (IOException e) {
+        out.println(String.format("%-50s %-10s", address,
+            "Failed to connect: " + e.getMessage()));
+      }
+    }
+    return 0;
+  }
+
   private boolean confirmForceManual() throws IOException {
      return ToolRunner.confirmPrompt(
         "You have specified the --" + FORCEMANUAL + " flag. This flag is " +
@@ -532,7 +567,11 @@ public abstract class HAAdmin extends Configured implements Tool {
       return -1;
     }
     
-    out.println(cmd + " [" + usageInfo.args + "]: " + usageInfo.help);
+    if (usageInfo.args == null) {
+      out.println(cmd + ": " + usageInfo.help);
+    } else {
+      out.println(cmd + " [" + usageInfo.args + "]: " + usageInfo.help);
+    }
     return 0;
   }
   

+ 5 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java

@@ -84,8 +84,11 @@ public abstract class ZKFailoverController {
     ZK_AUTH_KEY
   };
   
-  protected static final String USAGE = 
-      "Usage: hdfs zkfc [ -formatZK [-force] [-nonInteractive] ]";
+  protected static final String USAGE =
+      "Usage: hdfs zkfc [ -formatZK [-force] [-nonInteractive] ]\n"
+      + "\t-force: formats the znode if the znode exists.\n"
+      + "\t-nonInteractive: formats the znode aborts if the znode exists,\n"
+      + "\tunless -force option is specified.";
 
   /** Unable to format the parent znode in ZK */
   static final int ERR_CODE_FORMAT_DENIED = 2;

+ 219 - 63
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java

@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.http;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
+
+import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -45,7 +49,10 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletRequestWrapper;
 import javax.servlet.http.HttpServletResponse;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.sun.jersey.spi.container.servlet.ServletContainer;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -53,15 +60,17 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.ConfServlet;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.security.AuthenticationFilterInitializer;
-import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
+import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Shell;
 import org.eclipse.jetty.http.HttpVersion;
@@ -90,16 +99,9 @@ import org.eclipse.jetty.servlet.ServletHolder;
 import org.eclipse.jetty.servlet.ServletMapping;
 import org.eclipse.jetty.util.ArrayUtil;
 import org.eclipse.jetty.util.MultiException;
-import org.eclipse.jetty.webapp.WebAppContext;
-import org.eclipse.jetty.util.thread.QueuedThreadPool;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.sun.jersey.spi.container.servlet.ServletContainer;
 import org.eclipse.jetty.util.ssl.SslContextFactory;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
+import org.eclipse.jetty.util.thread.QueuedThreadPool;
+import org.eclipse.jetty.webapp.WebAppContext;
 
 /**
  * Create a Jetty embedded server to answer http requests. The primary goal is
@@ -116,9 +118,20 @@ import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_US
 public final class HttpServer2 implements FilterContainer {
   public static final Log LOG = LogFactory.getLog(HttpServer2.class);
 
+  public static final String HTTP_SCHEME = "http";
+  public static final String HTTPS_SCHEME = "https";
+
+  public static final String HTTP_MAX_REQUEST_HEADER_SIZE_KEY =
+      "hadoop.http.max.request.header.size";
+  public static final int HTTP_MAX_REQUEST_HEADER_SIZE_DEFAULT = 65536;
+  public static final String HTTP_MAX_RESPONSE_HEADER_SIZE_KEY =
+      "hadoop.http.max.response.header.size";
+  public static final int HTTP_MAX_RESPONSE_HEADER_SIZE_DEFAULT = 65536;
+  public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
+  public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir";
+
   static final String FILTER_INITIALIZER_PROPERTY
       = "hadoop.http.filter.initializers";
-  public static final String HTTP_MAX_THREADS = "hadoop.http.max.threads";
 
   // The ServletContext attribute where the daemon Configuration
   // gets stored.
@@ -139,6 +152,7 @@ public final class HttpServer2 implements FilterContainer {
 
   protected final WebAppContext webAppContext;
   protected final boolean findPort;
+  protected final IntegerRanges portRanges;
   private final Map<ServletContextHandler, Boolean> defaultContexts =
       new HashMap<>();
   protected final List<String> filterNames = new ArrayList<>();
@@ -158,6 +172,7 @@ public final class HttpServer2 implements FilterContainer {
     private ArrayList<URI> endpoints = Lists.newArrayList();
     private String name;
     private Configuration conf;
+    private Configuration sslConf;
     private String[] pathSpecs;
     private AccessControlList adminsAcl;
     private boolean securityEnabled = false;
@@ -176,6 +191,7 @@ public final class HttpServer2 implements FilterContainer {
     private String keyPassword;
 
     private boolean findPort;
+    private IntegerRanges portRanges = null;
 
     private String hostName;
     private boolean disallowFallbackToRandomSignerSecretProvider;
@@ -248,11 +264,25 @@ public final class HttpServer2 implements FilterContainer {
       return this;
     }
 
+    public Builder setPortRanges(IntegerRanges ranges) {
+      this.portRanges = ranges;
+      return this;
+    }
+
     public Builder setConf(Configuration conf) {
       this.conf = conf;
       return this;
     }
 
+    /**
+     * Specify the SSL configuration to load. This API provides an alternative
+     * to keyStore/keyPassword/trustStore.
+     */
+    public Builder setSSLConf(Configuration sslCnf) {
+      this.sslConf = sslCnf;
+      return this;
+    }
+
     public Builder setPathSpec(String[] pathSpec) {
       this.pathSpecs = pathSpec;
       return this;
@@ -315,7 +345,45 @@ public final class HttpServer2 implements FilterContainer {
       return this;
     }
 
+    /**
+     * A wrapper of {@link Configuration#getPassword(String)}. It returns
+     * <code>String</code> instead of <code>char[]</code> and throws
+     * {@link IOException} when the password is not found.
+     *
+     * @param conf the configuration
+     * @param name the property name
+     * @return the password string
+     */
+    private static String getPassword(Configuration conf, String name)
+        throws IOException {
+      char[] passchars = conf.getPassword(name);
+      if (passchars == null) {
+        throw new IOException("Password " + name + " not found");
+      }
+      return new String(passchars);
+    }
 
+    /**
+     * Load SSL properties from the SSL configuration.
+     */
+    private void loadSSLConfiguration() throws IOException {
+      if (sslConf == null) {
+        return;
+      }
+      needsClientAuth(sslConf.getBoolean(
+          SSLFactory.SSL_SERVER_NEED_CLIENT_AUTH,
+          SSLFactory.SSL_SERVER_NEED_CLIENT_AUTH_DEFAULT));
+      keyStore(sslConf.get(SSLFactory.SSL_SERVER_KEYSTORE_LOCATION),
+          getPassword(sslConf, SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD),
+          sslConf.get(SSLFactory.SSL_SERVER_KEYSTORE_TYPE,
+              SSLFactory.SSL_SERVER_KEYSTORE_TYPE_DEFAULT));
+      keyPassword(getPassword(sslConf,
+          SSLFactory.SSL_SERVER_KEYSTORE_KEYPASSWORD));
+      trustStore(sslConf.get(SSLFactory.SSL_SERVER_TRUSTSTORE_LOCATION),
+          getPassword(sslConf, SSLFactory.SSL_SERVER_TRUSTSTORE_PASSWORD),
+          sslConf.get(SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE,
+              SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE_DEFAULT));
+    }
 
     public HttpServer2 build() throws IOException {
       Preconditions.checkNotNull(name, "name is not set");
@@ -335,15 +403,33 @@ public final class HttpServer2 implements FilterContainer {
         server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey);
       }
 
+      for (URI ep : endpoints) {
+        if (HTTPS_SCHEME.equals(ep.getScheme())) {
+          loadSSLConfiguration();
+          break;
+        }
+      }
+
+      int requestHeaderSize = conf.getInt(
+          HTTP_MAX_REQUEST_HEADER_SIZE_KEY,
+          HTTP_MAX_REQUEST_HEADER_SIZE_DEFAULT);
+      int responseHeaderSize = conf.getInt(
+          HTTP_MAX_RESPONSE_HEADER_SIZE_KEY,
+          HTTP_MAX_RESPONSE_HEADER_SIZE_DEFAULT);
+
+      HttpConfiguration httpConfig = new HttpConfiguration();
+      httpConfig.setRequestHeaderSize(requestHeaderSize);
+      httpConfig.setResponseHeaderSize(responseHeaderSize);
+
       for (URI ep : endpoints) {
         final ServerConnector connector;
         String scheme = ep.getScheme();
-        if ("http".equals(scheme)) {
-          connector =
-              HttpServer2.createDefaultChannelConnector(server.webServer);
-        } else if ("https".equals(scheme)) {
-          connector = createHttpsChannelConnector(server.webServer);
-
+        if (HTTP_SCHEME.equals(scheme)) {
+          connector = createHttpChannelConnector(server.webServer,
+              httpConfig);
+        } else if (HTTPS_SCHEME.equals(scheme)) {
+          connector = createHttpsChannelConnector(server.webServer,
+              httpConfig);
         } else {
           throw new HadoopIllegalArgumentException(
               "unknown scheme for endpoint:" + ep);
@@ -356,16 +442,20 @@ public final class HttpServer2 implements FilterContainer {
       return server;
     }
 
-    private ServerConnector createHttpsChannelConnector(Server server) {
+    private ServerConnector createHttpChannelConnector(
+        Server server, HttpConfiguration httpConfig) {
       ServerConnector conn = new ServerConnector(server);
-      HttpConfiguration httpConfig = new HttpConfiguration();
-      httpConfig.setRequestHeaderSize(JettyUtils.HEADER_SIZE);
-      httpConfig.setResponseHeaderSize(JettyUtils.HEADER_SIZE);
-      httpConfig.setSecureScheme("https");
-      httpConfig.addCustomizer(new SecureRequestCustomizer());
       ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
       conn.addConnectionFactory(connFactory);
       configureChannelConnector(conn);
+      return conn;
+    }
+
+    private ServerConnector createHttpsChannelConnector(
+        Server server, HttpConfiguration httpConfig) {
+      httpConfig.setSecureScheme(HTTPS_SCHEME);
+      httpConfig.addCustomizer(new SecureRequestCustomizer());
+      ServerConnector conn = createHttpChannelConnector(server, httpConfig);
 
       SslContextFactory sslContextFactory = new SslContextFactory();
       sslContextFactory.setNeedClientAuth(needsClientAuth);
@@ -397,7 +487,7 @@ public final class HttpServer2 implements FilterContainer {
     this.webServer = new Server();
     this.adminsAcl = b.adminsAcl;
     this.handlers = new HandlerCollection();
-    this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, appDir);
+    this.webAppContext = createWebAppContext(b, adminsAcl, appDir);
     this.xFrameOptionIsEnabled = b.xFrameEnabled;
     this.xFrameOption = b.xFrameOption;
 
@@ -414,6 +504,7 @@ public final class HttpServer2 implements FilterContainer {
     }
 
     this.findPort = b.findPort;
+    this.portRanges = b.portRanges;
     initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs);
   }
 
@@ -423,7 +514,7 @@ public final class HttpServer2 implements FilterContainer {
 
     Preconditions.checkNotNull(webAppContext);
 
-    int maxThreads = conf.getInt(HTTP_MAX_THREADS, -1);
+    int maxThreads = conf.getInt(HTTP_MAX_THREADS_KEY, -1);
     // If HTTP_MAX_THREADS is not configured, QueueThreadPool() will use the
     // default value (currently 250).
 
@@ -482,8 +573,8 @@ public final class HttpServer2 implements FilterContainer {
     listeners.add(connector);
   }
 
-  private static WebAppContext createWebAppContext(String name,
-      Configuration conf, AccessControlList adminsAcl, final String appDir) {
+  private static WebAppContext createWebAppContext(Builder b,
+      AccessControlList adminsAcl, final String appDir) {
     WebAppContext ctx = new WebAppContext();
     ctx.setDefaultsDescriptor(null);
     ServletHolder holder = new ServletHolder(new DefaultServlet());
@@ -496,10 +587,15 @@ public final class HttpServer2 implements FilterContainer {
     holder.setInitParameters(params);
     ctx.setWelcomeFiles(new String[] {"index.html"});
     ctx.addServlet(holder, "/");
-    ctx.setDisplayName(name);
+    ctx.setDisplayName(b.name);
     ctx.setContextPath("/");
-    ctx.setWar(appDir + "/" + name);
-    ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
+    ctx.setWar(appDir + "/" + b.name);
+    String tempDirectory = b.conf.get(HTTP_TEMP_DIR_KEY);
+    if (tempDirectory != null && !tempDirectory.isEmpty()) {
+      ctx.setTempDirectory(new File(tempDirectory));
+      ctx.setAttribute("javax.servlet.context.tempdir", tempDirectory);
+    }
+    ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, b.conf);
     ctx.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
     addNoCacheFilter(ctx);
     return ctx;
@@ -541,18 +637,6 @@ public final class HttpServer2 implements FilterContainer {
     }
   }
 
-  @InterfaceAudience.Private
-  public static ServerConnector createDefaultChannelConnector(Server server) {
-    ServerConnector conn = new ServerConnector(server);
-    HttpConfiguration httpConfig = new HttpConfiguration();
-    httpConfig.setRequestHeaderSize(JettyUtils.HEADER_SIZE);
-    httpConfig.setResponseHeaderSize(JettyUtils.HEADER_SIZE);
-    ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
-    conn.addConnectionFactory(connFactory);
-    configureChannelConnector(conn);
-    return conn;
-  }
-
   /** Get an array of FilterConfiguration specified in the conf */
   private static FilterInitializer[] getFilterInitializers(Configuration conf) {
     if (conf == null) {
@@ -1004,6 +1088,93 @@ public final class HttpServer2 implements FilterContainer {
     }
   }
 
+  /**
+   * Bind the listener by closing and reopening it.
+   * @param listener the Jetty connector to bind
+   * @throws Exception if the listener cannot be opened
+   */
+  private static void bindListener(ServerConnector listener) throws Exception {
+    // jetty has a bug where you can't reopen a listener that previously
+    // failed to open w/o issuing a close first, even if the port is changed
+    listener.close();
+    listener.open();
+    LOG.info("Jetty bound to port " + listener.getLocalPort());
+  }
+
+  /**
+   * Create a BindException, naming the host and port in use, that wraps
+   * the bind exception actually thrown.
+   * @param listener the connector that failed to bind
+   * @param ex the original bind exception, may be null
+   * @return the wrapping BindException
+   */
+  private static BindException constructBindException(ServerConnector listener,
+      BindException ex) {
+    BindException be = new BindException("Port in use: "
+        + listener.getHost() + ":" + listener.getPort());
+    if (ex != null) {
+      be.initCause(ex);
+    }
+    return be;
+  }
+
+  /**
+   * Bind using the single configured port. If findPort is true, keep
+   * incrementing the port until a free port is found.
+   * @param listener jetty listener.
+   * @param port port which is set in the listener.
+   * @throws Exception if no free port can be bound
+   */
+  private void bindForSinglePort(ServerConnector listener, int port)
+      throws Exception {
+    while (true) {
+      try {
+        bindListener(listener);
+        break;
+      } catch (BindException ex) {
+        if (port == 0 || !findPort) {
+          throw constructBindException(listener, ex);
+        }
+      }
+      // try the next port number
+      listener.setPort(++port);
+      Thread.sleep(100);
+    }
+  }
+
+  /**
+   * Bind using port ranges. Keep looking for a free port in the configured
+   * range and throw a BindException if none of the ports binds.
+   * @param listener jetty listener.
+   * @param startPort initial port which is set in the listener.
+   * @throws Exception if no port in the configured range can be bound
+   */
+  private void bindForPortRange(ServerConnector listener, int startPort)
+      throws Exception {
+    BindException bindException = null;
+    try {
+      bindListener(listener);
+      return;
+    } catch (BindException ex) {
+      // Ignore exception.
+      bindException = ex;
+    }
+    for(Integer port : portRanges) {
+      if (port == startPort) {
+        continue;
+      }
+      Thread.sleep(100);
+      listener.setPort(port);
+      try {
+        bindListener(listener);
+        return;
+      } catch (BindException ex) {
+        // Ignore exception. Move to next port.
+        bindException = ex;
+      }
+    }
+    throw constructBindException(listener, bindException);
+  }
+
   /**
    * Open the main listener for the server
    * @throws Exception
@@ -1016,25 +1187,10 @@ public final class HttpServer2 implements FilterContainer {
         continue;
       }
       int port = listener.getPort();
-      while (true) {
-        // jetty has a bug where you can't reopen a listener that previously
-        // failed to open w/o issuing a close first, even if the port is changed
-        try {
-          listener.close();
-          listener.open();
-          LOG.info("Jetty bound to port " + listener.getLocalPort());
-          break;
-        } catch (BindException ex) {
-          if (port == 0 || !findPort) {
-            BindException be = new BindException("Port in use: "
-                + listener.getHost() + ":" + listener.getPort());
-            be.initCause(ex);
-            throw be;
-          }
-        }
-        // try the next port number
-        listener.setPort(++port);
-        Thread.sleep(100);
+      if (portRanges != null && port != 0) {
+        bindForPortRange(listener, port);
+      } else {
+        bindForSinglePort(listener, port);
       }
     }
   }
@@ -1056,7 +1212,7 @@ public final class HttpServer2 implements FilterContainer {
     }
 
     try {
-      // explicitly destroy the secrete provider
+      // explicitly destroy the secret provider
       secretProvider.destroy();
       // clear & stop webAppContext attributes to avoid memory leaks.
       webAppContext.clearAttributes();

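A hedged sketch of how a daemon might use the new builder knobs. setName, addEndpoint, setConf, setFindPort, and start are pre-existing HttpServer2 APIs; HTTP_MAX_REQUEST_HEADER_SIZE_KEY and setPortRanges are added by this change; the endpoint, the port numbers, and the "my.daemon.http.port.range" key are illustrative assumptions.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.http.HttpServer2;

public class HttpServerBuilderExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // New key from this change; allow 128 KB request headers instead of 64 KB.
    conf.setInt(HttpServer2.HTTP_MAX_REQUEST_HEADER_SIZE_KEY, 128 * 1024);
    // Hypothetical key holding a range string such as "50070-50080".
    IntegerRanges ports = conf.getRange("my.daemon.http.port.range", "");

    HttpServer2 server = new HttpServer2.Builder()
        .setName("example")
        .addEndpoint(URI.create("http://0.0.0.0:50070"))
        .setConf(conf)
        .setFindPort(true)      // pre-existing: probe successive ports when busy
        .setPortRanges(ports)   // new: restrict probing to the configured range
        .build();
    server.start();
  }
}
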
+ 70 - 24
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java

@@ -24,6 +24,7 @@ import java.util.*;
 import java.rmi.server.UID;
 import java.security.MessageDigest;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.*;
 import org.apache.hadoop.util.Options;
 import org.apache.hadoop.fs.*;
@@ -146,7 +147,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
  *   </ul>
  * </li>
  * <li>
- * A sync-marker every few <code>100</code> bytes or so.
+ * A sync-marker every few <code>100</code> kilobytes or so.
  * </li>
  * </ul>
  *
@@ -165,7 +166,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSU
  *   </ul>
  * </li>
  * <li>
- * A sync-marker every few <code>100</code> bytes or so.
+ * A sync-marker every few <code>100</code> kilobytes or so.
  * </li>
  * </ul>
  * 
@@ -217,8 +218,11 @@ public class SequenceFile {
   private static final int SYNC_HASH_SIZE = 16;   // number of bytes in hash 
   private static final int SYNC_SIZE = 4+SYNC_HASH_SIZE; // escape + hash
 
-  /** The number of bytes between sync points.*/
-  public static final int SYNC_INTERVAL = 100*SYNC_SIZE; 
+  /**
+   * The number of bytes between sync points; 100 KB by default.
+   * Computed as 5 KB * SYNC_SIZE (20 bytes) = 100 KB.
+   */
+  public static final int SYNC_INTERVAL = 5 * 1024 * SYNC_SIZE; // 5KB*(16+4)
 
   /** 
    * The compression type used to compress key/value pairs in the 
@@ -856,6 +860,9 @@ public class SequenceFile {
     // starts and ends by scanning for this value.
     long lastSyncPos;                     // position of last sync
     byte[] sync;                          // 16 random bytes
+    @VisibleForTesting
+    int syncInterval;
+
     {
       try {                                       
         MessageDigest digester = MessageDigest.getInstance("MD5");
@@ -987,7 +994,16 @@ public class SequenceFile {
     private static Option filesystem(FileSystem fs) {
       return new SequenceFile.Writer.FileSystemOption(fs);
     }
-    
+
+    private static class SyncIntervalOption extends Options.IntegerOption
+        implements Option {
+      SyncIntervalOption(int val) {
+        // If a negative sync interval is provided,
+        // fall back to the default sync interval.
+        super(val < 0 ? SYNC_INTERVAL : val);
+      }
+    }
+
     public static Option bufferSize(int value) {
       return new BufferSizeOption(value);
     }
@@ -1032,11 +1048,15 @@ public class SequenceFile {
         CompressionCodec codec) {
       return new CompressionOption(value, codec);
     }
-    
+
+    public static Option syncInterval(int value) {
+      return new SyncIntervalOption(value);
+    }
+
     /**
      * Construct a uncompressed writer from a set of options.
      * @param conf the configuration to use
-     * @param options the options used when creating the writer
+     * @param opts the options used when creating the writer
      * @throws IOException if it fails
      */
     Writer(Configuration conf, 
@@ -1062,6 +1082,8 @@ public class SequenceFile {
         Options.getOption(MetadataOption.class, opts);
       CompressionOption compressionTypeOption =
         Options.getOption(CompressionOption.class, opts);
+      SyncIntervalOption syncIntervalOption =
+          Options.getOption(SyncIntervalOption.class, opts);
       // check consistency of options
       if ((fileOption == null) == (streamOption == null)) {
         throw new IllegalArgumentException("file or stream must be specified");
@@ -1163,7 +1185,12 @@ public class SequenceFile {
                                            "GzipCodec without native-hadoop " +
                                            "code!");
       }
-      init(conf, out, ownStream, keyClass, valueClass, codec, metadata);
+      this.syncInterval = (syncIntervalOption == null) ?
+          SYNC_INTERVAL :
+          syncIntervalOption.getValue();
+      init(
+          conf, out, ownStream, keyClass, valueClass,
+          codec, metadata, syncInterval);
     }
 
     /** Create the named file.
@@ -1176,7 +1203,7 @@ public class SequenceFile {
                   Class keyClass, Class valClass) throws IOException {
       this.compress = CompressionType.NONE;
       init(conf, fs.create(name), true, keyClass, valClass, null, 
-           new Metadata());
+           new Metadata(), SYNC_INTERVAL);
     }
     
     /** Create the named file with write-progress reporter.
@@ -1190,7 +1217,7 @@ public class SequenceFile {
                   Progressable progress, Metadata metadata) throws IOException {
       this.compress = CompressionType.NONE;
       init(conf, fs.create(name, progress), true, keyClass, valClass,
-           null, metadata);
+           null, metadata, SYNC_INTERVAL);
     }
     
     /** Create the named file with write-progress reporter. 
@@ -1206,7 +1233,7 @@ public class SequenceFile {
       this.compress = CompressionType.NONE;
       init(conf,
            fs.create(name, true, bufferSize, replication, blockSize, progress),
-           true, keyClass, valClass, null, metadata);
+           true, keyClass, valClass, null, metadata, SYNC_INTERVAL);
     }
 
     boolean isCompressed() { return compress != CompressionType.NONE; }
@@ -1234,18 +1261,21 @@ public class SequenceFile {
 
     /** Initialize. */
     @SuppressWarnings("unchecked")
-    void init(Configuration conf, FSDataOutputStream out, boolean ownStream,
-              Class keyClass, Class valClass,
-              CompressionCodec codec, Metadata metadata) 
+    void init(Configuration config, FSDataOutputStream outStream,
+              boolean ownStream, Class key, Class val,
+              CompressionCodec compCodec, Metadata meta,
+              int syncIntervalVal)
       throws IOException {
-      this.conf = conf;
-      this.out = out;
+      this.conf = config;
+      this.out = outStream;
       this.ownOutputStream = ownStream;
-      this.keyClass = keyClass;
-      this.valClass = valClass;
-      this.codec = codec;
-      this.metadata = metadata;
-      SerializationFactory serializationFactory = new SerializationFactory(conf);
+      this.keyClass = key;
+      this.valClass = val;
+      this.codec = compCodec;
+      this.metadata = meta;
+      this.syncInterval = syncIntervalVal;
+      SerializationFactory serializationFactory =
+          new SerializationFactory(config);
       this.keySerializer = serializationFactory.getSerializer(keyClass);
       if (this.keySerializer == null) {
         throw new IOException(
@@ -1366,7 +1396,7 @@ public class SequenceFile {
 
     synchronized void checkAndWriteSync() throws IOException {
       if (sync != null &&
-          out.getPos() >= lastSyncPos+SYNC_INTERVAL) { // time to emit sync
+          out.getPos() >= lastSyncPos+this.syncInterval) { // time to emit sync
         sync();
       }
     }
@@ -2786,14 +2816,30 @@ public class SequenceFile {
     }
 
     /** Sort and merge using an arbitrary {@link RawComparator}. */
+    @SuppressWarnings("deprecation")
     public Sorter(FileSystem fs, RawComparator comparator, Class keyClass,
                   Class valClass, Configuration conf, Metadata metadata) {
       this.fs = fs;
       this.comparator = comparator;
       this.keyClass = keyClass;
       this.valClass = valClass;
-      this.memory = conf.getInt("io.sort.mb", 100) * 1024 * 1024;
-      this.factor = conf.getInt("io.sort.factor", 100);
+      // Remember to fall back on the deprecated io.sort.mb and
+      // io.sort.factor keys until they are removed permanently.
+      if (conf.get(CommonConfigurationKeys.IO_SORT_MB_KEY) != null) {
+        this.memory = conf.getInt(CommonConfigurationKeys.IO_SORT_MB_KEY,
+          CommonConfigurationKeys.SEQ_IO_SORT_MB_DEFAULT) * 1024 * 1024;
+      } else {
+        this.memory = conf.getInt(CommonConfigurationKeys.SEQ_IO_SORT_MB_KEY,
+          CommonConfigurationKeys.SEQ_IO_SORT_MB_DEFAULT) * 1024 * 1024;
+      }
+      if (conf.get(CommonConfigurationKeys.IO_SORT_FACTOR_KEY) != null) {
+        this.factor = conf.getInt(CommonConfigurationKeys.IO_SORT_FACTOR_KEY,
+            CommonConfigurationKeys.SEQ_IO_SORT_FACTOR_DEFAULT);
+      } else {
+        this.factor = conf.getInt(
+            CommonConfigurationKeys.SEQ_IO_SORT_FACTOR_KEY,
+            CommonConfigurationKeys.SEQ_IO_SORT_FACTOR_DEFAULT);
+      }
       this.conf = conf;
       this.metadata = metadata;
     }

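A small sketch of the new writer option. Writer.file, keyClass, and valueClass are pre-existing options; syncInterval is the one added above; the output path, key/value types, and the 1 MB interval are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class SequenceFileSyncIntervalExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(new Path("/tmp/example.seq")),
        SequenceFile.Writer.keyClass(IntWritable.class),
        SequenceFile.Writer.valueClass(Text.class),
        // New option: emit a sync marker roughly every 1 MB instead of the
        // 100 KB default; a negative value falls back to SYNC_INTERVAL.
        SequenceFile.Writer.syncInterval(1024 * 1024))) {
      writer.append(new IntWritable(1), new Text("value"));
    }
  }
}
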
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/Decompressor.java

@@ -95,7 +95,7 @@ public interface Decompressor {
    * @param b Buffer for the compressed data
    * @param off Start offset of the data
    * @param len Size of the buffer
-   * @return The actual number of bytes of compressed data.
+   * @return The actual number of bytes of uncompressed data.
    * @throws IOException
    */
   public int decompress(byte[] b, int off, int len) throws IOException;

+ 11 - 67
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java

@@ -18,18 +18,21 @@
 
 package org.apache.hadoop.io.compress;
 
-import java.io.*;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.util.zip.GZIPOutputStream;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.compress.DefaultCodec;
-import org.apache.hadoop.io.compress.zlib.*;
-import org.apache.hadoop.io.compress.zlib.ZlibDecompressor.ZlibDirectDecompressor;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
-import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
+import org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor;
+import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
+import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
+import org.apache.hadoop.io.compress.zlib.ZlibFactory;
 
 /**
  * This class creates gzip compressors/decompressors. 
@@ -45,10 +48,6 @@ public class GzipCodec extends DefaultCodec {
   protected static class GzipOutputStream extends CompressorStream {
 
     private static class ResetableGZIPOutputStream extends GZIPOutputStream {
-      private static final int TRAILER_SIZE = 8;
-      public static final String JVMVersion= System.getProperty("java.version");
-      private static final boolean HAS_BROKEN_FINISH =
-          (IBM_JAVA && JVMVersion.contains("1.6.0"));
 
       public ResetableGZIPOutputStream(OutputStream out) throws IOException {
         super(out);
@@ -57,61 +56,6 @@ public class GzipCodec extends DefaultCodec {
       public void resetState() throws IOException {
         def.reset();
       }
-
-      /**
-       * Override this method for HADOOP-8419.
-       * Override because IBM implementation calls def.end() which
-       * causes problem when reseting the stream for reuse.
-       *
-       */
-      @Override
-      public void finish() throws IOException {
-        if (HAS_BROKEN_FINISH) {
-          if (!def.finished()) {
-            def.finish();
-            while (!def.finished()) {
-              int i = def.deflate(this.buf, 0, this.buf.length);
-              if ((def.finished()) && (i <= this.buf.length - TRAILER_SIZE)) {
-                writeTrailer(this.buf, i);
-                i += TRAILER_SIZE;
-                out.write(this.buf, 0, i);
-
-                return;
-              }
-              if (i > 0) {
-                out.write(this.buf, 0, i);
-              }
-            }
-
-            byte[] arrayOfByte = new byte[TRAILER_SIZE];
-            writeTrailer(arrayOfByte, 0);
-            out.write(arrayOfByte);
-          }
-        } else {
-          super.finish();
-        }
-      }
-
-      /** re-implement for HADOOP-8419 because the relative method in jdk is invisible */
-      private void writeTrailer(byte[] paramArrayOfByte, int paramInt)
-        throws IOException {
-        writeInt((int)this.crc.getValue(), paramArrayOfByte, paramInt);
-        writeInt(this.def.getTotalIn(), paramArrayOfByte, paramInt + 4);
-      }
-
-      /** re-implement for HADOOP-8419 because the relative method in jdk is invisible */
-      private void writeInt(int paramInt1, byte[] paramArrayOfByte, int paramInt2)
-        throws IOException {
-        writeShort(paramInt1 & 0xFFFF, paramArrayOfByte, paramInt2);
-        writeShort(paramInt1 >> 16 & 0xFFFF, paramArrayOfByte, paramInt2 + 2);
-      }
-
-      /** re-implement for HADOOP-8419 because the relative method in jdk is invisible */
-      private void writeShort(int paramInt1, byte[] paramArrayOfByte, int paramInt2)
-        throws IOException {
-        paramArrayOfByte[paramInt2] = (byte)(paramInt1 & 0xFF);
-        paramArrayOfByte[(paramInt2 + 1)] = (byte)(paramInt1 >> 8 & 0xFF);
-      }
     }
 
     public GzipOutputStream(OutputStream out) throws IOException {

+ 242 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/ZStandardCodec.java

@@ -0,0 +1,242 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.compress.zstd.ZStandardCompressor;
+import org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
+import org.apache.hadoop.util.NativeCodeLoader;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_KEY;
+
+/**
+ * This class creates zstd compressors/decompressors.
+ */
+public class ZStandardCodec implements
+    Configurable, CompressionCodec, DirectDecompressionCodec  {
+  private Configuration conf;
+
+  /**
+   * Set the configuration to be used by this object.
+   *
+   * @param conf the configuration object.
+   */
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  /**
+   * Return the configuration used by this object.
+   *
+   * @return the configuration object used by this object.
+   */
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  public static void checkNativeCodeLoaded() {
+    if (!NativeCodeLoader.isNativeCodeLoaded() ||
+        !NativeCodeLoader.buildSupportsZstd()) {
+      throw new RuntimeException("native zStandard library "
+          + "not available: this version of libhadoop was built "
+          + "without zstd support.");
+    }
+    if (!ZStandardCompressor.isNativeCodeLoaded()) {
+      throw new RuntimeException("native zStandard library not "
+          + "available: ZStandardCompressor has not been loaded.");
+    }
+    if (!ZStandardDecompressor.isNativeCodeLoaded()) {
+      throw new RuntimeException("native zStandard library not "
+          + "available: ZStandardDecompressor has not been loaded.");
+    }
+  }
+
+  public static boolean isNativeCodeLoaded() {
+    return ZStandardCompressor.isNativeCodeLoaded()
+        && ZStandardDecompressor.isNativeCodeLoaded();
+  }
+
+  public static String getLibraryName() {
+    return ZStandardCompressor.getLibraryName();
+  }
+
+  public static int getCompressionLevel(Configuration conf) {
+    return conf.getInt(
+        CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY,
+        CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_DEFAULT);
+  }
+
+  public static int getCompressionBufferSize(Configuration conf) {
+    int bufferSize = getBufferSize(conf);
+    return bufferSize == 0 ?
+        ZStandardCompressor.getRecommendedBufferSize() :
+        bufferSize;
+  }
+
+  public static int getDecompressionBufferSize(Configuration conf) {
+    int bufferSize = getBufferSize(conf);
+    return bufferSize == 0 ?
+        ZStandardDecompressor.getRecommendedBufferSize() :
+        bufferSize;
+  }
+
+  private static int getBufferSize(Configuration conf) {
+    return conf.getInt(IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_KEY,
+        IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_DEFAULT);
+  }
+
+  /**
+   * Create a {@link CompressionOutputStream} that will write to the given
+   * {@link OutputStream}.
+   *
+   * @param out the location for the final output stream
+   * @return a stream the user can write uncompressed data to have compressed
+   * @throws IOException
+   */
+  @Override
+  public CompressionOutputStream createOutputStream(OutputStream out)
+      throws IOException {
+    return Util.
+        createOutputStreamWithCodecPool(this, conf, out);
+  }
+
+  /**
+   * Create a {@link CompressionOutputStream} that will write to the given
+   * {@link OutputStream} with the given {@link Compressor}.
+   *
+   * @param out        the location for the final output stream
+   * @param compressor compressor to use
+   * @return a stream the user can write uncompressed data to have compressed
+   * @throws IOException
+   */
+  @Override
+  public CompressionOutputStream createOutputStream(OutputStream out,
+      Compressor compressor)
+      throws IOException {
+    checkNativeCodeLoaded();
+    return new CompressorStream(out, compressor,
+        getCompressionBufferSize(conf));
+  }
+
+  /**
+   * Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
+   *
+   * @return the type of compressor needed by this codec.
+   */
+  @Override
+  public Class<? extends Compressor> getCompressorType() {
+    checkNativeCodeLoaded();
+    return ZStandardCompressor.class;
+  }
+
+  /**
+   * Create a new {@link Compressor} for use by this {@link CompressionCodec}.
+   *
+   * @return a new compressor for use by this codec
+   */
+  @Override
+  public Compressor createCompressor() {
+    checkNativeCodeLoaded();
+    return new ZStandardCompressor(
+        getCompressionLevel(conf), getCompressionBufferSize(conf));
+  }
+
+
+  /**
+   * Create a {@link CompressionInputStream} that will read from the given
+   * input stream.
+   *
+   * @param in the stream to read compressed bytes from
+   * @return a stream to read uncompressed bytes from
+   * @throws IOException
+   */
+  @Override
+  public CompressionInputStream createInputStream(InputStream in)
+      throws IOException {
+    return Util.
+        createInputStreamWithCodecPool(this, conf, in);
+  }
+
+  /**
+   * Create a {@link CompressionInputStream} that will read from the given
+   * {@link InputStream} with the given {@link Decompressor}.
+   *
+   * @param in           the stream to read compressed bytes from
+   * @param decompressor decompressor to use
+   * @return a stream to read uncompressed bytes from
+   * @throws IOException
+   */
+  @Override
+  public CompressionInputStream createInputStream(InputStream in,
+                                                  Decompressor decompressor)
+      throws IOException {
+    checkNativeCodeLoaded();
+    return new DecompressorStream(in, decompressor,
+        getDecompressionBufferSize(conf));
+  }
+
+  /**
+   * Get the type of {@link Decompressor} needed by
+   * this {@link CompressionCodec}.
+   *
+   * @return the type of decompressor needed by this codec.
+   */
+  @Override
+  public Class<? extends Decompressor> getDecompressorType() {
+    checkNativeCodeLoaded();
+    return ZStandardDecompressor.class;
+  }
+
+  /**
+   * Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
+   *
+   * @return a new decompressor for use by this codec
+   */
+  @Override
+  public Decompressor createDecompressor() {
+    checkNativeCodeLoaded();
+    return new ZStandardDecompressor(getDecompressionBufferSize(conf));
+  }
+
+  /**
+   * Get the default filename extension for this kind of compression.
+   *
+   * @return <code>.zst</code>.
+   */
+  @Override
+  public String getDefaultExtension() {
+    return ".zst";
+  }
+
+  @Override
+  public DirectDecompressor createDirectDecompressor() {
+    return new ZStandardDecompressor.ZStandardDirectDecompressor(
+        getDecompressionBufferSize(conf)
+    );
+  }
+}
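
A hedged sketch of using the new codec directly. It assumes libhadoop was built with zstd support; the output file name is illustrative, and in practice the codec would usually be obtained through CompressionCodecFactory once registered in io.compression.codecs.

import java.io.FileOutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.ZStandardCodec;

public class ZstdWriteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Throws if the native zstd bindings are missing.
    ZStandardCodec.checkNativeCodeLoaded();

    ZStandardCodec codec = new ZStandardCodec();
    codec.setConf(conf);
    try (CompressionOutputStream out = codec.createOutputStream(
        new FileOutputStream("/tmp/example" + codec.getDefaultExtension()))) {
      out.write("hello zstd".getBytes(StandardCharsets.UTF_8));
      out.finish();
    }
  }
}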

+ 305 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java

@@ -0,0 +1,305 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress.zstd;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.ZStandardCodec;
+import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * A {@link Compressor} based on the zStandard compression algorithm.
+ * https://github.com/facebook/zstd
+ */
+public class ZStandardCompressor implements Compressor {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ZStandardCompressor.class);
+
+  private long stream;
+  private int level;
+  private int directBufferSize;
+  private byte[] userBuf = null;
+  private int userBufOff = 0, userBufLen = 0;
+  private ByteBuffer uncompressedDirectBuf = null;
+  private int uncompressedDirectBufOff = 0, uncompressedDirectBufLen = 0;
+  private boolean keepUncompressedBuf = false;
+  private ByteBuffer compressedDirectBuf = null;
+  private boolean finish, finished;
+  private long bytesRead = 0;
+  private long bytesWritten = 0;
+
+  private static boolean nativeZStandardLoaded = false;
+
+  static {
+    if (NativeCodeLoader.isNativeCodeLoaded()) {
+      try {
+        // Initialize the native library
+        initIDs();
+        nativeZStandardLoaded = true;
+      } catch (Throwable t) {
+        LOG.warn("Error loading zstandard native libraries: " + t);
+      }
+    }
+  }
+
+  public static boolean isNativeCodeLoaded() {
+    return nativeZStandardLoaded;
+  }
+
+  public static int getRecommendedBufferSize() {
+    return getStreamSize();
+  }
+
+  @VisibleForTesting
+  ZStandardCompressor() {
+    this(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_DEFAULT,
+        CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
+  }
+
+  /**
+   * Creates a new compressor with the given compression level and
+   * buffer size. Compressed data will be generated in ZStandard format.
+   */
+  public ZStandardCompressor(int level, int bufferSize) {
+    this(level, bufferSize, bufferSize);
+  }
+
+  @VisibleForTesting
+  ZStandardCompressor(int level, int inputBufferSize, int outputBufferSize) {
+    this.level = level;
+    stream = create();
+    this.directBufferSize = outputBufferSize;
+    uncompressedDirectBuf = ByteBuffer.allocateDirect(inputBufferSize);
+    compressedDirectBuf = ByteBuffer.allocateDirect(outputBufferSize);
+    compressedDirectBuf.position(outputBufferSize);
+    reset();
+  }
+
+  /**
+   * Prepare the compressor to be used in a new stream with settings defined in
+   * the given Configuration. It will reset the compressor's compression level
+   * and internal state.
+   *
+   * @param conf Configuration storing new settings
+   */
+  @Override
+  public void reinit(Configuration conf) {
+    if (conf == null) {
+      return;
+    }
+    level = ZStandardCodec.getCompressionLevel(conf);
+    reset();
+    LOG.debug("Reinit compressor with new compression configuration");
+  }
+
+  @Override
+  public void setInput(byte[] b, int off, int len) {
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    if (off < 0 || len < 0 || off > b.length - len) {
+      throw new ArrayIndexOutOfBoundsException();
+    }
+
+    this.userBuf = b;
+    this.userBufOff = off;
+    this.userBufLen = len;
+    uncompressedDirectBufOff = 0;
+    setInputFromSavedData();
+
+    compressedDirectBuf.limit(directBufferSize);
+    compressedDirectBuf.position(directBufferSize);
+  }
+
+  // Copy enough data from userBuf to uncompressedDirectBuf
+  private void setInputFromSavedData() {
+    int len = Math.min(userBufLen, uncompressedDirectBuf.remaining());
+    uncompressedDirectBuf.put(userBuf, userBufOff, len);
+    userBufLen -= len;
+    userBufOff += len;
+    uncompressedDirectBufLen = uncompressedDirectBuf.position();
+  }
+
+  @Override
+  public void setDictionary(byte[] b, int off, int len) {
+    throw new UnsupportedOperationException(
+        "Dictionary support is not enabled");
+  }
+
+  @Override
+  public boolean needsInput() {
+    // Consume remaining compressed data?
+    if (compressedDirectBuf.remaining() > 0) {
+      return false;
+    }
+
+    // have we consumed all input
+    if (keepUncompressedBuf && uncompressedDirectBufLen > 0) {
+      return false;
+    }
+
+    if (uncompressedDirectBuf.remaining() > 0) {
+      // Check if we have consumed all user-input
+      if (userBufLen <= 0) {
+        return true;
+      } else {
+        // copy enough data from userBuf to uncompressedDirectBuf
+        setInputFromSavedData();
+        // uncompressedDirectBuf is not full
+        return uncompressedDirectBuf.remaining() > 0;
+      }
+    }
+
+    return false;
+  }
+
+  @Override
+  public void finish() {
+    finish = true;
+  }
+
+  @Override
+  public boolean finished() {
+    // Check if 'zstd' says it's 'finished' and all compressed
+    // data has been consumed
+    return (finished && compressedDirectBuf.remaining() == 0);
+  }
+
+  @Override
+  public int compress(byte[] b, int off, int len) throws IOException {
+    checkStream();
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    if (off < 0 || len < 0 || off > b.length - len) {
+      throw new ArrayIndexOutOfBoundsException();
+    }
+
+    // Check if there is compressed data
+    int n = compressedDirectBuf.remaining();
+    if (n > 0) {
+      n = Math.min(n, len);
+      compressedDirectBuf.get(b, off, n);
+      return n;
+    }
+
+    // Re-initialize the output direct buffer
+    compressedDirectBuf.rewind();
+    compressedDirectBuf.limit(directBufferSize);
+
+    // Compress data
+    n = deflateBytesDirect(
+        uncompressedDirectBuf,
+        uncompressedDirectBufOff,
+        uncompressedDirectBufLen,
+        compressedDirectBuf,
+        directBufferSize
+    );
+    compressedDirectBuf.limit(n);
+
+    // Check if we have consumed all input buffer
+    if (uncompressedDirectBufLen <= 0) {
+      // consumed all input buffer
+      keepUncompressedBuf = false;
+      uncompressedDirectBuf.clear();
+      uncompressedDirectBufOff = 0;
+      uncompressedDirectBufLen = 0;
+    } else {
+      //  did not consume all input buffer
+      keepUncompressedBuf = true;
+    }
+
+    // Get at most 'len' bytes
+    n = Math.min(n, len);
+    compressedDirectBuf.get(b, off, n);
+    return n;
+  }
+
+  /**
+   * Returns the total number of compressed bytes output so far.
+   *
+   * @return the total (non-negative) number of compressed bytes output so far
+   */
+  @Override
+  public long getBytesWritten() {
+    checkStream();
+    return bytesWritten;
+  }
+
+  /**
+   * <p>Returns the total number of uncompressed bytes input so far.</p>
+   *
+   * @return the total (non-negative) number of uncompressed bytes input so far
+   */
+  @Override
+  public long getBytesRead() {
+    checkStream();
+    return bytesRead;
+  }
+
+  @Override
+  public void reset() {
+    checkStream();
+    init(level, stream);
+    finish = false;
+    finished = false;
+    bytesRead = 0;
+    bytesWritten = 0;
+    uncompressedDirectBuf.rewind();
+    uncompressedDirectBufOff = 0;
+    uncompressedDirectBufLen = 0;
+    keepUncompressedBuf = false;
+    compressedDirectBuf.limit(directBufferSize);
+    compressedDirectBuf.position(directBufferSize);
+    userBufOff = 0;
+    userBufLen = 0;
+  }
+
+  @Override
+  public void end() {
+    if (stream != 0) {
+      end(stream);
+      stream = 0;
+    }
+  }
+
+  private void checkStream() {
+    if (stream == 0) {
+      throw new NullPointerException("Stream not initialized");
+    }
+  }
+
+  private static native long create();
+  private static native void init(int level, long stream);
+  private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
+      int srcLen, ByteBuffer dst, int dstLen);
+  private static native int getStreamSize();
+  private static native void end(long strm);
+  private static native void initIDs();
+  public static native String getLibraryName();
+}
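For reference, the Compressor contract implemented above can be driven as in the following sketch. This is not part of the patch: the compression level of 3 and the 64 KB chunk size are illustrative assumptions, chosen only to keep the example self-contained.

import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.compress.zstd.ZStandardCompressor;

public class ZStandardCompressSketch {
  // One-shot compression using the Compressor methods defined above:
  // feed the input, drain output while more input is wanted, then
  // finish() and drain the remaining compressed bytes.
  static byte[] compress(byte[] data) throws IOException {
    ZStandardCompressor compressor = new ZStandardCompressor(3, 64 * 1024);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    byte[] chunk = new byte[64 * 1024];
    try {
      compressor.setInput(data, 0, data.length);
      while (!compressor.needsInput()) {
        int n = compressor.compress(chunk, 0, chunk.length);
        out.write(chunk, 0, n);
      }
      compressor.finish();
      while (!compressor.finished()) {
        int n = compressor.compress(chunk, 0, chunk.length);
        out.write(chunk, 0, n);
      }
    } finally {
      compressor.end();  // release the native stream
    }
    return out.toByteArray();
  }
}

In Hadoop itself the compressor would normally be obtained through the ZStandard codec rather than constructed directly; direct construction here is only to keep the sketch self-contained.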

+ 323 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.java

@@ -0,0 +1,323 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress.zstd;
+
+import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.io.compress.DirectDecompressor;
+import org.apache.hadoop.util.NativeCodeLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * A {@link Decompressor} based on the Zstandard compression algorithm.
+ * https://github.com/facebook/zstd
+ */
+public class ZStandardDecompressor implements Decompressor {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ZStandardDecompressor.class);
+
+  private long stream;
+  private int directBufferSize;
+  private ByteBuffer compressedDirectBuf = null;
+  private int compressedDirectBufOff, bytesInCompressedBuffer;
+  private ByteBuffer uncompressedDirectBuf = null;
+  private byte[] userBuf = null;
+  private int userBufOff = 0, userBufferBytesToConsume = 0;
+  private boolean finished;
+  private int remaining = 0;
+
+  private static boolean nativeZStandardLoaded = false;
+
+  static {
+    if (NativeCodeLoader.isNativeCodeLoaded()) {
+      try {
+        // Initialize the native library
+        initIDs();
+        nativeZStandardLoaded = true;
+      } catch (Throwable t) {
+        LOG.warn("Error loading zstandard native libraries: " + t);
+      }
+    }
+  }
+
+  public static boolean isNativeCodeLoaded() {
+    return nativeZStandardLoaded;
+  }
+
+  public static int getRecommendedBufferSize() {
+    return getStreamSize();
+  }
+
+  public ZStandardDecompressor() {
+    this(getStreamSize());
+  }
+
+  /**
+   * Creates a new decompressor with the given direct buffer size.
+   *
+   * @param bufferSize size of the input and output direct buffers
+   */
+  public ZStandardDecompressor(int bufferSize) {
+    this.directBufferSize = bufferSize;
+    compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
+    uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
+    uncompressedDirectBuf.position(directBufferSize);
+    stream = create();
+    reset();
+  }
+
+  @Override
+  public void setInput(byte[] b, int off, int len) {
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    if (off < 0 || len < 0 || off > b.length - len) {
+      throw new ArrayIndexOutOfBoundsException();
+    }
+
+    this.userBuf = b;
+    this.userBufOff = off;
+    this.userBufferBytesToConsume = len;
+
+    setInputFromSavedData();
+
+    uncompressedDirectBuf.limit(directBufferSize);
+    uncompressedDirectBuf.position(directBufferSize);
+  }
+
+  private void setInputFromSavedData() {
+    compressedDirectBufOff = 0;
+    bytesInCompressedBuffer = userBufferBytesToConsume;
+    if (bytesInCompressedBuffer > directBufferSize) {
+      bytesInCompressedBuffer = directBufferSize;
+    }
+
+    compressedDirectBuf.rewind();
+    compressedDirectBuf.put(
+        userBuf, userBufOff, bytesInCompressedBuffer);
+
+    userBufOff += bytesInCompressedBuffer;
+    userBufferBytesToConsume -= bytesInCompressedBuffer;
+  }
+
+  // dictionary is not supported
+  @Override
+  public void setDictionary(byte[] b, int off, int len) {
+    throw new UnsupportedOperationException(
+        "Dictionary support is not enabled");
+  }
+
+  @Override
+  public boolean needsInput() {
+    // Is there uncompressed output still waiting to be consumed?
+    if (uncompressedDirectBuf.remaining() > 0) {
+      return false;
+    }
+
+    // Check if we have consumed all input
+    if (bytesInCompressedBuffer - compressedDirectBufOff <= 0) {
+      // Check if we have consumed all user-input
+      if (userBufferBytesToConsume <= 0) {
+        return true;
+      } else {
+        setInputFromSavedData();
+      }
+    }
+    return false;
+  }
+
+  // dictionary is not supported.
+  @Override
+  public boolean needsDictionary() {
+    return false;
+  }
+
+  @Override
+  public boolean finished() {
+    // finished == true if ZSTD_decompressStream() returns 0
+    // also check we have nothing left in our buffer
+    return (finished && uncompressedDirectBuf.remaining() == 0);
+  }
+
+  @Override
+  public int decompress(byte[] b, int off, int len)
+      throws IOException {
+    checkStream();
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    if (off < 0 || len < 0 || off > b.length - len) {
+      throw new ArrayIndexOutOfBoundsException();
+    }
+
+    // Check if there is uncompressed data
+    int n = uncompressedDirectBuf.remaining();
+    if (n > 0) {
+      return populateUncompressedBuffer(b, off, len, n);
+    }
+
+    // Re-initialize the output direct buffer
+    uncompressedDirectBuf.rewind();
+    uncompressedDirectBuf.limit(directBufferSize);
+
+    // Decompress data
+    n = inflateBytesDirect(
+        compressedDirectBuf,
+        compressedDirectBufOff,
+        bytesInCompressedBuffer,
+        uncompressedDirectBuf,
+        0,
+        directBufferSize
+    );
+    uncompressedDirectBuf.limit(n);
+
+    // Get at most 'len' bytes
+    return populateUncompressedBuffer(b, off, len, n);
+  }
+
+  /**
+   * <p>Returns the number of bytes remaining in the input buffers;
+   * normally called when finished() is true to determine amount of post-stream
+   * data.</p>
+   *
+   * @return the total (non-negative) number of unprocessed bytes in input
+   */
+  @Override
+  public int getRemaining() {
+    checkStream();
+    // userBuf + compressedDirectBuf
+    return userBufferBytesToConsume + remaining;
+  }
+
+  /**
+   * Resets everything including the input buffers (user and direct).
+   */
+  @Override
+  public void reset() {
+    checkStream();
+    init(stream);
+    remaining = 0;
+    finished = false;
+    compressedDirectBufOff = 0;
+    bytesInCompressedBuffer = 0;
+    uncompressedDirectBuf.limit(directBufferSize);
+    uncompressedDirectBuf.position(directBufferSize);
+    userBufOff = 0;
+    userBufferBytesToConsume = 0;
+  }
+
+  @Override
+  public void end() {
+    if (stream != 0) {
+      free(stream);
+      stream = 0;
+    }
+  }
+
+  @Override
+  protected void finalize() {
+    reset();
+  }
+
+  private void checkStream() {
+    if (stream == 0) {
+      throw new NullPointerException("Stream not initialized");
+    }
+  }
+
+  private int populateUncompressedBuffer(byte[] b, int off, int len, int n) {
+    n = Math.min(n, len);
+    uncompressedDirectBuf.get(b, off, n);
+    return n;
+  }
+
+  private static native void initIDs();
+  private static native long create();
+  private static native void init(long stream);
+  private native int inflateBytesDirect(ByteBuffer src, int srcOffset,
+      int srcLen, ByteBuffer dst, int dstOffset, int dstLen);
+  private static native void free(long strm);
+  private static native int getStreamSize();
+
+  int inflateDirect(ByteBuffer src, ByteBuffer dst) throws IOException {
+    assert
+        (this instanceof ZStandardDecompressor.ZStandardDirectDecompressor);
+
+    int originalPosition = dst.position();
+    int n = inflateBytesDirect(
+        src, src.position(), src.remaining(), dst, dst.position(),
+        dst.remaining()
+    );
+    dst.position(originalPosition + n);
+    if (bytesInCompressedBuffer > 0) {
+      src.position(compressedDirectBufOff);
+    } else {
+      src.position(src.limit());
+    }
+    return n;
+  }
+
+  /**
+   * A {@link DirectDecompressor} for ZStandard
+   * https://github.com/facebook/zstd.
+   */
+  public static class ZStandardDirectDecompressor
+      extends ZStandardDecompressor implements DirectDecompressor {
+
+    public ZStandardDirectDecompressor(int directBufferSize) {
+      super(directBufferSize);
+    }
+
+    @Override
+    public boolean finished() {
+      return (endOfInput && super.finished());
+    }
+
+    @Override
+    public void reset() {
+      super.reset();
+      endOfInput = true;
+    }
+
+    private boolean endOfInput;
+
+    @Override
+    public void decompress(ByteBuffer src, ByteBuffer dst)
+        throws IOException {
+      assert dst.isDirect() : "dst.isDirect()";
+      assert src.isDirect() : "src.isDirect()";
+      assert dst.remaining() > 0 : "dst.remaining() > 0";
+      this.inflateDirect(src, dst);
+      endOfInput = !src.hasRemaining();
+    }
+
+    @Override
+    public void setDictionary(byte[] b, int off, int len) {
+      throw new UnsupportedOperationException(
+          "byte[] arrays are not supported for DirectDecompressor");
+    }
+
+    @Override
+    public int decompress(byte[] b, int off, int len) {
+      throw new UnsupportedOperationException(
+          "byte[] arrays are not supported for DirectDecompressor");
+    }
+  }
+}
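Correspondingly, a minimal sketch of driving the Decompressor contract above on a fully buffered zstd stream. Illustrative only: the 64 KB output chunk size is an assumption, and a streaming caller would feed further input instead of breaking out of the loop.

import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;

public class ZStandardDecompressSketch {
  // One-shot decompression using the Decompressor methods defined above.
  static byte[] decompress(byte[] compressed) throws IOException {
    ZStandardDecompressor decompressor = new ZStandardDecompressor(
        ZStandardDecompressor.getRecommendedBufferSize());
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    byte[] chunk = new byte[64 * 1024];
    try {
      decompressor.setInput(compressed, 0, compressed.length);
      while (!decompressor.finished()) {
        int n = decompressor.decompress(chunk, 0, chunk.length);
        out.write(chunk, 0, n);
        if (n == 0 && decompressor.needsInput()) {
          // All supplied input is consumed but the frame is incomplete;
          // a streaming caller would call setInput() with more data here.
          break;
        }
      }
    } finally {
      decompressor.end();  // free the native stream
    }
    return out.toByteArray();
  }
}

The nested ZStandardDirectDecompressor follows the same contract but operates on direct ByteBuffers via decompress(ByteBuffer, ByteBuffer).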

+ 22 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/package-info.java

@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.io.compress.zstd;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

+ 14 - 14
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java

@@ -55,9 +55,9 @@ public final class CodecUtil {
   public static final String IO_ERASURECODE_CODEC_XOR =
       XORErasureCodec.class.getCanonicalName();
   /** Erasure coder Reed-Solomon codec. */
-  public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_KEY =
+  public static final String IO_ERASURECODE_CODEC_RS_KEY =
       "io.erasurecode.codec.rs";
-  public static final String IO_ERASURECODE_CODEC_RS_DEFAULT =
+  public static final String IO_ERASURECODE_CODEC_RS =
       RSErasureCodec.class.getCanonicalName();
   /** Erasure coder hitch hiker XOR codec. */
   public static final String IO_ERASURECODE_CODEC_HHXOR_KEY =
@@ -67,10 +67,10 @@ public final class CodecUtil {
 
   /** Supported erasure codec classes. */
 
-  /** Raw coder factory for the RS default codec. */
-  public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY =
-      "io.erasurecode.codec.rs-default.rawcoder";
-  public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_DEFAULT =
+  /** Raw coder factory for the RS codec. */
+  public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_KEY =
+      "io.erasurecode.codec.rs.rawcoder";
+  public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_DEFAULT =
       RSRawErasureCoderFactory.class.getCanonicalName();
 
   /** Raw coder factory for the RS legacy codec. */
@@ -183,10 +183,10 @@ public final class CodecUtil {
   private static String getRawCoderFactNameFromCodec(Configuration conf,
                                                      String codec) {
     switch (codec) {
-    case ErasureCodeConstants.RS_DEFAULT_CODEC_NAME:
+    case ErasureCodeConstants.RS_CODEC_NAME:
       return conf.get(
-          IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
-          IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_DEFAULT);
+          IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
+          IO_ERASURECODE_CODEC_RS_RAWCODER_DEFAULT);
     case ErasureCodeConstants.RS_LEGACY_CODEC_NAME:
       return conf.get(
           IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_KEY,
@@ -233,15 +233,15 @@ public final class CodecUtil {
 
   private static String getCodecClassName(Configuration conf, String codec) {
     switch (codec) {
-    case ErasureCodeConstants.RS_DEFAULT_CODEC_NAME:
+    case ErasureCodeConstants.RS_CODEC_NAME:
       return conf.get(
-          CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_KEY,
-          CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT);
+          CodecUtil.IO_ERASURECODE_CODEC_RS_KEY,
+          CodecUtil.IO_ERASURECODE_CODEC_RS);
     case ErasureCodeConstants.RS_LEGACY_CODEC_NAME:
       //TODO:rs-legacy should be handled differently.
       return conf.get(
-          CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_KEY,
-          CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT);
+          CodecUtil.IO_ERASURECODE_CODEC_RS_KEY,
+          CodecUtil.IO_ERASURECODE_CODEC_RS);
     case ErasureCodeConstants.XOR_CODEC_NAME:
       return conf.get(
           CodecUtil.IO_ERASURECODE_CODEC_XOR_KEY,
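Because the rawcoder key changes from io.erasurecode.codec.rs-default.rawcoder to io.erasurecode.codec.rs.rawcoder, deployments that override it need updating. A small sketch of setting the renamed key (the explicit value here is just the default declared above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.erasurecode.CodecUtil;

public class RsRawCoderConfigSketch {
  // Returns a Configuration that pins the renamed "rs" codec to a
  // specific raw coder factory (here, its own declared default).
  static Configuration withRsRawCoder() {
    Configuration conf = new Configuration();
    conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
        CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_DEFAULT);
    return conf;
  }
}

Leaving the key unset falls back to the same default through getRawCoderFactNameFromCodec above.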

+ 9 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java

@@ -25,17 +25,23 @@ public final class ErasureCodeConstants {
   private ErasureCodeConstants() {
   }
 
-  public static final String RS_DEFAULT_CODEC_NAME = "rs-default";
+  public static final String RS_CODEC_NAME = "rs";
   public static final String RS_LEGACY_CODEC_NAME = "rs-legacy";
   public static final String XOR_CODEC_NAME = "xor";
   public static final String HHXOR_CODEC_NAME = "hhxor";
 
   public static final ECSchema RS_6_3_SCHEMA = new ECSchema(
-      RS_DEFAULT_CODEC_NAME, 6, 3);
+      RS_CODEC_NAME, 6, 3);
 
   public static final ECSchema RS_3_2_SCHEMA = new ECSchema(
-      RS_DEFAULT_CODEC_NAME, 3, 2);
+      RS_CODEC_NAME, 3, 2);
 
   public static final ECSchema RS_6_3_LEGACY_SCHEMA = new ECSchema(
       RS_LEGACY_CODEC_NAME, 6, 3);
+
+  public static final ECSchema XOR_2_1_SCHEMA = new ECSchema(
+      XOR_CODEC_NAME, 2, 1);
+
+  public static final ECSchema RS_10_4_SCHEMA = new ECSchema(
+      RS_CODEC_NAME, 10, 4);
 }
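The two added schemas describe XOR with 2 data / 1 parity unit and Reed-Solomon with 10 data / 4 parity units. For illustration, the RS_10_4_SCHEMA constant above is equivalent to constructing the schema directly (a sketch only):

import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;

public class RsSchemaSketch {
  public static void main(String[] args) {
    // Same shape as the RS_10_4_SCHEMA constant added above:
    // the "rs" codec with 10 data units and 4 parity units.
    ECSchema rs104 = new ECSchema(ErasureCodeConstants.RS_CODEC_NAME, 10, 4);
    System.out.println(rs104);
  }
}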

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureDecoder.java

@@ -67,7 +67,7 @@ public class HHXORErasureDecoder extends ErasureDecoder {
   private RawErasureDecoder checkCreateRSRawDecoder() {
     if (rsRawDecoder == null) {
       rsRawDecoder = CodecUtil.createRawDecoder(getConf(),
-              ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, getOptions());
+              ErasureCodeConstants.RS_CODEC_NAME, getOptions());
     }
     return rsRawDecoder;
   }

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureEncoder.java

@@ -61,7 +61,7 @@ public class HHXORErasureEncoder extends ErasureEncoder {
   private RawErasureEncoder checkCreateRSRawEncoder() {
     if (rsRawEncoder == null) {
       rsRawEncoder = CodecUtil.createRawEncoder(getConf(),
-          ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, getOptions());
+          ErasureCodeConstants.RS_CODEC_NAME, getOptions());
     }
     return rsRawEncoder;
   }

Some files were not shown because too many files changed in this diff