
Merge branch 'trunk' into HDFS-7240

 Conflicts:
	hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
Anu Engineer, 8 years ago
parent
commit
e55bdefdab
100 changed files with 3,881 additions and 550 deletions
  1. 14 0
      .gitignore
  2. 22 1
      BUILDING.txt
  3. 546 0
      LICENSE.txt
  4. 345 0
      dev-support/bin/checkcompatibility.py
  5. 1 1
      dev-support/bin/create-release
  6. 9 0
      dev-support/docker/Dockerfile
  7. 7 0
      hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
  8. 1 1
      hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
  9. 10 10
      hadoop-client/pom.xml
  10. 127 0
      hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
  11. 54 0
      hadoop-cloud-storage-project/pom.xml
  12. 1 1
      hadoop-common-project/hadoop-auth-examples/pom.xml
  13. 12 0
      hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/RequestLoggerFilter.java
  14. 9 4
      hadoop-common-project/hadoop-auth/pom.xml
  15. 6 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
  16. 18 11
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
  17. 33 12
      hadoop-common-project/hadoop-common/pom.xml
  18. 3 1
      hadoop-common-project/hadoop-common/src/CMakeLists.txt
  19. 1 1
      hadoop-common-project/hadoop-common/src/main/bin/hadoop
  20. 0 6
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
  21. 19 3
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties
  22. 7 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
  23. 28 28
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
  24. 52 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSDelegationToken.java
  25. 2 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  26. 2 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java
  27. 7 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  28. 7 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
  29. 3 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
  30. 7 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java
  31. 6 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java
  32. 12 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  33. 5 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
  34. 9 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
  35. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java
  36. 213 124
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
  37. 35 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/JettyUtils.java
  38. 59 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeXORRawDecoder.java
  39. 60 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeXORRawEncoder.java
  40. 39 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeXORRawErasureCoderFactory.java
  41. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java
  42. 7 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
  43. 8 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
  44. 5 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java
  45. 6 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
  46. 148 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java
  47. 4 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java
  48. 15 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java
  49. 4 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslInputStream.java
  50. 3 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
  51. 80 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  52. 0 58
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SslSelectChannelConnectorSecure.java
  53. 44 25
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
  54. 31 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
  55. 158 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
  56. 22 15
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
  57. 21 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java
  58. 92 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java
  59. 58 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java
  60. 54 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java
  61. 3 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LimitInputStream.java
  62. 5 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
  63. 18 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
  64. 4 2
      hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
  65. 80 0
      hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_decoder.c
  66. 82 0
      hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_encoder.c
  67. 14 0
      hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
  68. 106 58
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  69. 0 24
      hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
  70. 1 1
      hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
  71. 15 0
      hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md
  72. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
  73. 2 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigRedactor.java
  74. 47 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java
  75. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
  76. 115 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDefaultUri.java
  77. 51 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
  78. 9 9
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java
  79. 18 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java
  80. 6 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
  81. 7 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
  82. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java
  83. 39 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
  84. 5 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
  85. 5 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java
  86. 6 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestAuthenticationSessionCookie.java
  87. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpRequestLog.java
  88. 30 9
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
  89. 5 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java
  90. 3 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java
  91. 36 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestNativeXORRawCoder.java
  92. 2 36
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestXORRawCoder.java
  93. 59 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestXORRawCoderBase.java
  94. 36 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestXORRawCoderInteroperable1.java
  95. 37 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestXORRawCoderInteroperable2.java
  96. 24 28
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/Timer.java
  97. 162 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java
  98. 144 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java
  99. 93 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
  100. 50 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java

+ 14 - 0
.gitignore

@@ -26,10 +26,24 @@ hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads
 hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
 hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/dist
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tmp
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/node
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/node_modules
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower_components
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.sass-cache
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/connect.lock
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/coverage/*
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/libpeerconnection.log
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/npm-debug.log
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/testem.log
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/dist
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tmp
 yarnregistry.pdf
 hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml
 hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
 hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml
+hadoop-tools/hadoop-openstack/src/test/resources/auth-keys.xml
 patchprocess/
 hadoop-tools/hadoop-aliyun/src/test/resources/auth-keys.xml
 hadoop-tools/hadoop-aliyun/src/test/resources/contract-test-options.xml

+ 22 - 1
BUILDING.txt

@@ -15,6 +15,7 @@ Requirements:
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)
 * python (for releasedocs)
 * bats (for shell code testing)
+* Node.js / bower / Ember-cli (for YARN UI v2 building)
 
 ----------------------------------------------------------------------------------
 The easiest way to get an environment with all the appropriate tools is by means
@@ -119,7 +120,7 @@ Maven build goals:
  * Run clover                : mvn test -Pclover [-DcloverLicenseLocation=${user.name}/.clover.license]
  * Run Rat                   : mvn apache-rat:check
  * Build javadocs            : mvn javadoc:javadoc
- * Build distribution        : mvn package [-Pdist][-Pdocs][-Psrc][-Pnative][-Dtar][-Preleasedocs]
+ * Build distribution        : mvn package [-Pdist][-Pdocs][-Psrc][-Pnative][-Dtar][-Preleasedocs][-Pyarn-ui]
  * Change Hadoop version     : mvn versions:set -DnewVersion=NEWVERSION
 
  Build options:
@@ -129,6 +130,7 @@ Maven build goals:
   * Use -Psrc to create a project source TAR.GZ
   * Use -Dtar to create a TAR with the distribution (using -Pdist)
   * Use -Preleasedocs to include the changelog and release docs (requires Internet connectivity)
+  * Use -Pyarn-ui to build YARN UI v2. (Requires Internet connectivity)
 
  Snappy build options:
 
@@ -212,6 +214,14 @@ Maven build goals:
     and it ignores the -Disal.prefix option. If -Disal.lib isn't given, the
     bundling and building will fail.
 
+ Special plugins: OWASP's dependency-check:
+
+   OWASP's dependency-check plugin will scan the third party dependencies
+   of this project for known CVEs (security vulnerabilities against them).
+   It will produce a report in target/dependency-check-report.html. To
+   invoke, run 'mvn dependency-check:aggregate'. Note that this plugin
+   requires maven 3.1.1 or greater.
+
 ----------------------------------------------------------------------------------
 Building components separately
 
@@ -381,3 +391,14 @@ http://www.zlib.net/
 Building distributions:
 
  * Build distribution with native code    : mvn package [-Pdist][-Pdocs][-Psrc][-Dtar]
+
+----------------------------------------------------------------------------------
+Running compatibility checks with checkcompatibility.py
+
+Invoke `./dev-support/bin/checkcompatibility.py` to run Java API Compliance Checker
+to compare the public Java APIs of two git objects. This can be used by release
+managers to compare the compatibility of a previous and current release.
+
+As an example, this invocation will check the compatibility of interfaces annotated as Public or LimitedPrivate:
+
+./dev-support/bin/checkcompatibility.py --annotation org.apache.hadoop.classification.InterfaceAudience.Public --annotation org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate --include "hadoop.*" branch-2.7.2 trunk
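
For illustration only (not part of this commit): a minimal Python sketch that drives the two workflows documented in the BUILDING.txt additions above from a repository checkout. The command lines are copied from the text above; everything else (that `mvn` is on the PATH and the script is executable) is an assumption. Shelling out via `subprocess` mirrors how dev-support/bin/checkcompatibility.py itself invokes Maven.

```python
# Illustrative sketch only -- not part of this commit.
import subprocess

# OWASP dependency-check (requires Maven 3.1.1 or greater); the report is
# written to target/dependency-check-report.html.
subprocess.check_call(["mvn", "dependency-check:aggregate"])

# Compare the public Java APIs of branch-2.7.2 and trunk, restricted to
# classes annotated Public or LimitedPrivate, as in the example above.
subprocess.check_call([
    "./dev-support/bin/checkcompatibility.py",
    "--annotation",
    "org.apache.hadoop.classification.InterfaceAudience.Public",
    "--annotation",
    "org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate",
    "--include", "hadoop.*",
    "branch-2.7.2", "trunk",
])
```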

+ 546 - 0
LICENSE.txt

@@ -586,6 +586,8 @@ And the binary distribution of this product bundles these dependencies under the
 following license:
 Mockito 1.8.5
 SLF4J 1.7.10
+JCodings 1.0.8
+Joni 2.1.2
 --------------------------------------------------------------------------------
 
 The MIT License (MIT)
@@ -1641,6 +1643,12 @@ JLine 0.9.94
 leveldbjni-all 1.8
 Hamcrest Core 1.3
 xmlenc Library 0.52
+StringTemplate 4 4.0.7
+ANTLR 3 Tool 3.5
+ANTLR 3 Runtime 3.5
+ANTLR StringTemplate 3.2.1
+ASM All 5.0.2
+sqlline 1.1.8
 --------------------------------------------------------------------------------
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are met:
@@ -1868,3 +1876,541 @@ representations with respect to the Work not specified here. Licensor shall not
 be bound by any additional provisions that may appear in any communication from
 You. This License may not be modified without the mutual written agreement of
 the Licensor and You.
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+jamon-runtime 2.3.1
+--------------------------------------------------------------------------------
+                          MOZILLA PUBLIC LICENSE
+                                Version 1.1
+
+                              ---------------
+
+1. Definitions.
+
+     1.0.1. "Commercial Use" means distribution or otherwise making the
+     Covered Code available to a third party.
+
+     1.1. "Contributor" means each entity that creates or contributes to
+     the creation of Modifications.
+
+     1.2. "Contributor Version" means the combination of the Original
+     Code, prior Modifications used by a Contributor, and the Modifications
+     made by that particular Contributor.
+
+     1.3. "Covered Code" means the Original Code or Modifications or the
+     combination of the Original Code and Modifications, in each case
+     including portions thereof.
+
+     1.4. "Electronic Distribution Mechanism" means a mechanism generally
+     accepted in the software development community for the electronic
+     transfer of data.
+
+     1.5. "Executable" means Covered Code in any form other than Source
+     Code.
+
+     1.6. "Initial Developer" means the individual or entity identified
+     as the Initial Developer in the Source Code notice required by Exhibit
+     A.
+
+     1.7. "Larger Work" means a work which combines Covered Code or
+     portions thereof with code not governed by the terms of this License.
+
+     1.8. "License" means this document.
+
+     1.8.1. "Licensable" means having the right to grant, to the maximum
+     extent possible, whether at the time of the initial grant or
+     subsequently acquired, any and all of the rights conveyed herein.
+
+     1.9. "Modifications" means any addition to or deletion from the
+     substance or structure of either the Original Code or any previous
+     Modifications. When Covered Code is released as a series of files, a
+     Modification is:
+          A. Any addition to or deletion from the contents of a file
+          containing Original Code or previous Modifications.
+
+          B. Any new file that contains any part of the Original Code or
+          previous Modifications.
+
+     1.10. "Original Code" means Source Code of computer software code
+     which is described in the Source Code notice required by Exhibit A as
+     Original Code, and which, at the time of its release under this
+     License is not already Covered Code governed by this License.
+
+     1.10.1. "Patent Claims" means any patent claim(s), now owned or
+     hereafter acquired, including without limitation,  method, process,
+     and apparatus claims, in any patent Licensable by grantor.
+
+     1.11. "Source Code" means the preferred form of the Covered Code for
+     making modifications to it, including all modules it contains, plus
+     any associated interface definition files, scripts used to control
+     compilation and installation of an Executable, or source code
+     differential comparisons against either the Original Code or another
+     well known, available Covered Code of the Contributor's choice. The
+     Source Code can be in a compressed or archival form, provided the
+     appropriate decompression or de-archiving software is widely available
+     for no charge.
+
+     1.12. "You" (or "Your")  means an individual or a legal entity
+     exercising rights under, and complying with all of the terms of, this
+     License or a future version of this License issued under Section 6.1.
+     For legal entities, "You" includes any entity which controls, is
+     controlled by, or is under common control with You. For purposes of
+     this definition, "control" means (a) the power, direct or indirect,
+     to cause the direction or management of such entity, whether by
+     contract or otherwise, or (b) ownership of more than fifty percent
+     (50%) of the outstanding shares or beneficial ownership of such
+     entity.
+
+2. Source Code License.
+
+     2.1. The Initial Developer Grant.
+     The Initial Developer hereby grants You a world-wide, royalty-free,
+     non-exclusive license, subject to third party intellectual property
+     claims:
+          (a)  under intellectual property rights (other than patent or
+          trademark) Licensable by Initial Developer to use, reproduce,
+          modify, display, perform, sublicense and distribute the Original
+          Code (or portions thereof) with or without Modifications, and/or
+          as part of a Larger Work; and
+
+          (b) under Patents Claims infringed by the making, using or
+          selling of Original Code, to make, have made, use, practice,
+          sell, and offer for sale, and/or otherwise dispose of the
+          Original Code (or portions thereof).
+
+          (c) the licenses granted in this Section 2.1(a) and (b) are
+          effective on the date Initial Developer first distributes
+          Original Code under the terms of this License.
+
+          (d) Notwithstanding Section 2.1(b) above, no patent license is
+          granted: 1) for code that You delete from the Original Code; 2)
+          separate from the Original Code;  or 3) for infringements caused
+          by: i) the modification of the Original Code or ii) the
+          combination of the Original Code with other software or devices.
+
+     2.2. Contributor Grant.
+     Subject to third party intellectual property claims, each Contributor
+     hereby grants You a world-wide, royalty-free, non-exclusive license
+
+          (a)  under intellectual property rights (other than patent or
+          trademark) Licensable by Contributor, to use, reproduce, modify,
+          display, perform, sublicense and distribute the Modifications
+          created by such Contributor (or portions thereof) either on an
+          unmodified basis, with other Modifications, as Covered Code
+          and/or as part of a Larger Work; and
+
+          (b) under Patent Claims infringed by the making, using, or
+          selling of  Modifications made by that Contributor either alone
+          and/or in combination with its Contributor Version (or portions
+          of such combination), to make, use, sell, offer for sale, have
+          made, and/or otherwise dispose of: 1) Modifications made by that
+          Contributor (or portions thereof); and 2) the combination of
+          Modifications made by that Contributor with its Contributor
+          Version (or portions of such combination).
+
+          (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
+          effective on the date Contributor first makes Commercial Use of
+          the Covered Code.
+
+          (d)    Notwithstanding Section 2.2(b) above, no patent license is
+          granted: 1) for any code that Contributor has deleted from the
+          Contributor Version; 2)  separate from the Contributor Version;
+          3)  for infringements caused by: i) third party modifications of
+          Contributor Version or ii)  the combination of Modifications made
+          by that Contributor with other software  (except as part of the
+          Contributor Version) or other devices; or 4) under Patent Claims
+          infringed by Covered Code in the absence of Modifications made by
+          that Contributor.
+
+3. Distribution Obligations.
+
+     3.1. Application of License.
+     The Modifications which You create or to which You contribute are
+     governed by the terms of this License, including without limitation
+     Section 2.2. The Source Code version of Covered Code may be
+     distributed only under the terms of this License or a future version
+     of this License released under Section 6.1, and You must include a
+     copy of this License with every copy of the Source Code You
+     distribute. You may not offer or impose any terms on any Source Code
+     version that alters or restricts the applicable version of this
+     License or the recipients' rights hereunder. However, You may include
+     an additional document offering the additional rights described in
+     Section 3.5.
+
+     3.2. Availability of Source Code.
+     Any Modification which You create or to which You contribute must be
+     made available in Source Code form under the terms of this License
+     either on the same media as an Executable version or via an accepted
+     Electronic Distribution Mechanism to anyone to whom you made an
+     Executable version available; and if made available via Electronic
+     Distribution Mechanism, must remain available for at least twelve (12)
+     months after the date it initially became available, or at least six
+     (6) months after a subsequent version of that particular Modification
+     has been made available to such recipients. You are responsible for
+     ensuring that the Source Code version remains available even if the
+     Electronic Distribution Mechanism is maintained by a third party.
+
+     3.3. Description of Modifications.
+     You must cause all Covered Code to which You contribute to contain a
+     file documenting the changes You made to create that Covered Code and
+     the date of any change. You must include a prominent statement that
+     the Modification is derived, directly or indirectly, from Original
+     Code provided by the Initial Developer and including the name of the
+     Initial Developer in (a) the Source Code, and (b) in any notice in an
+     Executable version or related documentation in which You describe the
+     origin or ownership of the Covered Code.
+
+     3.4. Intellectual Property Matters
+          (a) Third Party Claims.
+          If Contributor has knowledge that a license under a third party's
+          intellectual property rights is required to exercise the rights
+          granted by such Contributor under Sections 2.1 or 2.2,
+          Contributor must include a text file with the Source Code
+          distribution titled "LEGAL" which describes the claim and the
+          party making the claim in sufficient detail that a recipient will
+          know whom to contact. If Contributor obtains such knowledge after
+          the Modification is made available as described in Section 3.2,
+          Contributor shall promptly modify the LEGAL file in all copies
+          Contributor makes available thereafter and shall take other steps
+          (such as notifying appropriate mailing lists or newsgroups)
+          reasonably calculated to inform those who received the Covered
+          Code that new knowledge has been obtained.
+
+          (b) Contributor APIs.
+          If Contributor's Modifications include an application programming
+          interface and Contributor has knowledge of patent licenses which
+          are reasonably necessary to implement that API, Contributor must
+          also include this information in the LEGAL file.
+
+               (c)    Representations.
+          Contributor represents that, except as disclosed pursuant to
+          Section 3.4(a) above, Contributor believes that Contributor's
+          Modifications are Contributor's original creation(s) and/or
+          Contributor has sufficient rights to grant the rights conveyed by
+          this License.
+
+     3.5. Required Notices.
+     You must duplicate the notice in Exhibit A in each file of the Source
+     Code.  If it is not possible to put such notice in a particular Source
+     Code file due to its structure, then You must include such notice in a
+     location (such as a relevant directory) where a user would be likely
+     to look for such a notice.  If You created one or more Modification(s)
+     You may add your name as a Contributor to the notice described in
+     Exhibit A.  You must also duplicate this License in any documentation
+     for the Source Code where You describe recipients' rights or ownership
+     rights relating to Covered Code.  You may choose to offer, and to
+     charge a fee for, warranty, support, indemnity or liability
+     obligations to one or more recipients of Covered Code. However, You
+     may do so only on Your own behalf, and not on behalf of the Initial
+     Developer or any Contributor. You must make it absolutely clear than
+     any such warranty, support, indemnity or liability obligation is
+     offered by You alone, and You hereby agree to indemnify the Initial
+     Developer and every Contributor for any liability incurred by the
+     Initial Developer or such Contributor as a result of warranty,
+     support, indemnity or liability terms You offer.
+
+     3.6. Distribution of Executable Versions.
+     You may distribute Covered Code in Executable form only if the
+     requirements of Section 3.1-3.5 have been met for that Covered Code,
+     and if You include a notice stating that the Source Code version of
+     the Covered Code is available under the terms of this License,
+     including a description of how and where You have fulfilled the
+     obligations of Section 3.2. The notice must be conspicuously included
+     in any notice in an Executable version, related documentation or
+     collateral in which You describe recipients' rights relating to the
+     Covered Code. You may distribute the Executable version of Covered
+     Code or ownership rights under a license of Your choice, which may
+     contain terms different from this License, provided that You are in
+     compliance with the terms of this License and that the license for the
+     Executable version does not attempt to limit or alter the recipient's
+     rights in the Source Code version from the rights set forth in this
+     License. If You distribute the Executable version under a different
+     license You must make it absolutely clear that any terms which differ
+     from this License are offered by You alone, not by the Initial
+     Developer or any Contributor. You hereby agree to indemnify the
+     Initial Developer and every Contributor for any liability incurred by
+     the Initial Developer or such Contributor as a result of any such
+     terms You offer.
+
+     3.7. Larger Works.
+     You may create a Larger Work by combining Covered Code with other code
+     not governed by the terms of this License and distribute the Larger
+     Work as a single product. In such a case, You must make sure the
+     requirements of this License are fulfilled for the Covered Code.
+
+4. Inability to Comply Due to Statute or Regulation.
+
+     If it is impossible for You to comply with any of the terms of this
+     License with respect to some or all of the Covered Code due to
+     statute, judicial order, or regulation then You must: (a) comply with
+     the terms of this License to the maximum extent possible; and (b)
+     describe the limitations and the code they affect. Such description
+     must be included in the LEGAL file described in Section 3.4 and must
+     be included with all distributions of the Source Code. Except to the
+     extent prohibited by statute or regulation, such description must be
+     sufficiently detailed for a recipient of ordinary skill to be able to
+     understand it.
+
+5. Application of this License.
+
+     This License applies to code to which the Initial Developer has
+     attached the notice in Exhibit A and to related Covered Code.
+
+6. Versions of the License.
+
+     6.1. New Versions.
+     Netscape Communications Corporation ("Netscape") may publish revised
+     and/or new versions of the License from time to time. Each version
+     will be given a distinguishing version number.
+
+     6.2. Effect of New Versions.
+     Once Covered Code has been published under a particular version of the
+     License, You may always continue to use it under the terms of that
+     version. You may also choose to use such Covered Code under the terms
+     of any subsequent version of the License published by Netscape. No one
+     other than Netscape has the right to modify the terms applicable to
+     Covered Code created under this License.
+
+     6.3. Derivative Works.
+     If You create or use a modified version of this License (which you may
+     only do in order to apply it to code which is not already Covered Code
+     governed by this License), You must (a) rename Your license so that
+     the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
+     "MPL", "NPL" or any confusingly similar phrase do not appear in your
+     license (except to note that your license differs from this License)
+     and (b) otherwise make it clear that Your version of the license
+     contains terms which differ from the Mozilla Public License and
+     Netscape Public License. (Filling in the name of the Initial
+     Developer, Original Code or Contributor in the notice described in
+     Exhibit A shall not of themselves be deemed to be modifications of
+     this License.)
+
+7. DISCLAIMER OF WARRANTY.
+
+     COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
+     WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+     WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
+     DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
+     THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
+     IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
+     YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+     COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
+     OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
+     ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+8. TERMINATION.
+
+     8.1.  This License and the rights granted hereunder will terminate
+     automatically if You fail to comply with terms herein and fail to cure
+     such breach within 30 days of becoming aware of the breach. All
+     sublicenses to the Covered Code which are properly granted shall
+     survive any termination of this License. Provisions which, by their
+     nature, must remain in effect beyond the termination of this License
+     shall survive.
+
+     8.2.  If You initiate litigation by asserting a patent infringement
+     claim (excluding declatory judgment actions) against Initial Developer
+     or a Contributor (the Initial Developer or Contributor against whom
+     You file such action is referred to as "Participant")  alleging that:
+
+     (a)  such Participant's Contributor Version directly or indirectly
+     infringes any patent, then any and all rights granted by such
+     Participant to You under Sections 2.1 and/or 2.2 of this License
+     shall, upon 60 days notice from Participant terminate prospectively,
+     unless if within 60 days after receipt of notice You either: (i)
+     agree in writing to pay Participant a mutually agreeable reasonable
+     royalty for Your past and future use of Modifications made by such
+     Participant, or (ii) withdraw Your litigation claim with respect to
+     the Contributor Version against such Participant.  If within 60 days
+     of notice, a reasonable royalty and payment arrangement are not
+     mutually agreed upon in writing by the parties or the litigation claim
+     is not withdrawn, the rights granted by Participant to You under
+     Sections 2.1 and/or 2.2 automatically terminate at the expiration of
+     the 60 day notice period specified above.
+
+     (b)  any software, hardware, or device, other than such Participant's
+     Contributor Version, directly or indirectly infringes any patent, then
+     any rights granted to You by such Participant under Sections 2.1(b)
+     and 2.2(b) are revoked effective as of the date You first made, used,
+     sold, distributed, or had made, Modifications made by that
+     Participant.
+
+     8.3.  If You assert a patent infringement claim against Participant
+     alleging that such Participant's Contributor Version directly or
+     indirectly infringes any patent where such claim is resolved (such as
+     by license or settlement) prior to the initiation of patent
+     infringement litigation, then the reasonable value of the licenses
+     granted by such Participant under Sections 2.1 or 2.2 shall be taken
+     into account in determining the amount or value of any payment or
+     license.
+
+     8.4.  In the event of termination under Sections 8.1 or 8.2 above,
+     all end user license agreements (excluding distributors and resellers)
+     which have been validly granted by You or any distributor hereunder
+     prior to termination shall survive termination.
+
+9. LIMITATION OF LIABILITY.
+
+     UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+     (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+     DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
+     OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
+     ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+     CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
+     WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+     COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
+     INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
+     LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
+     RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+     PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
+     EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+     THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
+
+10. U.S. GOVERNMENT END USERS.
+
+     The Covered Code is a "commercial item," as that term is defined in
+     48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
+     software" and "commercial computer software documentation," as such
+     terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
+     C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
+     all U.S. Government End Users acquire Covered Code with only those
+     rights set forth herein.
+
+11. MISCELLANEOUS.
+
+     This License represents the complete agreement concerning subject
+     matter hereof. If any provision of this License is held to be
+     unenforceable, such provision shall be reformed only to the extent
+     necessary to make it enforceable. This License shall be governed by
+     California law provisions (except to the extent applicable law, if
+     any, provides otherwise), excluding its conflict-of-law provisions.
+     With respect to disputes in which at least one party is a citizen of,
+     or an entity chartered or registered to do business in the United
+     States of America, any litigation relating to this License shall be
+     subject to the jurisdiction of the Federal Courts of the Northern
+     District of California, with venue lying in Santa Clara County,
+     California, with the losing party responsible for costs, including
+     without limitation, court costs and reasonable attorneys' fees and
+     expenses. The application of the United Nations Convention on
+     Contracts for the International Sale of Goods is expressly excluded.
+     Any law or regulation which provides that the language of a contract
+     shall be construed against the drafter shall not apply to this
+     License.
+
+12. RESPONSIBILITY FOR CLAIMS.
+
+     As between Initial Developer and the Contributors, each party is
+     responsible for claims and damages arising, directly or indirectly,
+     out of its utilization of rights under this License and You agree to
+     work with Initial Developer and Contributors to distribute such
+     responsibility on an equitable basis. Nothing herein is intended or
+     shall be deemed to constitute any admission of liability.
+
+13. MULTIPLE-LICENSED CODE.
+
+     Initial Developer may designate portions of the Covered Code as
+     "Multiple-Licensed".  "Multiple-Licensed" means that the Initial
+     Developer permits you to utilize portions of the Covered Code under
+     Your choice of the MPL or the alternative licenses, if any, specified
+     by the Initial Developer in the file described in Exhibit A.
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+bootstrap v3.3.6
+broccoli-asset-rev v2.4.2
+broccoli-funnel v1.0.1
+datatables v1.10.8
+em-helpers v0.5.13
+em-table v0.1.6
+ember v2.2.0
+ember-array-contains-helper v1.0.2
+ember-bootstrap v0.5.1
+ember-cli v1.13.13
+ember-cli-app-version v1.0.0
+ember-cli-babel v5.1.6
+ember-cli-content-security-policy v0.4.0
+ember-cli-dependency-checker v1.2.0
+ember-cli-htmlbars v1.0.2
+ember-cli-htmlbars-inline-precompile v0.3.1
+ember-cli-ic-ajax v0.2.1
+ember-cli-inject-live-reload v1.4.0
+ember-cli-jquery-ui v0.0.20
+ember-cli-qunit v1.2.1
+ember-cli-release v0.2.8
+ember-cli-shims v0.0.6
+ember-cli-sri v1.2.1
+ember-cli-test-loader v0.2.1
+ember-cli-uglify v1.2.0
+ember-d3 v0.1.0
+ember-data v2.1.0
+ember-disable-proxy-controllers v1.0.1
+ember-export-application-global v1.0.5
+ember-load-initializers v0.1.7
+ember-qunit v0.4.16
+ember-qunit-notifications v0.1.0
+ember-resolver v2.0.3
+ember-spin-spinner v0.2.3
+ember-truth-helpers v1.2.0
+jquery v2.1.4
+jquery-ui v1.11.4
+loader.js v3.3.0
+momentjs v2.10.6
+qunit v1.19.0
+select2 v4.0.0
+snippet-ss v1.11.0
+spin.js v2.3.2
+-------------------------------------------------------------------------------
+The MIT License (MIT)
+
+All rights reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and assocated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+D3 v3.5.6
+--------------------------------------------------------------------------------
+(3-clause BSD license)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this list
+   of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice, this
+   list of conditions and the following disclaimer in the documentation and/or
+   other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors may
+   be used to endorse or promote products derived from this software without
+   specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.

+ 345 - 0
dev-support/bin/checkcompatibility.py

@@ -0,0 +1,345 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Script which checks Java API compatibility between two revisions of the
+# Java client.
+#
+# Originally sourced from Apache Kudu, which was based on the
+# compatibility checker from the Apache HBase project, but ported to
+# Python for better readability.
+
+import logging
+import os
+import re
+import shutil
+import subprocess
+import sys
+import urllib2
+try:
+  import argparse
+except ImportError:
+  sys.stderr.write("Please install argparse, e.g. via `pip install argparse`.")
+  sys.exit(2)
+
+# Various relative paths
+REPO_DIR = os.getcwd()
+
+def check_output(*popenargs, **kwargs):
+  r"""Run command with arguments and return its output as a byte string.
+  Backported from Python 2.7 as it's implemented as pure python on stdlib.
+  >>> check_output(['/usr/bin/python', '--version'])
+  Python 2.6.2
+  """
+  process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
+  output, _ = process.communicate()
+  retcode = process.poll()
+  if retcode:
+    cmd = kwargs.get("args")
+    if cmd is None:
+      cmd = popenargs[0]
+    error = subprocess.CalledProcessError(retcode, cmd)
+    error.output = output
+    raise error
+  return output
+
+def get_repo_dir():
+  """ Return the path to the top of the repo. """
+  dirname, _ = os.path.split(os.path.abspath(__file__))
+  return os.path.join(dirname, "../..")
+
+def get_scratch_dir():
+  """ Return the path to the scratch dir that we build within. """
+  scratch_dir = os.path.join(get_repo_dir(), "target", "compat-check")
+  if not os.path.exists(scratch_dir):
+    os.makedirs(scratch_dir)
+  return scratch_dir
+
+def get_java_acc_dir():
+  """ Return the path where we check out the Java API Compliance Checker. """
+  return os.path.join(get_repo_dir(), "target", "java-acc")
+
+
+def clean_scratch_dir(scratch_dir):
+  """ Clean up and re-create the scratch directory. """
+  if os.path.exists(scratch_dir):
+    logging.info("Removing scratch dir %s...", scratch_dir)
+    shutil.rmtree(scratch_dir)
+  logging.info("Creating empty scratch dir %s...", scratch_dir)
+  os.makedirs(scratch_dir)
+
+
+def checkout_java_tree(rev, path):
+  """ Check out the Java source tree for the given revision into
+  the given path. """
+  logging.info("Checking out %s in %s", rev, path)
+  os.makedirs(path)
+  # Extract java source
+  subprocess.check_call(["bash", '-o', 'pipefail', "-c",
+                         ("git archive --format=tar %s | " +
+                          "tar -C \"%s\" -xf -") % (rev, path)],
+                        cwd=get_repo_dir())
+
+def get_git_hash(revname):
+  """ Convert 'revname' to its SHA-1 hash. """
+  return check_output(["git", "rev-parse", revname],
+                      cwd=get_repo_dir()).strip()
+
+def get_repo_name():
+  """Get the name of the repo based on the git remote."""
+  remotes = check_output(["git", "remote", "-v"],
+                         cwd=get_repo_dir()).strip().split("\n")
+  # Example output:
+  # origin	https://github.com/apache/hadoop.git (fetch)
+  # origin	https://github.com/apache/hadoop.git (push)
+  remote_url = remotes[0].split("\t")[1].split(" ")[0]
+  remote = remote_url.split("/")[-1]
+  if remote.endswith(".git"):
+    remote = remote[:-4]
+  return remote
+
+def build_tree(java_path):
+  """ Run the Java build within 'path'. """
+  logging.info("Building in %s...", java_path)
+  subprocess.check_call(["mvn", "-DskipTests", "-Dmaven.javadoc.skip=true",
+                         "package"],
+                        cwd=java_path)
+
+
+def checkout_java_acc(force):
+  """
+  Check out the Java API Compliance Checker. If 'force' is true, will
+  re-download even if the directory exists.
+  """
+  acc_dir = get_java_acc_dir()
+  if os.path.exists(acc_dir):
+    logging.info("Java ACC is already downloaded.")
+    if not force:
+      return
+    logging.info("Forcing re-download.")
+    shutil.rmtree(acc_dir)
+
+  logging.info("Downloading Java ACC...")
+
+  url = "https://github.com/lvc/japi-compliance-checker/archive/1.8.tar.gz"
+  scratch_dir = get_scratch_dir()
+  path = os.path.join(scratch_dir, os.path.basename(url))
+  jacc = urllib2.urlopen(url)
+  with open(path, 'wb') as w:
+    w.write(jacc.read())
+
+  subprocess.check_call(["tar", "xzf", path],
+                        cwd=scratch_dir)
+
+  shutil.move(os.path.join(scratch_dir, "japi-compliance-checker-1.8"),
+              os.path.join(acc_dir))
+
+
+def find_jars(path):
+  """ Return a list of jars within 'path' to be checked for compatibility. """
+  all_jars = set(check_output(["find", path, "-name", "*.jar"]).splitlines())
+
+  return [j for j in all_jars if (
+      "-tests" not in j and
+      "-sources" not in j and
+      "-with-dependencies" not in j)]
+
+def write_xml_file(path, version, jars):
+  """Write the XML manifest file for JACC."""
+  with open(path, "wt") as f:
+    f.write("<version>" + version + "</version>\n")
+    f.write("<archives>")
+    for j in jars:
+      f.write(j + "\n")
+    f.write("</archives>")
+
+def run_java_acc(src_name, src_jars, dst_name, dst_jars, annotations):
+  """ Run the compliance checker to compare 'src' and 'dst'. """
+  logging.info("Will check compatibility between original jars:\n\t%s\n" +
+               "and new jars:\n\t%s",
+               "\n\t".join(src_jars),
+               "\n\t".join(dst_jars))
+
+  java_acc_path = os.path.join(get_java_acc_dir(), "japi-compliance-checker.pl")
+
+  src_xml_path = os.path.join(get_scratch_dir(), "src.xml")
+  dst_xml_path = os.path.join(get_scratch_dir(), "dst.xml")
+  write_xml_file(src_xml_path, src_name, src_jars)
+  write_xml_file(dst_xml_path, dst_name, dst_jars)
+
+  out_path = os.path.join(get_scratch_dir(), "report.html")
+
+  args = ["perl", java_acc_path,
+          "-l", get_repo_name(),
+          "-d1", src_xml_path,
+          "-d2", dst_xml_path,
+          "-report-path", out_path]
+
+  if annotations is not None:
+    annotations_path = os.path.join(get_scratch_dir(), "annotations.txt")
+    with file(annotations_path, "w") as f:
+      for ann in annotations:
+        print >>f, ann
+    args += ["-annotations-list", annotations_path]
+
+  subprocess.check_call(args)
+
+def filter_jars(jars, include_filters, exclude_filters):
+  """Filter the list of JARs based on include and exclude filters."""
+  filtered = []
+  # Apply include filters
+  for j in jars:
+    found = False
+    basename = os.path.basename(j)
+    for f in include_filters:
+      if f.match(basename):
+        found = True
+        break
+    if found:
+      filtered += [j]
+    else:
+      logging.debug("Ignoring JAR %s", j)
+  # Apply exclude filters
+  exclude_filtered = []
+  for j in filtered:
+    basename = os.path.basename(j)
+    found = False
+    for f in exclude_filters:
+      if f.match(basename):
+        found = True
+        break
+    if found:
+      logging.debug("Ignoring JAR %s", j)
+    else:
+      exclude_filtered += [j]
+
+  return exclude_filtered
+
+
+def main():
+  """Main function."""
+  logging.basicConfig(level=logging.INFO)
+  parser = argparse.ArgumentParser(
+      description="Run Java API Compliance Checker.")
+  parser.add_argument("-f", "--force-download",
+                      action="store_true",
+                      help="Download dependencies (i.e. Java JAVA_ACC) " +
+                      "even if they are already present")
+  parser.add_argument("-i", "--include-file",
+                      action="append",
+                      dest="include_files",
+                      help="Regex filter for JAR files to be included. " +
+                      "Applied before the exclude filters. " +
+                      "Can be specified multiple times.")
+  parser.add_argument("-e", "--exclude-file",
+                      action="append",
+                      dest="exclude_files",
+                      help="Regex filter for JAR files to be excluded. " +
+                      "Applied after the include filters. " +
+                      "Can be specified multiple times.")
+  parser.add_argument("-a", "--annotation",
+                      action="append",
+                      dest="annotations",
+                      help="Fully-qualified Java annotation. " +
+                      "Java ACC will only check compatibility of " +
+                      "annotated classes. Can be specified multiple times.")
+  parser.add_argument("--skip-clean",
+                      action="store_true",
+                      help="Skip cleaning the scratch directory.")
+  parser.add_argument("--skip-build",
+                      action="store_true",
+                      help="Skip building the projects.")
+  parser.add_argument("src_rev", nargs=1, help="Source revision.")
+  parser.add_argument("dst_rev", nargs="?", default="HEAD",
+                      help="Destination revision. " +
+                      "If not specified, will use HEAD.")
+
+  if len(sys.argv) == 1:
+    parser.print_help()
+    sys.exit(1)
+
+  args = parser.parse_args()
+
+  src_rev, dst_rev = args.src_rev[0], args.dst_rev
+
+  logging.info("Source revision: %s", src_rev)
+  logging.info("Destination revision: %s", dst_rev)
+
+  # Construct the JAR regex patterns for filtering.
+  include_filters = []
+  if args.include_files is not None:
+    for f in args.include_files:
+      logging.info("Applying JAR filename include filter: %s", f)
+      include_filters += [re.compile(f)]
+  else:
+    include_filters = [re.compile(".*")]
+
+  exclude_filters = []
+  if args.exclude_files is not None:
+    for f in args.exclude_files:
+      logging.info("Applying JAR filename exclude filter: %s", f)
+      exclude_filters += [re.compile(f)]
+
+  # Construct the annotation list
+  annotations = args.annotations
+  if annotations is not None:
+    logging.info("Filtering classes using %d annotation(s):", len(annotations))
+    for a in annotations:
+      logging.info("\t%s", a)
+
+  # Download deps.
+  checkout_java_acc(args.force_download)
+
+  # Set up the build.
+  scratch_dir = get_scratch_dir()
+  src_dir = os.path.join(scratch_dir, "src")
+  dst_dir = os.path.join(scratch_dir, "dst")
+
+  if args.skip_clean:
+    logging.info("Skipping cleaning the scratch directory")
+  else:
+    clean_scratch_dir(scratch_dir)
+    # Check out the src and dst source trees.
+    checkout_java_tree(get_git_hash(src_rev), src_dir)
+    checkout_java_tree(get_git_hash(dst_rev), dst_dir)
+
+  # Run the build in each.
+  if args.skip_build:
+    logging.info("Skipping the build")
+  else:
+    build_tree(src_dir)
+    build_tree(dst_dir)
+
+  # Find the JARs.
+  src_jars = find_jars(src_dir)
+  dst_jars = find_jars(dst_dir)
+
+  # Filter the JARs.
+  src_jars = filter_jars(src_jars, include_filters, exclude_filters)
+  dst_jars = filter_jars(dst_jars, include_filters, exclude_filters)
+
+  if len(src_jars) == 0 or len(dst_jars) == 0:
+    logging.error("No JARs found! Are your filters too strong?")
+    sys.exit(1)
+
+  run_java_acc(src_rev, src_jars,
+               dst_rev, dst_jars, annotations)
+
+
+if __name__ == "__main__":
+  main()

+ 1 - 1
dev-support/bin/create-release

@@ -527,7 +527,7 @@ function makearelease
   # shellcheck disable=SC2046
   run_and_redirect "${LOGDIR}/mvn_install.log" \
     "${MVN}" "${MVN_ARGS[@]}" install \
-      -Pdist,src \
+      -Pdist,src,yarn-ui \
       "${signflags[@]}" \
       -DskipTests -Dtar $(hadoop_native_flags)
 

+ 9 - 0
dev-support/docker/Dockerfile

@@ -132,6 +132,15 @@ RUN pip install python-dateutil
 ###
 ENV MAVEN_OPTS -Xms256m -Xmx512m
 
+###
+# Install Node.js tools for the web UI framework
+###
+RUN apt-get -y install nodejs && \
+    ln -s /usr/bin/nodejs /usr/bin/node && \
+    apt-get -y install npm && \
+    npm install -g bower && \
+    npm install -g ember-cli
+
 ###
 # Everything past this point is either not needed for testing or breaks Yetus.
 # So tell Yetus not to read the rest of the file:

+ 7 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml

@@ -192,6 +192,13 @@
       <directory>${project.build.directory}/site</directory>
       <outputDirectory>/share/doc/hadoop/${hadoop.component}</outputDirectory>
     </fileSet>
+      <fileSet>
+      <directory>hadoop-yarn/hadoop-yarn-ui/target/hadoop-yarn-ui-${project.version}</directory>
+      <outputDirectory>/share/hadoop/${hadoop.component}/webapps/rm</outputDirectory>
+      <includes>
+        <include>**/*</include>
+      </includes>
+    </fileSet>
   </fileSets>
   <moduleSets>
     <moduleSet>

+ 1 - 1
hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml

@@ -124,7 +124,7 @@
         <!-- Checks for Size Violations.                    -->
         <!-- See http://checkstyle.sf.net/config_sizes.html -->
         <module name="LineLength">
-          <property name="ignorePattern" value="^import"/>
+          <property name="ignorePattern" value="^(package|import) .*"/>
         </module>
         <module name="MethodLength"/>
         <module name="ParameterNumber"/>

+ 10 - 10
hadoop-client/pom.xml

@@ -41,7 +41,7 @@
       <exclusions>
         <exclusion>
           <groupId>javax.servlet</groupId>
-          <artifactId>servlet-api</artifactId>
+          <artifactId>javax.servlet-api</artifactId>
         </exclusion>
         <exclusion>
           <groupId>commons-logging</groupId>
@@ -49,18 +49,18 @@
         </exclusion>
         <exclusion>
           <groupId>jetty</groupId>
-          <artifactId>org.mortbay.jetty</artifactId>
+          <artifactId>org.eclipse.jetty</artifactId>
         </exclusion>
         <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty</artifactId>
+          <groupId>org.eclipse.jetty</groupId>
+          <artifactId>jetty-server</artifactId>
         </exclusion>
         <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
+          <groupId>org.eclipse.jetty</groupId>
           <artifactId>jetty-util</artifactId>
         </exclusion>
         <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
+          <groupId>org.eclipse.jetty</groupId>
           <artifactId>servlet-api-2.5</artifactId>
         </exclusion>
         <exclusion>
@@ -112,8 +112,8 @@
           <artifactId>avro</artifactId>
         </exclusion>
         <exclusion>
-          <groupId>org.mortbay.jetty</groupId>
-          <artifactId>jetty</artifactId>
+          <groupId>org.eclipse.jetty</groupId>
+          <artifactId>jetty-server</artifactId>
         </exclusion>
         <exclusion>
           <groupId>com.sun.jersey</groupId>
@@ -125,7 +125,7 @@
         </exclusion>
         <exclusion>
           <groupId>javax.servlet</groupId>
-          <artifactId>servlet-api</artifactId>
+          <artifactId>javax.servlet-api</artifactId>
         </exclusion>
       </exclusions>
     </dependency>
@@ -137,7 +137,7 @@
       <exclusions>
         <exclusion>
           <groupId>javax.servlet</groupId>
-          <artifactId>servlet-api</artifactId>
+          <artifactId>javax.servlet-api</artifactId>
         </exclusion>
         <exclusion>
           <groupId>org.apache.hadoop</groupId>

+ 127 - 0
hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml

@@ -0,0 +1,127 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-alpha2-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-cloud-storage</artifactId>
+  <version>3.0.0-alpha2-SNAPSHOT</version>
+  <packaging>jar</packaging>
+
+  <description>Apache Hadoop Cloud Storage</description>
+  <name>Apache Hadoop Cloud Storage</name>
+
+  <properties>
+    <hadoop.component>cloud-storage</hadoop.component>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-annotations</artifactId>
+      <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>jdk.tools</groupId>
+          <artifactId>jdk.tools</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>javax.servlet</groupId>
+          <artifactId>servlet-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging-api</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>jetty</groupId>
+          <artifactId>org.mortbay.jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>jetty</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.mortbay.jetty</groupId>
+          <artifactId>servlet-api-2.5</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-json</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.sun.jersey</groupId>
+          <artifactId>jersey-server</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.eclipse.jdt</groupId>
+          <artifactId>core</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.avro</groupId>
+          <artifactId>avro-ipc</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>net.sf.kosmosfs</groupId>
+          <artifactId>kfs</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>net.java.dev.jets3t</groupId>
+          <artifactId>jets3t</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>com.jcraft</groupId>
+          <artifactId>jsch</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.zookeeper</groupId>
+          <artifactId>zookeeper</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-aws</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-azure</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-openstack</artifactId>
+      <scope>compile</scope>
+    </dependency>
+  </dependencies>
+</project>

+ 54 - 0
hadoop-cloud-storage-project/pom.xml

@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-alpha2-SNAPSHOT</version>
+    <relativePath>../hadoop-project</relativePath>
+  </parent>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-cloud-storage-project</artifactId>
+  <version>3.0.0-alpha2-SNAPSHOT</version>
+  <description>Apache Hadoop Cloud Storage Project</description>
+  <name>Apache Hadoop Cloud Storage Project</name>
+  <packaging>pom</packaging>
+
+  <modules>
+    <module>hadoop-cloud-storage</module>
+  </modules>
+
+  <build>
+    <plugins>
+      <plugin>
+        <artifactId>maven-deploy-plugin</artifactId>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+</project>

+ 1 - 1
hadoop-common-project/hadoop-auth-examples/pom.xml

@@ -34,7 +34,7 @@
   <dependencies>
     <dependency>
       <groupId>javax.servlet</groupId>
-      <artifactId>servlet-api</artifactId>
+      <artifactId>javax.servlet-api</artifactId>
       <scope>provided</scope>
     </dependency>
     <dependency>

+ 12 - 0
hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/RequestLoggerFilter.java

@@ -139,7 +139,19 @@ public class RequestLoggerFilter implements Filter {
       status = sc;
     }
 
+    /**
+     * Calls setStatus(int sc, String msg) on the wrapped
+     * {@link HttpServletResponseWrapper} object.
+     *
+     * @param sc the status code
+     * @param msg the status message
+     * @deprecated {@link HttpServletResponseWrapper#setStatus(int, String)} is
+     * deprecated. To set a status code use {@link #setStatus(int)}, to send an
+     * error with a description use {@link #sendError(int, String)}
+     */
     @Override
+    @Deprecated
+    @SuppressWarnings("deprecation")
     public void setStatus(int sc, String msg) {
       super.setStatus(sc, msg);
       status = sc;
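
The deprecation note above points callers at setStatus(int) and sendError(int, String). A minimal illustrative servlet (class and parameter names are hypothetical; it assumes only javax.servlet-api on the classpath) showing the two replacements:

    import java.io.IOException;

    import javax.servlet.http.HttpServlet;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;

    public class StatusExampleServlet extends HttpServlet {
      @Override
      protected void doGet(HttpServletRequest req, HttpServletResponse resp)
          throws IOException {
        if (req.getParameter("name") == null) {
          // Error path: status code plus a description, instead of the
          // deprecated setStatus(int, String).
          resp.sendError(HttpServletResponse.SC_BAD_REQUEST,
              "missing 'name' parameter");
        } else {
          // Success path: a plain status code.
          resp.setStatus(HttpServletResponse.SC_OK);
          resp.getWriter().println("hello " + req.getParameter("name"));
        }
      }
    }
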

+ 9 - 4
hadoop-common-project/hadoop-auth/pom.xml

@@ -53,13 +53,18 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.mortbay.jetty</groupId>
+      <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-util</artifactId>
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.mortbay.jetty</groupId>
-      <artifactId>jetty</artifactId>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-server</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-servlet</artifactId>
       <scope>test</scope>
     </dependency>
      <dependency>
@@ -74,7 +79,7 @@
     </dependency>
     <dependency>
       <groupId>javax.servlet</groupId>
-      <artifactId>servlet-api</artifactId>
+      <artifactId>javax.servlet-api</artifactId>
       <scope>provided</scope>
     </dependency>
     <dependency>

+ 6 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java

@@ -343,6 +343,8 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
       authorization = authorization.substring(KerberosAuthenticator.NEGOTIATE.length()).trim();
       final Base64 base64 = new Base64(0);
       final byte[] clientToken = base64.decode(authorization);
+      final String serverName = InetAddress.getByName(request.getServerName())
+                                           .getCanonicalHostName();
       try {
         token = Subject.doAs(serverSubject, new PrivilegedExceptionAction<AuthenticationToken>() {
 
@@ -352,7 +354,10 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
             GSSContext gssContext = null;
             GSSCredential gssCreds = null;
             try {
-              gssCreds = gssManager.createCredential(null,
+              gssCreds = gssManager.createCredential(
+                  gssManager.createName(
+                      KerberosUtil.getServicePrincipal("HTTP", serverName),
+                      KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL")),
                   GSSCredential.INDEFINITE_LIFETIME,
                   new Oid[]{
                     KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),

+ 18 - 11
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java

@@ -30,11 +30,14 @@ import org.apache.http.impl.auth.SPNegoScheme;
 import org.apache.http.impl.client.BasicCredentialsProvider;
 import org.apache.http.impl.client.HttpClientBuilder;
 import org.apache.http.util.EntityUtils;
-import org.mortbay.jetty.Server;
-import org.mortbay.jetty.servlet.Context;
-import org.mortbay.jetty.servlet.FilterHolder;
-import org.mortbay.jetty.servlet.ServletHolder;
-
+import org.eclipse.jetty.server.Connector;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.server.ServerConnector;
+import org.eclipse.jetty.servlet.FilterHolder;
+import org.eclipse.jetty.servlet.ServletContextHandler;
+import org.eclipse.jetty.servlet.ServletHolder;
+
+import javax.servlet.DispatcherType;
 import javax.servlet.FilterConfig;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
@@ -53,6 +56,7 @@ import java.net.HttpURLConnection;
 import java.net.ServerSocket;
 import java.net.URL;
 import java.security.Principal;
+import java.util.EnumSet;
 import java.util.Properties;
 
 import org.junit.Assert;
@@ -63,7 +67,7 @@ public class AuthenticatorTestCase {
   private int port = -1;
   private boolean useTomcat = false;
   private Tomcat tomcat = null;
-  Context context;
+  ServletContextHandler context;
 
   private static Properties authenticatorConfig;
 
@@ -121,16 +125,19 @@ public class AuthenticatorTestCase {
   }
 
   protected void startJetty() throws Exception {
-    server = new Server(0);
-    context = new Context();
+    server = new Server();
+    context = new ServletContextHandler();
     context.setContextPath("/foo");
     server.setHandler(context);
-    context.addFilter(new FilterHolder(TestFilter.class), "/*", 0);
+    context.addFilter(new FilterHolder(TestFilter.class), "/*",
+        EnumSet.of(DispatcherType.REQUEST));
     context.addServlet(new ServletHolder(TestServlet.class), "/bar");
     host = "localhost";
     port = getLocalPort();
-    server.getConnectors()[0].setHost(host);
-    server.getConnectors()[0].setPort(port);
+    ServerConnector connector = new ServerConnector(server);
+    connector.setHost(host);
+    connector.setPort(port);
+    server.setConnectors(new Connector[] {connector});
     server.start();
     System.out.println("Running embedded servlet container at: http://" + host + ":" + port);
   }
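
For reference, a self-contained sketch of the Jetty 9 embedded-server pattern the test now follows; the servlet, host, and context path are illustrative and assume jetty-server and jetty-servlet 9.x on the classpath:

    import java.io.IOException;

    import javax.servlet.http.HttpServlet;
    import javax.servlet.http.HttpServletRequest;
    import javax.servlet.http.HttpServletResponse;

    import org.eclipse.jetty.server.Connector;
    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.server.ServerConnector;
    import org.eclipse.jetty.servlet.ServletContextHandler;
    import org.eclipse.jetty.servlet.ServletHolder;

    public class EmbeddedJettyExample {
      // Trivial servlet so the server has something to serve.
      public static class HelloServlet extends HttpServlet {
        @Override
        protected void doGet(HttpServletRequest req, HttpServletResponse resp)
            throws IOException {
          resp.getWriter().println("hello");
        }
      }

      public static void main(String[] args) throws Exception {
        Server server = new Server();
        ServerConnector connector = new ServerConnector(server);
        connector.setHost("localhost");
        connector.setPort(0);                          // 0 = pick a free port
        server.setConnectors(new Connector[] {connector});

        ServletContextHandler context = new ServletContextHandler();
        context.setContextPath("/foo");
        context.addServlet(new ServletHolder(HelloServlet.class), "/bar");
        server.setHandler(context);

        server.start();
        System.out.println("Listening on port " + connector.getLocalPort());
        server.join();
      }
    }
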

+ 33 - 12
hadoop-common-project/hadoop-common/pom.xml

@@ -93,24 +93,34 @@
     </dependency>
     <dependency>
       <groupId>javax.servlet</groupId>
-      <artifactId>servlet-api</artifactId>
+      <artifactId>javax.servlet-api</artifactId>
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>org.mortbay.jetty</groupId>
-      <artifactId>jetty</artifactId>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-server</artifactId>
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>org.mortbay.jetty</groupId>
+      <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-util</artifactId>
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>org.mortbay.jetty</groupId>
-      <artifactId>jetty-sslengine</artifactId>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-servlet</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-webapp</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.jetty</groupId>
+      <artifactId>jetty-util-ajax</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>javax.servlet.jsp</groupId>
       <artifactId>jsp-api</artifactId>
@@ -235,7 +245,6 @@
     <dependency>
       <groupId>com.jcraft</groupId>
       <artifactId>jsch</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.curator</groupId>
@@ -360,7 +369,6 @@
           </execution>
           <execution>
             <id>compile-protoc</id>
-            <phase>generate-sources</phase>
             <goals>
               <goal>protoc</goal>
             </goals>
@@ -388,14 +396,12 @@
                   <include>GenericRefreshProtocol.proto</include>
                 </includes>
               </source>
-              <output>${project.build.directory}/generated-sources/java</output>
             </configuration>
           </execution>
           <execution>
             <id>compile-test-protoc</id>
-            <phase>generate-test-sources</phase>
             <goals>
-              <goal>protoc</goal>
+              <goal>test-protoc</goal>
             </goals>
             <configuration>
               <protocVersion>${protobuf.version}</protocVersion>
@@ -410,7 +416,18 @@
                   <include>test_rpc_service.proto</include>
                 </includes>
               </source>
-              <output>${project.build.directory}/generated-test-sources/java</output>
+            </configuration>
+          </execution>
+          <execution>
+            <id>resource-gz</id>
+            <phase>generate-resources</phase>
+            <goals>
+              <goal>resource-gz</goal>
+            </goals>
+            <configuration>
+              <inputDirectory>${basedir}/src/main/webapps/static</inputDirectory>
+              <outputDirectory>${basedir}/target/webapps/static</outputDirectory>
+              <extensions>js,css</extensions>
             </configuration>
           </execution>
         </executions>
@@ -641,6 +658,8 @@
                     <javahClassName>org.apache.hadoop.io.erasurecode.ErasureCodeNative</javahClassName>
                     <javahClassName>org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawEncoder</javahClassName>
                     <javahClassName>org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawDecoder</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawEncoder</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawDecoder</javahClassName>
                     <javahClassName>org.apache.hadoop.crypto.OpensslCipher</javahClassName>
                     <javahClassName>org.apache.hadoop.crypto.random.OpensslSecureRandom</javahClassName>
                     <javahClassName>org.apache.hadoop.util.NativeCrc32</javahClassName>
@@ -781,6 +800,8 @@
                     <javahClassName>org.apache.hadoop.io.erasurecode.ErasureCodeNative</javahClassName>
                     <javahClassName>org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawEncoder</javahClassName>
                     <javahClassName>org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawDecoder</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawEncoder</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawDecoder</javahClassName>
                     <javahClassName>org.apache.hadoop.crypto.OpensslCipher</javahClassName>
                     <javahClassName>org.apache.hadoop.crypto.random.OpensslSecureRandom</javahClassName>
                     <javahClassName>org.apache.hadoop.util.NativeCrc32</javahClassName>

+ 3 - 1
hadoop-common-project/hadoop-common/src/CMakeLists.txt

@@ -113,7 +113,9 @@ if (ISAL_LIBRARY)
         ${SRC}/io/erasurecode/jni_erasure_code_native.c
         ${SRC}/io/erasurecode/jni_common.c
         ${SRC}/io/erasurecode/jni_rs_encoder.c
-        ${SRC}/io/erasurecode/jni_rs_decoder.c)
+        ${SRC}/io/erasurecode/jni_rs_decoder.c
+        ${SRC}/io/erasurecode/jni_xor_encoder.c
+        ${SRC}/io/erasurecode/jni_xor_decoder.c)
 
         add_executable(erasure_code_test
         ${SRC}/io/erasurecode/isal_load.c

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -230,7 +230,7 @@ fi
 if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
   # shellcheck disable=SC2034
   HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}"
-  if [[ -n "${HADOOP_SUBCMD_SECURESERVICE}" ]]; then
+  if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
     # shellcheck disable=SC2034
     HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
   else

+ 0 - 6
hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh

@@ -223,12 +223,6 @@ esac
 # Java property: hadoop.security.logger
 # export HADOOP_SECURITY_LOGGER=INFO,NullAppender
 
-# Default log level for file system audit messages.
-# Generally, this is specifically set in the namenode-specific
-# options line.
-# Java property: hdfs.audit.logger
-# export HADOOP_AUDIT_LOGGER=INFO,NullAppender
-
 # Default process priority level
 # Note that sub-processes will also run at this level!
 # export HADOOP_NICENESS=0

+ 19 - 3
hadoop-common-project/hadoop-common/src/main/conf/hadoop-metrics2.properties

@@ -50,9 +50,25 @@
 # If '*' all tags are used. If specifying multiple tags, separate them with 
 # commas. Note that the last segment of the property name is the context name.
 #
-#*.sink.ganglia.tagsForPrefix.jvm=ProcesName
-#*.sink.ganglia.tagsForPrefix.dfs=
-#*.sink.ganglia.tagsForPrefix.rpc=
+# A typical use of tags is separating the metrics by the HDFS rpc port
+# and HDFS service rpc port.
+# For example:
+#   With the following HDFS configuration:
+#       dfs.namenode.rpc-address is set to namenodeAddress:9110
+#       dfs.namenode.servicerpc-address is set to namenodeAddress:9111
+#   If no tags are used, the following metric would be gathered:
+#       rpc.rpc.NumOpenConnections
+#   If "*.sink.ganglia.tagsForPrefix.rpc=port" is used,
+#   the following metrics would be gathered:
+#       rpc.rpc.port=9110.NumOpenConnections
+#       rpc.rpc.port=9111.NumOpenConnections
+#
+#*.sink.ganglia.tagsForPrefix.jvm=ProcessName
+#*.sink.ganglia.tagsForPrefix.dfs=HAState,IsOutOfSync
+#*.sink.ganglia.tagsForPrefix.rpc=port
+#*.sink.ganglia.tagsForPrefix.rpcdetailed=port
+#*.sink.ganglia.tagsForPrefix.metricssystem=*
+#*.sink.ganglia.tagsForPrefix.ugi=*
 #*.sink.ganglia.tagsForPrefix.mapred=
 
 #namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649

+ 7 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.conf;
 import java.io.IOException;
 import java.io.Writer;
 
+import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
@@ -58,7 +59,12 @@ public class ConfServlet extends HttpServlet {
   public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws ServletException, IOException {
 
-    if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
+    // If the user is the static user and the auth type is null, this is a
+    // non-secure environment and no authorization is needed; otherwise,
+    // perform the authorization check.
+    final ServletContext servletContext = getServletContext();
+    if (!HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) &&
+        !HttpServer2.isInstrumentationAccessAllowed(servletContext,
                                                    request, response)) {
       return;
     }

+ 28 - 28
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java

@@ -98,8 +98,8 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
 
   private static final String ANONYMOUS_REQUESTS_DISALLOWED = "Anonymous requests are disallowed";
 
-  public static final String TOKEN_KIND_STR = "kms-dt";
-  public static final Text TOKEN_KIND = new Text(TOKEN_KIND_STR);
+  public static final String TOKEN_KIND_STR = KMSDelegationToken.TOKEN_KIND_STR;
+  public static final Text TOKEN_KIND = KMSDelegationToken.TOKEN_KIND;
 
   public static final String SCHEME_NAME = "kms";
 
@@ -373,7 +373,6 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
   private ConnectionConfigurator configurator;
   private DelegationTokenAuthenticatedURL.Token authToken;
   private final int authRetry;
-  private final UserGroupInformation actualUgi;
 
   @Override
   public String toString() {
@@ -455,15 +454,6 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
                     KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
             new EncryptedQueueRefiller());
     authToken = new DelegationTokenAuthenticatedURL.Token();
-    UserGroupInformation.AuthenticationMethod authMethod =
-        UserGroupInformation.getCurrentUser().getAuthenticationMethod();
-    if (authMethod == UserGroupInformation.AuthenticationMethod.PROXY) {
-      actualUgi = UserGroupInformation.getCurrentUser().getRealUser();
-    } else if (authMethod == UserGroupInformation.AuthenticationMethod.TOKEN) {
-      actualUgi = UserGroupInformation.getLoginUser();
-    } else {
-      actualUgi =UserGroupInformation.getCurrentUser();
-    }
   }
 
   private static Path extractKMSPath(URI uri) throws MalformedURLException, IOException {
@@ -530,19 +520,9 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       throws IOException {
     HttpURLConnection conn;
     try {
-      // if current UGI is different from UGI at constructor time, behave as
-      // proxyuser
-      UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
-      final String doAsUser = (currentUgi.getAuthenticationMethod() ==
-          UserGroupInformation.AuthenticationMethod.PROXY)
-                              ? currentUgi.getShortUserName() : null;
-
-      // If current UGI contains kms-dt && is not proxy, doAs it to use its dt.
-      // Otherwise, create the HTTP connection using the UGI at constructor time
-      UserGroupInformation ugiToUse =
-          (currentUgiContainsKmsDt() && doAsUser == null) ?
-              currentUgi : actualUgi;
-      conn = ugiToUse.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
+      final String doAsUser = getDoAsUser();
+      conn = getActualUgi().doAs(new PrivilegedExceptionAction
+          <HttpURLConnection>() {
         @Override
         public HttpURLConnection run() throws Exception {
           DelegationTokenAuthenticatedURL authUrl =
@@ -919,7 +899,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
           token, url, doAsUser);
       final DelegationTokenAuthenticatedURL authUrl =
           new DelegationTokenAuthenticatedURL(configurator);
-      return actualUgi.doAs(
+      return getActualUgi().doAs(
           new PrivilegedExceptionAction<Long>() {
             @Override
             public Long run() throws Exception {
@@ -942,7 +922,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       final String doAsUser = getDoAsUser();
       final DelegationTokenAuthenticatedURL.Token token =
           generateDelegationToken(dToken);
-      return actualUgi.doAs(
+      return getActualUgi().doAs(
           new PrivilegedExceptionAction<Void>() {
             @Override
             public Void run() throws Exception {
@@ -1014,7 +994,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
           new DelegationTokenAuthenticatedURL(configurator);
       try {
         final String doAsUser = getDoAsUser();
-        token = actualUgi.doAs(new PrivilegedExceptionAction<Token<?>>() {
+        token = getActualUgi().doAs(new PrivilegedExceptionAction<Token<?>>() {
           @Override
           public Token<?> run() throws Exception {
             // Not using the cached token here.. Creating a new token here
@@ -1060,6 +1040,26 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     return false;
   }
 
+  private UserGroupInformation getActualUgi() throws IOException {
+    final UserGroupInformation currentUgi = UserGroupInformation
+        .getCurrentUser();
+    if (LOG.isDebugEnabled()) {
+      UserGroupInformation.logAllUserInfo(currentUgi);
+    }
+    // Use current user by default
+    UserGroupInformation actualUgi = currentUgi;
+    if (currentUgi.getRealUser() != null) {
+      // Use real user for proxy user
+      actualUgi = currentUgi.getRealUser();
+    } else if (!currentUgiContainsKmsDt() &&
+        !currentUgi.hasKerberosCredentials()) {
+      // Use login user for user that does not have either
+      // Kerberos credential or KMS delegation token for KMS operations
+      actualUgi = currentUgi.getLoginUser();
+    }
+    return actualUgi;
+  }
+
   /**
    * Shutdown valueQueue executor threads
    */

+ 52 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSDelegationToken.java

@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
+
+/**
+ * Holder class for KMS delegation tokens.
+ */
+@InterfaceAudience.Private
+public final class KMSDelegationToken {
+
+  public static final String TOKEN_KIND_STR = "kms-dt";
+  public static final Text TOKEN_KIND = new Text(TOKEN_KIND_STR);
+
+  // Utility class is not supposed to be instantiated.
+  private KMSDelegationToken() {
+  }
+
+  /**
+   * DelegationTokenIdentifier used for the KMS.
+   */
+  public static class KMSDelegationTokenIdentifier
+      extends DelegationTokenIdentifier {
+
+    public KMSDelegationTokenIdentifier() {
+      super(TOKEN_KIND);
+    }
+
+    @Override
+    public Text getKind() {
+      return TOKEN_KIND;
+    }
+  }
+}
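
A short sketch of how a caller might test for this token kind (the helper class is hypothetical; it assumes hadoop-common on the classpath):

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.token.Token;

    public final class KmsTokenUtil {
      // Same literal as KMSDelegationToken.TOKEN_KIND above.
      private static final Text KMS_TOKEN_KIND = new Text("kms-dt");

      private KmsTokenUtil() {
      }

      /** Returns true if the credentials already carry a KMS delegation token. */
      public static boolean hasKmsDelegationToken(Credentials credentials) {
        for (Token<?> token : credentials.getAllTokens()) {
          if (KMS_TOKEN_KIND.equals(token.getKind())) {
            return true;
          }
        }
        return false;
      }
    }
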

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -770,7 +770,9 @@ public class CommonConfigurationKeysPublic {
   public static final String HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS =
       "hadoop.security.sensitive-config-keys";
   public static final String HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS_DEFAULT =
+      "secret$" + "," +
       "password$" + "," +
+      "ssl.keystore.pass$" + "," +
       "fs.s3.*[Ss]ecret.?[Kk]ey" + "," +
       "fs.azure\\.account.key.*" + "," +
       "dfs.webhdfs.oauth2.[a-z]+.token" + "," +

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSExceptionMessages.java

@@ -46,4 +46,6 @@ public class FSExceptionMessages {
 
   public static final String TOO_MANY_BYTES_FOR_DEST_BUFFER
       = "Requested more bytes than destination buffer size";
+
+  public static final String PERMISSION_DENIED = "Permission denied";
 }

+ 7 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -113,6 +113,7 @@ public abstract class FileSystem extends Configured implements Closeable {
   public static final int SHUTDOWN_HOOK_PRIORITY = 10;
 
   public static final String TRASH_PREFIX = ".Trash";
+  public static final String USER_HOME_PREFIX = "/user";
 
   /** FileSystem cache */
   static final Cache CACHE = new Cache();
@@ -188,7 +189,11 @@ public abstract class FileSystem extends Configured implements Closeable {
    * @return the uri of the default filesystem
    */
   public static URI getDefaultUri(Configuration conf) {
-    return URI.create(fixName(conf.get(FS_DEFAULT_NAME_KEY, DEFAULT_FS)));
+    URI uri = URI.create(fixName(conf.get(FS_DEFAULT_NAME_KEY, DEFAULT_FS)));
+    if (uri.getScheme() == null) {
+      throw new IllegalArgumentException("No scheme in default FS: " + uri);
+    }
+    return uri;
   }
 
   /** Set the default filesystem URI in a configuration.
@@ -1961,7 +1966,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    */
   public Path getHomeDirectory() {
     return this.makeQualified(
-        new Path("/user/"+System.getProperty("user.name")));
+        new Path(USER_HOME_PREFIX + "/" + System.getProperty("user.name")));
   }
 
 

+ 7 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java

@@ -29,6 +29,7 @@ import java.io.OutputStream;
 import java.net.InetAddress;
 import java.net.URI;
 import java.net.UnknownHostException;
+import java.nio.file.AccessDeniedException;
 import java.util.ArrayList;
 import java.util.Enumeration;
 import java.util.List;
@@ -1139,9 +1140,14 @@ public class FileUtil {
    * an IOException to be thrown.
    * @param dir directory for which listing should be performed
    * @return list of file names or empty string list
-   * @exception IOException for invalid directory or for a bad disk.
+   * @exception AccessDeniedException for unreadable directory
+   * @exception IOException for invalid directory or for bad disk
    */
   public static String[] list(File dir) throws IOException {
+    if (!canRead(dir)) {
+      throw new AccessDeniedException(dir.toString(), null,
+          FSExceptionMessages.PERMISSION_DENIED);
+    }
     String[] fileNames = dir.list();
     if(fileNames == null) {
       throw new IOException("Invalid directory or I/O error occurred for dir: "

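A caller-side sketch of the new failure mode (the path is illustrative; it assumes hadoop-common on the classpath): a directory that cannot be read now surfaces as AccessDeniedException, which callers can distinguish from other I/O errors:

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.AccessDeniedException;

    import org.apache.hadoop.fs.FileUtil;

    public class ListExample {
      public static void main(String[] args) throws IOException {
        File dir = new File(args.length > 0 ? args[0] : "/tmp/example-dir");
        try {
          // Throws AccessDeniedException when the directory cannot be read.
          String[] names = FileUtil.list(dir);
          System.out.println("entries: " + names.length);
        } catch (AccessDeniedException e) {
          System.out.println("cannot read " + e.getFile() + ": " + e.getReason());
        }
      }
    }
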
+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java

@@ -968,6 +968,9 @@ public class HarFileSystem extends FileSystem {
       @Override
       public synchronized int read(byte[] b, int offset, int len) 
         throws IOException {
+        if (len == 0) {
+          return 0;
+        }
         int newlen = len;
         int ret = -1;
         if (position + len > end) {

+ 7 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java

@@ -78,12 +78,17 @@ public class Count extends FsCommand {
           "The -" + OPTION_EXCLUDE_SNAPSHOT + " option excludes snapshots " +
           "from being calculated. \n" +
           "The -" + OPTION_TYPE + " option displays quota by storage types.\n" +
-          "It must be used with -" + OPTION_QUOTA + " option.\n" +
+          "It should be used with -" + OPTION_QUOTA + " or -" +
+          OPTION_QUOTA_AND_USAGE + " option, otherwise it will be ignored.\n" +
           "If a comma-separated list of storage types is given after the -" +
           OPTION_TYPE + " option, \n" +
           "it displays the quota and usage for the specified types. \n" +
           "Otherwise, it displays the quota and usage for all the storage \n" +
-          "types that support quota \n" +
+          "types that support quota. The list of possible storage " +
+          "types (case insensitive):\n" +
+          "ram_disk, ssd, disk and archive.\n" +
+          "The value '', 'all' or 'ALL' can also be passed to specify all " +
+          "the storage types.\n" +
           "The -" + OPTION_QUOTA_AND_USAGE + " option shows the quota and \n" +
           "the usage against the quota without the detailed content summary.";
 

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java

@@ -302,6 +302,12 @@ abstract class InodeTree<T> {
         String src = key.substring(mtPrefix.length());
         if (src.startsWith(linkPrefix)) {
           src = src.substring(linkPrefix.length());
+          if (src.equals(SlashPath.toString())) {
+            throw new UnsupportedFileSystemException("Unexpected mount table "
+                + "link entry '" + key + "'. "
+                + Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH  + " is not "
+                + "supported yet.");
+          }
         } else if (src.startsWith(linkMergePrefix)) { // A merge link
           isMergeLink = true;
           src = src.substring(linkMergePrefix.length());

+ 12 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -32,6 +32,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Map.Entry;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -104,9 +105,13 @@ public class ViewFileSystem extends FileSystem {
       src = srcPath;
       targets = targetURIs;
     }
+
+    @VisibleForTesting
     Path getSrc() {
       return src;
     }
+
+    @VisibleForTesting
     URI[] getTargets() {
       return targets;
     }
@@ -676,9 +681,13 @@ public class ViewFileSystem extends FileSystem {
 
   @Override
   public FsServerDefaults getServerDefaults(Path f) throws IOException {
-    InodeTree.ResolveResult<FileSystem> res =
-      fsState.resolve(getUriPath(f), true);
-    return res.targetFileSystem.getServerDefaults(res.remainingPath);    
+    try {
+      InodeTree.ResolveResult<FileSystem> res =
+          fsState.resolve(getUriPath(f), true);
+      return res.targetFileSystem.getServerDefaults(res.remainingPath);
+    } catch (FileNotFoundException e) {
+      throw new NotInMountpointException(f, "getServerDefaults");
+    }
   }
 
   @Override

+ 5 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -472,14 +472,16 @@ public class ViewFs extends AbstractFileSystem {
   
     if (resSrc.isInternalDir()) {
       throw new AccessControlException(
-          "Cannot Rename within internal dirs of mount table: it is readOnly");
+          "Cannot Rename within internal dirs of mount table: src=" + src
+              + " is readOnly");
     }
-      
+
     InodeTree.ResolveResult<AbstractFileSystem> resDst = 
                                 fsState.resolve(getUriPath(dst), false);
     if (resDst.isInternalDir()) {
       throw new AccessControlException(
-          "Cannot Rename within internal dirs of mount table: it is readOnly");
+          "Cannot Rename within internal dirs of mount table: dest=" + dst
+              + " is readOnly");
     }
     
     /**

+ 9 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java

@@ -19,11 +19,12 @@ package org.apache.hadoop.http;
 
 import java.io.IOException;
 
+import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
-import org.mortbay.jetty.servlet.DefaultServlet;
+import org.eclipse.jetty.servlet.DefaultServlet;
 
 /**
  * General servlet which is admin-authorized.
@@ -35,9 +36,13 @@ public class AdminAuthorizedServlet extends DefaultServlet {
 
   @Override
   protected void doGet(HttpServletRequest request, HttpServletResponse response)
- throws ServletException, IOException {
-    // Do the authorization
-    if (HttpServer2.hasAdministratorAccess(getServletContext(), request,
+      throws ServletException, IOException {
+    // If the user is the static user and the auth type is null, this is a
+    // non-secure environment and no authorization is needed; otherwise,
+    // perform the authorization check.
+    final ServletContext servletContext = getServletContext();
+    if (HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) ||
+        HttpServer2.hasAdministratorAccess(servletContext, request,
         response)) {
       // Authorization is done. Just call super.
       super.doGet(request, response);

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java

@@ -25,8 +25,8 @@ import org.apache.commons.logging.LogConfigurationException;
 import org.apache.commons.logging.LogFactory;
 import org.apache.log4j.Appender;
 import org.apache.log4j.Logger;
-import org.mortbay.jetty.NCSARequestLog;
-import org.mortbay.jetty.RequestLog;
+import org.eclipse.jetty.server.NCSARequestLog;
+import org.eclipse.jetty.server.RequestLog;
 
 /**
  * RequestLog object for use with Http

+ 213 - 124
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java

@@ -56,7 +56,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
-import org.apache.hadoop.security.ssl.SslSelectChannelConnectorSecure;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
 import org.apache.hadoop.security.SecurityUtil;
@@ -65,34 +64,42 @@ import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Shell;
-import org.mortbay.io.Buffer;
-import org.mortbay.jetty.Connector;
-import org.mortbay.jetty.Handler;
-import org.mortbay.jetty.MimeTypes;
-import org.mortbay.jetty.RequestLog;
-import org.mortbay.jetty.Server;
-import org.mortbay.jetty.SessionManager;
-import org.mortbay.jetty.handler.ContextHandler;
-import org.mortbay.jetty.handler.ContextHandlerCollection;
-import org.mortbay.jetty.handler.HandlerCollection;
-import org.mortbay.jetty.handler.RequestLogHandler;
-import org.mortbay.jetty.nio.SelectChannelConnector;
-import org.mortbay.jetty.security.SslSelectChannelConnector;
-import org.mortbay.jetty.servlet.AbstractSessionManager;
-import org.mortbay.jetty.servlet.Context;
-import org.mortbay.jetty.servlet.DefaultServlet;
-import org.mortbay.jetty.servlet.FilterHolder;
-import org.mortbay.jetty.servlet.SessionHandler;
-import org.mortbay.jetty.servlet.FilterMapping;
-import org.mortbay.jetty.servlet.ServletHandler;
-import org.mortbay.jetty.servlet.ServletHolder;
-import org.mortbay.jetty.webapp.WebAppContext;
-import org.mortbay.thread.QueuedThreadPool;
-import org.mortbay.util.MultiException;
+import org.eclipse.jetty.http.HttpVersion;
+import org.eclipse.jetty.server.ConnectionFactory;
+import org.eclipse.jetty.server.Connector;
+import org.eclipse.jetty.server.Handler;
+import org.eclipse.jetty.server.HttpConfiguration;
+import org.eclipse.jetty.server.HttpConnectionFactory;
+import org.eclipse.jetty.server.RequestLog;
+import org.eclipse.jetty.server.SecureRequestCustomizer;
+import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.server.ServerConnector;
+import org.eclipse.jetty.server.SessionManager;
+import org.eclipse.jetty.server.SslConnectionFactory;
+import org.eclipse.jetty.server.handler.ContextHandlerCollection;
+import org.eclipse.jetty.server.handler.HandlerCollection;
+import org.eclipse.jetty.server.handler.RequestLogHandler;
+import org.eclipse.jetty.server.session.AbstractSessionManager;
+import org.eclipse.jetty.server.session.SessionHandler;
+import org.eclipse.jetty.servlet.DefaultServlet;
+import org.eclipse.jetty.servlet.FilterHolder;
+import org.eclipse.jetty.servlet.FilterMapping;
+import org.eclipse.jetty.servlet.ServletContextHandler;
+import org.eclipse.jetty.servlet.ServletHandler;
+import org.eclipse.jetty.servlet.ServletHolder;
+import org.eclipse.jetty.servlet.ServletMapping;
+import org.eclipse.jetty.util.ArrayUtil;
+import org.eclipse.jetty.util.MultiException;
+import org.eclipse.jetty.webapp.WebAppContext;
+import org.eclipse.jetty.util.thread.QueuedThreadPool;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.sun.jersey.spi.container.servlet.ServletContainer;
+import org.eclipse.jetty.util.ssl.SslContextFactory;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
 
 /**
  * Create a Jetty embedded server to answer http requests. The primary goal is
@@ -126,11 +133,13 @@ public final class HttpServer2 implements FilterContainer {
 
   protected final Server webServer;
 
-  private final List<Connector> listeners = Lists.newArrayList();
+  private final HandlerCollection handlers;
+
+  private final List<ServerConnector> listeners = Lists.newArrayList();
 
   protected final WebAppContext webAppContext;
   protected final boolean findPort;
-  protected final Map<Context, Boolean> defaultContexts =
+  private final Map<ServletContextHandler, Boolean> defaultContexts =
       new HashMap<>();
   protected final List<String> filterNames = new ArrayList<>();
   static final String STATE_DESCRIPTION_ALIVE = " - alive";
@@ -327,49 +336,59 @@ public final class HttpServer2 implements FilterContainer {
       }
 
       for (URI ep : endpoints) {
-        final Connector listener;
+        final ServerConnector connector;
         String scheme = ep.getScheme();
         if ("http".equals(scheme)) {
-          listener = HttpServer2.createDefaultChannelConnector();
+          connector =
+              HttpServer2.createDefaultChannelConnector(server.webServer);
         } else if ("https".equals(scheme)) {
-          listener = createHttpsChannelConnector();
+          connector = createHttpsChannelConnector(server.webServer);
 
         } else {
           throw new HadoopIllegalArgumentException(
               "unknown scheme for endpoint:" + ep);
         }
-        listener.setHost(ep.getHost());
-        listener.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
-        server.addListener(listener);
+        connector.setHost(ep.getHost());
+        connector.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
+        server.addListener(connector);
       }
       server.loadListeners();
       return server;
     }
 
-    private Connector createHttpsChannelConnector() {
-      SslSelectChannelConnector c = new SslSelectChannelConnectorSecure();
-      configureChannelConnector(c);
-
-      c.setNeedClientAuth(needsClientAuth);
-      c.setKeyPassword(keyPassword);
-
+    private ServerConnector createHttpsChannelConnector(Server server) {
+      ServerConnector conn = new ServerConnector(server);
+      HttpConfiguration httpConfig = new HttpConfiguration();
+      httpConfig.setRequestHeaderSize(JettyUtils.HEADER_SIZE);
+      httpConfig.setResponseHeaderSize(JettyUtils.HEADER_SIZE);
+      httpConfig.setSecureScheme("https");
+      httpConfig.addCustomizer(new SecureRequestCustomizer());
+      ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
+      conn.addConnectionFactory(connFactory);
+      configureChannelConnector(conn);
+
+      SslContextFactory sslContextFactory = new SslContextFactory();
+      sslContextFactory.setNeedClientAuth(needsClientAuth);
+      sslContextFactory.setKeyManagerPassword(keyPassword);
       if (keyStore != null) {
-        c.setKeystore(keyStore);
-        c.setKeystoreType(keyStoreType);
-        c.setPassword(keyStorePassword);
+        sslContextFactory.setKeyStorePath(keyStore);
+        sslContextFactory.setKeyStoreType(keyStoreType);
+        sslContextFactory.setKeyStorePassword(keyStorePassword);
       }
-
       if (trustStore != null) {
-        c.setTruststore(trustStore);
-        c.setTruststoreType(trustStoreType);
-        c.setTrustPassword(trustStorePassword);
+        sslContextFactory.setTrustStorePath(trustStore);
+        sslContextFactory.setTrustStoreType(trustStoreType);
+        sslContextFactory.setTrustStorePassword(trustStorePassword);
       }
-
       if(null != excludeCiphers && !excludeCiphers.isEmpty()) {
-        c.setExcludeCipherSuites(excludeCiphers.split(","));
+        sslContextFactory.setExcludeCipherSuites(excludeCiphers.split(","));
         LOG.info("Excluded Cipher List:" + excludeCiphers);
       }
-      return c;
+
+      conn.addFirstConnectionFactory(new SslConnectionFactory(sslContextFactory,
+          HttpVersion.HTTP_1_1.asString()));
+
+      return conn;
     }
   }
 
@@ -377,6 +396,7 @@ public final class HttpServer2 implements FilterContainer {
     final String appDir = getWebAppsPath(b.name);
     this.webServer = new Server();
     this.adminsAcl = b.adminsAcl;
+    this.handlers = new HandlerCollection();
     this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, appDir);
     this.xFrameOptionIsEnabled = b.xFrameEnabled;
     this.xFrameOption = b.xFrameOption;
@@ -406,36 +426,33 @@ public final class HttpServer2 implements FilterContainer {
     int maxThreads = conf.getInt(HTTP_MAX_THREADS, -1);
     // If HTTP_MAX_THREADS is not configured, QueueThreadPool() will use the
     // default value (currently 250).
-    QueuedThreadPool threadPool = maxThreads == -1 ? new QueuedThreadPool()
-        : new QueuedThreadPool(maxThreads);
+
+    QueuedThreadPool threadPool = (QueuedThreadPool) webServer.getThreadPool();
     threadPool.setDaemon(true);
-    webServer.setThreadPool(threadPool);
+    if (maxThreads != -1) {
+      threadPool.setMaxThreads(maxThreads);
+    }
 
     SessionManager sm = webAppContext.getSessionHandler().getSessionManager();
     if (sm instanceof AbstractSessionManager) {
       AbstractSessionManager asm = (AbstractSessionManager)sm;
       asm.setHttpOnly(true);
-      asm.setSecureCookies(true);
+      asm.getSessionCookieConfig().setSecure(true);
     }
 
     ContextHandlerCollection contexts = new ContextHandlerCollection();
     RequestLog requestLog = HttpRequestLog.getRequestLog(name);
 
+    handlers.addHandler(contexts);
     if (requestLog != null) {
       RequestLogHandler requestLogHandler = new RequestLogHandler();
       requestLogHandler.setRequestLog(requestLog);
-      HandlerCollection handlers = new HandlerCollection();
-      handlers.setHandlers(new Handler[] {contexts, requestLogHandler});
-      webServer.setHandler(handlers);
-    } else {
-      webServer.setHandler(contexts);
+      handlers.addHandler(requestLogHandler);
     }
-
+    handlers.addHandler(webAppContext);
     final String appDir = getWebAppsPath(name);
-
-    webServer.addHandler(webAppContext);
-
     addDefaultApps(contexts, appDir, conf);
+    webServer.setHandler(handlers);
 
     Map<String, String> xFrameParams = new HashMap<>();
     xFrameParams.put(X_FRAME_ENABLED,
@@ -461,7 +478,7 @@ public final class HttpServer2 implements FilterContainer {
     }
   }
 
-  private void addListener(Connector connector) {
+  private void addListener(ServerConnector connector) {
     listeners.add(connector);
   }
 
@@ -507,16 +524,14 @@ public final class HttpServer2 implements FilterContainer {
     return prop;
   }
 
-  private static void addNoCacheFilter(WebAppContext ctxt) {
+  private static void addNoCacheFilter(ServletContextHandler ctxt) {
     defineFilter(ctxt, NO_CACHE_FILTER, NoCacheFilter.class.getName(),
                  Collections.<String, String> emptyMap(), new String[] { "/*" });
   }
 
-  private static void configureChannelConnector(SelectChannelConnector c) {
-    c.setLowResourceMaxIdleTime(10000);
+  private static void configureChannelConnector(ServerConnector c) {
+    c.setIdleTimeout(10000);
     c.setAcceptQueueSize(128);
-    c.setResolveNames(false);
-    c.setUseDirectBuffers(false);
     if(Shell.WINDOWS) {
       // result of setting the SO_REUSEADDR flag is different on Windows
       // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
@@ -524,14 +539,18 @@ public final class HttpServer2 implements FilterContainer {
       // the same port with indeterminate routing of incoming requests to them
       c.setReuseAddress(false);
     }
-    c.setHeaderBufferSize(1024*64);
   }
 
   @InterfaceAudience.Private
-  public static Connector createDefaultChannelConnector() {
-    SelectChannelConnector ret = new SelectChannelConnector();
-    configureChannelConnector(ret);
-    return ret;
+  public static ServerConnector createDefaultChannelConnector(Server server) {
+    ServerConnector conn = new ServerConnector(server);
+    HttpConfiguration httpConfig = new HttpConfiguration();
+    httpConfig.setRequestHeaderSize(JettyUtils.HEADER_SIZE);
+    httpConfig.setResponseHeaderSize(JettyUtils.HEADER_SIZE);
+    ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
+    conn.addConnectionFactory(connFactory);
+    configureChannelConnector(conn);
+    return conn;
   }
 
   /** Get an array of FilterConfiguration specified in the conf */
@@ -567,7 +586,8 @@ public final class HttpServer2 implements FilterContainer {
         CommonConfigurationKeys.HADOOP_HTTP_LOGS_ENABLED,
         CommonConfigurationKeys.HADOOP_HTTP_LOGS_ENABLED_DEFAULT);
     if (logDir != null && logsEnabled) {
-      Context logContext = new Context(parent, "/logs");
+      ServletContextHandler logContext =
+          new ServletContextHandler(parent, "/logs");
       logContext.setResourceBase(logDir);
       logContext.addServlet(AdminAuthorizedServlet.class, "/*");
       if (conf.getBoolean(
@@ -575,8 +595,7 @@ public final class HttpServer2 implements FilterContainer {
           CommonConfigurationKeys.DEFAULT_HADOOP_JETTY_LOGS_SERVE_ALIASES)) {
         @SuppressWarnings("unchecked")
         Map<String, String> params = logContext.getInitParams();
-        params.put(
-            "org.mortbay.jetty.servlet.Default.aliases", "true");
+        params.put("org.eclipse.jetty.servlet.Default.aliases", "true");
       }
       logContext.setDisplayName("logs");
       SessionHandler handler = new SessionHandler();
@@ -584,34 +603,37 @@ public final class HttpServer2 implements FilterContainer {
       if (sm instanceof AbstractSessionManager) {
         AbstractSessionManager asm = (AbstractSessionManager) sm;
         asm.setHttpOnly(true);
-        asm.setSecureCookies(true);
+        asm.getSessionCookieConfig().setSecure(true);
       }
       logContext.setSessionHandler(handler);
       setContextAttributes(logContext, conf);
-      addNoCacheFilter(webAppContext);
+      addNoCacheFilter(logContext);
       defaultContexts.put(logContext, true);
     }
     // set up the context for "/static/*"
-    Context staticContext = new Context(parent, "/static");
+    ServletContextHandler staticContext =
+        new ServletContextHandler(parent, "/static");
     staticContext.setResourceBase(appDir + "/static");
     staticContext.addServlet(DefaultServlet.class, "/*");
     staticContext.setDisplayName("static");
     @SuppressWarnings("unchecked")
     Map<String, String> params = staticContext.getInitParams();
-    params.put("org.mortbay.jetty.servlet.Default.dirAllowed", "false");
+    params.put("org.eclipse.jetty.servlet.Default.dirAllowed", "false");
+    params.put("org.eclipse.jetty.servlet.Default.gzip", "true");
     SessionHandler handler = new SessionHandler();
     SessionManager sm = handler.getSessionManager();
     if (sm instanceof AbstractSessionManager) {
       AbstractSessionManager asm = (AbstractSessionManager) sm;
       asm.setHttpOnly(true);
-      asm.setSecureCookies(true);
+      asm.getSessionCookieConfig().setSecure(true);
     }
     staticContext.setSessionHandler(handler);
     setContextAttributes(staticContext, conf);
     defaultContexts.put(staticContext, true);
   }
 
-  private void setContextAttributes(Context context, Configuration conf) {
+  private void setContextAttributes(ServletContextHandler context,
+                                    Configuration conf) {
     context.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
     context.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
   }
@@ -627,9 +649,9 @@ public final class HttpServer2 implements FilterContainer {
     addServlet("conf", "/conf", ConfServlet.class);
   }
 
-  public void addContext(Context ctxt, boolean isFiltered) {
-    webServer.addHandler(ctxt);
-    addNoCacheFilter(webAppContext);
+  public void addContext(ServletContextHandler ctxt, boolean isFiltered) {
+    handlers.addHandler(ctxt);
+    addNoCacheFilter(ctxt);
     defaultContexts.put(ctxt, isFiltered);
   }
 
@@ -691,7 +713,7 @@ public final class HttpServer2 implements FilterContainer {
    * protect with Kerberos authentication.
    * Note: This method is to be used for adding servlets that facilitate
    * internal communication and not for user facing functionality. For
-   +   * servlets added using this method, filters (except internal Kerberos
+   * servlets added using this method, filters (except internal Kerberos
    * filters) are not enabled.
    *
    * @param name The name of the servlet (can be passed as null)
@@ -705,19 +727,58 @@ public final class HttpServer2 implements FilterContainer {
     if (name != null) {
       holder.setName(name);
     }
+    // Jetty doesn't like the same path spec mapping to different servlets, so
+    // if there's already a mapping for this pathSpec, remove it and assume that
+    // the newest one is the one we want
+    final ServletMapping[] servletMappings =
+        webAppContext.getServletHandler().getServletMappings();
+    for (int i = 0; i < servletMappings.length; i++) {
+      if (servletMappings[i].containsPathSpec(pathSpec)) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Found existing " + servletMappings[i].getServletName() +
+              " servlet at path " + pathSpec + "; will replace mapping" +
+              " with " + holder.getName() + " servlet");
+        }
+        ServletMapping[] newServletMappings =
+            ArrayUtil.removeFromArray(servletMappings, servletMappings[i]);
+        webAppContext.getServletHandler()
+            .setServletMappings(newServletMappings);
+        break;
+      }
+    }
     webAppContext.addServlet(holder, pathSpec);
 
     if(requireAuth && UserGroupInformation.isSecurityEnabled()) {
-       LOG.info("Adding Kerberos (SPNEGO) filter to " + name);
-       ServletHandler handler = webAppContext.getServletHandler();
-       FilterMapping fmap = new FilterMapping();
-       fmap.setPathSpec(pathSpec);
-       fmap.setFilterName(SPNEGO_FILTER);
-       fmap.setDispatches(Handler.ALL);
-       handler.addFilterMapping(fmap);
+      LOG.info("Adding Kerberos (SPNEGO) filter to " + name);
+      ServletHandler handler = webAppContext.getServletHandler();
+      FilterMapping fmap = new FilterMapping();
+      fmap.setPathSpec(pathSpec);
+      fmap.setFilterName(SPNEGO_FILTER);
+      fmap.setDispatches(FilterMapping.ALL);
+      handler.addFilterMapping(fmap);
     }
   }
 
+  /**
+   * Add the given handler to the front of the list of handlers.
+   *
+   * @param handler The handler to add
+   */
+  public void addHandlerAtFront(Handler handler) {
+    Handler[] h = ArrayUtil.prependToArray(
+        handler, this.handlers.getHandlers(), Handler.class);
+    handlers.setHandlers(h);
+  }
+
+  /**
+   * Add the given handler to the end of the list of handlers.
+   *
+   * @param handler The handler to add
+   */
+  public void addHandlerAtEnd(Handler handler) {
+    handlers.addHandler(handler);
+  }
+
   @Override
   public void addFilter(String name, String classname,
       Map<String, String> parameters) {
@@ -727,12 +788,14 @@ public final class HttpServer2 implements FilterContainer {
     FilterMapping fmap = getFilterMapping(name, USER_FACING_URLS);
     defineFilter(webAppContext, filterHolder, fmap);
     LOG.info(
-        "Added filter " + name + " (class=" + classname + ") to context " + webAppContext.getDisplayName());
+        "Added filter " + name + " (class=" + classname + ") to context "
+            + webAppContext.getDisplayName());
     final String[] ALL_URLS = { "/*" };
     fmap = getFilterMapping(name, ALL_URLS);
-    for (Map.Entry<Context, Boolean> e : defaultContexts.entrySet()) {
+    for (Map.Entry<ServletContextHandler, Boolean> e
+        : defaultContexts.entrySet()) {
       if (e.getValue()) {
-        Context ctx = e.getKey();
+        ServletContextHandler ctx = e.getKey();
         defineFilter(ctx, filterHolder, fmap);
         LOG.info("Added filter " + name + " (class=" + classname
             + ") to context " + ctx.getDisplayName());
@@ -748,7 +811,7 @@ public final class HttpServer2 implements FilterContainer {
     FilterHolder filterHolder = getFilterHolder(name, classname, parameters);
     FilterMapping fmap = getFilterMapping(name, ALL_URLS);
     defineFilter(webAppContext, filterHolder, fmap);
-    for (Context ctx : defaultContexts.keySet()) {
+    for (ServletContextHandler ctx : defaultContexts.keySet()) {
       defineFilter(ctx, filterHolder, fmap);
     }
     LOG.info("Added global filter '" + name + "' (class=" + classname + ")");
@@ -757,7 +820,7 @@ public final class HttpServer2 implements FilterContainer {
   /**
    * Define a filter for a context and set up default url mappings.
    */
-  public static void defineFilter(Context ctx, String name,
+  public static void defineFilter(ServletContextHandler ctx, String name,
       String classname, Map<String,String> parameters, String[] urls) {
     FilterHolder filterHolder = getFilterHolder(name, classname, parameters);
     FilterMapping fmap = getFilterMapping(name, urls);
@@ -767,8 +830,8 @@ public final class HttpServer2 implements FilterContainer {
   /**
    * Define a filter for a context and set up default url mappings.
    */
-  private static void defineFilter(Context ctx, FilterHolder holder,
-      FilterMapping fmap) {
+  private static void defineFilter(ServletContextHandler ctx,
+                                   FilterHolder holder, FilterMapping fmap) {
     ServletHandler handler = ctx.getServletHandler();
     handler.addFilter(holder, fmap);
   }
@@ -776,7 +839,7 @@ public final class HttpServer2 implements FilterContainer {
   private static FilterMapping getFilterMapping(String name, String[] urls) {
     FilterMapping fmap = new FilterMapping();
     fmap.setPathSpecs(urls);
-    fmap.setDispatches(Handler.ALL);
+    fmap.setDispatches(FilterMapping.ALL);
     fmap.setFilterName(name);
     return fmap;
   }
@@ -786,7 +849,9 @@ public final class HttpServer2 implements FilterContainer {
     FilterHolder holder = new FilterHolder();
     holder.setName(name);
     holder.setClassName(classname);
-    holder.setInitParameters(parameters);
+    if (parameters != null) {
+      holder.setInitParameters(parameters);
+    }
     return holder;
   }
 
@@ -796,13 +861,13 @@ public final class HttpServer2 implements FilterContainer {
    * @param webAppCtx The WebApplicationContext to add to
    */
   protected void addFilterPathMapping(String pathSpec,
-      Context webAppCtx) {
+      ServletContextHandler webAppCtx) {
     ServletHandler handler = webAppCtx.getServletHandler();
     for(String name : filterNames) {
       FilterMapping fmap = new FilterMapping();
       fmap.setPathSpec(pathSpec);
       fmap.setFilterName(name);
-      fmap.setDispatches(Handler.ALL);
+      fmap.setDispatches(FilterMapping.ALL);
       handler.addFilterMapping(fmap);
     }
   }
@@ -841,23 +906,23 @@ public final class HttpServer2 implements FilterContainer {
    */
   @Deprecated
   public int getPort() {
-    return webServer.getConnectors()[0].getLocalPort();
+    return ((ServerConnector)webServer.getConnectors()[0]).getLocalPort();
   }
 
   /**
    * Get the address that corresponds to a particular connector.
    *
    * @return the corresponding address for the connector, or null if there's no
-   *         such connector or the connector is not bounded.
+   *         such connector or the connector is not bound or was closed.
    */
   public InetSocketAddress getConnectorAddress(int index) {
     Preconditions.checkArgument(index >= 0);
     if (index > webServer.getConnectors().length)
       return null;
 
-    Connector c = webServer.getConnectors()[index];
-    if (c.getLocalPort() == -1) {
-      // The connector is not bounded
+    ServerConnector c = (ServerConnector)webServer.getConnectors()[index];
+    if (c.getLocalPort() == -1 || c.getLocalPort() == -2) {
+      // The connector is not bound or was closed
       return null;
     }
 
@@ -907,8 +972,8 @@ public final class HttpServer2 implements FilterContainer {
         throw ex;
       }
       // Make sure there is no handler failures.
-      Handler[] handlers = webServer.getHandlers();
-      for (Handler handler : handlers) {
+      Handler[] hs = webServer.getHandlers();
+      for (Handler handler : hs) {
         if (handler.isFailed()) {
           throw new IOException(
               "Problem in starting http server. Server handlers failed");
@@ -944,9 +1009,10 @@ public final class HttpServer2 implements FilterContainer {
    * @throws Exception
    */
   void openListeners() throws Exception {
-    for (Connector listener : listeners) {
-      if (listener.getLocalPort() != -1) {
-        // This listener is either started externally or has been bound
+    for (ServerConnector listener : listeners) {
+      if (listener.getLocalPort() != -1 && listener.getLocalPort() != -2) {
+        // This listener is either started externally or has been bound or was
+        // closed
         continue;
       }
       int port = listener.getPort();
@@ -978,7 +1044,7 @@ public final class HttpServer2 implements FilterContainer {
    */
   public void stop() throws Exception {
     MultiException exception = null;
-    for (Connector c : listeners) {
+    for (ServerConnector c : listeners) {
       try {
         c.close();
       } catch (Exception e) {
@@ -1042,12 +1108,30 @@ public final class HttpServer2 implements FilterContainer {
         .append(isAlive() ? STATE_DESCRIPTION_ALIVE
                     : STATE_DESCRIPTION_NOT_LIVE)
         .append("), listening at:");
-    for (Connector l : listeners) {
+    for (ServerConnector l : listeners) {
       sb.append(l.getHost()).append(":").append(l.getPort()).append("/,");
     }
     return sb.toString();
   }
 
+  /**
+   * Check whether the user is a static user and unauthenticated; if so,
+   * the HTTP server is running in a non-secure environment.
+   * @param servletContext the servlet context.
+   * @param request the servlet request.
+   * @return true if the request comes from the static user with no
+   *         authentication type set, false otherwise.
+   */
+  public static boolean isStaticUserAndNoneAuthType(
+      ServletContext servletContext, HttpServletRequest request) {
+    Configuration conf =
+        (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
+    final String authType = request.getAuthType();
+    final String staticUser = conf.get(HADOOP_HTTP_STATIC_USER,
+        DEFAULT_HADOOP_HTTP_STATIC_USER);
+    return authType == null && staticUser.equals(request.getRemoteUser());
+  }
+
   /**
    * Checks the user has privileges to access to instrumentation servlets.
    * <p/>
@@ -1145,9 +1229,14 @@ public final class HttpServer2 implements FilterContainer {
 
     @Override
     public void doGet(HttpServletRequest request, HttpServletResponse response)
-      throws ServletException, IOException {
-      if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
-                                                      request, response)) {
+        throws ServletException, IOException {
+      // If the user is a static user and the auth type is null, we are in a
+      // non-secure environment and no authorization is needed; otherwise,
+      // perform the authorization check.
+      final ServletContext servletContext = getServletContext();
+      if (!HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) &&
+          !HttpServer2.isInstrumentationAccessAllowed(servletContext,
+              request, response)) {
         return;
       }
       response.setContentType("text/plain; charset=UTF-8");
@@ -1300,10 +1389,10 @@ public final class HttpServer2 implements FilterContainer {
      */
     private String inferMimeType(ServletRequest request) {
       String path = ((HttpServletRequest)request).getRequestURI();
-      ContextHandler.SContext sContext = (ContextHandler.SContext)config.getServletContext();
-      MimeTypes mimes = sContext.getContextHandler().getMimeTypes();
-      Buffer mimeBuffer = mimes.getMimeByExtension(path);
-      return (mimeBuffer == null) ? null : mimeBuffer.toString();
+      ServletContextHandler.Context sContext =
+          (ServletContextHandler.Context)config.getServletContext();
+      String mime = sContext.getMimeType(path);
+      return (mime == null) ? null : mime;
     }
 
   }
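Note on the handler-list helpers added above (addHandlerAtFront/addHandlerAtEnd): they let callers splice extra Jetty handlers around the contexts managed by HttpServer2. A minimal caller-side sketch, assuming an already-built HttpServer2 instance named httpServer and a purely illustrative audit handler (not part of this change):

    // Sketch only: the handler and variable names below are hypothetical.
    // Assumes imports of org.eclipse.jetty.server.Handler and the javax.servlet HTTP types.
    Handler audit = new org.eclipse.jetty.server.handler.AbstractHandler() {
      @Override
      public void handle(String target, org.eclipse.jetty.server.Request baseRequest,
                         javax.servlet.http.HttpServletRequest request,
                         javax.servlet.http.HttpServletResponse response) {
        // Observe or decorate the request; the remaining handlers (static, logs,
        // web app context) still get a chance to serve it.
      }
    };
    httpServer.addHandlerAtFront(audit);   // or addHandlerAtEnd(audit) to run it last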

+ 35 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/JettyUtils.java

@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.http;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Contains utility methods and constants relating to Jetty.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class JettyUtils {
+  public static final String UTF_8 = "charset=utf-8";
+  public static final int HEADER_SIZE = 1024 * 64;
+
+  private JettyUtils() {
+  }
+}
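Usage sketch for these constants (the response variable is hypothetical): HEADER_SIZE is the request/response header budget wired into the connectors above, and UTF_8 is intended to be appended to content types when servlets write responses.

    // Sketch: tagging a JSON response with the shared charset constant
    // ('response' is a hypothetical HttpServletResponse).
    response.setContentType("application/json; " + JettyUtils.UTF_8);
    response.getWriter().write("{\"status\":\"ok\"}");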

+ 59 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeXORRawDecoder.java

@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
+
+import java.nio.ByteBuffer;
+
+/**
+ * An XOR raw decoder using the Intel ISA-L library.
+ */
+@InterfaceAudience.Private
+public class NativeXORRawDecoder extends AbstractNativeRawDecoder {
+
+  static {
+    ErasureCodeNative.checkNativeCodeLoaded();
+  }
+
+  public NativeXORRawDecoder(ErasureCoderOptions coderOptions) {
+    super(coderOptions);
+    initImpl(coderOptions.getNumDataUnits(), coderOptions.getNumParityUnits());
+  }
+
+  @Override
+  protected void performDecodeImpl(ByteBuffer[] inputs, int[] inputOffsets,
+      int dataLen, int[] erased, ByteBuffer[] outputs, int[] outputOffsets) {
+    decodeImpl(inputs, inputOffsets, dataLen, erased, outputs, outputOffsets);
+  }
+
+  @Override
+  public void release() {
+    destroyImpl();
+  }
+
+  private native void initImpl(int numDataUnits, int numParityUnits);
+
+  private native void decodeImpl(
+      ByteBuffer[] inputs, int[] inputOffsets, int dataLen, int[] erased,
+      ByteBuffer[] outputs, int[] outputOffsets);
+
+  private native void destroyImpl();
+}

+ 60 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeXORRawEncoder.java

@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
+
+import java.nio.ByteBuffer;
+
+/**
+ * An XOR raw encoder using the Intel ISA-L library.
+ */
+@InterfaceAudience.Private
+public class NativeXORRawEncoder extends AbstractNativeRawEncoder {
+
+  static {
+    ErasureCodeNative.checkNativeCodeLoaded();
+  }
+
+  public NativeXORRawEncoder(ErasureCoderOptions coderOptions) {
+    super(coderOptions);
+    initImpl(coderOptions.getNumDataUnits(), coderOptions.getNumParityUnits());
+  }
+
+  @Override
+  protected void performEncodeImpl(
+      ByteBuffer[] inputs, int[] inputOffsets, int dataLen,
+      ByteBuffer[] outputs, int[] outputOffsets) {
+    encodeImpl(inputs, inputOffsets, dataLen, outputs, outputOffsets);
+  }
+
+  @Override
+  public void release() {
+    destroyImpl();
+  }
+
+  private native void initImpl(int numDataUnits, int numParityUnits);
+
+  private native void encodeImpl(ByteBuffer[] inputs, int[] inputOffsets,
+                                 int dataLen, ByteBuffer[] outputs,
+                                 int[] outputOffsets);
+
+  private native void destroyImpl();
+}

+ 39 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeXORRawErasureCoderFactory.java

@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
+
+/**
+ * A raw coder factory for the native XOR coder using the Intel ISA-L library.
+ */
+
+@InterfaceAudience.Private
+public class NativeXORRawErasureCoderFactory implements RawErasureCoderFactory {
+
+  @Override
+  public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
+    return new NativeXORRawEncoder(coderOptions);
+  }
+
+  @Override
+  public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
+    return new NativeXORRawDecoder(coderOptions);
+  }
+}
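A usage sketch for the factory (assumes the ISA-L native library loads on the host; the 6+1 layout is only illustrative, and XOR always yields a single parity unit):

    // Sketch: obtaining and releasing the native XOR coders through the factory.
    ErasureCoderOptions opts = new ErasureCoderOptions(6, 1);   // 6 data units, 1 parity unit
    RawErasureCoderFactory factory = new NativeXORRawErasureCoderFactory();
    RawErasureEncoder encoder = factory.createEncoder(opts);
    RawErasureDecoder decoder = factory.createDecoder(opts);
    try {
      // encoder.encode(ByteBuffer[] inputs, ByteBuffer[] outputs) and
      // decoder.decode(ByteBuffer[] inputs, int[] erasedIndexes, ByteBuffer[] outputs)
      // operate on buffer groups sized according to opts.
    } finally {
      encoder.release();   // free the native coder state
      decoder.release();
    }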

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcDetailedMetrics.java

@@ -24,7 +24,7 @@ import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
-import org.apache.hadoop.metrics2.lib.MutableRates;
+import org.apache.hadoop.metrics2.lib.MutableRatesWithAggregation;
 
 /**
  * This class is for maintaining RPC method related statistics
@@ -34,7 +34,7 @@ import org.apache.hadoop.metrics2.lib.MutableRates;
 @Metrics(about="Per method RPC metrics", context="rpcdetailed")
 public class RpcDetailedMetrics {
 
-  @Metric MutableRates rates;
+  @Metric MutableRatesWithAggregation rates;
 
   static final Log LOG = LogFactory.getLog(RpcDetailedMetrics.class);
   final MetricsRegistry registry;

+ 7 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java

@@ -38,6 +38,7 @@ import javax.management.RuntimeMBeanException;
 import javax.management.openmbean.CompositeData;
 import javax.management.openmbean.CompositeType;
 import javax.management.openmbean.TabularData;
+import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
@@ -168,7 +169,12 @@ public class JMXJsonServlet extends HttpServlet {
   @Override
   public void doGet(HttpServletRequest request, HttpServletResponse response) {
     try {
-      if (!isInstrumentationAccessAllowed(request, response)) {
+      // If the user is a static user and the auth type is null, we are in a
+      // non-secure environment and no authorization is needed; otherwise,
+      // perform the authorization check.
+      final ServletContext servletContext = getServletContext();
+      if (!HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) &&
+          !isInstrumentationAccessAllowed(request, response)) {
         return;
       }
       JsonGenerator jg = null;

+ 8 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java

@@ -27,6 +27,7 @@ import java.util.regex.Pattern;
 
 import javax.net.ssl.HttpsURLConnection;
 import javax.net.ssl.SSLSocketFactory;
+import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
@@ -323,9 +324,13 @@ public class LogLevel {
     public void doGet(HttpServletRequest request, HttpServletResponse response
         ) throws ServletException, IOException {
 
-      // Do the authorization
-      if (!HttpServer2.hasAdministratorAccess(getServletContext(), request,
-          response)) {
+      // If the user is a static user and the auth type is null, we are in a
+      // non-secure environment and no authorization is needed; otherwise,
+      // perform the authorization check.
+      final ServletContext servletContext = getServletContext();
+      if (!HttpServer2.isStaticUserAndNoneAuthType(servletContext, request) &&
+          !HttpServer2.hasAdministratorAccess(servletContext,
+              request, response)) {
         return;
       }
 

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableMetricsFactory.java

@@ -67,6 +67,11 @@ public class MutableMetricsFactory {
     if (cls == MutableRates.class) {
       return new MutableRates(registry);
     }
+    if (cls == MutableRatesWithAggregation.class) {
+      MutableRatesWithAggregation rates = new MutableRatesWithAggregation();
+      registry.add(info.name(), rates);
+      return rates;
+    }
     if (cls == MutableStat.class) {
       return registry.newStat(info.name(), info.description(),
                               annotation.sampleName(), annotation.valueName(),

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java

@@ -33,6 +33,12 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 
 /**
  * Helper class to manage a group of mutable rate metrics
+ *
+ * This class synchronizes all accesses to the metrics it
+ * contains, so it should not be used in situations where
+ * there is high contention on the metrics.
+ * {@link MutableRatesWithAggregation} is preferable in that
+ * situation.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving

+ 148 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java

@@ -0,0 +1,148 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.lib;
+
+import com.google.common.collect.Sets;
+import java.lang.ref.WeakReference;
+import java.lang.reflect.Method;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedDeque;
+import java.util.concurrent.ConcurrentMap;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.metrics2.util.SampleStat;
+
+
+/**
+ * Helper class to manage a group of mutable rate metrics.
+ *
+ * Each thread will maintain a local rate count, and upon snapshot,
+ * these values will be aggregated into a global rate. This class
+ * should only be used for long running threads, as any metrics
+ * produced between the last snapshot and the death of a thread
+ * will be lost. This allows for significantly higher concurrency
+ * than {@link MutableRates}. See HADOOP-24420.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class MutableRatesWithAggregation extends MutableMetric {
+  static final Log LOG = LogFactory.getLog(MutableRatesWithAggregation.class);
+  private final Map<String, MutableRate> globalMetrics = new HashMap<>();
+  private final Set<Class<?>> protocolCache = Sets.newHashSet();
+
+  private final ConcurrentLinkedDeque<WeakReference<ConcurrentMap<String, ThreadSafeSampleStat>>>
+      weakReferenceQueue = new ConcurrentLinkedDeque<>();
+  private final ThreadLocal<ConcurrentMap<String, ThreadSafeSampleStat>>
+      threadLocalMetricsMap = new ThreadLocal<>();
+
+  /**
+   * Initialize the registry with all the methods in a protocol
+   * so they all show up in the first snapshot.
+   * Convenient for JMX implementations.
+   * @param protocol the protocol class
+   */
+  public void init(Class<?> protocol) {
+    if (protocolCache.contains(protocol)) {
+      return;
+    }
+    protocolCache.add(protocol);
+    for (Method method : protocol.getDeclaredMethods()) {
+      String name = method.getName();
+      LOG.debug(name);
+      addMetricIfNotExists(name);
+    }
+  }
+
+  /**
+   * Add a rate sample for a rate metric.
+   * @param name of the rate metric
+   * @param elapsed time
+   */
+  public void add(String name, long elapsed) {
+    ConcurrentMap<String, ThreadSafeSampleStat> localStats =
+        threadLocalMetricsMap.get();
+    if (localStats == null) {
+      localStats = new ConcurrentHashMap<>();
+      threadLocalMetricsMap.set(localStats);
+      weakReferenceQueue.add(new WeakReference<>(localStats));
+    }
+    ThreadSafeSampleStat stat = localStats.get(name);
+    if (stat == null) {
+      stat = new ThreadSafeSampleStat();
+      localStats.put(name, stat);
+    }
+    stat.add(elapsed);
+  }
+
+  @Override
+  public synchronized void snapshot(MetricsRecordBuilder rb, boolean all) {
+    Iterator<WeakReference<ConcurrentMap<String, ThreadSafeSampleStat>>> iter =
+        weakReferenceQueue.iterator();
+    while (iter.hasNext()) {
+      ConcurrentMap<String, ThreadSafeSampleStat> map = iter.next().get();
+      if (map == null) {
+        // Thread has died; clean up its state
+        iter.remove();
+      } else {
+        // Aggregate the thread's local samples into the global metrics
+        for (Map.Entry<String, ThreadSafeSampleStat> entry : map.entrySet()) {
+          String name = entry.getKey();
+          MutableRate globalMetric = addMetricIfNotExists(name);
+          entry.getValue().snapshotInto(globalMetric);
+        }
+      }
+    }
+    for (MutableRate globalMetric : globalMetrics.values()) {
+      globalMetric.snapshot(rb, all);
+    }
+  }
+
+  private synchronized MutableRate addMetricIfNotExists(String name) {
+    MutableRate metric = globalMetrics.get(name);
+    if (metric == null) {
+      metric = new MutableRate(name, name, false);
+      globalMetrics.put(name, metric);
+    }
+    return metric;
+  }
+
+  private static class ThreadSafeSampleStat {
+
+    private SampleStat stat = new SampleStat();
+
+    synchronized void add(double x) {
+      stat.add(x);
+    }
+
+    synchronized void snapshotInto(MutableRate metric) {
+      if (stat.numSamples() > 0) {
+        metric.add(stat.numSamples(), Math.round(stat.total()));
+        stat.reset();
+      }
+    }
+  }
+
+}
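A usage sketch matching the RpcDetailedMetrics change earlier in this diff (the field and method names below are illustrative): handler threads record samples without contending on a shared lock, and aggregation happens only when the metrics system takes a snapshot.

    // Sketch: low-contention per-method rate recording (names hypothetical).
    @Metric MutableRatesWithAggregation rates;   // instantiated via MutableMetricsFactory

    void record(String methodName, long startMillis) {
      // Each thread writes into its own thread-local stat map.
      rates.add(methodName, Time.monotonicNow() - startMillis);
    }
    // A later rates.snapshot(recordBuilder, all) folds every thread's samples
    // into the global per-method MutableRate instances.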

+ 4 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableStat.java

@@ -102,6 +102,10 @@ public class MutableStat extends MutableMetric {
 
   /**
    * Add a number of samples and their sum to the running stat
+   *
+   * Note that although use of this method will preserve accurate mean values,
+   * large values for numSamples may result in inaccurate variance values due
+   * to the use of a single step of the Welford method for variance calculation.
    * @param numSamples  number of samples
    * @param sum of the samples
    */
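A small numeric sketch of that caveat (metric names are illustrative): batching samples keeps the mean exact but collapses the whole batch into one Welford update, so the spread inside the batch never reaches the variance.

    // Sketch: both stats report mean 20, but only the first reflects the 10/20/30 spread.
    MutableStat perSample = new MutableStat("op", "per-sample", "Ops", "Time");
    perSample.add(10); perSample.add(20); perSample.add(30);

    MutableStat batched = new MutableStat("op", "batched", "Ops", "Time");
    batched.add(3, 60);   // numSamples=3, sum=60: mean preserved, in-batch variance lost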

+ 15 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleStat.java

@@ -27,29 +27,32 @@ import org.apache.hadoop.classification.InterfaceAudience;
 public class SampleStat {
   private final MinMax minmax = new MinMax();
   private long numSamples = 0;
-  private double a0, a1, s0, s1;
+  private double a0, a1, s0, s1, total;
 
   /**
    * Construct a new running sample stat
    */
   public SampleStat() {
     a0 = s0 = 0.0;
+    total = 0.0;
   }
 
   public void reset() {
     numSamples = 0;
     a0 = s0 = 0.0;
+    total = 0.0;
     minmax.reset();
   }
 
   // We want to reuse the object, sometimes.
   void reset(long numSamples, double a0, double a1, double s0, double s1,
-             MinMax minmax) {
+      double total, MinMax minmax) {
     this.numSamples = numSamples;
     this.a0 = a0;
     this.a1 = a1;
     this.s0 = s0;
     this.s1 = s1;
+    this.total = total;
     this.minmax.reset(minmax);
   }
 
@@ -58,7 +61,7 @@ public class SampleStat {
    * @param other the destination to hold our values
    */
   public void copyTo(SampleStat other) {
-    other.reset(numSamples, a0, a1, s0, s1, minmax);
+    other.reset(numSamples, a0, a1, s0, s1, total, minmax);
   }
 
   /**
@@ -80,6 +83,7 @@ public class SampleStat {
    */
   public SampleStat add(long nSamples, double x) {
     numSamples += nSamples;
+    total += x;
 
     if (numSamples == 1) {
       a0 = a1 = x;
@@ -102,11 +106,18 @@ public class SampleStat {
     return numSamples;
   }
 
+  /**
+   * @return the total of all samples added
+   */
+  public double total() {
+    return total;
+  }
+
   /**
    * @return  the arithmetic mean of the samples
    */
   public double mean() {
-    return numSamples > 0 ? a1 : 0.0;
+    return numSamples > 0 ? (total / numSamples) : 0.0;
   }
 
   /**

+ 4 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslInputStream.java

@@ -246,6 +246,9 @@ public class SaslInputStream extends InputStream implements ReadableByteChannel
    */
   @Override
   public int read(byte[] b, int off, int len) throws IOException {
+    if (len == 0) {
+      return 0;
+    }
     if (!useWrap) {
       return inStream.read(b, off, len);
     }
@@ -378,4 +381,4 @@ public class SaslInputStream extends InputStream implements ReadableByteChannel
     }
     return bytesRead;
   }
-}
+}

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java

@@ -569,6 +569,9 @@ public class SaslRpcClient {
 
     @Override
     public synchronized int read(byte[] buf, int off, int len) throws IOException {
+      if (len == 0) {
+        return 0;
+      }
       // fill the buffer with the next RPC message
       if (unwrappedRpcBuffer.remaining() == 0) {
         readNextRpcPacket();
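Both zero-length guards in this diff implement the java.io.InputStream contract: a request for zero bytes must return 0 immediately rather than block to unwrap the next SASL packet. A caller-side sketch of the behaviour being relied on (the stream variable is hypothetical):

    // Sketch: zero-length reads must not block waiting for another wrapped RPC packet.
    byte[] buf = new byte[4096];
    int n = wrappedStream.read(buf, 0, 0);   // returns 0 immediately, even mid-stream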

+ 80 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -43,6 +43,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 import javax.security.auth.Subject;
 import javax.security.auth.callback.CallbackHandler;
@@ -54,14 +55,18 @@ import javax.security.auth.login.LoginContext;
 import javax.security.auth.login.LoginException;
 import javax.security.auth.spi.LoginModule;
 
+import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
 import org.apache.hadoop.metrics2.lib.MutableQuantiles;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
@@ -85,7 +90,8 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "HBase", "Hive", "Oozie"})
 @InterfaceStability.Evolving
 public class UserGroupInformation {
-  private static final Logger LOG = LoggerFactory.getLogger(
+  @VisibleForTesting
+  static final Logger LOG = LoggerFactory.getLogger(
       UserGroupInformation.class);
 
   /**
@@ -121,6 +127,10 @@ public class UserGroupInformation {
     MutableRate loginFailure;
     @Metric("GetGroups") MutableRate getGroups;
     MutableQuantiles[] getGroupsQuantiles;
+    @Metric("Renewal failures since startup")
+    private MutableGaugeLong renewalFailuresTotal;
+    @Metric("Renewal failures since last successful login")
+    private MutableGaugeInt renewalFailures;
 
     static UgiMetrics create() {
       return DefaultMetricsSystem.instance().register(new UgiMetrics());
@@ -138,6 +148,10 @@ public class UserGroupInformation {
         }
       }
     }
+
+    MutableGaugeInt getRenewalFailures() {
+      return renewalFailures;
+    }
   }
   
   /**
@@ -963,6 +977,7 @@ public class UserGroupInformation {
           return;
         }
         long nextRefresh = getRefreshTime(tgt);
+        RetryPolicy rp = null;
         while (true) {
           try {
             long now = Time.now();
@@ -986,13 +1001,40 @@ public class UserGroupInformation {
             }
             nextRefresh = Math.max(getRefreshTime(tgt),
               now + kerberosMinSecondsBeforeRelogin);
+            metrics.renewalFailures.set(0);
+            rp = null;
           } catch (InterruptedException ie) {
             LOG.warn("Terminating renewal thread");
             return;
           } catch (IOException ie) {
-            LOG.warn("Exception encountered while running the" +
-                " renewal command. Aborting renew thread. " + ie);
-            return;
+            metrics.renewalFailuresTotal.incr();
+            final long tgtEndTime = tgt.getEndTime().getTime();
+            LOG.warn("Exception encountered while running the renewal "
+                    + "command for {}. (TGT end time:{}, renewalFailures: {},"
+                + "command for {}. (TGT end time:{}, renewalFailures: {}, "
+                + "renewalFailuresTotal: {})", getUserName(), tgtEndTime,
+            final long now = Time.now();
+            if (rp == null) {
+              // Use a dummy maxRetries to create the policy. The policy will
+              // only be used to get next retry time with exponential back-off.
+              // The final retry time will be later limited within the
+              // tgt endTime in getNextTgtRenewalTime.
+              rp = RetryPolicies.exponentialBackoffRetry(Long.SIZE - 2,
+                  kerberosMinSecondsBeforeRelogin, TimeUnit.MILLISECONDS);
+            }
+            try {
+              nextRefresh = getNextTgtRenewalTime(tgtEndTime, now, rp);
+            } catch (Exception e) {
+              LOG.error("Exception when calculating next tgt renewal time", e);
+              return;
+            }
+            metrics.renewalFailures.incr();
+            // retry until close enough to tgt endTime.
+            if (now > nextRefresh) {
+              LOG.error("TGT is expired. Aborting renew thread for {}.",
+                  getUserName());
+              return;
+            }
           }
         }
       }
@@ -1001,6 +1043,26 @@ public class UserGroupInformation {
     t.setName("TGT Renewer for " + getUserName());
     t.start();
   }
+
+  /**
+   * Get time for next login retry. This will allow the thread to retry with
+   * exponential back-off, until tgt endtime.
+   * Last retry is {@link #kerberosMinSecondsBeforeRelogin} before endtime.
+   *
+   * @param tgtEndTime EndTime of the tgt.
+   * @param now Current time.
+   * @param rp The retry policy.
+   * @return Time for next login retry.
+   */
+  @VisibleForTesting
+  static long getNextTgtRenewalTime(final long tgtEndTime, final long now,
+      final RetryPolicy rp) throws Exception {
+    final long lastRetryTime = tgtEndTime - kerberosMinSecondsBeforeRelogin;
+    final RetryPolicy.RetryAction ra = rp.shouldRetry(null,
+        metrics.renewalFailures.value(), 0, false);
+    return Math.min(lastRetryTime, now + ra.delayMillis);
+  }
+
   /**
    * Log a user in from a keytab file. Loads a user identity from a keytab
    * file and logs them in. They become the currently logged-in user.
@@ -1823,6 +1885,20 @@ public class UserGroupInformation {
     }
   }
 
+  public static void logAllUserInfo(UserGroupInformation ugi) throws
+      IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("UGI: " + ugi);
+      if (ugi.getRealUser() != null) {
+        LOG.debug("+RealUGI: " + ugi.getRealUser());
+      }
+      LOG.debug("+LoginUGI: " + ugi.getLoginUser());
+      for (Token<?> token : ugi.getTokens()) {
+        LOG.debug("+UGI token:" + token);
+      }
+    }
+  }
+
   private void print() throws IOException {
     System.out.println("User: " + getUserName());
     System.out.print("Group Ids: ");
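A sketch of the back-off calculation added above, written as it might appear in a same-package test (the 60-second margin and 10-minute TGT lifetime are hypothetical): the next retry grows exponentially but is always capped at the last useful instant before the TGT expires.

    // Sketch: next retry = min(exponential back-off,
    //                          tgtEndTime - kerberosMinSecondsBeforeRelogin).
    RetryPolicy rp = RetryPolicies.exponentialBackoffRetry(
        Long.SIZE - 2, 60 * 1000, TimeUnit.MILLISECONDS);
    long now = Time.now();
    long tgtEndTime = now + 10 * 60 * 1000;   // TGT expires in ten minutes
    long nextRefresh = UserGroupInformation.getNextTgtRenewalTime(tgtEndTime, now, rp);
    // Once 'now' passes the capped nextRefresh, the renewal thread logs the expiry and exits.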

+ 0 - 58
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SslSelectChannelConnectorSecure.java

@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.security.ssl;
-
-import java.io.IOException;
-import java.util.ArrayList;
-
-import javax.net.ssl.SSLEngine;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.mortbay.jetty.security.SslSelectChannelConnector;
-
-/**
- * This subclass of the Jetty SslSelectChannelConnector exists solely to
- * control the TLS protocol versions allowed.  This is fallout from the
- * POODLE vulnerability (CVE-2014-3566), which requires that SSLv3 be disabled.
- * Only TLS 1.0 and later protocols are allowed.
- */
-@InterfaceAudience.Private
-public class SslSelectChannelConnectorSecure extends SslSelectChannelConnector {
-
-  public SslSelectChannelConnectorSecure() {
-    super();
-  }
-
-  /**
-   * Disable SSLv3 protocol.
-   */
-  @Override
-  protected SSLEngine createSSLEngine() throws IOException {
-    SSLEngine engine = super.createSSLEngine();
-    ArrayList<String> nonSSLProtocols = new ArrayList<String>();
-    for (String p : engine.getEnabledProtocols()) {
-      if (!p.contains("SSLv3")) {
-        nonSSLProtocols.add(p);
-      }
-    }
-    engine.setEnabledProtocols(nonSSLProtocols.toArray(
-        new String[nonSSLProtocols.size()]));
-    return engine;
-  }
-}

+ 44 - 25
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java

@@ -53,6 +53,10 @@ extends AbstractDelegationTokenIdentifier>
   private static final Log LOG = LogFactory
       .getLog(AbstractDelegationTokenSecretManager.class);
 
+  private String formatTokenId(TokenIdent id) {
+    return "(" + id + ")";
+  }
+
   /** 
    * Cache of currently valid tokens, mapping from DelegationTokenIdentifier 
    * to DelegationTokenInformation. Protected by this object lock.
@@ -312,7 +316,8 @@ extends AbstractDelegationTokenIdentifier>
     int keyId = identifier.getMasterKeyId();
     DelegationKey dKey = allKeys.get(keyId);
     if (dKey == null) {
-      LOG.warn("No KEY found for persisted identifier " + identifier.toString());
+      LOG.warn("No KEY found for persisted identifier "
+          + formatTokenId(identifier));
       return;
     }
     byte[] password = createPassword(identifier.getBytes(), dKey.getKey());
@@ -323,7 +328,8 @@ extends AbstractDelegationTokenIdentifier>
       currentTokens.put(identifier, new DelegationTokenInformation(renewDate,
           password, getTrackingIdIfEnabled(identifier)));
     } else {
-      throw new IOException("Same delegation token being added twice.");
+      throw new IOException("Same delegation token being added twice: "
+          + formatTokenId(identifier));
     }
   }
 
@@ -393,7 +399,7 @@ extends AbstractDelegationTokenIdentifier>
     identifier.setMaxDate(now + tokenMaxLifetime);
     identifier.setMasterKeyId(currentKey.getKeyId());
     identifier.setSequenceNumber(sequenceNum);
-    LOG.info("Creating password for identifier: " + identifier
+    LOG.info("Creating password for identifier: " + formatTokenId(identifier)
         + ", currentKey: " + currentKey.getKeyId());
     byte[] password = createPassword(identifier.getBytes(), currentKey.getKey());
     DelegationTokenInformation tokenInfo = new DelegationTokenInformation(now
@@ -401,7 +407,8 @@ extends AbstractDelegationTokenIdentifier>
     try {
       storeToken(identifier, tokenInfo);
     } catch (IOException ioe) {
-      LOG.error("Could not store token !!", ioe);
+      LOG.error("Could not store token " + formatTokenId(identifier) + "!!",
+          ioe);
     }
     return password;
   }
@@ -418,11 +425,14 @@ extends AbstractDelegationTokenIdentifier>
     assert Thread.holdsLock(this);
     DelegationTokenInformation info = getTokenInfo(identifier);
     if (info == null) {
-      throw new InvalidToken("token (" + identifier.toString()
-          + ") can't be found in cache");
+      throw new InvalidToken("token " + formatTokenId(identifier)
+          + " can't be found in cache");
     }
-    if (info.getRenewDate() < Time.now()) {
-      throw new InvalidToken("token (" + identifier.toString() + ") is expired");
+    long now = Time.now();
+    if (info.getRenewDate() < now) {
+      throw new InvalidToken("token " + formatTokenId(identifier) + " is " +
+          "expired, current time: " + Time.formatTime(now) +
+          " expected renewal time: " + Time.formatTime(info.getRenewDate()));
     }
     return info;
   }
@@ -458,8 +468,8 @@ extends AbstractDelegationTokenIdentifier>
       throws InvalidToken {
     byte[] storedPassword = retrievePassword(identifier);
     if (!Arrays.equals(password, storedPassword)) {
-      throw new InvalidToken("token (" + identifier
-          + ") is invalid, password doesn't match");
+      throw new InvalidToken("token " + formatTokenId(identifier)
+          + " is invalid, password doesn't match");
     }
   }
   
@@ -477,32 +487,39 @@ extends AbstractDelegationTokenIdentifier>
     DataInputStream in = new DataInputStream(buf);
     TokenIdent id = createIdentifier();
     id.readFields(in);
-    LOG.info("Token renewal for identifier: " + id + "; total currentTokens "
-        +  currentTokens.size());
+    LOG.info("Token renewal for identifier: " + formatTokenId(id)
+        + "; total currentTokens " +  currentTokens.size());
 
     long now = Time.now();
     if (id.getMaxDate() < now) {
-      throw new InvalidToken(renewer + " tried to renew an expired token");
+      throw new InvalidToken(renewer + " tried to renew an expired token "
+          + formatTokenId(id) + " max expiration date: "
+          + Time.formatTime(id.getMaxDate())
+          + " currentTime: " + Time.formatTime(now));
     }
     if ((id.getRenewer() == null) || (id.getRenewer().toString().isEmpty())) {
       throw new AccessControlException(renewer +
-          " tried to renew a token without a renewer");
+          " tried to renew a token " + formatTokenId(id)
+          + " without a renewer");
     }
     if (!id.getRenewer().toString().equals(renewer)) {
-      throw new AccessControlException(renewer +
-          " tries to renew a token with renewer " + id.getRenewer());
+      throw new AccessControlException(renewer
+          + " tries to renew a token " + formatTokenId(id)
+          + " with non-matching renewer " + id.getRenewer());
     }
     DelegationKey key = getDelegationKey(id.getMasterKeyId());
     if (key == null) {
       throw new InvalidToken("Unable to find master key for keyId="
           + id.getMasterKeyId()
-          + " from cache. Failed to renew an unexpired token"
-          + " with sequenceNumber=" + id.getSequenceNumber());
+          + " from cache. Failed to renew an unexpired token "
+          + formatTokenId(id) + " with sequenceNumber="
+          + id.getSequenceNumber());
     }
     byte[] password = createPassword(token.getIdentifier(), key.getKey());
     if (!Arrays.equals(password, token.getPassword())) {
-      throw new AccessControlException(renewer +
-          " is trying to renew a token with wrong password");
+      throw new AccessControlException(renewer
+          + " is trying to renew a token "
+          + formatTokenId(id) + " with wrong password");
     }
     long renewTime = Math.min(id.getMaxDate(), now + tokenRenewInterval);
     String trackingId = getTrackingIdIfEnabled(id);
@@ -510,7 +527,8 @@ extends AbstractDelegationTokenIdentifier>
         password, trackingId);
 
     if (getTokenInfo(id) == null) {
-      throw new InvalidToken("Renewal request for unknown token");
+      throw new InvalidToken("Renewal request for unknown token "
+          + formatTokenId(id));
     }
     updateToken(id, info);
     return renewTime;
@@ -528,10 +546,11 @@ extends AbstractDelegationTokenIdentifier>
     DataInputStream in = new DataInputStream(buf);
     TokenIdent id = createIdentifier();
     id.readFields(in);
-    LOG.info("Token cancellation requested for identifier: " + id);
+    LOG.info("Token cancellation requested for identifier: "
+        + formatTokenId(id));
     
     if (id.getUser() == null) {
-      throw new InvalidToken("Token with no owner");
+      throw new InvalidToken("Token with no owner " + formatTokenId(id));
     }
     String owner = id.getUser().getUserName();
     Text renewer = id.getRenewer();
@@ -541,11 +560,11 @@ extends AbstractDelegationTokenIdentifier>
         && (renewer == null || renewer.toString().isEmpty() || !cancelerShortName
             .equals(renewer.toString()))) {
       throw new AccessControlException(canceller
-          + " is not authorized to cancel the token");
+          + " is not authorized to cancel the token " + formatTokenId(id));
     }
     DelegationTokenInformation info = currentTokens.remove(id);
     if (info == null) {
-      throw new InvalidToken("Token not found");
+      throw new InvalidToken("Token not found " + formatTokenId(id));
     }
     removeStoredToken(id);
     return id;

+ 31 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java

@@ -48,6 +48,8 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdenti
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
 import org.apache.hadoop.util.HttpExceptionUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.codehaus.jackson.JsonFactory;
+import org.codehaus.jackson.JsonGenerator;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -89,6 +91,8 @@ public abstract class DelegationTokenAuthenticationHandler
   public static final String DELEGATION_TOKEN_UGI_ATTRIBUTE =
       "hadoop.security.delegation-token.ugi";
 
+  public static final String JSON_MAPPER_PREFIX = PREFIX + "json-mapper.";
+
   static {
     DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
         DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
@@ -101,6 +105,7 @@ public abstract class DelegationTokenAuthenticationHandler
   private AuthenticationHandler authHandler;
   private DelegationTokenManager tokenManager;
   private String authType;
+  private JsonFactory jsonFactory;
 
   public DelegationTokenAuthenticationHandler(AuthenticationHandler handler) {
     authHandler = handler;
@@ -120,6 +125,7 @@ public abstract class DelegationTokenAuthenticationHandler
   public void init(Properties config) throws ServletException {
     authHandler.init(config);
     initTokenManager(config);
+    initJsonFactory(config);
   }
 
   /**
@@ -153,6 +159,30 @@ public abstract class DelegationTokenAuthenticationHandler
     tokenManager.init();
   }
 
+  @VisibleForTesting
+  public void initJsonFactory(Properties config) {
+    boolean hasFeature = false;
+    JsonFactory tmpJsonFactory = new JsonFactory();
+
+    for (Map.Entry entry : config.entrySet()) {
+      String key = (String)entry.getKey();
+      if (key.startsWith(JSON_MAPPER_PREFIX)) {
+        JsonGenerator.Feature feature =
+            JsonGenerator.Feature.valueOf(key.substring(JSON_MAPPER_PREFIX
+                .length()));
+        if (feature != null) {
+          hasFeature = true;
+          boolean enabled = Boolean.parseBoolean((String)entry.getValue());
+          tmpJsonFactory.configure(feature, enabled);
+        }
+      }
+    }
+
+    if (hasFeature) {
+      jsonFactory = tmpJsonFactory;
+    }
+  }
+
   @Override
   public void destroy() {
     tokenManager.destroy();
@@ -298,7 +328,7 @@ public abstract class DelegationTokenAuthenticationHandler
             if (map != null) {
               response.setContentType(MediaType.APPLICATION_JSON);
               Writer writer = response.getWriter();
-              ObjectMapper jsonMapper = new ObjectMapper();
+              ObjectMapper jsonMapper = new ObjectMapper(jsonFactory);
               jsonMapper.writeValue(writer, map);
               writer.write(ENTER);
               writer.flush();
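A configuration sketch for the new json-mapper hook (the handler variable is hypothetical): any org.codehaus.jackson JsonGenerator.Feature name may be appended to JSON_MAPPER_PREFIX and toggled with a boolean value.

    // Sketch: disable field-name quoting on the handler's shared JsonFactory
    // ('handler' is a hypothetical DelegationTokenAuthenticationHandler instance).
    Properties config = new Properties();
    config.setProperty(
        DelegationTokenAuthenticationHandler.JSON_MAPPER_PREFIX + "QUOTE_FIELD_NAMES",
        "false");
    handler.initJsonFactory(config);   // normally invoked for you from init(config)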

+ 158 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java

@@ -19,14 +19,23 @@
 package org.apache.hadoop.util;
 
 import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
 import java.io.IOException;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicReference;
 
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Class that provides utility functions for checking disk problem
@@ -34,6 +43,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class DiskChecker {
+  public static final Logger LOG = LoggerFactory.getLogger(DiskChecker.class);
+
   public static class DiskErrorException extends IOException {
     public DiskErrorException(String msg) {
       super(msg);
@@ -49,7 +60,12 @@ public class DiskChecker {
       super(msg);
     }
   }
-      
+
+  // Provider that abstracts some FileOutputStream operations for
+  // testability.
+  private static AtomicReference<FileIoProvider> fileIoProvider =
+      new AtomicReference<>(new DefaultFileIoProvider());
+
   /**
    * Create the directory if it doesn't exist and check that dir is readable,
    * writable and executable
@@ -63,6 +79,7 @@ public class DiskChecker {
                                    + dir.toString());
     }
     checkAccessByFileMethods(dir);
+    doDiskIo(dir);
   }
 
   /**
@@ -80,6 +97,7 @@ public class DiskChecker {
   throws DiskErrorException, IOException {
     mkdirsWithExistsAndPermissionCheck(localFS, dir, expected);
     checkAccessByFileMethods(localFS.pathToFile(dir));
+    doDiskIo(localFS.pathToFile(dir));
   }
 
   /**
@@ -173,4 +191,143 @@ public class DiskChecker {
     if (created || !localFS.getFileStatus(dir).getPermission().equals(expected))
       localFS.setPermission(dir, expected);
   }
+
+  // State related to running disk IO checks.
+  private static final String DISK_IO_FILE_PREFIX =
+      "DiskChecker.OK_TO_DELETE_.";
+
+  @VisibleForTesting
+  static final int DISK_IO_MAX_ITERATIONS = 3;
+
+  /**
+   * Performs some disk IO by writing to a new file in the given directory
+   * and sync'ing file contents to disk.
+   *
+   * This increases the likelihood of catching catastrophic disk/controller
+   * failures sooner.
+   *
+   * @param dir directory to be checked.
+   * @throws DiskErrorException if we hit an error while trying to perform
+   *         disk IO against the file.
+   */
+  private static void doDiskIo(File dir) throws DiskErrorException {
+    try {
+      IOException ioe = null;
+
+      for (int i = 0; i < DISK_IO_MAX_ITERATIONS; ++i) {
+        final File file = getFileNameForDiskIoCheck(dir, i+1);
+        try {
+          diskIoCheckWithoutNativeIo(file);
+          return;
+        } catch (IOException e) {
+          // Let's retry a few times before we really give up and
+          // declare the disk as bad.
+          ioe = e;
+        }
+      }
+      throw ioe;  // Just rethrow the last exception to signal failure.
+    } catch(IOException e) {
+      throw new DiskErrorException("Error checking directory " + dir, e);
+    }
+  }
+
+  /**
+   * Try to perform some disk IO by writing to the given file
+   * without using Native IO.
+   *
+   * @param file the file to write the probe data to.
+   * @throws IOException if there was a non-retriable error.
+   */
+  private static void diskIoCheckWithoutNativeIo(File file)
+      throws IOException {
+    FileOutputStream fos = null;
+
+    try {
+      final FileIoProvider provider = fileIoProvider.get();
+      fos = provider.get(file);
+      provider.write(fos, new byte[1]);
+      fos.getFD().sync();
+      fos.close();
+      fos = null;
+      if (!file.delete() && file.exists()) {
+        throw new IOException("Failed to delete " + file);
+      }
+      file = null;
+    } finally {
+      IOUtils.cleanup(null, fos);
+      FileUtils.deleteQuietly(file);
+    }
+  }
+
+  /**
+   * Generate a path name for a test file under the given directory.
+   *
+   * @return file object.
+   */
+  @VisibleForTesting
+  static File getFileNameForDiskIoCheck(File dir, int iterationCount) {
+    if (iterationCount < DISK_IO_MAX_ITERATIONS) {
+      // Use file names of the format prefix.001 by default.
+      return new File(dir,
+          DISK_IO_FILE_PREFIX + String.format("%03d", iterationCount));
+    } else {
+      // If the first few checks fail, fall back to a randomly generated
+      // file name.
+      return new File(dir, DISK_IO_FILE_PREFIX + UUID.randomUUID());
+    }
+  }
+
+  /**
+   * An interface that abstracts operations on {@link FileOutputStream}
+   * objects for testability.
+   */
+  interface FileIoProvider {
+    FileOutputStream get(File f) throws FileNotFoundException;
+    void write(FileOutputStream fos, byte[] data) throws IOException;
+  }
+
+  /**
+   * The default implementation of {@link FileIoProvider}.
+   */
+  private static class DefaultFileIoProvider implements FileIoProvider {
+    /**
+     * See {@link FileOutputStream#FileOutputStream(File)}.
+     */
+    @Override
+    public FileOutputStream get(File f) throws FileNotFoundException {
+      return new FileOutputStream(f);
+    }
+
+    /**
+     * See {@link FileOutputStream#write(byte[])}.
+     */
+    @Override
+    public void write(FileOutputStream fos, byte[] data) throws IOException {
+      fos.write(data);
+    }
+  }
+
+  /**
+   * Replace the {@link FileIoProvider} for tests.
+   * This method MUST NOT be used outside of unit tests.
+   *
+   * @param newFosProvider the replacement FileIoProvider.
+   * @return the old FileIoProvider.
+   */
+  @VisibleForTesting
+  static FileIoProvider replaceFileOutputStreamProvider(
+      FileIoProvider newFosProvider) {
+    return fileIoProvider.getAndSet(newFosProvider);
+  }
+
+  /**
+   * Retrieve the current {@link FileIoProvider}.
+   * This method MUST NOT be used outside of unit tests.
+   *
+   * @return the current FileIoProvider.
+   */
+  @VisibleForTesting
+  static FileIoProvider getFileOutputStreamProvider() {
+    return fileIoProvider.get();
+  }
 }
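
The new probe is reached through the existing public checkDir entry points; below is a small sketch of a caller (the directory path is illustrative). The FileIoProvider swap methods above are package-private and @VisibleForTesting, so only in-package tests can stub the write path.

    import java.io.File;

    import org.apache.hadoop.util.DiskChecker;
    import org.apache.hadoop.util.DiskChecker.DiskErrorException;

    public class DiskCheckSketch {
      public static void main(String[] args) {
        File dataDir = new File("/tmp/disk-check-demo");  // hypothetical directory
        try {
          // Creates the directory if needed, checks r/w/x access, and now also
          // writes, syncs and deletes a small probe file (retried up to
          // DISK_IO_MAX_ITERATIONS times before the disk is declared bad).
          DiskChecker.checkDir(dataDir);
          System.out.println("Disk check passed for " + dataDir);
        } catch (DiskErrorException e) {
          System.err.println("Disk check failed: " + e.getMessage());
        }
      }
    }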

+ 22 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java

@@ -567,21 +567,28 @@ public class GenericOptionsParser {
    * @param out stream to print the usage message to.
    */
   public static void printGenericCommandUsage(PrintStream out) {
-    
-    out.println("Generic options supported are");
-    out.println("-conf <configuration file>     specify an application configuration file");
-    out.println("-D <property=value>            use value for given property");
-    out.println("-fs <local|namenode:port>      specify a namenode");
-    out.println("-jt <local|resourcemanager:port>    specify a ResourceManager");
-    out.println("-files <comma separated list of files>    " + 
-      "specify comma separated files to be copied to the map reduce cluster");
-    out.println("-libjars <comma separated list of jars>    " +
-      "specify comma separated jar files to include in the classpath.");
-    out.println("-archives <comma separated list of archives>    " +
-                "specify comma separated archives to be unarchived" +
-                " on the compute machines.\n");
-    out.println("The general command line syntax is");
-    out.println("command [genericOptions] [commandOptions]\n");
+    out.println("Generic options supported are:");
+    out.println("-conf <configuration file>        "
+        + "specify an application configuration file");
+    out.println("-D <property=value>               "
+        + "define a value for a given property");
+    out.println("-fs <local|namenode:port>         "
+        + "specify a namenode");
+    out.println("-jt <local|resourcemanager:port>  "
+        + "specify a ResourceManager");
+    out.println("-files <file1,...>                "
+        + "specify a comma-separated list of files to be copied to the map "
+        + "reduce cluster");
+    out.println("-libjars <jar1,...>               "
+        + "specify a comma-separated list of jar files to be included in the "
+        + "classpath");
+    out.println("-archives <archive1,...>          "
+        + "specify a comma-separated list of archives to be unarchived on the "
+        + "compute machines");
+    out.println();
+    out.println("The general command line syntax is:");
+    out.println("command [genericOptions] [commandOptions]");
+    out.println();
   }
   
 }
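
These generic options are normally parsed on a tool's behalf via ToolRunner; here is a hedged sketch of such a tool (the class name and property key are made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    public class GenericOptionsDemo extends Configured implements Tool {
      @Override
      public int run(String[] remainingArgs) throws Exception {
        // Generic options such as -D and -conf have already been applied to the
        // configuration by the time run() is called; only command options remain.
        Configuration conf = getConf();
        System.out.println("my.example.prop = " + conf.get("my.example.prop"));
        return 0;
      }

      public static void main(String[] args) throws Exception {
        // Invoked e.g. as:
        //   hadoop jar demo.jar GenericOptionsDemo -D my.example.prop=value input output
        int exitCode =
            ToolRunner.run(new Configuration(), new GenericOptionsDemo(), args);
        System.exit(exitCode);
      }
    }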

+ 21 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/InstrumentedLock.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedLock.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs;
+package org.apache.hadoop.util;
 
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
@@ -26,14 +26,12 @@ import java.util.concurrent.locks.ReentrantLock;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.commons.logging.Log;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Timer;
 
 import com.google.common.annotations.VisibleForTesting;
 
 /**
  * This is a debugging class that can be used by callers to track
- * whether a specifc lock is being held for too long and periodically
+ * whether a specific lock is being held for too long and periodically
  * log a warning and stack trace, if so.
  *
  * The logged warnings are throttled so that logs are not spammed.
@@ -100,19 +98,19 @@ public class InstrumentedLock implements Lock {
   @Override
   public void lock() {
     lock.lock();
-    lockAcquireTimestamp = clock.monotonicNow();
+    startLockTiming();
   }
 
   @Override
   public void lockInterruptibly() throws InterruptedException {
     lock.lockInterruptibly();
-    lockAcquireTimestamp = clock.monotonicNow();
+    startLockTiming();
   }
 
   @Override
   public boolean tryLock() {
     if (lock.tryLock()) {
-      lockAcquireTimestamp = clock.monotonicNow();
+      startLockTiming();
       return true;
     }
     return false;
@@ -121,7 +119,7 @@ public class InstrumentedLock implements Lock {
   @Override
   public boolean tryLock(long time, TimeUnit unit) throws InterruptedException {
     if (lock.tryLock(time, unit)) {
-      lockAcquireTimestamp = clock.monotonicNow();
+      startLockTiming();
       return true;
     }
     return false;
@@ -150,6 +148,13 @@ public class InstrumentedLock implements Lock {
         StringUtils.getStackTrace(Thread.currentThread())));
   }
 
+  /**
+   * Starts timing for the instrumented lock.
+   */
+  protected void startLockTiming() {
+    lockAcquireTimestamp = clock.monotonicNow();
+  }
+
   /**
    * Log a warning if the lock was held for too long.
    *
@@ -158,7 +163,7 @@ public class InstrumentedLock implements Lock {
    * @param acquireTime  - timestamp just after acquiring the lock.
    * @param releaseTime - timestamp just before releasing the lock.
    */
-  private void check(long acquireTime, long releaseTime) {
+  protected void check(long acquireTime, long releaseTime) {
     if (!logger.isWarnEnabled()) {
       return;
     }
@@ -182,4 +187,11 @@ public class InstrumentedLock implements Lock {
     }
   }
 
+  protected Lock getLock() {
+    return lock;
+  }
+
+  protected Timer getTimer() {
+    return clock;
+  }
 }

+ 92 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadLock.java

@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * This is a wrapper class around a <tt>ReadLock</tt>.
+ * It extends the class {@link InstrumentedLock}, and can be used to track
+ * whether a specific read lock is being held for too long and log
+ * warnings if so.
+ *
+ * The logged warnings are throttled so that logs are not spammed.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class InstrumentedReadLock extends InstrumentedLock {
+
+  private final ReentrantReadWriteLock readWriteLock;
+
+  /**
+   * Uses a ThreadLocal to keep the lock acquisition time, since multiple
+   * threads can hold the read lock concurrently.
+   */
+  private final ThreadLocal<Long> readLockHeldTimeStamp =
+      new ThreadLocal<Long>() {
+    @Override
+    protected Long initialValue() {
+      return Long.MAX_VALUE;
+    };
+  };
+
+  public InstrumentedReadLock(String name, Log logger,
+      ReentrantReadWriteLock readWriteLock,
+      long minLoggingGapMs, long lockWarningThresholdMs) {
+    this(name, logger, readWriteLock, minLoggingGapMs, lockWarningThresholdMs,
+        new Timer());
+  }
+
+  @VisibleForTesting
+  InstrumentedReadLock(String name, Log logger,
+      ReentrantReadWriteLock readWriteLock,
+      long minLoggingGapMs, long lockWarningThresholdMs, Timer clock) {
+    super(name, logger, readWriteLock.readLock(), minLoggingGapMs,
+        lockWarningThresholdMs, clock);
+    this.readWriteLock = readWriteLock;
+  }
+
+  @Override
+  public void unlock() {
+    boolean needReport = readWriteLock.getReadHoldCount() == 1;
+    long localLockReleaseTime = getTimer().monotonicNow();
+    long localLockAcquireTime = readLockHeldTimeStamp.get();
+    getLock().unlock();
+    if (needReport) {
+      readLockHeldTimeStamp.remove();
+      check(localLockAcquireTime, localLockReleaseTime);
+    }
+  }
+
+  /**
+   * Starts timing for the instrumented read lock.
+   * The acquisition time is recorded in a ThreadLocal.
+   */
+  @Override
+  protected void startLockTiming() {
+    if (readWriteLock.getReadHoldCount() == 1) {
+      readLockHeldTimeStamp.set(getTimer().monotonicNow());
+    }
+  }
+}

+ 58 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedReadWriteLock.java

@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This is a wrapper class around a {@link ReentrantReadWriteLock}.
+ * It implements the interface {@link ReadWriteLock}, and can be used to
+ * create instrumented <tt>ReadLock</tt> and <tt>WriteLock</tt>.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class InstrumentedReadWriteLock implements ReadWriteLock {
+
+  private final Lock readLock;
+  private final Lock writeLock;
+
+  InstrumentedReadWriteLock(boolean fair, String name, Log logger,
+      long minLoggingGapMs, long lockWarningThresholdMs) {
+    ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(fair);
+    readLock = new InstrumentedReadLock(name, logger, readWriteLock,
+        minLoggingGapMs, lockWarningThresholdMs);
+    writeLock = new InstrumentedWriteLock(name, logger, readWriteLock,
+        minLoggingGapMs, lockWarningThresholdMs);
+  }
+
+  @Override
+  public Lock readLock() {
+    return readLock;
+  }
+
+  @Override
+  public Lock writeLock() {
+    return writeLock;
+  }
+}

+ 54 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/InstrumentedWriteLock.java

@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * This is a wrapper class around a <tt>WriteLock</tt>.
+ * It extends the class {@link InstrumentedLock}, and can be used to track
+ * whether a specific write lock is being held for too long and log
+ * warnings if so.
+ *
+ * The logged warnings are throttled so that logs are not spammed.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class InstrumentedWriteLock extends InstrumentedLock {
+
+  public InstrumentedWriteLock(String name, Log logger,
+      ReentrantReadWriteLock readWriteLock,
+      long minLoggingGapMs, long lockWarningThresholdMs) {
+    this(name, logger, readWriteLock, minLoggingGapMs, lockWarningThresholdMs,
+        new Timer());
+  }
+
+  @VisibleForTesting
+  InstrumentedWriteLock(String name, Log logger,
+      ReentrantReadWriteLock readWriteLock,
+      long minLoggingGapMs, long lockWarningThresholdMs, Timer clock) {
+    super(name, logger, readWriteLock.writeLock(), minLoggingGapMs,
+        lockWarningThresholdMs, clock);
+  }
+}
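
A sketch of composing the new wrappers around a ReentrantReadWriteLock using the public constructors shown above; the lock name, logger, and thresholds are illustrative:

    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.util.InstrumentedReadLock;
    import org.apache.hadoop.util.InstrumentedWriteLock;

    public class InstrumentedLockSketch {
      private static final Log LOG = LogFactory.getLog(InstrumentedLockSketch.class);

      public static void main(String[] args) {
        ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
        // Warn if a lock is held longer than 300 ms, but log at most once per 5 s.
        Lock readLock =
            new InstrumentedReadLock("demoLock", LOG, rwLock, 5000, 300);
        Lock writeLock =
            new InstrumentedWriteLock("demoLock", LOG, rwLock, 5000, 300);

        writeLock.lock();
        try {
          // ... mutate shared state; a long hold here triggers a throttled warning.
        } finally {
          writeLock.unlock();
        }

        readLock.lock();
        try {
          // ... read shared state under the instrumented read lock.
        } finally {
          readLock.unlock();
        }
      }
    }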

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LimitInputStream.java

@@ -74,6 +74,9 @@ public final class LimitInputStream extends FilterInputStream {
 
   @Override
   public int read(byte[] b, int off, int len) throws IOException {
+    if (len == 0) {
+      return 0;
+    }
     if (left == 0) {
       return -1;
     }

+ 5 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java

@@ -237,7 +237,7 @@ public abstract class Shell {
   /** Return a command to get permission information. */
   public static String[] getGetPermissionCommand() {
     return (WINDOWS) ? new String[] { getWinUtilsPath(), "ls", "-F" }
-                     : new String[] { "/bin/ls", "-ld" };
+                     : new String[] { "ls", "-ld" };
   }
 
   /** Return a command to set permission. */
@@ -734,8 +734,7 @@ public abstract class Shell {
     }
   }
 
-  public static final boolean isBashSupported = checkIsBashSupported();
-  private static boolean checkIsBashSupported() {
+  public static boolean checkIsBashSupported() throws InterruptedIOException {
     if (Shell.WINDOWS) {
       return false;
     }
@@ -746,6 +745,9 @@ public abstract class Shell {
       String[] args = {"bash", "-c", "echo 1000"};
       shexec = new ShellCommandExecutor(args);
       shexec.execute();
+    } catch (InterruptedIOException iioe) {
+      LOG.warn("Interrupted, unable to determine if bash is supported", iioe);
+      throw iioe;
     } catch (IOException ioe) {
       LOG.warn("Bash is not supported by the OS", ioe);
       supported = false;

+ 18 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.util;
 
+import java.text.SimpleDateFormat;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -32,6 +34,14 @@ public final class Time {
    */
   private static final long NANOSECONDS_PER_MILLISECOND = 1000000;
 
+  private static final ThreadLocal<SimpleDateFormat> DATE_FORMAT =
+      new ThreadLocal<SimpleDateFormat>() {
+    @Override
+    protected SimpleDateFormat initialValue() {
+      return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss,SSSZ");
+    }
+  };
+
   /**
    * Current system time.  Do not use this to calculate a duration or interval
    * to sleep, because it will be broken by settimeofday.  Instead, use
@@ -54,4 +64,12 @@ public final class Time {
   public static long monotonicNow() {
     return System.nanoTime() / NANOSECONDS_PER_MILLISECOND;
   }
+
+  /**
+   * Convert a time in milliseconds to a human-readable format.
+   * @param millis the time in milliseconds
+   * @return a human-readable string for the input time
+   */
+  public static String formatTime(long millis) {
+    return DATE_FORMAT.get().format(millis);
+  }
 }
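
A brief sketch contrasting the new formatTime helper with the monotonic clock; the printed timestamp is only an example of the yyyy-MM-dd HH:mm:ss,SSSZ pattern:

    import org.apache.hadoop.util.Time;

    public class TimeFormatSketch {
      public static void main(String[] args) throws InterruptedException {
        long start = Time.monotonicNow();   // monotonic, for measuring durations
        Thread.sleep(25);
        long elapsedMs = Time.monotonicNow() - start;

        // Wall-clock time rendered as e.g. "2016-10-06 14:05:12,345-0700".
        System.out.println("Now: " + Time.formatTime(Time.now())
            + " (elapsed " + elapsedMs + " ms)");
      }
    }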

+ 4 - 2
hadoop-common-project/hadoop-common/src/main/native/native.vcxproj

@@ -156,14 +156,14 @@
     <ClCompile Include="src\org\apache\hadoop\util\bulk_crc32.c" />
     <ClCompile Include="src\org\apache\hadoop\util\NativeCodeLoader.c">
       <AdditionalOptions Condition="'$(SnappyEnabled)' == 'true'">/D HADOOP_SNAPPY_LIBRARY=L\"snappy.dll\"</AdditionalOptions>
-      <AdditionalOptions Condition="'$(IsalEnabled)' == 'true'">/D HADOOP_ISAL_LIBRARY=\"isa-l.dll\"</AdditionalOptions>
+      <AdditionalOptions Condition="'$(IsalEnabled)' == 'true'">/D HADOOP_ISAL_LIBRARY=L\"isa-l.dll\"</AdditionalOptions>
     </ClCompile>
     <ClCompile Include="src\org\apache\hadoop\util\NativeCrc32.c" />
     <ClCompile Include="src\org\apache\hadoop\yarn\server\nodemanager\windows_secure_container_executor.c">
       <AdditionalIncludeDirectories>src\org\apache\hadoop\io\nativeio;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
     </ClCompile>
     <ClCompile Include="src\org\apache\hadoop\io\erasurecode\isal_load.c" Condition="'$(IsalEnabled)' == 'true'">
-      <AdditionalOptions>/D HADOOP_ISAL_LIBRARY=\"isa-l.dll\"</AdditionalOptions>
+      <AdditionalOptions>/D HADOOP_ISAL_LIBRARY=L\"isa-l.dll\"</AdditionalOptions>
     </ClCompile>
     <ClCompile Include="src\org\apache\hadoop\io\erasurecode\erasure_code.c" Condition="'$(IsalEnabled)' == 'true'"/>
     <ClCompile Include="src\org\apache\hadoop\io\erasurecode\gf_util.c" Condition="'$(IsalEnabled)' == 'true'"/>
@@ -173,6 +173,8 @@
     <ClCompile Include="src\org\apache\hadoop\io\erasurecode\jni_common.c" Condition="'$(IsalEnabled)' == 'true'"/>
     <ClCompile Include="src\org\apache\hadoop\io\erasurecode\jni_rs_encoder.c" Condition="'$(IsalEnabled)' == 'true'"/>
     <ClCompile Include="src\org\apache\hadoop\io\erasurecode\jni_rs_decoder.c" Condition="'$(IsalEnabled)' == 'true'"/>
+    <ClCompile Include="src\org\apache\hadoop\io\erasurecode\jni_xor_encoder.c" Condition="'$(IsalEnabled)' == 'true'"/>
+    <ClCompile Include="src\org\apache\hadoop\io\erasurecode\jni_xor_decoder.c" Condition="'$(IsalEnabled)' == 'true'"/>
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="..\src\org\apache\hadoop\util\crc32c_tables.h" />

+ 80 - 0
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_decoder.c

@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <jni.h>
+
+#include "org_apache_hadoop.h"
+#include "erasure_code.h"
+#include "gf_util.h"
+#include "jni_common.h"
+#include "org_apache_hadoop_io_erasurecode_rawcoder_NativeXORRawDecoder.h"
+
+typedef struct _XORDecoder {
+  IsalCoder isalCoder;
+  unsigned char* inputs[MMAX];
+  unsigned char* outputs[1];
+} XORDecoder;
+
+JNIEXPORT void JNICALL
+Java_org_apache_hadoop_io_erasurecode_rawcoder_NativeXORRawDecoder_initImpl(
+  JNIEnv *env, jobject thiz, jint numDataUnits, jint numParityUnits) {
+  XORDecoder* xorDecoder = (XORDecoder*)malloc(sizeof(XORDecoder));
+  memset(xorDecoder, 0, sizeof(*xorDecoder));
+  initCoder(&xorDecoder->isalCoder, numDataUnits, numParityUnits);
+
+  setCoder(env, thiz, &xorDecoder->isalCoder);
+}
+
+JNIEXPORT void JNICALL
+Java_org_apache_hadoop_io_erasurecode_rawcoder_NativeXORRawDecoder_decodeImpl(
+  JNIEnv *env, jobject thiz, jobjectArray inputs, jintArray inputOffsets,
+  jint dataLen, jintArray erasedIndexes, jobjectArray outputs,
+                                                    jintArray outputOffsets) {
+  int i, j, numDataUnits, numParityUnits, chunkSize;
+  XORDecoder* xorDecoder;
+
+  xorDecoder = (XORDecoder*)getCoder(env, thiz);
+  numDataUnits = ((IsalCoder*)xorDecoder)->numDataUnits;
+  numParityUnits = ((IsalCoder*)xorDecoder)->numParityUnits;
+  chunkSize = (int)dataLen;
+
+  getInputs(env, inputs, inputOffsets, xorDecoder->inputs,
+                                               numDataUnits + numParityUnits);
+  getOutputs(env, outputs, outputOffsets, xorDecoder->outputs, numParityUnits);
+
+  for (i = 0; i < numDataUnits + numParityUnits; i++) {
+    if (xorDecoder->inputs[i] == NULL) {
+      continue;
+    }
+    for (j = 0; j < chunkSize; j++) {
+      xorDecoder->outputs[0][j] ^= xorDecoder->inputs[i][j];
+    }
+  }
+}
+
+JNIEXPORT void JNICALL
+Java_org_apache_hadoop_io_erasurecode_rawcoder_NativeXORRawDecoder_destroyImpl
+  (JNIEnv *env, jobject thiz){
+  XORDecoder* xorDecoder = (XORDecoder*)getCoder(env, thiz);
+  free(xorDecoder);
+}

+ 82 - 0
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_encoder.c

@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <jni.h>
+
+#include "org_apache_hadoop.h"
+#include "erasure_code.h"
+#include "gf_util.h"
+#include "jni_common.h"
+#include "org_apache_hadoop_io_erasurecode_rawcoder_NativeXORRawEncoder.h"
+
+typedef struct _XOREncoder {
+  IsalCoder isalCoder;
+  unsigned char* inputs[MMAX];
+  unsigned char* outputs[1];
+} XOREncoder;
+
+JNIEXPORT void JNICALL
+Java_org_apache_hadoop_io_erasurecode_rawcoder_NativeXORRawEncoder_initImpl
+  (JNIEnv *env, jobject thiz, jint numDataUnits, jint numParityUnits) {
+  XOREncoder* xorEncoder = (XOREncoder*)malloc(sizeof(XOREncoder));
+  memset(xorEncoder, 0, sizeof(*xorEncoder));
+  initCoder(&xorEncoder->isalCoder, numDataUnits, numParityUnits);
+
+  setCoder(env, thiz, &xorEncoder->isalCoder);
+}
+
+JNIEXPORT void JNICALL
+Java_org_apache_hadoop_io_erasurecode_rawcoder_NativeXORRawEncoder_encodeImpl(
+  JNIEnv *env, jobject thiz, jobjectArray inputs, jintArray inputOffsets,
+  jint dataLen, jobjectArray outputs, jintArray outputOffsets) {
+
+  int i, j, numDataUnits, numParityUnits, chunkSize;
+  XOREncoder* xorEncoder;
+
+  xorEncoder = (XOREncoder*)getCoder(env, thiz);
+  numDataUnits = ((IsalCoder*)xorEncoder)->numDataUnits;
+  numParityUnits = ((IsalCoder*)xorEncoder)->numParityUnits;
+  chunkSize = (int)dataLen;
+
+  getInputs(env, inputs, inputOffsets, xorEncoder->inputs, numDataUnits);
+  getOutputs(env, outputs, outputOffsets, xorEncoder->outputs, numParityUnits);
+
+  // Get the first buffer's data.
+  for (j = 0; j < chunkSize; j++) {
+    xorEncoder->outputs[0][j] = xorEncoder->inputs[0][j];
+  }
+
+  // XOR with everything else.
+  for (i = 1; i < numDataUnits; i++) {
+    for (j = 0; j < chunkSize; j++) {
+      xorEncoder->outputs[0][j] ^= xorEncoder->inputs[i][j];
+    }
+  }
+}
+
+JNIEXPORT void JNICALL
+Java_org_apache_hadoop_io_erasurecode_rawcoder_NativeXORRawEncoder_destroyImpl
+  (JNIEnv *env, jobject thiz) {
+  XOREncoder* xorEncoder = (XOREncoder*)getCoder(env, thiz);
+  free(xorEncoder);
+}

+ 14 - 0
hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier

@@ -0,0 +1,14 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+org.apache.hadoop.crypto.key.kms.KMSDelegationToken$KMSDelegationTokenIdentifier

+ 106 - 58
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -41,10 +41,10 @@
 <property>
   <name>hadoop.http.filter.initializers</name>
   <value>org.apache.hadoop.http.lib.StaticUserWebFilter</value>
-  <description>A comma separated list of class names. Each class in the list 
-  must extend org.apache.hadoop.http.FilterInitializer. The corresponding 
-  Filter will be initialized. Then, the Filter will be applied to all user 
-  facing jsp and servlet web pages.  The ordering of the list defines the 
+  <description>A comma separated list of class names. Each class in the list
+  must extend org.apache.hadoop.http.FilterInitializer. The corresponding
+  Filter will be initialized. Then, the Filter will be applied to all user
+  facing jsp and servlet web pages.  The ordering of the list defines the
   ordering of the filters.</description>
 </property>
 
@@ -76,14 +76,14 @@
   <name>hadoop.security.group.mapping</name>
   <value>org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback</value>
   <description>
-    Class for user to group mapping (get groups for a given user) for ACL. 
+    Class for user to group mapping (get groups for a given user) for ACL.
     The default implementation,
-    org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback, 
-    will determine if the Java Native Interface (JNI) is available. If JNI is 
-    available the implementation will use the API within hadoop to resolve a 
-    list of groups for a user. If JNI is not available then the shell 
-    implementation, ShellBasedUnixGroupsMapping, is used.  This implementation 
-    shells out to the Linux/Unix environment with the 
+    org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback,
+    will determine if the Java Native Interface (JNI) is available. If JNI is
+    available the implementation will use the API within hadoop to resolve a
+    list of groups for a user. If JNI is not available then the shell
+    implementation, ShellBasedUnixGroupsMapping, is used.  This implementation
+    shells out to the Linux/Unix environment with the
     <code>bash -c groups</code> command to resolve a list of groups for a user.
   </description>
 </property>
@@ -481,10 +481,10 @@
 <property>
   <name>hadoop.rpc.protection</name>
   <value>authentication</value>
-  <description>A comma-separated list of protection values for secured sasl 
+  <description>A comma-separated list of protection values for secured sasl
       connections. Possible values are authentication, integrity and privacy.
-      authentication means authentication only and no integrity or privacy; 
-      integrity implies authentication and integrity are enabled; and privacy 
+      authentication means authentication only and no integrity or privacy;
+      integrity implies authentication and integrity are enabled; and privacy
       implies all of authentication, integrity and privacy are enabled.
       hadoop.security.saslproperties.resolver.class can be used to override
       the hadoop.rpc.protection for a connection at the server side.
@@ -494,17 +494,17 @@
 <property>
   <name>hadoop.security.saslproperties.resolver.class</name>
   <value></value>
-  <description>SaslPropertiesResolver used to resolve the QOP used for a 
-      connection. If not specified, the full set of values specified in 
-      hadoop.rpc.protection is used while determining the QOP used for the 
-      connection. If a class is specified, then the QOP values returned by 
+  <description>SaslPropertiesResolver used to resolve the QOP used for a
+      connection. If not specified, the full set of values specified in
+      hadoop.rpc.protection is used while determining the QOP used for the
+      connection. If a class is specified, then the QOP values returned by
       the class will be used while determining the QOP used for the connection.
   </description>
 </property>
 
 <property>
   <name>hadoop.security.sensitive-config-keys</name>
-  <value>password$,fs.s3.*[Ss]ecret.?[Kk]ey,fs.azure.account.key.*,dfs.webhdfs.oauth2.[a-z]+.token,hadoop.security.sensitive-config-keys</value>
+  <value>secret$,password$,ssl.keystore.pass$,fs.s3.*[Ss]ecret.?[Kk]ey,fs.azure.account.key.*,dfs.webhdfs.oauth2.[a-z]+.token,hadoop.security.sensitive-config-keys</value>
   <description>A comma-separated list of regular expressions to match against
       configuration keys that should be redacted where appropriate, for
       example, when logging modified properties during a reconfiguration,
@@ -566,7 +566,7 @@
   page size (4096 on Intel x86), and it determines how much data is
   buffered during read and write operations.</description>
 </property>
-  
+
 <property>
   <name>io.bytes.per.checksum</name>
   <value>512</value>
@@ -599,7 +599,7 @@
   either by by name or the full pathname.  In the former case, the
   library is located by the dynamic linker, usually searching the
   directories specified in the environment variable LD_LIBRARY_PATH.
-  
+
   The value of "system-native" indicates that the default system
   library should be used.  To indicate that the algorithm should
   operate entirely in Java, specify "java-builtin".</description>
@@ -647,7 +647,27 @@
   <name>io.erasurecode.codec.rs-default.rawcoder</name>
   <value>org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory</value>
   <description>
-    Raw coder implementation for the rs-default codec.
+    Raw coder implementation for the rs-default codec. The default value is a
+    pure Java implementation. There is also a native implementation. Its value
+    is org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory.
+  </description>
+</property>
+
+<property>
+  <name>io.erasurecode.codec.rs-legacy.rawcoder</name>
+  <value>org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactoryLegacy</value>
+  <description>
+    Raw coder implementation for the rs-legacy codec.
+  </description>
+</property>
+
+<property>
+  <name>io.erasurecode.codec.xor.rawcoder</name>
+  <value>org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory</value>
+  <description>
+    Raw coder implementation for the xor codec. The default value is a pure Java
+    implementation. There is also a native implementation. Its value is
+    org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory.
   </description>
 </property>
 
@@ -689,8 +709,8 @@
   <description>Number of minutes between trash checkpoints.
   Should be smaller or equal to fs.trash.interval. If zero,
   the value is set to the value of fs.trash.interval.
-  Every time the checkpointer runs it creates a new checkpoint 
-  out of current and removes checkpoints created more than 
+  Every time the checkpointer runs it creates a new checkpoint
+  out of current and removes checkpoints created more than
   fs.trash.interval minutes ago.
   </description>
 </property>
@@ -715,7 +735,7 @@
   <name>fs.AbstractFileSystem.har.impl</name>
   <value>org.apache.hadoop.fs.HarFs</value>
   <description>The AbstractFileSystem for har: uris.</description>
-</property> 
+</property>
 
 <property>
   <name>fs.AbstractFileSystem.hdfs.impl</name>
@@ -786,7 +806,7 @@
 <property>
   <name>fs.s3n.maxRetries</name>
   <value>4</value>
-  <description>The maximum number of retries for reading or writing files to S3, 
+  <description>The maximum number of retries for reading or writing files to S3,
   before we signal failure to the application.
   </description>
 </property>
@@ -875,15 +895,37 @@
     com.amazonaws.auth.AWSCredentialsProvider.
 
     These are loaded and queried in sequence for a valid set of credentials.
-    Each listed class must provide either an accessible constructor accepting
-    java.net.URI and org.apache.hadoop.conf.Configuration, or an accessible
-    default constructor.
+    Each listed class must implement one of the following means of
+    construction, which are attempted in order:
+    1. a public constructor accepting java.net.URI and
+        org.apache.hadoop.conf.Configuration,
+    2. a public static method named getInstance that accepts no
+       arguments and returns an instance of
+       com.amazonaws.auth.AWSCredentialsProvider, or
+    3. a public default constructor.
 
     Specifying org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider allows
     anonymous access to a publicly accessible S3 bucket without any credentials.
     Please note that allowing anonymous access to an S3 bucket compromises
     security and therefore is unsuitable for most use cases. It can be useful
     for accessing public data sets without requiring AWS credentials.
+
+    If unspecified, then the default list of credential provider classes,
+    queried in sequence, is:
+    1. org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider: supports static
+        configuration of AWS access key ID and secret access key.  See also
+        fs.s3a.access.key and fs.s3a.secret.key.
+    2. com.amazonaws.auth.EnvironmentVariableCredentialsProvider: supports
+        configuration of AWS access key ID and secret access key in
+        environment variables named AWS_ACCESS_KEY_ID and
+        AWS_SECRET_ACCESS_KEY, as documented in the AWS SDK.
+    3. org.apache.hadoop.fs.s3a.SharedInstanceProfileCredentialsProvider:
+        a shared instance of
+        com.amazonaws.auth.InstanceProfileCredentialsProvider from the AWS
+        SDK, which supports use of instance profile credentials if running
+        in an EC2 VM.  Using this shared instance potentially reduces load
+        on the EC2 instance metadata service for multi-threaded
+        applications.
   </description>
 </property>
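
For illustration, a hedged sketch of a custom provider satisfying construction option 1 above (a public constructor taking java.net.URI and Configuration); the class name and the "demo.s3a.*" keys are hypothetical:

    import java.net.URI;

    import com.amazonaws.auth.AWSCredentials;
    import com.amazonaws.auth.AWSCredentialsProvider;
    import com.amazonaws.auth.BasicAWSCredentials;
    import org.apache.hadoop.conf.Configuration;

    public class CustomConfCredentialsProvider implements AWSCredentialsProvider {
      private final String accessKey;
      private final String secretKey;

      // Construction option 1: public (URI, Configuration) constructor.
      public CustomConfCredentialsProvider(URI bucketUri, Configuration conf) {
        // The "demo.s3a.*" keys are made up for this sketch.
        this.accessKey = conf.getTrimmed("demo.s3a.access.key");
        this.secretKey = conf.getTrimmed("demo.s3a.secret.key");
      }

      @Override
      public AWSCredentials getCredentials() {
        return new BasicAWSCredentials(accessKey, secretKey);
      }

      @Override
      public void refresh() {
        // Static credentials: nothing to refresh.
      }
    }

Such a class would then be added to the comma-separated provider list configured by this property.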
 
@@ -987,7 +1029,7 @@
 <property>
   <name>fs.s3a.paging.maximum</name>
   <value>5000</value>
-  <description>How many keys to request from S3 when doing 
+  <description>How many keys to request from S3 when doing
      directory listings at a time.</description>
 </property>
 
@@ -1013,8 +1055,10 @@
 
 <property>
   <name>fs.s3a.multipart.size</name>
-  <value>104857600</value>
-  <description>How big (in bytes) to split upload or copy operations up into.</description>
+  <value>100M</value>
+  <description>How big (in bytes) to split upload or copy operations up into.
+    A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
+  </description>
 </property>
 
 <property>
@@ -1022,7 +1066,8 @@
   <value>2147483647</value>
   <description>How big (in bytes) to split upload or copy operations up into.
     This also controls the partition size in renamed files, as rename() involves
-    copying the source file(s)
+    copying the source file(s).
+    A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
   </description>
 </property>
 
@@ -1078,15 +1123,16 @@
 
 <property>
   <name>fs.s3a.block.size</name>
-  <value>33554432</value>
+  <value>32M</value>
   <description>Block size to use when reading files using s3a: file system.
+    A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
   </description>
 </property>
 
 <property>
   <name>fs.s3a.buffer.dir</name>
   <value>${hadoop.tmp.dir}/s3a</value>
-  <description>Comma separated list of directories that will be used to buffer file 
+  <description>Comma separated list of directories that will be used to buffer file
     uploads to.</description>
 </property>
 
@@ -1141,10 +1187,12 @@
 
 <property>
   <name>fs.s3a.readahead.range</name>
-  <value>65536</value>
+  <value>64K</value>
   <description>Bytes to read ahead during a seek() before closing and
   re-opening the S3 HTTP connection. This option will be overridden if
-  any call to setReadahead() is made to an open stream.</description>
+  any call to setReadahead() is made to an open stream.
+  A suffix from the set {K,M,G,T,P} may be used to scale the numeric value.
+  </description>
 </property>
 
 <property>
@@ -1177,7 +1225,7 @@
 <property>
   <name>io.seqfile.compress.blocksize</name>
   <value>1000000</value>
-  <description>The minimum block size for compression in block compressed 
+  <description>The minimum block size for compression in block compressed
           SequenceFiles.
   </description>
 </property>
@@ -1193,7 +1241,7 @@
 <property>
   <name>io.seqfile.sorter.recordlimit</name>
   <value>1000000</value>
-  <description>The limit on number of records to be kept in memory in a spill 
+  <description>The limit on number of records to be kept in memory in a spill
           in SequenceFiles.Sorter
   </description>
 </property>
@@ -1271,7 +1319,7 @@
 <property>
   <name>ipc.client.connect.timeout</name>
   <value>20000</value>
-  <description>Indicates the number of milliseconds a client will wait for the 
+  <description>Indicates the number of milliseconds a client will wait for the
                socket to establish a server connection.
   </description>
 </property>
@@ -1368,10 +1416,10 @@
 <property>
   <name>hadoop.security.impersonation.provider.class</name>
   <value></value>
-  <description>A class which implements ImpersonationProvider interface, used to 
-       authorize whether one user can impersonate a specific user. 
-       If not specified, the DefaultImpersonationProvider will be used. 
-       If a class is specified, then that class will be used to determine 
+  <description>A class which implements ImpersonationProvider interface, used to
+       authorize whether one user can impersonate a specific user.
+       If not specified, the DefaultImpersonationProvider will be used.
+       If a class is specified, then that class will be used to determine
        the impersonation capability.
   </description>
 </property>
@@ -1433,7 +1481,7 @@
 <property>
   <name>net.topology.script.number.args</name>
   <value>100</value>
-  <description> The max number of args that the script configured with 
+  <description> The max number of args that the script configured with
     net.topology.script.file.name should be run with. Each arg is an
     IP address.
   </description>
@@ -1447,7 +1495,7 @@
     org.apache.hadoop.net.TableMapping. The file format is a two column text
     file, with columns separated by whitespace. The first column is a DNS or
     IP address and the second column specifies the rack where the address maps.
-    If no entry corresponding to a host in the cluster is found, then 
+    If no entry corresponding to a host in the cluster is found, then
     /default-rack is assumed.
   </description>
 </property>
@@ -1963,14 +2011,14 @@
   <name>nfs.exports.allowed.hosts</name>
   <value>* rw</value>
   <description>
-    By default, the export can be mounted by any client. The value string 
-    contains machine name and access privilege, separated by whitespace 
-    characters. The machine name format can be a single host, a Java regular 
-    expression, or an IPv4 address. The access privilege uses rw or ro to 
-    specify read/write or read-only access of the machines to exports. If the 
+    By default, the export can be mounted by any client. The value string
+    contains machine name and access privilege, separated by whitespace
+    characters. The machine name format can be a single host, a Java regular
+    expression, or an IPv4 address. The access privilege uses rw or ro to
+    specify read/write or read-only access of the machines to exports. If the
     access privilege is not provided, the default is read-only. Entries are separated by ";".
     For example: "192.168.0.0/22 rw ; host.*\.example\.com ; host1.test.org ro;".
-    Only the NFS gateway needs to restart after this property is updated. 
+    Only the NFS gateway needs to restart after this property is updated.
   </description>
 </property>
 
@@ -2024,7 +2072,7 @@
   <name>hadoop.security.crypto.codec.classes.aes.ctr.nopadding</name>
   <value>org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec, org.apache.hadoop.crypto.JceAesCtrCryptoCodec</value>
   <description>
-    Comma-separated list of crypto codec implementations for AES/CTR/NoPadding. 
+    Comma-separated list of crypto codec implementations for AES/CTR/NoPadding.
     The first implementation will be used if available, others are fallbacks.
   </description>
 </property>
@@ -2041,7 +2089,7 @@
   <name>hadoop.security.crypto.jce.provider</name>
   <value></value>
   <description>
-    The JCE provider name used in CryptoCodec. 
+    The JCE provider name used in CryptoCodec.
   </description>
 </property>
 
@@ -2049,7 +2097,7 @@
   <name>hadoop.security.crypto.buffer.size</name>
   <value>8192</value>
   <description>
-    The buffer size used by CryptoInputStream and CryptoOutputStream. 
+    The buffer size used by CryptoInputStream and CryptoOutputStream.
   </description>
 </property>
 
@@ -2057,7 +2105,7 @@
   <name>hadoop.security.java.secure.random.algorithm</name>
   <value>SHA1PRNG</value>
   <description>
-    The java secure random algorithm. 
+    The java secure random algorithm.
   </description>
 </property>
 
@@ -2065,7 +2113,7 @@
   <name>hadoop.security.secure.random.impl</name>
   <value></value>
   <description>
-    Implementation of secure random. 
+    Implementation of secure random.
   </description>
 </property>
 
@@ -2136,7 +2184,7 @@
   <value>0</value>
   <description>The maximum number of concurrent connections a server is allowed
     to accept. If this limit is exceeded, incoming connections will first fill
-    the listen queue and then may go to an OS-specific listen overflow queue. 
+    the listen queue and then may go to an OS-specific listen overflow queue.
     The client may fail or timeout, but the server can avoid running out of file
     descriptors using this feature. 0 means no limit.
   </description>

+ 0 - 24
hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md

@@ -60,7 +60,6 @@ The following table lists the configuration property names that are deprecated i
 | fs.checkpoint.period | dfs.namenode.checkpoint.period |
 | fs.default.name | fs.defaultFS |
 | hadoop.configured.node.mapping | net.topology.configured.node.mapping |
-| hadoop.job.history.location | mapreduce.jobtracker.jobhistory.location |
 | hadoop.native.lib | io.native.lib.available |
 | hadoop.net.static.resolutions | mapreduce.tasktracker.net.static.resolutions |
 | hadoop.pipes.command-file.keep | mapreduce.pipes.commandfile.preserve |
@@ -102,7 +101,6 @@ The following table lists the configuration property names that are deprecated i
 | mapred.cache.localArchives | mapreduce.job.cache.local.archives |
 | mapred.cache.localFiles | mapreduce.job.cache.local.files |
 | mapred.child.tmp | mapreduce.task.tmp.dir |
-| mapred.cluster.average.blacklist.threshold | mapreduce.jobtracker.blacklist.average.threshold |
 | mapred.cluster.map.memory.mb | mapreduce.cluster.mapmemory.mb |
 | mapred.cluster.max.map.memory.mb | mapreduce.jobtracker.maxmapmemory.mb |
 | mapred.cluster.max.reduce.memory.mb | mapreduce.jobtracker.maxreducememory.mb |
@@ -115,9 +113,6 @@ The following table lists the configuration property names that are deprecated i
 | mapred.healthChecker.script.args | mapreduce.tasktracker.healthchecker.script.args |
 | mapred.healthChecker.script.path | mapreduce.tasktracker.healthchecker.script.path |
 | mapred.healthChecker.script.timeout | mapreduce.tasktracker.healthchecker.script.timeout |
-| mapred.heartbeats.in.second | mapreduce.jobtracker.heartbeats.in.second |
-| mapred.hosts.exclude | mapreduce.jobtracker.hosts.exclude.filename |
-| mapred.hosts | mapreduce.jobtracker.hosts.filename |
 | mapred.inmem.merge.threshold | mapreduce.reduce.merge.inmem.threshold |
 | mapred.input.dir.formats | mapreduce.input.multipleinputs.dir.formats |
 | mapred.input.dir.mappers | mapreduce.input.multipleinputs.dir.mappers |
@@ -127,7 +122,6 @@ The following table lists the configuration property names that are deprecated i
 | mapred.job.classpath.archives | mapreduce.job.classpath.archives |
 | mapred.job.classpath.files | mapreduce.job.classpath.files |
 | mapred.job.id | mapreduce.job.id |
-| mapred.jobinit.threads | mapreduce.jobtracker.jobinit.threads |
 | mapred.job.map.memory.mb | mapreduce.map.memory.mb |
 | mapred.job.name | mapreduce.job.name |
 | mapred.job.priority | mapreduce.job.priority |
@@ -139,23 +133,9 @@ The following table lists the configuration property names that are deprecated i
 | mapred.job.reuse.jvm.num.tasks | mapreduce.job.jvm.numtasks |
 | mapred.job.shuffle.input.buffer.percent | mapreduce.reduce.shuffle.input.buffer.percent |
 | mapred.job.shuffle.merge.percent | mapreduce.reduce.shuffle.merge.percent |
-| mapred.job.tracker.handler.count | mapreduce.jobtracker.handler.count |
-| mapred.job.tracker.history.completed.location | mapreduce.jobtracker.jobhistory.completed.location |
-| mapred.job.tracker.http.address | mapreduce.jobtracker.http.address |
-| mapred.jobtracker.instrumentation | mapreduce.jobtracker.instrumentation |
-| mapred.jobtracker.job.history.block.size | mapreduce.jobtracker.jobhistory.block.size |
-| mapred.job.tracker.jobhistory.lru.cache.size | mapreduce.jobtracker.jobhistory.lru.cache.size |
 | mapred.job.tracker | mapreduce.jobtracker.address |
-| mapred.jobtracker.maxtasks.per.job | mapreduce.jobtracker.maxtasks.perjob |
 | mapred.job.tracker.persist.jobstatus.active | mapreduce.jobtracker.persist.jobstatus.active |
-| mapred.job.tracker.persist.jobstatus.dir | mapreduce.jobtracker.persist.jobstatus.dir |
-| mapred.job.tracker.persist.jobstatus.hours | mapreduce.jobtracker.persist.jobstatus.hours |
-| mapred.jobtracker.restart.recover | mapreduce.jobtracker.restart.recover |
-| mapred.job.tracker.retiredjobs.cache.size | mapreduce.jobtracker.retiredjobs.cache.size |
 | mapred.job.tracker.retire.jobs | mapreduce.jobtracker.retirejobs |
-| mapred.jobtracker.taskalloc.capacitypad | mapreduce.jobtracker.taskscheduler.taskalloc.capacitypad |
-| mapred.jobtracker.taskScheduler | mapreduce.jobtracker.taskscheduler |
-| mapred.jobtracker.taskScheduler.maxRunningTasksPerJob | mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob |
 | mapred.join.expr | mapreduce.join.expr |
 | mapred.join.keycomparator | mapreduce.join.keycomparator |
 | mapred.lazy.output.format | mapreduce.output.lazyoutputformat.outputformat |
@@ -179,7 +159,6 @@ The following table lists the configuration property names that are deprecated i
 | mapred.max.map.failures.percent | mapreduce.map.failures.maxpercent |
 | mapred.max.reduce.failures.percent | mapreduce.reduce.failures.maxpercent |
 | mapred.max.split.size | mapreduce.input.fileinputformat.split.maxsize |
-| mapred.max.tracker.blacklists | mapreduce.jobtracker.tasktracker.maxblacklists |
 | mapred.max.tracker.failures | mapreduce.job.maxtaskfailures.per.tracker |
 | mapred.merge.recordsBeforeProgress | mapreduce.task.merge.progress.records |
 | mapred.min.split.size | mapreduce.input.fileinputformat.split.minsize |
@@ -232,7 +211,6 @@ The following table lists the configuration property names that are deprecated i
 | mapred.tasktracker.dns.interface | mapreduce.tasktracker.dns.interface |
 | mapred.tasktracker.dns.nameserver | mapreduce.tasktracker.dns.nameserver |
 | mapred.tasktracker.events.batchsize | mapreduce.tasktracker.events.batchsize |
-| mapred.tasktracker.expiry.interval | mapreduce.jobtracker.expire.trackers.interval |
 | mapred.task.tracker.http.address | mapreduce.tasktracker.http.address |
 | mapred.tasktracker.indexcache.mb | mapreduce.tasktracker.indexcache.mb |
 | mapred.tasktracker.instrumentation | mapreduce.tasktracker.instrumentation |
@@ -251,7 +229,6 @@ The following table lists the configuration property names that are deprecated i
 | mapreduce.combine.class | mapreduce.job.combine.class |
 | mapreduce.inputformat.class | mapreduce.job.inputformat.class |
 | mapreduce.job.counters.limit | mapreduce.job.counters.max |
-| mapreduce.jobtracker.permissions.supergroup | mapreduce.cluster.permissions.supergroup |
 | mapreduce.map.class | mapreduce.job.map.class |
 | mapreduce.outputformat.class | mapreduce.job.outputformat.class |
 | mapreduce.partitioner.class | mapreduce.job.partitioner.class |
@@ -277,7 +254,6 @@ The following table lists the configuration property names that are deprecated i
 | topology.script.file.name | net.topology.script.file.name |
 | topology.script.number.args | net.topology.script.number.args |
 | user.name | mapreduce.job.user.name |
-| webinterface.private.actions | mapreduce.jobtracker.webinterface.trusted |
 | yarn.app.mapreduce.yarn.app.mapreduce.client-am.ipc.max-retries-on-timeouts | yarn.app.mapreduce.client-am.ipc.max-retries-on-timeouts |
 | yarn.client.app-submission.poll-interval | yarn.client.application-client-protocol.poll-timeout-ms |
 

+ 1 - 1
hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md

@@ -142,7 +142,7 @@ The output columns with -count -q are: QUOTA, REMAINING\_QUOTA, SPACE\_QUOTA, RE
 
 The output columns with -count -u are: QUOTA, REMAINING\_QUOTA, SPACE\_QUOTA, REMAINING\_SPACE\_QUOTA
 
-The -t option shows the quota and usage for each storage type.
+The -t option shows the quota and usage for each storage type. It is ignored if neither the -u nor the -q option is given. The accepted parameters for the -t option (case insensitive, except for the parameter "") are: "", "all", "ram_disk", "ssd", "disk" or "archive".
 
 The -h option shows sizes in human readable format.
 

+ 15 - 0
hadoop-common-project/hadoop-common/src/site/markdown/filesystem/introduction.md

@@ -373,6 +373,21 @@ a time proportional to the quantity of data to upload, and inversely proportiona
 to the network bandwidth. It may also fail &mdash;a failure that is better
 escalated than ignored.
 
+1. **Authorization**. Hadoop uses the `FileStatus` class to
+represent core metadata of files and directories, including the owner, group and
+permissions.  Object stores might not have a viable way to persist this
+metadata, so they might need to populate `FileStatus` with stub values.  Even if
+the object store persists this metadata, it still might not be feasible for the
+object store to enforce file authorization in the same way as a traditional file
+system.  If the object store cannot persist this metadata, then the recommended
+convention is:
+    * File owner is reported as the current user.
+    * File group also is reported as the current user.
+    * Directory permissions are reported as 777.
+    * File permissions are reported as 666.
+    * File system APIs that set ownership and permissions execute successfully
+      without error, but they are no-ops.
+
 Object stores with these characteristics cannot be used as a direct replacement
 for HDFS. In terms of this specification, their implementations of the
 specified operations do not match those required. They are considered supported

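The convention added above maps directly onto stub `FileStatus` values. The following is a minimal sketch under those assumptions; the helper class and method are hypothetical and not part of any real connector, and the block size and replication figures are arbitrary placeholders.

```java
// Hedged sketch of the stub-metadata convention described in the hunk above.
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation;

public class StubStatusExample {
  static FileStatus stubStatus(Path path, long length, boolean isDir)
      throws IOException {
    // Owner and group are both reported as the current user.
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    FsPermission perm = isDir
        ? new FsPermission((short) 0777)   // directories report 777
        : new FsPermission((short) 0666);  // files report 666
    long now = System.currentTimeMillis();
    // Replication and block size are placeholders; object stores have neither.
    return new FileStatus(length, isDir, 1, 64 * 1024 * 1024, now, now,
        perm, user, user, path);
  }
}
```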
+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java

@@ -31,7 +31,7 @@ import javax.ws.rs.core.HttpHeaders;
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
 
-import org.mortbay.util.ajax.JSON;
+import org.eclipse.jetty.util.ajax.JSON;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 import org.w3c.dom.Node;

+ 2 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigRedactor.java

@@ -47,6 +47,7 @@ public class TestConfigRedactor {
         "dfs.webhdfs.oauth2.refresh.token",
         "ssl.server.keystore.keypassword",
         "ssl.server.keystore.password",
+        "httpfs.ssl.keystore.pass",
         "hadoop.security.sensitive-config-keys"
     );
     for (String key : sensitiveKeys) {
@@ -60,6 +61,7 @@ public class TestConfigRedactor {
         "fs.defaultFS",
         "dfs.replication",
         "ssl.server.keystore.location",
+        "httpfs.config.dir",
         "hadoop.security.credstore.java-keystore-provider.password-file"
     );
     for (String key : normalKeys) {

+ 47 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfigurationFieldsBase.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.conf;
 
+import org.apache.commons.lang.StringUtils;
 import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -146,6 +147,14 @@ public abstract class TestConfigurationFieldsBase {
    */
   private Set<String> xmlFieldsMissingInConfiguration = null;
 
+  /**
+   * A set of strings used to check for collision of default values.
+   * For each string in the set, the default values of all parameters whose
+   * names contain that string must be distinct.
+   */
+  @SuppressWarnings("checkstyle:visibilitymodifier")
+  protected Set<String> filtersForDefaultValueCollisionCheck = new HashSet<>();
+
   /**
    * Member variable for debugging base class operation
    */
@@ -719,4 +728,42 @@ public abstract class TestConfigurationFieldsBase {
     System.out.println("=====");
     System.out.println();
   }
+
+  /**
+   * For each specified string, get the default parameter values whose names
+   * contain the string. Then check whether any of these default values collide.
+   * This is, for example, useful to make sure there is no collision of default
+   * ports across different services.
+   */
+  @Test
+  public void testDefaultValueCollision() {
+    for (String filter : filtersForDefaultValueCollisionCheck) {
+      System.out.println("Checking if any of the default values whose name " +
+          "contains string \"" + filter + "\" collide.");
+
+      // Map from filtered default value to name of the corresponding parameter.
+      Map<String, String> filteredValues = new HashMap<>();
+
+      int valuesChecked = 0;
+      for (Map.Entry<String, String> ent :
+          configurationDefaultVariables.entrySet()) {
+        // Apply the name filter to the default parameters.
+        if (ent.getKey().contains(filter)) {
+          // Check only for numerical values.
+          if (StringUtils.isNumeric(ent.getValue())) {
+            String crtValue =
+                filteredValues.putIfAbsent(ent.getValue(), ent.getKey());
+            assertTrue("Parameters " + ent.getKey() + " and " + crtValue +
+                " are using the same default value!", crtValue == null);
+          }
+          valuesChecked++;
+        }
+      }
+
+      System.out.println(
+          "Checked " + valuesChecked + " default values for collision.");
+    }
+  }
 }

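A hedged sketch of how a concrete subclass might opt in to the new collision check. The subclass, XML resource, and config-keys class below are hypothetical, and `initializeMemberVariables()`, `xmlFilename`, and `configurationClasses` are assumed to be the base class's usual extension points.

```java
// Hypothetical subclass; only the filter registration is specific to the
// collision check introduced above.
public class TestMyServiceConfigFields extends TestConfigurationFieldsBase {
  @Override
  public void initializeMemberVariables() {
    xmlFilename = "myservice-default.xml";                          // assumed
    configurationClasses = new Class[] {MyServiceConfigKeys.class}; // assumed
    // Default values of parameters whose names contain "PORT" must all
    // differ, e.g. so two services never ship with the same default port.
    filtersForDefaultValueCollisionCheck.add("PORT");
  }
}
```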
+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java

@@ -32,7 +32,7 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.mortbay.log.Log;
+import org.eclipse.jetty.util.log.Log;
 
 /**
  * <p>
@@ -797,7 +797,7 @@ public abstract class FSMainOperationsBaseTest extends FileSystemTestHelper {
       rename(src, dst, false, false, false, Rename.NONE);
       Assert.fail("Should throw FileNotFoundException");
     } catch (IOException e) {
-      Log.info("XXX", e);
+      Log.getLog().info("XXX", e);
       Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
     }
 

+ 115 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDefaultUri.java

@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.CoreMatchers.is;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Test;
+
+/**
+ * Test default URI related APIs in {@link FileSystem}.
+ */
+public class TestDefaultUri {
+  private Configuration conf = new Configuration();
+
+  @Test
+  public void testGetDefaultUri() {
+    conf.set(FS_DEFAULT_NAME_KEY, "hdfs://nn_host");
+    URI uri = FileSystem.getDefaultUri(conf);
+    assertThat(uri.getScheme(), is("hdfs"));
+    assertThat(uri.getAuthority(), is("nn_host"));
+  }
+
+  @Test
+  public void testGetDefaultUriWithPort() {
+    conf.set(FS_DEFAULT_NAME_KEY, "hdfs://nn_host:5432");
+    URI uri = FileSystem.getDefaultUri(conf);
+    assertThat(uri.getScheme(), is("hdfs"));
+    assertThat(uri.getAuthority(), is("nn_host:5432"));
+  }
+
+  @Test
+  public void testGetDefaultUriTrailingSlash() {
+    conf.set(FS_DEFAULT_NAME_KEY, "hdfs://nn_host/");
+    URI uri = FileSystem.getDefaultUri(conf);
+    assertThat(uri.getScheme(), is("hdfs"));
+    assertThat(uri.getAuthority(), is("nn_host"));
+  }
+
+  @Test
+  public void testGetDefaultUriNoScheme() {
+    conf.set(FS_DEFAULT_NAME_KEY, "nn_host");
+    URI uri = FileSystem.getDefaultUri(conf);
+    assertThat(uri.getScheme(), is("hdfs"));
+    assertThat(uri.getAuthority(), is("nn_host"));
+  }
+
+  @Test
+  public void testGetDefaultUriNoSchemeTrailingSlash() {
+    conf.set(FS_DEFAULT_NAME_KEY, "nn_host/");
+    try {
+      FileSystem.getDefaultUri(conf);
+      fail("Expect IAE: No scheme in default FS");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(
+          "No scheme in default FS", e);
+    }
+  }
+
+  @Test
+  public void testFsGet() throws IOException {
+    conf.set(FS_DEFAULT_NAME_KEY, "file:///");
+    FileSystem fs = FileSystem.get(conf);
+    assertThat(fs, instanceOf(LocalFileSystem.class));
+  }
+
+  @Test
+  public void testFsGetNoScheme() throws IOException {
+    // Bare host name or address indicates hdfs scheme
+    conf.set(FS_DEFAULT_NAME_KEY, "nn_host");
+    try {
+      FileSystem.get(conf);
+      fail("Expect IOE: No FileSystem for scheme: hdfs");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains(
+          "No FileSystem for scheme: hdfs", e);
+    }
+  }
+
+  @Test
+  public void testFsGetNoSchemeTrailingSlash() throws IOException {
+    // Bare host name or address with trailing slash is invalid
+    conf.set(FS_DEFAULT_NAME_KEY, "nn_host/");
+    try {
+      FileSystem.get(conf);
+      fail("Expect IAE: No scheme in default FS");
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(
+          "No scheme in default FS", e);
+    }
+  }
+}

+ 51 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java

@@ -26,12 +26,15 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 
+import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
+import java.io.PrintStream;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Before;
@@ -607,4 +610,52 @@ public class TestFsShellCopy {
         shellRun("-copyFromLocal", srcPath.toString(), noDirName + "/"),
         is(not(0)));
   }
+
+  @Test
+  public void testPutSrcDirNoPerm()
+      throws Exception {
+    final Path src = new Path(testRootDir, "srcNoPerm");
+    final Path dst = new Path(testRootDir, "dst");
+    lfs.delete(src, true);
+    lfs.mkdirs(src, new FsPermission((short)0));
+    lfs.delete(dst, true);
+
+    try {
+      final ByteArrayOutputStream err = new ByteArrayOutputStream();
+      PrintStream oldErr = System.err;
+      System.setErr(new PrintStream(err));
+      shellRun(1, "-put", src.toString(), dst.toString());
+      System.setErr(oldErr);
+      System.err.print(err.toString());
+      assertTrue(err.toString().contains(
+          FSExceptionMessages.PERMISSION_DENIED));
+    } finally {
+      // Make sure the test directory can be deleted
+      lfs.setPermission(src, new FsPermission((short)0755));
+    }
+  }
+
+  @Test
+  public void testPutSrcFileNoPerm()
+      throws Exception {
+    final Path src = new Path(testRootDir, "srcNoPerm");
+    final Path dst = new Path(testRootDir, "dst");
+    lfs.delete(src, true);
+    lfs.create(src);
+    lfs.setPermission(src, new FsPermission((short)0));
+    lfs.delete(dst, true);
+
+    try {
+      final ByteArrayOutputStream err = new ByteArrayOutputStream();
+      PrintStream oldErr = System.err;
+      System.setErr(new PrintStream(err));
+      shellRun(1, "-put", src.toString(), dst.toString());
+      System.setErr(oldErr);
+      System.err.print(err.toString());
+      assertTrue(err.toString().contains("(Permission denied)"));
+    } finally {
+      // make sure the test file can be deleted
+      lfs.setPermission(src, new FsPermission((short)0755));
+    }
+  }
 }

+ 9 - 9
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractCreateTest.java

@@ -121,7 +121,7 @@ public abstract class AbstractContractCreateTest extends
     try {
       assertIsDirectory(path);
     } catch (AssertionError failure) {
-      if (isSupported(IS_BLOBSTORE)) {
+      if (isSupported(CREATE_OVERWRITES_DIRECTORY)) {
         // file/directory hack surfaces here
         throw new AssumptionViolatedException(failure.toString(), failure);
       }
@@ -137,10 +137,10 @@ public abstract class AbstractContractCreateTest extends
       FileStatus status = getFileSystem().getFileStatus(path);
 
       boolean isDir = status.isDirectory();
-      if (!isDir && isSupported(IS_BLOBSTORE)) {
-        // object store: downgrade to a skip so that the failure is visible
-        // in test results
-        skip("Object store allows a file to overwrite a directory");
+      if (!isDir && isSupported(CREATE_OVERWRITES_DIRECTORY)) {
+        // For some file systems, downgrade to a skip so that the failure is
+        // visible in test results.
+        skip("This Filesystem allows a file to overwrite a directory");
       }
       fail("write of file over dir succeeded");
     } catch (FileAlreadyExistsException expected) {
@@ -170,10 +170,10 @@ public abstract class AbstractContractCreateTest extends
                                    1024)) {
       if (!getFileSystem().exists(path)) {
 
-        if (isSupported(IS_BLOBSTORE)) {
-          // object store: downgrade to a skip so that the failure is visible
-          // in test results
-          skip("Filesystem is an object store and newly created files are not immediately visible");
+        if (isSupported(CREATE_VISIBILITY_DELAYED)) {
+          // For some file systems, downgrade to a skip so that the failure is
+          // visible in test results.
+          skip("This Filesystem delays visibility of newly created files");
         }
         assertPathExists("expected path to be visible before anything written",
                          path);

+ 18 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractOptions.java

@@ -36,17 +36,34 @@ public interface ContractOptions {
    */
   String FS_CONTRACT_KEY = "fs.contract.";
 
+  /**
+   * Flag to indicate that a newly created file may overwrite a pre-existing
+   * directory.
+   * {@value}
+   */
+  String CREATE_OVERWRITES_DIRECTORY = "create-overwrites-directory";
+
+  /**
+   * Flag to indicate that a newly created file is not made visible in the
+   * namespace immediately.  Instead, the file becomes visible at a later point
+   * in the file creation lifecycle, such as when the client closes it.
+   * {@value}
+   */
+  String CREATE_VISIBILITY_DELAYED = "create-visibility-delayed";
+
   /**
    * Is a filesystem case sensitive.
    * Some of the filesystems that say "no" here may mean
    * that it varies from platform to platform -the localfs being the key
    * example.
+   * {@value}
    */
   String IS_CASE_SENSITIVE = "is-case-sensitive";
 
   /**
    * Blobstore flag. Implies it's not a real directory tree and
    * consistency is below that which Hadoop expects
+   * {@value}
    */
   String IS_BLOBSTORE = "is-blobstore";
 
@@ -196,6 +213,7 @@ public interface ContractOptions {
   /**
    * Limit for #of random seeks to perform.
    * Keep low for remote filesystems for faster tests
+   * {@value}
    */
   String TEST_RANDOM_SEEK_COUNT = "test.random-seek-count";
 

+ 6 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java

@@ -834,6 +834,7 @@ public class ContractTestUtils extends Assert {
 
     long totalBytesRead = 0;
     int nextExpectedNumber = 0;
+    NanoTimer timer = new NanoTimer();
     try (InputStream inputStream = fs.open(path)) {
       while (true) {
         final int bytesRead = inputStream.read(testBuffer);
@@ -862,6 +863,8 @@ public class ContractTestUtils extends Assert {
             " bytes but only received " + totalBytesRead);
       }
     }
+    timer.end("Time to read %d bytes", expectedSize);
+    bandwidth(timer, expectedSize);
   }
 
   /**
@@ -925,9 +928,12 @@ public class ContractTestUtils extends Assert {
     final Path objectPath = new Path(parent, objectName);
 
     // Write test file in a specific pattern
+    NanoTimer timer = new NanoTimer();
     assertEquals(fileSize,
         generateTestFile(fs, objectPath, fileSize, testBufferSize, modulus));
     assertPathExists(fs, "not created successful", objectPath);
+    timer.end("Time to write %d bytes", fileSize);
+    bandwidth(timer, fileSize);
 
     // Now read the same file back and verify its content
     try {

+ 7 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java

@@ -467,11 +467,16 @@ public class TestCount {
         + "The -v option displays a header line.\n"
         + "The -x option excludes snapshots from being calculated. \n"
         + "The -t option displays quota by storage types.\n"
-        + "It must be used with -q option.\n"
+        + "It should be used with -q or -u option, "
+        + "otherwise it will be ignored.\n"
         + "If a comma-separated list of storage types is given after the -t option, \n"
         + "it displays the quota and usage for the specified types. \n"
         + "Otherwise, it displays the quota and usage for all the storage \n"
-        + "types that support quota \n"
+        + "types that support quota. The list of possible storage "
+        + "types(case insensitive):\n"
+        + "ram_disk, ssd, disk and archive.\n"
+        + "It can also pass the value '', 'all' or 'ALL' to specify all the "
+        + "storage types.\n"
         + "The -u option shows the quota and \n"
         + "the usage against the quota without the detailed content summary.";
 

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFSMainOperationsLocalFileSystem.java

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -46,7 +46,7 @@ public class TestFSMainOperationsLocalFileSystem extends FSMainOperationsBaseTes
     fcTarget = FileSystem.getLocal(conf);
     super.setUp();
   }
-  
+
   @Override
   @After
   public void tearDown() throws Exception {

+ 39 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java

@@ -19,11 +19,13 @@ package org.apache.hadoop.fs.viewfs;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.URI;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.ArrayList;
 
+import com.google.common.base.Joiner;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
@@ -34,18 +36,19 @@ import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.viewfs.ConfigUtil;
 import org.apache.hadoop.fs.viewfs.ViewFileSystem.MountPoint;
-import org.apache.hadoop.fs.viewfs.ViewFileSystem;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import static org.apache.hadoop.fs.FileSystemTestHelper.*;
 import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
 
@@ -53,6 +56,8 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.containsString;
 import static org.junit.Assert.*;
 
 
@@ -82,6 +87,8 @@ abstract public class ViewFileSystemBaseTest {
   Path targetTestRoot;
   Configuration conf;
   final FileSystemTestHelper fileSystemTestHelper;
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ViewFileSystemBaseTest.class);
 
   public ViewFileSystemBaseTest() {
       this.fileSystemTestHelper = createFileSystemHelper();
@@ -144,6 +151,10 @@ abstract public class ViewFileSystemBaseTest {
   public void testGetMountPoints() {
     ViewFileSystem viewfs = (ViewFileSystem) fsView;
     MountPoint[] mountPoints = viewfs.getMountPoints();
+    for (MountPoint mountPoint : mountPoints) {
+      LOG.info("MountPoint: " + mountPoint.getSrc() + " => "
+          + Joiner.on(",").join(mountPoint.getTargets()));
+    }
     Assert.assertEquals(getExpectedMountPoints(), mountPoints.length); 
   }
   
@@ -915,4 +926,30 @@ abstract public class ViewFileSystemBaseTest {
     }
   }
 
+  @Test
+  public void testConfLinkSlash() throws Exception {
+    String clusterName = "ClusterX";
+    URI viewFsUri = new URI(FsConstants.VIEWFS_SCHEME, clusterName,
+        "/", null, null);
+
+    Configuration newConf = new Configuration();
+    ConfigUtil.addLink(newConf, clusterName, "/",
+        new Path(targetTestRoot, "/").toUri());
+
+    String mtPrefix = Constants.CONFIG_VIEWFS_PREFIX + "." + clusterName + ".";
+    try {
+      FileSystem.get(viewFsUri, newConf);
+      fail("ViewFileSystem should error out on mount table entry: "
+          + mtPrefix + Constants.CONFIG_VIEWFS_LINK + "." + "/");
+    } catch (Exception e) {
+      if (e instanceof UnsupportedFileSystemException) {
+        String msg = Constants.CONFIG_VIEWFS_LINK_MERGE_SLASH
+            + " is not supported yet.";
+        assertThat(e.getMessage(), containsString(msg));
+      } else {
+        fail("Unexpected exception: " + e.getMessage());
+      }
+    }
+  }
+
 }

+ 5 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java

@@ -27,7 +27,7 @@ import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.viewfs.ConfigUtil;
 import org.apache.hadoop.util.Shell;
-import org.mortbay.log.Log;
+import org.eclipse.jetty.util.log.Log;
 
 
 /**
@@ -84,7 +84,7 @@ public class ViewFileSystemTestSetup {
 
     FileSystem fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
     fsView.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
-    Log.info("Working dir is: " + fsView.getWorkingDirectory());
+    Log.getLog().info("Working dir is: " + fsView.getWorkingDirectory());
     return fsView;
   }
 
@@ -118,12 +118,12 @@ public class ViewFileSystemTestSetup {
     } else { // home dir is at root. Just link the home dir itse
       URI linkTarget = fsTarget.makeQualified(new Path(homeDir)).toUri();
       ConfigUtil.addLink(conf, homeDir, linkTarget);
-      Log.info("Added link for home dir " + homeDir + "->" + linkTarget);
+      Log.getLog().info("Added link for home dir " + homeDir + "->" + linkTarget);
     }
     // Now set the root of the home dir for viewfs
     String homeDirRoot = fsTarget.getHomeDirectory().getParent().toUri().getPath();
     ConfigUtil.setHomeDirConf(conf, homeDirRoot);
-    Log.info("Home dir base for viewfs" + homeDirRoot);  
+    Log.getLog().info("Home dir base for viewfs" + homeDirRoot);
   }
   
   /*
@@ -138,7 +138,7 @@ public class ViewFileSystemTestSetup {
     String firstComponent = path.substring(0, indexOfEnd);
     URI linkTarget = fsTarget.makeQualified(new Path(firstComponent)).toUri();
     ConfigUtil.addLink(conf, firstComponent, linkTarget);
-    Log.info("Added link for " + info + " " 
+    Log.getLog().info("Added link for " + info + " "
         + firstComponent + "->" + linkTarget);    
   }
 }

+ 5 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java

@@ -26,7 +26,7 @@ import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.viewfs.ConfigUtil;
 import org.apache.hadoop.util.Shell;
-import org.mortbay.log.Log;
+import org.eclipse.jetty.util.log.Log;
 
 
 /**
@@ -82,7 +82,7 @@ public class ViewFsTestSetup {
     
     FileContext fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
     fc.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
-    Log.info("Working dir is: " + fc.getWorkingDirectory());
+    Log.getLog().info("Working dir is: " + fc.getWorkingDirectory());
     //System.out.println("SRCOfTests = "+ getTestRootPath(fc, "test"));
     //System.out.println("TargetOfTests = "+ targetOfTests.toUri());
     return fc;
@@ -107,12 +107,12 @@ public class ViewFsTestSetup {
     } else { // home dir is at root. Just link the home dir itse
       URI linkTarget = fsTarget.makeQualified(new Path(homeDir)).toUri();
       ConfigUtil.addLink(conf, homeDir, linkTarget);
-      Log.info("Added link for home dir " + homeDir + "->" + linkTarget);
+      Log.getLog().info("Added link for home dir " + homeDir + "->" + linkTarget);
     }
     // Now set the root of the home dir for viewfs
     String homeDirRoot = fsTarget.getHomeDirectory().getParent().toUri().getPath();
     ConfigUtil.setHomeDirConf(conf, homeDirRoot);
-    Log.info("Home dir base for viewfs" + homeDirRoot);  
+    Log.getLog().info("Home dir base for viewfs" + homeDirRoot);
   }
   
   /*
@@ -128,7 +128,7 @@ public class ViewFsTestSetup {
     String firstComponent = path.substring(0, indexOfEnd);
     URI linkTarget = fsTarget.makeQualified(new Path(firstComponent)).toUri();
     ConfigUtil.addLink(conf, firstComponent, linkTarget);
-    Log.info("Added link for " + info + " " 
+    Log.getLog().info("Added link for " + info + " "
         + firstComponent + "->" + linkTarget);    
   }
 

+ 6 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestAuthenticationSessionCookie.java

@@ -22,7 +22,7 @@ import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Test;
-import org.mortbay.log.Log;
+import org.eclipse.jetty.util.log.Log;
 
 import javax.servlet.*;
 import javax.servlet.http.HttpServletResponse;
@@ -33,6 +33,7 @@ import java.net.HttpURLConnection;
 import java.net.URI;
 import java.net.URL;
 import java.net.HttpCookie;
+import java.util.HashMap;
 import java.util.List;
 
 public class TestAuthenticationSessionCookie {
@@ -71,7 +72,7 @@ public class TestAuthenticationSessionCookie {
     @Override
     public void initFilter(FilterContainer container, Configuration conf) {
       container.addFilter("DummyAuth", DummyAuthenticationFilter.class
-              .getName(), null);
+              .getName(), new HashMap<>());
     }
   }
 
@@ -93,7 +94,7 @@ public class TestAuthenticationSessionCookie {
     @Override
     public void initFilter(FilterContainer container, Configuration conf) {
       container.addFilter("Dummy2Auth", Dummy2AuthenticationFilter.class
-              .getName(), null);
+              .getName(), new HashMap<>());
     }
   }
 
@@ -149,7 +150,7 @@ public class TestAuthenticationSessionCookie {
     String header = conn.getHeaderField("Set-Cookie");
     List<HttpCookie> cookies = HttpCookie.parse(header);
     Assert.assertTrue(!cookies.isEmpty());
-    Log.info(header);
+    Log.getLog().info(header);
     Assert.assertFalse(header.contains("; Expires="));
     Assert.assertTrue("token".equals(cookies.get(0).getValue()));
   }
@@ -171,7 +172,7 @@ public class TestAuthenticationSessionCookie {
     String header = conn.getHeaderField("Set-Cookie");
     List<HttpCookie> cookies = HttpCookie.parse(header);
     Assert.assertTrue(!cookies.isEmpty());
-    Log.info(header);
+    Log.getLog().info(header);
     Assert.assertTrue(header.contains("; Expires="));
     Assert.assertTrue("token".equals(cookies.get(0).getValue()));
   }

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpRequestLog.java

@@ -18,9 +18,9 @@
 package org.apache.hadoop.http;
 
 import org.apache.log4j.Logger;
+import org.eclipse.jetty.server.NCSARequestLog;
+import org.eclipse.jetty.server.RequestLog;
 import org.junit.Test;
-import org.mortbay.jetty.NCSARequestLog;
-import org.mortbay.jetty.RequestLog;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;

+ 30 - 9
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

@@ -28,6 +28,8 @@ import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
+import org.eclipse.jetty.server.ServerConnector;
+import org.eclipse.jetty.util.ajax.JSON;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -36,8 +38,6 @@ import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.mockito.Mockito;
 import org.mockito.internal.util.reflection.Whitebox;
-import org.mortbay.jetty.Connector;
-import org.mortbay.util.ajax.JSON;
 
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
@@ -50,6 +50,7 @@ import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletRequestWrapper;
 import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.MediaType;
 import java.io.IOException;
 import java.io.PrintWriter;
 import java.net.HttpURLConnection;
@@ -66,6 +67,9 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Executor;
 import java.util.concurrent.Executors;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
+
 public class TestHttpServer extends HttpServerFunctionalTest {
   static final Log LOG = LogFactory.getLog(TestHttpServer.class);
   private static HttpServer2 server;
@@ -81,6 +85,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     public void doGet(HttpServletRequest request, 
                       HttpServletResponse response
                       ) throws ServletException, IOException {
+      response.setContentType(MediaType.TEXT_PLAIN + "; " + JettyUtils.UTF_8);
       PrintWriter out = response.getWriter();
       Map<String, String[]> params = request.getParameterMap();
       SortedSet<String> keys = new TreeSet<String>(params.keySet());
@@ -108,6 +113,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     public void doGet(HttpServletRequest request, 
                       HttpServletResponse response
                       ) throws ServletException, IOException {
+      response.setContentType(MediaType.TEXT_PLAIN + "; " + JettyUtils.UTF_8);
       PrintWriter out = response.getWriter();
       SortedSet<String> sortedKeys = new TreeSet<String>();
       Enumeration<String> keys = request.getParameterNames();
@@ -130,7 +136,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     public void doGet(HttpServletRequest request, 
                       HttpServletResponse response
                       ) throws ServletException, IOException {
-      response.setContentType("text/html");
+      response.setContentType(MediaType.TEXT_HTML + "; " + JettyUtils.UTF_8);
       PrintWriter out = response.getWriter();
       out.print("hello world");
       out.close();
@@ -222,7 +228,8 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     conn = (HttpURLConnection)servletUrl.openConnection();
     conn.connect();
     assertEquals(200, conn.getResponseCode());
-    assertEquals("text/plain; charset=utf-8", conn.getContentType());
+    assertEquals(MediaType.TEXT_PLAIN + ";" + JettyUtils.UTF_8,
+        conn.getContentType());
 
     // We should ignore parameters for mime types - ie a parameter
     // ending in .css should not change mime type
@@ -230,14 +237,16 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     conn = (HttpURLConnection)servletUrl.openConnection();
     conn.connect();
     assertEquals(200, conn.getResponseCode());
-    assertEquals("text/plain; charset=utf-8", conn.getContentType());
+    assertEquals(MediaType.TEXT_PLAIN + ";" + JettyUtils.UTF_8,
+        conn.getContentType());
 
     // Servlets that specify text/html should get that content type
     servletUrl = new URL(baseUrl, "/htmlcontent");
     conn = (HttpURLConnection)servletUrl.openConnection();
     conn.connect();
     assertEquals(200, conn.getResponseCode());
-    assertEquals("text/html; charset=utf-8", conn.getContentType());
+    assertEquals(MediaType.TEXT_HTML + ";" + JettyUtils.UTF_8,
+        conn.getContentType());
   }
 
   @Test
@@ -453,7 +462,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     String serverURL = "http://"
         + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
     for (String servlet : new String[] { "conf", "logs", "stacks",
-        "logLevel" }) {
+        "logLevel", "jmx" }) {
       for (String user : new String[] { "userA", "userB", "userC", "userD" }) {
         assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL
             + servlet, user));
@@ -461,6 +470,18 @@ public class TestHttpServer extends HttpServerFunctionalTest {
       assertEquals(HttpURLConnection.HTTP_FORBIDDEN, getHttpStatusCode(
           serverURL + servlet, "userE"));
     }
+
+    // hadoop.security.authorization is set to true while
+    // hadoop.http.authentication.type is `simple` (the default value).
+    // In this case, the static user has administrator access.
+    final String staticUser = conf.get(HADOOP_HTTP_STATIC_USER,
+        DEFAULT_HADOOP_HTTP_STATIC_USER);
+    for (String servlet : new String[] {"conf", "logs", "stacks",
+        "logLevel", "jmx"}) {
+      assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(
+          serverURL + servlet, staticUser));
+    }
+
     myServer.stop();
   }
   
@@ -488,7 +509,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
 
   @SuppressWarnings("unchecked")
   private static Map<String, Object> parse(String jsonString) {
-    return (Map<String, Object>)JSON.parse(jsonString);
+    return (Map<String, Object>) JSON.parse(jsonString);
   }
 
   @Test public void testJersey() throws Exception {
@@ -592,7 +613,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
       // not bound, ephemeral should return requested port (0 for ephemeral)
       List<?> listeners = (List<?>) Whitebox.getInternalState(server,
           "listeners");
-      Connector listener = (Connector) listeners.get(0);
+      ServerConnector listener = (ServerConnector)listeners.get(0);
 
       assertEquals(port, listener.getPort());
       // verify hostname is what was given

+ 5 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java

@@ -146,9 +146,11 @@ public class TestServletFilter extends HttpServerFunctionalTest {
   }
   
   static public class ErrorFilter extends SimpleFilter {
+    static final String EXCEPTION_MESSAGE =
+        "Throwing the exception from Filter init";
     @Override
     public void init(FilterConfig arg0) throws ServletException {
-      throw new ServletException("Throwing the exception from Filter init");
+      throw new ServletException(EXCEPTION_MESSAGE);
     }
 
     /** Configuration for the filter */
@@ -174,7 +176,8 @@ public class TestServletFilter extends HttpServerFunctionalTest {
       http.start();
       fail("expecting exception");
     } catch (IOException e) {
-      assertTrue( e.getMessage().contains("Problem in starting http server. Server handlers failed"));
+      assertEquals("Problem starting http server", e.getMessage());
+      assertEquals(ErrorFilter.EXCEPTION_MESSAGE, e.getCause().getMessage());
     }
   }
   

+ 3 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/resource/JerseyResource.java

@@ -32,7 +32,8 @@ import javax.ws.rs.core.Response;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.mortbay.util.ajax.JSON;
+import org.apache.hadoop.http.JettyUtils;
+import org.eclipse.jetty.util.ajax.JSON;
 
 /**
  * A simple Jersey resource class TestHttpServer.
@@ -48,7 +49,7 @@ public class JerseyResource {
 
   @GET
   @Path("{" + PATH + ":.*}")
-  @Produces({MediaType.APPLICATION_JSON})
+  @Produces({MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
   public Response get(
       @PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path,
       @QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op

+ 36 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestNativeXORRawCoder.java

@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
+import org.junit.Assume;
+import org.junit.Before;
+
+/**
+ * Test NativeXOR encoding and decoding.
+ */
+public class TestNativeXORRawCoder extends TestXORRawCoderBase {
+
+  @Before
+  public void setup() {
+    Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
+    this.encoderClass = NativeXORRawEncoder.class;
+    this.decoderClass = NativeXORRawDecoder.class;
+    setAllowDump(true);
+  }
+}

+ 2 - 36
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestXORRawCoder.java

@@ -18,49 +18,15 @@
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.junit.Before;
-import org.junit.Test;
 
 /**
- * Test XOR encoding and decoding.
+ * Test pure Java XOR encoding and decoding.
  */
-public class TestXORRawCoder extends TestRawCoderBase {
+public class TestXORRawCoder extends TestXORRawCoderBase {
 
   @Before
   public void setup() {
     this.encoderClass = XORRawEncoder.class;
     this.decoderClass = XORRawDecoder.class;
   }
-
-  @Test
-  public void testCoding_10x1_erasing_d0() {
-    prepare(null, 10, 1, new int[] {0}, new int[0]);
-    testCodingDoMixAndTwice();
-  }
-
-  @Test
-  public void testCoding_10x1_erasing_p0() {
-    prepare(null, 10, 1, new int[0], new int[] {0});
-    testCodingDoMixAndTwice();
-  }
-
-  @Test
-  public void testCoding_10x1_erasing_d5() {
-    prepare(null, 10, 1, new int[]{5}, new int[0]);
-    testCodingDoMixAndTwice();
-  }
-
-  @Test
-  public void testCodingNegative_10x1_erasing_too_many() {
-    prepare(null, 10, 1, new int[]{2}, new int[]{0});
-    testCodingWithErasingTooMany();
-  }
-
-  @Test
-  public void testCodingNegative_10x1_erasing_d5() {
-    prepare(null, 10, 1, new int[]{5}, new int[0]);
-    testCodingWithBadInput(true);
-    testCodingWithBadOutput(false);
-    testCodingWithBadInput(true);
-    testCodingWithBadOutput(false);
-  }
 }

+ 59 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestXORRawCoderBase.java

@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.junit.Test;
+
+/**
+ * Test base for raw XOR coders.
+ */
+public abstract class TestXORRawCoderBase extends TestRawCoderBase {
+
+  @Test
+  public void testCoding_10x1_erasing_d0() {
+    prepare(null, 10, 1, new int[] {0}, new int[0]);
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCoding_10x1_erasing_p0() {
+    prepare(null, 10, 1, new int[0], new int[] {0});
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCoding_10x1_erasing_d5() {
+    prepare(null, 10, 1, new int[]{5}, new int[0]);
+    testCodingDoMixAndTwice();
+  }
+
+  @Test
+  public void testCodingNegative_10x1_erasing_too_many() {
+    prepare(null, 10, 1, new int[]{2}, new int[]{0});
+    testCodingWithErasingTooMany();
+  }
+
+  @Test
+  public void testCodingNegative_10x1_erasing_d5() {
+    prepare(null, 10, 1, new int[]{5}, new int[0]);
+    testCodingWithBadInput(true);
+    testCodingWithBadOutput(false);
+    testCodingWithBadInput(true);
+    testCodingWithBadOutput(false);
+  }
+}

+ 36 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestXORRawCoderInteroperable1.java

@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
+import org.junit.Assume;
+import org.junit.Before;
+
+/**
+ * Test interoperability of the pure Java XOR encoder with the native XOR decoder.
+ */
+public class TestXORRawCoderInteroperable1 extends TestXORRawCoderBase {
+
+  @Before
+  public void setup() {
+    Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
+    this.encoderClass = XORRawEncoder.class;
+    this.decoderClass = NativeXORRawDecoder.class;
+    setAllowDump(true);
+  }
+}

+ 37 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/TestXORRawCoderInteroperable2.java

@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
+import org.junit.Assume;
+import org.junit.Before;
+
+/**
+ * Test interoperability of the native XOR encoder with the pure Java XOR decoder.
+ */
+public class TestXORRawCoderInteroperable2 extends TestXORRawCoderBase {
+
+  @Before
+  public void setup() {
+    Assume.assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
+    this.encoderClass = NativeXORRawEncoder.class;
+    this.decoderClass = XORRawDecoder.class;
+    setAllowDump(true);
+  }
+
+}

+ 24 - 28
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/Timer.java

@@ -17,8 +17,6 @@
 package org.apache.hadoop.io.file.tfile;
 
 import java.io.IOException;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
 
 import org.apache.hadoop.util.Time;
 
@@ -30,36 +28,34 @@ import org.apache.hadoop.util.Time;
 public  class Timer {
   long startTimeEpoch;
   long finishTimeEpoch;
-  private DateFormat formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
   
   public void startTime() throws IOException {
-      startTimeEpoch = Time.now();
-    }
+    startTimeEpoch = Time.now();
+  }
 
-    public void stopTime() throws IOException {
-      finishTimeEpoch = Time.now();
-    }
+  public void stopTime() throws IOException {
+    finishTimeEpoch = Time.now();
+  }
 
-    public long getIntervalMillis() throws IOException {
-      return finishTimeEpoch - startTimeEpoch;
-    }
-  
-    public void printlnWithTimestamp(String message) throws IOException {
-      System.out.println(formatCurrentTime() + "  " + message);
-    }
-  
-    public String formatTime(long millis) {
-      return formatter.format(millis);
-    }
-    
-    public String getIntervalString() throws IOException {
-      long time = getIntervalMillis();
-      return formatTime(time);
-    }
-    
-    public String formatCurrentTime() {
-      return formatTime(Time.now());
-    }
+  public long getIntervalMillis() throws IOException {
+    return finishTimeEpoch - startTimeEpoch;
+  }
+
+  public void printlnWithTimestamp(String message) throws IOException {
+    System.out.println(formatCurrentTime() + "  " + message);
+  }
+
+  public String formatTime(long millis) {
+    return Time.formatTime(millis);
+  }
+
+  public String getIntervalString() throws IOException {
+    long time = getIntervalMillis();
+    return formatTime(time);
+  }
 
+  public String formatCurrentTime() {
+    return formatTime(Time.now());
+  }
 }
 

+ 162 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableMetrics.java

@@ -19,9 +19,7 @@
 package org.apache.hadoop.metrics2.lib;
 
 import static org.apache.hadoop.metrics2.lib.Interns.info;
-import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
-import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
-import static org.apache.hadoop.test.MetricsAsserts.mockMetricsRecordBuilder;
+import static org.apache.hadoop.test.MetricsAsserts.*;
 import static org.mockito.AdditionalMatchers.eq;
 import static org.mockito.AdditionalMatchers.geq;
 import static org.mockito.AdditionalMatchers.leq;
@@ -29,10 +27,15 @@ import static org.mockito.Matchers.anyLong;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
+import static org.junit.Assert.*;
 
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Random;
+import java.util.concurrent.CountDownLatch;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.util.Quantile;
 import org.junit.Test;
@@ -42,6 +45,7 @@ import org.junit.Test;
  */
 public class TestMutableMetrics {
 
+  private static final Log LOG = LogFactory.getLog(TestMutableMetrics.class);
   private final double EPSILON = 1e-42;
 
   /**
@@ -129,6 +133,161 @@ public class TestMutableMetrics {
     assertGauge("BarAvgTime", 0.0, rb);
   }
 
+  @Test public void testMutableRatesWithAggregationInit() {
+    MetricsRecordBuilder rb = mockMetricsRecordBuilder();
+    MutableRatesWithAggregation rates = new MutableRatesWithAggregation();
+
+    rates.init(TestProtocol.class);
+    rates.snapshot(rb, false);
+
+    assertCounter("FooNumOps", 0L, rb);
+    assertGauge("FooAvgTime", 0.0, rb);
+    assertCounter("BarNumOps", 0L, rb);
+    assertGauge("BarAvgTime", 0.0, rb);
+  }
+
+  @Test public void testMutableRatesWithAggregationSingleThread() {
+    MutableRatesWithAggregation rates = new MutableRatesWithAggregation();
+
+    rates.add("foo", 1);
+    rates.add("bar", 5);
+
+    MetricsRecordBuilder rb = mockMetricsRecordBuilder();
+    rates.snapshot(rb, false);
+    assertCounter("FooNumOps", 1L, rb);
+    assertGauge("FooAvgTime", 1.0, rb);
+    assertCounter("BarNumOps", 1L, rb);
+    assertGauge("BarAvgTime", 5.0, rb);
+
+    rates.add("foo", 1);
+    rates.add("foo", 3);
+    rates.add("bar", 6);
+
+    rb = mockMetricsRecordBuilder();
+    rates.snapshot(rb, false);
+    assertCounter("FooNumOps", 3L, rb);
+    assertGauge("FooAvgTime", 2.0, rb);
+    assertCounter("BarNumOps", 2L, rb);
+    assertGauge("BarAvgTime", 6.0, rb);
+  }
+
+  @Test public void testMutableRatesWithAggregationManyThreads()
+      throws InterruptedException {
+    final MutableRatesWithAggregation rates = new MutableRatesWithAggregation();
+
+    final int n = 10;
+    long[] opCount = new long[n];
+    double[] opTotalTime = new double[n];
+
+    for (int i = 0; i < n; i++) {
+      opCount[i] = 0;
+      opTotalTime[i] = 0;
+      // Initialize so that the getLongCounter() method doesn't complain
+      rates.add("metric" + i, 0);
+    }
+
+    Thread[] threads = new Thread[n];
+    final CountDownLatch firstAddsFinished = new CountDownLatch(threads.length);
+    final CountDownLatch firstSnapshotsFinished = new CountDownLatch(1);
+    final CountDownLatch secondAddsFinished =
+        new CountDownLatch(threads.length);
+    final CountDownLatch secondSnapshotsFinished = new CountDownLatch(1);
+    long seed = new Random().nextLong();
+    LOG.info("Random seed = " + seed);
+    final Random sleepRandom = new Random(seed);
+    for (int tIdx = 0; tIdx < threads.length; tIdx++) {
+      final int threadIdx = tIdx;
+      threads[threadIdx] = new Thread() {
+        @Override
+        public void run() {
+          try {
+            for (int i = 0; i < 1000; i++) {
+              rates.add("metric" + (i % n), (i / n) % 2 == 0 ? 1 : 2);
+              // Sleep so additions can be interleaved with snapshots
+              Thread.sleep(sleepRandom.nextInt(5));
+            }
+            firstAddsFinished.countDown();
+
+            // Make sure all threads stay alive long enough for the first
+            // snapshot to complete; else their metrics may be lost to GC
+            firstSnapshotsFinished.await();
+
+            // Let half the threads continue with more metrics and let half die
+            if (threadIdx % 2 == 0) {
+              for (int i = 0; i < 1000; i++) {
+                rates.add("metric" + (i % n), (i / n) % 2 == 0 ? 1 : 2);
+              }
+              secondAddsFinished.countDown();
+              secondSnapshotsFinished.await();
+            } else {
+              secondAddsFinished.countDown();
+            }
+          } catch (InterruptedException e) {
+            // Ignore
+          }
+        }
+      };
+    }
+    for (Thread t : threads) {
+      t.start();
+    }
+    // Snapshot concurrently with additions but aggregate the totals into
+    // opCount / opTotalTime
+    for (int i = 0; i < 100; i++) {
+      snapshotMutableRatesWithAggregation(rates, opCount, opTotalTime);
+      Thread.sleep(sleepRandom.nextInt(20));
+    }
+    firstAddsFinished.await();
+    // Final snapshot to grab any remaining metrics and then verify that
+    // the totals are as expected
+    snapshotMutableRatesWithAggregation(rates, opCount, opTotalTime);
+    for (int i = 0; i < n; i++) {
+      assertEquals("metric" + i + " count", 1001, opCount[i]);
+      assertEquals("metric" + i + " total", 1500, opTotalTime[i], 1.0);
+    }
+    firstSnapshotsFinished.countDown();
+
+    // After half of the threads die, ensure that the remaining ones still
+    // add metrics correctly and that snapshot occurs correctly
+    secondAddsFinished.await();
+    snapshotMutableRatesWithAggregation(rates, opCount, opTotalTime);
+    for (int i = 0; i < n; i++) {
+      assertEquals("metric" + i + " count", 1501, opCount[i]);
+      assertEquals("metric" + i + " total", 2250, opTotalTime[i], 1.0);
+    }
+    secondSnapshotsFinished.countDown();
+  }
+
+  private static void snapshotMutableRatesWithAggregation(
+      MutableRatesWithAggregation rates, long[] opCount, double[] opTotalTime) {
+    MetricsRecordBuilder rb = mockMetricsRecordBuilder();
+    rates.snapshot(rb, true);
+    for (int i = 0; i < opCount.length; i++) {
+      long prevOpCount = opCount[i];
+      long newOpCount = getLongCounter("Metric" + i + "NumOps", rb);
+      opCount[i] = newOpCount;
+      double avgTime = getDoubleGauge("Metric" + i + "AvgTime", rb);
+      opTotalTime[i] += avgTime * (newOpCount - prevOpCount);
+    }
+  }
+
+  /**
+   * Tests that when using {@link MutableStat#add(long, long)}, even with a high
+   * sample count, the mean does not lose accuracy.
+   */
+  @Test public void testMutableStatWithBulkAdd() {
+    MetricsRecordBuilder rb = mockMetricsRecordBuilder();
+    MetricsRegistry registry = new MetricsRegistry("test");
+    MutableStat stat = registry.newStat("Test", "Test", "Ops", "Val", false);
+
+    stat.add(1000, 1000);
+    stat.add(1000, 2000);
+    registry.snapshot(rb, false);
+
+    assertCounter("TestNumOps", 2000L, rb);
+    assertGauge("TestAvgVal", 1.5, rb);
+  }
+
   /**
    * Ensure that quantile estimates from {@link MutableQuantiles} are within
    * specified error bounds.

+ 144 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUGIWithMiniKdc.java

@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.authentication.util.KerberosUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.util.PlatformName;
+import org.apache.log4j.Level;
+import org.junit.After;
+import org.junit.Test;
+
+import javax.security.auth.Subject;
+import javax.security.auth.kerberos.KerberosPrincipal;
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.LoginContext;
+import java.io.File;
+import java.security.Principal;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
+
+/**
+ * Test {@link UserGroupInformation} with a minikdc.
+ */
+public class TestUGIWithMiniKdc {
+
+  private static MiniKdc kdc;
+
+  @After
+  public void teardown() {
+    UserGroupInformation.reset();
+    if (kdc != null) {
+      kdc.stop();
+    }
+  }
+
+  private void setupKdc() throws Exception {
+    Properties kdcConf = MiniKdc.createConf();
+    // tgt expire time = 30 seconds
+    kdcConf.setProperty(MiniKdc.MAX_TICKET_LIFETIME, "30");
+    kdcConf.setProperty(MiniKdc.MIN_TICKET_LIFETIME, "30");
+    File kdcDir = new File(System.getProperty("test.dir", "target"));
+    kdc = new MiniKdc(kdcConf, kdcDir);
+    kdc.start();
+  }
+
+  @Test(timeout = 120000)
+  public void testAutoRenewalThreadRetryWithKdc() throws Exception {
+    GenericTestUtils.setLogLevel(UserGroupInformation.LOG, Level.DEBUG);
+    final Configuration conf = new Configuration();
+    // Relogin every 1 second
+    conf.setLong(HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN, 1);
+    SecurityUtil.setAuthenticationMethod(
+        UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
+    UserGroupInformation.setConfiguration(conf);
+
+    LoginContext loginContext = null;
+    try {
+      final String principal = "foo";
+      final File workDir = new File(System.getProperty("test.dir", "target"));
+      final File keytab = new File(workDir, "foo.keytab");
+      final Set<Principal> principals = new HashSet<>();
+      principals.add(new KerberosPrincipal(principal));
+      setupKdc();
+      kdc.createPrincipal(keytab, principal);
+
+      // client login
+      final Subject subject =
+          new Subject(false, principals, new HashSet<>(), new HashSet<>());
+
+      loginContext = new LoginContext("", subject, null,
+          new javax.security.auth.login.Configuration() {
+            @Override
+            public AppConfigurationEntry[] getAppConfigurationEntry(
+                String name) {
+              Map<String, String> options = new HashMap<>();
+              options.put("principal", principal);
+              options.put("refreshKrb5Config", "true");
+              if (PlatformName.IBM_JAVA) {
+                options.put("useKeytab", keytab.getPath());
+                options.put("credsType", "both");
+              } else {
+                options.put("keyTab", keytab.getPath());
+                options.put("useKeyTab", "true");
+                options.put("storeKey", "true");
+                options.put("doNotPrompt", "true");
+                options.put("useTicketCache", "true");
+                options.put("renewTGT", "true");
+                options.put("isInitiator", Boolean.toString(true));
+              }
+              String ticketCache = System.getenv("KRB5CCNAME");
+              if (ticketCache != null) {
+                options.put("ticketCache", ticketCache);
+              }
+              options.put("debug", "true");
+              return new AppConfigurationEntry[] {new AppConfigurationEntry(
+                  KerberosUtil.getKrb5LoginModuleName(),
+                  AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+                  options)};
+            }
+          });
+      loginContext.login();
+      final Subject loginSubject = loginContext.getSubject();
+      UserGroupInformation.loginUserFromSubject(loginSubject);
+
+      // Verify retry happens. Do not verify retry count to reduce flakiness.
+      // Detailed back-off logic is tested separately in
+      // TestUserGroupInformation#testGetNextRetryTime
+      LambdaTestUtils.await(30000, 500,
+          () -> {
+            final int count =
+                UserGroupInformation.metrics.getRenewalFailures().value();
+            UserGroupInformation.LOG.info("Renew failure count is {}", count);
+            return count > 0;
+          });
+    } finally {
+      if (loginContext != null) {
+        loginContext.logout();
+      }
+    }
+  }
+}

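TestUGIWithMiniKdc drives the login through a hand-built JAAS Configuration so it can control the ticket-cache and renewal options directly. Outside of a test the usual route is UGI's keytab login; a minimal sketch against a throwaway MiniKdc could look like this, where the working directory, principal, and keytab names are placeholders.

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;

// Minimal sketch: stand up a throwaway KDC, create a principal plus keytab,
// and log in through UGI's keytab path instead of hand-rolled JAAS options.
public class KeytabLoginSketch {
  public static void main(String[] args) throws Exception {
    File workDir = new File("target");
    MiniKdc kdc = new MiniKdc(MiniKdc.createConf(), workDir);
    kdc.start();
    try {
      File keytab = new File(workDir, "demo.keytab");
      kdc.createPrincipal(keytab, "demo");

      Configuration conf = new Configuration();
      SecurityUtil.setAuthenticationMethod(
          UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
      UserGroupInformation.setConfiguration(conf);

      // UGI keeps the keytab reference and can re-login when the TGT expires.
      UserGroupInformation ugi = UserGroupInformation
          .loginUserFromKeytabAndReturnUGI("demo@" + kdc.getRealm(),
              keytab.getAbsolutePath());
      System.out.println("Logged in as " + ugi.getUserName());
    } finally {
      kdc.stop();
    }
  }
}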
+ 93 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java

@@ -20,6 +20,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.TestRpcBase.TestTokenIdentifier;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
@@ -27,13 +29,18 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;
@@ -49,10 +56,13 @@ import java.lang.reflect.Method;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.ConcurrentModificationException;
+import java.util.Date;
 import java.util.LinkedHashSet;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
@@ -74,6 +84,9 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 public class TestUserGroupInformation {
+
+  static final Logger LOG = LoggerFactory.getLogger(
+      TestUserGroupInformation.class);
   final private static String USER_NAME = "user1@HADOOP.APACHE.ORG";
   final private static String GROUP1_NAME = "group1";
   final private static String GROUP2_NAME = "group2";
@@ -1027,6 +1040,86 @@ public class TestUserGroupInformation {
         return null;
       }
     });
+  }
+
+  @Test
+  public void testGetNextRetryTime() throws Exception {
+    GenericTestUtils.setLogLevel(UserGroupInformation.LOG, Level.DEBUG);
+    final long reloginInterval = 1;
+    final long reloginIntervalMs = reloginInterval * 1000;
+    // Relogin happens every 1 second.
+    conf.setLong(HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN, reloginInterval);
+    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
+    UserGroupInformation.setConfiguration(conf);
+
+    // Suppose tgt start time is now, end time is 20 seconds from now.
+    final long now = Time.now();
+    final Date endDate = new Date(now + 20000);
+
+    // Explicitly test the exponential back-off logic.
+    // Suppose some time (10 seconds) passed.
+    // Verify exponential backoff and max=(login interval before endTime).
+    final long currentTime = now + 10000;
+    final long endTime = endDate.getTime();
+
+    assertEquals(0, UserGroupInformation.metrics.getRenewalFailures().value());
+    RetryPolicy rp = RetryPolicies.exponentialBackoffRetry(Long.SIZE - 2,
+        1000, TimeUnit.MILLISECONDS);
+    long lastRetry =
+        UserGroupInformation.getNextTgtRenewalTime(endTime, currentTime, rp);
+    assertWithinBounds(
+        UserGroupInformation.metrics.getRenewalFailures().value(),
+        lastRetry, reloginIntervalMs, currentTime);
+
+    UserGroupInformation.metrics.getRenewalFailures().incr();
+    lastRetry =
+        UserGroupInformation.getNextTgtRenewalTime(endTime, currentTime, rp);
+    assertWithinBounds(
+        UserGroupInformation.metrics.getRenewalFailures().value(),
+        lastRetry, reloginIntervalMs, currentTime);
+
+    UserGroupInformation.metrics.getRenewalFailures().incr();
+    lastRetry =
+        UserGroupInformation.getNextTgtRenewalTime(endTime, currentTime, rp);
+    assertWithinBounds(
+        UserGroupInformation.metrics.getRenewalFailures().value(),
+        lastRetry, reloginIntervalMs, currentTime);
+
+    UserGroupInformation.metrics.getRenewalFailures().incr();
+    lastRetry =
+        UserGroupInformation.getNextTgtRenewalTime(endTime, currentTime, rp);
+    assertWithinBounds(
+        UserGroupInformation.metrics.getRenewalFailures().value(),
+        lastRetry, reloginIntervalMs, currentTime);
+
+    // last try should be right before expiry.
+    UserGroupInformation.metrics.getRenewalFailures().incr();
+    lastRetry =
+        UserGroupInformation.getNextTgtRenewalTime(endTime, currentTime, rp);
+    String str =
+        "5th retry, now:" + currentTime + ", retry:" + lastRetry;
+    LOG.info(str);
+    assertEquals(str, endTime - reloginIntervalMs, lastRetry);
+
+    // make sure no more retries after (tgt endTime - login interval).
+    UserGroupInformation.metrics.getRenewalFailures().incr();
+    lastRetry =
+        UserGroupInformation.getNextTgtRenewalTime(endTime, currentTime, rp);
+    str = "overflow retry, now:" + currentTime + ", retry:" + lastRetry;
+    LOG.info(str);
+    assertEquals(str, endTime - reloginIntervalMs, lastRetry);
+  }
 
+  private void assertWithinBounds(final int numFailures, final long lastRetry,
+      final long reloginIntervalMs, long now) {
+    // The backoff doubles with each failure: the accepted window is
+    // [0.5x, 1.5x) of reloginIntervalMs << (numFailures + 1), added to now.
+    int shift = numFailures + 1;
+    final long lower = now + reloginIntervalMs * (long)((1 << shift) * 0.5);
+    final long upper = now + reloginIntervalMs * (long)((1 << shift) * 1.5);
+    final String str = "Retry#" + (numFailures + 1) + ", now:" + now
+        + ", lower bound:" + lower + ", upper bound:" + upper
+        + ", retry:" + lastRetry;
+    LOG.info(str);
+    assertTrue(str, lower <= lastRetry && lastRetry < upper);
   }
 }

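testGetNextRetryTime above pins the renewal schedule to the randomized exponential backoff that RetryPolicies.exponentialBackoffRetry produces: each failure roughly doubles the delay, jittered between 0.5x and 1.5x, and the resulting retry time is clamped to one relogin interval before the TGT end time. A standalone sketch of querying that policy with illustrative numbers (not the production renewal thread) might be:

import java.io.IOException;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

// Minimal sketch: print the jittered exponential backoff delays the policy
// proposes for the first few failures, clamped so the retry never lands
// later than one relogin interval before the TGT expires.
public class BackoffSketch {
  public static void main(String[] args) throws Exception {
    final long reloginIntervalMs = 1000;
    final long tgtEndTime = System.currentTimeMillis() + 20000;

    RetryPolicy policy = RetryPolicies.exponentialBackoffRetry(
        Long.SIZE - 2, reloginIntervalMs, TimeUnit.MILLISECONDS);

    for (int failures = 0; failures < 6; failures++) {
      RetryPolicy.RetryAction action = policy.shouldRetry(
          new IOException("renewal failed"), failures, 0, false);
      // delayMillis is roughly reloginIntervalMs * 2^(failures + 1),
      // jittered between 0.5x and 1.5x of that value.
      long proposed = System.currentTimeMillis() + action.delayMillis;
      long next = Math.min(proposed, tgtEndTime - reloginIntervalMs);
      System.out.println("failure " + failures + ": delay="
          + action.delayMillis + "ms, next retry at " + next);
    }
  }
}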
+ 50 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestDelegationTokenAuthenticationHandlerWithMocks.java

@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.security.token.delegation.web;
 
+import static org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator.DelegationTokenOperation.*;
+
+import org.apache.commons.lang.mutable.MutableBoolean;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
@@ -458,4 +461,51 @@ public class TestDelegationTokenAuthenticationHandlerWithMocks {
     Assert.assertFalse(handler.managementOperation(null, request, response));
     Mockito.verify(response).setStatus(HttpServletResponse.SC_UNAUTHORIZED);
   }
+
+  @Test
+  public void testWriterNotClosed() throws Exception {
+    Properties conf = new Properties();
+    conf.put(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND, "foo");
+    conf.put(DelegationTokenAuthenticationHandler.JSON_MAPPER_PREFIX
+        + "AUTO_CLOSE_TARGET", "false");
+    DelegationTokenAuthenticationHandler noAuthCloseHandler =
+        new MockDelegationTokenAuthenticationHandler();
+    try {
+      noAuthCloseHandler.initTokenManager(conf);
+      noAuthCloseHandler.initJsonFactory(conf);
+
+      DelegationTokenAuthenticator.DelegationTokenOperation op =
+          GETDELEGATIONTOKEN;
+      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+      HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+      Mockito.when(request.getQueryString()).thenReturn(
+          DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
+      Mockito.when(request.getMethod()).thenReturn(op.getHttpMethod());
+
+      AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
+      Mockito.when(token.getUserName()).thenReturn("user");
+      final MutableBoolean closed = new MutableBoolean();
+      PrintWriter printWriterCloseCount = new PrintWriter(new StringWriter()) {
+        @Override
+        public void close() {
+          closed.setValue(true);
+          super.close();
+        }
+
+        @Override
+        public void write(String str) {
+          if (closed.booleanValue()) {
+            throw new RuntimeException("already closed!");
+          }
+          super.write(str);
+        }
+
+      };
+      Mockito.when(response.getWriter()).thenReturn(printWriterCloseCount);
+      Assert.assertFalse(noAuthCloseHandler.managementOperation(token, request,
+          response));
+    } finally {
+      noAuthCloseHandler.destroy();
+    }
+  }
 }

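testWriterNotClosed guards against Jackson closing the servlet response writer while the handler serializes the token; the JSON_MAPPER_PREFIX + "AUTO_CLOSE_TARGET" property corresponds to Jackson's JsonGenerator.Feature.AUTO_CLOSE_TARGET. A minimal standalone sketch of that feature in isolation (not the handler's actual wiring) is:

import java.io.StringWriter;
import java.util.Collections;

import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.ObjectMapper;

// Minimal sketch: with AUTO_CLOSE_TARGET disabled, writeValue() flushes the
// JSON but leaves the underlying Writer open, so the caller can keep using
// it, e.g. a servlet that still owns the response writer.
public class AutoCloseTargetSketch {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper()
        .configure(JsonGenerator.Feature.AUTO_CLOSE_TARGET, false);

    StringWriter out = new StringWriter();
    mapper.writeValue(out, Collections.singletonMap("Token", "abc123"));

    // Still writable: Jackson did not close 'out' on our behalf.
    out.write(System.lineSeparator());
    out.write("written after the JSON payload");
    System.out.println(out);
  }
}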
Some files were not shown because too many files changed in this diff