
Merge branch 'trunk' into HDFS-7240

Anu Engineer, 9 years ago
parent commit db22affd79
100 files changed, 3122 additions and 881 deletions
  1. 209 0
      LICENSE.txt
  2. 172 0
      NOTICE.txt
  3. 1 8
      dev-support/docker/Dockerfile
  4. 2 2
      hadoop-assemblies/pom.xml
  5. 44 2
      hadoop-build-tools/pom.xml
  6. 2 2
      hadoop-client/pom.xml
  7. 2 2
      hadoop-common-project/hadoop-annotations/pom.xml
  8. 2 2
      hadoop-common-project/hadoop-auth-examples/pom.xml
  9. 2 2
      hadoop-common-project/hadoop-auth/pom.xml
  10. 12 12
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
  11. 4 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
  12. 13 0
      hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  13. 16 2
      hadoop-common-project/hadoop-common/pom.xml
  14. 3 3
      hadoop-common-project/hadoop-common/src/main/bin/hadoop
  15. 2 2
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd
  16. 4 0
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
  17. 3 3
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh
  18. 41 34
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
  19. 0 41
      hadoop-common-project/hadoop-common/src/main/bin/rcc
  20. 5 5
      hadoop-common-project/hadoop-common/src/main/bin/workers.sh
  21. 28 23
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
  22. 2 2
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-user-functions.sh.example
  23. 15 2
      hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
  24. 0 0
      hadoop-common-project/hadoop-common/src/main/conf/workers
  25. 84 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfigRedactor.java
  26. 4 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  27. 10 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
  28. 9 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
  29. 23 8
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
  30. 23 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
  31. 3 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
  32. 23 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java
  33. 13 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  34. 419 70
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  35. 7 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java
  36. 3 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java
  37. 7 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java
  38. 4 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java
  39. 9 11
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
  40. 31 30
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  41. 20 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java
  42. 58 15
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
  43. 25 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GetSpaceUsed.java
  44. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobExpander.java
  45. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java
  46. 16 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobalStorageStatistics.java
  47. 142 39
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
  48. 2 10
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
  49. 59 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java
  50. 22 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnionStorageStatistics.java
  51. 4 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/WindowsGetSpaceUsed.java
  52. 15 8
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionParser.java
  53. 17 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
  54. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
  55. 54 26
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java
  56. 20 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
  57. 77 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  58. 6 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
  59. 15 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
  60. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java
  61. 28 14
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java
  62. 106 37
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
  63. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java
  64. 1 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
  65. 1 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java
  66. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
  67. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
  68. 45 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/HHXORErasureCodec.java
  69. 84 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java
  70. 81 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawEncoder.java
  71. 23 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteArrayDecodingState.java
  72. 23 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteArrayEncodingState.java
  73. 13 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteBufferDecodingState.java
  74. 10 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteBufferEncodingState.java
  75. 0 12
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/CoderUtil.java
  76. 66 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeRSRawDecoder.java
  77. 65 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeRSRawEncoder.java
  78. 39 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeRSRawErasureCoderFactory.java
  79. 17 39
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawDecoderLegacy.java
  80. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
  81. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/GaloisField.java
  82. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java
  83. 68 36
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java
  84. 3 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/CallReturn.java
  85. 120 74
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
  86. 1 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java
  87. 22 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package-info.java
  88. 0 48
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package.html
  89. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
  90. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java
  91. 14 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  92. 16 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
  93. 21 13
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
  94. 37 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
  95. 98 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ResponseBuffer.java
  96. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcConstants.java
  97. 184 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java
  98. 96 118
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  99. 4 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/package-info.java
  100. 0 23
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/package.html

+ 209 - 0
LICENSE.txt

@@ -414,6 +414,38 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+For hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/compat/{fstatat|openat|unlinkat}.h:
+
+Copyright (c) 2012 The FreeBSD Foundation
+All rights reserved.
+
+This software was developed by Pawel Jakub Dawidek under sponsorship from
+the FreeBSD Foundation.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
+
+=============
+
 The binary distribution of this product bundles binaries of leveldb
 (http://code.google.com/p/leveldb/), which is available under the following
 license:
@@ -1659,3 +1691,180 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 The views and conclusions contained in the software and documentation are those
 of the authors and should not be interpreted as representing official policies,
 either expressed or implied, of the FreeBSD Project.
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+Java Concurrency in Practice book annotations 1.0
+--------------------------------------------------------------------------------
+THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS
+PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR
+OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS
+LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+
+BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE
+BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED
+HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.
+
+1. Definitions
+
+"Collective Work" means a work, such as a periodical issue, anthology or
+encyclopedia, in which the Work in its entirety in unmodified form, along with a
+number of other contributions, constituting separate and independent works in
+themselves, are assembled into a collective whole. A work that constitutes a
+Collective Work will not be considered a Derivative Work (as defined below) for
+the purposes of this License.
+"Derivative Work" means a work based upon the Work or upon the Work and other
+pre-existing works, such as a translation, musical arrangement, dramatization,
+fictionalization, motion picture version, sound recording, art reproduction,
+abridgment, condensation, or any other form in which the Work may be recast,
+transformed, or adapted, except that a work that constitutes a Collective Work
+will not be considered a Derivative Work for the purpose of this License. For
+the avoidance of doubt, where the Work is a musical composition or sound
+recording, the synchronization of the Work in timed-relation with a moving image
+("synching") will be considered a Derivative Work for the purpose of this
+License.
+"Licensor" means the individual or entity that offers the Work under the terms
+of this License.
+"Original Author" means the individual or entity who created the Work.
+"Work" means the copyrightable work of authorship offered under the terms of
+this License.
+"You" means an individual or entity exercising rights under this License who has
+not previously violated the terms of this License with respect to the Work, or
+who has received express permission from the Licensor to exercise rights under
+this License despite a previous violation.
+2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or
+restrict any rights arising from fair use, first sale or other limitations on
+the exclusive rights of the copyright owner under copyright law or other
+applicable laws.
+
+3. License Grant. Subject to the terms and conditions of this License, Licensor
+hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the
+duration of the applicable copyright) license to exercise the rights in the Work
+as stated below:
+
+to reproduce the Work, to incorporate the Work into one or more Collective
+Works, and to reproduce the Work as incorporated in the Collective Works;
+to create and reproduce Derivative Works;
+to distribute copies or phonorecords of, display publicly, perform publicly, and
+perform publicly by means of a digital audio transmission the Work including as
+incorporated in Collective Works;
+to distribute copies or phonorecords of, display publicly, perform publicly, and
+perform publicly by means of a digital audio transmission Derivative Works.
+For the avoidance of doubt, where the work is a musical composition:
+
+Performance Royalties Under Blanket Licenses. Licensor waives the exclusive
+right to collect, whether individually or via a performance rights society (e.g.
+ASCAP, BMI, SESAC), royalties for the public performance or public digital
+performance (e.g. webcast) of the Work.
+Mechanical Rights and Statutory Royalties. Licensor waives the exclusive right
+to collect, whether individually or via a music rights agency or designated
+agent (e.g. Harry Fox Agency), royalties for any phonorecord You create from the
+Work ("cover version") and distribute, subject to the compulsory license created
+by 17 USC Section 115 of the US Copyright Act (or the equivalent in other
+jurisdictions).
+Webcasting Rights and Statutory Royalties. For the avoidance of doubt, where the
+Work is a sound recording, Licensor waives the exclusive right to collect,
+whether individually or via a performance-rights society (e.g. SoundExchange),
+royalties for the public digital performance (e.g. webcast) of the Work, subject
+to the compulsory license created by 17 USC Section 114 of the US Copyright Act
+(or the equivalent in other jurisdictions).
+The above rights may be exercised in all media and formats whether now known or
+hereafter devised. The above rights include the right to make such modifications
+as are technically necessary to exercise the rights in other media and formats.
+All rights not expressly granted by Licensor are hereby reserved.
+
+4. Restrictions.The license granted in Section 3 above is expressly made subject
+to and limited by the following restrictions:
+
+You may distribute, publicly display, publicly perform, or publicly digitally
+perform the Work only under the terms of this License, and You must include a
+copy of, or the Uniform Resource Identifier for, this License with every copy or
+phonorecord of the Work You distribute, publicly display, publicly perform, or
+publicly digitally perform. You may not offer or impose any terms on the Work
+that alter or restrict the terms of this License or the recipients' exercise of
+the rights granted hereunder. You may not sublicense the Work. You must keep
+intact all notices that refer to this License and to the disclaimer of
+warranties. You may not distribute, publicly display, publicly perform, or
+publicly digitally perform the Work with any technological measures that control
+access or use of the Work in a manner inconsistent with the terms of this
+License Agreement. The above applies to the Work as incorporated in a Collective
+Work, but this does not require the Collective Work apart from the Work itself
+to be made subject to the terms of this License. If You create a Collective
+Work, upon notice from any Licensor You must, to the extent practicable, remove
+from the Collective Work any credit as required by clause 4(b), as requested. If
+You create a Derivative Work, upon notice from any Licensor You must, to the
+extent practicable, remove from the Derivative Work any credit as required by
+clause 4(b), as requested.
+If you distribute, publicly display, publicly perform, or publicly digitally
+perform the Work or any Derivative Works or Collective Works, You must keep
+intact all copyright notices for the Work and provide, reasonable to the medium
+or means You are utilizing: (i) the name of the Original Author (or pseudonym,
+if applicable) if supplied, and/or (ii) if the Original Author and/or Licensor
+designate another party or parties (e.g. a sponsor institute, publishing entity,
+journal) for attribution in Licensor's copyright notice, terms of service or by
+other reasonable means, the name of such party or parties; the title of the Work
+if supplied; to the extent reasonably practicable, the Uniform Resource
+Identifier, if any, that Licensor specifies to be associated with the Work,
+unless such URI does not refer to the copyright notice or licensing information
+for the Work; and in the case of a Derivative Work, a credit identifying the use
+of the Work in the Derivative Work (e.g., "French translation of the Work by
+Original Author," or "Screenplay based on original Work by Original Author").
+Such credit may be implemented in any reasonable manner; provided, however, that
+in the case of a Derivative Work or Collective Work, at a minimum such credit
+will appear where any other comparable authorship credit appears and in a manner
+at least as prominent as such other comparable authorship credit.
+5. Representations, Warranties and Disclaimer
+
+UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS
+THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING
+THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT
+LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR
+PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY,
+OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. SOME
+JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH
+EXCLUSION MAY NOT APPLY TO YOU.
+
+6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN
+NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL,
+INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS
+LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+7. Termination
+
+This License and the rights granted hereunder will terminate automatically upon
+any breach by You of the terms of this License. Individuals or entities who have
+received Derivative Works or Collective Works from You under this License,
+however, will not have their licenses terminated provided such individuals or
+entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7,
+and 8 will survive any termination of this License.
+Subject to the above terms and conditions, the license granted here is perpetual
+(for the duration of the applicable copyright in the Work). Notwithstanding the
+above, Licensor reserves the right to release the Work under different license
+terms or to stop distributing the Work at any time; provided, however that any
+such election will not serve to withdraw this License (or any other license that
+has been, or is required to be, granted under the terms of this License), and
+this License will continue in full force and effect unless terminated as stated
+above.
+8. Miscellaneous
+
+Each time You distribute or publicly digitally perform the Work or a Collective
+Work, the Licensor offers to the recipient a license to the Work on the same
+terms and conditions as the license granted to You under this License.
+Each time You distribute or publicly digitally perform a Derivative Work,
+Licensor offers to the recipient a license to the original Work on the same
+terms and conditions as the license granted to You under this License.
+If any provision of this License is invalid or unenforceable under applicable
+law, it shall not affect the validity or enforceability of the remainder of the
+terms of this License, and without further action by the parties to this
+agreement, such provision shall be reformed to the minimum extent necessary to
+make such provision valid and enforceable.
+No term or provision of this License shall be deemed waived and no breach
+consented to unless such waiver or consent shall be in writing and signed by the
+party to be charged with such waiver or consent.
+This License constitutes the entire agreement between the parties with respect
+to the Work licensed here. There are no understandings, agreements or
+representations with respect to the Work not specified here. Licensor shall not
+be bound by any additional provisions that may appear in any communication from
+You. This License may not be modified without the mutual written agreement of
+the Licensor and You.

+ 172 - 0
NOTICE.txt

@@ -281,3 +281,175 @@ which has the following notices:
     Copyright 2004 Jason Paul Kitchen
   TypeUtil.java
     Copyright 2002-2012 Ramnivas Laddad, Juergen Hoeller, Chris Beams
+
+The binary distribution of this product bundles binaries of
+Java Concurrency in Practice book annotations 1.0,
+which has the following notices:
+ * Copyright (c) 2005 Brian Goetz and Tim Peierls Released under the Creative
+  Commons Attribution License (http://creativecommons.org/licenses/by/2.5)
+  Official home: http://www.jcip.net Any republication or derived work
+  distributed in source code form must include this copyright and license
+  notice.
+
+The binary distribution of this product bundles binaries of
+Jetty 6.1.26,
+which has the following notices:
+ * ==============================================================
+    Jetty Web Container
+    Copyright 1995-2016 Mort Bay Consulting Pty Ltd.
+   ==============================================================
+
+   The Jetty Web Container is Copyright Mort Bay Consulting Pty Ltd
+   unless otherwise noted.
+
+   Jetty is dual licensed under both
+
+     * The Apache 2.0 License
+       http://www.apache.org/licenses/LICENSE-2.0.html
+
+         and
+
+     * The Eclipse Public 1.0 License
+       http://www.eclipse.org/legal/epl-v10.html
+
+   Jetty may be distributed under either license.
+
+   ------
+   Eclipse
+
+   The following artifacts are EPL.
+    * org.eclipse.jetty.orbit:org.eclipse.jdt.core
+
+   The following artifacts are EPL and ASL2.
+    * org.eclipse.jetty.orbit:javax.security.auth.message
+
+
+   The following artifacts are EPL and CDDL 1.0.
+    * org.eclipse.jetty.orbit:javax.mail.glassfish
+
+
+   ------
+   Oracle
+
+   The following artifacts are CDDL + GPLv2 with classpath exception.
+   https://glassfish.dev.java.net/nonav/public/CDDL+GPL.html
+
+    * javax.servlet:javax.servlet-api
+    * javax.annotation:javax.annotation-api
+    * javax.transaction:javax.transaction-api
+    * javax.websocket:javax.websocket-api
+
+   ------
+   Oracle OpenJDK
+
+   If ALPN is used to negotiate HTTP/2 connections, then the following
+   artifacts may be included in the distribution or downloaded when ALPN
+   module is selected.
+
+    * java.sun.security.ssl
+
+   These artifacts replace/modify OpenJDK classes.  The modififications
+   are hosted at github and both modified and original are under GPL v2 with
+   classpath exceptions.
+   http://openjdk.java.net/legal/gplv2+ce.html
+
+
+   ------
+   OW2
+
+   The following artifacts are licensed by the OW2 Foundation according to the
+   terms of http://asm.ow2.org/license.html
+
+   org.ow2.asm:asm-commons
+   org.ow2.asm:asm
+
+
+   ------
+   Apache
+
+   The following artifacts are ASL2 licensed.
+
+   org.apache.taglibs:taglibs-standard-spec
+   org.apache.taglibs:taglibs-standard-impl
+
+
+   ------
+   MortBay
+
+   The following artifacts are ASL2 licensed.  Based on selected classes from
+   following Apache Tomcat jars, all ASL2 licensed.
+
+   org.mortbay.jasper:apache-jsp
+     org.apache.tomcat:tomcat-jasper
+     org.apache.tomcat:tomcat-juli
+     org.apache.tomcat:tomcat-jsp-api
+     org.apache.tomcat:tomcat-el-api
+     org.apache.tomcat:tomcat-jasper-el
+     org.apache.tomcat:tomcat-api
+     org.apache.tomcat:tomcat-util-scan
+     org.apache.tomcat:tomcat-util
+
+   org.mortbay.jasper:apache-el
+     org.apache.tomcat:tomcat-jasper-el
+     org.apache.tomcat:tomcat-el-api
+
+
+   ------
+   Mortbay
+
+   The following artifacts are CDDL + GPLv2 with classpath exception.
+
+   https://glassfish.dev.java.net/nonav/public/CDDL+GPL.html
+
+   org.eclipse.jetty.toolchain:jetty-schemas
+
+   ------
+   Assorted
+
+   The UnixCrypt.java code implements the one way cryptography used by
+   Unix systems for simple password protection.  Copyright 1996 Aki Yoshida,
+   modified April 2001  by Iris Van den Broeke, Daniel Deville.
+   Permission to use, copy, modify and distribute UnixCrypt
+   for non-commercial or commercial purposes and without fee is
+   granted provided that the copyright notice appears in all copies./
+
+The binary distribution of this product bundles binaries of
+Snappy for Java 1.0.4.1,
+which has the following notices:
+ * This product includes software developed by Google
+    Snappy: http://code.google.com/p/snappy/ (New BSD License)
+
+   This product includes software developed by Apache
+    PureJavaCrc32C from apache-hadoop-common http://hadoop.apache.org/
+    (Apache 2.0 license)
+
+   This library containd statically linked libstdc++. This inclusion is allowed by
+   "GCC RUntime Library Exception"
+   http://gcc.gnu.org/onlinedocs/libstdc++/manual/license.html
+
+   == Contributors ==
+     * Tatu Saloranta
+       * Providing benchmark suite
+     * Alec Wysoker
+       * Performance and memory usage improvement
+
+The binary distribution of this product bundles binaries of
+Xerces2 Java Parser 2.9.1,
+which has the following notices:
+ * =========================================================================
+   ==  NOTICE file corresponding to section 4(d) of the Apache License,   ==
+   ==  Version 2.0, in this case for the Apache Xerces Java distribution. ==
+   =========================================================================
+
+   Apache Xerces Java
+   Copyright 1999-2007 The Apache Software Foundation
+
+   This product includes software developed at
+   The Apache Software Foundation (http://www.apache.org/).
+
+   Portions of this software were originally based on the following:
+     - software copyright (c) 1999, IBM Corporation., http://www.ibm.com.
+     - software copyright (c) 1999, Sun Microsystems., http://www.sun.com.
+     - voluntary contributions made by Paul Eng on behalf of the
+       Apache Software Foundation that were originally developed at iClick, Inc.,
+       software copyright (c) 1999.

+ 1 - 8
dev-support/docker/Dockerfile

@@ -59,17 +59,10 @@ RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
     python \
     python2.7 \
     python-pip \
+    rsync \
     snappy \
     zlib1g-dev
 
-######
-# Install ISA-L library
-######
-RUN curl -s -S -L \
-         http://http.us.debian.org/debian/pool/main/libi/libisal/libisal2_2.15.0-2_amd64.deb \
-         -o /opt/libisal2_2.15.0-2_amd64.deb && \
-    dpkg -i /opt/libisal2_2.15.0-2_amd64.deb
-
 #######
 # Oracle Java
 #######

+ 2 - 2
hadoop-assemblies/pom.xml

@@ -23,12 +23,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha1-SNAPSHOT</version>
+    <version>3.0.0-alpha2-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-assemblies</artifactId>
-  <version>3.0.0-alpha1-SNAPSHOT</version>
+  <version>3.0.0-alpha2-SNAPSHOT</version>
   <name>Apache Hadoop Assemblies</name>
   <description>Apache Hadoop Assemblies</description>
 

+ 44 - 2
hadoop-build-tools/pom.xml

@@ -18,7 +18,7 @@
   <parent>
     <artifactId>hadoop-main</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha1-SNAPSHOT</version>
+    <version>3.0.0-alpha2-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-build-tools</artifactId>
@@ -29,6 +29,19 @@
     <failIfNoTests>false</failIfNoTests>
   </properties>
   <build>
+    <resources>
+      <resource>
+        <directory>${project.basedir}/target/extra-resources</directory>
+        <targetPath>META-INF</targetPath>
+        <includes>
+          <include>LICENSE.txt</include>
+          <include>NOTICE.txt</include>
+        </includes>
+      </resource>
+      <resource>
+        <directory>${project.basedir}/src/main/resources</directory>
+      </resource>
+    </resources>
     <plugins>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
@@ -37,17 +50,46 @@
           <skip>true</skip>
         </configuration>
       </plugin>
+      <!-- copy L&N files to target/extra-resources -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-resources-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>copy-resources</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>copy-resources</goal>
+            </goals>
+            <configuration>
+              <outputDirectory>${project.basedir}/target/extra-resources</outputDirectory>
+              <resources>
+                <resource>
+                  <directory>../</directory>
+                  <includes>
+                    <include>LICENSE.txt</include>
+                    <include>NOTICE.txt</include>
+                  </includes>
+                </resource>
+              </resources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <!-- add entries for L&N files to remote-resources.xml in jar file -->
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-remote-resources-plugin</artifactId>
         <executions>
           <execution>
+            <phase>process-resources</phase>
             <goals>
               <goal>bundle</goal>
             </goals>
           </execution>
         </executions>
         <configuration>
+          <resourcesDirectory>${project.build.outputDirectory}</resourcesDirectory>
           <includes>
             <include>META-INF/LICENSE.txt</include>
             <include>META-INF/NOTICE.txt</include>
@@ -69,4 +111,4 @@
       </plugin>
     </plugins>
   </build>
-</project>
+</project>

+ 2 - 2
hadoop-client/pom.xml

@@ -18,12 +18,12 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-   <version>3.0.0-alpha1-SNAPSHOT</version>
+   <version>3.0.0-alpha2-SNAPSHOT</version>
    <relativePath>../hadoop-project-dist</relativePath>
  </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-client</artifactId>
-  <version>3.0.0-alpha1-SNAPSHOT</version>
+  <version>3.0.0-alpha2-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>

+ 2 - 2
hadoop-common-project/hadoop-annotations/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha1-SNAPSHOT</version>
+    <version>3.0.0-alpha2-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-annotations</artifactId>
-  <version>3.0.0-alpha1-SNAPSHOT</version>
+  <version>3.0.0-alpha2-SNAPSHOT</version>
   <description>Apache Hadoop Annotations</description>
   <name>Apache Hadoop Annotations</name>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-common-project/hadoop-auth-examples/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha1-SNAPSHOT</version>
+    <version>3.0.0-alpha2-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth-examples</artifactId>
-  <version>3.0.0-alpha1-SNAPSHOT</version>
+  <version>3.0.0-alpha2-SNAPSHOT</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop Auth Examples</name>

+ 2 - 2
hadoop-common-project/hadoop-auth/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha1-SNAPSHOT</version>
+    <version>3.0.0-alpha2-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth</artifactId>
-  <version>3.0.0-alpha1-SNAPSHOT</version>
+  <version>3.0.0-alpha2-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop Auth</name>

+ 12 - 12
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java

@@ -80,16 +80,7 @@ public class KerberosName {
    */
   private static List<Rule> rules;
 
-  private static String defaultRealm;
-
-  static {
-    try {
-      defaultRealm = KerberosUtil.getDefaultRealm();
-    } catch (Exception ke) {
-        LOG.debug("Kerberos krb5 configuration not found, setting default realm to empty");
-        defaultRealm="";
-    }
-  }
+  private static String defaultRealm = null;
 
   @VisibleForTesting
   public static void resetDefaultRealm() {
@@ -124,9 +115,18 @@ public class KerberosName {
 
   /**
    * Get the configured default realm.
+   * Used syncronized method here, because double-check locking is overhead.
    * @return the default realm from the krb5.conf
    */
-  public String getDefaultRealm() {
+  public static synchronized String getDefaultRealm() {
+    if (defaultRealm == null) {
+      try {
+        defaultRealm = KerberosUtil.getDefaultRealm();
+      } catch (Exception ke) {
+        LOG.debug("Kerberos krb5 configuration not found, setting default realm to empty");
+        defaultRealm = "";
+      }
+    }
     return defaultRealm;
   }
 
@@ -309,7 +309,7 @@ public class KerberosName {
     String apply(String[] params) throws IOException {
       String result = null;
       if (isDefault) {
-        if (defaultRealm.equals(params[0])) {
+        if (getDefaultRealm().equals(params[0])) {
           result = params[1];
         }
       } else if (params.length - 1 == numOfComponents) {
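
Note on the KerberosName hunk above: the static initializer that eagerly resolved the default realm is replaced by a lazily initialized, synchronized getDefaultRealm(), so a missing krb5 configuration no longer surfaces at class-load time. A minimal sketch of the same pattern follows; the class name LazyRealm and the lookupRealm() helper are illustrative stand-ins for KerberosUtil.getDefaultRealm(), not Hadoop code.

    // Illustrative sketch only: lazy, synchronized initialization of a config value,
    // mirroring the pattern the patch above introduces for the Kerberos default realm.
    public final class LazyRealm {
      private static String defaultRealm = null;

      // A plain synchronized accessor is used instead of double-checked locking,
      // matching the rationale noted in the patched javadoc.
      public static synchronized String getDefaultRealm() {
        if (defaultRealm == null) {
          try {
            defaultRealm = lookupRealm();   // stand-in for KerberosUtil.getDefaultRealm()
          } catch (Exception e) {
            defaultRealm = "";              // no krb5 configuration: fall back to an empty realm
          }
        }
        return defaultRealm;
      }

      // Hypothetical helper used only to make the sketch self-contained.
      private static String lookupRealm() throws Exception {
        return System.getProperty("realm.for.demo", "EXAMPLE.COM");
      }
    }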

+ 4 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java

@@ -436,6 +436,8 @@ public class ZKSignerSecretProvider extends RolloverSignerSecretProvider {
   @InterfaceAudience.Private
   public static class JaasConfiguration extends Configuration {
 
+    private final javax.security.auth.login.Configuration baseConfig =
+        javax.security.auth.login.Configuration.getConfiguration();
     private static AppConfigurationEntry[] entry;
     private String entryName;
 
@@ -468,7 +470,8 @@ public class ZKSignerSecretProvider extends RolloverSignerSecretProvider {
 
     @Override
     public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
-      return (entryName.equals(name)) ? entry : null;
+      return (entryName.equals(name)) ? entry : ((baseConfig != null)
+        ? baseConfig.getAppConfigurationEntry(name) : null);
     }
 
     private String getKrb5LoginModuleName() {
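
Note on the ZKSignerSecretProvider hunk above: JaasConfiguration now captures the JVM's previously installed JAAS Configuration and delegates to it for any entry name it does not own, instead of returning null. A self-contained sketch of that delegation pattern, with illustrative names rather than Hadoop's actual class:

    import javax.security.auth.login.AppConfigurationEntry;
    import javax.security.auth.login.Configuration;

    // Illustrative sketch: answer for one named JAAS entry, otherwise delegate to the
    // Configuration that was installed before this one (if any).
    public class DelegatingJaasConfiguration extends Configuration {
      private final Configuration base = Configuration.getConfiguration();
      private final String entryName;
      private final AppConfigurationEntry[] entry;

      public DelegatingJaasConfiguration(String entryName, AppConfigurationEntry[] entry) {
        this.entryName = entryName;
        this.entry = entry;
      }

      @Override
      public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
        if (entryName.equals(name)) {
          return entry;
        }
        // Fall back to the wrapped configuration rather than returning null outright.
        return (base != null) ? base.getAppConfigurationEntry(name) : null;
      }
    }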

+ 13 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -47,6 +47,19 @@
        <Field name="out" />
        <Bug pattern="IS2_INCONSISTENT_SYNC" />
      </Match>
+    <!--
+       The nativeCoder field is get/set and used by native codes.
+    -->
+    <Match>
+        <Class name="org.apache.hadoop.io.erasurecode.rawcoder.AbstractNativeRawEncoder" />
+        <Field name="nativeCoder" />
+        <Bug pattern="UUF_UNUSED_FIELD" />
+    </Match>
+    <Match>
+        <Class name="org.apache.hadoop.io.erasurecode.rawcoder.AbstractNativeRawDecoder" />
+        <Field name="nativeCoder" />
+        <Bug pattern="UUF_UNUSED_FIELD" />
+    </Match>
      <!-- 
        Further SaslException should be ignored during cleanup and
        original exception should be re-thrown.

+ 16 - 2
hadoop-common-project/hadoop-common/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha1-SNAPSHOT</version>
+    <version>3.0.0-alpha2-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-common</artifactId>
-  <version>3.0.0-alpha1-SNAPSHOT</version>
+  <version>3.0.0-alpha2-SNAPSHOT</version>
   <description>Apache Hadoop Common</description>
   <name>Apache Hadoop Common</name>
   <packaging>jar</packaging>
@@ -105,6 +105,11 @@
       <artifactId>jetty-util</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.mortbay.jetty</groupId>
+      <artifactId>jetty-sslengine</artifactId>
+      <scope>compile</scope>
+    </dependency>
     <dependency>
       <groupId>javax.servlet.jsp</groupId>
       <artifactId>jsp-api</artifactId>
@@ -115,6 +120,11 @@
       <artifactId>jersey-core</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-servlet</artifactId>
+      <scope>compile</scope>
+    </dependency>
     <dependency>
       <!-- Used, even though 'mvn dependency:analyze' doesn't find it -->
       <groupId>com.sun.jersey</groupId>
@@ -626,6 +636,8 @@
                     <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Compressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Decompressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.erasurecode.ErasureCodeNative</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawEncoder</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawDecoder</javahClassName>
                     <javahClassName>org.apache.hadoop.crypto.OpensslCipher</javahClassName>
                     <javahClassName>org.apache.hadoop.crypto.random.OpensslSecureRandom</javahClassName>
                     <javahClassName>org.apache.hadoop.util.NativeCrc32</javahClassName>
@@ -764,6 +776,8 @@
                     <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Compressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Decompressor</javahClassName>
                     <javahClassName>org.apache.hadoop.io.erasurecode.ErasureCodeNative</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawEncoder</javahClassName>
+                    <javahClassName>org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawDecoder</javahClassName>
                     <javahClassName>org.apache.hadoop.crypto.OpensslCipher</javahClassName>
                     <javahClassName>org.apache.hadoop.crypto.random.OpensslSecureRandom</javahClassName>
                     <javahClassName>org.apache.hadoop.util.NativeCrc32</javahClassName>

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -28,7 +28,7 @@ function hadoop_usage
   hadoop_add_option "hostnames list[,of,host,names]" "hosts to use in slave mode"
   hadoop_add_option "loglevel level" "set the log4j level for this command"
   hadoop_add_option "hosts filename" "list of hosts to use in slave mode"
-  hadoop_add_option "slaves" "turn on slave mode"
+  hadoop_add_option "workers" "turn on worker mode"
 
   hadoop_add_subcommand "checknative" "check native Hadoop and compression libraries availability"
   hadoop_add_subcommand "classpath" "prints the class path needed to get the Hadoop jar and the required libraries"
@@ -205,8 +205,8 @@ fi
 
 hadoop_verify_user "${HADOOP_SUBCMD}"
 
-if [[ ${HADOOP_SLAVE_MODE} = true ]]; then
-  hadoop_common_slave_mode_execute "${HADOOP_COMMON_HOME}/bin/hadoop" "${HADOOP_USER_PARAMS[@]}"
+if [[ ${HADOOP_WORKER_MODE} = true ]]; then
+  hadoop_common_worker_mode_execute "${HADOOP_COMMON_HOME}/bin/hadoop" "${HADOOP_USER_PARAMS[@]}"
   exit $?
 fi
 

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd

@@ -80,12 +80,12 @@ if "%1" == "--config" (
 )
 
 @rem
-@rem check to see it is specified whether to use the slaves or the
+@rem check to see it is specified whether to use the workers or the
 @rem masters file
 @rem
 
 if "%1" == "--hosts" (
-  set HADOOP_SLAVES=%HADOOP_CONF_DIR%\%2
+  set HADOOP_WORKERS=%HADOOP_CONF_DIR%\%2
   shift
   shift
 )

+ 4 - 0
hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

@@ -113,6 +113,10 @@ hadoop_exec_userfuncs
 hadoop_exec_user_hadoopenv
 hadoop_verify_confdir
 
+hadoop_deprecate_envvar HADOOP_SLAVES HADOOP_WORKERS
+hadoop_deprecate_envvar HADOOP_SLAVE_NAMES HADOOP_WORKER_NAMES
+hadoop_deprecate_envvar HADOOP_SLAVE_SLEEP HADOOP_WORKER_SLEEP
+
 # do all the OS-specific startup bits here
 # this allows us to get a decent JAVA_HOME,
 # call crle for LD_LIBRARY_PATH, etc.

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh

@@ -57,13 +57,13 @@ else
 fi
 
 hadoop_error "WARNING: Use of this script to ${daemonmode} HDFS daemons is deprecated."
-hadoop_error "WARNING: Attempting to execute replacement \"hdfs --slaves --daemon ${daemonmode}\" instead."
+hadoop_error "WARNING: Attempting to execute replacement \"hdfs --workers --daemon ${daemonmode}\" instead."
 
 #
 # Original input was usually:
 #  hadoop-daemons.sh (shell options) (start|stop) (datanode|...) (daemon options)
 # we're going to turn this into
-#  hdfs --slaves --daemon (start|stop) (rest of options)
+#  hdfs --workers --daemon (start|stop) (rest of options)
 #
 for (( i = 0; i < ${#HADOOP_USER_PARAMS[@]}; i++ ))
 do
@@ -74,4 +74,4 @@ do
   fi
 done
 
-${hdfsscript} --slaves --daemon "${daemonmode}" "${HADOOP_USER_PARAMS[@]}"
+${hdfsscript} --workers --daemon "${daemonmode}" "${HADOOP_USER_PARAMS[@]}"

+ 41 - 34
hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh

@@ -602,25 +602,25 @@ function hadoop_basic_init
   HADOOP_SSH_PARALLEL=${HADOOP_SSH_PARALLEL:-10}
 }
 
-## @description  Set the slave support information to the contents
+## @description  Set the worker support information to the contents
 ## @description  of `filename`
 ## @audience     public
 ## @stability    stable
 ## @replaceable  no
 ## @param        filename
 ## @return       will exit if file does not exist
-function hadoop_populate_slaves_file
+function hadoop_populate_workers_file
 {
-  local slavesfile=$1
+  local workersfile=$1
   shift
-  if [[ -f "${slavesfile}" ]]; then
+  if [[ -f "${workersfile}" ]]; then
     # shellcheck disable=2034
-    HADOOP_SLAVES="${slavesfile}"
-  elif [[ -f "${HADOOP_CONF_DIR}/${slavesfile}" ]]; then
+    HADOOP_WORKERS="${workersfile}"
+  elif [[ -f "${HADOOP_CONF_DIR}/${workersfile}" ]]; then
     # shellcheck disable=2034
-    HADOOP_SLAVES="${HADOOP_CONF_DIR}/${slavesfile}"
+    HADOOP_WORKERS="${HADOOP_CONF_DIR}/${workersfile}"
   else
-    hadoop_error "ERROR: Cannot find hosts file \"${slavesfile}\""
+    hadoop_error "ERROR: Cannot find hosts file \"${workersfile}\""
     hadoop_exit_with_usage 1
   fi
 }
@@ -669,14 +669,14 @@ function hadoop_actual_ssh
 {
   # we are passing this function to xargs
   # should get hostname followed by rest of command line
-  local slave=$1
+  local worker=$1
   shift
 
   # shellcheck disable=SC2086
-  ssh ${HADOOP_SSH_OPTS} ${slave} $"${@// /\\ }" 2>&1 | sed "s/^/$slave: /"
+  ssh ${HADOOP_SSH_OPTS} ${worker} $"${@// /\\ }" 2>&1 | sed "s/^/$worker: /"
 }
 
-## @description  Connect to ${HADOOP_SLAVES} or ${HADOOP_SLAVE_NAMES}
+## @description  Connect to ${HADOOP_WORKERS} or ${HADOOP_WORKER_NAMES}
 ## @description  and execute command.
 ## @audience     private
 ## @stability    evolving
@@ -687,45 +687,52 @@ function hadoop_connect_to_hosts
 {
   # shellcheck disable=SC2124
   local params="$@"
-  local slave_file
+  local worker_file
   local tmpslvnames
 
   #
   # ssh (or whatever) to a host
   #
   # User can specify hostnames or a file where the hostnames are (not both)
-  if [[ -n "${HADOOP_SLAVES}" && -n "${HADOOP_SLAVE_NAMES}" ]] ; then
-    hadoop_error "ERROR: Both HADOOP_SLAVES and HADOOP_SLAVE_NAME were defined. Aborting."
+  if [[ -n "${HADOOP_WORKERS}" && -n "${HADOOP_WORKER_NAMES}" ]] ; then
+    hadoop_error "ERROR: Both HADOOP_WORKERS and HADOOP_WORKER_NAME were defined. Aborting."
     exit 1
-  elif [[ -z "${HADOOP_SLAVE_NAMES}" ]]; then
-    slave_file=${HADOOP_SLAVES:-${HADOOP_CONF_DIR}/slaves}
+  elif [[ -z "${HADOOP_WORKER_NAMES}" ]]; then
+    if [[ -n "${HADOOP_WORKERS}" ]]; then
+      worker_file=${HADOOP_WORKERS}
+    elif [[ -f "${HADOOP_CONF_DIR}/workers" ]]; then
+      worker_file=${HADOOP_CONF_DIR}/workers
+    elif [[ -f "${HADOOP_CONF_DIR}/slaves" ]]; then
+      hadoop_error "WARNING: 'slaves' file has been deprecated. Please use 'workers' file instead."
+      worker_file=${HADOOP_CONF_DIR}/slaves
+    fi
   fi
 
   # if pdsh is available, let's use it.  otherwise default
   # to a loop around ssh.  (ugh)
   if [[ -e '/usr/bin/pdsh' ]]; then
-    if [[ -z "${HADOOP_SLAVE_NAMES}" ]] ; then
+    if [[ -z "${HADOOP_WORKER_NAMES}" ]] ; then
       # if we were given a file, just let pdsh deal with it.
       # shellcheck disable=SC2086
       PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
-      -f "${HADOOP_SSH_PARALLEL}" -w ^"${slave_file}" $"${@// /\\ }" 2>&1
+      -f "${HADOOP_SSH_PARALLEL}" -w ^"${worker_file}" $"${@// /\\ }" 2>&1
     else
       # no spaces allowed in the pdsh arg host list
       # shellcheck disable=SC2086
-      tmpslvnames=$(echo ${SLAVE_NAMES} | tr -s ' ' ,)
+      tmpslvnames=$(echo ${HADOOP_WORKER_NAMES} | tr -s ' ' ,)
       PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
         -f "${HADOOP_SSH_PARALLEL}" \
         -w "${tmpslvnames}" $"${@// /\\ }" 2>&1
     fi
   else
-    if [[ -z "${HADOOP_SLAVE_NAMES}" ]]; then
-      HADOOP_SLAVE_NAMES=$(sed 's/#.*$//;/^$/d' "${slave_file}")
+    if [[ -z "${HADOOP_WORKER_NAMES}" ]]; then
+      HADOOP_WORKER_NAMES=$(sed 's/#.*$//;/^$/d' "${worker_file}")
     fi
     hadoop_connect_to_hosts_without_pdsh "${params}"
   fi
 }
 
-## @description  Connect to ${SLAVE_NAMES} and execute command
+## @description  Connect to ${HADOOP_WORKER_NAMES} and execute command
 ## @description  under the environment which does not support pdsh.
 ## @audience     private
 ## @stability    evolving
@@ -736,24 +743,24 @@ function hadoop_connect_to_hosts_without_pdsh
 {
   # shellcheck disable=SC2124
   local params="$@"
-  local slaves=(${HADOOP_SLAVE_NAMES})
-  for (( i = 0; i < ${#slaves[@]}; i++ ))
+  local workers=(${HADOOP_WORKER_NAMES})
+  for (( i = 0; i < ${#workers[@]}; i++ ))
   do
     if (( i != 0 && i % HADOOP_SSH_PARALLEL == 0 )); then
       wait
     fi
     # shellcheck disable=SC2086
-    hadoop_actual_ssh "${slaves[$i]}" ${params} &
+    hadoop_actual_ssh "${workers[$i]}" ${params} &
   done
   wait
 }
 
-## @description  Utility routine to handle --slaves mode
+## @description  Utility routine to handle --workers mode
 ## @audience     private
 ## @stability    evolving
 ## @replaceable  yes
 ## @param        commandarray
-function hadoop_common_slave_mode_execute
+function hadoop_common_worker_mode_execute
 {
   #
   # input should be the command line as given by the user
@@ -761,13 +768,13 @@ function hadoop_common_slave_mode_execute
   #
   local argv=("$@")
 
-  # if --slaves is still on the command line, remove it
+  # if --workers is still on the command line, remove it
   # to prevent loops
   # Also remove --hostnames and --hosts along with arg values
   local argsSize=${#argv[@]};
   for (( i = 0; i < argsSize; i++ ))
   do
-    if [[ "${argv[$i]}" =~ ^--slaves$ ]]; then
+    if [[ "${argv[$i]}" =~ ^--workers$ ]]; then
       unset argv[$i]
     elif [[ "${argv[$i]}" =~ ^--hostnames$ ]] ||
       [[ "${argv[$i]}" =~ ^--hosts$ ]]; then
@@ -2051,13 +2058,13 @@ function hadoop_parse_args
       --hostnames)
         shift
         # shellcheck disable=SC2034
-        HADOOP_SLAVE_NAMES="$1"
+        HADOOP_WORKER_NAMES="$1"
         shift
         ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
       ;;
       --hosts)
         shift
-        hadoop_populate_slaves_file "$1"
+        hadoop_populate_workers_file "$1"
         shift
         ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
       ;;
@@ -2068,10 +2075,10 @@ function hadoop_parse_args
         shift
         ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
       ;;
-      --slaves)
+      --workers)
         shift
         # shellcheck disable=SC2034
-        HADOOP_SLAVE_MODE=true
+        HADOOP_WORKER_MODE=true
         ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
       ;;
       *)
@@ -2104,4 +2111,4 @@ function hadoop_xml_escape
 function hadoop_sed_escape
 {
   sed -e 's/[\/&]/\\&/g' <<< "$1"
-}
+}

+ 0 - 41
hadoop-common-project/hadoop-common/src/main/bin/rcc

@@ -1,41 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script runs the hadoop core commands.
-this="${BASH_SOURCE-$0}"
-bin=$(cd -P -- "$(dirname -- "$this")" >/dev/null && pwd -P)
-script="$(basename -- "$this")"
-this="$bin/$script"
-
-HADOOP_DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}
-# shellcheck disable=SC2034
-HADOOP_NEW_CONFIG=true
-. "$HADOOP_LIBEXEC_DIR/hadoop-config.sh"
-
-if [ $# = 0 ]; then
-  hadoop_exit_with_usage 1
-fi
-
-CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
-
-# Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
-hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
-
-hadoop_finalize
-hadoop_java_exec rcc "${CLASS}" "$@"

+ 5 - 5
hadoop-common-project/hadoop-common/src/main/bin/slaves.sh → hadoop-common-project/hadoop-common/src/main/bin/workers.sh

@@ -16,20 +16,20 @@
 # limitations under the License.
 
 
-# Run a shell command on all slave hosts.
+# Run a shell command on all worker hosts.
 #
 # Environment Variables
 #
-#   HADOOP_SLAVES    File naming remote hosts.
-#     Default is ${HADOOP_CONF_DIR}/slaves.
+#   HADOOP_WORKERS    File naming remote hosts.
+#     Default is ${HADOOP_CONF_DIR}/workers.
 #   HADOOP_CONF_DIR  Alternate conf dir. Default is ${HADOOP_HOME}/conf.
-#   HADOOP_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
+#   HADOOP_WORKER_SLEEP Seconds to sleep between spawning remote commands.
 #   HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
 ##
 
 function hadoop_usage
 {
-  echo "Usage: slaves.sh [--config confdir] command..."
+  echo "Usage: workers.sh [--config confdir] command..."
 }
 
 # let's locate libexec...

+ 28 - 23
hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh

@@ -115,29 +115,34 @@ esac
 #
 # A note about classpaths.
 #
-# The classpath is configured such that entries are stripped prior
-# to handing to Java based either upon duplication or non-existence.
-# Wildcards and/or directories are *NOT* expanded as the
-# de-duplication is fairly simple.  So if two directories are in
-# the classpath that both contain awesome-methods-1.0.jar,
-# awesome-methods-1.0.jar will still be seen by java.  But if
-# the classpath specifically has awesome-methods-1.0.jar from the
-# same directory listed twice, the last one will be removed.
-#
-
-# An additional, custom CLASSPATH.  This is really meant for
-# end users, but as an administrator, one might want to push
-# something extra in here too, such as the jar to the topology
-# method.  Just be sure to append to the existing HADOOP_USER_CLASSPATH
-# so end users have a way to add stuff.
-# export HADOOP_USER_CLASSPATH="/some/cool/path/on/your/machine"
-
-# Should HADOOP_USER_CLASSPATH be first in the official CLASSPATH?
+# By default, Apache Hadoop overrides Java's CLASSPATH
+# environment variable.  It is configured such
+# that it sarts out blank with new entries added after passing
+# a series of checks (file/dir exists, not already listed aka
+# de-deduplication).  During de-depulication, wildcards and/or
+# directories are *NOT* expanded to keep it simple. Therefore,
+# if the computed classpath has two specific mentions of
+# awesome-methods-1.0.jar, only the first one added will be seen.
+# If two directories are in the classpath that both contain
+# awesome-methods-1.0.jar, then Java will pick up both versions.
+
+# An additional, custom CLASSPATH. Site-wide configs should be
+# handled via the shellprofile functionality, utilizing the
+# hadoop_add_classpath function for greater control and much
+# harder for apps/end-users to accidentally override.
+# Similarly, end users should utilize ${HOME}/.hadooprc .
+# This variable should ideally only be used as a short-cut,
+# interactive way for temporary additions on the command line.
+# export HADOOP_CLASSPATH="/some/cool/path/on/your/machine"
+
+# Should HADOOP_CLASSPATH be first in the official CLASSPATH?
 # export HADOOP_USER_CLASSPATH_FIRST="yes"
 
-# If HADOOP_USE_CLIENT_CLASSLOADER is set, HADOOP_CLASSPATH along with the main
-# jar are handled by a separate isolated client classloader. If it is set,
-# HADOOP_USER_CLASSPATH_FIRST is ignored. Can be defined by doing
+# If HADOOP_USE_CLIENT_CLASSLOADER is set, the classpath along
+# with the main jar are handled by a separate isolated
+# client classloader when 'hadoop jar', 'yarn jar', or 'mapred job'
+# is utilized. If it is set, HADOOP_CLASSPATH and
+# HADOOP_USER_CLASSPATH_FIRST are ignored.
 # export HADOOP_USE_CLIENT_CLASSLOADER=true
 
 # HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES overrides the default definition of
@@ -169,8 +174,8 @@ esac
 # export HADOOP_SSH_PARALLEL=10
 
 # Filename which contains all of the hosts for any remote execution
-# helper scripts # such as slaves.sh, start-dfs.sh, etc.
-# export HADOOP_SLAVES="${HADOOP_CONF_DIR}/slaves"
+# helper scripts # such as workers.sh, start-dfs.sh, etc.
+# export HADOOP_WORKERS="${HADOOP_CONF_DIR}/workers"
 
 ###
 # Options for all daemons

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/conf/hadoop-user-functions.sh.example

@@ -87,7 +87,7 @@
 #}
 
 #
-# Example:  efficient command execution for the slaves
+# Example:  efficient command execution for the workers
 #
 # To improve performance, you can use xargs -P
 # instead of the for loop, if supported.
@@ -108,7 +108,7 @@
 #  # list with each hostname read from stdin/pipe. But it consider one
 #  # line as one argument while reading from stdin/pipe. So place each
 #  # hostname in different lines while passing via pipe.
-#  tmpslvnames=$(echo "${HADOOP_SLAVE_NAMES}" | tr ' ' '\n' )
+#  tmpslvnames=$(echo "${HADOOP_WORKER_NAMES}" | tr ' ' '\n' )
 #    echo "${tmpslvnames}" | \
 #    xargs -n 1 -P"${HADOOP_SSH_PARALLEL}" \
 #    -I {} bash -c --  "hadoop_actual_ssh {} ${params}"

+ 15 - 2
hadoop-common-project/hadoop-common/src/main/conf/log4j.properties

@@ -73,7 +73,7 @@ log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 log4j.appender.console=org.apache.log4j.ConsoleAppender
 log4j.appender.console.target=System.err
 log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 
 #
 # TaskLog Appender
@@ -227,7 +227,7 @@ log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file
 log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
 log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
 log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
 log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
 
@@ -300,6 +300,19 @@ log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 #log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
 #log4j.appender.nodemanagerrequestlog.RetainDays=3
 
+
+# WebHdfs request log on datanodes
+# Specify -Ddatanode.webhdfs.logger=INFO,HTTPDRFA on datanode startup to
+# direct the log to a separate file.
+#datanode.webhdfs.logger=INFO,console
+#log4j.logger.datanode.webhdfs=${datanode.webhdfs.logger}
+#log4j.appender.HTTPDRFA=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.HTTPDRFA.File=${hadoop.log.dir}/hadoop-datanode-webhdfs.log
+#log4j.appender.HTTPDRFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.HTTPDRFA.layout.ConversionPattern=%d{ISO8601} %m%n
+#log4j.appender.HTTPDRFA.DatePattern=.yyyy-MM-dd
+
+
 # Appender for viewing information for errors and warnings
 yarn.ewma.cleanupInterval=300
 yarn.ewma.messageAgeLimitSeconds=86400

+ 0 - 0
hadoop-yarn-project/hadoop-yarn/conf/slaves → hadoop-common-project/hadoop-common/src/main/conf/workers


+ 84 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfigRedactor.java

@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.conf;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Pattern;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeys.*;
+
+/**
+ * Tool for redacting sensitive information when displaying config parameters.
+ *
+ * <p>Some config parameters contain sensitive information (for example, cloud
+ * storage keys). When these properties are displayed in plaintext, we should
+ * redact their values as appropriate.
+ */
+public class ConfigRedactor {
+
+  private static final String REDACTED_TEXT = "<redacted>";
+
+  private List<Pattern> compiledPatterns;
+
+  public ConfigRedactor(Configuration conf) {
+    String sensitiveRegexList = conf.get(
+        HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS,
+        HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS_DEFAULT);
+    List<String> sensitiveRegexes = Arrays.asList(sensitiveRegexList.split(","));
+    compiledPatterns = new ArrayList<Pattern>();
+    for (String regex : sensitiveRegexes) {
+      Pattern p = Pattern.compile(regex);
+      compiledPatterns.add(p);
+    }
+  }
+
+  /**
+   * Given a key / value pair, decides whether or not to redact and returns
+   * either the original value or text indicating it has been redacted.
+   *
+   * @param key Configuration key whose value may need redaction
+   * @param value Value associated with the key
+   * @return Original value, or text indicating it has been redacted
+   */
+  public String redact(String key, String value) {
+    if (configIsSensitive(key)) {
+      return REDACTED_TEXT;
+    }
+    return value;
+  }
+
+  /**
+   * Matches given config key against patterns and determines whether or not
+   * it should be considered sensitive enough to redact in logs and other
+   * plaintext displays.
+   *
+   * @param key Configuration key to test against the sensitive-key patterns
+   * @return True if parameter is considered sensitive
+   */
+  private boolean configIsSensitive(String key) {
+    for (Pattern regex : compiledPatterns) {
+      if (regex.matcher(key).find()) {
+        return true;
+      }
+    }
+    return false;
+  }
+}
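
For context, a minimal usage sketch of the new ConfigRedactor: it assumes only the constructor and redact(key, value) shown above plus the default sensitive-key patterns; the property names and values are made up for illustration.

import java.util.Map;

import org.apache.hadoop.conf.ConfigRedactor;
import org.apache.hadoop.conf.Configuration;

public class RedactorExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical properties, for illustration only. The first one ends
    // in "password", so it matches the default "password$" pattern.
    conf.set("ssl.server.keystore.password", "hunter2");
    conf.set("fs.defaultFS", "hdfs://localhost:9000");

    ConfigRedactor redactor = new ConfigRedactor(conf);
    for (Map.Entry<String, String> entry : conf) {
      // Sensitive keys print as "<redacted>"; everything else is unchanged.
      System.out.println(entry.getKey() + " = "
          + redactor.redact(entry.getKey(), entry.getValue()));
    }
  }
}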

+ 4 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -78,6 +78,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -2053,7 +2054,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    */
   protected char[] getPasswordFromConfig(String name) {
     char[] pass = null;
-    if (getBoolean(CredentialProvider.CLEAR_TEXT_FALLBACK, true)) {
+    if (getBoolean(CredentialProvider.CLEAR_TEXT_FALLBACK,
+        CommonConfigurationKeysPublic.
+            HADOOP_SECURITY_CREDENTIAL_CLEAR_TEXT_FALLBACK_DEFAULT)) {
       String passStr = get(name);
       if (passStr != null) {
         pass = passStr.toCharArray();
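
A hedged sketch of what the new default constant governs: Configuration.getPassword() consults credential providers first and only falls back to the clear-text value stored in the Configuration when hadoop.security.credential.clear-text-fallback is true. The property name below is illustrative.

import org.apache.hadoop.conf.Configuration;

public class ClearTextFallbackExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical key used only for illustration.
    conf.set("ssl.server.keystore.password", "changeit");

    // With the default (fallback enabled), getPassword() may fall back
    // to the clear-text value stored directly in the Configuration.
    char[] viaFallback = conf.getPassword("ssl.server.keystore.password");

    // Disabling the fallback restricts lookups to credential providers,
    // so a key with no provider entry resolves to null.
    conf.setBoolean("hadoop.security.credential.clear-text-fallback", false);
    char[] withoutFallback = conf.getPassword("ssl.server.keystore.password");

    System.out.println("fallback enabled:  "
        + (viaFallback == null ? null : new String(viaFallback)));
    System.out.println("fallback disabled: " + withoutFallback);
  }
}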

+ 10 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java

@@ -117,17 +117,24 @@ public abstract class ReconfigurableBase
       final Collection<PropertyChange> changes =
           parent.getChangedProperties(newConf, oldConf);
       Map<PropertyChange, Optional<String>> results = Maps.newHashMap();
+      ConfigRedactor oldRedactor = new ConfigRedactor(oldConf);
+      ConfigRedactor newRedactor = new ConfigRedactor(newConf);
       for (PropertyChange change : changes) {
         String errorMessage = null;
+        String oldValRedacted = oldRedactor.redact(change.prop, change.oldVal);
+        String newValRedacted = newRedactor.redact(change.prop, change.newVal);
         if (!parent.isPropertyReconfigurable(change.prop)) {
           LOG.info(String.format(
               "Property %s is not configurable: old value: %s, new value: %s",
-              change.prop, change.oldVal, change.newVal));
+              change.prop,
+              oldValRedacted,
+              newValRedacted));
           continue;
         }
         LOG.info("Change property: " + change.prop + " from \""
-            + ((change.oldVal == null) ? "<default>" : change.oldVal)
-            + "\" to \"" + ((change.newVal == null) ? "<default>" : change.newVal)
+            + ((change.oldVal == null) ? "<default>" : oldValRedacted)
+            + "\" to \""
+            + ((change.newVal == null) ? "<default>" : newValRedacted)
             + "\".");
         try {
           String effectiveValue =

+ 9 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java

@@ -23,6 +23,7 @@ import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.OutputStreamWriter;
+import java.nio.charset.StandardCharsets;
 import java.security.NoSuchAlgorithmException;
 import java.util.Collections;
 import java.util.Date;
@@ -32,7 +33,6 @@ import java.util.Map;
 
 import com.google.gson.stream.JsonReader;
 import com.google.gson.stream.JsonWriter;
-import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -209,7 +209,7 @@ public abstract class KeyProvider {
     protected byte[] serialize() throws IOException {
       ByteArrayOutputStream buffer = new ByteArrayOutputStream();
       JsonWriter writer = new JsonWriter(
-          new OutputStreamWriter(buffer, Charsets.UTF_8));
+          new OutputStreamWriter(buffer, StandardCharsets.UTF_8));
       try {
         writer.beginObject();
         if (cipher != null) {
@@ -252,8 +252,9 @@ public abstract class KeyProvider {
       int versions = 0;
       String description = null;
       Map<String, String> attributes = null;
-      JsonReader reader = new JsonReader(new InputStreamReader
-        (new ByteArrayInputStream(bytes), Charsets.UTF_8));
+      JsonReader reader =
+          new JsonReader(new InputStreamReader(new ByteArrayInputStream(bytes),
+              StandardCharsets.UTF_8));
       try {
         reader.beginObject();
         while (reader.hasNext()) {
@@ -556,6 +557,10 @@ public abstract class KeyProvider {
   public KeyVersion rollNewVersion(String name) throws NoSuchAlgorithmException,
                                                        IOException {
     Metadata meta = getMetadata(name);
+    if (meta == null) {
+      throw new IOException("Can't find Metadata for key " + name);
+    }
+
     byte[] material = generateKey(meta.getBitLength(), meta.getCipher());
     return rollNewVersion(name, material);
   }
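
A small caller-side sketch of the effect of the new null check: rolling a key that has no metadata now surfaces as an IOException rather than a NullPointerException from meta.getBitLength(). The key name is hypothetical, and the snippet assumes at least one provider is configured via hadoop.security.key.provider.path.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class RollVersionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    for (KeyProvider provider : KeyProviderFactory.getProviders(conf)) {
      try {
        // With the guard above, rolling a missing key fails fast with a
        // descriptive IOException.
        provider.rollNewVersion("no-such-key");
      } catch (IOException e) {
        System.err.println("Cannot roll key: " + e.getMessage());
      }
    }
  }
}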

+ 23 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java

@@ -389,24 +389,39 @@ public class KeyProviderCryptoExtension extends
   }
 
   /**
-   * Creates a <code>KeyProviderCryptoExtension</code> using a given 
+   * Creates a <code>KeyProviderCryptoExtension</code> using a given
    * {@link KeyProvider}.
    * <p/>
-   * If the given <code>KeyProvider</code> implements the 
+   * If the given <code>KeyProvider</code> implements the
    * {@link CryptoExtension} interface the <code>KeyProvider</code> itself
-   * will provide the extension functionality, otherwise a default extension
+   * will provide the extension functionality.
+   * If the given <code>KeyProvider</code> implements the
+   * {@link KeyProviderExtension} interface and the KeyProvider being
+   * extended by the <code>KeyProvider</code> implements the
+   * {@link CryptoExtension} interface, the KeyProvider being extended will
+   * provide the extension functionality. Otherwise, a default extension
    * implementation will be used.
-   * 
-   * @param keyProvider <code>KeyProvider</code> to use to create the 
+   *
+   * @param keyProvider <code>KeyProvider</code> to use to create the
    * <code>KeyProviderCryptoExtension</code> extension.
    * @return a <code>KeyProviderCryptoExtension</code> instance using the
    * given <code>KeyProvider</code>.
    */
   public static KeyProviderCryptoExtension createKeyProviderCryptoExtension(
       KeyProvider keyProvider) {
-    CryptoExtension cryptoExtension = (keyProvider instanceof CryptoExtension)
-                         ? (CryptoExtension) keyProvider
-                         : new DefaultCryptoExtension(keyProvider);
+    CryptoExtension cryptoExtension = null;
+    if (keyProvider instanceof CryptoExtension) {
+      cryptoExtension = (CryptoExtension) keyProvider;
+    } else if (keyProvider instanceof KeyProviderExtension &&
+            ((KeyProviderExtension)keyProvider).getKeyProvider() instanceof
+                    KeyProviderCryptoExtension.CryptoExtension) {
+      KeyProviderExtension keyProviderExtension =
+              (KeyProviderExtension)keyProvider;
+      cryptoExtension =
+              (CryptoExtension)keyProviderExtension.getKeyProvider();
+    } else {
+      cryptoExtension = new DefaultCryptoExtension(keyProvider);
+    }
     return new KeyProviderCryptoExtension(keyProvider, cryptoExtension);
   }
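
For illustration, a hedged sketch of calling the updated factory method: when the configured provider is a KeyProviderExtension wrapping a CryptoExtension (for example a caching wrapper around KMSClientProvider), the wrapped provider now supplies the crypto operations instead of DefaultCryptoExtension. The key name is made up and must already exist for generateEncryptedKey to succeed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class CryptoExtensionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    for (KeyProvider provider : KeyProviderFactory.getProviders(conf)) {
      KeyProviderCryptoExtension kpce = KeyProviderCryptoExtension
          .createKeyProviderCryptoExtension(provider);
      // "my-ez-key" is a hypothetical encryption-zone key name.
      EncryptedKeyVersion eek = kpce.generateEncryptedKey("my-ez-key");
      System.out.println("Generated EEK against key version "
          + eek.getEncryptionKeyVersionName());
    }
  }
}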
 

+ 23 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java

@@ -18,7 +18,6 @@
 package org.apache.hadoop.crypto.key.kms;
 
 import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
@@ -38,6 +37,7 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenRenewer;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
@@ -64,6 +64,7 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
 import java.security.GeneralSecurityException;
 import java.security.NoSuchAlgorithmException;
 import java.security.PrivilegedExceptionAction;
@@ -270,7 +271,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
   }
 
   private static void writeJson(Map map, OutputStream os) throws IOException {
-    Writer writer = new OutputStreamWriter(os, Charsets.UTF_8);
+    Writer writer = new OutputStreamWriter(os, StandardCharsets.UTF_8);
     ObjectMapper jsonMapper = new ObjectMapper();
     jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, map);
   }
@@ -536,10 +537,12 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
           UserGroupInformation.AuthenticationMethod.PROXY)
                               ? currentUgi.getShortUserName() : null;
 
-      // check and renew TGT to handle potential expiration
-      actualUgi.checkTGTAndReloginFromKeytab();
-      // creating the HTTP connection using the current UGI at constructor time
-      conn = actualUgi.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
+      // If current UGI contains kms-dt && is not proxy, doAs it to use its dt.
+      // Otherwise, create the HTTP connection using the UGI at constructor time
+      UserGroupInformation ugiToUse =
+          (currentUgiContainsKmsDt() && doAsUser == null) ?
+              currentUgi : actualUgi;
+      conn = ugiToUse.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
         @Override
         public HttpURLConnection run() throws Exception {
           DelegationTokenAuthenticatedURL authUrl =
@@ -1043,6 +1046,20 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     return dtService;
   }
 
+  private boolean currentUgiContainsKmsDt() throws IOException {
+    // Add existing credentials from current UGI, since provider is cached.
+    Credentials creds = UserGroupInformation.getCurrentUser().
+        getCredentials();
+    if (!creds.getAllTokens().isEmpty()) {
+      org.apache.hadoop.security.token.Token<? extends TokenIdentifier>
+          dToken = creds.getToken(getDelegationTokenService());
+      if (dToken != null) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   /**
    * Shutdown valueQueue executor threads
    */
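
To see when the new code path applies, a hedged sketch of how a KMS delegation token ends up in the current UGI's credentials so that currentUgiContainsKmsDt() returns true; the KMS address and renewer name are placeholders.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public class KmsDelegationTokenExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical KMS address; real deployments point
    // hadoop.security.key.provider.path at a kms:// URI.
    KeyProvider provider = KeyProviderFactory.get(
        new URI("kms://http@kms.example.com:9600/kms"), conf);

    // Fetch a KMS delegation token and attach it to the current UGI.
    Credentials creds = new Credentials();
    KeyProviderDelegationTokenExtension
        .createKeyProviderDelegationTokenExtension(provider)
        .addDelegationTokens("renewer", creds);
    UserGroupInformation.getCurrentUser().addCredentials(creds);

    // Subsequent KMSClientProvider calls made by this UGI (and not via a
    // proxy user) will find the kms-dt in its credentials and doAs() the
    // current user instead of the UGI captured at construction time.
  }
}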

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersi
 import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -93,8 +94,8 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
       try {
         return op.call(provider);
       } catch (IOException ioe) {
-        LOG.warn("KMS provider at [{}] threw an IOException [{}]!!",
-            provider.getKMSUrl(), ioe.getMessage());
+        LOG.warn("KMS provider at [{}] threw an IOException!! {}",
+            provider.getKMSUrl(), StringUtils.stringifyException(ioe));
         ex = ioe;
       } catch (Exception e) {
         if (e instanceof RuntimeException) {

+ 23 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CachingGetSpaceUsed.java

@@ -25,6 +25,7 @@ import org.slf4j.LoggerFactory;
 import java.io.Closeable;
 import java.io.File;
 import java.io.IOException;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -43,6 +44,7 @@ public abstract class CachingGetSpaceUsed implements Closeable, GetSpaceUsed {
   protected final AtomicLong used = new AtomicLong();
   private final AtomicBoolean running = new AtomicBoolean(true);
   private final long refreshInterval;
+  private final long jitter;
   private final String dirPath;
   private Thread refreshUsed;
 
@@ -52,7 +54,10 @@ public abstract class CachingGetSpaceUsed implements Closeable, GetSpaceUsed {
    */
   public CachingGetSpaceUsed(CachingGetSpaceUsed.Builder builder)
       throws IOException {
-    this(builder.getPath(), builder.getInterval(), builder.getInitialUsed());
+    this(builder.getPath(),
+        builder.getInterval(),
+        builder.getJitter(),
+        builder.getInitialUsed());
   }
 
   /**
@@ -65,10 +70,12 @@ public abstract class CachingGetSpaceUsed implements Closeable, GetSpaceUsed {
    */
   CachingGetSpaceUsed(File path,
                       long interval,
+                      long jitter,
                       long initialUsed) throws IOException {
-    dirPath = path.getCanonicalPath();
-    refreshInterval = interval;
-    used.set(initialUsed);
+    this.dirPath = path.getCanonicalPath();
+    this.refreshInterval = interval;
+    this.jitter = jitter;
+    this.used.set(initialUsed);
   }
 
   void init() {
@@ -155,7 +162,18 @@ public abstract class CachingGetSpaceUsed implements Closeable, GetSpaceUsed {
     public void run() {
       while (spaceUsed.running()) {
         try {
-          Thread.sleep(spaceUsed.getRefreshInterval());
+          long refreshInterval = spaceUsed.refreshInterval;
+
+          if (spaceUsed.jitter > 0) {
+            long jitter = spaceUsed.jitter;
+            // add/subtract the jitter.
+            refreshInterval +=
+                ThreadLocalRandom.current()
+                                 .nextLong(-jitter, jitter);
+          }
+          // Make sure that after the jitter we didn't end up at 0.
+          refreshInterval = Math.max(refreshInterval, 1);
+          Thread.sleep(refreshInterval);
           // update the used variable
           spaceUsed.refresh();
         } catch (InterruptedException e) {
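
Standing alone, the jitter computation added above looks like the sketch below; the interval and jitter values are illustrative.

import java.util.concurrent.ThreadLocalRandom;

public class JitterSketch {
  public static void main(String[] args) {
    long refreshInterval = 600_000L; // base interval in ms (illustrative)
    long jitter = 60_000L;           // +/- 60s of jitter (illustrative)

    long sleepMs = refreshInterval;
    if (jitter > 0) {
      // nextLong(origin, bound) draws uniformly from [-jitter, jitter).
      sleepMs += ThreadLocalRandom.current().nextLong(-jitter, jitter);
    }
    // Guard against degenerate configs where jitter >= interval.
    sleepMs = Math.max(sleepMs, 1);
    System.out.println("next du refresh in " + sleepMs + " ms");
  }
}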

+ 13 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -207,6 +207,9 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String
       HADOOP_SECURITY_SERVICE_AUTHORIZATION_DATANODE_LIFELINE =
           "security.datanode.lifeline.protocol.acl";
+  public static final String
+      HADOOP_SECURITY_SERVICE_AUTHORIZATION_RECONFIGURATION =
+      "security.reconfiguration.protocol.acl";
   public static final String 
   SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl";
   public static final String 
@@ -228,12 +231,20 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final boolean HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT =
       true;
 
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml .</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SECURITY_DNS_LOG_SLOW_LOOKUPS_ENABLED_KEY =
       "hadoop.security.dns.log-slow-lookups.enabled";
   public static final boolean
       HADOOP_SECURITY_DNS_LOG_SLOW_LOOKUPS_ENABLED_DEFAULT = false;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml .</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String
       HADOOP_SECURITY_DNS_LOG_SLOW_LOOKUPS_THRESHOLD_MS_KEY =
       "hadoop.security.dns.log-slow-lookups.threshold.ms";

+ 419 - 70
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -37,26 +37,46 @@ import org.apache.hadoop.crypto.OpensslAesCtrCryptoCodec;
 public class CommonConfigurationKeysPublic {
   
   // The Keys
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY =
     "net.topology.script.number.args";
   /** Default value for NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_KEY */
   public static final int     NET_TOPOLOGY_SCRIPT_NUMBER_ARGS_DEFAULT = 100;
 
   //FS keys
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  FS_DEFAULT_NAME_KEY = "fs.defaultFS";
   /** Default value for FS_DEFAULT_NAME_KEY */
   public static final String  FS_DEFAULT_NAME_DEFAULT = "file:///";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  FS_DF_INTERVAL_KEY = "fs.df.interval"; 
   /** Default value for FS_DF_INTERVAL_KEY */
   public static final long    FS_DF_INTERVAL_DEFAULT = 60000;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  FS_DU_INTERVAL_KEY = "fs.du.interval";
   /** Default value for FS_DU_INTERVAL_KEY */
   public static final long    FS_DU_INTERVAL_DEFAULT = 600000;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY =
     "fs.client.resolve.remote.symlinks";
   /** Default value for FS_CLIENT_RESOLVE_REMOTE_SYMLINKS_KEY */
@@ -64,22 +84,42 @@ public class CommonConfigurationKeysPublic {
 
 
   //Defaults are not specified for following keys
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY =
     "net.topology.script.file.name";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY =
     "net.topology.node.switch.mapping.impl";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  NET_TOPOLOGY_IMPL_KEY =
     "net.topology.impl";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY =
     "net.topology.table.file.name";
   public static final String NET_DEPENDENCY_SCRIPT_FILE_NAME_KEY = 
     "net.topology.dependency.script.file.name";
 
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  FS_TRASH_CHECKPOINT_INTERVAL_KEY =
     "fs.trash.checkpoint.interval";
   /** Default value for FS_TRASH_CHECKPOINT_INTERVAL_KEY */
@@ -97,32 +137,64 @@ public class CommonConfigurationKeysPublic {
   //
   /** Not used anywhere, looks like default value for FS_LOCAL_BLOCK_SIZE */
   public static final long    FS_LOCAL_BLOCK_SIZE_DEFAULT = 32*1024*1024;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  FS_AUTOMATIC_CLOSE_KEY = "fs.automatic.close";
   /** Default value for FS_AUTOMATIC_CLOSE_KEY */
   public static final boolean FS_AUTOMATIC_CLOSE_DEFAULT = true;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  FS_FILE_IMPL_KEY = "fs.file.impl";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  FS_FTP_HOST_KEY = "fs.ftp.host";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  FS_FTP_HOST_PORT_KEY = "fs.ftp.host.port";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  FS_TRASH_INTERVAL_KEY = "fs.trash.interval";
   /** Default value for FS_TRASH_INTERVAL_KEY */
   public static final long    FS_TRASH_INTERVAL_DEFAULT = 0;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a>. */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED =
       "fs.client.resolve.topology.enabled";
   /** Default value for FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED. */
   public static final boolean FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED_DEFAULT =
       false;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IO_MAPFILE_BLOOM_SIZE_KEY =
     "io.mapfile.bloom.size";
   /** Default value for IO_MAPFILE_BLOOM_SIZE_KEY */
   public static final int     IO_MAPFILE_BLOOM_SIZE_DEFAULT = 1024*1024;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IO_MAPFILE_BLOOM_ERROR_RATE_KEY =
     "io.mapfile.bloom.error.rate" ;
   /** Default value for IO_MAPFILE_BLOOM_ERROR_RATE_KEY */
@@ -130,26 +202,46 @@ public class CommonConfigurationKeysPublic {
   /** Codec class that implements Lzo compression algorithm */
   public static final String  IO_COMPRESSION_CODEC_LZO_CLASS_KEY =
     "io.compression.codec.lzo.class";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IO_MAP_INDEX_INTERVAL_KEY =
     "io.map.index.interval";
   /** Default value for IO_MAP_INDEX_INTERVAL_DEFAULT */
   public static final int     IO_MAP_INDEX_INTERVAL_DEFAULT = 128;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IO_MAP_INDEX_SKIP_KEY = "io.map.index.skip";
   /** Default value for IO_MAP_INDEX_SKIP_KEY */
   public static final int     IO_MAP_INDEX_SKIP_DEFAULT = 0;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IO_SEQFILE_COMPRESS_BLOCKSIZE_KEY =
     "io.seqfile.compress.blocksize";
   /** Default value for IO_SEQFILE_COMPRESS_BLOCKSIZE_KEY */
   public static final int     IO_SEQFILE_COMPRESS_BLOCKSIZE_DEFAULT = 1000000;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IO_FILE_BUFFER_SIZE_KEY =
     "io.file.buffer.size";
   /** Default value for IO_FILE_BUFFER_SIZE_KEY */
   public static final int     IO_FILE_BUFFER_SIZE_DEFAULT = 4096;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IO_SKIP_CHECKSUM_ERRORS_KEY =
     "io.skip.checksum.errors";
   /** Default value for IO_SKIP_CHECKSUM_ERRORS_KEY */
@@ -170,19 +262,35 @@ public class CommonConfigurationKeysPublic {
   public static final String  IO_SORT_FACTOR_KEY = "io.sort.factor";
   /** Default value for IO_SORT_FACTOR_DEFAULT */
   public static final int     IO_SORT_FACTOR_DEFAULT = 100;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IO_SERIALIZATIONS_KEY = "io.serializations";
 
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  TFILE_IO_CHUNK_SIZE_KEY = "tfile.io.chunk.size";
   /** Default value for TFILE_IO_CHUNK_SIZE_DEFAULT */
   public static final int     TFILE_IO_CHUNK_SIZE_DEFAULT = 1024*1024;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  TFILE_FS_INPUT_BUFFER_SIZE_KEY =
     "tfile.fs.input.buffer.size";
   /** Default value for TFILE_FS_INPUT_BUFFER_SIZE_KEY */
   public static final int     TFILE_FS_INPUT_BUFFER_SIZE_DEFAULT = 256*1024;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  TFILE_FS_OUTPUT_BUFFER_SIZE_KEY =
     "tfile.fs.output.buffer.size";
   /** Default value for TFILE_FS_OUTPUT_BUFFER_SIZE_KEY */
@@ -199,32 +307,56 @@ public class CommonConfigurationKeysPublic {
   public static final int     HADOOP_CALLER_CONTEXT_SIGNATURE_MAX_SIZE_DEFAULT =
       40;
 
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY =
     "ipc.client.connection.maxidletime";
   /** Default value for IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY */
   public static final int     IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT = 10000; // 10s
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IPC_CLIENT_CONNECT_TIMEOUT_KEY =
     "ipc.client.connect.timeout";
   /** Default value for IPC_CLIENT_CONNECT_TIMEOUT_KEY */
   public static final int     IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT = 20000; // 20s
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IPC_CLIENT_CONNECT_MAX_RETRIES_KEY =
     "ipc.client.connect.max.retries";
   /** Default value for IPC_CLIENT_CONNECT_MAX_RETRIES_KEY */
   public static final int     IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT = 10;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY =
       "ipc.client.connect.retry.interval";
   /** Default value for IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY */
   public static final int     IPC_CLIENT_CONNECT_RETRY_INTERVAL_DEFAULT = 1000;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY =
     "ipc.client.connect.max.retries.on.timeouts";
   /** Default value for IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY */
   public static final int  IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 45;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IPC_CLIENT_TCPNODELAY_KEY =
     "ipc.client.tcpnodelay";
   /** Default value for IPC_CLIENT_TCPNODELAY_KEY */
@@ -233,26 +365,46 @@ public class CommonConfigurationKeysPublic {
   public static final String   IPC_CLIENT_LOW_LATENCY = "ipc.client.low-latency";
   /** Default value of IPC_CLIENT_LOW_LATENCY */
   public static final boolean  IPC_CLIENT_LOW_LATENCY_DEFAULT = false;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IPC_SERVER_LISTEN_QUEUE_SIZE_KEY =
     "ipc.server.listen.queue.size";
   /** Default value for IPC_SERVER_LISTEN_QUEUE_SIZE_KEY */
   public static final int     IPC_SERVER_LISTEN_QUEUE_SIZE_DEFAULT = 128;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IPC_CLIENT_KILL_MAX_KEY = "ipc.client.kill.max";
   /** Default value for IPC_CLIENT_KILL_MAX_KEY */
   public static final int     IPC_CLIENT_KILL_MAX_DEFAULT = 10;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IPC_CLIENT_IDLETHRESHOLD_KEY =
     "ipc.client.idlethreshold";
   /** Default value for IPC_CLIENT_IDLETHRESHOLD_DEFAULT */
   public static final int     IPC_CLIENT_IDLETHRESHOLD_DEFAULT = 4000;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IPC_SERVER_TCPNODELAY_KEY =
     "ipc.server.tcpnodelay";
   /** Default value for IPC_SERVER_TCPNODELAY_KEY */
   public static final boolean IPC_SERVER_TCPNODELAY_DEFAULT = true;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  IPC_SERVER_MAX_CONNECTIONS_KEY =
     "ipc.server.max.connections";
   /** Default value for IPC_SERVER_MAX_CONNECTIONS_KEY */
@@ -263,70 +415,175 @@ public class CommonConfigurationKeysPublic {
                                                 "ipc.server.log.slow.rpc";
   public static final boolean IPC_SERVER_LOG_SLOW_RPC_DEFAULT = false;
 
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY =
     "hadoop.rpc.socket.factory.class.default";
   public static final String  HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_DEFAULT =
     "org.apache.hadoop.net.StandardSocketFactory";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  HADOOP_SOCKS_SERVER_KEY = "hadoop.socks.server";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  HADOOP_UTIL_HASH_TYPE_KEY =
     "hadoop.util.hash.type";
   /** Default value for HADOOP_UTIL_HASH_TYPE_KEY */
   public static final String  HADOOP_UTIL_HASH_TYPE_DEFAULT = "murmur";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  HADOOP_SECURITY_GROUP_MAPPING =
     "hadoop.security.group.mapping";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  HADOOP_SECURITY_GROUPS_CACHE_SECS =
     "hadoop.security.groups.cache.secs";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final long HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT =
     300;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS =
     "hadoop.security.groups.negative-cache.secs";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final long HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS_DEFAULT =
     30;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS =
     "hadoop.security.groups.cache.warn.after.ms";
   public static final long HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT =
     5000;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final String HADOOP_SECURITY_GROUPS_CACHE_BACKGROUND_RELOAD =
+      "hadoop.security.groups.cache.background.reload";
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final boolean
+      HADOOP_SECURITY_GROUPS_CACHE_BACKGROUND_RELOAD_DEFAULT = false;
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final String
+      HADOOP_SECURITY_GROUPS_CACHE_BACKGROUND_RELOAD_THREADS =
+          "hadoop.security.groups.cache.background.reload.threads";
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final int
+      HADOOP_SECURITY_GROUPS_CACHE_BACKGROUND_RELOAD_THREADS_DEFAULT = 3;
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  HADOOP_SECURITY_AUTHENTICATION =
     "hadoop.security.authentication";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SECURITY_AUTHORIZATION =
     "hadoop.security.authorization";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN =
     "hadoop.security.instrumentation.requires.admin";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  HADOOP_SECURITY_SERVICE_USER_NAME_KEY =
     "hadoop.security.service.user.name.key";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  HADOOP_SECURITY_AUTH_TO_LOCAL =
     "hadoop.security.auth_to_local";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SECURITY_DNS_INTERFACE_KEY =
     "hadoop.security.dns.interface";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SECURITY_DNS_NAMESERVER_KEY =
     "hadoop.security.dns.nameserver";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_TOKEN_FILES =
       "hadoop.token.files";
 
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN =
           "hadoop.kerberos.min.seconds.before.relogin";
   /** Default value for HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN */
   public static final int HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT =
           60;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  HADOOP_RPC_PROTECTION =
     "hadoop.rpc.protection";
   /** Class to override Sasl Properties for a connection */
@@ -342,15 +599,27 @@ public class CommonConfigurationKeysPublic {
       HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_AES_CTR_NOPADDING_DEFAULT =
       OpensslAesCtrCryptoCodec.class.getName() + "," +
           JceAesCtrCryptoCodec.class.getName();
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY =
     "hadoop.security.crypto.cipher.suite";
   public static final String HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT = 
     "AES/CTR/NoPadding";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY =
     "hadoop.security.crypto.jce.provider";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY = 
     "hadoop.security.crypto.buffer.size";
   /** Defalt value for HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY */
@@ -360,63 +629,143 @@ public class CommonConfigurationKeysPublic {
     "hadoop.security.impersonation.provider.class";
 
   //  <!-- KMSClientProvider configurations -->
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String KMS_CLIENT_ENC_KEY_CACHE_SIZE =
       "hadoop.security.kms.client.encrypted.key.cache.size";
   /** Default value for KMS_CLIENT_ENC_KEY_CACHE_SIZE */
   public static final int KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT = 500;
 
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK =
       "hadoop.security.kms.client.encrypted.key.cache.low-watermark";
   /** Default value for KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK */
   public static final float KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT =
       0.3f;
 
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS =
       "hadoop.security.kms.client.encrypted.key.cache.num.refill.threads";
   /** Default value for KMS_CLIENT_ENC_KEY_NUM_REFILL_THREADS */
   public static final int KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT =
       2;
 
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS =
       "hadoop.security.kms.client.encrypted.key.cache.expiry";
   /** Default value for KMS_CLIENT_ENC_KEY_CACHE_EXPIRY (12 hrs)*/
   public static final int KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT = 43200000;
 
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY = 
     "hadoop.security.java.secure.random.algorithm";
   /** Defalt value for HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY */
   public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT = 
     "SHA1PRNG";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY = 
     "hadoop.security.secure.random.impl";
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY = 
     "hadoop.security.random.device.file.path";
   public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT = 
     "/dev/urandom";
 
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_KEY =
       "hadoop.shell.missing.defaultFs.warning";
   public static final boolean HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_DEFAULT =
       false;
 
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES =
       "hadoop.shell.safely.delete.limit.num.files";
   public static final long HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES_DEFAULT =
       100;
 
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String HADOOP_HTTP_LOGS_ENABLED =
       "hadoop.http.logs.enabled";
   /** Defalt value for HADOOP_HTTP_LOGS_ENABLED */
   public static final boolean HADOOP_HTTP_LOGS_ENABLED_DEFAULT = true;
+
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final String HADOOP_SECURITY_CREDENTIAL_PROVIDER_PATH =
+      "hadoop.security.credential.provider.path";
+
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final String HADOOP_SECURITY_CREDENTIAL_CLEAR_TEXT_FALLBACK =
+      "hadoop.security.credential.clear-text-fallback";
+  public static final boolean
+      HADOOP_SECURITY_CREDENTIAL_CLEAR_TEXT_FALLBACK_DEFAULT = true;
+
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final String  HADOOP_SECURITY_CREDENTIAL_PASSWORD_FILE_KEY =
+      "hadoop.security.credstore.java-keystore-provider.password-file";
+
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final String HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS =
+      "hadoop.security.sensitive-config-keys";
+  public static final String HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS_DEFAULT =
+      "password$" + "," +
+      "fs.s3.*[Ss]ecret.?[Kk]ey" + "," +
+      "fs.azure\\.account.key.*" + "," +
+      "dfs.webhdfs.oauth2.[a-z]+.token" + "," +
+      HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS;
 }
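
A quick standalone check of what the new default pattern list matches: the patterns mirror HADOOP_SECURITY_SENSITIVE_CONFIG_KEYS_DEFAULT above (minus the self-referencing last entry), and the property names are hypothetical.

import java.util.regex.Pattern;

public class SensitiveKeyPatternCheck {
  public static void main(String[] args) {
    String[] patterns = {
        "password$",
        "fs.s3.*[Ss]ecret.?[Kk]ey",
        "fs.azure\\.account.key.*",
        "dfs.webhdfs.oauth2.[a-z]+.token"
    };
    String[] keys = {
        "ssl.server.keystore.password",
        "fs.s3a.secret.key",
        "fs.azure.account.key.myaccount.blob.core.windows.net",
        "dfs.datanode.data.dir"
    };
    for (String key : keys) {
      boolean sensitive = false;
      for (String p : patterns) {
        // Same semantics as ConfigRedactor: a find() anywhere in the key.
        if (Pattern.compile(p).matcher(key).find()) {
          sensitive = true;
          break;
        }
      }
      System.out.println(key + " -> " + (sensitive ? "<redacted>" : "shown"));
    }
  }
}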
 

+ 7 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DU.java

@@ -34,12 +34,16 @@ public class DU extends CachingGetSpaceUsed {
   private DUShell duShell;
 
   @VisibleForTesting
-   public DU(File path, long interval, long initialUsed) throws IOException {
-    super(path, interval, initialUsed);
+  public DU(File path, long interval, long jitter, long initialUsed)
+      throws IOException {
+    super(path, interval, jitter, initialUsed);
   }
 
   public DU(CachingGetSpaceUsed.Builder builder) throws IOException {
-    this(builder.getPath(), builder.getInterval(), builder.getInitialUsed());
+    this(builder.getPath(),
+        builder.getInterval(),
+        builder.getJitter(),
+        builder.getInitialUsed());
   }
 
   @Override

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.util.Progressable;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public abstract class DelegateToFileSystem extends AbstractFileSystem {
+  private static final int DELEGATE_TO_FS_DEFAULT_PORT = -1;
   protected final FileSystem fsImpl;
   
   protected DelegateToFileSystem(URI theUri, FileSystem theFsImpl,
@@ -64,7 +65,7 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
    */
   private static int getDefaultPortIfDefined(FileSystem theFsImpl) {
     int defaultPort = theFsImpl.getDefaultPort();
-    return defaultPort != 0 ? defaultPort : -1;
+    return defaultPort != 0 ? defaultPort : DELEGATE_TO_FS_DEFAULT_PORT;
   }
 
   @Override
@@ -159,7 +160,7 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
 
   @Override
   public int getUriDefaultPort() {
-    return 0;
+    return DELEGATE_TO_FS_DEFAULT_PORT;
   }
 
   @Override

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/EmptyStorageStatistics.java

@@ -29,15 +29,22 @@ class EmptyStorageStatistics extends StorageStatistics {
     super(name);
   }
 
+  @Override
   public Iterator<LongStatistic> getLongStatistics() {
     return Collections.emptyIterator();
   }
 
+  @Override
   public Long getLong(String key) {
     return null;
   }
 
+  @Override
   public boolean isTracked(String key) {
     return false;
   }
+
+  @Override
+  public void reset() {
+  }
 }

+ 4 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java

@@ -105,7 +105,10 @@ public abstract class FSInputStream extends InputStream
     Preconditions.checkArgument(buffer != null, "Null buffer");
     if (buffer.length - offset < length) {
       throw new IndexOutOfBoundsException(
-          FSExceptionMessages.TOO_MANY_BYTES_FOR_DEST_BUFFER);
+          FSExceptionMessages.TOO_MANY_BYTES_FOR_DEST_BUFFER
+              + ": request length=" + length
+              + ", with offset ="+ offset
+              + "; buffer capacity =" + (buffer.length - offset));
     }
   }
 

+ 9 - 11
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -215,20 +215,18 @@ public class FileContext {
    * The FileContext is defined by.
    *  1) defaultFS (slash)
    *  2) wd
-   *  3) umask
+   *  3) umask (obtained via FsPermission.getUMask(conf))
    */   
   private final AbstractFileSystem defaultFS; //default FS for this FileContext.
   private Path workingDir;          // Fully qualified
-  private FsPermission umask;
   private final Configuration conf;
   private final UserGroupInformation ugi;
   final boolean resolveSymlinks;
   private final Tracer tracer;
 
   private FileContext(final AbstractFileSystem defFs,
-    final FsPermission theUmask, final Configuration aConf) {
+                      final Configuration aConf) {
     defaultFS = defFs;
-    umask = FsPermission.getUMask(aConf);
     conf = aConf;
     tracer = FsTracer.get(aConf);
     try {
@@ -305,7 +303,7 @@ public class FileContext {
    * 
    * @throws UnsupportedFileSystemException If the file system for
    *           <code>absOrFqPath</code> is not supported.
-   * @throws IOExcepton If the file system for <code>absOrFqPath</code> could
+   * @throws IOException If the file system for <code>absOrFqPath</code> could
    *         not be instantiated.
    */
   protected AbstractFileSystem getFSofPath(final Path absOrFqPath)
@@ -354,7 +352,7 @@ public class FileContext {
    */
   public static FileContext getFileContext(final AbstractFileSystem defFS,
                     final Configuration aConf) {
-    return new FileContext(defFS, FsPermission.getUMask(aConf), aConf);
+    return new FileContext(defFS, aConf);
   }
   
   /**
@@ -564,7 +562,7 @@ public class FileContext {
    * @return the umask of this FileContext
    */
   public FsPermission getUMask() {
-    return umask;
+    return FsPermission.getUMask(conf);
   }
   
   /**
@@ -572,7 +570,7 @@ public class FileContext {
    * @param newUmask  the new umask
    */
   public void setUMask(final FsPermission newUmask) {
-    umask = newUmask;
+    FsPermission.setUMask(conf, newUmask);
   }
   
   
@@ -673,7 +671,7 @@ public class FileContext {
     CreateOpts.Perms permOpt = CreateOpts.getOpt(CreateOpts.Perms.class, opts);
     FsPermission permission = (permOpt != null) ? permOpt.getValue() :
                                       FILE_DEFAULT_PERM;
-    permission = permission.applyUMask(umask);
+    permission = permission.applyUMask(getUMask());
 
     final CreateOpts[] updatedOpts = 
                       CreateOpts.setOpt(CreateOpts.perms(permission), opts);
@@ -720,7 +718,7 @@ public class FileContext {
       IOException {
     final Path absDir = fixRelativePart(dir);
     final FsPermission absFerms = (permission == null ? 
-          FsPermission.getDirDefault() : permission).applyUMask(umask);
+          FsPermission.getDirDefault() : permission).applyUMask(getUMask());
     new FSLinkResolver<Void>() {
       @Override
       public Void next(final AbstractFileSystem fs, final Path p) 
@@ -2715,7 +2713,7 @@ public class FileContext {
   /**
    * Query the effective storage policy ID for the given file or directory.
    *
-   * @param src file or directory path.
+   * @param path file or directory path.
   * @return storage policy for the given file.
    * @throws IOException
    */
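
A minimal sketch (assuming a shared Configuration instance) of what the change above means in practice: the umask is no longer cached per FileContext, so getUMask()/setUMask() now read and write FsPermission.getUMask(conf) directly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class UmaskDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileContext fc = FileContext.getFileContext(conf);
        // Stored in the Configuration now, not in a FileContext field.
        fc.setUMask(new FsPermission((short) 022));
        System.out.println(fc.getUMask());               // reads from conf
        System.out.println(FsPermission.getUMask(conf)); // same value
      }
    }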

+ 31 - 30
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -26,7 +26,6 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
@@ -935,13 +934,13 @@ public abstract class FileSystem extends Configured implements Closeable {
    * Create an FSDataOutputStream at the indicated Path with write-progress
    * reporting.
    * @param f the file name to open
-   * @param permission
+   * @param permission file permission
    * @param overwrite if a file with this name already exists, then if true,
    *   the file will be overwritten, and if false an error will be thrown.
    * @param bufferSize the size of the buffer to be used.
    * @param replication required block replication for the file.
-   * @param blockSize
-   * @param progress
+   * @param blockSize block size
+   * @param progress the progress reporter
    * @throws IOException
    * @see #setPermission(Path, FsPermission)
    */
@@ -957,12 +956,12 @@ public abstract class FileSystem extends Configured implements Closeable {
    * Create an FSDataOutputStream at the indicated Path with write-progress
    * reporting.
    * @param f the file name to open
-   * @param permission
+   * @param permission file permission
    * @param flags {@link CreateFlag}s to use for this stream.
    * @param bufferSize the size of the buffer to be used.
    * @param replication required block replication for the file.
-   * @param blockSize
-   * @param progress
+   * @param blockSize block size
+   * @param progress the progress reporter
    * @throws IOException
    * @see #setPermission(Path, FsPermission)
    */
@@ -981,12 +980,12 @@ public abstract class FileSystem extends Configured implements Closeable {
    * Create an FSDataOutputStream at the indicated Path with a custom
    * checksum option
    * @param f the file name to open
-   * @param permission
+   * @param permission file permission
    * @param flags {@link CreateFlag}s to use for this stream.
    * @param bufferSize the size of the buffer to be used.
    * @param replication required block replication for the file.
-   * @param blockSize
-   * @param progress
+   * @param blockSize block size
+   * @param progress the progress reporter
    * @param checksumOpt checksum parameter. If null, the values
    *        found in conf will be used.
    * @throws IOException
@@ -1095,8 +1094,8 @@ public abstract class FileSystem extends Configured implements Closeable {
    * the file will be overwritten, and if false an error will be thrown.
    * @param bufferSize the size of the buffer to be used.
    * @param replication required block replication for the file.
-   * @param blockSize
-   * @param progress
+   * @param blockSize block size
+   * @param progress the progress reporter
    * @throws IOException
    * @see #setPermission(Path, FsPermission)
    */
@@ -1113,13 +1112,13 @@ public abstract class FileSystem extends Configured implements Closeable {
    * reporting. Same as create(), except fails if parent directory doesn't
    * already exist.
    * @param f the file name to open
-   * @param permission
+   * @param permission file permission
    * @param overwrite if a file with this name already exists, then if true,
    * the file will be overwritten, and if false an error will be thrown.
    * @param bufferSize the size of the buffer to be used.
    * @param replication required block replication for the file.
-   * @param blockSize
-   * @param progress
+   * @param blockSize block size
+   * @param progress the progress reporter
    * @throws IOException
    * @see #setPermission(Path, FsPermission)
    */
@@ -1137,12 +1136,12 @@ public abstract class FileSystem extends Configured implements Closeable {
     * reporting. Same as create(), except fails if parent directory doesn't
     * already exist.
     * @param f the file name to open
-    * @param permission
+    * @param permission file permission
     * @param flags {@link CreateFlag}s to use for this stream.
     * @param bufferSize the size of the buffer to be used.
     * @param replication required block replication for the file.
-    * @param blockSize
-    * @param progress
+    * @param blockSize block size
+    * @param progress the progress reporter
     * @throws IOException
     * @see #setPermission(Path, FsPermission)
     */
@@ -1882,7 +1881,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * Set the current working directory for the given file system. All relative
    * paths will be resolved relative to it.
    * 
-   * @param new_dir
+   * @param new_dir Path of new working directory
    */
   public abstract void setWorkingDirectory(Path new_dir);
     
@@ -2221,12 +2220,11 @@ public abstract class FileSystem extends Configured implements Closeable {
     FsPermission perm = stat.getPermission();
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
     String user = ugi.getShortUserName();
-    List<String> groups = Arrays.asList(ugi.getGroupNames());
     if (user.equals(stat.getOwner())) {
       if (perm.getUserAction().implies(mode)) {
         return;
       }
-    } else if (groups.contains(stat.getGroup())) {
+    } else if (ugi.getGroups().contains(stat.getGroup())) {
       if (perm.getGroupAction().implies(mode)) {
         return;
       }
@@ -2326,7 +2324,7 @@ public abstract class FileSystem extends Configured implements Closeable {
   /**
    * Set the verify checksum flag. This is only applicable if the 
    * corresponding FileSystem supports checksum. By default doesn't do anything.
-   * @param verifyChecksum
+   * @param verifyChecksum Verify checksum flag
    */
   public void setVerifyChecksum(boolean verifyChecksum) {
     //doesn't do anything
@@ -2335,7 +2333,7 @@ public abstract class FileSystem extends Configured implements Closeable {
   /**
    * Set the write checksum flag. This is only applicable if the 
    * corresponding FileSystem supports checksum. By default doesn't do anything.
-   * @param writeChecksum
+   * @param writeChecksum Write checksum flag
    */
   public void setWriteChecksum(boolean writeChecksum) {
     //doesn't do anything
@@ -2371,8 +2369,8 @@ public abstract class FileSystem extends Configured implements Closeable {
 
   /**
    * Set permission of a path.
-   * @param p
-   * @param permission
+   * @param p The path
+   * @param permission permission
    */
   public void setPermission(Path p, FsPermission permission
       ) throws IOException {
@@ -2592,7 +2590,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * Refer to the HDFS extended attributes user documentation for details.
    *
    * @param path Path to get extended attributes
-   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @return Map describing the XAttrs of the file or directory
    * @throws IOException
    */
   public Map<String, byte[]> getXAttrs(Path path) throws IOException {
@@ -2609,7 +2607,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    *
    * @param path Path to get extended attributes
    * @param names XAttr names.
-   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @return Map describing the XAttrs of the file or directory
    * @throws IOException
    */
   public Map<String, byte[]> getXAttrs(Path path, List<String> names)
@@ -3522,7 +3520,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * In order to reset, we add up all the thread-local statistics data, and
      * set rootData to the negative of that.
      *
-     * This may seem like a counterintuitive way to reset the statsitics.  Why
+     * This may seem like a counterintuitive way to reset the statistics.  Why
      * can't we just zero out all the thread-local data?  Well, thread-local
      * data can only be modified by the thread that owns it.  If we tried to
      * modify the thread-local data from this thread, our modification might get
@@ -3619,8 +3617,11 @@ public abstract class FileSystem extends Configured implements Closeable {
    * Reset all statistics for all file systems
    */
   public static synchronized void clearStatistics() {
-    for(Statistics stat: statisticsTable.values()) {
-      stat.reset();
+    final Iterator<StorageStatistics> iterator =
+        GlobalStorageStatistics.INSTANCE.iterator();
+    while (iterator.hasNext()) {
+      final StorageStatistics statistics = iterator.next();
+      statistics.reset();
     }
   }
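
Illustrative only: with the change above, callers can enumerate the registered StorageStatistics objects through GlobalStorageStatistics.INSTANCE, and clearStatistics() now resets every one of them.

    import java.util.Iterator;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.GlobalStorageStatistics;
    import org.apache.hadoop.fs.StorageStatistics;

    public class StatsReset {
      public static void main(String[] args) {
        Iterator<StorageStatistics> it =
            GlobalStorageStatistics.INSTANCE.iterator();
        while (it.hasNext()) {
          System.out.println(it.next().getName());
        }
        // Walks the global statistics and calls reset() on each entry.
        FileSystem.clearStatistics();
      }
    }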
 

+ 20 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
 import java.util.Iterator;
 import java.util.NoSuchElementException;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem.Statistics.StatisticsData;
@@ -81,13 +82,16 @@ public class FileSystemStorageStatistics extends StorageStatistics {
   }
 
   private static Long fetch(StatisticsData data, String key) {
+    Preconditions.checkArgument(key != null,
+        "The stat key of FileSystemStorageStatistics should not be null!");
+
     switch (key) {
     case "bytesRead":
       return data.getBytesRead();
     case "bytesWritten":
       return data.getBytesWritten();
     case "readOps":
-      return Long.valueOf(data.getReadOps());
+      return (long) (data.getReadOps() + data.getLargeReadOps());
     case "largeReadOps":
       return Long.valueOf(data.getLargeReadOps());
     case "writeOps":
@@ -107,9 +111,18 @@ public class FileSystemStorageStatistics extends StorageStatistics {
 
   FileSystemStorageStatistics(String name, FileSystem.Statistics stats) {
     super(name);
+    Preconditions.checkArgument(stats != null,
+        "FileSystem.Statistics can not be null");
+    Preconditions.checkArgument(stats.getData() != null,
+        "FileSystem.Statistics can not have null data");
     this.stats = stats;
   }
 
+  @Override
+  public String getScheme() {
+    return stats.getScheme();
+  }
+
   @Override
   public Iterator<LongStatistic> getLongStatistics() {
     return new LongStatisticIterator(stats.getData());
@@ -125,6 +138,7 @@ public class FileSystemStorageStatistics extends StorageStatistics {
    *
    * @return         True only if the statistic is being tracked.
    */
+  @Override
   public boolean isTracked(String key) {
     for (String k: KEYS) {
       if (k.equals(key)) {
@@ -133,4 +147,9 @@ public class FileSystemStorageStatistics extends StorageStatistics {
     }
     return false;
   }
+
+  @Override
+  public void reset() {
+    stats.reset();
+  }
 }

+ 58 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java

@@ -18,7 +18,14 @@
 
 package org.apache.hadoop.fs;
 
-import java.io.*;
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.net.InetAddress;
 import java.net.URI;
 import java.net.UnknownHostException;
@@ -36,6 +43,8 @@ import java.util.zip.ZipFile;
 import org.apache.commons.collections.map.CaseInsensitiveMap;
 import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -43,11 +52,9 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * A collection of file-processing util methods
@@ -1209,19 +1216,13 @@ public class FileUtil {
         continue;
       }
       if (classPathEntry.endsWith("*")) {
-        boolean foundWildCardJar = false;
         // Append all jars that match the wildcard
-        Path globPath = new Path(classPathEntry).suffix("{.jar,.JAR}");
-        FileStatus[] wildcardJars = FileContext.getLocalFSFileContext().util()
-          .globStatus(globPath);
-        if (wildcardJars != null) {
-          for (FileStatus wildcardJar: wildcardJars) {
-            foundWildCardJar = true;
-            classPathEntryList.add(wildcardJar.getPath().toUri().toURL()
-              .toExternalForm());
+        List<Path> jars = getJarsInDirectory(classPathEntry);
+        if (!jars.isEmpty()) {
+          for (Path jar: jars) {
+            classPathEntryList.add(jar.toUri().toURL().toExternalForm());
           }
-        }
-        if (!foundWildCardJar) {
+        } else {
           unexpandedWildcardClasspath.append(File.pathSeparator);
           unexpandedWildcardClasspath.append(classPathEntry);
         }
@@ -1277,6 +1278,48 @@ public class FileUtil {
     return jarCp;
   }
 
+  /**
+   * Returns all jars that are in the directory. It is useful in expanding a
+   * wildcard path to return all jars from the directory to use in a classpath.
+   * It operates only on local paths.
+   *
+   * @param path the path to the directory. The path may include the wildcard.
+   * @return the list of jars as Paths, or an empty list if there are no jars, or
+   * the directory does not exist locally
+   */
+  public static List<Path> getJarsInDirectory(String path) {
+    return getJarsInDirectory(path, true);
+  }
+
+  /**
+   * Returns all jars that are in the directory. It is useful in expanding a
+   * wildcard path to return all jars from the directory to use in a classpath.
+   *
+   * @param path the path to the directory. The path may include the wildcard.
+   * @return the list of jars as Paths, or an empty list if there are no jars, or
+   * the directory does not exist
+   */
+  public static List<Path> getJarsInDirectory(String path, boolean useLocal) {
+    List<Path> paths = new ArrayList<>();
+    try {
+      // add the wildcard if it is not provided
+      if (!path.endsWith("*")) {
+        path += File.separator + "*";
+      }
+      Path globPath = new Path(path).suffix("{.jar,.JAR}");
+      FileContext context = useLocal ?
+          FileContext.getLocalFSFileContext() :
+          FileContext.getFileContext(globPath.toUri());
+      FileStatus[] files = context.util().globStatus(globPath);
+      if (files != null) {
+        for (FileStatus file: files) {
+          paths.add(file.getPath());
+        }
+      }
+    } catch (IOException ignore) {} // return the empty list
+    return paths;
+  }
+
   public static boolean compareFs(FileSystem srcFs, FileSystem destFs) {
     if (srcFs==null || destFs==null) {
       return false;
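
Usage sketch for the new helper; the directory path below is just an example.

    import java.util.List;
    import org.apache.hadoop.fs.FileUtil;
    import org.apache.hadoop.fs.Path;

    public class WildcardJars {
      public static void main(String[] args) {
        // The trailing wildcard is optional; it is appended when missing.
        List<Path> jars = FileUtil.getJarsInDirectory("/opt/hadoop/lib/*");
        for (Path jar : jars) {
          System.out.println(jar.toUri());
        }
      }
    }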

+ 25 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GetSpaceUsed.java

@@ -26,8 +26,11 @@ import java.io.File;
 import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
+import java.util.concurrent.TimeUnit;
 
 public interface GetSpaceUsed {
+
+
   long getUsed() throws IOException;
 
   /**
@@ -37,11 +40,15 @@ public interface GetSpaceUsed {
     static final Logger LOG = LoggerFactory.getLogger(Builder.class);
 
     static final String CLASSNAME_KEY = "fs.getspaceused.classname";
+    static final String JITTER_KEY = "fs.getspaceused.jitterMillis";
+    static final long DEFAULT_JITTER = TimeUnit.MINUTES.toMillis(1);
+
 
     private Configuration conf;
     private Class<? extends GetSpaceUsed> klass = null;
     private File path = null;
     private Long interval = null;
+    private Long jitter = null;
     private Long initialUsed = null;
 
     public Configuration getConf() {
@@ -111,6 +118,24 @@ public interface GetSpaceUsed {
       return this;
     }
 
+
+    public long getJitter() {
+      if (jitter == null) {
+        Configuration configuration = this.conf;
+
+        if (configuration == null) {
+          return DEFAULT_JITTER;
+        }
+        return configuration.getLong(JITTER_KEY, DEFAULT_JITTER);
+      }
+      return jitter;
+    }
+
+    public Builder setJitter(Long jit) {
+      this.jitter = jit;
+      return this;
+    }
+
     public GetSpaceUsed build() throws IOException {
       GetSpaceUsed getSpaceUsed = null;
       try {
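
A hedged usage sketch of the new jitter knob; setConf/setPath/setInterval are assumed to be existing Builder setters matching the getters used above, and all values are illustrative.

    import java.io.File;
    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.GetSpaceUsed;

    public class JitterExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        GetSpaceUsed du = new GetSpaceUsed.Builder()
            .setConf(conf)            // may also carry fs.getspaceused.jitterMillis
            .setPath(new File("/tmp"))
            .setInterval(600_000L)    // refresh every 10 minutes
            .setJitter(60_000L)       // random jitter applied to the refresh interval
            .build();
        System.out.println("used bytes: " + du.getUsed());
      }
    }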

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobExpander.java

@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
-class GlobExpander {
+public class GlobExpander {
   
   static class StringWithOffset {
     String string;

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobFilter.java

@@ -72,7 +72,7 @@ public class GlobFilter implements PathFilter {
     }
   }
 
-  boolean hasPattern() {
+  public boolean hasPattern() {
     return pattern.hasWildcard();
   }
 

+ 16 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobalStorageStatistics.java

@@ -66,8 +66,9 @@ public enum GlobalStorageStatistics {
    * @param provider    An object which can create a new StorageStatistics
    *                      object if needed.
    * @return            The StorageStatistics object with the given name.
-   * @throws RuntimeException  If the StorageStatisticsProvider provides a new
-   *                           StorageStatistics object with the wrong name.
+   * @throws RuntimeException  If the StorageStatisticsProvider provides a null
+   *                           object or a new StorageStatistics object with the
+   *                           wrong name.
    */
   public synchronized StorageStatistics put(String name,
       StorageStatisticsProvider provider) {
@@ -78,6 +79,10 @@ public enum GlobalStorageStatistics {
       return stats;
     }
     stats = provider.provide();
+    if (stats == null) {
+      throw new RuntimeException("StorageStatisticsProvider for " + name +
+          " should not provide a null StorageStatistics object.");
+    }
     if (!stats.getName().equals(name)) {
       throw new RuntimeException("StorageStatisticsProvider for " + name +
           " provided a StorageStatistics object for " + stats.getName() +
@@ -87,6 +92,15 @@ public enum GlobalStorageStatistics {
     return stats;
   }
 
+  /**
+   * Reset all global storage statistics.
+   */
+  public synchronized void reset() {
+    for (StorageStatistics statistics : map.values()) {
+      statistics.reset();
+    }
+  }
+
   /**
    * Get an iterator that we can use to iterate through all the global storage
    * statistics objects.

+ 142 - 39
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java

@@ -30,7 +30,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 
-/** Names a file or directory in a {@link FileSystem}.
+/**
+ * Names a file or directory in a {@link FileSystem}.
  * Path strings use slash as the directory separator.
  */
 @Stringable
@@ -38,26 +39,37 @@ import org.apache.hadoop.conf.Configuration;
 @InterfaceStability.Stable
 public class Path implements Comparable {
 
-  /** The directory separator, a slash. */
+  /**
+   * The directory separator, a slash.
+   */
   public static final String SEPARATOR = "/";
+
+  /**
+   * The directory separator, a slash, as a character.
+   */
   public static final char SEPARATOR_CHAR = '/';
   
+  /**
+   * The current directory, ".".
+   */
   public static final String CUR_DIR = ".";
   
-  public static final boolean WINDOWS
-    = System.getProperty("os.name").startsWith("Windows");
+  /**
+   * Whether the current host is a Windows machine.
+   */
+  public static final boolean WINDOWS =
+      System.getProperty("os.name").startsWith("Windows");
 
   /**
    *  Pre-compiled regular expressions to detect path formats.
    */
-  private static final Pattern hasUriScheme =
-      Pattern.compile("^[a-zA-Z][a-zA-Z0-9+-.]+:");
-  private static final Pattern hasDriveLetterSpecifier =
+  private static final Pattern HAS_DRIVE_LETTER_SPECIFIER =
       Pattern.compile("^/?[a-zA-Z]:");
 
-  private URI uri;                                // a hierarchical uri
+  private URI uri; // a hierarchical uri
 
   /**
+   * Test whether this Path uses a scheme and is relative.
    * Pathnames with scheme and relative path are illegal.
    */
   void checkNotSchemeWithRelative() {
@@ -73,6 +85,12 @@ public class Path implements Comparable {
     }
   }
 
+  /**
+   * Return a version of the given Path without the scheme information.
+   *
+   * @param path the source Path
+   * @return a copy of this Path without the scheme information
+   */
   public static Path getPathWithoutSchemeAndAuthority(Path path) {
     // This code depends on Path.toString() to remove the leading slash before
     // the drive specification on Windows.
@@ -82,22 +100,42 @@ public class Path implements Comparable {
     return newPath;
   }
 
-  /** Resolve a child path against a parent path. */
+  /**
+   * Create a new Path based on the child path resolved against the parent path.
+   *
+   * @param parent the parent path
+   * @param child the child path
+   */
   public Path(String parent, String child) {
     this(new Path(parent), new Path(child));
   }
 
-  /** Resolve a child path against a parent path. */
+  /**
+   * Create a new Path based on the child path resolved against the parent path.
+   *
+   * @param parent the parent path
+   * @param child the child path
+   */
   public Path(Path parent, String child) {
     this(parent, new Path(child));
   }
 
-  /** Resolve a child path against a parent path. */
+  /**
+   * Create a new Path based on the child path resolved against the parent path.
+   *
+   * @param parent the parent path
+   * @param child the child path
+   */
   public Path(String parent, Path child) {
     this(new Path(parent), child);
   }
 
-  /** Resolve a child path against a parent path. */
+  /**
+   * Create a new Path based on the child path resolved against the parent path.
+   *
+   * @param parent the parent path
+   * @param child the child path
+   */
   public Path(Path parent, Path child) {
     // Add a slash to parent's path so resolution is compatible with URI's
     URI parentUri = parent.uri;
@@ -127,8 +165,12 @@ public class Path implements Comparable {
     }   
   }
   
-  /** Construct a path from a String.  Path strings are URIs, but with
-   * unescaped elements and some additional normalization. */
+  /**
+   * Construct a path from a String.  Path strings are URIs, but with
+   * unescaped elements and some additional normalization.
+   *
+   * @param pathString the path string
+   */
   public Path(String pathString) throws IllegalArgumentException {
     checkPathArg( pathString );
     
@@ -172,12 +214,20 @@ public class Path implements Comparable {
 
   /**
    * Construct a path from a URI
+   *
+   * @param aUri the source URI
    */
   public Path(URI aUri) {
     uri = aUri.normalize();
   }
   
-  /** Construct a Path from components. */
+  /**
+   * Construct a Path from components.
+   *
+   * @param scheme the scheme
+   * @param authority the authority
+   * @param path the path
+   */
   public Path(String scheme, String authority, String path) {
     checkPathArg( path );
 
@@ -210,9 +260,9 @@ public class Path implements Comparable {
    * The returned path has the scheme and authority of the first path.  On
    * Windows, the drive specification in the second path is discarded.
    * 
-   * @param path1 Path first path
-   * @param path2 Path second path, to be appended relative to path1
-   * @return Path merged path
+   * @param path1 the first path
+   * @param path2 the second path, to be appended relative to path1
+   * @return the merged path
    */
   public static Path mergePaths(Path path1, Path path2) {
     String path2Str = path2.toUri().getPath();
@@ -228,10 +278,11 @@ public class Path implements Comparable {
   /**
    * Normalize a path string to use non-duplicated forward slashes as
    * the path separator and remove any trailing path separators.
-   * @param scheme Supplies the URI scheme. Used to deduce whether we
-   *               should replace backslashes or not.
-   * @param path Supplies the scheme-specific part
-   * @return Normalized path string.
+   *
+   * @param scheme the URI scheme. Used to deduce whether we
+   * should replace backslashes or not
+   * @param path the scheme-specific part
+   * @return the normalized path string
    */
   private static String normalizePath(String scheme, String path) {
     // Remove double forward slashes.
@@ -257,7 +308,7 @@ public class Path implements Comparable {
   }
 
   private static boolean hasWindowsDrive(String path) {
-    return (WINDOWS && hasDriveLetterSpecifier.matcher(path).find());
+    return (WINDOWS && HAS_DRIVE_LETTER_SPECIFIER.matcher(path).find());
   }
 
   private static int startPositionWithoutWindowsDrive(String path) {
@@ -272,10 +323,10 @@ public class Path implements Comparable {
    * Determine whether a given path string represents an absolute path on
    * Windows. e.g. "C:/a/b" is an absolute path. "C:a/b" is not.
    *
-   * @param pathString Supplies the path string to evaluate.
-   * @param slashed true if the given path is prefixed with "/".
+   * @param pathString the path string to evaluate
+   * @param slashed true if the given path is prefixed with "/"
    * @return true if the supplied path looks like an absolute path with a Windows
-   * drive-specifier.
+   * drive-specifier
    */
   public static boolean isWindowsAbsolutePath(final String pathString,
                                               final boolean slashed) {
@@ -286,17 +337,32 @@ public class Path implements Comparable {
             (pathString.charAt(start) == '\\'));
   }
 
-  /** Convert this to a URI. */
+  /**
+   * Convert this Path to a URI.
+   *
+   * @return this Path as a URI
+   */
   public URI toUri() { return uri; }
 
-  /** Return the FileSystem that owns this Path. */
+  /**
+   * Return the FileSystem that owns this Path.
+   *
+   * @param conf the configuration to use when resolving the FileSystem
+   * @return the FileSystem that owns this Path
+   * @throws java.io.IOException thrown if there's an issue resolving the
+   * FileSystem
+   */
   public FileSystem getFileSystem(Configuration conf) throws IOException {
     return FileSystem.get(this.toUri(), conf);
   }
 
   /**
-   * Is an absolute path (ie a slash relative path part)
-   *  AND  a scheme is null AND  authority is null.
+   * Returns true if the path component (i.e. directory) of this URI is
+   * absolute <strong>and</strong> the scheme is null, <strong>and</strong> the authority
+   * is null.
+   *
+   * @return whether the path is absolute and the URI has no scheme nor
+   * authority parts
    */
   public boolean isAbsoluteAndSchemeAuthorityNull() {
     return  (isUriPathAbsolute() && 
@@ -304,33 +370,50 @@ public class Path implements Comparable {
   }
   
   /**
-   *  True if the path component (i.e. directory) of this URI is absolute.
+   * Returns true if the path component (i.e. directory) of this URI is
+   * absolute.
+   *
+   * @return whether this URI's path is absolute
    */
   public boolean isUriPathAbsolute() {
     int start = startPositionWithoutWindowsDrive(uri.getPath());
     return uri.getPath().startsWith(SEPARATOR, start);
    }
   
-  /** True if the path is not a relative path and starts with root. */
+  /**
+   * Returns true if the path component (i.e. directory) of this URI is
+   * absolute.  This method is a wrapper for {@link #isUriPathAbsolute()}.
+   *
+   * @return whether this URI's path is absolute
+   */
   public boolean isAbsolute() {
      return isUriPathAbsolute();
   }
 
   /**
+   * Returns true if and only if this path represents the root of a file system.
+   *
    * @return true if and only if this path represents the root of a file system
    */
   public boolean isRoot() {
     return getParent() == null;
   }
 
-  /** Returns the final component of this path.*/
+  /**
+   * Returns the final component of this path.
+   *
+   * @return the final component of this path
+   */
   public String getName() {
     String path = uri.getPath();
     int slash = path.lastIndexOf(SEPARATOR);
     return path.substring(slash+1);
   }
 
-  /** Returns the parent of a path or null if at root. */
+  /**
+   * Returns the parent of a path or null if at root.
+   * @return the parent of a path or null if at root
+   */
   public Path getParent() {
     String path = uri.getPath();
     int lastSlash = path.lastIndexOf('/');
@@ -348,7 +431,12 @@ public class Path implements Comparable {
     return new Path(uri.getScheme(), uri.getAuthority(), parent);
   }
 
-  /** Adds a suffix to the final name in the path.*/
+  /**
+   * Adds a suffix to the final name in the path.
+   *
+   * @param suffix the suffix to add
+   * @return a new path with the suffix added
+   */
   public Path suffix(String suffix) {
     return new Path(getParent(), getName()+suffix);
   }
@@ -402,7 +490,10 @@ public class Path implements Comparable {
     return this.uri.compareTo(that.uri);
   }
   
-  /** Return the number of elements in this path. */
+  /**
+   * Returns the number of elements in this path.
+   * @return the number of elements in this path
+   */
   public int depth() {
     String path = uri.getPath();
     int depth = 0;
@@ -415,16 +506,28 @@ public class Path implements Comparable {
   }
 
   /**
-   *  Returns a qualified path object.
+   * Returns a qualified path object for the {@link FileSystem}'s working
+   * directory.
    *  
-   *  Deprecated - use {@link #makeQualified(URI, Path)}
+   * @param fs the target FileSystem
+   * @return a qualified path object for the FileSystem's working directory
+   * @deprecated use {@link #makeQualified(URI, Path)}
    */
   @Deprecated
   public Path makeQualified(FileSystem fs) {
     return makeQualified(fs.getUri(), fs.getWorkingDirectory());
   }
   
-  /** Returns a qualified path object. */
+  /**
+   * Returns a qualified path object.
+   *
+   * @param defaultUri if this path is missing the scheme or authority
+   * components, borrow them from this URI
+   * @param workingDir if this path isn't absolute, treat it as relative to this
+   * working directory
+   * @return this path if it contains a scheme and authority and is absolute, or
+   * a new path that includes a scheme and authority and is fully qualified
+   */
   @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
   public Path makeQualified(URI defaultUri, Path workingDir ) {
     Path path = this;
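
A small, purely illustrative example of the constructors and helpers whose Javadoc is expanded above; the host, port and paths are made up.

    import java.net.URI;
    import org.apache.hadoop.fs.Path;

    public class PathDemo {
      public static void main(String[] args) {
        Path relative = new Path("logs/app.log");
        Path qualified = relative.makeQualified(
            URI.create("hdfs://namenode:8020"), new Path("/user/alice"));
        // hdfs://namenode:8020/user/alice/logs/app.log
        System.out.println(qualified);
        // /user/alice/logs/app.log (scheme and authority stripped)
        System.out.println(Path.getPathWithoutSchemeAndAuthority(qualified));
      }
    }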

+ 2 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -33,7 +33,6 @@ import java.io.OutputStream;
 import java.io.FileDescriptor;
 import java.net.URI;
 import java.nio.ByteBuffer;
-import java.nio.file.AccessDeniedException;
 import java.nio.file.Files;
 import java.nio.file.NoSuchFileException;
 import java.nio.file.attribute.BasicFileAttributes;
@@ -65,8 +64,6 @@ public class RawLocalFileSystem extends FileSystem {
   // Temporary workaround for HADOOP-9652.
   private static boolean useDeprecatedFileStatus = true;
 
-  private FsPermission umask;
-
   @VisibleForTesting
   public static void useStatIfAvailable() {
     useDeprecatedFileStatus = !Stat.isAvailable();
@@ -100,7 +97,6 @@ public class RawLocalFileSystem extends FileSystem {
   public void initialize(URI uri, Configuration conf) throws IOException {
     super.initialize(uri, conf);
     setConf(conf);
-    umask = FsPermission.getUMask(conf);
   }
   
   /*******************************************************
@@ -234,7 +230,7 @@ public class RawLocalFileSystem extends FileSystem {
       if (permission == null) {
         this.fos = new FileOutputStream(file, append);
       } else {
-        permission = permission.applyUMask(umask);
+        permission = permission.applyUMask(FsPermission.getUMask(getConf()));
         if (Shell.WINDOWS && NativeIO.isAvailable()) {
           this.fos = NativeIO.Windows.createFileOutputStreamWithMode(file,
               append, permission.toShort());
@@ -472,10 +468,6 @@ public class RawLocalFileSystem extends FileSystem {
     if (localf.isDirectory()) {
       String[] names = localf.list();
       if (names == null) {
-        if (!localf.canRead()) {
-          throw new AccessDeniedException("cannot open directory " + f +
-              ": Permission denied");
-        }
         return null;
       }
       results = new FileStatus[names.length];
@@ -515,7 +507,7 @@ public class RawLocalFileSystem extends FileSystem {
     if (permission == null) {
       permission = FsPermission.getDirDefault();
     }
-    permission = permission.applyUMask(umask);
+    permission = permission.applyUMask(FsPermission.getUMask(getConf()));
     if (Shell.WINDOWS && NativeIO.isAvailable()) {
       try {
         NativeIO.Windows.createDirectoryWithMode(p2f, permission.toShort());

+ 59 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StorageStatistics.java

@@ -27,6 +27,51 @@ import java.util.Iterator;
  */
 @InterfaceAudience.Public
 public abstract class StorageStatistics {
+
+  /**
+   * These are common statistic names.
+   *
+   * The following names are considered general and preserved across different
+   * StorageStatistics classes. When implementing a new StorageStatistics, it is
+   * highly recommended to use the common statistic names.
+   *
+   * When adding new common statistic name constants, please make them unique.
+   * By convention, they are implicitly unique:
+   *  - the name of the constants are uppercase, words separated by underscores.
+   *  - the value of the constants are lowercase of the constant names.
+   */
+  public interface CommonStatisticNames {
+    // The following names are for file system operation invocations
+    String OP_APPEND = "op_append";
+    String OP_COPY_FROM_LOCAL_FILE = "op_copy_from_local_file";
+    String OP_CREATE = "op_create";
+    String OP_CREATE_NON_RECURSIVE = "op_create_non_recursive";
+    String OP_DELETE = "op_delete";
+    String OP_EXISTS = "op_exists";
+    String OP_GET_CONTENT_SUMMARY = "op_get_content_summary";
+    String OP_GET_FILE_CHECKSUM = "op_get_file_checksum";
+    String OP_GET_FILE_STATUS = "op_get_file_status";
+    String OP_GET_STATUS = "op_get_status";
+    String OP_GLOB_STATUS = "op_glob_status";
+    String OP_IS_FILE = "op_is_file";
+    String OP_IS_DIRECTORY = "op_is_directory";
+    String OP_LIST_FILES = "op_list_files";
+    String OP_LIST_LOCATED_STATUS = "op_list_located_status";
+    String OP_LIST_STATUS = "op_list_status";
+    String OP_MKDIRS = "op_mkdirs";
+    String OP_MODIFY_ACL_ENTRIES = "op_modify_acl_entries";
+    String OP_OPEN = "op_open";
+    String OP_REMOVE_ACL = "op_remove_acl";
+    String OP_REMOVE_ACL_ENTRIES = "op_remove_acl_entries";
+    String OP_REMOVE_DEFAULT_ACL = "op_remove_default_acl";
+    String OP_RENAME = "op_rename";
+    String OP_SET_ACL = "op_set_acl";
+    String OP_SET_OWNER = "op_set_owner";
+    String OP_SET_PERMISSION = "op_set_permission";
+    String OP_SET_TIMES = "op_set_times";
+    String OP_TRUNCATE = "op_truncate";
+  }
+
   /**
    * A 64-bit storage statistic.
    */
@@ -67,6 +112,14 @@ public abstract class StorageStatistics {
     return name;
   }
 
+  /**
+   * @return the associated file system scheme if this is scheme specific,
+   * else return null.
+   */
+  public String getScheme() {
+    return null;
+  }
+
   /**
    * Get an iterator over all the currently tracked long statistics.
    *
@@ -79,8 +132,7 @@ public abstract class StorageStatistics {
    * Get the value of a statistic.
    *
    * @return         null if the statistic is not being tracked or is not a
-   *                     long statistic.
-   *                 The value of the statistic, otherwise.
+   *                 long statistic. The value of the statistic, otherwise.
    */
   public abstract Long getLong(String key);
 
@@ -90,4 +142,9 @@ public abstract class StorageStatistics {
    * @return         True only if the statistic is being tracked.
    */
   public abstract boolean isTracked(String key);
+
+  /**
+   * Reset all the statistic data.
+   */
+  public abstract void reset();
 }
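
A minimal subclass sketch under the extended contract above (reset() plus the optional getScheme()); it assumes LongStatistic exposes a (name, value) constructor, so treat it as illustrative rather than canonical.

    import java.util.Collections;
    import java.util.Iterator;
    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.hadoop.fs.StorageStatistics;

    class OpenCountingStatistics extends StorageStatistics {
      private final AtomicLong opens = new AtomicLong();

      OpenCountingStatistics(String name) {
        super(name);
      }

      void incrementOpen() {
        opens.incrementAndGet();
      }

      @Override
      public Iterator<LongStatistic> getLongStatistics() {
        // Assumes LongStatistic(String, long); adjust if the constructor differs.
        return Collections.singletonList(
            new LongStatistic(CommonStatisticNames.OP_OPEN, opens.get()))
            .iterator();
      }

      @Override
      public Long getLong(String key) {
        if (CommonStatisticNames.OP_OPEN.equals(key)) {
          return opens.get();
        }
        return null;
      }

      @Override
      public boolean isTracked(String key) {
        return CommonStatisticNames.OP_OPEN.equals(key);
      }

      @Override
      public void reset() {
        opens.set(0);
      }
    }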

+ 22 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnionStorageStatistics.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.fs;
 import java.util.Iterator;
 import java.util.NoSuchElementException;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -77,6 +78,16 @@ public class UnionStorageStatistics extends StorageStatistics {
 
   public UnionStorageStatistics(String name, StorageStatistics[] stats) {
     super(name);
+
+    Preconditions.checkArgument(name != null,
+        "The name of union storage statistics can not be null!");
+    Preconditions.checkArgument(stats != null,
+        "The stats of union storage statistics can not be null!");
+    for (StorageStatistics stat : stats) {
+      Preconditions.checkArgument(stat != null,
+          "The stats of union storage statistics can not have null element!");
+    }
+
     this.stats = stats;
   }
 
@@ -87,8 +98,8 @@ public class UnionStorageStatistics extends StorageStatistics {
 
   @Override
   public Long getLong(String key) {
-    for (int i = 0; i < stats.length; i++) {
-      Long val = stats[i].getLong(key);
+    for (StorageStatistics stat : stats) {
+      Long val = stat.getLong(key);
       if (val != null) {
         return val;
       }
@@ -103,11 +114,18 @@ public class UnionStorageStatistics extends StorageStatistics {
    */
   @Override
   public boolean isTracked(String key) {
-    for (int i = 0; i < stats.length; i++) {
-      if (stats[i].isTracked(key)) {
+    for (StorageStatistics stat : stats) {
+      if (stat.isTracked(key)) {
         return true;
       }
     }
     return false;
   }
+
+  @Override
+  public void reset() {
+    for (StorageStatistics stat : stats) {
+      stat.reset();
+    }
+  }
 }

+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/WindowsGetSpaceUsed.java

@@ -31,10 +31,12 @@ import java.io.IOException;
 @InterfaceStability.Evolving
 public class WindowsGetSpaceUsed extends CachingGetSpaceUsed {
 
-
   public WindowsGetSpaceUsed(CachingGetSpaceUsed.Builder builder)
       throws IOException {
-    super(builder.getPath(), builder.getInterval(), builder.getInitialUsed());
+    super(builder.getPath(),
+        builder.getInterval(),
+        builder.getJitter(),
+        builder.getInitialUsed());
   }
 
   /**

+ 15 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionParser.java

@@ -55,7 +55,7 @@ class PermissionParser {
     if ((matcher = symbolic.matcher(modeStr)).find()) {
       applyNormalPattern(modeStr, matcher);
     } else if ((matcher = octal.matcher(modeStr)).matches()) {
-      applyOctalPattern(modeStr, matcher);
+      applyOctalPattern(matcher);
     } else {
       throw new IllegalArgumentException(modeStr);
     }
@@ -63,10 +63,10 @@ class PermissionParser {
 
   private void applyNormalPattern(String modeStr, Matcher matcher) {
     // Are there multiple permissions stored in one chmod?
-    boolean commaSeperated = false;
+    boolean commaSeparated = false;
 
     for (int i = 0; i < 1 || matcher.end() < modeStr.length(); i++) {
-      if (i > 0 && (!commaSeperated || !matcher.find())) {
+      if (i > 0 && (!commaSeparated || !matcher.find())) {
         throw new IllegalArgumentException(modeStr);
       }
 
@@ -144,19 +144,26 @@ class PermissionParser {
         stickyBitType = type;
       }
 
-      commaSeperated = matcher.group(4).contains(",");
+      commaSeparated = matcher.group(4).contains(",");
     }
     symbolic = true;
   }
 
-  private void applyOctalPattern(String modeStr, Matcher matcher) {
-    userType = groupType = othersType = '=';
+  private void applyOctalPattern(final Matcher matcher) {
+    // Matcher groups: 1: [01]  2: [0-7]{3}
+    final char typeApply = '=';
+    stickyBitType = typeApply;
+    userType = typeApply;
+    groupType = typeApply;
+    othersType = typeApply;
 
-    // Check if sticky bit is specified
+    // If the sticky-bit digit is present, use it; otherwise default to 0
+    // so that applying '=' clears the sticky bit.
     String sb = matcher.group(1);
     if (!sb.isEmpty()) {
       stickyMode = Short.valueOf(sb.substring(0, 1));
-      stickyBitType = '=';
+    } else {
+      stickyMode = 0;
     }
 
     String str = matcher.group(2);

+ 17 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java

@@ -192,6 +192,9 @@ class AclCommands extends FsCommand {
       boolean oneRemoveOption = cf.getOpt("b") || cf.getOpt("k");
       boolean oneModifyOption = cf.getOpt("m") || cf.getOpt("x");
       boolean setOption = cf.getOpt("-set");
+      boolean hasExpectedOptions = cf.getOpt("b") || cf.getOpt("k") ||
+          cf.getOpt("m") || cf.getOpt("x") || cf.getOpt("-set");
+
       if ((bothRemoveOptions || bothModifyOptions)
           || (oneRemoveOption && oneModifyOption)
           || (setOption && (oneRemoveOption || oneModifyOption))) {
@@ -201,10 +204,19 @@ class AclCommands extends FsCommand {
 
       // Only -m, -x and --set expects <acl_spec>
       if (oneModifyOption || setOption) {
+        if (args.isEmpty()) {
+          throw new HadoopIllegalArgumentException(
+              "Missing arguments: <acl_spec> <path>");
+        }
         if (args.size() < 2) {
-          throw new HadoopIllegalArgumentException("<acl_spec> is missing");
+          throw new HadoopIllegalArgumentException(
+              "Missing either <acl_spec> or <path>");
         }
         aclEntries = AclEntry.parseAclSpec(args.removeFirst(), !cf.getOpt("x"));
+        if (aclEntries.isEmpty()) {
+          throw new HadoopIllegalArgumentException(
+              "Missing <acl_spec> entry");
+        }
       }
 
       if (args.isEmpty()) {
@@ -214,6 +226,10 @@ class AclCommands extends FsCommand {
         throw new HadoopIllegalArgumentException("Too many arguments");
       }
 
+      if (!hasExpectedOptions) {
+        throw new HadoopIllegalArgumentException(
+            "Expected one of -b, -k, -m, -x or --set options");
+      }
       // In recursive mode, save a separate list of just the access ACL entries.
       // Only directories may have a default ACL.  When a recursive operation
       // encounters a file under the specified path, it must pass only the
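
Behavioral note with illustrative commands (this class backs the setfacl shell command): running `hadoop fs -setfacl /path` with no option now fails fast with "Expected one of -b, -k, -m, -x or --set options", and `-m`, `-x` or `--set` invocations that omit the <acl_spec> or the <path> now report which argument is missing instead of a generic error.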

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java

@@ -21,6 +21,7 @@ import java.io.ByteArrayOutputStream;
 import java.io.EOFException;
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
 import java.util.LinkedList;
 import java.util.zip.GZIPInputStream;
 
@@ -32,7 +33,6 @@ import org.apache.avro.generic.GenericDatumWriter;
 import org.apache.avro.io.DatumWriter;
 import org.apache.avro.io.EncoderFactory;
 import org.apache.avro.io.JsonEncoder;
-import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -235,10 +235,10 @@ class Display extends FsCommand {
         if (!r.next(key, val)) {
           return -1;
         }
-        byte[] tmp = key.toString().getBytes(Charsets.UTF_8);
+        byte[] tmp = key.toString().getBytes(StandardCharsets.UTF_8);
         outbuf.write(tmp, 0, tmp.length);
         outbuf.write('\t');
-        tmp = val.toString().getBytes(Charsets.UTF_8);
+        tmp = val.toString().getBytes(StandardCharsets.UTF_8);
         outbuf.write(tmp, 0, tmp.length);
         outbuf.write('\n');
         inbuf.reset(outbuf.getData(), outbuf.getLength());
@@ -301,7 +301,7 @@ class Display extends FsCommand {
       if (!fileReader.hasNext()) {
         // Write a new line after the last Avro record.
         output.write(System.getProperty("line.separator")
-                         .getBytes(Charsets.UTF_8));
+                         .getBytes(StandardCharsets.UTF_8));
         output.flush();
       }
       pos = 0;

+ 54 - 26
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java

@@ -18,11 +18,14 @@
 
 package org.apache.hadoop.fs.shell;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.LinkedList;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.security.AccessControlException;
 
 /**
  * Perform shell-like file tests 
@@ -38,18 +41,25 @@ class Test extends FsCommand {
   public static final String NAME = "test";
   public static final String USAGE = "-[defsz] <path>";
   public static final String DESCRIPTION =
-    "Answer various questions about <path>, with result via exit status.\n" +
-    "  -d  return 0 if <path> is a directory.\n" +
-    "  -e  return 0 if <path> exists.\n" +
-    "  -f  return 0 if <path> is a file.\n" +
-    "  -s  return 0 if file <path> is greater than zero bytes in size.\n" +
-    "  -z  return 0 if file <path> is zero bytes in size, else return 1.";
+      "Answer various questions about <path>, with result via exit status.\n"
+          + "  -d  return 0 if <path> is a directory.\n"
+          + "  -e  return 0 if <path> exists.\n"
+          + "  -f  return 0 if <path> is a file.\n"
+          + "  -s  return 0 if file <path> is greater "
+          + "        than zero bytes in size.\n"
+          + "  -w  return 0 if file <path> exists "
+          + "        and write permission is granted.\n"
+          + "  -r  return 0 if file <path> exists "
+          + "        and read permission is granted.\n"
+          + "  -z  return 0 if file <path> is "
+          + "        zero bytes in size, else return 1.";
 
   private char flag;
   
   @Override
   protected void processOptions(LinkedList<String> args) {
-    CommandFormat cf = new CommandFormat(1, 1, "e", "d", "f", "s", "z");
+    CommandFormat cf = new CommandFormat(1, 1,
+        "e", "d", "f", "s", "z", "w", "r");
     cf.parse(args);
     
     String[] opts = cf.getOpts().toArray(new String[0]);
@@ -68,29 +78,47 @@ class Test extends FsCommand {
   protected void processPath(PathData item) throws IOException {
     boolean test = false;
     switch (flag) {
-      case 'e':
-        test = true;
-        break;
-      case 'd':
-        test = item.stat.isDirectory();
-        break;
-      case 'f':
-        test = item.stat.isFile();
-        break;
-      case 's':
-        test = (item.stat.getLen() > 0);
-        break;
-      case 'z':
-        test = (item.stat.getLen() == 0);
-        break;
-      default:
-        break;
+    case 'e':
+      test = true;
+      break;
+    case 'd':
+      test = item.stat.isDirectory();
+      break;
+    case 'f':
+      test = item.stat.isFile();
+      break;
+    case 's':
+      test = (item.stat.getLen() > 0);
+      break;
+    case 'z':
+      test = (item.stat.getLen() == 0);
+      break;
+    case 'w':
+      test = testAccess(item, FsAction.WRITE);
+      break;
+    case 'r':
+      test = testAccess(item, FsAction.READ);
+      break;
+    default:
+      break;
+    }
+    if (!test) {
+      exitCode = 1;
+    }
+  }
+
+  private boolean testAccess(PathData item, FsAction action)
+      throws IOException {
+    try {
+      item.fs.access(item.path, action);
+      return true;
+    } catch (AccessControlException | FileNotFoundException e) {
+      return false;
     }
-    if (!test) exitCode = 1;
   }
 
   @Override
   protected void processNonexistentPath(PathData item) throws IOException {
     exitCode = 1;
   }
-}
+}
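
Example of the new checks (paths are illustrative): `hadoop fs -test -w /user/alice/out` exits 0 only if the path exists and the caller has write access, and `hadoop fs -test -r /user/alice/in` does the same for read access. Both go through FileSystem#access, so an AccessControlException or a missing path results in exit code 1.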

+ 20 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -39,12 +40,12 @@ import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
@@ -425,5 +426,21 @@ class ChRootedFileSystem extends FilterFileSystem {
   @Override
   public FsServerDefaults getServerDefaults(Path f) throws IOException {
     return super.getServerDefaults(fullPath(f));
-  }  
+  }
+
+  @Override
+  public BlockStoragePolicySpi getStoragePolicy(Path src) throws IOException {
+    return super.getStoragePolicy(fullPath(src));
+  }
+
+  @Override
+  public void setStoragePolicy(Path src, String policyName) throws IOException {
+    super.setStoragePolicy(fullPath(src), policyName);
+  }
+
+  @Override
+  public void unsetStoragePolicy(Path src) throws IOException {
+    super.unsetStoragePolicy(fullPath(src));
+  }
+
 }

+ 77 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
@@ -35,6 +36,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -48,6 +50,7 @@ import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -56,7 +59,6 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.viewfs.InodeTree.INode;
 import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
 import org.apache.hadoop.security.AccessControlException;
@@ -760,6 +762,43 @@ public class ViewFileSystem extends FileSystem {
     res.targetFileSystem.deleteSnapshot(res.remainingPath, snapshotName);
   }
 
+  @Override
+  public void setStoragePolicy(Path src, String policyName) throws IOException {
+    InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(src),
+        true);
+    res.targetFileSystem.setStoragePolicy(res.remainingPath, policyName);
+  }
+
+  @Override
+  public void unsetStoragePolicy(Path src) throws IOException {
+    InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(src),
+        true);
+    res.targetFileSystem.unsetStoragePolicy(res.remainingPath);
+  }
+
+  @Override
+  public BlockStoragePolicySpi getStoragePolicy(Path src) throws IOException {
+    InodeTree.ResolveResult<FileSystem> res = fsState.resolve(getUriPath(src),
+        true);
+    return res.targetFileSystem.getStoragePolicy(res.remainingPath);
+  }
+
+  @Override
+  public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+      throws IOException {
+    Collection<BlockStoragePolicySpi> allPolicies = new HashSet<>();
+    for (FileSystem fs : getChildFileSystems()) {
+      try {
+        Collection<? extends BlockStoragePolicySpi> policies =
+            fs.getAllStoragePolicies();
+        allPolicies.addAll(policies);
+      } catch (UnsupportedOperationException e) {
+        // ignored
+      }
+    }
+    return allPolicies;
+  }
+
   /*
    * An instance of this class represents an internal dir of the viewFs 
    * that is internal dir of the mount table.
@@ -862,7 +901,7 @@ public class ViewFileSystem extends FileSystem {
     public FileStatus getFileStatus(Path f) throws IOException {
       checkPathIsSlash(f);
       return new FileStatus(0, true, 0, 0, creationTime, creationTime,
-          PERMISSION_555, ugi.getUserName(), ugi.getGroupNames()[0],
+          PERMISSION_555, ugi.getUserName(), ugi.getPrimaryGroupName(),
 
           new Path(theInternalDir.fullPath).makeQualified(
               myUri, ROOT_PATH));
@@ -883,7 +922,7 @@ public class ViewFileSystem extends FileSystem {
 
           result[i++] = new FileStatus(0, false, 0, 0,
             creationTime, creationTime, PERMISSION_555,
-            ugi.getUserName(), ugi.getGroupNames()[0],
+            ugi.getUserName(), ugi.getPrimaryGroupName(),
             link.getTargetLink(),
             new Path(inode.fullPath).makeQualified(
                 myUri, null));
@@ -1015,7 +1054,7 @@ public class ViewFileSystem extends FileSystem {
     public AclStatus getAclStatus(Path path) throws IOException {
       checkPathIsSlash(path);
       return new AclStatus.Builder().owner(ugi.getUserName())
-          .group(ugi.getGroupNames()[0])
+          .group(ugi.getPrimaryGroupName())
           .addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
           .stickyBit(false).build();
     }
@@ -1079,5 +1118,39 @@ public class ViewFileSystem extends FileSystem {
     public QuotaUsage getQuotaUsage(Path f) throws IOException {
       throw new NotInMountpointException(f, "getQuotaUsage");
     }
+
+    @Override
+    public void setStoragePolicy(Path src, String policyName)
+        throws IOException {
+      checkPathIsSlash(src);
+      throw readOnlyMountTable("setStoragePolicy", src);
+    }
+
+    @Override
+    public void unsetStoragePolicy(Path src) throws IOException {
+      checkPathIsSlash(src);
+      throw readOnlyMountTable("unsetStoragePolicy", src);
+    }
+
+    @Override
+    public BlockStoragePolicySpi getStoragePolicy(Path src) throws IOException {
+      throw new NotInMountpointException(src, "getStoragePolicy");
+    }
+
+    @Override
+    public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
+        throws IOException {
+      Collection<BlockStoragePolicySpi> allPolicies = new HashSet<>();
+      for (FileSystem fs : getChildFileSystems()) {
+        try {
+          Collection<? extends BlockStoragePolicySpi> policies =
+              fs.getAllStoragePolicies();
+          allPolicies.addAll(policies);
+        } catch (UnsupportedOperationException e) {
+          // ignored
+        }
+      }
+      return allPolicies;
+    }
   }
 }

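With these overrides, storage-policy calls on a viewfs path resolve to the backing filesystem, and getAllStoragePolicies() unions the policies of all child filesystems. A hedged usage sketch, assuming fs.defaultFS points at a viewfs mount backed by HDFS and that a policy named "HOT" exists there:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockStoragePolicySpi;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ViewFsStoragePolicySketch {
      public static void main(String[] args) throws Exception {
        FileSystem viewFs = FileSystem.get(new Configuration());
        Path dir = new Path("/data/logs");               // hypothetical mounted path
        viewFs.setStoragePolicy(dir, "HOT");             // forwarded to the target filesystem
        BlockStoragePolicySpi policy = viewFs.getStoragePolicy(dir);
        System.out.println("policy on " + dir + ": " + policy);
        for (BlockStoragePolicySpi p : viewFs.getAllStoragePolicies()) {
          System.out.println("available: " + p.getName()); // union across child filesystems
        }
      }
    }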
+ 6 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -843,14 +843,14 @@ public class ViewFs extends AbstractFileSystem {
     public FileStatus getFileStatus(final Path f) throws IOException {
       checkPathIsSlash(f);
       return new FileStatus(0, true, 0, 0, creationTime, creationTime,
-          PERMISSION_555, ugi.getUserName(), ugi.getGroupNames()[0],
+          PERMISSION_555, ugi.getUserName(), ugi.getPrimaryGroupName(),
           new Path(theInternalDir.fullPath).makeQualified(
               myUri, null));
     }
     
     @Override
     public FileStatus getFileLinkStatus(final Path f)
-        throws FileNotFoundException {
+        throws IOException {
      // look up in internalDir's children - ignore first Slash
       INode<AbstractFileSystem> inode =
         theInternalDir.children.get(f.toUri().toString().substring(1)); 
@@ -863,13 +863,13 @@ public class ViewFs extends AbstractFileSystem {
         INodeLink<AbstractFileSystem> inodelink = 
           (INodeLink<AbstractFileSystem>) inode;
         result = new FileStatus(0, false, 0, 0, creationTime, creationTime,
-            PERMISSION_555, ugi.getUserName(), ugi.getGroupNames()[0],
+            PERMISSION_555, ugi.getUserName(), ugi.getPrimaryGroupName(),
             inodelink.getTargetLink(),
             new Path(inode.fullPath).makeQualified(
                 myUri, null));
       } else {
         result = new FileStatus(0, true, 0, 0, creationTime, creationTime,
-          PERMISSION_555, ugi.getUserName(), ugi.getGroupNames()[0],
+          PERMISSION_555, ugi.getUserName(), ugi.getPrimaryGroupName(),
           new Path(inode.fullPath).makeQualified(
               myUri, null));
       }
@@ -908,7 +908,7 @@ public class ViewFs extends AbstractFileSystem {
 
           result[i++] = new FileStatus(0, false, 0, 0,
             creationTime, creationTime,
-            PERMISSION_555, ugi.getUserName(), ugi.getGroupNames()[0],
+            PERMISSION_555, ugi.getUserName(), ugi.getPrimaryGroupName(),
             link.getTargetLink(),
             new Path(inode.fullPath).makeQualified(
                 myUri, null));
@@ -1042,7 +1042,7 @@ public class ViewFs extends AbstractFileSystem {
     public AclStatus getAclStatus(Path path) throws IOException {
       checkPathIsSlash(path);
       return new AclStatus.Builder().owner(ugi.getUserName())
-          .group(ugi.getGroupNames()[0])
+          .group(ugi.getPrimaryGroupName())
           .addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
           .stickyBit(false).build();
     }

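The recurring change from ugi.getGroupNames()[0] to ugi.getPrimaryGroupName() uses the dedicated accessor instead of indexing into the group array. A small sketch of the call, assuming the current user belongs to at least one group:

    import org.apache.hadoop.security.UserGroupInformation;

    public class PrimaryGroupSketch {
      public static void main(String[] args) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // getPrimaryGroupName() replaces getGroupNames()[0] in the code above.
        System.out.println(ugi.getUserName() + " : " + ugi.getPrimaryGroupName());
      }
    }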
+ 15 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -667,13 +667,13 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
   /**
    * Get a new zookeeper client instance. protected so that test class can
-   * inherit and pass in a mock object for zookeeper
+   * inherit and mock out the zookeeper instance
    * 
    * @return new zookeeper client instance
    * @throws IOException
    * @throws KeeperException zookeeper connectionloss exception
    */
-  protected synchronized ZooKeeper getNewZooKeeper() throws IOException,
+  protected synchronized ZooKeeper connectToZooKeeper() throws IOException,
       KeeperException {
     
     // Unfortunately, the ZooKeeper constructor connects to ZooKeeper and
@@ -682,7 +682,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     // we construct the watcher first, and have it block any events it receives
     // before we can set its ZooKeeper reference.
     watcher = new WatcherWithClientRef();
-    ZooKeeper zk = new ZooKeeper(zkHostPort, zkSessionTimeout, watcher);
+    ZooKeeper zk = createZooKeeper();
     watcher.setZooKeeperRef(zk);
 
     // Wait for the asynchronous success/failure. This may throw an exception
@@ -695,6 +695,17 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     return zk;
   }
 
+  /**
+   * Get a new zookeeper client instance. Protected so that a test class can
+   * override it and pass in a mock object for zookeeper.
+   *
+   * @return new zookeeper client instance
+   * @throws IOException
+   */
+  protected ZooKeeper createZooKeeper() throws IOException {
+    return new ZooKeeper(zkHostPort, zkSessionTimeout, watcher);
+  }
+
   private void fatalError(String errorMessage) {
     LOG.fatal(errorMessage);
     reset();
@@ -830,7 +841,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       zkClient = null;
       watcher = null;
     }
-    zkClient = getNewZooKeeper();
+    zkClient = connectToZooKeeper();
     if (LOG.isDebugEnabled()) {
       LOG.debug("Created new connection for " + this);
     }

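Splitting connectToZooKeeper() from createZooKeeper() is the classic protected-factory-method seam for tests: production code keeps the watcher and handshake logic, while a test subclass overrides only the object creation. A generic, self-contained sketch of that pattern (not using the real elector class):

    // Analogous shape only; the real hook is ActiveStandbyElector#createZooKeeper().
    class Connector {
      protected String createClient() {        // like createZooKeeper(): object creation only
        return "real-client";
      }
      final String connect() {                 // like connectToZooKeeper(): setup + handshake
        String client = createClient();
        // ... register watcher, wait for the connection event ...
        return client;
      }
    }

    class TestConnector extends Connector {
      @Override
      protected String createClient() {
        return "mock-client";                  // tests inject a stub, no network involved
      }
    }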
+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java

@@ -21,8 +21,8 @@ import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
+import java.nio.charset.StandardCharsets;
 
-import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 
 /**
@@ -78,7 +78,7 @@ class StreamPumper {
 
   protected void pump() throws IOException {
     InputStreamReader inputStreamReader = new InputStreamReader(
-        stream, Charsets.UTF_8);
+        stream, StandardCharsets.UTF_8);
     BufferedReader br = new BufferedReader(inputStreamReader);
     String line = null;
     while ((line = br.readLine()) != null) {

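Several files in this merge swap org.apache.commons.io.Charsets for the JDK's java.nio.charset.StandardCharsets; the constants are equivalent but drop the commons-io dependency. A tiny self-contained illustration:

    import java.io.BufferedReader;
    import java.io.ByteArrayInputStream;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;

    public class CharsetSketch {
      public static void main(String[] args) throws Exception {
        byte[] data = "hello".getBytes(StandardCharsets.UTF_8);   // JDK constant since Java 7
        BufferedReader br = new BufferedReader(new InputStreamReader(
            new ByteArrayInputStream(data), StandardCharsets.UTF_8));
        System.out.println(br.readLine());                        // prints "hello"
      }
    }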
+ 28 - 14
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java

@@ -17,21 +17,25 @@
  */
 package org.apache.hadoop.http;
 
-import org.apache.commons.io.Charsets;
-
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
 
 /**
  * This class is responsible for quoting HTML characters.
  */
 public class HtmlQuoting {
-  private static final byte[] ampBytes = "&amp;".getBytes(Charsets.UTF_8);
-  private static final byte[] aposBytes = "&apos;".getBytes(Charsets.UTF_8);
-  private static final byte[] gtBytes = "&gt;".getBytes(Charsets.UTF_8);
-  private static final byte[] ltBytes = "&lt;".getBytes(Charsets.UTF_8);
-  private static final byte[] quotBytes = "&quot;".getBytes(Charsets.UTF_8);
+  private static final byte[] AMP_BYTES =
+      "&amp;".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] APOS_BYTES =
+      "&apos;".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] GT_BYTES =
+      "&gt;".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] LT_BYTES =
+      "&lt;".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] QUOT_BYTES =
+      "&quot;".getBytes(StandardCharsets.UTF_8);
 
   /**
    * Does the given string need to be quoted?
@@ -65,7 +69,7 @@ public class HtmlQuoting {
     if (str == null) {
       return false;
     }
-    byte[] bytes = str.getBytes(Charsets.UTF_8);
+    byte[] bytes = str.getBytes(StandardCharsets.UTF_8);
     return needsQuoting(bytes, 0 , bytes.length);
   }
 
@@ -81,11 +85,21 @@ public class HtmlQuoting {
                                     int off, int len) throws IOException {
     for(int i=off; i < off+len; i++) {
       switch (buffer[i]) {
-      case '&': output.write(ampBytes); break;
-      case '<': output.write(ltBytes); break;
-      case '>': output.write(gtBytes); break;
-      case '\'': output.write(aposBytes); break;
-      case '"': output.write(quotBytes); break;
+      case '&':
+        output.write(AMP_BYTES);
+        break;
+      case '<':
+        output.write(LT_BYTES);
+        break;
+      case '>':
+        output.write(GT_BYTES);
+        break;
+      case '\'':
+        output.write(APOS_BYTES);
+        break;
+      case '"':
+        output.write(QUOT_BYTES);
+        break;
       default: output.write(buffer, i, 1);
       }
     }
@@ -100,7 +114,7 @@ public class HtmlQuoting {
     if (item == null) {
       return null;
     }
-    byte[] bytes = item.getBytes(Charsets.UTF_8);
+    byte[] bytes = item.getBytes(StandardCharsets.UTF_8);
     if (needsQuoting(bytes, 0, bytes.length)) {
       ByteArrayOutputStream buffer = new ByteArrayOutputStream();
       try {

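The renamed UTF-8 entity constants back HtmlQuoting's static helpers; behaviour is unchanged. For reference, a short example of the public API (the input string is made up):

    import org.apache.hadoop.http.HtmlQuoting;

    public class QuotingSketch {
      public static void main(String[] args) {
        String quoted = HtmlQuoting.quoteHtmlChars("<b>\"5 & 6\"</b>");
        // expected: &lt;b&gt;&quot;5 &amp; 6&quot;&lt;/b&gt;
        System.out.println(quoted);
      }
    }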
+ 106 - 37
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java

@@ -56,7 +56,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.security.AuthenticationFilterInitializer;
 import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
-import org.apache.hadoop.security.ssl.SslSocketConnectorSecure;
+import org.apache.hadoop.security.ssl.SslSelectChannelConnectorSecure;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
 import org.apache.hadoop.security.SecurityUtil;
@@ -77,7 +77,7 @@ import org.mortbay.jetty.handler.ContextHandlerCollection;
 import org.mortbay.jetty.handler.HandlerCollection;
 import org.mortbay.jetty.handler.RequestLogHandler;
 import org.mortbay.jetty.nio.SelectChannelConnector;
-import org.mortbay.jetty.security.SslSocketConnector;
+import org.mortbay.jetty.security.SslSelectChannelConnector;
 import org.mortbay.jetty.servlet.AbstractSessionManager;
 import org.mortbay.jetty.servlet.Context;
 import org.mortbay.jetty.servlet.DefaultServlet;
@@ -136,6 +136,11 @@ public final class HttpServer2 implements FilterContainer {
   static final String STATE_DESCRIPTION_ALIVE = " - alive";
   static final String STATE_DESCRIPTION_NOT_LIVE = " - not live";
   private final SignerSecretProvider secretProvider;
+  private XFrameOption xFrameOption;
+  private boolean xFrameOptionIsEnabled;
+  private static final String X_FRAME_VALUE = "xFrameOption";
+  private static final String X_FRAME_ENABLED = "X_FRAME_ENABLED";
+
 
   /**
    * Class to construct instances of HTTP server with specific options.
@@ -168,6 +173,9 @@ public final class HttpServer2 implements FilterContainer {
     private String authFilterConfigurationPrefix = "hadoop.http.authentication.";
     private String excludeCiphers;
 
+    private boolean xFrameEnabled;
+    private XFrameOption xFrameOption = XFrameOption.SAMEORIGIN;
+
     public Builder setName(String name){
       this.name = name;
       return this;
@@ -276,6 +284,30 @@ public final class HttpServer2 implements FilterContainer {
       return this;
     }
 
+    /**
+     * Adds the ability to control X_FRAME_OPTIONS on HttpServer2.
+     * @param xFrameEnabled - true enables X_FRAME_OPTIONS; false disables it.
+     * @return Builder.
+     */
+    public Builder configureXFrame(boolean xFrameEnabled) {
+      this.xFrameEnabled = xFrameEnabled;
+      return this;
+    }
+
+    /**
+     * Sets a valid X-Frame-Options value to be used by HttpServer2.
+     * @param option - DENY, SAMEORIGIN or ALLOW-FROM are the only valid
+     *               options; any other value throws an
+     *               IllegalArgumentException.
+     * @return Builder.
+     */
+    public Builder setXFrameOption(String option) {
+      this.xFrameOption = XFrameOption.getEnum(option);
+      return this;
+    }
+
     public HttpServer2 build() throws IOException {
       Preconditions.checkNotNull(name, "name is not set");
       Preconditions.checkState(!endpoints.isEmpty(), "No endpoints specified");
@@ -300,29 +332,7 @@ public final class HttpServer2 implements FilterContainer {
         if ("http".equals(scheme)) {
           listener = HttpServer2.createDefaultChannelConnector();
         } else if ("https".equals(scheme)) {
-          SslSocketConnector c = new SslSocketConnectorSecure();
-          c.setHeaderBufferSize(1024*64);
-          c.setNeedClientAuth(needsClientAuth);
-          c.setKeyPassword(keyPassword);
-
-          if (keyStore != null) {
-            c.setKeystore(keyStore);
-            c.setKeystoreType(keyStoreType);
-            c.setPassword(keyStorePassword);
-          }
-
-          if (trustStore != null) {
-            c.setTruststore(trustStore);
-            c.setTruststoreType(trustStoreType);
-            c.setTrustPassword(trustStorePassword);
-          }
-
-          if(null != excludeCiphers && !excludeCiphers.isEmpty()) {
-            c.setExcludeCipherSuites(excludeCiphers.split(","));
-            LOG.info("Excluded Cipher List:" + excludeCiphers);
-          }
-
-          listener = c;
+          listener = createHttpsChannelConnector();
 
         } else {
           throw new HadoopIllegalArgumentException(
@@ -335,6 +345,32 @@ public final class HttpServer2 implements FilterContainer {
       server.loadListeners();
       return server;
     }
+
+    private Connector createHttpsChannelConnector() {
+      SslSelectChannelConnector c = new SslSelectChannelConnectorSecure();
+      configureChannelConnector(c);
+
+      c.setNeedClientAuth(needsClientAuth);
+      c.setKeyPassword(keyPassword);
+
+      if (keyStore != null) {
+        c.setKeystore(keyStore);
+        c.setKeystoreType(keyStoreType);
+        c.setPassword(keyStorePassword);
+      }
+
+      if (trustStore != null) {
+        c.setTruststore(trustStore);
+        c.setTruststoreType(trustStoreType);
+        c.setTrustPassword(trustStorePassword);
+      }
+
+      if(null != excludeCiphers && !excludeCiphers.isEmpty()) {
+        c.setExcludeCipherSuites(excludeCiphers.split(","));
+        LOG.info("Excluded Cipher List:" + excludeCiphers);
+      }
+      return c;
+    }
   }
 
   private HttpServer2(final Builder b) throws IOException {
@@ -342,6 +378,9 @@ public final class HttpServer2 implements FilterContainer {
     this.webServer = new Server();
     this.adminsAcl = b.adminsAcl;
     this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, appDir);
+    this.xFrameOptionIsEnabled = b.xFrameEnabled;
+    this.xFrameOption = b.xFrameOption;
+
     try {
       this.secretProvider =
           constructSecretProvider(b, webAppContext.getServletContext());
@@ -398,7 +437,11 @@ public final class HttpServer2 implements FilterContainer {
 
     addDefaultApps(contexts, appDir, conf);
 
-    addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
+    Map<String, String> xFrameParams = new HashMap<>();
+    xFrameParams.put(X_FRAME_ENABLED,
+        String.valueOf(this.xFrameOptionIsEnabled));
+    xFrameParams.put(X_FRAME_VALUE,  this.xFrameOption.toString());
+    addGlobalFilter("safety", QuotingInputFilter.class.getName(), xFrameParams);
     final FilterInitializer[] initializers = getFilterInitializers(conf);
     if (initializers != null) {
       conf = new Configuration(conf);
@@ -469,21 +512,25 @@ public final class HttpServer2 implements FilterContainer {
                  Collections.<String, String> emptyMap(), new String[] { "/*" });
   }
 
-  @InterfaceAudience.Private
-  public static Connector createDefaultChannelConnector() {
-    SelectChannelConnector ret = new SelectChannelConnector();
-    ret.setLowResourceMaxIdleTime(10000);
-    ret.setAcceptQueueSize(128);
-    ret.setResolveNames(false);
-    ret.setUseDirectBuffers(false);
+  private static void configureChannelConnector(SelectChannelConnector c) {
+    c.setLowResourceMaxIdleTime(10000);
+    c.setAcceptQueueSize(128);
+    c.setResolveNames(false);
+    c.setUseDirectBuffers(false);
     if(Shell.WINDOWS) {
       // result of setting the SO_REUSEADDR flag is different on Windows
       // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
       // without this 2 NN's can start on the same machine and listen on
       // the same port with indeterminate routing of incoming requests to them
-      ret.setReuseAddress(false);
+      c.setReuseAddress(false);
     }
-    ret.setHeaderBufferSize(1024*64);
+    c.setHeaderBufferSize(1024*64);
+  }
+
+  @InterfaceAudience.Private
+  public static Connector createDefaultChannelConnector() {
+    SelectChannelConnector ret = new SelectChannelConnector();
+    configureChannelConnector(ret);
     return ret;
   }
 
@@ -1119,7 +1166,7 @@ public final class HttpServer2 implements FilterContainer {
    * sets X-FRAME-OPTIONS in the header to mitigate clickjacking attacks.
    */
   public static class QuotingInputFilter implements Filter {
-    private static final XFrameOption X_FRAME_OPTION = XFrameOption.SAMEORIGIN;
+
     private FilterConfig config;
 
     public static class RequestQuoter extends HttpServletRequestWrapper {
@@ -1239,7 +1286,11 @@ public final class HttpServer2 implements FilterContainer {
       } else if (mime.startsWith("application/xml")) {
         httpResponse.setContentType("text/xml; charset=utf-8");
       }
-      httpResponse.addHeader("X-FRAME-OPTIONS", X_FRAME_OPTION.toString());
+
+      if(Boolean.valueOf(this.config.getInitParameter(X_FRAME_ENABLED))) {
+        httpResponse.addHeader("X-FRAME-OPTIONS",
+            this.config.getInitParameter(X_FRAME_VALUE));
+      }
       chain.doFilter(quoted, httpResponse);
     }
 
@@ -1274,5 +1325,23 @@ public final class HttpServer2 implements FilterContainer {
     public String toString() {
       return this.name;
     }
+
+    /**
+     * We cannot use valueOf since the AllowFrom enum constant differs from its
+     * string value Allow-From. This helper method does exactly what valueOf
+     * does, but lets us handle the AllowFrom mismatch gracefully.
+     *
+     * @param value - String must be DENY, SAMEORIGIN or ALLOW-FROM.
+     * @return XFrameOption or throws IllegalException.
+     */
+    private static XFrameOption getEnum(String value) {
+      Preconditions.checkState(value != null && !value.isEmpty());
+      for (XFrameOption xoption : values()) {
+        if (value.equals(xoption.toString())) {
+          return xoption;
+        }
+      }
+      throw new IllegalArgumentException("Unexpected value in xFrameOption.");
+    }
   }
 }

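The two new builder methods make the X-FRAME-OPTIONS header opt-in and configurable; the filter then reads X_FRAME_ENABLED and xFrameOption from its init parameters. A hedged sketch of wiring them up (the server name and endpoint are illustrative):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer2;

    public class XFrameSketch {
      public static void main(String[] args) throws Exception {
        HttpServer2 server = new HttpServer2.Builder()
            .setName("example")                          // illustrative name
            .addEndpoint(URI.create("http://localhost:0"))
            .setConf(new Configuration())
            .configureXFrame(true)                       // turn the header on
            .setXFrameOption("DENY")                     // DENY, SAMEORIGIN or ALLOW-FROM
            .build();
        server.start();
        // responses now include "X-FRAME-OPTIONS: DENY" via QuotingInputFilter
        server.stop();
      }
    }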
+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java

@@ -19,11 +19,11 @@
 package org.apache.hadoop.io;
 
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 import java.nio.charset.UnsupportedCharsetException;
 import java.util.ArrayList;
 
 import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -91,7 +91,7 @@ public class DefaultStringifier<T> implements Stringifier<T> {
     serializer.serialize(obj);
     byte[] buf = new byte[outBuf.getLength()];
     System.arraycopy(outBuf.getData(), 0, buf, 0, buf.length);
-    return new String(Base64.encodeBase64(buf), Charsets.UTF_8);
+    return new String(Base64.encodeBase64(buf), StandardCharsets.UTF_8);
   }
 
   @Override

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java

@@ -101,6 +101,7 @@ public final class ElasticByteBufferPool implements ByteBufferPool {
 
   @Override
   public synchronized void putBuffer(ByteBuffer buffer) {
+    buffer.clear();
     TreeMap<Key, ByteBuffer> tree = getBufferTree(buffer.isDirect());
     while (true) {
       Key key = new Key(buffer.capacity(), System.nanoTime());

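Clearing the buffer in putBuffer() means a recycled buffer always comes back with position 0 and limit equal to capacity, so the next caller cannot observe stale position/limit state. A brief sketch of the round trip:

    import java.nio.ByteBuffer;
    import org.apache.hadoop.io.ElasticByteBufferPool;

    public class BufferPoolSketch {
      public static void main(String[] args) {
        ElasticByteBufferPool pool = new ElasticByteBufferPool();
        ByteBuffer buf = pool.getBuffer(false, 1024);   // heap buffer, at least 1 KB
        buf.putInt(42);                                 // position is now 4
        pool.putBuffer(buf);                            // clear() resets position and limit
        ByteBuffer reused = pool.getBuffer(false, 1024);
        System.out.println("writable bytes: " + reused.remaining());
      }
    }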
+ 1 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java

@@ -22,7 +22,6 @@ import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.RandomAccessFile;
-import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -273,7 +272,7 @@ public class SecureIOUtils {
             UserGroupInformation.createRemoteUser(expectedOwner);
         final String adminsGroupString = "Administrators";
         success = owner.equals(adminsGroupString)
-            && Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString);
+            && ugi.getGroups().contains(adminsGroupString);
       } else {
         success = false;
       }

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java

@@ -19,11 +19,11 @@
 package org.apache.hadoop.io;
 
 import java.io.*;
+import java.nio.charset.StandardCharsets;
 import java.util.*;
 import java.rmi.server.UID;
 import java.security.MessageDigest;
 
-import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.*;
 import org.apache.hadoop.util.Options;
 import org.apache.hadoop.fs.*;
@@ -853,7 +853,7 @@ public class SequenceFile {
       try {                                       
         MessageDigest digester = MessageDigest.getInstance("MD5");
         long time = Time.now();
-        digester.update((new UID()+"@"+time).getBytes(Charsets.UTF_8));
+        digester.update((new UID()+"@"+time).getBytes(StandardCharsets.UTF_8));
         sync = digester.digest();
       } catch (Exception e) {
         throw new RuntimeException(e);

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java

@@ -22,8 +22,8 @@ import java.io.BufferedInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
 
-import org.apache.commons.io.Charsets;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 
@@ -287,7 +287,7 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
         // The compressed bzip2 stream should start with the
         // identifying characters BZ. Caller of CBZip2OutputStream
         // i.e. this class must write these characters.
-        out.write(HEADER.getBytes(Charsets.UTF_8));
+        out.write(HEADER.getBytes(StandardCharsets.UTF_8));
       }
     }
 
@@ -421,7 +421,7 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
         byte[] headerBytes = new byte[HEADER_LEN];
         int actualRead = bufferedIn.read(headerBytes, 0, HEADER_LEN);
         if (actualRead != -1) {
-          String header = new String(headerBytes, Charsets.UTF_8);
+          String header = new String(headerBytes, StandardCharsets.UTF_8);
           if (header.compareTo(HEADER) != 0) {
             bufferedIn.reset();
           } else {

+ 45 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/HHXORErasureCodec.java

@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.codec;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.coder.ErasureCoder;
+import org.apache.hadoop.io.erasurecode.coder.HHXORErasureDecoder;
+import org.apache.hadoop.io.erasurecode.coder.HHXORErasureEncoder;
+
+/**
+ * A Hitchhiker-XOR erasure codec.
+ */
+@InterfaceAudience.Private
+public class HHXORErasureCodec extends AbstractErasureCodec {
+
+  public HHXORErasureCodec(ECSchema schema) {
+    super(schema);
+  }
+
+  @Override
+  public ErasureCoder createEncoder() {
+    return new HHXORErasureEncoder(getSchema());
+  }
+
+  @Override
+  public ErasureCoder createDecoder() {
+    return new HHXORErasureDecoder(getSchema());
+  }
+}

+ 84 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawDecoder.java

@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Abstract native raw decoder for all native coders to extend.
+ */
+@InterfaceAudience.Private
+abstract class AbstractNativeRawDecoder extends RawErasureDecoder {
+  public static Logger LOG =
+      LoggerFactory.getLogger(AbstractNativeRawDecoder.class);
+
+  public AbstractNativeRawDecoder(ErasureCoderOptions coderOptions) {
+    super(coderOptions);
+  }
+
+  @Override
+  protected void doDecode(ByteBufferDecodingState decodingState) {
+    int[] inputOffsets = new int[decodingState.inputs.length];
+    int[] outputOffsets = new int[decodingState.outputs.length];
+
+    ByteBuffer buffer;
+    for (int i = 0; i < decodingState.inputs.length; ++i) {
+      buffer = decodingState.inputs[i];
+      if (buffer != null) {
+        inputOffsets[i] = buffer.position();
+      }
+    }
+
+    for (int i = 0; i < decodingState.outputs.length; ++i) {
+      buffer = decodingState.outputs[i];
+      outputOffsets[i] = buffer.position();
+    }
+
+    performDecodeImpl(decodingState.inputs, inputOffsets,
+        decodingState.decodeLength, decodingState.erasedIndexes,
+        decodingState.outputs, outputOffsets);
+  }
+
+  protected abstract void performDecodeImpl(ByteBuffer[] inputs,
+                                            int[] inputOffsets, int dataLen,
+                                            int[] erased, ByteBuffer[] outputs,
+                                            int[] outputOffsets);
+
+  @Override
+  protected void doDecode(ByteArrayDecodingState decodingState) {
+    LOG.warn("convertToByteBufferState is invoked, " +
+        "not efficiently. Please use direct ByteBuffer inputs/outputs");
+
+    ByteBufferDecodingState bbdState = decodingState.convertToByteBufferState();
+    doDecode(bbdState);
+
+    for (int i = 0; i < decodingState.outputs.length; i++) {
+      bbdState.outputs[i].get(decodingState.outputs[i],
+          decodingState.outputOffsets[i], decodingState.decodeLength);
+    }
+  }
+
+  // To link with the underlying data structure in the native layer.
+  // No get/set as only used by native codes.
+  private long nativeCoder;
+}

+ 81 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractNativeRawEncoder.java

@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Abstract native raw encoder for all native coders to extend.
+ */
+@InterfaceAudience.Private
+abstract class AbstractNativeRawEncoder extends RawErasureEncoder {
+  public static Logger LOG =
+      LoggerFactory.getLogger(AbstractNativeRawEncoder.class);
+
+  public AbstractNativeRawEncoder(ErasureCoderOptions coderOptions) {
+    super(coderOptions);
+  }
+
+  @Override
+  protected void doEncode(ByteBufferEncodingState encodingState) {
+    int[] inputOffsets = new int[encodingState.inputs.length];
+    int[] outputOffsets = new int[encodingState.outputs.length];
+    int dataLen = encodingState.inputs[0].remaining();
+
+    ByteBuffer buffer;
+    for (int i = 0; i < encodingState.inputs.length; ++i) {
+      buffer = encodingState.inputs[i];
+      inputOffsets[i] = buffer.position();
+    }
+
+    for (int i = 0; i < encodingState.outputs.length; ++i) {
+      buffer = encodingState.outputs[i];
+      outputOffsets[i] = buffer.position();
+    }
+
+    performEncodeImpl(encodingState.inputs, inputOffsets, dataLen,
+        encodingState.outputs, outputOffsets);
+  }
+
+  protected abstract void performEncodeImpl(
+          ByteBuffer[] inputs, int[] inputOffsets,
+          int dataLen, ByteBuffer[] outputs, int[] outputOffsets);
+
+  @Override
+  protected void doEncode(ByteArrayEncodingState encodingState) {
+    LOG.warn("convertToByteBufferState is invoked, " +
+        "not efficiently. Please use direct ByteBuffer inputs/outputs");
+
+    ByteBufferEncodingState bbeState = encodingState.convertToByteBufferState();
+    doEncode(bbeState);
+
+    for (int i = 0; i < encodingState.outputs.length; i++) {
+      bbeState.outputs[i].get(encodingState.outputs[i],
+          encodingState.outputOffsets[i], encodingState.encodeLength);
+    }
+  }
+
+  // To link with the underlying data structure in the native layer.
+  // No get/set as only used by native codes.
+  private long nativeCoder;
+}

+ 23 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteArrayDecodingState.java

@@ -20,6 +20,8 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 
+import java.nio.ByteBuffer;
+
 /**
  * A utility class that maintains decoding state during a decode call using
  * byte array inputs.
@@ -65,6 +67,27 @@ class ByteArrayDecodingState extends DecodingState {
     this.outputOffsets = outputOffsets;
   }
 
+  /**
+   * Convert to a ByteBufferDecodingState when it's backed by on-heap arrays.
+   */
+  ByteBufferDecodingState convertToByteBufferState() {
+    ByteBuffer[] newInputs = new ByteBuffer[inputs.length];
+    ByteBuffer[] newOutputs = new ByteBuffer[outputs.length];
+
+    for (int i = 0; i < inputs.length; i++) {
+      newInputs[i] = CoderUtil.cloneAsDirectByteBuffer(inputs[i],
+          inputOffsets[i], decodeLength);
+    }
+
+    for (int i = 0; i < outputs.length; i++) {
+      newOutputs[i] = ByteBuffer.allocateDirect(decodeLength);
+    }
+
+    ByteBufferDecodingState bbdState = new ByteBufferDecodingState(decoder,
+        decodeLength, erasedIndexes, newInputs, newOutputs);
+    return bbdState;
+  }
+
   /**
    * Check and ensure the buffers are of the desired length.
    * @param buffers the buffers to check

+ 23 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteArrayEncodingState.java

@@ -20,6 +20,8 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 
+import java.nio.ByteBuffer;
+
 /**
  * A utility class that maintains encoding state during an encode call using
  * byte array inputs.
@@ -61,6 +63,27 @@ class ByteArrayEncodingState extends EncodingState {
     this.outputOffsets = outputOffsets;
   }
 
+  /**
+   * Convert to a ByteBufferEncodingState when it's backed by on-heap arrays.
+   */
+  ByteBufferEncodingState convertToByteBufferState() {
+    ByteBuffer[] newInputs = new ByteBuffer[inputs.length];
+    ByteBuffer[] newOutputs = new ByteBuffer[outputs.length];
+
+    for (int i = 0; i < inputs.length; i++) {
+      newInputs[i] = CoderUtil.cloneAsDirectByteBuffer(inputs[i],
+          inputOffsets[i], encodeLength);
+    }
+
+    for (int i = 0; i < outputs.length; i++) {
+      newOutputs[i] = ByteBuffer.allocateDirect(encodeLength);
+    }
+
+    ByteBufferEncodingState bbeState = new ByteBufferEncodingState(encoder,
+        encodeLength, newInputs, newOutputs);
+    return bbeState;
+  }
+
   /**
    * Check and ensure the buffers are of the desired length.
    * @param buffers the buffers to check

+ 13 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteBufferDecodingState.java

@@ -48,8 +48,20 @@ class ByteBufferDecodingState extends DecodingState {
     checkOutputBuffers(outputs);
   }
 
+  ByteBufferDecodingState(RawErasureDecoder decoder,
+                         int decodeLength,
+                         int[] erasedIndexes,
+                         ByteBuffer[] inputs,
+                          ByteBuffer[] outputs) {
+    this.decoder = decoder;
+    this.decodeLength = decodeLength;
+    this.erasedIndexes = erasedIndexes;
+    this.inputs = inputs;
+    this.outputs = outputs;
+  }
+
   /**
-   * Convert to a ByteArrayEncodingState when it's backed by on-heap arrays.
+   * Convert to a ByteArrayDecodingState when it's backed by on-heap arrays.
    */
   ByteArrayDecodingState convertToByteArrayState() {
     int[] inputOffsets = new int[inputs.length];

+ 10 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteBufferEncodingState.java

@@ -46,6 +46,16 @@ class ByteBufferEncodingState extends EncodingState {
     checkBuffers(outputs);
   }
 
+  ByteBufferEncodingState(RawErasureEncoder encoder,
+                          int encodeLength,
+                          ByteBuffer[] inputs,
+                          ByteBuffer[] outputs) {
+    this.encoder = encoder;
+    this.encodeLength = encodeLength;
+    this.inputs = inputs;
+    this.outputs = outputs;
+  }
+
   /**
    * Convert to a ByteArrayEncodingState when it's backed by on-heap arrays.
    */

+ 0 - 12
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/CoderUtil.java

@@ -83,8 +83,6 @@ final class CoderUtil {
 
   /**
    * Initialize the output buffers with ZERO bytes.
-   * @param buffers
-   * @param dataLen
    */
   static void resetOutputBuffers(ByteBuffer[] buffers, int dataLen) {
     for (ByteBuffer buffer : buffers) {
@@ -94,8 +92,6 @@ final class CoderUtil {
 
   /**
    * Initialize the output buffers with ZERO bytes.
-   * @param buffers
-   * @param dataLen
    */
   static void resetOutputBuffers(byte[][] buffers, int[] offsets,
                                  int dataLen) {
@@ -127,10 +123,6 @@ final class CoderUtil {
 
   /**
    * Clone an input bytes array as direct ByteBuffer.
-   * @param input
-   * @param len
-   * @param offset
-   * @return direct ByteBuffer
    */
   static ByteBuffer cloneAsDirectByteBuffer(byte[] input, int offset, int len) {
     if (input == null) { // an input can be null, if erased or not to read
@@ -166,10 +158,6 @@ final class CoderUtil {
    * @return the first valid input
    */
   static <T> T findFirstValidInput(T[] inputs) {
-    if (inputs.length > 0 && inputs[0] != null) {
-      return inputs[0];
-    }
-
     for (T input : inputs) {
       if (input != null) {
         return input;

+ 66 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeRSRawDecoder.java

@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A Reed-Solomon raw decoder using Intel ISA-L library.
+ */
+@InterfaceAudience.Private
+public class NativeRSRawDecoder extends AbstractNativeRawDecoder {
+
+  static {
+    ErasureCodeNative.checkNativeCodeLoaded();
+  }
+
+  public NativeRSRawDecoder(ErasureCoderOptions coderOptions) {
+    super(coderOptions);
+    initImpl(coderOptions.getNumDataUnits(), coderOptions.getNumParityUnits());
+  }
+
+  @Override
+  protected void performDecodeImpl(ByteBuffer[] inputs, int[] inputOffsets,
+                                   int dataLen, int[] erased,
+                                   ByteBuffer[] outputs, int[] outputOffsets) {
+    decodeImpl(inputs, inputOffsets, dataLen, erased, outputs, outputOffsets);
+  }
+
+  @Override
+  public void release() {
+    destroyImpl();
+  }
+
+  @Override
+  public boolean preferDirectBuffer() {
+    return true;
+  }
+
+  private native void initImpl(int numDataUnits, int numParityUnits);
+
+  private native void decodeImpl(
+          ByteBuffer[] inputs, int[] inputOffsets, int dataLen, int[] erased,
+          ByteBuffer[] outputs, int[] outputOffsets);
+
+  private native void destroyImpl();
+
+}

+ 65 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeRSRawEncoder.java

@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A Reed-Solomon raw encoder using Intel ISA-L library.
+ */
+@InterfaceAudience.Private
+public class NativeRSRawEncoder extends AbstractNativeRawEncoder {
+
+  static {
+    ErasureCodeNative.checkNativeCodeLoaded();
+  }
+
+  public NativeRSRawEncoder(ErasureCoderOptions coderOptions) {
+    super(coderOptions);
+    initImpl(coderOptions.getNumDataUnits(), coderOptions.getNumParityUnits());
+  }
+
+  @Override
+  protected void performEncodeImpl(
+          ByteBuffer[] inputs, int[] inputOffsets, int dataLen,
+          ByteBuffer[] outputs, int[] outputOffsets) {
+    encodeImpl(inputs, inputOffsets, dataLen, outputs, outputOffsets);
+  }
+
+  @Override
+  public void release() {
+    destroyImpl();
+  }
+
+  @Override
+  public boolean preferDirectBuffer() {
+    return true;
+  }
+
+  private native void initImpl(int numDataUnits, int numParityUnits);
+
+  private native void encodeImpl(ByteBuffer[] inputs, int[] inputOffsets,
+                                        int dataLen, ByteBuffer[] outputs,
+                                        int[] outputOffsets);
+
+  private native void destroyImpl();
+}

+ 39 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/NativeRSRawErasureCoderFactory.java

@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
+
+/**
+ * A raw coder factory for raw Reed-Solomon coder in native using Intel ISA-L.
+ */
+
+@InterfaceAudience.Private
+public class NativeRSRawErasureCoderFactory implements RawErasureCoderFactory {
+
+  @Override
+  public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
+    return new NativeRSRawEncoder(coderOptions);
+  }
+
+  @Override
+  public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
+    return new NativeRSRawDecoder(coderOptions);
+  }
+}

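The factory ties the two native coders together; both require the ISA-L native library to be loadable, otherwise the static checkNativeCodeLoaded() call fails. A hedged construction sketch using an RS(6,3) layout:

    import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
    import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
    import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
    import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

    public class NativeCoderSketch {
      public static void main(String[] args) {
        ErasureCoderOptions options = new ErasureCoderOptions(6, 3); // 6 data + 3 parity units
        NativeRSRawErasureCoderFactory factory = new NativeRSRawErasureCoderFactory();
        RawErasureEncoder encoder = factory.createEncoder(options);
        RawErasureDecoder decoder = factory.createDecoder(options);
        // encode()/decode() calls prefer direct ByteBuffers (see preferDirectBuffer()).
        encoder.release();                                           // frees the native coder
        decoder.release();
      }
    }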
+ 17 - 39
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawDecoderLegacy.java

@@ -39,28 +39,6 @@ public class RSRawDecoderLegacy extends RawErasureDecoder {
   private int[] errSignature;
   private int[] primitivePower;
 
-  /**
-   * We need a set of reusable buffers either for the bytes array
-   * decoding version or direct buffer decoding version. Normally not both.
-   *
-   * For output, in addition to the valid buffers from the caller
-   * passed from above, we need to provide extra buffers for the internal
-   * decoding implementation. For output, the caller should provide no more
-   * than numParityUnits but at least one buffers. And the left buffers will be
-   * borrowed from either bytesArrayBuffers, for the bytes array version.
-   *
-   */
-  // Reused buffers for decoding with bytes arrays
-  private byte[][] bytesArrayBuffers = new byte[getNumParityUnits()][];
-  private byte[][] adjustedByteArrayOutputsParameter =
-      new byte[getNumParityUnits()][];
-  private int[] adjustedOutputOffsets = new int[getNumParityUnits()];
-
-  // Reused buffers for decoding with direct ByteBuffers
-  private ByteBuffer[] directBuffers = new ByteBuffer[getNumParityUnits()];
-  private ByteBuffer[] adjustedDirectBufferOutputsParameter =
-      new ByteBuffer[getNumParityUnits()];
-
   public RSRawDecoderLegacy(ErasureCoderOptions coderOptions) {
     super(coderOptions);
     if (getNumAllUnits() >= RSUtil.GF.getFieldSize()) {
@@ -139,16 +117,14 @@ public class RSRawDecoderLegacy extends RawErasureDecoder {
      * implementations, so we have to adjust them before calling doDecodeImpl.
      */
 
+    byte[][] bytesArrayBuffers = new byte[getNumParityUnits()][];
+    byte[][] adjustedByteArrayOutputsParameter =
+        new byte[getNumParityUnits()][];
+    int[] adjustedOutputOffsets = new int[getNumParityUnits()];
+
     int[] erasedOrNotToReadIndexes =
         CoderUtil.getNullIndexes(decodingState.inputs);
 
-    // Prepare for adjustedOutputsParameter
-
-    // First reset the positions needed this time
-    for (int i = 0; i < erasedOrNotToReadIndexes.length; i++) {
-      adjustedByteArrayOutputsParameter[i] = null;
-      adjustedOutputOffsets[i] = 0;
-    }
     // Use the caller passed buffers in erasedIndexes positions
     for (int outputIdx = 0, i = 0;
          i < decodingState.erasedIndexes.length; i++) {
@@ -174,7 +150,8 @@ public class RSRawDecoderLegacy extends RawErasureDecoder {
     for (int bufferIdx = 0, i = 0; i < erasedOrNotToReadIndexes.length; i++) {
       if (adjustedByteArrayOutputsParameter[i] == null) {
         adjustedByteArrayOutputsParameter[i] = CoderUtil.resetBuffer(
-            checkGetBytesArrayBuffer(bufferIdx, dataLen), 0, dataLen);
+            checkGetBytesArrayBuffer(bytesArrayBuffers, bufferIdx, dataLen),
+            0, dataLen);
         adjustedOutputOffsets[i] = 0; // Always 0 for such temp output
         bufferIdx++;
       }
@@ -198,12 +175,10 @@ public class RSRawDecoderLegacy extends RawErasureDecoder {
     int[] erasedOrNotToReadIndexes =
         CoderUtil.getNullIndexes(decodingState.inputs);
 
-    // Prepare for adjustedDirectBufferOutputsParameter
+    ByteBuffer[] directBuffers = new ByteBuffer[getNumParityUnits()];
+    ByteBuffer[] adjustedDirectBufferOutputsParameter =
+        new ByteBuffer[getNumParityUnits()];
 
-    // First reset the positions needed this time
-    for (int i = 0; i < erasedOrNotToReadIndexes.length; i++) {
-      adjustedDirectBufferOutputsParameter[i] = null;
-    }
     // Use the caller passed buffers in erasedIndexes positions
     for (int outputIdx = 0, i = 0;
          i < decodingState.erasedIndexes.length; i++) {
@@ -225,7 +200,8 @@ public class RSRawDecoderLegacy extends RawErasureDecoder {
     // Use shared buffers for other positions (not set yet)
     for (int bufferIdx = 0, i = 0; i < erasedOrNotToReadIndexes.length; i++) {
       if (adjustedDirectBufferOutputsParameter[i] == null) {
-        ByteBuffer buffer = checkGetDirectBuffer(bufferIdx, dataLen);
+        ByteBuffer buffer = checkGetDirectBuffer(
+            directBuffers, bufferIdx, dataLen);
         buffer.position(0);
         buffer.limit(dataLen);
         adjustedDirectBufferOutputsParameter[i] =
@@ -274,15 +250,17 @@ public class RSRawDecoderLegacy extends RawErasureDecoder {
         numErasedParityUnits, numErasedDataUnits);
   }
 
-  private byte[] checkGetBytesArrayBuffer(int idx, int bufferLen) {
+  private static byte[] checkGetBytesArrayBuffer(byte[][] bytesArrayBuffers,
+      int idx, int bufferLen) {
     if (bytesArrayBuffers[idx] == null ||
-            bytesArrayBuffers[idx].length < bufferLen) {
+        bytesArrayBuffers[idx].length < bufferLen) {
       bytesArrayBuffers[idx] = new byte[bufferLen];
     }
     return bytesArrayBuffers[idx];
   }
 
-  private ByteBuffer checkGetDirectBuffer(int idx, int bufferLen) {
+  private static ByteBuffer checkGetDirectBuffer(ByteBuffer[] directBuffers,
+      int idx, int bufferLen) {
     if (directBuffers[idx] == null ||
         directBuffers[idx].capacity() < bufferLen) {
       directBuffers[idx] = ByteBuffer.allocateDirect(bufferLen);

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java

@@ -30,14 +30,14 @@ public interface RawErasureCoderFactory {
 
   /**
    * Create raw erasure encoder.
-   * @param conf the configuration used to create the encoder
+   * @param coderOptions the options used to create the encoder
    * @return raw erasure encoder
    */
   RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions);
 
   /**
    * Create raw erasure decoder.
-   * @param conf the configuration used to create the encoder
+   * @param coderOptions the options used to create the encoder
    * @return raw erasure decoder
    */
   RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions);

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/GaloisField.java

@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.io.erasurecode.rawcoder.util;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-
 import java.nio.ByteBuffer;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
 /**
  * Implementation of Galois field arithmetic with 2^p elements. The input must
  * be unsigned integers. It's ported from HDFS-RAID, slightly adapted.

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/TFileDumper.java

@@ -18,13 +18,13 @@ package org.apache.hadoop.io.file.tfile;
 
 import java.io.IOException;
 import java.io.PrintStream;
+import java.nio.charset.StandardCharsets;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -234,7 +234,7 @@ class TFileDumper {
               out.printf("%X", b);
             }
           } else {
-            out.print(new String(key, 0, sampleLen, Charsets.UTF_8));
+            out.print(new String(key, 0, sampleLen, StandardCharsets.UTF_8));
           }
           if (sampleLen < key.length) {
             out.print("...");

+ 68 - 36
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.io.retry;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -27,17 +28,21 @@ import org.apache.hadoop.util.concurrent.AsyncGet;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.InterruptedIOException;
 import java.lang.reflect.Method;
-import java.util.LinkedList;
+import java.util.Iterator;
 import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 
 /** Handle async calls. */
 @InterfaceAudience.Private
 public class AsyncCallHandler {
-  static final Logger LOG = LoggerFactory.getLogger(AsyncCallHandler.class);
+  public static final Logger LOG = LoggerFactory.getLogger(
+      AsyncCallHandler.class);
 
   private static final ThreadLocal<AsyncGet<?, Exception>>
       LOWER_LAYER_ASYNC_RETURN = new ThreadLocal<>();
@@ -73,35 +78,34 @@ public class AsyncCallHandler {
 
   /** A simple concurrent queue which keeps track of the empty start time. */
   static class ConcurrentQueue<T> {
-    private final Queue<T> queue = new LinkedList<>();
-    private long emptyStartTime = Time.monotonicNow();
+    private final Queue<T> queue = new ConcurrentLinkedQueue<>();
+    private final AtomicLong emptyStartTime
+        = new AtomicLong(Time.monotonicNow());
 
-    synchronized int size() {
-      return queue.size();
+    Iterator<T> iterator() {
+      return queue.iterator();
     }
 
     /** Is the queue empty for more than the given time in millisecond? */
-    synchronized boolean isEmpty(long time) {
-      return queue.isEmpty() && Time.monotonicNow() - emptyStartTime > time;
+    boolean isEmpty(long time) {
+      return Time.monotonicNow() - emptyStartTime.get() > time
+          && queue.isEmpty();
     }
 
-    synchronized void offer(T c) {
+    void offer(T c) {
       final boolean added = queue.offer(c);
       Preconditions.checkState(added);
     }
 
-    synchronized T poll() {
-      Preconditions.checkState(!queue.isEmpty());
-      final T t = queue.poll();
+    void checkEmpty() {
       if (queue.isEmpty()) {
-        emptyStartTime = Time.monotonicNow();
+        emptyStartTime.set(Time.monotonicNow());
       }
-      return t;
     }
   }
 
   /** A queue for handling async calls. */
-  static class AsyncCallQueue {
+  class AsyncCallQueue {
     private final ConcurrentQueue<AsyncCall> queue = new ConcurrentQueue<>();
     private final Processor processor = new Processor();
 
@@ -113,20 +117,29 @@ public class AsyncCallHandler {
       processor.tryStart();
     }
 
-    void checkCalls() {
-      final int size = queue.size();
-      for (int i = 0; i < size; i++) {
-        final AsyncCall c = queue.poll();
-        if (!c.isDone()) {
-          queue.offer(c); // the call is not done yet, add it back.
+    long checkCalls() {
+      final long startTime = Time.monotonicNow();
+      long minWaitTime = Processor.MAX_WAIT_PERIOD;
+
+      for (final Iterator<AsyncCall> i = queue.iterator(); i.hasNext();) {
+        final AsyncCall c = i.next();
+        if (c.isDone()) {
+          i.remove(); // the call is done, remove it from the queue.
+          queue.checkEmpty();
+        } else {
+          final Long waitTime = c.getWaitTime(startTime);
+          if (waitTime != null && waitTime > 0 && waitTime < minWaitTime) {
+            minWaitTime = waitTime;
+          }
         }
       }
+      return minWaitTime;
     }
 
     /** Process the async calls in the queue. */
     private class Processor {
-      static final long GRACE_PERIOD = 10*1000L;
-      static final long SLEEP_PERIOD = 100L;
+      static final long GRACE_PERIOD = 3*1000L;
+      static final long MAX_WAIT_PERIOD = 100L;
 
       private final AtomicReference<Thread> running = new AtomicReference<>();
 
@@ -141,15 +154,16 @@ public class AsyncCallHandler {
             @Override
             public void run() {
               for (; isRunning(this);) {
+                final long waitTime = checkCalls();
+                tryStop(this);
+
                 try {
-                  Thread.sleep(SLEEP_PERIOD);
+                  synchronized (AsyncCallHandler.this) {
+                    AsyncCallHandler.this.wait(waitTime);
+                  }
                 } catch (InterruptedException e) {
                   kill(this);
-                  return;
                 }
-
-                checkCalls();
-                tryStop(this);
               }
             }
           };
@@ -215,10 +229,9 @@ public class AsyncCallHandler {
     private AsyncGet<?, Exception> lowerLayerAsyncGet;
 
     AsyncCall(Method method, Object[] args, boolean isRpc, int callId,
-              RetryInvocationHandler.Counters counters,
               RetryInvocationHandler<?> retryInvocationHandler,
               AsyncCallHandler asyncCallHandler) {
-      super(method, args, isRpc, callId, counters, retryInvocationHandler);
+      super(method, args, isRpc, callId, retryInvocationHandler);
 
       this.asyncCallHandler = asyncCallHandler;
     }
@@ -226,6 +239,7 @@ public class AsyncCallHandler {
     /** @return true if the call is done; otherwise, return false. */
     boolean isDone() {
       final CallReturn r = invokeOnce();
+      LOG.debug("#{}: {}", getCallId(), r.getState());
       switch (r.getState()) {
         case RETURNED:
         case EXCEPTION:
@@ -234,6 +248,7 @@ public class AsyncCallHandler {
         case RETRY:
           invokeOnce();
           break;
+        case WAIT_RETRY:
         case ASYNC_CALL_IN_PROGRESS:
         case ASYNC_INVOKED:
           // nothing to do
@@ -244,13 +259,25 @@ public class AsyncCallHandler {
       return false;
     }
 
+    @Override
+    CallReturn processWaitTimeAndRetryInfo() {
+      final Long waitTime = getWaitTime(Time.monotonicNow());
+      LOG.trace("#{} processRetryInfo: waitTime={}", getCallId(), waitTime);
+      if (waitTime != null && waitTime > 0) {
+        return CallReturn.WAIT_RETRY;
+      }
+      processRetryInfo();
+      return CallReturn.RETRY;
+    }
+
     @Override
     CallReturn invoke() throws Throwable {
       LOG.debug("{}.invoke {}", getClass().getSimpleName(), this);
       if (lowerLayerAsyncGet != null) {
         // async call was submitted early, check the lower level async call
         final boolean isDone = lowerLayerAsyncGet.isDone();
-        LOG.trace("invoke: lowerLayerAsyncGet.isDone()? {}", isDone);
+        LOG.trace("#{} invoke: lowerLayerAsyncGet.isDone()? {}",
+            getCallId(), isDone);
         if (!isDone) {
           return CallReturn.ASYNC_CALL_IN_PROGRESS;
         }
@@ -262,7 +289,7 @@ public class AsyncCallHandler {
       }
 
       // submit a new async call
-      LOG.trace("invoke: ASYNC_INVOKED");
+      LOG.trace("#{} invoke: ASYNC_INVOKED", getCallId());
       final boolean mode = Client.isAsynchronousMode();
       try {
         Client.setAsynchronousMode(true);
@@ -271,9 +298,9 @@ public class AsyncCallHandler {
         Preconditions.checkState(r == null);
         lowerLayerAsyncGet = getLowerLayerAsyncReturn();
 
-        if (counters.isZeros()) {
+        if (getCounters().isZeros()) {
           // first async attempt, initialize
-          LOG.trace("invoke: initAsyncCall");
+          LOG.trace("#{} invoke: initAsyncCall", getCallId());
           asyncCallHandler.initAsyncCall(this, asyncCallReturn);
         }
         return CallReturn.ASYNC_INVOKED;
@@ -287,9 +314,9 @@ public class AsyncCallHandler {
   private volatile boolean hasSuccessfulCall = false;
 
   AsyncCall newAsyncCall(Method method, Object[] args, boolean isRpc,
-                         int callId, RetryInvocationHandler.Counters counters,
+                         int callId,
                          RetryInvocationHandler<?> retryInvocationHandler) {
-    return new AsyncCall(method, args, isRpc, callId, counters,
+    return new AsyncCall(method, args, isRpc, callId,
         retryInvocationHandler, this);
   }
 
@@ -318,4 +345,9 @@ public class AsyncCallHandler {
     };
     ASYNC_RETURN.set(asyncGet);
   }
+
+  @VisibleForTesting
+  public static long getGracePeriod() {
+    return AsyncCallQueue.Processor.GRACE_PERIOD;
+  }
 }
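
The rewrite above replaces the synchronized LinkedList with a ConcurrentLinkedQueue plus an AtomicLong that records when the queue last became empty, so the Processor thread only stops after the queue has been empty for longer than GRACE_PERIOD. A minimal, standalone sketch of that "empty since" bookkeeping (names are illustrative; System.nanoTime stands in for Time.monotonicNow):

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicLong;

// Sketch: a queue that remembers when it last became empty, so a worker
// can decide whether it has been idle for longer than a grace period.
class GraceTrackingQueue<T> {
  private final Queue<T> queue = new ConcurrentLinkedQueue<>();
  private final AtomicLong emptyStartTime = new AtomicLong(nowMillis());

  private static long nowMillis() {
    return System.nanoTime() / 1_000_000L; // monotonic, in milliseconds
  }

  void offer(T item) {
    queue.offer(item);
  }

  T poll() {
    final T item = queue.poll();
    if (queue.isEmpty()) {
      emptyStartTime.set(nowMillis()); // restart the "empty since" clock
    }
    return item;
  }

  /** Has the queue been empty for more than graceMillis? */
  boolean isEmptyLongerThan(long graceMillis) {
    return nowMillis() - emptyStartTime.get() > graceMillis && queue.isEmpty();
  }
}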

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/CallReturn.java

@@ -29,6 +29,8 @@ class CallReturn {
     EXCEPTION,
     /** Call should be retried according to the {@link RetryPolicy}. */
     RETRY,
+    /** Call should wait and then retry according to the {@link RetryPolicy}. */
+    WAIT_RETRY,
     /** Call, which is async, is still in progress. */
     ASYNC_CALL_IN_PROGRESS,
     /** Call, which is async, just has been invoked. */
@@ -39,6 +41,7 @@ class CallReturn {
       State.ASYNC_CALL_IN_PROGRESS);
   static final CallReturn ASYNC_INVOKED = new CallReturn(State.ASYNC_INVOKED);
   static final CallReturn RETRY = new CallReturn(State.RETRY);
+  static final CallReturn WAIT_RETRY = new CallReturn(State.WAIT_RETRY);
 
   private final Object returnValue;
   private final Throwable thrown;

+ 120 - 74
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java

@@ -18,13 +18,14 @@
 package org.apache.hadoop.io.retry;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.retry.FailoverProxyProvider.ProxyInfo;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
 import org.apache.hadoop.ipc.*;
 import org.apache.hadoop.ipc.Client.ConnectionId;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -41,33 +42,51 @@ import java.util.Map;
  */
 @InterfaceAudience.Private
 public class RetryInvocationHandler<T> implements RpcInvocationHandler {
-  public static final Log LOG = LogFactory.getLog(RetryInvocationHandler.class);
+  public static final Logger LOG = LoggerFactory.getLogger(
+      RetryInvocationHandler.class);
 
   static class Call {
     private final Method method;
     private final Object[] args;
     private final boolean isRpc;
     private final int callId;
-    final Counters counters;
+    private final Counters counters = new Counters();
 
     private final RetryPolicy retryPolicy;
     private final RetryInvocationHandler<?> retryInvocationHandler;
 
+    private RetryInfo retryInfo;
+
     Call(Method method, Object[] args, boolean isRpc, int callId,
-         Counters counters, RetryInvocationHandler<?> retryInvocationHandler) {
+         RetryInvocationHandler<?> retryInvocationHandler) {
       this.method = method;
       this.args = args;
       this.isRpc = isRpc;
       this.callId = callId;
-      this.counters = counters;
 
       this.retryPolicy = retryInvocationHandler.getRetryPolicy(method);
       this.retryInvocationHandler = retryInvocationHandler;
     }
 
+    int getCallId() {
+      return callId;
+    }
+
+    Counters getCounters() {
+      return counters;
+    }
+
+    synchronized Long getWaitTime(final long now) {
+      return retryInfo == null? null: retryInfo.retryTime - now;
+    }
+
     /** Invoke the call once without retrying. */
     synchronized CallReturn invokeOnce() {
       try {
+        if (retryInfo != null) {
+          return processWaitTimeAndRetryInfo();
+        }
+
         // The number of times this invocation handler has ever been failed over
         // before this method invocation attempt. Used to prevent concurrent
         // failed method invocations from triggering multiple failover attempts.
@@ -76,28 +95,70 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
           return invoke();
         } catch (Exception e) {
           if (LOG.isTraceEnabled()) {
-            LOG.trace(this, e);
+            LOG.trace(toString(), e);
           }
           if (Thread.currentThread().isInterrupted()) {
             // If interrupted, do not retry.
             throw e;
           }
-          retryInvocationHandler.handleException(
-              method, retryPolicy, failoverCount, counters, e);
-          return CallReturn.RETRY;
+
+          retryInfo = retryInvocationHandler.handleException(
+              method, callId, retryPolicy, counters, failoverCount, e);
+          return processWaitTimeAndRetryInfo();
         }
       } catch(Throwable t) {
         return new CallReturn(t);
       }
     }
 
+    /**
+     * It first processes the wait time, if there is any,
+     * and then invokes {@link #processRetryInfo()}.
+     *
+     * If the wait time is positive, it either sleeps for synchronous calls
+     * or immediately returns for asynchronous calls.
+     *
+     * @return {@link CallReturn#RETRY} if the retryInfo is processed;
+     *         otherwise, return {@link CallReturn#WAIT_RETRY}.
+     */
+    CallReturn processWaitTimeAndRetryInfo() throws InterruptedIOException {
+      final Long waitTime = getWaitTime(Time.monotonicNow());
+      LOG.trace("#{} processRetryInfo: retryInfo={}, waitTime={}",
+          callId, retryInfo, waitTime);
+      if (waitTime != null && waitTime > 0) {
+        try {
+          Thread.sleep(retryInfo.delay);
+        } catch (InterruptedException e) {
+          Thread.currentThread().interrupt();
+          LOG.warn("Interrupted while waiting to retry", e);
+          InterruptedIOException intIOE = new InterruptedIOException(
+              "Retry interrupted");
+          intIOE.initCause(e);
+          throw intIOE;
+        }
+      }
+      processRetryInfo();
+      return CallReturn.RETRY;
+    }
+
+    synchronized void processRetryInfo() {
+      counters.retries++;
+      if (retryInfo.isFailover()) {
+        retryInvocationHandler.proxyDescriptor.failover(
+            retryInfo.expectedFailoverCount, method, callId);
+        counters.failovers++;
+      }
+      retryInfo = null;
+    }
+
     CallReturn invoke() throws Throwable {
       return new CallReturn(invokeMethod());
     }
 
     Object invokeMethod() throws Throwable {
       if (isRpc) {
-        Client.setCallIdAndRetryCount(callId, counters.retries);
+        Client.setCallIdAndRetryCount(callId, counters.retries,
+            retryInvocationHandler.asyncCallHandler);
       }
       return retryInvocationHandler.invokeMethod(method, args);
     }
@@ -146,15 +207,16 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
       return failoverCount;
     }
 
-    synchronized void failover(long expectedFailoverCount, Method method) {
+    synchronized void failover(long expectedFailoverCount, Method method,
+                               int callId) {
       // Make sure that concurrent failed invocations only cause a single
       // actual failover.
       if (failoverCount == expectedFailoverCount) {
         fpp.performFailover(proxyInfo.proxy);
         failoverCount++;
       } else {
-        LOG.warn("A failover has occurred since the start of "
-            + proxyInfo.getString(method.getName()));
+        LOG.warn("A failover has occurred since the start of call #" + callId
+            + " " + proxyInfo.getString(method.getName()));
       }
       proxyInfo = fpp.getProxy();
     }
@@ -172,22 +234,33 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
   }
 
   private static class RetryInfo {
+    private final long retryTime;
     private final long delay;
-    private final RetryAction failover;
-    private final RetryAction fail;
+    private final RetryAction action;
+    private final long expectedFailoverCount;
 
-    RetryInfo(long delay, RetryAction failover, RetryAction fail) {
+    RetryInfo(long delay, RetryAction action, long expectedFailoverCount) {
       this.delay = delay;
-      this.failover = failover;
-      this.fail = fail;
+      this.retryTime = Time.monotonicNow() + delay;
+      this.action = action;
+      this.expectedFailoverCount = expectedFailoverCount;
+    }
+
+    boolean isFailover() {
+      return action != null
+          && action.action ==  RetryAction.RetryDecision.FAILOVER_AND_RETRY;
+    }
+
+    boolean isFail() {
+      return action != null
+          && action.action ==  RetryAction.RetryDecision.FAIL;
     }
 
     static RetryInfo newRetryInfo(RetryPolicy policy, Exception e,
-        Counters counters, boolean idempotentOrAtMostOnce) throws Exception {
+        Counters counters, boolean idempotentOrAtMostOnce,
+        long expectedFailoverCount) throws Exception {
+      RetryAction max = null;
       long maxRetryDelay = 0;
-      RetryAction failover = null;
-      RetryAction retry = null;
-      RetryAction fail = null;
 
       final Iterable<Exception> exceptions = e instanceof MultiException ?
           ((MultiException) e).getExceptions().values()
@@ -195,23 +268,19 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
       for (Exception exception : exceptions) {
         final RetryAction a = policy.shouldRetry(exception,
             counters.retries, counters.failovers, idempotentOrAtMostOnce);
-        if (a.action == RetryAction.RetryDecision.FAIL) {
-          fail = a;
-        } else {
+        if (a.action != RetryAction.RetryDecision.FAIL) {
           // must be a retry or failover
-          if (a.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY) {
-            failover = a;
-          } else {
-            retry = a;
-          }
           if (a.delayMillis > maxRetryDelay) {
             maxRetryDelay = a.delayMillis;
           }
         }
+
+        if (max == null || max.action.compareTo(a.action) < 0) {
+          max = a;
+        }
       }
 
-      return new RetryInfo(maxRetryDelay, failover,
-          failover == null && retry == null? fail: null);
+      return new RetryInfo(maxRetryDelay, max, expectedFailoverCount);
     }
   }
 
@@ -246,13 +315,12 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
     return proxyDescriptor.getFailoverCount();
   }
 
-  private Call newCall(Method method, Object[] args, boolean isRpc, int callId,
-                       Counters counters) {
+  private Call newCall(Method method, Object[] args, boolean isRpc,
+                       int callId) {
     if (Client.isAsynchronousMode()) {
-      return asyncCallHandler.newAsyncCall(method, args, isRpc, callId,
-          counters, this);
+      return asyncCallHandler.newAsyncCall(method, args, isRpc, callId, this);
     } else {
-      return new Call(method, args, isRpc, callId, counters, this);
+      return new Call(method, args, isRpc, callId, this);
     }
   }
 
@@ -261,9 +329,8 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
       throws Throwable {
     final boolean isRpc = isRpcInvocation(proxyDescriptor.getProxy());
     final int callId = isRpc? Client.nextCallId(): RpcConstants.INVALID_CALL_ID;
-    final Counters counters = new Counters();
 
-    final Call call = newCall(method, args, isRpc, callId, counters);
+    final Call call = newCall(method, args, isRpc, callId);
     while (true) {
       final CallReturn c = call.invokeOnce();
       final CallReturn.State state = c.getState();
@@ -275,45 +342,24 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
     }
   }
 
-  private void handleException(final Method method, final RetryPolicy policy,
-      final long expectedFailoverCount, final Counters counters,
-      final Exception ex) throws Exception {
-    final RetryInfo retryInfo = RetryInfo.newRetryInfo(policy, ex, counters,
-        proxyDescriptor.idempotentOrAtMostOnce(method));
-    counters.retries++;
-
-    if (retryInfo.fail != null) {
+  private RetryInfo handleException(final Method method, final int callId,
+      final RetryPolicy policy, final Counters counters,
+      final long expectFailoverCount, final Exception e) throws Exception {
+    final RetryInfo retryInfo = RetryInfo.newRetryInfo(policy, e,
+        counters, proxyDescriptor.idempotentOrAtMostOnce(method),
+        expectFailoverCount);
+    if (retryInfo.isFail()) {
       // fail.
-      if (retryInfo.fail.reason != null) {
-        LOG.warn("Exception while invoking "
+      if (retryInfo.action.reason != null) {
+        LOG.warn("Exception while invoking call #" + callId + " "
             + proxyDescriptor.getProxyInfo().getString(method.getName())
-            + ". Not retrying because " + retryInfo.fail.reason, ex);
-      }
-      throw ex;
-    }
-
-    // retry
-    final boolean isFailover = retryInfo.failover != null;
-
-    log(method, isFailover, counters.failovers, retryInfo.delay, ex);
-
-    if (retryInfo.delay > 0) {
-      try {
-        Thread.sleep(retryInfo.delay);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        LOG.warn("Interrupted while waiting to retry", e);
-        InterruptedIOException intIOE = new InterruptedIOException(
-            "Retry interrupted");
-        intIOE.initCause(e);
-        throw intIOE;
+            + ". Not retrying because " + retryInfo.action.reason, e);
       }
+      throw e;
     }
 
-    if (isFailover) {
-      proxyDescriptor.failover(expectedFailoverCount, method);
-      counters.failovers++;
-    }
+    log(method, retryInfo.isFailover(), counters.failovers, retryInfo.delay, e);
+    return retryInfo;
   }
 
   private void log(final Method method, final boolean isFailover,

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java

@@ -67,6 +67,7 @@ public interface RetryPolicy {
     }
     
     public enum RetryDecision {
+      // Ordering: FAIL < RETRY < FAILOVER_AND_RETRY.
       FAIL,
       RETRY,
       FAILOVER_AND_RETRY
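
The comment added above documents the ordering FAIL < RETRY < FAILOVER_AND_RETRY, which the new RetryInfo.newRetryInfo (earlier in this change) relies on when it keeps the "max" RetryAction across the exceptions of a MultiException via compareTo. A small self-contained sketch of that selection, using a stand-in enum rather than the real RetryAction.RetryDecision:

import java.util.Arrays;
import java.util.List;

// Sketch: picking the dominant retry decision across several exceptions by
// relying on the declared enum ordering FAIL < RETRY < FAILOVER_AND_RETRY.
// Decision is a stand-in for RetryPolicy.RetryAction.RetryDecision.
public final class MaxDecision {
  enum Decision { FAIL, RETRY, FAILOVER_AND_RETRY }

  private MaxDecision() {
  }

  static Decision dominant(List<Decision> decisions) {
    Decision max = null;
    for (Decision d : decisions) {
      if (max == null || max.compareTo(d) < 0) { // later constants win
        max = d;
      }
    }
    return max;
  }

  public static void main(String[] args) {
    // One wrapped exception says FAIL, another says RETRY: the call retries.
    System.out.println(dominant(Arrays.asList(Decision.FAIL, Decision.RETRY)));
  }
}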

+ 22 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package-info.java

@@ -15,6 +15,28 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * A mechanism for selectively retrying methods that throw exceptions under
+ * certain circumstances.
+ * Typical usage is
+ *  UnreliableImplementation unreliableImpl = new UnreliableImplementation();
+ *  UnreliableInterface unreliable = (UnreliableInterface)
+ *  RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+ *  RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10,
+ *      TimeUnit.SECONDS));
+ *  unreliable.call();
+ *
+ * This will retry any method called on <code>unreliable</code> four times -
+ * in this case the <code>call()</code> method - sleeping 10 seconds between
+ * each retry. There are a number of
+ * {@link org.apache.hadoop.io.retry.RetryPolicies retry policies}
+ * available, or you can implement a custom one by implementing
+ * {@link org.apache.hadoop.io.retry.RetryPolicy}.
+ * It is also possible to specify retry policies on a
+ * {@link org.apache.hadoop.io.retry.RetryProxy#create(Class, Object, Map)
+ * per-method basis}.
+ */
 @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 package org.apache.hadoop.io.retry;
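
The javadoc above keeps the original usage example but refers to UnreliableInterface and UnreliableImplementation, which are not defined in this change. A self-contained variant assuming only the RetryProxy.create and RetryPolicies.retryUpToMaximumCountWithFixedSleep calls shown in that javadoc; FlakyService is a made-up interface used for illustration:

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryProxy;

// The made-up FlakyService interface stands in for UnreliableInterface.
public class RetryProxyExample {
  public interface FlakyService {
    String fetch() throws Exception;
  }

  public static void main(String[] args) throws Exception {
    FlakyService real = () -> {
      if (Math.random() < 0.5) {
        throw new Exception("transient failure"); // sometimes fail
      }
      return "ok";
    };
    // Retry fetch() up to 4 times, sleeping 10 seconds between attempts.
    FlakyService retrying = (FlakyService) RetryProxy.create(
        FlakyService.class, real,
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10,
            TimeUnit.SECONDS));
    System.out.println(retrying.fetch());
  }
}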

+ 0 - 48
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package.html

@@ -1,48 +0,0 @@
-<html>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<body>
-
-<p>
-A mechanism for selectively retrying methods that throw exceptions under certain circumstances.
-</p>
-
-<p>
-Typical usage is
-</p>
-
-<pre>
-UnreliableImplementation unreliableImpl = new UnreliableImplementation();
-UnreliableInterface unreliable = (UnreliableInterface)
-  RetryProxy.create(UnreliableInterface.class, unreliableImpl,
-    RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10, TimeUnit.SECONDS));
-unreliable.call();
-</pre>
-
-<p>
-This will retry any method called on <code>unreliable</code> four times - in this case the <code>call()</code>
-method - sleeping 10 seconds between
-each retry. There are a number of {@link org.apache.hadoop.io.retry.RetryPolicies retry policies}
-available, or you can implement a custom one by implementing {@link org.apache.hadoop.io.retry.RetryPolicy}.
-It is also possible to specify retry policies on a 
-{@link org.apache.hadoop.io.retry.RetryProxy#create(Class, Object, Map) per-method basis}.
-</p>
-
-</body>
-</html>

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java

@@ -72,8 +72,8 @@ public class CallQueueManager<E> {
     this.clientBackOffEnabled = clientBackOffEnabled;
     this.putRef = new AtomicReference<BlockingQueue<E>>(bq);
     this.takeRef = new AtomicReference<BlockingQueue<E>>(bq);
-    LOG.info("Using callQueue: " + backingClass + " scheduler: " +
-        schedulerClass);
+    LOG.info("Using callQueue: " + backingClass + " queueCapacity: " +
+        maxQueueSize + " scheduler: " + schedulerClass);
   }
 
   private static <T extends RpcScheduler> T createScheduler(

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallerContext.java

@@ -35,7 +35,7 @@ import java.util.Arrays;
 @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "Hive", "MapReduce",
     "Pig", "YARN"})
 @InterfaceStability.Evolving
-public class CallerContext {
+public final class CallerContext {
   public static final Charset SIGNATURE_ENCODING = StandardCharsets.UTF_8;
   /** The caller context.
    *
@@ -54,7 +54,7 @@ public class CallerContext {
    */
   private final byte[] signature;
 
-  public CallerContext(Builder builder) {
+  private CallerContext(Builder builder) {
     this.context = builder.context;
     this.signature = builder.signature;
   }
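
With the constructor made private above, a CallerContext can only be obtained through its Builder. A minimal usage sketch; Builder(String), build(), and the setCurrent/getCurrent accessors are assumed from the rest of the class, which this hunk does not show:

import org.apache.hadoop.ipc.CallerContext;

// Sketch: constructing a CallerContext via its Builder now that the
// constructor is private, then attaching it to the current thread.
public class CallerContextExample {
  public static void main(String[] args) {
    CallerContext context = new CallerContext.Builder("clientApp,job_42")
        .build();
    CallerContext.setCurrent(context);               // attach to this thread
    System.out.println(CallerContext.getCurrent());  // read it back
  }
}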

+ 14 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -93,6 +93,8 @@ public class Client implements AutoCloseable {
 
   private static final ThreadLocal<Integer> callId = new ThreadLocal<Integer>();
   private static final ThreadLocal<Integer> retryCount = new ThreadLocal<Integer>();
+  private static final ThreadLocal<Object> EXTERNAL_CALL_HANDLER
+      = new ThreadLocal<>();
   private static final ThreadLocal<AsyncGet<? extends Writable, IOException>>
       ASYNC_RPC_RESPONSE = new ThreadLocal<>();
   private static final ThreadLocal<Boolean> asynchronousMode =
@@ -111,13 +113,15 @@ public class Client implements AutoCloseable {
   }
 
   /** Set call id and retry count for the next call. */
-  public static void setCallIdAndRetryCount(int cid, int rc) {
+  public static void setCallIdAndRetryCount(int cid, int rc,
+                                            Object externalHandler) {
     Preconditions.checkArgument(cid != RpcConstants.INVALID_CALL_ID);
     Preconditions.checkState(callId.get() == null);
     Preconditions.checkArgument(rc != RpcConstants.INVALID_RETRY_COUNT);
 
     callId.set(cid);
     retryCount.set(rc);
+    EXTERNAL_CALL_HANDLER.set(externalHandler);
   }
 
   private ConcurrentMap<ConnectionId, Connection> connections =
@@ -333,6 +337,7 @@ public class Client implements AutoCloseable {
     IOException error;          // exception, null if success
     final RPC.RpcKind rpcKind;      // Rpc EngineKind
     boolean done;               // true when call is done
+    private final Object externalHandler;
 
     private Call(RPC.RpcKind rpcKind, Writable param) {
       this.rpcKind = rpcKind;
@@ -352,6 +357,8 @@ public class Client implements AutoCloseable {
       } else {
         this.retry = rc;
       }
+
+      this.externalHandler = EXTERNAL_CALL_HANDLER.get();
     }
 
     @Override
@@ -364,6 +371,12 @@ public class Client implements AutoCloseable {
     protected synchronized void callComplete() {
       this.done = true;
       notify();                                 // notify caller
+
+      if (externalHandler != null) {
+        synchronized (externalHandler) {
+          externalHandler.notify();
+        }
+      }
     }
 
     /** Set the exception when there is an error.
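
callComplete() above now also notifies on the external handler object registered via setCallIdAndRetryCount, so AsyncCallHandler's Processor can wake from its bounded wait as soon as a response arrives instead of polling on a fixed sleep. A small standalone sketch of that wait/notify handshake, with a plain Object standing in for the handler:

// Sketch: the completing call notifies on a shared handler object, waking a
// worker that waits on the same monitor with a bounded timeout (so a missed
// notify only costs one timeout, as with MAX_WAIT_PERIOD above).
public class ExternalHandlerNotify {
  public static void main(String[] args) throws InterruptedException {
    final Object externalHandler = new Object();

    Thread waiter = new Thread(() -> {
      synchronized (externalHandler) {
        try {
          externalHandler.wait(100); // bounded wait, in milliseconds
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }
      System.out.println("woken up, re-checking pending calls");
    });
    waiter.start();

    // Simulates callComplete(): wake whoever is waiting on the handler.
    synchronized (externalHandler) {
      externalHandler.notify();
    }
    waiter.join();
  }
}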

+ 16 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java

@@ -901,9 +901,24 @@ public class DecayRpcScheduler implements RpcScheduler,
   public String getCallVolumeSummary() {
     try {
       ObjectMapper om = new ObjectMapper();
-      return om.writeValueAsString(callCounts);
+      return om.writeValueAsString(getDecayedCallCounts());
     } catch (Exception e) {
       return "Error: " + e.getMessage();
     }
   }
+
+  private Map<Object, Long> getDecayedCallCounts() {
+    Map<Object, Long> decayedCallCounts = new HashMap<>(callCounts.size());
+    Iterator<Map.Entry<Object, List<AtomicLong>>> it =
+        callCounts.entrySet().iterator();
+    while (it.hasNext()) {
+      Map.Entry<Object, List<AtomicLong>> entry = it.next();
+      Object user = entry.getKey();
+      Long decayedCount = entry.getValue().get(0).get();
+      if (decayedCount > 0) {
+        decayedCallCounts.put(user, decayedCount);
+      }
+    }
+    return decayedCallCounts;
+  }
 }

+ 21 - 13
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java

@@ -75,11 +75,12 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
 
   /**
    * Create a FairCallQueue.
-   * @param capacity the maximum size of each sub-queue
+   * @param capacity the total size of all sub-queues
    * @param ns the prefix to use for configuration
    * @param conf the configuration to read from
-   * Notes: the FairCallQueue has no fixed capacity. Rather, it has a minimum
-   * capacity of `capacity` and a maximum capacity of `capacity * number_queues`
+   * Notes: Each sub-queue has a capacity of `capacity / numSubqueues`.
+   * The first (highest priority) sub-queue has the excess capacity
+   * of `capacity % numSubqueues`.
    */
   public FairCallQueue(int priorityLevels, int capacity, String ns,
       Configuration conf) {
@@ -88,13 +89,19 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
           "at least 1");
     }
     int numQueues = priorityLevels;
-    LOG.info("FairCallQueue is in use with " + numQueues + " queues.");
+    LOG.info("FairCallQueue is in use with " + numQueues +
+        " queues with total capacity of " + capacity);
 
     this.queues = new ArrayList<BlockingQueue<E>>(numQueues);
     this.overflowedCalls = new ArrayList<AtomicLong>(numQueues);
-
+    int queueCapacity = capacity / numQueues;
+    int capacityForFirstQueue = queueCapacity + (capacity % numQueues);
     for(int i=0; i < numQueues; i++) {
-      this.queues.add(new LinkedBlockingQueue<E>(capacity));
+      if (i == 0) {
+        this.queues.add(new LinkedBlockingQueue<E>(capacityForFirstQueue));
+      } else {
+        this.queues.add(new LinkedBlockingQueue<E>(queueCapacity));
+      }
       this.overflowedCalls.add(new AtomicLong(0));
     }
 
@@ -293,7 +300,7 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
   @Override
   public int size() {
     int size = 0;
-    for (BlockingQueue q : this.queues) {
+    for (BlockingQueue<E> q : this.queues) {
       size += q.size();
     }
     return size;
@@ -339,7 +346,7 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
   @Override
   public int remainingCapacity() {
     int sum = 0;
-    for (BlockingQueue q : this.queues) {
+    for (BlockingQueue<E> q : this.queues) {
       sum += q.remainingCapacity();
     }
     return sum;
@@ -355,7 +362,7 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
       new HashMap<String, MetricsProxy>();
 
     // Weakref for delegate, so we don't retain it forever if it can be GC'd
-    private WeakReference<FairCallQueue> delegate;
+    private WeakReference<FairCallQueue<? extends Schedulable>> delegate;
 
     // Keep track of how many objects we registered
     private int revisionNumber = 0;
@@ -374,14 +381,15 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
       return mp;
     }
 
-    public void setDelegate(FairCallQueue obj) {
-      this.delegate = new WeakReference<FairCallQueue>(obj);
+    public void setDelegate(FairCallQueue<? extends Schedulable> obj) {
+      this.delegate
+          = new WeakReference<FairCallQueue<? extends Schedulable>>(obj);
       this.revisionNumber++;
     }
 
     @Override
     public int[] getQueueSizes() {
-      FairCallQueue obj = this.delegate.get();
+      FairCallQueue<? extends Schedulable> obj = this.delegate.get();
       if (obj == null) {
         return new int[]{};
       }
@@ -391,7 +399,7 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
 
     @Override
     public long[] getOverflowedCalls() {
-      FairCallQueue obj = this.delegate.get();
+      FairCallQueue<? extends Schedulable> obj = this.delegate.get();
       if (obj == null) {
         return new long[]{};
       }
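
The new constructor above divides one total capacity across the sub-queues instead of giving each sub-queue the full capacity, with the first (highest priority) sub-queue absorbing the remainder. A quick sketch of that arithmetic (the split helper is hypothetical, not part of the patch):

import java.util.Arrays;

// Sketch: dividing one total capacity across priority sub-queues, mirroring
// the arithmetic in the constructor above (remainder goes to queue 0).
public final class CapacitySplit {
  private CapacitySplit() {
  }

  static int[] split(int capacity, int numQueues) {
    int base = capacity / numQueues;    // every sub-queue gets at least this
    int excess = capacity % numQueues;  // leftover goes to the first sub-queue
    int[] capacities = new int[numQueues];
    for (int i = 0; i < numQueues; i++) {
      capacities[i] = (i == 0) ? base + excess : base;
    }
    return capacities;
  }

  public static void main(String[] args) {
    // capacity = 100 over 3 priority levels -> [34, 33, 33]
    System.out.println(Arrays.toString(split(100, 3)));
  }
}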

+ 37 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
+import org.apache.hadoop.ipc.RpcWritable;
 import org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos.RequestHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
@@ -68,7 +69,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 
   static { // Register the rpcRequest deserializer for WritableRpcEngine 
     org.apache.hadoop.ipc.Server.registerProtocolEngine(
-        RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcRequestWrapper.class,
+        RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcProtobufRequest.class,
         new Server.ProtoBufRpcInvoker());
   }
 
@@ -612,11 +613,10 @@ public class ProtobufRpcEngine implements RpcEngine {
        */
       public Writable call(RPC.Server server, String connectionProtocolName,
           Writable writableRequest, long receiveTime) throws Exception {
-        RpcRequestWrapper request = (RpcRequestWrapper) writableRequest;
-        RequestHeaderProto rpcRequest = request.requestHeader;
+        RpcProtobufRequest request = (RpcProtobufRequest) writableRequest;
+        RequestHeaderProto rpcRequest = request.getRequestHeader();
         String methodName = rpcRequest.getMethodName();
-        
-        
+
         /** 
          * RPCs for a particular interface (ie protocol) are done using a
          * IPC connection that is setup using rpcProxy.
@@ -652,9 +652,8 @@ public class ProtobufRpcEngine implements RpcEngine {
           throw new RpcNoSuchMethodException(msg);
         }
         Message prototype = service.getRequestPrototype(methodDescriptor);
-        Message param = prototype.newBuilderForType()
-            .mergeFrom(request.theRequestRead).build();
-        
+        Message param = request.getValue(prototype);
+
         Message result;
         long startTime = Time.now();
         int qTime = (int) (startTime - receiveTime);
@@ -683,7 +682,36 @@ public class ProtobufRpcEngine implements RpcEngine {
               exception.getClass().getSimpleName();
           server.updateMetrics(detailedMetricsName, qTime, processingTime);
         }
-        return new RpcResponseWrapper(result);
+        return RpcWritable.wrap(result);
+      }
+    }
+  }
+
+  // htrace in the ipc layer creates the span name based on toString()
+  // which uses the rpc header.  in the normal case we want to defer decoding
+  // the rpc header until needed by the rpc engine.
+  static class RpcProtobufRequest extends RpcWritable.Buffer {
+    private RequestHeaderProto lazyHeader;
+
+    public RpcProtobufRequest() {
+    }
+
+    synchronized RequestHeaderProto getRequestHeader() throws IOException {
+      if (lazyHeader == null) {
+        lazyHeader = getValue(RequestHeaderProto.getDefaultInstance());
+      }
+      return lazyHeader;
+    }
+
+    // this is used by htrace to name the span.
+    @Override
+    public String toString() {
+      try {
+        RequestHeaderProto header = getRequestHeader();
+        return header.getDeclaringClassProtocolName() + "." +
+               header.getMethodName();
+      } catch (IOException e) {
+        throw new IllegalArgumentException(e);
       }
     }
   }
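
RpcProtobufRequest above defers parsing the request header until it is actually needed (for example when htrace calls toString() to name a span) and caches the result in a synchronized getter. A generic sketch of that parse-once, decode-on-first-use pattern, with a String payload standing in for the protobuf header:

import java.nio.charset.StandardCharsets;

// Sketch: decode-on-first-use with caching, mirroring getRequestHeader()
// above. The byte[]-to-String decode stands in for the protobuf parse.
public class LazyHeader {
  private final byte[] encoded;
  private String decoded; // guarded by "this"

  LazyHeader(byte[] encoded) {
    this.encoded = encoded;
  }

  synchronized String get() {
    if (decoded == null) {
      decoded = new String(encoded, StandardCharsets.UTF_8); // parse once
    }
    return decoded;
  }

  public static void main(String[] args) {
    LazyHeader h = new LazyHeader(
        "ClientProtocol.getBlockLocations".getBytes(StandardCharsets.UTF_8));
    System.out.println(h.get()); // first call decodes and caches
    System.out.println(h.get()); // second call reuses the cached value
  }
}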

+ 98 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ResponseBuffer.java

@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.Arrays;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+class ResponseBuffer extends DataOutputStream {
+  ResponseBuffer(int capacity) {
+    super(new FramedBuffer(capacity));
+  }
+
+  // update framing bytes based on bytes written to stream.
+  private FramedBuffer getFramedBuffer() {
+    FramedBuffer buf = (FramedBuffer)out;
+    buf.setSize(written);
+    return buf;
+  }
+
+  void writeTo(OutputStream out) throws IOException {
+    getFramedBuffer().writeTo(out);
+  }
+
+  byte[] toByteArray() {
+    return getFramedBuffer().toByteArray();
+  }
+
+  int capacity() {
+    return ((FramedBuffer)out).capacity();
+  }
+
+  void setCapacity(int capacity) {
+    ((FramedBuffer)out).setCapacity(capacity);
+  }
+
+  void ensureCapacity(int capacity) {
+    if (((FramedBuffer)out).capacity() < capacity) {
+      ((FramedBuffer)out).setCapacity(capacity);
+    }
+  }
+
+  ResponseBuffer reset() {
+    written = 0;
+    ((FramedBuffer)out).reset();
+    return this;
+  }
+
+  private static class FramedBuffer extends ByteArrayOutputStream {
+    private static final int FRAMING_BYTES = 4;
+    FramedBuffer(int capacity) {
+      super(capacity + FRAMING_BYTES);
+      reset();
+    }
+    @Override
+    public int size() {
+      return count - FRAMING_BYTES;
+    }
+    void setSize(int size) {
+      buf[0] = (byte)((size >>> 24) & 0xFF);
+      buf[1] = (byte)((size >>> 16) & 0xFF);
+      buf[2] = (byte)((size >>>  8) & 0xFF);
+      buf[3] = (byte)((size >>>  0) & 0xFF);
+    }
+    int capacity() {
+      return buf.length - FRAMING_BYTES;
+    }
+    void setCapacity(int capacity) {
+      buf = Arrays.copyOf(buf, capacity + FRAMING_BYTES);
+    }
+    @Override
+    public void reset() {
+      count = FRAMING_BYTES;
+      setSize(0);
+    }
+  };
+}
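
FramedBuffer above reserves the first four bytes of its array, and setSize() rewrites them with the payload length in big-endian order whenever the framed bytes are needed. A standalone sketch of that framing (frame() is a hypothetical helper, not part of the class):

import java.nio.charset.StandardCharsets;

// Sketch of the 4-byte big-endian length prefix written by setSize() above.
public final class FramingDemo {
  private FramingDemo() {
  }

  static byte[] frame(byte[] payload) {
    final int size = payload.length;
    byte[] out = new byte[size + 4];
    out[0] = (byte) ((size >>> 24) & 0xFF);
    out[1] = (byte) ((size >>> 16) & 0xFF);
    out[2] = (byte) ((size >>> 8) & 0xFF);
    out[3] = (byte) (size & 0xFF);
    System.arraycopy(payload, 0, out, 4, size);
    return out;
  }

  public static void main(String[] args) {
    byte[] framed = frame("hrpc-reply".getBytes(StandardCharsets.UTF_8));
    // The payload is 10 bytes, so the frame starts with 00 00 00 0A.
    System.out.printf("%02X %02X %02X %02X%n",
        framed[0], framed[1], framed[2], framed[3]);
  }
}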

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcConstants.java

@@ -18,8 +18,8 @@
 package org.apache.hadoop.ipc;
 
 import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
 
-import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 
 @InterfaceAudience.Private
@@ -54,8 +54,8 @@ public class RpcConstants {
   /**
    * The first four bytes of Hadoop RPC connections
    */
-  public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes
-      (Charsets.UTF_8));
+  public static final ByteBuffer HEADER =
+      ByteBuffer.wrap("hrpc".getBytes(StandardCharsets.UTF_8));
   public static final int HEADER_LEN_AFTER_HRPC_PART = 3; // 3 bytes that follow
   
   // 1 : Introduce ping and server does not throw away RPCs

+ 184 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcWritable.java

@@ -0,0 +1,184 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInput;
+import java.io.DataInputStream;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Writable;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
+import com.google.protobuf.Message;
+
+@InterfaceAudience.Private
+public abstract class RpcWritable implements Writable {
+
+  static RpcWritable wrap(Object o) {
+    if (o instanceof RpcWritable) {
+      return (RpcWritable)o;
+    } else if (o instanceof Message) {
+      return new ProtobufWrapper((Message)o);
+    } else if (o instanceof Writable) {
+      return new WritableWrapper((Writable)o);
+    }
+    throw new IllegalArgumentException("Cannot wrap " + o.getClass());
+  }
+
+  // don't support old inefficient Writable methods.
+  @Override
+  public final void readFields(DataInput in) throws IOException {
+    throw new UnsupportedOperationException();
+  }
+  @Override
+  public final void write(DataOutput out) throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  // methods optimized for reduced intermediate byte[] allocations.
+  abstract void writeTo(ResponseBuffer out) throws IOException;
+  abstract <T> T readFrom(ByteBuffer bb) throws IOException;
+
+  // adapter for Writables.
+  static class WritableWrapper extends RpcWritable {
+    private final Writable writable;
+
+    WritableWrapper(Writable writable) {
+      this.writable = writable;
+    }
+
+    @Override
+    public void writeTo(ResponseBuffer out) throws IOException {
+      writable.write(out);
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    <T> T readFrom(ByteBuffer bb) throws IOException {
+      // create a stream that may consume up to the entire ByteBuffer.
+      DataInputStream in = new DataInputStream(new ByteArrayInputStream(
+          bb.array(), bb.position() + bb.arrayOffset(), bb.remaining()));
+      try {
+        writable.readFields(in);
+      } finally {
+        // advance over the bytes read.
+        bb.position(bb.limit() - in.available());
+      }
+      return (T)writable;
+    }
+  }
+
+  // adapter for Protobufs.
+  static class ProtobufWrapper extends RpcWritable {
+    private Message message;
+
+    ProtobufWrapper(Message message) {
+      this.message = message;
+    }
+
+    @Override
+    void writeTo(ResponseBuffer out) throws IOException {
+      int length = message.getSerializedSize();
+      length += CodedOutputStream.computeRawVarint32Size(length);
+      out.ensureCapacity(length);
+      message.writeDelimitedTo(out);
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    <T> T readFrom(ByteBuffer bb) throws IOException {
+      // using the parser with a byte[]-backed coded input stream is the
+      // most efficient way to deserialize a protobuf.  it has a direct
+      // path to the PB ctor that doesn't create multi-layered streams
+      // that internally buffer.
+      CodedInputStream cis = CodedInputStream.newInstance(
+          bb.array(), bb.position() + bb.arrayOffset(), bb.remaining());
+      try {
+        cis.pushLimit(cis.readRawVarint32());
+        message = message.getParserForType().parseFrom(cis);
+        cis.checkLastTagWas(0);
+      } finally {
+        // advance over the bytes read.
+        bb.position(bb.position() + cis.getTotalBytesRead());
+      }
+      return (T)message;
+    }
+  }
+
+  // adapter to allow decoding of writables and protobufs from a byte buffer.
+  static class Buffer extends RpcWritable {
+    private ByteBuffer bb;
+
+    static Buffer wrap(ByteBuffer bb) {
+      return new Buffer(bb);
+    }
+
+    Buffer() {}
+
+    Buffer(ByteBuffer bb) {
+      this.bb = bb;
+    }
+
+    @Override
+    void writeTo(ResponseBuffer out) throws IOException {
+      out.ensureCapacity(bb.remaining());
+      out.write(bb.array(), bb.position() + bb.arrayOffset(), bb.remaining());
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    <T> T readFrom(ByteBuffer bb) throws IOException {
+      // effectively consume the rest of the buffer from the caller's
+      // perspective.
+      this.bb = bb.slice();
+      bb.limit(bb.position());
+      return (T)this;
+    }
+
+    public <T> T newInstance(Class<T> valueClass,
+        Configuration conf) throws IOException {
+      T instance;
+      try {
+        // this is much faster than ReflectionUtils!
+        instance = valueClass.newInstance();
+        if (instance instanceof Configurable) {
+          ((Configurable)instance).setConf(conf);
+        }
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return getValue(instance);
+    }
+
+    public <T> T getValue(T value) throws IOException {
+      return RpcWritable.wrap(value).readFrom(bb);
+    }
+
+    int remaining() {
+      return bb.remaining();
+    }
+  }
+}
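
WritableWrapper.readFrom() above decodes a Writable straight from the ByteBuffer's backing array and then advances the buffer position past the consumed bytes. A standalone sketch of that round trip using IntWritable (the example class and flow are illustrative; RpcWritable itself is package-private and not called here):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.io.IntWritable;

// Sketch of the ByteBuffer-backed decode done by WritableWrapper.readFrom():
// read a Writable from the buffer's backing array, then advance the buffer
// position over the bytes that were consumed.
public class WritableFromByteBuffer {
  public static void main(String[] args) throws IOException {
    // Serialize an IntWritable into a ByteBuffer.
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    new IntWritable(42).write(new DataOutputStream(baos));
    ByteBuffer bb = ByteBuffer.wrap(baos.toByteArray());

    // Deserialize straight off the backing array, as the adapter above does.
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(
        bb.array(), bb.position() + bb.arrayOffset(), bb.remaining()));
    IntWritable value = new IntWritable();
    value.readFields(in);
    bb.position(bb.limit() - in.available()); // advance over the bytes read

    System.out.println(value.get() + ", remaining=" + bb.remaining());
  }
}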

+ 96 - 118
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -26,7 +26,6 @@ import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.lang.reflect.UndeclaredThrowableException;
@@ -47,6 +46,7 @@ import java.nio.channels.Selector;
 import java.nio.channels.ServerSocketChannel;
 import java.nio.channels.SocketChannel;
 import java.nio.channels.WritableByteChannel;
+import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -69,7 +69,6 @@ import javax.security.sasl.Sasl;
 import javax.security.sasl.SaslException;
 import javax.security.sasl.SaslServer;
 
-import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -80,12 +79,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.ipc.ProtobufRpcEngine.RpcResponseMessageWrapper;
-import org.apache.hadoop.ipc.ProtobufRpcEngine.RpcResponseWrapper;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
 import org.apache.hadoop.ipc.RPC.VersionMismatch;
 import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
@@ -115,7 +111,6 @@ import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.ProtoUtil;
-import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.htrace.core.SpanId;
@@ -124,9 +119,7 @@ import org.apache.htrace.core.Tracer;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.ByteString;
-import com.google.protobuf.CodedOutputStream;
 import com.google.protobuf.Message;
-import com.google.protobuf.Message.Builder;
 
 /** An abstract IPC service.  IPC calls take a single {@link Writable} as a
  * parameter, and return a {@link Writable} as their value.  A service runs on
@@ -223,7 +216,7 @@ public abstract class Server {
    * and send back a nicer response.
    */
   private static final ByteBuffer HTTP_GET_BYTES = ByteBuffer.wrap(
-      "GET ".getBytes(Charsets.UTF_8));
+      "GET ".getBytes(StandardCharsets.UTF_8));
   
   /**
    * An HTTP response to send back if we detect an HTTP request to our IPC
@@ -423,6 +416,13 @@ public abstract class Server {
 
   private int maxQueueSize;
   private final int maxRespSize;
+  private final ThreadLocal<ResponseBuffer> responseBuffer =
+      new ThreadLocal<ResponseBuffer>(){
+        @Override
+        protected ResponseBuffer initialValue() {
+          return new ResponseBuffer(INITIAL_RESP_BUF_SIZE);
+        }
+  };
   private int socketSendBufferSize;
   private final int maxDataLength;
   private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
@@ -744,14 +744,7 @@ public abstract class Server {
     public void abortResponse(Throwable t) throws IOException {
       // don't send response if the call was already sent or aborted.
       if (responseWaitCount.getAndSet(-1) > 0) {
-        // clone the call to prevent a race with the other thread stomping
-        // on the response while being sent.  the original call is
-        // effectively discarded since the wait count won't hit zero
-        Call call = new Call(this);
-        setupResponse(new ByteArrayOutputStream(), call,
-            RpcStatusProto.FATAL, RpcErrorCodeProto.ERROR_RPC_SERVER,
-            null, t.getClass().getName(), StringUtils.stringifyException(t));
-        call.sendResponse();
+        connection.abortResponse(this, t);
       }
     }
 
@@ -1272,9 +1265,7 @@ public abstract class Server {
         // must only wrap before adding to the responseQueue to prevent
         // postponed responses from being encrypted and sent out of order.
         if (call.connection.useWrap) {
-          ByteArrayOutputStream response = new ByteArrayOutputStream();
-          wrapWithSasl(response, call);
-          call.setResponse(ByteBuffer.wrap(response.toByteArray()));
+          wrapWithSasl(call);
         }
         call.connection.responseQueue.addLast(call);
         if (call.connection.responseQueue.size() == 1) {
@@ -1349,6 +1340,7 @@ public abstract class Server {
    * A WrappedRpcServerException that is suppressed altogether
    * for the purposes of logging.
    */
+  @SuppressWarnings("serial")
   private static class WrappedRpcServerExceptionSuppressed
       extends WrappedRpcServerException {
     public WrappedRpcServerExceptionSuppressed(
@@ -1395,8 +1387,7 @@ public abstract class Server {
     // Fake 'call' for failed authorization response
     private final Call authFailedCall = new Call(AUTHORIZATION_FAILED_CALL_ID,
         RpcConstants.INVALID_RETRY_COUNT, null, this);
-    private ByteArrayOutputStream authFailedResponse = new ByteArrayOutputStream();
-    
+
     private boolean sentNegotiate = false;
     private boolean useWrap = false;
     
@@ -1482,10 +1473,10 @@ public abstract class Server {
       }
     }
 
-    private void saslReadAndProcess(DataInputStream dis) throws
+    private void saslReadAndProcess(RpcWritable.Buffer buffer) throws
     WrappedRpcServerException, IOException, InterruptedException {
       final RpcSaslProto saslMessage =
-          decodeProtobufFromStream(RpcSaslProto.newBuilder(), dis);
+          getMessage(RpcSaslProto.getDefaultInstance(), buffer);
       switch (saslMessage.getState()) {
         case WRAP: {
           if (!saslContextEstablished || !useWrap) {
@@ -1598,7 +1589,10 @@ public abstract class Server {
         String qop = (String) saslServer.getNegotiatedProperty(Sasl.QOP);
         // SASL wrapping is only used if the connection has a QOP, and
         // the value is not auth.  ex. auth-int & auth-priv
-        useWrap = (qop != null && !"auth".equalsIgnoreCase(qop));        
+        useWrap = (qop != null && !"auth".equalsIgnoreCase(qop));
+        if (!useWrap) {
+          disposeSasl();
+        }
       }
     }
     
@@ -1692,9 +1686,9 @@ public abstract class Server {
     private void switchToSimple() {
       // disable SASL and blank out any SASL server
       authProtocol = AuthProtocol.NONE;
-      saslServer = null;
+      disposeSasl();
     }
-    
+
     private RpcSaslProto buildSaslResponse(SaslState state, byte[] replyToken) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Will send " + state + " token of size "
@@ -1712,15 +1706,14 @@ public abstract class Server {
     private void doSaslReply(Message message) throws IOException {
       final Call saslCall = new Call(AuthProtocol.SASL.callId,
           RpcConstants.INVALID_RETRY_COUNT, null, this);
-      final ByteArrayOutputStream saslResponse = new ByteArrayOutputStream();
-      setupResponse(saslResponse, saslCall,
+      setupResponse(saslCall,
           RpcStatusProto.SUCCESS, null,
-          new RpcResponseWrapper(message), null, null);
+          RpcWritable.wrap(message), null, null);
       saslCall.sendResponse();
     }
 
     private void doSaslReply(Exception ioe) throws IOException {
-      setupResponse(authFailedResponse, authFailedCall,
+      setupResponse(authFailedCall,
           RpcStatusProto.FATAL, RpcErrorCodeProto.FATAL_UNAUTHORIZED,
           null, ioe.getClass().getName(), ioe.getLocalizedMessage());
       authFailedCall.sendResponse();
@@ -1731,6 +1724,8 @@ public abstract class Server {
         try {
           saslServer.dispose();
         } catch (SaslException ignored) {
+        } finally {
+          saslServer = null;
         }
       }
     }
@@ -1839,7 +1834,7 @@ public abstract class Server {
           dataLengthBuffer.clear(); // to read length of future rpc packets
           data.flip();
           boolean isHeaderRead = connectionContextRead;
-          processOneRpc(data.array());
+          processOneRpc(data);
           data = null;
           // the last rpc-request we processed could have simply been the
           // connectionContext; if so continue to read the first RPC.
@@ -1929,7 +1924,7 @@ public abstract class Server {
         // Versions >>9  understand the normal response
         Call fakeCall = new Call(-1, RpcConstants.INVALID_RETRY_COUNT, null,
             this);
-        setupResponse(buffer, fakeCall, 
+        setupResponse(fakeCall,
             RpcStatusProto.FATAL, RpcErrorCodeProto.FATAL_VERSION_MISMATCH,
             null, VersionMismatch.class.getName(), errMsg);
         fakeCall.sendResponse();
@@ -1957,7 +1952,7 @@ public abstract class Server {
     private void setupHttpRequestOnIpcPortResponse() throws IOException {
       Call fakeCall = new Call(0, RpcConstants.INVALID_RETRY_COUNT, null, this);
       fakeCall.setResponse(ByteBuffer.wrap(
-          RECEIVED_HTTP_REQ_RESPONSE.getBytes(Charsets.UTF_8)));
+          RECEIVED_HTTP_REQ_RESPONSE.getBytes(StandardCharsets.UTF_8)));
       fakeCall.sendResponse();
     }
 
@@ -1966,7 +1961,7 @@ public abstract class Server {
      * @throws WrappedRpcServerException - if the header cannot be
      *         deserialized, or the user is not authorized
      */ 
-    private void processConnectionContext(DataInputStream dis)
+    private void processConnectionContext(RpcWritable.Buffer buffer)
         throws WrappedRpcServerException {
       // allow only one connection context during a session
       if (connectionContextRead) {
@@ -1974,13 +1969,12 @@ public abstract class Server {
             RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
             "Connection context already processed");
       }
-      connectionContext = decodeProtobufFromStream(
-          IpcConnectionContextProto.newBuilder(), dis);
+      connectionContext = getMessage(IpcConnectionContextProto.getDefaultInstance(), buffer);
       protocolName = connectionContext.hasProtocol() ? connectionContext
           .getProtocol() : null;
 
       UserGroupInformation protocolUser = ProtoUtil.getUgi(connectionContext);
-      if (saslServer == null) {
+      if (authProtocol == AuthProtocol.NONE) {
         user = protocolUser;
       } else {
         // user is authenticated
@@ -2053,7 +2047,7 @@ public abstract class Server {
         if (unwrappedData.remaining() == 0) {
           unwrappedDataLengthBuffer.clear();
           unwrappedData.flip();
-          processOneRpc(unwrappedData.array());
+          processOneRpc(unwrappedData);
           unwrappedData = null;
         }
       }
@@ -2077,36 +2071,35 @@ public abstract class Server {
      *         client in this method and does not require verbose logging by the
      *         Listener thread
      * @throws InterruptedException
-     */    
-    private void processOneRpc(byte[] buf)
+     */
+    private void processOneRpc(ByteBuffer bb)
         throws IOException, WrappedRpcServerException, InterruptedException {
       int callId = -1;
       int retry = RpcConstants.INVALID_RETRY_COUNT;
       try {
-        final DataInputStream dis =
-            new DataInputStream(new ByteArrayInputStream(buf));
+        final RpcWritable.Buffer buffer = RpcWritable.Buffer.wrap(bb);
         final RpcRequestHeaderProto header =
-            decodeProtobufFromStream(RpcRequestHeaderProto.newBuilder(), dis);
+            getMessage(RpcRequestHeaderProto.getDefaultInstance(), buffer);
         callId = header.getCallId();
         retry = header.getRetryCount();
         if (LOG.isDebugEnabled()) {
           LOG.debug(" got #" + callId);
         }
         checkRpcHeaders(header);
-        
+
         if (callId < 0) { // callIds typically used during connection setup
-          processRpcOutOfBandRequest(header, dis);
+          processRpcOutOfBandRequest(header, buffer);
         } else if (!connectionContextRead) {
           throw new WrappedRpcServerException(
               RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
               "Connection context not established");
         } else {
-          processRpcRequest(header, dis);
+          processRpcRequest(header, buffer);
         }
       } catch (WrappedRpcServerException wrse) { // inform client of error
         Throwable ioe = wrse.getCause();
         final Call call = new Call(callId, retry, null, this);
-        setupResponse(authFailedResponse, call,
+        setupResponse(call,
             RpcStatusProto.FATAL, wrse.getRpcErrorCodeProto(), null,
             ioe.getClass().getName(), ioe.getMessage());
         call.sendResponse();
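
Several hunks above move the request path from copying bytes into a byte[] and re-reading them through a DataInputStream to handing the filled ByteBuffer straight to the decoder (RpcWritable.Buffer in the patch). A minimal sketch of the underlying zero-copy idea using plain NIO; the length-prefixed framing and record contents are illustrative, not Hadoop's wire format:

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    public class BufferDecode {
      // Decodes length-prefixed records directly from the ByteBuffer the
      // reader filled, instead of copying into a byte[] and re-reading it
      // through a stream as the old code path did.
      static void processRecords(ByteBuffer bb) {
        while (bb.remaining() >= Integer.BYTES) {
          int len = bb.getInt();                    // illustrative length prefix
          ByteBuffer payload = bb.slice();          // zero-copy view of the record
          payload.limit(len);
          bb.position(bb.position() + len);         // advance past the record
          System.out.println("record: " + StandardCharsets.UTF_8.decode(payload));
        }
      }

      public static void main(String[] args) {
        ByteBuffer bb = ByteBuffer.allocate(64);
        byte[] msg = "ping".getBytes(StandardCharsets.UTF_8);
        bb.putInt(msg.length).put(msg).flip();
        processRecords(bb);
      }
    }
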
@@ -2157,7 +2150,7 @@ public abstract class Server {
      * @throws InterruptedException
      */
     private void processRpcRequest(RpcRequestHeaderProto header,
-        DataInputStream dis) throws WrappedRpcServerException,
+        RpcWritable.Buffer buffer) throws WrappedRpcServerException,
         InterruptedException {
       Class<? extends Writable> rpcRequestClass = 
           getRpcRequestWrapper(header.getRpcKind());
@@ -2171,8 +2164,7 @@ public abstract class Server {
       }
       Writable rpcRequest;
       try { //Read the rpc request
-        rpcRequest = ReflectionUtils.newInstance(rpcRequestClass, conf);
-        rpcRequest.readFields(dis);
+        rpcRequest = buffer.newInstance(rpcRequestClass, conf);
       } catch (Throwable t) { // includes runtime exception from newInstance
         LOG.warn("Unable to read call parameters for client " +
                  getHostAddress() + "on connection protocol " +
@@ -2253,8 +2245,8 @@ public abstract class Server {
      * @throws InterruptedException
      */
     private void processRpcOutOfBandRequest(RpcRequestHeaderProto header,
-        DataInputStream dis) throws WrappedRpcServerException, IOException,
-        InterruptedException {
+        RpcWritable.Buffer buffer) throws WrappedRpcServerException,
+            IOException, InterruptedException {
       final int callId = header.getCallId();
       if (callId == CONNECTION_CONTEXT_CALL_ID) {
         // SASL must be established prior to connection context
@@ -2264,7 +2256,7 @@ public abstract class Server {
               "Connection header sent during SASL negotiation");
         }
         // read and authorize the user
-        processConnectionContext(dis);
+        processConnectionContext(buffer);
       } else if (callId == AuthProtocol.SASL.callId) {
         // if client was switched to simple, ignore first SASL message
         if (authProtocol != AuthProtocol.SASL) {
@@ -2272,7 +2264,7 @@ public abstract class Server {
               RpcErrorCodeProto.FATAL_INVALID_RPC_HEADER,
               "SASL protocol not requested by client");
         }
-        saslReadAndProcess(dis);
+        saslReadAndProcess(buffer);
       } else if (callId == PING_CALL_ID) {
         LOG.debug("Received ping message");
       } else {
@@ -2319,13 +2311,12 @@ public abstract class Server {
      * @throws WrappedRpcServerException - deserialization failed
      */
     @SuppressWarnings("unchecked")
-    private <T extends Message> T decodeProtobufFromStream(Builder builder,
-        DataInputStream dis) throws WrappedRpcServerException {
+    <T extends Message> T getMessage(Message message,
+        RpcWritable.Buffer buffer) throws WrappedRpcServerException {
       try {
-        builder.mergeDelimitedFrom(dis);
-        return (T)builder.build();
+        return (T)buffer.getValue(message);
       } catch (Exception ioe) {
-        Class<?> protoClass = builder.getDefaultInstanceForType().getClass();
+        Class<?> protoClass = message.getClass();
         throw new WrappedRpcServerException(
             RpcErrorCodeProto.FATAL_DESERIALIZING_REQUEST,
             "Error decoding " + protoClass.getSimpleName() + ": "+ ioe);
@@ -2336,6 +2327,17 @@ public abstract class Server {
       responder.doRespond(call);
     }
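
The getMessage() helper introduced a few lines above decodes against a protobuf default instance instead of allocating a Builder and calling mergeDelimitedFrom per request. A hedged sketch of the same idea using the public protobuf-java API; ProtoDecode and parse are illustrative names, not Hadoop methods:

    import com.google.protobuf.InvalidProtocolBufferException;
    import com.google.protobuf.Message;

    final class ProtoDecode {
      // e.g. parse(IpcConnectionContextProto.getDefaultInstance(), requestBytes)
      @SuppressWarnings("unchecked")
      static <T extends Message> T parse(Message prototype, byte[] bytes)
          throws InvalidProtocolBufferException {
        // Any prototype yields a parser for its own concrete type, so callers
        // no longer build a fresh Builder for every incoming request.
        return (T) prototype.getParserForType().parseFrom(bytes);
      }
    }
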
 
+    private void abortResponse(Call call, Throwable t) throws IOException {
+      // clone the call to prevent a race with the other thread stomping
+      // on the response while being sent.  the original call is
+      // effectively discarded since the wait count won't hit zero
+      call = new Call(call);
+      setupResponse(call,
+          RpcStatusProto.FATAL, RpcErrorCodeProto.ERROR_RPC_SERVER,
+          null, t.getClass().getName(), StringUtils.stringifyException(t));
+      call.sendResponse();
+    }
+
     /**
      * Get service class for connection
      * @return the serviceClass
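
The new abortResponse() above builds the fatal reply on a copy of the call because the original may still be queued on the responder thread and must not be mutated underneath it. A small sketch of that defensive-copy pattern; PendingCall, ErrorResponder, and their fields are hypothetical stand-ins, not Hadoop's Call class:

    import java.nio.charset.StandardCharsets;

    class PendingCall {
      final int callId;
      volatile byte[] response;

      PendingCall(int callId) { this.callId = callId; }

      // copy constructor, analogous to the Call(Call) constructor used above
      PendingCall(PendingCall other) { this.callId = other.callId; }
    }

    class ErrorResponder {
      void abortResponse(PendingCall call, Throwable t) {
        PendingCall copy = new PendingCall(call);   // original stays untouched
        copy.response = ("FATAL " + t.getClass().getName())
            .getBytes(StandardCharsets.UTF_8);
        enqueue(copy);
      }

      void enqueue(PendingCall c) { /* hand off to the responder thread */ }
    }
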
@@ -2379,8 +2381,6 @@ public abstract class Server {
     public void run() {
       LOG.debug(Thread.currentThread().getName() + ": starting");
       SERVER.set(Server.this);
-      ByteArrayOutputStream buf = 
-        new ByteArrayOutputStream(INITIAL_RESP_BUF_SIZE);
       while (running) {
         TraceScope traceScope = null;
         try {
@@ -2450,16 +2450,8 @@ public abstract class Server {
           }
           CurCall.set(null);
           synchronized (call.connection.responseQueue) {
-            setupResponse(buf, call, returnStatus, detailedErr,
+            setupResponse(call, returnStatus, detailedErr,
                 value, errorClass, error);
-
-            // Discard the large buf and reset it back to smaller size
-            // to free up heap.
-            if (buf.size() > maxRespSize) {
-              LOG.warn("Large response size " + buf.size() + " for call "
-                  + call.toString());
-              buf = new ByteArrayOutputStream(INITIAL_RESP_BUF_SIZE);
-            }
             call.sendResponse();
           }
         } catch (InterruptedException e) {
@@ -2677,13 +2669,11 @@ public abstract class Server {
    * @param error error message, if the call failed
    * @throws IOException
    */
-  private static void setupResponse(ByteArrayOutputStream responseBuf,
-                             Call call, RpcStatusProto status, RpcErrorCodeProto erCode,
-                             Writable rv, String errorClass, String error) 
-  throws IOException {
-    responseBuf.reset();
-    DataOutputStream out = new DataOutputStream(responseBuf);
-    RpcResponseHeaderProto.Builder headerBuilder =  
+  private void setupResponse(
+      Call call, RpcStatusProto status, RpcErrorCodeProto erCode,
+      Writable rv, String errorClass, String error)
+          throws IOException {
+    RpcResponseHeaderProto.Builder headerBuilder =
         RpcResponseHeaderProto.newBuilder();
     headerBuilder.setClientId(ByteString.copyFrom(call.clientId));
     headerBuilder.setCallId(call.callId);
@@ -2693,32 +2683,14 @@ public abstract class Server {
 
     if (status == RpcStatusProto.SUCCESS) {
       RpcResponseHeaderProto header = headerBuilder.build();
-      final int headerLen = header.getSerializedSize();
-      int fullLength  = CodedOutputStream.computeRawVarint32Size(headerLen) +
-          headerLen;
       try {
-        if (rv instanceof ProtobufRpcEngine.RpcWrapper) {
-          ProtobufRpcEngine.RpcWrapper resWrapper = 
-              (ProtobufRpcEngine.RpcWrapper) rv;
-          fullLength += resWrapper.getLength();
-          out.writeInt(fullLength);
-          header.writeDelimitedTo(out);
-          rv.write(out);
-        } else { // Have to serialize to buffer to get len
-          final DataOutputBuffer buf = new DataOutputBuffer();
-          rv.write(buf);
-          byte[] data = buf.getData();
-          fullLength += buf.getLength();
-          out.writeInt(fullLength);
-          header.writeDelimitedTo(out);
-          out.write(data, 0, buf.getLength());
-        }
+        setupResponse(call, header, rv);
       } catch (Throwable t) {
         LOG.warn("Error serializing call response for call " + call, t);
         // Call back to same function - this is OK since the
         // buffer is reset at the top, and since status is changed
         // to ERROR it won't infinite loop.
-        setupResponse(responseBuf, call, RpcStatusProto.ERROR,
+        setupResponse(call, RpcStatusProto.ERROR,
             RpcErrorCodeProto.ERROR_SERIALIZING_RESPONSE,
             null, t.getClass().getName(),
             StringUtils.stringifyException(t));
@@ -2728,16 +2700,30 @@ public abstract class Server {
       headerBuilder.setExceptionClassName(errorClass);
       headerBuilder.setErrorMsg(error);
       headerBuilder.setErrorDetail(erCode);
-      RpcResponseHeaderProto header = headerBuilder.build();
-      int headerLen = header.getSerializedSize();
-      final int fullLength  = 
-          CodedOutputStream.computeRawVarint32Size(headerLen) + headerLen;
-      out.writeInt(fullLength);
-      header.writeDelimitedTo(out);
+      setupResponse(call, headerBuilder.build(), null);
     }
-    call.setResponse(ByteBuffer.wrap(responseBuf.toByteArray()));
   }
-  
+
+  private void setupResponse(Call call,
+      RpcResponseHeaderProto header, Writable rv) throws IOException {
+    ResponseBuffer buf = responseBuffer.get().reset();
+    try {
+      RpcWritable.wrap(header).writeTo(buf);
+      if (rv != null) {
+        RpcWritable.wrap(rv).writeTo(buf);
+      }
+      call.setResponse(ByteBuffer.wrap(buf.toByteArray()));
+    } finally {
+      // Discard a large buf and reset it back to smaller size
+      // to free up heap.
+      if (buf.capacity() > maxRespSize) {
+        LOG.warn("Large response size " + buf.size() + " for call "
+            + call.toString());
+        buf.setCapacity(INITIAL_RESP_BUF_SIZE);
+      }
+    }
+  }
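
The new setupResponse() overload above serializes into a reusable, thread-local ResponseBuffer and shrinks it after an unusually large response, replacing the per-handler ByteArrayOutputStream that the old run() loop discarded and recreated. A sketch of the same pattern with plain JDK types; since ByteArrayOutputStream has no setCapacity, the sketch swaps in a fresh buffer instead, and the size constants are illustrative stand-ins for INITIAL_RESP_BUF_SIZE and maxRespSize:

    import java.io.ByteArrayOutputStream;
    import java.nio.ByteBuffer;

    public class ReusableResponseBuffer {
      static final int INITIAL_SIZE = 10240;
      static final int MAX_RESP_SIZE = 1024 * 1024;

      private static final ThreadLocal<ByteArrayOutputStream> BUF =
          ThreadLocal.withInitial(() -> new ByteArrayOutputStream(INITIAL_SIZE));

      static ByteBuffer encode(byte[] header, byte[] body) {
        ByteArrayOutputStream buf = BUF.get();
        buf.reset();                                  // reuse across calls
        try {
          buf.write(header, 0, header.length);
          buf.write(body, 0, body.length);
          return ByteBuffer.wrap(buf.toByteArray());
        } finally {
          // The patch checks capacity() and calls setCapacity(); a plain
          // ByteArrayOutputStream has neither, so just replace the buffer
          // once a response has grown it past the threshold.
          if (buf.size() > MAX_RESP_SIZE) {
            BUF.set(new ByteArrayOutputStream(INITIAL_SIZE));
          }
        }
      }
    }
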
+
   /**
    * Setup response for the IPC Call on Fatal Error from a 
    * client that is using old version of Hadoop.
@@ -2764,10 +2750,8 @@ public abstract class Server {
     WritableUtils.writeString(out, error);
     call.setResponse(ByteBuffer.wrap(response.toByteArray()));
   }
-  
-  
-  private static void wrapWithSasl(ByteArrayOutputStream response, Call call)
-      throws IOException {
+
+  private void wrapWithSasl(Call call) throws IOException {
     if (call.connection.saslServer != null) {
       byte[] token = call.rpcResponse.array();
       // synchronization may be needed since there can be multiple Handler
@@ -2778,7 +2762,6 @@ public abstract class Server {
       if (LOG.isDebugEnabled())
         LOG.debug("Adding saslServer wrapped token of size " + token.length
             + " as call response.");
-      response.reset();
       // rebuild with sasl header and payload
       RpcResponseHeaderProto saslHeader = RpcResponseHeaderProto.newBuilder()
           .setCallId(AuthProtocol.SASL.callId)
@@ -2786,14 +2769,9 @@ public abstract class Server {
           .build();
       RpcSaslProto saslMessage = RpcSaslProto.newBuilder()
           .setState(SaslState.WRAP)
-          .setToken(ByteString.copyFrom(token, 0, token.length))
+          .setToken(ByteString.copyFrom(token))
           .build();
-      RpcResponseMessageWrapper saslResponse =
-          new RpcResponseMessageWrapper(saslHeader, saslMessage);
-
-      DataOutputStream out = new DataOutputStream(response);
-      out.writeInt(saslResponse.getLength());
-      saslResponse.write(out);
+      setupResponse(call, saslHeader, RpcWritable.wrap(saslMessage));
     }
   }
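
The wrapWithSasl() changes above reuse setupResponse() to re-frame the wrapped bytes as a SaslState.WRAP message instead of hand-writing the length and header. The wrapping step itself is standard javax.security.sasl usage; a minimal sketch, assuming an already-negotiated SaslServer with an integrity or confidentiality QOP:

    import javax.security.sasl.SaslException;
    import javax.security.sasl.SaslServer;

    final class SaslWrapExample {
      // Wrap a finished RPC response before it is re-framed as a SASL message.
      static byte[] wrapResponse(SaslServer saslServer, byte[] rpcResponse)
          throws SaslException {
        synchronized (saslServer) {      // multiple handler threads may share it
          return saslServer.wrap(rpcResponse, 0, rpcResponse.length);
        }
      }
    }
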
   

+ 4 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/package-info.java

@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Tools to help define network clients and servers.
+ */
 @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 package org.apache.hadoop.ipc;

+ 0 - 23
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/package.html

@@ -1,23 +0,0 @@
-<html>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<body>
-Tools to help define network clients and servers.
-</body>
-</html>
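
Taken together, the two diffs above move the org.apache.hadoop.ipc package description from a package.html file into Javadoc on package-info.java. For reference, a complete package-info.java in that style could look like the sketch below; the import locations for the annotations are assumed to be the usual Hadoop classification package:

    /**
     * Tools to help define network clients and servers.
     */
    @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
    @InterfaceStability.Evolving
    package org.apache.hadoop.ipc;

    import org.apache.hadoop.classification.InterfaceAudience;
    import org.apache.hadoop.classification.InterfaceStability;
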

Some files were not shown because too many files changed in this diff