
Merge branch 'trunk' into HDFS-7240

Anu Engineer, 9 years ago
parent
commit
23923086d1
100 files changed, with 5803 additions and 1767 deletions
  1. LICENSE.txt (+1014, -3)
  2. NOTICE.txt (+266, -0)
  3. dev-support/bin/qbt (+18, -0)
  4. dev-support/bin/yetus-wrapper (+1, -1)
  5. dev-support/checkstyle/suppressions.xml (+21, -0)
  6. hadoop-build-tools/pom.xml (+41, -0)
  7. hadoop-common-project/hadoop-auth/pom.xml (+4, -23)
  8. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java (+9, -9)
  9. hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java (+0, -1)
  10. hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java (+14, -12)
  11. hadoop-common-project/hadoop-common/HadoopCommon.cmake (+5, -4)
  12. hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml (+7, -1)
  13. hadoop-common-project/hadoop-common/pom.xml (+4, -0)
  14. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java (+27, -3)
  15. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java (+61, -121)
  16. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java (+148, -10)
  17. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java (+37, -4)
  18. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java (+1, -1)
  19. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java (+3, -0)
  20. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InvalidRequestException.java (+4, -0)
  21. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java (+94, -59)
  22. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathAccessDeniedException.java (+11, -1)
  23. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathNotFoundException.java (+17, -3)
  24. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathPermissionException.java (+15, -1)
  25. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java (+1, -1)
  26. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java (+29, -84)
  27. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCoderOptions.java (+89, -0)
  28. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureDecoder.java (+12, -6)
  29. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureEncoder.java (+10, -5)
  30. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java (+5, -1)
  31. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java (+5, -1)
  32. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/XORErasureDecoder.java (+5, -1)
  33. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/XORErasureEncoder.java (+5, -1)
  34. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java (+0, -220)
  35. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureDecoder.java (+0, -181)
  36. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureEncoder.java (+0, -146)
  37. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteArrayDecodingState.java (+111, -0)
  38. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteArrayEncodingState.java (+81, -0)
  39. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteBufferDecodingState.java (+134, -0)
  40. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteBufferEncodingState.java (+98, -0)
  41. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/CoderUtil.java (+199, -0)
  42. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DecodingState.java (+55, -0)
  43. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawDecoder.java (+7, -9)
  44. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawEncoder.java (+7, -8)
  45. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawErasureCoderFactory.java (+6, -4)
  46. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/EncodingState.java (+44, -0)
  47. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawDecoder.java (+25, -23)
  48. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawDecoderLegacy.java (+36, -30)
  49. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawEncoder.java (+25, -20)
  50. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawEncoderLegacy.java (+50, -32)
  51. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawErasureCoderFactory.java (+5, -4)
  52. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawErasureCoderFactoryLegacy.java (+5, -4)
  53. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java (+0, -73)
  54. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java (+5, -6)
  55. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureDecoder.java (+128, -9)
  56. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureEncoder.java (+127, -8)
  57. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawDecoder.java (+27, -24)
  58. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawEncoder.java (+32, -25)
  59. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawErasureCoderFactory.java (+5, -4)
  60. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/package-info.java (+38, -0)
  61. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/CoderUtil.java (+0, -83)
  62. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/GaloisField.java (+2, -2)
  63. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java (+321, -0)
  64. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/CallReturn.java (+75, -0)
  65. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java (+104, -30)
  66. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java (+3, -1)
  67. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java (+60, -64)
  68. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java (+80, -50)
  69. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java (+32, -33)
  70. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java (+256, -29)
  71. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java (+1, -0)
  72. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java (+9, -0)
  73. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java (+291, -62)
  74. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KDiag.java (+22, -12)
  75. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java (+12, -0)
  76. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java (+1, -1)
  77. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java (+84, -119)
  78. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java (+23, -0)
  79. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtUtilShell.java (+42, -5)
  80. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java (+7, -0)
  81. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java (+19, -0)
  82. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/CommandShell.java (+2, -2)
  83. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java (+15, -1)
  84. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java (+108, -51)
  85. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/KMSUtil.java (+76, -0)
  86. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java (+5, -1)
  87. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/AsyncGet.java (+65, -0)
  88. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/AsyncGetFuture.java (+73, -0)
  89. hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer (+14, -0)
  90. hadoop-common-project/hadoop-common/src/main/resources/core-default.xml (+107, -2)
  91. hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md (+26, -6)
  92. hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md (+1, -0)
  93. hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm (+13, -2)
  94. hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md (+9, -0)
  95. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java (+6, -0)
  96. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java (+94, -0)
  97. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java (+70, -3)
  98. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java (+420, -0)
  99. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRawCoderMapping.java (+17, -12)
  100. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java (+5, -9)

LICENSE.txt (+1014, -3)

@@ -320,7 +320,9 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-For com.google.re2j.* classes:
+The binary distribution of this product bundles these dependencies under the
+following license:
+re2j 1.0
 ---------------------------------------------------------------------
 This is a work derived from Russ Cox's RE2 in Go, whose license
 http://golang.org/LICENSE is as follows:
@@ -548,12 +550,14 @@ hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css
+And the binary distribution of this product bundles these dependencies under the
+following license:
+Mockito 1.8.5
+SLF4J 1.7.10
 --------------------------------------------------------------------------------
 
 The MIT License (MIT)
 
-Copyright (c) 2011-2016 Twitter, Inc.
-
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
@@ -648,3 +652,1010 @@ hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/d3.v3.js
 
 D3 is available under a 3-clause BSD license. For details, see:
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/d3-LICENSE
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+HSQLDB Database 2.0.0
+--------------------------------------------------------------------------------
+"COPYRIGHTS AND LICENSES (based on BSD License)
+
+For work developed by the HSQL Development Group:
+
+Copyright (c) 2001-2016, The HSQL Development Group
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+Neither the name of the HSQL Development Group nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ""AS IS""
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL HSQL DEVELOPMENT GROUP, HSQLDB.ORG,
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+For work originally developed by the Hypersonic SQL Group:
+
+Copyright (c) 1995-2000 by the Hypersonic SQL Group.
+All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+Neither the name of the Hypersonic SQL Group nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ""AS IS""
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE HYPERSONIC SQL GROUP,
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+This software consists of voluntary contributions made by many individuals on behalf of the
+Hypersonic SQL Group."
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+servlet-api 2.5
+jsp-api 2.1
+Streaming API for XML 1.0
+--------------------------------------------------------------------------------
+COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0
+1. Definitions. 
+
+1.1. Contributor means each individual or entity
+that creates or contributes to the creation of
+Modifications. 
+
+1.2. Contributor Version means the combination of the
+Original Software, prior Modifications used by a Contributor (if any), and the
+Modifications made by that particular Contributor. 
+
+1.3. Covered
+Software means (a) the Original Software, or (b) Modifications, or (c) the
+combination of files containing Original Software with files containing
+Modifications, in each case including portions
+thereof. 
+
+1.4. Executable means the Covered Software in any form other
+than Source Code. 
+
+1.5. Initial Developer means the individual or entity
+that first makes Original Software available under this
+License. 
+
+1.6. Larger Work means a work which combines Covered Software or
+portions thereof with code not governed by the terms of this
+License. 
+
+1.7. License means this document. 
+
+1.8. Licensable means
+having the right to grant, to the maximum extent possible, whether at the time
+of the initial grant or subsequently acquired, any and all of the rights
+conveyed herein. 
+
+1.9. Modifications means the Source Code and Executable
+form of any of the following:
+A. Any file that results from an addition to,
+deletion from or modification of the contents of a file containing Original
+Software or previous Modifications;
+B. Any new file that contains any part of the Original Software
+or previous Modification; or
+C. Any new file that is contributed or otherwise made available
+under the terms of this License. 
+
+1.10. Original Software means the Source Code and Executable form of
+computer software code that is originally released under this License. 
+
+1.11. Patent Claims means any patent claim(s), now owned or
+hereafter acquired, including without limitation, method, process, and apparatus
+claims, in any patent Licensable by grantor. 
+
+1.12. Source Code means (a) the common form of computer software code in which
+modifications are made and (b) associated documentation included in or
+with such code. 
+
+1.13. You (or Your) means an individual or a legal entity exercising rights
+under, and complying with all of the terms of, this License. For legal entities,
+You includes any entity which controls, is controlled by, or is under common control
+with You. For purposes of this definition, control means (a) the power, direct
+or indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (b) ownership of more than fifty percent (50%) of the
+outstanding shares or beneficial ownership of such entity. 
+
+2. License Grants.
+
+2.1. The Initial Developer Grant. Conditioned upon Your compliance
+with Section 3.1 below and subject to third party intellectual property claims,
+the Initial Developer hereby grants You a world-wide, royalty-free,
+non-exclusive license: 
+
+(a) under intellectual property rights (other than
+patent or trademark) Licensable by Initial Developer, to use, reproduce, modify,
+display, perform, sublicense and distribute the Original Software (or portions
+thereof), with or without Modifications, and/or as part of a Larger Work;
+and 
+
+(b) under Patent Claims infringed by the making, using or selling of
+Original Software, to make, have made, use, practice, sell, and offer for sale,
+and/or otherwise dispose of the Original Software (or portions
+thereof);
+
+(c) The licenses granted in Sections 2.1(a) and (b) are
+effective on the date Initial Developer first distributes or otherwise makes the
+Original Software available to a third party under the terms of this
+License;
+
+(d) Notwithstanding Section 2.1(b) above, no patent license is
+granted: (1) for code that You delete from the Original Software, or (2) for
+infringements caused by: (i) the modification of the Original Software, or
+(ii) the combination of the Original Software with other software or
+devices. 
+
+2.2. Contributor Grant. Conditioned upon Your compliance with
+Section 3.1 below and subject to third party intellectual property claims, each
+Contributor hereby grants You a world-wide, royalty-free, non-exclusive
+license: 
+
+(a) under intellectual property rights (other than patent or
+trademark) Licensable by Contributor to use, reproduce, modify, display,
+perform, sublicense and distribute the Modifications created by such Contributor
+(or portions thereof), either on an unmodified basis, with other Modifications,
+as Covered Software and/or as part of a Larger Work; and 
+
+(b) under Patent
+Claims infringed by the making, using, or selling of Modifications made by that
+Contributor either alone and/or in combination with its Contributor Version (or
+portions of such combination), to make, use, sell, offer for sale, have made,
+and/or otherwise dispose of: (1) Modifications made by that Contributor (or
+portions thereof); and (2) the combination of Modifications made by that
+Contributor with its Contributor Version (or portions of such
+combination). 
+
+(c) The licenses granted in Sections 2.2(a) and 2.2(b) are
+effective on the date Contributor first distributes or otherwise makes the
+Modifications available to a third party.
+
+(d) Notwithstanding Section 2.2(b)
+above, no patent license is granted: (1) for any code that Contributor has
+deleted from the Contributor Version; (2) for infringements caused by:
+(i) third party modifications of Contributor Version, or (ii) the combination
+of Modifications made by that Contributor with other software (except as part of
+the Contributor Version) or other devices; or (3) under Patent Claims infringed
+by Covered Software in the absence of Modifications made by that
+Contributor. 
+
+3. Distribution Obligations. 
+
+3.1. Availability of Source
+Code. Any Covered Software that You distribute or otherwise make available in
+Executable form must also be made available in Source Code form and that Source
+Code form must be distributed only under the terms of this License. You must
+include a copy of this License with every copy of the Source Code form of the
+Covered Software You distribute or otherwise make available. You must inform
+recipients of any such Covered Software in Executable form as to how they can
+obtain such Covered Software in Source Code form in a reasonable manner on or
+through a medium customarily used for software exchange. 
+
+3.2.
+Modifications. The Modifications that You create or to which You contribute are
+governed by the terms of this License. You represent that You believe Your
+Modifications are Your original creation(s) and/or You have sufficient rights to
+grant the rights conveyed by this License. 
+
+3.3. Required Notices. You must
+include a notice in each of Your Modifications that identifies You as the
+Contributor of the Modification. You may not remove or alter any copyright,
+patent or trademark notices contained within the Covered Software, or any
+notices of licensing or any descriptive text giving attribution to any
+Contributor or the Initial Developer. 
+
+3.4. Application of Additional Terms.
+You may not offer or impose any terms on any Covered Software in Source Code
+form that alters or restricts the applicable version of this License or the
+recipients rights hereunder. You may choose to offer, and to charge a fee for,
+warranty, support, indemnity or liability obligations to one or more recipients
+of Covered Software. However, you may do so only on Your own behalf, and not on
+behalf of the Initial Developer or any Contributor. You must make it absolutely
+clear that any such warranty, support, indemnity or liability obligation is
+offered by You alone, and You hereby agree to indemnify the Initial Developer
+and every Contributor for any liability incurred by the Initial Developer or
+such Contributor as a result of warranty, support, indemnity or liability terms
+You offer.
+
+3.5. Distribution of Executable Versions. You may distribute the
+Executable form of the Covered Software under the terms of this License or under
+the terms of a license of Your choice, which may contain terms different from
+this License, provided that You are in compliance with the terms of this License
+and that the license for the Executable form does not attempt to limit or alter
+the recipients rights in the Source Code form from the rights set forth in this
+License. If You distribute the Covered Software in Executable form under a
+different license, You must make it absolutely clear that any terms which differ
+from this License are offered by You alone, not by the Initial Developer or
+Contributor. You hereby agree to indemnify the Initial Developer and every
+Contributor for any liability incurred by the Initial Developer or such
+Contributor as a result of any such terms You offer. 
+
+3.6. Larger Works. You
+may create a Larger Work by combining Covered Software with other code not
+governed by the terms of this License and distribute the Larger Work as a single
+product. In such a case, You must make sure the requirements of this License are
+fulfilled for the Covered Software. 
+
+4. Versions of the License. 
+
+4.1.
+New Versions. Sun Microsystems, Inc. is the initial license steward and may
+publish revised and/or new versions of this License from time to time. Each
+version will be given a distinguishing version number. Except as provided in
+Section 4.3, no one other than the license steward has the right to modify this
+License. 
+
+4.2. Effect of New Versions. You may always continue to use,
+distribute or otherwise make the Covered Software available under the terms of
+the version of the License under which You originally received the Covered
+Software. If the Initial Developer includes a notice in the Original Software
+prohibiting it from being distributed or otherwise made available under any
+subsequent version of the License, You must distribute and make the Covered
+Software available under the terms of the version of the License under which You
+originally received the Covered Software. Otherwise, You may also choose to use,
+distribute or otherwise make the Covered Software available under the terms of
+any subsequent version of the License published by the license
+steward. 
+
+4.3. Modified Versions. When You are an Initial Developer and You
+want to create a new license for Your Original Software, You may create and use
+a modified version of this License if You: (a) rename the license and remove
+any references to the name of the license steward (except to note that the
+license differs from this License); and (b) otherwise make it clear that the
+license contains terms which differ from this License. 
+
+5. DISCLAIMER OF WARRANTY.
+
+COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN AS IS BASIS,
+WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT
+LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS,
+MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY
+COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER
+OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR
+CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS
+LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER
+THIS DISCLAIMER. 
+
+6. TERMINATION. 
+
+6.1. This License and the rights
+granted hereunder will terminate automatically if You fail to comply with terms
+herein and fail to cure such breach within 30 days of becoming aware of the
+breach. Provisions which, by their nature, must remain in effect beyond the
+termination of this License shall survive. 
+
+6.2. If You assert a patent
+infringement claim (excluding declaratory judgment actions) against Initial
+Developer or a Contributor (the Initial Developer or Contributor against whom
+You assert such claim is referred to as Participant) alleging that the
+Participant Software (meaning the Contributor Version where the Participant is a
+Contributor or the Original Software where the Participant is the Initial
+Developer) directly or indirectly infringes any patent, then any and all rights
+granted directly or indirectly to You by such Participant, the Initial Developer
+(if the Initial Developer is not the Participant) and all Contributors under
+Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from
+Participant terminate prospectively and automatically at the expiration of such
+60 day notice period, unless if within such 60 day period You withdraw Your
+claim with respect to the Participant Software against such Participant either
+unilaterally or pursuant to a written agreement with Participant. 
+
+6.3. In
+the event of termination under Sections 6.1 or 6.2 above, all end user licenses
+that have been validly granted by You or any distributor hereunder prior to
+termination (excluding licenses granted to You by any distributor) shall survive
+termination. 
+
+7. LIMITATION OF LIABILITY.
+UNDER NO CIRCUMSTANCES AND UNDER
+NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE,
+SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF
+COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY
+PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
+CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF
+GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
+COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE
+POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO
+LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTYS NEGLIGENCE TO
+THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT
+ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
+THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. 
+
+8. U.S. GOVERNMENT END USERS.
+
+The Covered Software is a commercial item, as that term is defined in
+48 C.F.R. 2.101 (Oct. 1995), consisting of commercial computer software (as
+that term is defined at 48 C.F.R.  252.227-7014(a)(1)) and commercial computer
+software documentation as such terms are used in 48 C.F.R. 12.212 (Sept.
+1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through
+227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software
+with only those rights set forth herein. This U.S. Government Rights clause is
+in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision
+that addresses Government rights in computer software under this
+License. 
+
+9. MISCELLANEOUS.
+This License represents the complete agreement
+concerning subject matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent necessary to
+make it enforceable. This License shall be governed by the law of the
+jurisdiction specified in a notice contained within the Original Software
+(except to the extent applicable law, if any, provides otherwise), excluding
+such jurisdictions conflict-of-law provisions. Any litigation relating to this
+License shall be subject to the jurisdiction of the courts located in the
+jurisdiction and venue specified in a notice contained within the Original
+Software, with the losing party responsible for costs, including, without
+limitation, court costs and reasonable attorneys fees and expenses. The
+application of the United Nations Convention on Contracts for the International
+Sale of Goods is expressly excluded. Any law or regulation which provides that
+the language of a contract shall be construed against the drafter shall not
+apply to this License. You agree that You alone are responsible for compliance
+with the United States export administration regulations (and the export control
+laws and regulation of any other countries) when You use, distribute or
+otherwise make available any Covered Software. 
+
+10. RESPONSIBILITY FOR CLAIMS.
+As between Initial Developer and the Contributors, each party is
+responsible for claims and damages arising, directly or indirectly, out of its
+utilization of rights under this License and You agree to work with Initial
+Developer and Contributors to distribute such responsibility on an equitable
+basis. Nothing herein is intended or shall be deemed to constitute any admission
+of liability. 
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+Jersey 1.9
+JAXB API bundle for GlassFish V3 2.2.2
+JAXB RI 2.2.3
+--------------------------------------------------------------------------------
+COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)Version 1.1
+
+1. Definitions.
+
+1.1. “Contributor” means each individual or entity that creates or
+contributes to the creation of Modifications.
+1.2. “Contributor Version” means the combination of the Original Software,
+prior Modifications used by a Contributor (if any), and the Modifications made
+by that particular Contributor.
+1.3. “Covered Software” means (a) the Original Software, or (b)
+Modifications, or (c) the combination of files containing Original Software with
+files containing Modifications, in each case including portions thereof.
+1.4. “Executable” means the Covered Software in any form other than Source
+Code.
+1.5. “Initial Developer” means the individual or entity that first makes
+Original Software available under this License.
+1.6. “Larger Work” means a work which combines Covered Software or portions
+thereof with code not governed by the terms of this License.
+1.7. “License” means this document.
+1.8. “Licensable” means having the right to grant, to the maximum extent
+possible, whether at the time of the initial grant or subsequently acquired, any
+and all of the rights conveyed herein.
+1.9. “Modifications” means the Source Code and Executable form of any of the
+following:
+A. Any file that results from an addition to, deletion from or modification of
+the contents of a file containing Original Software or previous Modifications;
+B. Any new file that contains any part of the Original Software or previous
+Modification; or
+C. Any new file that is contributed or otherwise made available under the terms
+of this License.
+1.10. “Original Software” means the Source Code and Executable form of
+computer software code that is originally released under this License.
+1.11. “Patent Claims” means any patent claim(s), now owned or hereafter
+acquired, including without limitation, method, process, and apparatus claims,
+in any patent Licensable by grantor.
+1.12. “Source Code” means (a) the common form of computer software code in
+which modifications are made and (b) associated documentation included in or
+with such code.
+1.13. “You” (or “Your”) means an individual or a legal entity exercising
+rights under, and complying with all of the terms of, this License. For legal
+entities, “You” includes any entity which controls, is controlled by, or is
+under common control with You. For purposes of this definition, “control”
+means (a) the power, direct or indirect, to cause the direction or management of
+such entity, whether by contract or otherwise, or (b) ownership of more than
+fifty percent (50%) of the outstanding shares or beneficial ownership of such
+entity.
+
+2. License Grants.
+
+2.1. The Initial Developer Grant.
+
+Conditioned upon Your compliance with Section 3.1 below and subject to
+third party intellectual property claims, the Initial Developer hereby grants
+You a world-wide, royalty-free, non-exclusive license:
+(a) under intellectual
+property rights (other than patent or trademark) Licensable by Initial
+Developer, to use, reproduce, modify, display, perform, sublicense and
+distribute the Original Software (or portions thereof), with or without
+Modifications, and/or as part of a Larger Work; and
+(b) under Patent Claims
+infringed by the making, using or selling of Original Software, to make, have
+made, use, practice, sell, and offer for sale, and/or otherwise dispose of the
+Original Software (or portions thereof).
+(c) The licenses granted in Sections
+2.1(a) and (b) are effective on the date Initial Developer first distributes or
+otherwise makes the Original Software available to a third party under the terms
+of this License.
+(d) Notwithstanding Section 2.1(b) above, no patent license is
+granted: (1) for code that You delete from the Original Software, or (2) for
+infringements caused by: (i) the modification of the Original Software, or (ii)
+the combination of the Original Software with other software or devices.
+
+2.2. Contributor Grant.
+
+Conditioned upon Your compliance with Section 3.1 below and
+subject to third party intellectual property claims, each Contributor hereby
+grants You a world-wide, royalty-free, non-exclusive license:
+(a) under
+intellectual property rights (other than patent or trademark) Licensable by
+Contributor to use, reproduce, modify, display, perform, sublicense and
+distribute the Modifications created by such Contributor (or portions thereof),
+either on an unmodified basis, with other Modifications, as Covered Software
+and/or as part of a Larger Work; and
+(b) under Patent Claims infringed by the
+making, using, or selling of Modifications made by that Contributor either alone
+and/or in combination with its Contributor Version (or portions of such
+combination), to make, use, sell, offer for sale, have made, and/or otherwise
+dispose of: (1) Modifications made by that Contributor (or portions thereof);
+and (2) the combination of Modifications made by that Contributor with its
+Contributor Version (or portions of such combination).
+(c) The licenses granted
+in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first
+distributes or otherwise makes the Modifications available to a third
+party.
+(d) Notwithstanding Section 2.2(b) above, no patent license is granted:
+(1) for any code that Contributor has deleted from the Contributor Version; (2)
+for infringements caused by: (i) third party modifications of Contributor
+Version, or (ii) the combination of Modifications made by that Contributor with
+other software (except as part of the Contributor Version) or other devices; or
+(3) under Patent Claims infringed by Covered Software in the absence of
+Modifications made by that Contributor.
+
+3. Distribution Obligations.
+
+3.1. Availability of Source Code.
+Any Covered Software that You distribute or
+otherwise make available in Executable form must also be made available in
+Source Code form and that Source Code form must be distributed only under the
+terms of this License. You must include a copy of this License with every copy
+of the Source Code form of the Covered Software You distribute or otherwise make
+available. You must inform recipients of any such Covered Software in Executable
+form as to how they can obtain such Covered Software in Source Code form in a
+reasonable manner on or through a medium customarily used for software
+exchange.
+3.2. Modifications.
+The Modifications that You create or to which
+You contribute are governed by the terms of this License. You represent that You
+believe Your Modifications are Your original creation(s) and/or You have
+sufficient rights to grant the rights conveyed by this License.
+3.3. Required Notices.
+You must include a notice in each of Your Modifications that
+identifies You as the Contributor of the Modification. You may not remove or
+alter any copyright, patent or trademark notices contained within the Covered
+Software, or any notices of licensing or any descriptive text giving attribution
+to any Contributor or the Initial Developer.
+3.4. Application of Additional Terms.
+You may not offer or impose any terms on any Covered Software in Source
+Code form that alters or restricts the applicable version of this License or the
+recipients' rights hereunder. You may choose to offer, and to charge a fee for,
+warranty, support, indemnity or liability obligations to one or more recipients
+of Covered Software. However, you may do so only on Your own behalf, and not on
+behalf of the Initial Developer or any Contributor. You must make it absolutely
+clear that any such warranty, support, indemnity or liability obligation is
+offered by You alone, and You hereby agree to indemnify the Initial Developer
+and every Contributor for any liability incurred by the Initial Developer or
+such Contributor as a result of warranty, support, indemnity or liability terms
+You offer.
+3.5. Distribution of Executable Versions.
+You may distribute the
+Executable form of the Covered Software under the terms of this License or under
+the terms of a license of Your choice, which may contain terms different from
+this License, provided that You are in compliance with the terms of this License
+and that the license for the Executable form does not attempt to limit or alter
+the recipient's rights in the Source Code form from the rights set forth in
+this License. If You distribute the Covered Software in Executable form under a
+different license, You must make it absolutely clear that any terms which differ
+from this License are offered by You alone, not by the Initial Developer or
+Contributor. You hereby agree to indemnify the Initial Developer and every
+Contributor for any liability incurred by the Initial Developer or such
+Contributor as a result of any such terms You offer.
+3.6. Larger Works.
+You
+may create a Larger Work by combining Covered Software with other code not
+governed by the terms of this License and distribute the Larger Work as a single
+product. In such a case, You must make sure the requirements of this License are
+fulfilled for the Covered Software.
+
+4. Versions of the License.
+
+4.1. New Versions.
+Oracle is the initial license steward and may publish revised and/or
+new versions of this License from time to time. Each version will be given a
+distinguishing version number. Except as provided in Section 4.3, no one other
+than the license steward has the right to modify this License.
+4.2. Effect of New Versions.
+You may always continue to use, distribute or otherwise make the
+Covered Software available under the terms of the version of the License under
+which You originally received the Covered Software. If the Initial Developer
+includes a notice in the Original Software prohibiting it from being distributed
+or otherwise made available under any subsequent version of the License, You
+must distribute and make the Covered Software available under the terms of the
+version of the License under which You originally received the Covered Software.
+Otherwise, You may also choose to use, distribute or otherwise make the Covered
+Software available under the terms of any subsequent version of the License
+published by the license steward.
+4.3. Modified Versions.
+When You are an
+Initial Developer and You want to create a new license for Your Original
+Software, You may create and use a modified version of this License if You: (a)
+rename the license and remove any references to the name of the license steward
+(except to note that the license differs from this License); and (b) otherwise
+make it clear that the license contains terms which differ from this
+License.
+
+5. DISCLAIMER OF WARRANTY.
+
+COVERED SOFTWARE IS PROVIDED UNDER THIS
+LICENSE ON AN “AS IS” BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE
+IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR
+NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED
+SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY
+RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
+COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF
+WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED
+SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
+
+6. TERMINATION.
+
+6.1. This License and the rights granted hereunder will
+terminate automatically if You fail to comply with terms herein and fail to cure
+such breach within 30 days of becoming aware of the breach. Provisions which, by
+their nature, must remain in effect beyond the termination of this License shall
+survive.
+6.2. If You assert a patent infringement claim (excluding declaratory
+judgment actions) against Initial Developer or a Contributor (the Initial
+Developer or Contributor against whom You assert such claim is referred to as
+“Participant”) alleging that the Participant Software (meaning the
+Contributor Version where the Participant is a Contributor or the Original
+Software where the Participant is the Initial Developer) directly or indirectly
+infringes any patent, then any and all rights granted directly or indirectly to
+You by such Participant, the Initial Developer (if the Initial Developer is not
+the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this
+License shall, upon 60 days notice from Participant terminate prospectively and
+automatically at the expiration of such 60 day notice period, unless if within
+such 60 day period You withdraw Your claim with respect to the Participant
+Software against such Participant either unilaterally or pursuant to a written
+agreement with Participant.
+6.3. If You assert a patent infringement claim
+against Participant alleging that the Participant Software directly or
+indirectly infringes any patent where such claim is resolved (such as by license
+or settlement) prior to the initiation of patent infringement litigation, then
+the reasonable value of the licenses granted by such Participant under Sections
+2.1 or 2.2 shall be taken into account in determining the amount or value of any
+payment or license.
+6.4. In the event of termination under Sections 6.1 or 6.2
+above, all end user licenses that have been validly granted by You or any
+distributor hereunder prior to termination (excluding licenses granted to You by
+any distributor) shall survive termination.
+
+7. LIMITATION OF LIABILITY.
+
+UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
+(INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
+DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY
+SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT,
+SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING,
+WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER
+FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN
+IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS
+LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL
+INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
+PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR
+LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND
+LIMITATION MAY NOT APPLY TO YOU.
+
+8. U.S. GOVERNMENT END USERS.
+
+The Covered
+Software is a “commercial item,” as that term is defined in 48 C.F.R. 2.101
+(Oct. 1995), consisting of “commercial computer software” (as that term is
+defined at 48 C.F.R. § 252.227-7014(a)(1)) and “commercial computer software
+documentation” as such terms are used in 48 C.F.R. 12.212 (Sept. 1995).
+Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4
+(June 1995), all U.S. Government End Users acquire Covered Software with only
+those rights set forth herein. This U.S. Government Rights clause is in lieu of,
+and supersedes, any other FAR, DFAR, or other clause or provision that addresses
+Government rights in computer software under this License.
+
+9. MISCELLANEOUS.
+
+This License represents the complete agreement concerning
+subject matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent necessary to
+make it enforceable. This License shall be governed by the law of the
+jurisdiction specified in a notice contained within the Original Software
+(except to the extent applicable law, if any, provides otherwise), excluding
+such jurisdiction's conflict-of-law provisions. Any litigation relating to this
+License shall be subject to the jurisdiction of the courts located in the
+jurisdiction and venue specified in a notice contained within the Original
+Software, with the losing party responsible for costs, including, without
+limitation, court costs and reasonable attorneys' fees and expenses. The
+application of the United Nations Convention on Contracts for the International
+Sale of Goods is expressly excluded. Any law or regulation which provides that
+the language of a contract shall be construed against the drafter shall not
+apply to this License. You agree that You alone are responsible for compliance
+with the United States export administration regulations (and the export control
+laws and regulation of any other countries) when You use, distribute or
+otherwise make available any Covered Software.
+
+10. RESPONSIBILITY FOR CLAIMS.
+
+As between Initial Developer and the Contributors, each party is
+responsible for claims and damages arising, directly or indirectly, out of its
+utilization of rights under this License and You agree to work with Initial
+Developer and Contributors to distribute such responsibility on an equitable
+basis. Nothing herein is intended or shall be deemed to constitute any admission
+of liability.
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+Protocol Buffer Java API 2.5.0
+--------------------------------------------------------------------------------
+This license applies to all parts of Protocol Buffers except the following:
+
+  - Atomicops support for generic gcc, located in
+    src/google/protobuf/stubs/atomicops_internals_generic_gcc.h.
+    This file is copyrighted by Red Hat Inc.
+
+  - Atomicops support for AIX/POWER, located in
+    src/google/protobuf/stubs/atomicops_internals_power.h.
+    This file is copyrighted by Bloomberg Finance LP.
+
+Copyright 2014, Google Inc.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Code generated by the Protocol Buffer compiler is owned by the owner
+of the input file used when generating it.  This code is not
+standalone and requires a support library to be linked with it.  This
+support library is itself covered by the above license.
+
+For:
+XML Commons External Components XML APIs 1.3.04
+--------------------------------------------------------------------------------
+By obtaining, using and/or copying this work, you (the licensee) agree that you
+have read, understood, and will comply with the following terms and conditions.
+
+Permission to copy, modify, and distribute this software and its documentation,
+with or without modification, for any purpose and without fee or royalty is
+hereby granted, provided that you include the following on ALL copies of the
+software and documentation or portions thereof, including modifications:
+- The full text of this NOTICE in a location viewable to users of the
+redistributed or derivative work.
+- Any pre-existing intellectual property disclaimers, notices, or terms and
+conditions. If none exist, the W3C Software Short Notice should be included
+(hypertext is preferred, text is permitted) within the body of any redistributed
+or derivative code.
+- Notice of any changes or modifications to the files, including the date changes
+were made. (We recommend you provide URIs to the location from which the code is
+derived.)
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+JUnit 4.11
+ecj-4.3.1.jar
+--------------------------------------------------------------------------------
+Eclipse Public License - v 1.0
+
+THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC
+LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM
+CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
+
+1. DEFINITIONS
+
+"Contribution" means:
+
+a) in the case of the initial Contributor, the initial code and documentation
+distributed under this Agreement, and
+b) in the case of each subsequent Contributor:
+i) changes to the Program, and
+ii) additions to the Program;
+where such changes and/or additions to the Program originate from and are
+distributed by that particular Contributor. A Contribution 'originates' from a
+Contributor if it was added to the Program by such Contributor itself or anyone
+acting on such Contributor's behalf. Contributions do not include additions to
+the Program which: (i) are separate modules of software distributed in
+conjunction with the Program under their own license agreement, and (ii) are not
+derivative works of the Program.
+"Contributor" means any person or entity that distributes the Program.
+
+"Licensed Patents" mean patent claims licensable by a Contributor which are
+necessarily infringed by the use or sale of its Contribution alone or when
+combined with the Program.
+
+"Program" means the Contributions distributed in accordance with this Agreement.
+
+"Recipient" means anyone who receives the Program under this Agreement,
+including all Contributors.
+
+2. GRANT OF RIGHTS
+
+a) Subject to the terms of this Agreement, each Contributor hereby grants
+Recipient a non-exclusive, worldwide, royalty-free copyright license to
+reproduce, prepare derivative works of, publicly display, publicly perform,
+distribute and sublicense the Contribution of such Contributor, if any, and such
+derivative works, in source code and object code form.
+b) Subject to the terms of this Agreement, each Contributor hereby grants
+Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed
+Patents to make, use, sell, offer to sell, import and otherwise transfer the
+Contribution of such Contributor, if any, in source code and object code form.
+This patent license shall apply to the combination of the Contribution and the
+Program if, at the time the Contribution is added by the Contributor, such
+addition of the Contribution causes such combination to be covered by the
+Licensed Patents. The patent license shall not apply to any other combinations
+which include the Contribution. No hardware per se is licensed hereunder.
+c) Recipient understands that although each Contributor grants the licenses to
+its Contributions set forth herein, no assurances are provided by any
+Contributor that the Program does not infringe the patent or other intellectual
+property rights of any other entity. Each Contributor disclaims any liability to
+Recipient for claims brought by any other entity based on infringement of
+intellectual property rights or otherwise. As a condition to exercising the
+rights and licenses granted hereunder, each Recipient hereby assumes sole
+responsibility to secure any other intellectual property rights needed, if any.
+For example, if a third party patent license is required to allow Recipient to
+distribute the Program, it is Recipient's responsibility to acquire that license
+before distributing the Program.
+d) Each Contributor represents that to its knowledge it has sufficient copyright
+rights in its Contribution, if any, to grant the copyright license set forth in
+this Agreement.
+3. REQUIREMENTS
+
+A Contributor may choose to distribute the Program in object code form under its
+own license agreement, provided that:
+
+a) it complies with the terms and conditions of this Agreement; and
+b) its license agreement:
+i) effectively disclaims on behalf of all Contributors all warranties and
+conditions, express and implied, including warranties or conditions of title and
+non-infringement, and implied warranties or conditions of merchantability and
+fitness for a particular purpose;
+ii) effectively excludes on behalf of all Contributors all liability for
+damages, including direct, indirect, special, incidental and consequential
+damages, such as lost profits;
+iii) states that any provisions which differ from this Agreement are offered by
+that Contributor alone and not by any other party; and
+iv) states that source code for the Program is available from such Contributor,
+and informs licensees how to obtain it in a reasonable manner on or through a
+medium customarily used for software exchange.
+When the Program is made available in source code form:
+
+a) it must be made available under this Agreement; and
+b) a copy of this Agreement must be included with each copy of the Program.
+Contributors may not remove or alter any copyright notices contained within the
+Program.
+
+Each Contributor must identify itself as the originator of its Contribution, if
+any, in a manner that reasonably allows subsequent Recipients to identify the
+originator of the Contribution.
+
+4. COMMERCIAL DISTRIBUTION
+
+Commercial distributors of software may accept certain responsibilities with
+respect to end users, business partners and the like. While this license is
+intended to facilitate the commercial use of the Program, the Contributor who
+includes the Program in a commercial product offering should do so in a manner
+which does not create potential liability for other Contributors. Therefore, if
+a Contributor includes the Program in a commercial product offering, such
+Contributor ("Commercial Contributor") hereby agrees to defend and indemnify
+every other Contributor ("Indemnified Contributor") against any losses, damages
+and costs (collectively "Losses") arising from claims, lawsuits and other legal
+actions brought by a third party against the Indemnified Contributor to the
+extent caused by the acts or omissions of such Commercial Contributor in
+connection with its distribution of the Program in a commercial product
+offering. The obligations in this section do not apply to any claims or Losses
+relating to any actual or alleged intellectual property infringement. In order
+to qualify, an Indemnified Contributor must: a) promptly notify the Commercial
+Contributor in writing of such claim, and b) allow the Commercial Contributor to
+control, and cooperate with the Commercial Contributor in, the defense and any
+related settlement negotiations. The Indemnified Contributor may participate in
+any such claim at its own expense.
+
+For example, a Contributor might include the Program in a commercial product
+offering, Product X. That Contributor is then a Commercial Contributor. If that
+Commercial Contributor then makes performance claims, or offers warranties
+related to Product X, those performance claims and warranties are such
+Commercial Contributor's responsibility alone. Under this section, the
+Commercial Contributor would have to defend claims against the other
+Contributors related to those performance claims and warranties, and if a court
+requires any other Contributor to pay any damages as a result, the Commercial
+Contributor must pay those damages.
+
+5. NO WARRANTY
+
+EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR
+IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each
+Recipient is solely responsible for determining the appropriateness of using and
+distributing the Program and assumes all risks associated with its exercise of
+rights under this Agreement , including but not limited to the risks and costs
+of program errors, compliance with applicable laws, damage to or loss of data,
+programs or equipment, and unavailability or interruption of operations.
+
+6. DISCLAIMER OF LIABILITY
+
+EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY
+CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST
+PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS
+GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+7. GENERAL
+
+If any provision of this Agreement is invalid or unenforceable under applicable
+law, it shall not affect the validity or enforceability of the remainder of the
+terms of this Agreement, and without further action by the parties hereto, such
+provision shall be reformed to the minimum extent necessary to make such
+provision valid and enforceable.
+
+If Recipient institutes patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Program itself
+(excluding combinations of the Program with other software or hardware)
+infringes such Recipient's patent(s), then such Recipient's rights granted under
+Section 2(b) shall terminate as of the date such litigation is filed.
+
+All Recipient's rights under this Agreement shall terminate if it fails to
+comply with any of the material terms or conditions of this Agreement and does
+not cure such failure in a reasonable period of time after becoming aware of
+such noncompliance. If all Recipient's rights under this Agreement terminate,
+Recipient agrees to cease use and distribution of the Program as soon as
+reasonably practicable. However, Recipient's obligations under this Agreement
+and any licenses granted by Recipient relating to the Program shall continue and
+survive.
+
+Everyone is permitted to copy and distribute copies of this Agreement, but in
+order to avoid inconsistency the Agreement is copyrighted and may only be
+modified in the following manner. The Agreement Steward reserves the right to
+publish new versions (including revisions) of this Agreement from time to time.
+No one other than the Agreement Steward has the right to modify this Agreement.
+The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation
+may assign the responsibility to serve as the Agreement Steward to a suitable
+separate entity. Each new version of the Agreement will be given a
+distinguishing version number. The Program (including Contributions) may always
+be distributed subject to the version of the Agreement under which it was
+received. In addition, after a new version of the Agreement is published,
+Contributor may elect to distribute the Program (including its Contributions)
+under the new version. Except as expressly stated in Sections 2(a) and 2(b)
+above, Recipient receives no rights or licenses to the intellectual property of
+any Contributor under this Agreement, whether expressly, by implication,
+estoppel or otherwise. All rights in the Program not expressly granted under
+this Agreement are reserved.
+
+This Agreement is governed by the laws of the State of New York and the
+intellectual property laws of the United States of America. No party to this
+Agreement will bring a legal action under this Agreement more than one year
+after the cause of action arose. Each party waives its rights to a jury trial in
+any resulting litigation.
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+ASM Core 3.2
+JSch 0.1.51
+ParaNamer Core 2.3
+JLine 0.9.94
+leveldbjni-all 1.8
+Hamcrest Core 1.3
+xmlenc Library 0.52
+--------------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of the <organization> nor the
+      names of its contributors may be used to endorse or promote products
+      derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The binary distribution of this product bundles these dependencies under the
+following license:
+FindBugs-jsr305 3.0.0
+--------------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation are those
+of the authors and should not be interpreted as representing official policies,
+either expressed or implied, of the FreeBSD Project.

+ 266 - 0
NOTICE.txt

@@ -15,3 +15,269 @@ which has the following notices:
 * This product includes software developed at
   Progress Software Corporation and/or its  subsidiaries or affiliates.
 * This product includes software developed by IBM Corporation and others.
+
+The binary distribution of this product bundles binaries of
+AWS Java SDK 1.10.6,
+which has the following notices:
+ * This software includes third party software subject to the following
+ copyrights: - XML parsing and utility functions from JetS3t - Copyright
+ 2006-2009 James Murty. - JSON parsing and utility functions from JSON.org -
+ Copyright 2002 JSON.org. - PKCS#1 PEM encoded private key parsing and utility
+ functions from oauth.googlecode.com - Copyright 1998-2010 AOL Inc.
+
+The binary distribution of this product bundles binaries of
+Gson 2.2.4,
+which has the following notices:
+
+                            The Netty Project
+                            =================
+
+Please visit the Netty web site for more information:
+
+  * http://netty.io/
+
+Copyright 2014 The Netty Project
+
+The Netty Project licenses this file to you under the Apache License,
+version 2.0 (the "License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at:
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+License for the specific language governing permissions and limitations
+under the License.
+
+Also, please refer to each LICENSE.<component>.txt file, which is located in
+the 'license' directory of the distribution file, for the license terms of the
+components that this product depends on.
+
+-------------------------------------------------------------------------------
+This product contains the extensions to Java Collections Framework which has
+been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene:
+
+  * LICENSE:
+    * license/LICENSE.jsr166y.txt (Public Domain)
+  * HOMEPAGE:
+    * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/
+    * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/
+
+This product contains a modified version of Robert Harder's Public Domain
+Base64 Encoder and Decoder, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.base64.txt (Public Domain)
+  * HOMEPAGE:
+    * http://iharder.sourceforge.net/current/java/base64/
+
+This product contains a modified portion of 'Webbit', an event based
+WebSocket and HTTP server, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.webbit.txt (BSD License)
+  * HOMEPAGE:
+    * https://github.com/joewalnes/webbit
+
+This product contains a modified portion of 'SLF4J', a simple logging
+facade for Java, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.slf4j.txt (MIT License)
+  * HOMEPAGE:
+    * http://www.slf4j.org/
+
+This product contains a modified portion of 'ArrayDeque', written by Josh
+Bloch of Google, Inc:
+
+  * LICENSE:
+    * license/LICENSE.deque.txt (Public Domain)
+
+This product contains a modified portion of 'Apache Harmony', an open source
+Java SE, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.harmony.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * http://archive.apache.org/dist/harmony/
+
+This product contains a modified version of Roland Kuhn's ASL2
+AbstractNodeQueue, which is based on Dmitriy Vyukov's non-intrusive MPSC queue.
+It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.abstractnodequeue.txt (Public Domain)
+  * HOMEPAGE:
+    * https://github.com/akka/akka/blob/wip-2.2.3-for-scala-2.11/akka-actor/src/main/java/akka/dispatch/AbstractNodeQueue.java
+
+This product contains a modified portion of 'jbzip2', a Java bzip2 compression
+and decompression library written by Matthew J. Francis. It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.jbzip2.txt (MIT License)
+  * HOMEPAGE:
+    * https://code.google.com/p/jbzip2/
+
+This product contains a modified portion of 'libdivsufsort', a C API library to construct
+the suffix array and the Burrows-Wheeler transformed string for any input string of
+a constant-size alphabet written by Yuta Mori. It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.libdivsufsort.txt (MIT License)
+  * HOMEPAGE:
+    * https://code.google.com/p/libdivsufsort/
+
+This product contains a modified portion of Nitsan Wakart's 'JCTools', Java Concurrency Tools for the JVM,
+ which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.jctools.txt (ASL2 License)
+  * HOMEPAGE:
+    * https://github.com/JCTools/JCTools
+
+This product optionally depends on 'JZlib', a re-implementation of zlib in
+pure Java, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.jzlib.txt (BSD style License)
+  * HOMEPAGE:
+    * http://www.jcraft.com/jzlib/
+
+This product optionally depends on 'Compress-LZF', a Java library for encoding and
+decoding data in LZF format, written by Tatu Saloranta. It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.compress-lzf.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://github.com/ning/compress
+
+This product optionally depends on 'lz4', a LZ4 Java compression
+and decompression library written by Adrien Grand. It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.lz4.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://github.com/jpountz/lz4-java
+
+This product optionally depends on 'lzma-java', a LZMA Java compression
+and decompression library, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.lzma-java.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://github.com/jponge/lzma-java
+
+This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression
+and decompression library written by William Kinney. It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.jfastlz.txt (MIT License)
+  * HOMEPAGE:
+    * https://code.google.com/p/jfastlz/
+
+This product contains a modified portion of and optionally depends on 'Protocol Buffers', Google's data
+interchange format, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.protobuf.txt (New BSD License)
+  * HOMEPAGE:
+    * http://code.google.com/p/protobuf/
+
+This product optionally depends on 'Bouncy Castle Crypto APIs' to generate
+a temporary self-signed X.509 certificate when the JVM does not provide the
+equivalent functionality.  It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.bouncycastle.txt (MIT License)
+  * HOMEPAGE:
+    * http://www.bouncycastle.org/
+
+This product optionally depends on 'Snappy', a compression library produced
+by Google Inc, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.snappy.txt (New BSD License)
+  * HOMEPAGE:
+    * http://code.google.com/p/snappy/
+
+This product optionally depends on 'JBoss Marshalling', an alternative Java
+serialization API, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1)
+  * HOMEPAGE:
+    * http://www.jboss.org/jbossmarshalling
+
+This product optionally depends on 'Caliper', Google's micro-
+benchmarking framework, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.caliper.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * http://code.google.com/p/caliper/
+
+This product optionally depends on 'Apache Commons Logging', a logging
+framework, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.commons-logging.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * http://commons.apache.org/logging/
+
+This product optionally depends on 'Apache Log4J', a logging framework, which
+can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.log4j.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * http://logging.apache.org/log4j/
+
+This product optionally depends on 'Aalto XML', an ultra-high performance
+non-blocking XML processor, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.aalto-xml.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * http://wiki.fasterxml.com/AaltoHome
+
+This product contains a modified version of 'HPACK', a Java implementation of
+the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.hpack.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://github.com/twitter/hpack
+
+This product contains a modified portion of 'Apache Commons Lang', a Java library
+that provides utilities for the java.lang API, which can be obtained at:
+
+  * LICENSE:
+    * license/LICENSE.commons-lang.txt (Apache License 2.0)
+  * HOMEPAGE:
+    * https://commons.apache.org/proper/commons-lang/
+
+The binary distribution of this product bundles binaries of
+Commons Codec 1.4,
+which has the following notices:
+ * src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java contains test data from http://aspell.net/test/orig/batch0.tab. Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org)
+  ===============================================================================
+  The content of package org.apache.commons.codec.language.bm has been translated
+  from the original php source code available at http://stevemorse.org/phoneticinfo.htm
+  with permission from the original authors.
+  Original source copyright: Copyright (c) 2008 Alexander Beider & Stephen P. Morse.
+
+The binary distribution of this product bundles binaries of
+Commons Lang 2.6,
+which has the following notices:
+ * This product includes software from the Spring Framework, under the Apache License 2.0 (see: StringUtils.containsWhitespace())
+
+The binary distribution of this product bundles binaries of
+Apache Log4j 1.2.17,
+which has the following notices:
+ * ResolverUtil.java
+    Copyright 2005-2006 Tim Fennell
+  Dumbster SMTP test server
+    Copyright 2004 Jason Paul Kitchen
+  TypeUtil.java
+    Copyright 2002-2012 Ramnivas Laddad, Juergen Hoeller, Chris Beams

+ 18 - 0
dev-support/bin/qbt

@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+BINDIR=$(cd -P -- "$(dirname -- "${BASH_SOURCE-0}")" >/dev/null && pwd -P)
+exec "${BINDIR}/yetus-wrapper" qbt --project=hadoop --skip-dir=dev-support "$@"

+ 1 - 1
dev-support/bin/yetus-wrapper

@@ -63,7 +63,7 @@ WANTED="$1"
 shift
 ARGV=("$@")
 
-HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.2.0}
+HADOOP_YETUS_VERSION=${HADOOP_YETUS_VERSION:-0.3.0}
 BIN=$(yetus_abs "${BASH_SOURCE-$0}")
 BINDIR=$(dirname "${BIN}")
 

+ 21 - 0
dev-support/checkstyle/suppressions.xml

@@ -0,0 +1,21 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<!DOCTYPE suppressions PUBLIC
+"-//Puppy Crawl//DTD Suppressions 1.1//EN"
+"http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
+
+<suppressions>
+  <suppress checks="JavadocPackage" files="[\\/]src[\\/]test[\\/].*"/>
+</suppressions>

+ 41 - 0
hadoop-build-tools/pom.xml

@@ -28,4 +28,45 @@
   <properties>
     <failIfNoTests>false</failIfNoTests>
   </properties>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-site-plugin</artifactId>
+        <configuration>
+          <skip>true</skip>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-remote-resources-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>bundle</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <includes>
+            <include>META-INF/LICENSE.txt</include>
+            <include>META-INF/NOTICE.txt</include>
+          </includes>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>dummy</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
 </project>

+ 4 - 23
hadoop-common-project/hadoop-auth/pom.xml

@@ -118,29 +118,6 @@
         </exclusion>
       </exclusions>
     </dependency>
-    <dependency>
-      <groupId>org.apache.directory.server</groupId>
-      <artifactId>apacheds-kerberos-codec</artifactId>
-      <scope>compile</scope>
-        <exclusions>
-          <exclusion>
-            <groupId>org.apache.directory.api</groupId>
-            <artifactId>api-asn1-ber</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>org.apache.directory.api</groupId>
-            <artifactId>api-i18n</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>org.apache.directory.api</groupId>
-            <artifactId>api-ldap-model</artifactId>
-          </exclusion>
-          <exclusion>
-            <groupId>net.sf.ehcache</groupId>
-            <artifactId>ehcache-core</artifactId>
-          </exclusion>
-        </exclusions>
-    </dependency>
     <dependency>
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
@@ -154,6 +131,10 @@
       <artifactId>curator-test</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.kerby</groupId>
+      <artifactId>kerb-simplekdc</artifactId>
+    </dependency>
   </dependencies>
 
   <build>

+ 9 - 9
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java

@@ -33,8 +33,8 @@ import java.util.Locale;
 import java.util.Set;
 import java.util.regex.Pattern;
 
-import org.apache.directory.server.kerberos.shared.keytab.Keytab;
-import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry;
+import org.apache.kerby.kerberos.kerb.keytab.Keytab;
+import org.apache.kerby.kerberos.kerb.type.base.PrincipalName;
 import org.ietf.jgss.GSSException;
 import org.ietf.jgss.Oid;
 
@@ -200,14 +200,14 @@ public class KerberosUtil {
    *          If keytab entries cannot be read from the file.
    */
   static final String[] getPrincipalNames(String keytabFileName) throws IOException {
-      Keytab keytab = Keytab.read(new File(keytabFileName));
-      Set<String> principals = new HashSet<String>();
-      List<KeytabEntry> entries = keytab.getEntries();
-      for (KeytabEntry entry: entries){
-        principals.add(entry.getPrincipalName().replace("\\", "/"));
-      }
-      return principals.toArray(new String[0]);
+    Keytab keytab = Keytab.loadKeytab(new File(keytabFileName));
+    Set<String> principals = new HashSet<String>();
+    List<PrincipalName> entries = keytab.getPrincipals();
+    for (PrincipalName entry : entries) {
+      principals.add(entry.getName().replace("\\", "/"));
     }
+    return principals.toArray(new String[0]);
+  }
 
   /**
   * Get all the unique principals from the keytab file which match a pattern.

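For reference, a minimal standalone sketch of reading principal names with the Apache Kerby keytab API that the change above switches to. The keytab path is a placeholder, error handling is omitted, and the separator normalization simply mirrors what getPrincipalNames does.

import java.io.File;
import java.io.IOException;

import org.apache.kerby.kerberos.kerb.keytab.Keytab;
import org.apache.kerby.kerberos.kerb.type.base.PrincipalName;

public class ListKeytabPrincipals {
  public static void main(String[] args) throws IOException {
    // Placeholder path; point this at an existing keytab file.
    Keytab keytab = Keytab.loadKeytab(new File("/etc/security/example.keytab"));
    for (PrincipalName principal : keytab.getPrincipals()) {
      // Normalize separators the same way KerberosUtil.getPrincipalNames does.
      System.out.println(principal.getName().replace("\\", "/"));
    }
  }
}
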
+ 0 - 1
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java

@@ -18,7 +18,6 @@ import org.apache.hadoop.security.authentication.KerberosTestUtils;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import org.apache.commons.codec.binary.Base64;
-import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.ietf.jgss.GSSContext;

+ 14 - 12
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java

@@ -25,11 +25,12 @@ import java.util.List;
 import java.util.Locale;
 import java.util.regex.Pattern;
 
-import org.apache.directory.server.kerberos.shared.keytab.Keytab;
-import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry;
-import org.apache.directory.shared.kerberos.KerberosTime;
-import org.apache.directory.shared.kerberos.codec.types.EncryptionType;
-import org.apache.directory.shared.kerberos.components.EncryptionKey;
+import org.apache.kerby.kerberos.kerb.keytab.Keytab;
+import org.apache.kerby.kerberos.kerb.keytab.KeytabEntry;
+import org.apache.kerby.kerberos.kerb.type.KerberosTime;
+import org.apache.kerby.kerberos.kerb.type.base.EncryptionKey;
+import org.apache.kerby.kerberos.kerb.type.base.EncryptionType;
+import org.apache.kerby.kerberos.kerb.type.base.PrincipalName;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Test;
@@ -96,14 +97,15 @@ public class TestKerberosUtil {
         KerberosUtil.getServicePrincipal(
             service, testHost.toLowerCase(Locale.US)));
   }
-  
+
   @Test
   public void testGetPrincipalNamesMissingKeytab() {
     try {
       KerberosUtil.getPrincipalNames(testKeytab);
       Assert.fail("Exception should have been thrown");
-    } catch (IOException e) {
+    } catch (IllegalArgumentException e) {
       //expects exception
+    } catch (IOException e) {
     }
   }
 
@@ -166,14 +168,14 @@ public class TestKerberosUtil {
       // duplicate principals
       for (int kvno=1; kvno <= 3; kvno++) {
         EncryptionKey key = new EncryptionKey(
-            EncryptionType.UNKNOWN, "samplekey1".getBytes(), kvno);
+            EncryptionType.NONE, "samplekey1".getBytes(), kvno);
         KeytabEntry keytabEntry = new KeytabEntry(
-            principal, 1 , new KerberosTime(), (byte) 1, key);
+            new PrincipalName(principal), new KerberosTime(), (byte) 1, key);
         lstEntries.add(keytabEntry);      
       }
     }
-    Keytab keytab = Keytab.getInstance();
-    keytab.setEntries(lstEntries);
-    keytab.write(new File(testKeytab));
+    Keytab keytab = new Keytab();
+    keytab.addKeytabEntries(lstEntries);
+    keytab.store(new File(testKeytab));
   }
 }

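Conversely, a small sketch of writing a keytab with the same Kerby types the rewritten test helper above uses; the principal, key bytes, kvno and output path are illustrative values only.

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.kerby.kerberos.kerb.keytab.Keytab;
import org.apache.kerby.kerberos.kerb.keytab.KeytabEntry;
import org.apache.kerby.kerberos.kerb.type.KerberosTime;
import org.apache.kerby.kerberos.kerb.type.base.EncryptionKey;
import org.apache.kerby.kerberos.kerb.type.base.EncryptionType;
import org.apache.kerby.kerberos.kerb.type.base.PrincipalName;

public class WriteSampleKeytab {
  public static void main(String[] args) throws IOException {
    List<KeytabEntry> entries = new ArrayList<>();
    // Sample key material and principal, as in the test helper above.
    EncryptionKey key =
        new EncryptionKey(EncryptionType.NONE, "samplekey1".getBytes(), 1);
    entries.add(new KeytabEntry(
        new PrincipalName("HTTP/localhost@EXAMPLE.COM"),
        new KerberosTime(), (byte) 1, key));

    Keytab keytab = new Keytab();
    keytab.addKeytabEntries(entries);
    keytab.store(new File("sample.keytab"));  // placeholder output path
  }
}
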
+ 5 - 4
hadoop-common-project/hadoop-common/HadoopCommon.cmake

@@ -48,11 +48,10 @@ macro(hadoop_add_compiler_flags FLAGS)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${FLAGS}")
 endmacro()
 
-# Add flags to all the CMake linker variables
+# Add flags to all the CMake linker variables.
 macro(hadoop_add_linker_flags FLAGS)
     set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAGS}")
     set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FLAGS}")
-    set(CMAKE_STATIC_LINKER_FLAGS "${CMAKE_STATIC_LINKER_FLAGS} ${FLAGS}")
 endmacro()
 
 # Compile a library with both shared and static variants.
@@ -188,7 +187,9 @@ if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
 #
 elseif(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
     # Solaris flags. 64-bit compilation is mandatory, and is checked earlier.
-    hadoop_add_compiler_flags("-m64 -D__EXTENSIONS__ -D_POSIX_PTHREAD_SEMANTICS -D_XOPEN_SOURCE=500")
+    hadoop_add_compiler_flags("-m64 -D_POSIX_C_SOURCE=200112L -D__EXTENSIONS__ -D_POSIX_PTHREAD_SEMANTICS")
+    set(CMAKE_C_FLAGS "-std=gnu99 ${CMAKE_C_FLAGS}")
+    set(CMAKE_CXX_FLAGS "-std=gnu++98 ${CMAKE_CXX_FLAGS}")
     hadoop_add_linker_flags("-m64")
 
     # CMAKE_SYSTEM_PROCESSOR is set to the output of 'uname -p', which on Solaris is
@@ -199,7 +200,7 @@ elseif(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
         set(CMAKE_SYSTEM_PROCESSOR "amd64")
         set(CMAKE_LIBRARY_ARCHITECTURE "amd64")
     elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "sparc")
-        set(CMAKE_SYSTEM_PROCESSOR STREQUAL "sparcv9")
+        set(CMAKE_SYSTEM_PROCESSOR "sparcv9")
         set(CMAKE_LIBRARY_ARCHITECTURE "sparcv9")
     else()
         message(FATAL_ERROR "Unrecognised CMAKE_SYSTEM_PROCESSOR ${CMAKE_SYSTEM_PROCESSOR}")

+ 7 - 1
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -345,7 +345,13 @@
        <Bug pattern="SF_SWITCH_FALLTHROUGH" />
      </Match>
 
-     <!-- Synchronization performed on util.concurrent instance. -->
+     <!-- WA_NOT_IN_LOOP is invalid in util.concurrent.AsyncGet$Util.wait. -->
+     <Match>
+       <Class name="org.apache.hadoop.util.concurrent.AsyncGet$Util" />
+       <Method name="wait" />
+       <Bug pattern="WA_NOT_IN_LOOP" />
+     </Match>
+
      <Match>
        <Class name="org.apache.hadoop.service.AbstractService" />
        <Method name="stop" />

+ 4 - 0
hadoop-common-project/hadoop-common/pom.xml

@@ -295,6 +295,10 @@
       <artifactId>bcprov-jdk16</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.kerby</groupId>
+      <artifactId>kerb-simplekdc</artifactId>
+    </dependency>
   </dependencies>
 
   <build>

+ 27 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java

@@ -34,7 +34,7 @@ public class KeyProviderDelegationTokenExtension extends
       new DefaultDelegationTokenExtension();
 
   /**
-   * DelegationTokenExtension is a type of Extension that exposes methods to 
+   * DelegationTokenExtension is a type of Extension that exposes methods
    * needed to work with Delegation Tokens.
    */  
   public interface DelegationTokenExtension extends 
@@ -49,8 +49,23 @@ public class KeyProviderDelegationTokenExtension extends
      * @return list of new delegation tokens
     * @throws IOException thrown if an IO error occurs.
      */
-    public Token<?>[] addDelegationTokens(final String renewer, 
+    Token<?>[] addDelegationTokens(final String renewer,
         Credentials credentials) throws IOException;
+
+    /**
+     * Renews the given token.
+     * @param token The token to be renewed.
+     * @return The token's lifetime after renewal, or 0 if it can't be renewed.
+     * @throws IOException
+     */
+    long renewDelegationToken(final Token<?> token) throws IOException;
+
+    /**
+     * Cancels the given token.
+     * @param token The token to be cancelled.
+     * @throws IOException
+     */
+    Void cancelDelegationToken(final Token<?> token) throws IOException;
   }
   
   /**
@@ -65,7 +80,16 @@ public class KeyProviderDelegationTokenExtension extends
         Credentials credentials) {
       return null;
     }
-    
+
+    @Override
+    public long renewDelegationToken(final Token<?> token) throws IOException {
+      return 0;
+    }
+
+    @Override
+    public Void cancelDelegationToken(final Token<?> token) throws IOException {
+      return null;
+    }
   }
 
   private KeyProviderDelegationTokenExtension(KeyProvider keyProvider,

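To illustrate the two new interface methods, here is a minimal sketch of how a caller might renew a delegation token through a provider that implements the extension. It mirrors the pattern the KMSTokenRenewer added further down uses; the helper name is hypothetical, and the token would normally come from addDelegationTokens().

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.KMSUtil;

public final class DelegationTokenRenewal {
  // Hypothetical helper; 'token' is assumed to come from addDelegationTokens().
  static long renewIfSupported(Configuration conf, Token<?> token)
      throws IOException {
    KeyProvider provider = KMSUtil.createKeyProvider(conf,
        KeyProviderFactory.KEY_PROVIDER_PATH);
    if (provider instanceof
        KeyProviderDelegationTokenExtension.DelegationTokenExtension) {
      return ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
          provider).renewDelegationToken(token);
    }
    return 0;  // not renewable, matching the default extension's behaviour
  }
}
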
+ 61 - 121
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java

@@ -19,7 +19,6 @@
 package org.apache.hadoop.crypto.key;
 
 import java.io.IOException;
-import java.io.PrintStream;
 import java.security.InvalidParameterException;
 import java.security.NoSuchAlgorithmException;
 import java.util.HashMap;
@@ -27,17 +26,19 @@ import java.util.List;
 import java.util.Map;
 
 import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.commons.lang.StringUtils;
+
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.crypto.key.KeyProvider.Metadata;
 import org.apache.hadoop.crypto.key.KeyProvider.Options;
-import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.tools.CommandShell;
 import org.apache.hadoop.util.ToolRunner;
 
 /**
  * This program is the CLI utility for the KeyProvider facilities in Hadoop.
  */
-public class KeyShell extends Configured implements Tool {
+public class KeyShell extends CommandShell {
   final static private String USAGE_PREFIX = "Usage: hadoop key " +
       "[generic options]\n";
   final static private String COMMANDS =
@@ -55,50 +56,12 @@ public class KeyShell extends Configured implements Tool {
       "MUST use the -provider argument.";
 
   private boolean interactive = true;
-  private Command command = null;
 
   /** If true, fail if the provider requires a password and none is given. */
   private boolean strict = false;
 
-  /** allows stdout to be captured if necessary. */
-  @VisibleForTesting
-  public PrintStream out = System.out;
-  /** allows stderr to be captured if necessary. */
-  @VisibleForTesting
-  public PrintStream err = System.err;
-
   private boolean userSuppliedProvider = false;
 
-  /**
-   * Primary entry point for the KeyShell; called via main().
-   *
-   * @param args Command line arguments.
-   * @return 0 on success and 1 on failure.  This value is passed back to
-   * the unix shell, so we must follow shell return code conventions:
-   * the return code is an unsigned character, and 0 means success, and
-   * small positive integers mean failure.
-   * @throws Exception
-   */
-  @Override
-  public int run(String[] args) throws Exception {
-    int exitCode = 0;
-    try {
-      exitCode = init(args);
-      if (exitCode != 0) {
-        return exitCode;
-      }
-      if (command.validate()) {
-        command.execute();
-      } else {
-        exitCode = 1;
-      }
-    } catch (Exception e) {
-      e.printStackTrace(err);
-      return 1;
-    }
-    return exitCode;
-  }
-
   /**
    * Parse the command line arguments and initialize the data.
    * <pre>
@@ -112,7 +75,8 @@ public class KeyShell extends Configured implements Tool {
    * @return 0 on success, 1 on failure.
    * @throws IOException
    */
-  private int init(String[] args) throws IOException {
+  @Override
+  protected int init(String[] args) throws IOException {
     final Options options = KeyProvider.options(getConf());
     final Map<String, String> attributes = new HashMap<String, String>();
 
@@ -123,10 +87,8 @@ public class KeyShell extends Configured implements Tool {
         if (moreTokens) {
           keyName = args[++i];
         }
-
-        command = new CreateCommand(keyName, options);
+        setSubCommand(new CreateCommand(keyName, options));
         if ("-help".equals(keyName)) {
-          printKeyShellUsage();
           return 1;
         }
       } else if (args[i].equals("delete")) {
@@ -134,10 +96,8 @@ public class KeyShell extends Configured implements Tool {
         if (moreTokens) {
           keyName = args[++i];
         }
-
-        command = new DeleteCommand(keyName);
+        setSubCommand(new DeleteCommand(keyName));
         if ("-help".equals(keyName)) {
-          printKeyShellUsage();
           return 1;
         }
       } else if (args[i].equals("roll")) {
@@ -145,14 +105,12 @@ public class KeyShell extends Configured implements Tool {
         if (moreTokens) {
           keyName = args[++i];
         }
-
-        command = new RollCommand(keyName);
+        setSubCommand(new RollCommand(keyName));
         if ("-help".equals(keyName)) {
-          printKeyShellUsage();
           return 1;
         }
       } else if ("list".equals(args[i])) {
-        command = new ListCommand();
+        setSubCommand(new ListCommand());
       } else if ("-size".equals(args[i]) && moreTokens) {
         options.setBitLength(Integer.parseInt(args[++i]));
       } else if ("-cipher".equals(args[i]) && moreTokens) {
@@ -164,15 +122,13 @@ public class KeyShell extends Configured implements Tool {
         final String attr = attrval[0].trim();
         final String val = attrval[1].trim();
         if (attr.isEmpty() || val.isEmpty()) {
-          out.println("\nAttributes must be in attribute=value form, " +
-                  "or quoted\nlike \"attribute = value\"\n");
-          printKeyShellUsage();
+          getOut().println("\nAttributes must be in attribute=value form, " +
+              "or quoted\nlike \"attribute = value\"\n");
           return 1;
         }
         if (attributes.containsKey(attr)) {
-          out.println("\nEach attribute must correspond to only one value:\n" +
-                  "atttribute \"" + attr + "\" was repeated\n" );
-          printKeyShellUsage();
+          getOut().println("\nEach attribute must correspond to only one " +
+              "value:\natttribute \"" + attr + "\" was repeated\n");
           return 1;
         }
         attributes.put(attr, val);
@@ -186,20 +142,13 @@ public class KeyShell extends Configured implements Tool {
       } else if (args[i].equals("-strict")) {
         strict = true;
       } else if ("-help".equals(args[i])) {
-        printKeyShellUsage();
         return 1;
       } else {
-        printKeyShellUsage();
-        ToolRunner.printGenericCommandUsage(System.err);
+        ToolRunner.printGenericCommandUsage(getErr());
         return 1;
       }
     }
 
-    if (command == null) {
-      printKeyShellUsage();
-      return 1;
-    }
-
     if (!attributes.isEmpty()) {
       options.setAttributes(attributes);
     }
@@ -207,33 +156,24 @@ public class KeyShell extends Configured implements Tool {
     return 0;
   }
 
-  private void printKeyShellUsage() {
-    out.println(USAGE_PREFIX + COMMANDS);
-    if (command != null) {
-      out.println(command.getUsage());
-    } else {
-      out.println("=========================================================" +
-          "======");
-      out.println(CreateCommand.USAGE + ":\n\n" + CreateCommand.DESC);
-      out.println("=========================================================" +
-          "======");
-      out.println(RollCommand.USAGE + ":\n\n" + RollCommand.DESC);
-      out.println("=========================================================" +
-          "======");
-      out.println(DeleteCommand.USAGE + ":\n\n" + DeleteCommand.DESC);
-      out.println("=========================================================" +
-          "======");
-      out.println(ListCommand.USAGE + ":\n\n" + ListCommand.DESC);
-    }
+  @Override
+  public String getCommandUsage() {
+    StringBuffer sbuf = new StringBuffer(USAGE_PREFIX + COMMANDS);
+    String banner = StringUtils.repeat("=", 66);
+    sbuf.append(banner + "\n");
+    sbuf.append(CreateCommand.USAGE + ":\n\n" + CreateCommand.DESC + "\n");
+    sbuf.append(banner + "\n");
+    sbuf.append(RollCommand.USAGE + ":\n\n" + RollCommand.DESC + "\n");
+    sbuf.append(banner + "\n");
+    sbuf.append(DeleteCommand.USAGE + ":\n\n" + DeleteCommand.DESC + "\n");
+    sbuf.append(banner + "\n");
+    sbuf.append(ListCommand.USAGE + ":\n\n" + ListCommand.DESC + "\n");
+    return sbuf.toString();
   }
 
-  private abstract class Command {
+  private abstract class Command extends SubCommand {
     protected KeyProvider provider = null;
 
-    public boolean validate() {
-      return true;
-    }
-
     protected KeyProvider getKeyProvider() {
       KeyProvider prov = null;
       List<KeyProvider> providers;
@@ -250,21 +190,21 @@ public class KeyShell extends Configured implements Tool {
           }
         }
       } catch (IOException e) {
-        e.printStackTrace(err);
+        e.printStackTrace(getErr());
       }
       if (prov == null) {
-        out.println(NO_VALID_PROVIDERS);
+        getOut().println(NO_VALID_PROVIDERS);
       }
       return prov;
     }
 
     protected void printProviderWritten() {
-      out.println(provider + " has been updated.");
+      getOut().println(provider + " has been updated.");
     }
 
     protected void warnIfTransientProvider() {
       if (provider.isTransient()) {
-        out.println("WARNING: you are modifying a transient provider.");
+        getOut().println("WARNING: you are modifying a transient provider.");
       }
     }
 
@@ -298,20 +238,20 @@ public class KeyShell extends Configured implements Tool {
     public void execute() throws IOException {
       try {
         final List<String> keys = provider.getKeys();
-        out.println("Listing keys for KeyProvider: " + provider);
+        getOut().println("Listing keys for KeyProvider: " + provider);
         if (metadata) {
           final Metadata[] meta =
             provider.getKeysMetadata(keys.toArray(new String[keys.size()]));
           for (int i = 0; i < meta.length; ++i) {
-            out.println(keys.get(i) + " : " + meta[i]);
+            getOut().println(keys.get(i) + " : " + meta[i]);
           }
         } else {
           for (String keyName : keys) {
-            out.println(keyName);
+            getOut().println(keyName);
           }
         }
       } catch (IOException e) {
-        out.println("Cannot list keys for KeyProvider: " + provider
+        getOut().println("Cannot list keys for KeyProvider: " + provider
             + ": " + e.toString());
         throw e;
       }
@@ -345,7 +285,7 @@ public class KeyShell extends Configured implements Tool {
         rc = false;
       }
       if (keyName == null) {
-        out.println("Please provide a <keyname>.\n" +
+        getOut().println("Please provide a <keyname>.\n" +
             "See the usage description by using -help.");
         rc = false;
       }
@@ -355,20 +295,20 @@ public class KeyShell extends Configured implements Tool {
     public void execute() throws NoSuchAlgorithmException, IOException {
       try {
         warnIfTransientProvider();
-        out.println("Rolling key version from KeyProvider: "
+        getOut().println("Rolling key version from KeyProvider: "
             + provider + "\n  for key name: " + keyName);
         try {
           provider.rollNewVersion(keyName);
           provider.flush();
-          out.println(keyName + " has been successfully rolled.");
+          getOut().println(keyName + " has been successfully rolled.");
           printProviderWritten();
         } catch (NoSuchAlgorithmException e) {
-          out.println("Cannot roll key: " + keyName + " within KeyProvider: "
-              + provider + ". " + e.toString());
+          getOut().println("Cannot roll key: " + keyName +
+              " within KeyProvider: " + provider + ". " + e.toString());
           throw e;
         }
       } catch (IOException e1) {
-        out.println("Cannot roll key: " + keyName + " within KeyProvider: "
+        getOut().println("Cannot roll key: " + keyName + " within KeyProvider: "
             + provider + ". " + e1.toString());
         throw e1;
       }
@@ -405,7 +345,7 @@ public class KeyShell extends Configured implements Tool {
         return false;
       }
       if (keyName == null) {
-        out.println("There is no keyName specified. Please specify a " +
+        getOut().println("There is no keyName specified. Please specify a " +
             "<keyname>. See the usage description with -help.");
         return false;
       }
@@ -416,12 +356,12 @@ public class KeyShell extends Configured implements Tool {
                   + " key " + keyName + " from KeyProvider "
                   + provider + ". Continue? ");
           if (!cont) {
-            out.println(keyName + " has not been deleted.");
+            getOut().println(keyName + " has not been deleted.");
           }
           return cont;
         } catch (IOException e) {
-          out.println(keyName + " will not be deleted.");
-          e.printStackTrace(err);
+          getOut().println(keyName + " will not be deleted.");
+          e.printStackTrace(getErr());
         }
       }
       return true;
@@ -429,16 +369,16 @@ public class KeyShell extends Configured implements Tool {
 
     public void execute() throws IOException {
       warnIfTransientProvider();
-      out.println("Deleting key: " + keyName + " from KeyProvider: "
+      getOut().println("Deleting key: " + keyName + " from KeyProvider: "
           + provider);
       if (cont) {
         try {
           provider.deleteKey(keyName);
           provider.flush();
-          out.println(keyName + " has been successfully deleted.");
+          getOut().println(keyName + " has been successfully deleted.");
           printProviderWritten();
         } catch (IOException e) {
-          out.println(keyName + " has not been deleted. " + e.toString());
+          getOut().println(keyName + " has not been deleted. " + e.toString());
           throw e;
         }
       }
@@ -483,18 +423,18 @@ public class KeyShell extends Configured implements Tool {
           rc = false;
         } else if (provider.needsPassword()) {
           if (strict) {
-            out.println(provider.noPasswordError());
+            getOut().println(provider.noPasswordError());
             rc = false;
           } else {
-            out.println(provider.noPasswordWarning());
+            getOut().println(provider.noPasswordWarning());
           }
         }
       } catch (IOException e) {
-        e.printStackTrace(err);
+        e.printStackTrace(getErr());
       }
       if (keyName == null) {
-        out.println("Please provide a <keyname>. See the usage description" +
-            " with -help.");
+        getOut().println("Please provide a <keyname>. " +
+            " See the usage description with -help.");
         rc = false;
       }
       return rc;
@@ -505,17 +445,17 @@ public class KeyShell extends Configured implements Tool {
       try {
         provider.createKey(keyName, options);
         provider.flush();
-        out.println(keyName + " has been successfully created with options "
-            + options.toString() + ".");
+        getOut().println(keyName + " has been successfully created " +
+            "with options " + options.toString() + ".");
         printProviderWritten();
       } catch (InvalidParameterException e) {
-        out.println(keyName + " has not been created. " + e.toString());
+        getOut().println(keyName + " has not been created. " + e.toString());
         throw e;
       } catch (IOException e) {
-        out.println(keyName + " has not been created. " + e.toString());
+        getOut().println(keyName + " has not been created. " + e.toString());
         throw e;
       } catch (NoSuchAlgorithmException e) {
-        out.println(keyName + " has not been created. " + e.toString());
+        getOut().println(keyName + " has not been created. " + e.toString());
         throw e;
       }
     }

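Since KeyShell now extends CommandShell, it is still driven as a Tool. A short sketch of invoking it programmatically follows; the arguments and the jceks provider URI are illustrative placeholders, equivalent to "hadoop key list -provider <uri>".

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyShell;
import org.apache.hadoop.util.ToolRunner;

public class KeyShellDriver {
  public static void main(String[] args) throws Exception {
    // Illustrative arguments; the provider URI is a placeholder.
    String[] shellArgs = {"list", "-provider", "jceks://file/tmp/example.jceks"};
    int rc = ToolRunner.run(new Configuration(), new KeyShell(), shellArgs);
    System.exit(rc);
  }
}
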
+ 148 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java

@@ -38,8 +38,11 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
 import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenRenewer;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.hadoop.util.HttpExceptionUtils;
+import org.apache.hadoop.util.KMSUtil;
 import org.apache.http.client.utils.URIBuilder;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.slf4j.Logger;
@@ -94,7 +97,8 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
 
   private static final String ANONYMOUS_REQUESTS_DISALLOWED = "Anonymous requests are disallowed";
 
-  public static final String TOKEN_KIND = "kms-dt";
+  public static final String TOKEN_KIND_STR = "kms-dt";
+  public static final Text TOKEN_KIND = new Text(TOKEN_KIND_STR);
 
   public static final String SCHEME_NAME = "kms";
 
@@ -146,6 +150,54 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     }
   }
 
+  /**
+   * The KMS implementation of {@link TokenRenewer}.
+   */
+  public static class KMSTokenRenewer extends TokenRenewer {
+    private static final Logger LOG =
+        LoggerFactory.getLogger(KMSTokenRenewer.class);
+
+    @Override
+    public boolean handleKind(Text kind) {
+      return kind.equals(TOKEN_KIND);
+    }
+
+    @Override
+    public boolean isManaged(Token<?> token) throws IOException {
+      return true;
+    }
+
+    @Override
+    public long renew(Token<?> token, Configuration conf) throws IOException {
+      LOG.debug("Renewing delegation token {}", token);
+      KeyProvider keyProvider = KMSUtil.createKeyProvider(conf,
+          KeyProviderFactory.KEY_PROVIDER_PATH);
+      if (!(keyProvider instanceof
+          KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
+        LOG.warn("keyProvider {} cannot renew dt.", keyProvider == null ?
+            "null" : keyProvider.getClass());
+        return 0;
+      }
+      return ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
+          keyProvider).renewDelegationToken(token);
+    }
+
+    @Override
+    public void cancel(Token<?> token, Configuration conf) throws IOException {
+      LOG.debug("Canceling delegation token {}", token);
+      KeyProvider keyProvider = KMSUtil.createKeyProvider(conf,
+          KeyProviderFactory.KEY_PROVIDER_PATH);
+      if (!(keyProvider instanceof
+          KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
+        LOG.warn("keyProvider {} cannot cancel dt.", keyProvider == null ?
+            "null" : keyProvider.getClass());
+        return;
+      }
+      ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
+          keyProvider).cancelDelegationToken(token);
+    }
+  }
+
   public static class KMSEncryptedKeyVersion extends EncryptedKeyVersion {
     public KMSEncryptedKeyVersion(String keyName, String keyVersionName,
         byte[] iv, String encryptedVersionName, byte[] keyMaterial) {
@@ -853,6 +905,100 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     return encKeyVersionQueue.getSize(keyName);
   }
 
+  @Override
+  public long renewDelegationToken(final Token<?> dToken) throws IOException {
+    try {
+      final String doAsUser = getDoAsUser();
+      final DelegationTokenAuthenticatedURL.Token token =
+          generateDelegationToken(dToken);
+      final URL url = createURL(null, null, null, null);
+      LOG.debug("Renewing delegation token {} with url:{}, as:{}",
+          token, url, doAsUser);
+      final DelegationTokenAuthenticatedURL authUrl =
+          new DelegationTokenAuthenticatedURL(configurator);
+      return actualUgi.doAs(
+          new PrivilegedExceptionAction<Long>() {
+            @Override
+            public Long run() throws Exception {
+              return authUrl.renewDelegationToken(url, token, doAsUser);
+            }
+          }
+      );
+    } catch (Exception ex) {
+      if (ex instanceof IOException) {
+        throw (IOException) ex;
+      } else {
+        throw new IOException(ex);
+      }
+    }
+  }
+
+  @Override
+  public Void cancelDelegationToken(final Token<?> dToken) throws IOException {
+    try {
+      final String doAsUser = getDoAsUser();
+      final DelegationTokenAuthenticatedURL.Token token =
+          generateDelegationToken(dToken);
+      return actualUgi.doAs(
+          new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+              final URL url = createURL(null, null, null, null);
+              LOG.debug("Cancelling delegation token {} with url:{}, as:{}",
+                  dToken, url, doAsUser);
+              final DelegationTokenAuthenticatedURL authUrl =
+                  new DelegationTokenAuthenticatedURL(configurator);
+              authUrl.cancelDelegationToken(url, token, doAsUser);
+              return null;
+            }
+          }
+      );
+    } catch (Exception ex) {
+      if (ex instanceof IOException) {
+        throw (IOException) ex;
+      } else {
+        throw new IOException(ex);
+      }
+    }
+  }
+
+  /**
+   * Get the doAs user name.
+   *
+   * 'actualUGI' is the UGI of the user creating the client.
+   * It is possible that the creator of the KMSClientProvider
+   * calls this method on behalf of a proxyUser (the doAsUser),
+   * in which case the call has to be made as the proxy user.
+   *
+   * @return the doAs user name.
+   * @throws IOException
+   */
+  private String getDoAsUser() throws IOException {
+    UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
+    return (currentUgi.getAuthenticationMethod() ==
+        UserGroupInformation.AuthenticationMethod.PROXY)
+        ? currentUgi.getShortUserName() : null;
+  }
+
+  /**
+   * Generate a DelegationTokenAuthenticatedURL.Token from the given
+   * generically typed delegation token.
+   *
+   * @param dToken The delegation token.
+   * @return The DelegationTokenAuthenticatedURL.Token, with its delegation
+   *         token set to the delegation token passed in.
+   */
+  private DelegationTokenAuthenticatedURL.Token generateDelegationToken(
+      final Token<?> dToken) {
+    DelegationTokenAuthenticatedURL.Token token =
+        new DelegationTokenAuthenticatedURL.Token();
+    Token<AbstractDelegationTokenIdentifier> dt =
+        new Token<>(dToken.getIdentifier(), dToken.getPassword(),
+            dToken.getKind(), dToken.getService());
+    token.setDelegationToken(dt);
+    return token;
+  }
+
   @Override
   public Token<?>[] addDelegationTokens(final String renewer,
       Credentials credentials) throws IOException {
@@ -864,15 +1010,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       final DelegationTokenAuthenticatedURL authUrl =
           new DelegationTokenAuthenticatedURL(configurator);
       try {
-        // 'actualUGI' is the UGI of the user creating the client 
-        // It is possible that the creator of the KMSClientProvier
-        // calls this method on behalf of a proxyUser (the doAsUser).
-        // In which case this call has to be made as the proxy user.
-        UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
-        final String doAsUser = (currentUgi.getAuthenticationMethod() ==
-            UserGroupInformation.AuthenticationMethod.PROXY)
-                                ? currentUgi.getShortUserName() : null;
-
+        final String doAsUser = getDoAsUser();
         token = actualUgi.doAs(new PrivilegedExceptionAction<Token<?>>() {
           @Override
           public Token<?> run() throws Exception {

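A minimal usage sketch (not part of this patch) of the renewal path added above. It assumes KMSTokenRenewer is registered as a TokenRenewer service and that KeyProviderFactory.KEY_PROVIDER_PATH in the Configuration points at the KMS; any names not taken from the hunks above are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;

    public class KmsTokenRenewSketch {
      /** Renew every kms-dt token held by the current user's credentials. */
      public static void renewKmsTokens(Configuration conf) throws Exception {
        Credentials creds =
            UserGroupInformation.getCurrentUser().getCredentials();
        for (Token<?> token : creds.getAllTokens()) {
          if (KMSClientProvider.TOKEN_KIND.equals(token.getKind())) {
            // Dispatches to KMSTokenRenewer.renew(), which in turn calls
            // renewDelegationToken() on the configured key provider.
            long newExpiry = token.renew(conf);
            System.out.println("kms-dt renewed until " + newExpiry);
            // token.cancel(conf) would dispatch to KMSTokenRenewer.cancel().
          }
        }
      }
    }
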
+ 37 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java

@@ -134,6 +134,27 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
     }, nextIdx());
   }
 
+  @Override
+  public long renewDelegationToken(final Token<?> token) throws IOException {
+    return doOp(new ProviderCallable<Long>() {
+      @Override
+      public Long call(KMSClientProvider provider) throws IOException {
+        return provider.renewDelegationToken(token);
+      }
+    }, nextIdx());
+  }
+
+  @Override
+  public Void cancelDelegationToken(final Token<?> token) throws IOException {
+    return doOp(new ProviderCallable<Void>() {
+      @Override
+      public Void call(KMSClientProvider provider) throws IOException {
+        provider.cancelDelegationToken(token);
+        return null;
+      }
+    }, nextIdx());
+  }
+
   // This request is sent to all providers in the load-balancing group
   @Override
   public void warmUpEncryptedKeys(String... keyNames) throws IOException {
@@ -169,7 +190,10 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
         }
       }, nextIdx());
     } catch (WrapperException we) {
-      throw (GeneralSecurityException) we.getCause();
+      if (we.getCause() instanceof GeneralSecurityException) {
+        throw (GeneralSecurityException) we.getCause();
+      }
+      throw new IOException(we.getCause());
     }
   }
 
@@ -186,7 +210,10 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
         }
       }, nextIdx());
     } catch (WrapperException we) {
-      throw (GeneralSecurityException)we.getCause();
+      if (we.getCause() instanceof GeneralSecurityException) {
+        throw (GeneralSecurityException) we.getCause();
+      }
+      throw new IOException(we.getCause());
     }
   }
 
@@ -273,7 +300,10 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
         }
       }, nextIdx());
     } catch (WrapperException e) {
-      throw (NoSuchAlgorithmException)e.getCause();
+      if (e.getCause() instanceof GeneralSecurityException) {
+        throw (NoSuchAlgorithmException) e.getCause();
+      }
+      throw new IOException(e.getCause());
     }
   }
   @Override
@@ -309,7 +339,10 @@ public class LoadBalancingKMSClientProvider extends KeyProvider implements
         }
       }, nextIdx());
     } catch (WrapperException e) {
-      throw (NoSuchAlgorithmException)e.getCause();
+      if (e.getCause() instanceof GeneralSecurityException) {
+        throw (NoSuchAlgorithmException) e.getCause();
+      }
+      throw new IOException(e.getCause());
     }
   }
 

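The reworked catch blocks above unwrap the provider's internal WrapperException and fall back to IOException when the cause is not the expected checked type. A self-contained sketch of that pattern (the WrapperException class below is an illustrative stand-in, not the provider's internal one):

    import java.io.IOException;
    import java.security.GeneralSecurityException;

    public class UnwrapSketch {
      /** Illustrative stand-in for the provider-internal wrapper exception. */
      static class WrapperException extends RuntimeException {
        WrapperException(Throwable cause) { super(cause); }
      }

      static void run(Runnable op) throws IOException, GeneralSecurityException {
        try {
          op.run();
        } catch (WrapperException we) {
          Throwable cause = we.getCause();
          if (cause instanceof GeneralSecurityException) {
            throw (GeneralSecurityException) cause; // rethrow the checked type
          }
          throw new IOException(cause); // anything else becomes an IOException
        }
      }
    }
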
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java

@@ -307,7 +307,7 @@ public class ValueQueue <E> {
         ekvs.add(val);
       }
     } catch (Exception e) {
-      throw new IOException("Exeption while contacting value generator ", e);
+      throw new IOException("Exception while contacting value generator ", e);
     }
     return ekvs;
   }

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -204,6 +204,9 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String
   HADOOP_SECURITY_SERVICE_AUTHORIZATION_TRACING =
       "security.trace.protocol.acl";
+  public static final String
+      HADOOP_SECURITY_SERVICE_AUTHORIZATION_DATANODE_LIFELINE =
+          "security.datanode.lifeline.protocol.acl";
   public static final String 
   SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl";
   public static final String 

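The new constant names the service-level authorization ACL for the DataNode lifeline protocol. A small sketch reading it from a Configuration (the "*" default is illustrative only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class LifelineAclSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        String acl = conf.get(
            CommonConfigurationKeys
                .HADOOP_SECURITY_SERVICE_AUTHORIZATION_DATANODE_LIFELINE,
            "*"); // illustrative default: allow everyone
        System.out.println("security.datanode.lifeline.protocol.acl = " + acl);
      }
    }
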
+ 4 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/InvalidRequestException.java

@@ -29,4 +29,8 @@ public class InvalidRequestException extends IOException {
   public InvalidRequestException(String str) {
     super(str);
   }
+
+  public InvalidRequestException(String message, Throwable cause) {
+    super(message, cause);
+  }
 }

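A short sketch of the new cause-carrying constructor (the operation name and helper method below are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.fs.InvalidRequestException;

    public class InvalidRequestSketch {
      static void reject(String op, Throwable cause) throws IOException {
        // The new (message, cause) constructor preserves the original failure.
        throw new InvalidRequestException("Unsupported operation: " + op, cause);
      }
    }
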
+ 94 - 59
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java

@@ -20,9 +20,10 @@ package org.apache.hadoop.fs;
 
 import java.io.*;
 import java.util.*;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.commons.logging.*;
-
 import org.apache.hadoop.util.*;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -247,74 +248,101 @@ public class LocalDirAllocator {
     private final Log LOG =
       LogFactory.getLog(AllocatorPerContext.class);
 
-    private int dirNumLastAccessed;
     private Random dirIndexRandomizer = new Random();
-    private FileSystem localFS;
-    private DF[] dirDF = new DF[0];
     private String contextCfgItemName;
-    private String[] localDirs = new String[0];
-    private String savedLocalDirs = "";
+
+    // NOTE: the context must be accessed via a local reference as it
+    //       may be updated at any time to reference a different context
+    private AtomicReference<Context> currentContext;
+
+    private static class Context {
+      private AtomicInteger dirNumLastAccessed = new AtomicInteger(0);
+      private FileSystem localFS;
+      private DF[] dirDF;
+      private Path[] localDirs;
+      private String savedLocalDirs;
+
+      public int getAndIncrDirNumLastAccessed() {
+        return getAndIncrDirNumLastAccessed(1);
+      }
+
+      public int getAndIncrDirNumLastAccessed(int delta) {
+        if (localDirs.length < 2 || delta == 0) {
+          return dirNumLastAccessed.get();
+        }
+        int oldval, newval;
+        do {
+          oldval = dirNumLastAccessed.get();
+          newval = (oldval + delta) % localDirs.length;
+        } while (!dirNumLastAccessed.compareAndSet(oldval, newval));
+        return oldval;
+      }
+    }
 
     public AllocatorPerContext(String contextCfgItemName) {
       this.contextCfgItemName = contextCfgItemName;
+      this.currentContext = new AtomicReference<Context>(new Context());
     }
 
     /** This method gets called everytime before any read/write to make sure
      * that any change to localDirs is reflected immediately.
      */
-    private synchronized void confChanged(Configuration conf) 
+    private Context confChanged(Configuration conf)
         throws IOException {
+      Context ctx = currentContext.get();
       String newLocalDirs = conf.get(contextCfgItemName);
       if (null == newLocalDirs) {
         throw new IOException(contextCfgItemName + " not configured");
       }
-      if (!newLocalDirs.equals(savedLocalDirs)) {
-        localDirs = StringUtils.getTrimmedStrings(newLocalDirs);
-        localFS = FileSystem.getLocal(conf);
-        int numDirs = localDirs.length;
-        ArrayList<String> dirs = new ArrayList<String>(numDirs);
+      if (!newLocalDirs.equals(ctx.savedLocalDirs)) {
+        ctx = new Context();
+        String[] dirStrings = StringUtils.getTrimmedStrings(newLocalDirs);
+        ctx.localFS = FileSystem.getLocal(conf);
+        int numDirs = dirStrings.length;
+        ArrayList<Path> dirs = new ArrayList<Path>(numDirs);
         ArrayList<DF> dfList = new ArrayList<DF>(numDirs);
         for (int i = 0; i < numDirs; i++) {
           try {
             // filter problematic directories
-            Path tmpDir = new Path(localDirs[i]);
-            if(localFS.mkdirs(tmpDir)|| localFS.exists(tmpDir)) {
+            Path tmpDir = new Path(dirStrings[i]);
+            if(ctx.localFS.mkdirs(tmpDir)|| ctx.localFS.exists(tmpDir)) {
               try {
-
                 File tmpFile = tmpDir.isAbsolute()
-                  ? new File(localFS.makeQualified(tmpDir).toUri())
-                  : new File(localDirs[i]);
+                    ? new File(ctx.localFS.makeQualified(tmpDir).toUri())
+                    : new File(dirStrings[i]);
 
                 DiskChecker.checkDir(tmpFile);
-                dirs.add(tmpFile.getPath());
+                dirs.add(new Path(tmpFile.getPath()));
                 dfList.add(new DF(tmpFile, 30000));
-
               } catch (DiskErrorException de) {
-                LOG.warn( localDirs[i] + " is not writable\n", de);
+                LOG.warn(dirStrings[i] + " is not writable\n", de);
               }
             } else {
-              LOG.warn( "Failed to create " + localDirs[i]);
+              LOG.warn("Failed to create " + dirStrings[i]);
             }
           } catch (IOException ie) { 
-            LOG.warn( "Failed to create " + localDirs[i] + ": " +
+            LOG.warn("Failed to create " + dirStrings[i] + ": " +
                 ie.getMessage() + "\n", ie);
           } //ignore
         }
-        localDirs = dirs.toArray(new String[dirs.size()]);
-        dirDF = dfList.toArray(new DF[dirs.size()]);
-        savedLocalDirs = newLocalDirs;
-        
+        ctx.localDirs = dirs.toArray(new Path[dirs.size()]);
+        ctx.dirDF = dfList.toArray(new DF[dirs.size()]);
+        ctx.savedLocalDirs = newLocalDirs;
+
         if (dirs.size() > 0) {
           // randomize the first disk picked in the round-robin selection
-          dirNumLastAccessed = dirIndexRandomizer.nextInt(dirs.size());
+          ctx.dirNumLastAccessed.set(dirIndexRandomizer.nextInt(dirs.size()));
         }
+
+        currentContext.set(ctx);
       }
+
+      return ctx;
     }
 
-    private Path createPath(String path, 
+    private Path createPath(Path dir, String path,
         boolean checkWrite) throws IOException {
-      Path file = new Path(new Path(localDirs[dirNumLastAccessed]),
-                                    path);
+      Path file = new Path(dir, path);
       if (checkWrite) {
         //check whether we are able to create a directory here. If the disk
         //happens to be RDONLY we will fail
@@ -334,7 +362,7 @@ public class LocalDirAllocator {
      * @return the current directory index.
      */
     int getCurrentDirectoryIndex() {
-      return dirNumLastAccessed;
+      return currentContext.get().dirNumLastAccessed.get();
     }
 
     /** Get a path from the local FS. If size is known, we go
@@ -344,10 +372,10 @@ public class LocalDirAllocator {
      *  If size is not known, use roulette selection -- pick directories
      *  with probability proportional to their available space.
      */
-    public synchronized Path getLocalPathForWrite(String pathStr, long size, 
+    public Path getLocalPathForWrite(String pathStr, long size,
         Configuration conf, boolean checkWrite) throws IOException {
-      confChanged(conf);
-      int numDirs = localDirs.length;
+      Context ctx = confChanged(conf);
+      int numDirs = ctx.localDirs.length;
       int numDirsSearched = 0;
       //remove the leading slash from the path (to make sure that the uri
       //resolution results in a valid path on the dir being checked)
@@ -358,12 +386,12 @@ public class LocalDirAllocator {
       
       if(size == SIZE_UNKNOWN) {  //do roulette selection: pick dir with probability 
                     //proportional to available size
-        long[] availableOnDisk = new long[dirDF.length];
+        long[] availableOnDisk = new long[ctx.dirDF.length];
         long totalAvailable = 0;
         
             //build the "roulette wheel"
-        for(int i =0; i < dirDF.length; ++i) {
-          availableOnDisk[i] = dirDF[i].getAvailable();
+        for(int i =0; i < ctx.dirDF.length; ++i) {
+          availableOnDisk[i] = ctx.dirDF[i].getAvailable();
           totalAvailable += availableOnDisk[i];
         }
 
@@ -380,8 +408,8 @@ public class LocalDirAllocator {
             randomPosition -= availableOnDisk[dir];
             dir++;
           }
-          dirNumLastAccessed = dir;
-          returnPath = createPath(pathStr, checkWrite);
+          ctx.dirNumLastAccessed.set(dir);
+          returnPath = createPath(ctx.localDirs[dir], pathStr, checkWrite);
           if (returnPath == null) {
             totalAvailable -= availableOnDisk[dir];
             availableOnDisk[dir] = 0; // skip this disk
@@ -389,15 +417,21 @@ public class LocalDirAllocator {
           }
         }
       } else {
-        while (numDirsSearched < numDirs && returnPath == null) {
-          long capacity = dirDF[dirNumLastAccessed].getAvailable();
+        int dirNum = ctx.getAndIncrDirNumLastAccessed();
+        while (numDirsSearched < numDirs) {
+          long capacity = ctx.dirDF[dirNum].getAvailable();
           if (capacity > size) {
-            returnPath = createPath(pathStr, checkWrite);
+            returnPath =
+                createPath(ctx.localDirs[dirNum], pathStr, checkWrite);
+            if (returnPath != null) {
+              ctx.getAndIncrDirNumLastAccessed(numDirsSearched);
+              break;
+            }
           }
-          dirNumLastAccessed++;
-          dirNumLastAccessed = dirNumLastAccessed % numDirs; 
+          dirNum++;
+          dirNum = dirNum % numDirs;
           numDirsSearched++;
-        } 
+        }
       }
       if (returnPath != null) {
         return returnPath;
@@ -432,10 +466,10 @@ public class LocalDirAllocator {
      *  configured dirs for the file's existence and return the complete
      *  path to the file when we find one 
      */
-    public synchronized Path getLocalPathToRead(String pathStr, 
+    public Path getLocalPathToRead(String pathStr,
         Configuration conf) throws IOException {
-      confChanged(conf);
-      int numDirs = localDirs.length;
+      Context ctx = confChanged(conf);
+      int numDirs = ctx.localDirs.length;
       int numDirsSearched = 0;
       //remove the leading slash from the path (to make sure that the uri
       //resolution results in a valid path on the dir being checked)
@@ -443,8 +477,8 @@ public class LocalDirAllocator {
         pathStr = pathStr.substring(1);
       }
       while (numDirsSearched < numDirs) {
-        Path file = new Path(localDirs[numDirsSearched], pathStr);
-        if (localFS.exists(file)) {
+        Path file = new Path(ctx.localDirs[numDirsSearched], pathStr);
+        if (ctx.localFS.exists(file)) {
           return file;
         }
         numDirsSearched++;
@@ -459,10 +493,10 @@ public class LocalDirAllocator {
       private final FileSystem fs;
       private final String pathStr;
       private int i = 0;
-      private final String[] rootDirs;
+      private final Path[] rootDirs;
       private Path next = null;
 
-      private PathIterator(FileSystem fs, String pathStr, String[] rootDirs)
+      private PathIterator(FileSystem fs, String pathStr, Path[] rootDirs)
           throws IOException {
         this.fs = fs;
         this.pathStr = pathStr;
@@ -517,21 +551,22 @@ public class LocalDirAllocator {
      * @return all of the paths that exist under any of the roots
      * @throws IOException
      */
-    synchronized Iterable<Path> getAllLocalPathsToRead(String pathStr,
+    Iterable<Path> getAllLocalPathsToRead(String pathStr,
         Configuration conf) throws IOException {
-      confChanged(conf);
+      Context ctx = confChanged(conf);
       if (pathStr.startsWith("/")) {
         pathStr = pathStr.substring(1);
       }
-      return new PathIterator(localFS, pathStr, localDirs);
+      return new PathIterator(ctx.localFS, pathStr, ctx.localDirs);
     }
 
     /** We search through all the configured dirs for the file's existence
      *  and return true when we find one 
      */
-    public synchronized boolean ifExists(String pathStr,Configuration conf) {
+    public boolean ifExists(String pathStr, Configuration conf) {
+      Context ctx = currentContext.get();
       try {
-        int numDirs = localDirs.length;
+        int numDirs = ctx.localDirs.length;
         int numDirsSearched = 0;
         //remove the leading slash from the path (to make sure that the uri
         //resolution results in a valid path on the dir being checked)
@@ -539,8 +574,8 @@ public class LocalDirAllocator {
           pathStr = pathStr.substring(1);
         }
         while (numDirsSearched < numDirs) {
-          Path file = new Path(localDirs[numDirsSearched], pathStr);
-          if (localFS.exists(file)) {
+          Path file = new Path(ctx.localDirs[numDirsSearched], pathStr);
+          if (ctx.localFS.exists(file)) {
             return true;
           }
           numDirsSearched++;

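A brief usage sketch of LocalDirAllocator, whose per-context state becomes lock-free above (the configuration key "test.local.dirs" and the paths are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.LocalDirAllocator;
    import org.apache.hadoop.fs.Path;

    public class LocalDirAllocatorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Comma-separated local directories to spread writes across.
        conf.set("test.local.dirs", "/tmp/dir1,/tmp/dir2");
        LocalDirAllocator alloc = new LocalDirAllocator("test.local.dirs");

        // Known size: round-robin over directories with enough free space.
        Path known = alloc.getLocalPathForWrite("scratch/part-0", 1024L, conf);
        // Unknown size: roulette selection proportional to free space.
        Path unknown = alloc.getLocalPathForWrite("scratch/part-1", conf);
        System.out.println(known + " , " + unknown);
      }
    }
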
+ 11 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathAccessDeniedException.java

@@ -24,4 +24,14 @@ public class PathAccessDeniedException extends PathIOException {
   public PathAccessDeniedException(String path) {
     super(path, "Permission denied");
   }
-}
+
+  public PathAccessDeniedException(String path, Throwable cause) {
+    super(path, cause);
+  }
+
+  public PathAccessDeniedException(String path,
+      String error,
+      Throwable cause) {
+    super(path, error, cause);
+  }
+}

+ 17 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathNotFoundException.java

@@ -18,12 +18,26 @@
 package org.apache.hadoop.fs;
 
 /**
- * Exception corresponding to Permission denied - ENOENT
+ * Exception corresponding to path not found: ENOENT/ENOFILE
  */
 public class PathNotFoundException extends PathIOException {
   static final long serialVersionUID = 0L;
   /** @param path for the exception */
   public PathNotFoundException(String path) {
     super(path, "No such file or directory");
-  }    
-}
+  }
+
+  public PathNotFoundException(String path, Throwable cause) {
+    super(path, cause);
+  }
+
+  public PathNotFoundException(String path, String error) {
+    super(path, error);
+  }
+
+  public PathNotFoundException(String path,
+      String error,
+      Throwable cause) {
+    super(path, error, cause);
+  }
+}

+ 15 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PathPermissionException.java

@@ -26,4 +26,18 @@ public class PathPermissionException extends PathIOException {
   public PathPermissionException(String path) {
     super(path, "Operation not permitted");
   }
-}
+
+  public PathPermissionException(String path, Throwable cause) {
+    super(path, cause);
+  }
+
+  public PathPermissionException(String path, String error) {
+    super(path, error);
+  }
+
+  public PathPermissionException(String path,
+      String error,
+      Throwable cause) {
+    super(path, error, cause);
+  }
+}

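The Path*Exception classes above gain cause-carrying constructors; a sketch of how they keep the underlying failure attached (the failing lookup is illustrative):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.PathNotFoundException;

    public class PathExceptionSketch {
      static void resolve(String path) throws IOException {
        try {
          // Illustrative lookup that fails with a lower-level exception.
          throw new FileNotFoundException(path);
        } catch (FileNotFoundException e) {
          // Rethrow with the original exception preserved as the cause.
          throw new PathNotFoundException(path, e);
        }
      }
    }
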
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/FastByteComparisons.java

@@ -75,7 +75,7 @@ abstract class FastByteComparisons {
      * implementation if unable to do so.
      */
     static Comparer<byte[]> getBestComparer() {
-      if (System.getProperty("os.arch").equals("sparc")) {
+      if (System.getProperty("os.arch").toLowerCase().startsWith("sparc")) {
         if (LOG.isTraceEnabled()) {
           LOG.trace("Lexicographical comparer selected for "
               + "byte aligned system architecture");

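The one-line change above makes the SPARC detection tolerant of os.arch values such as "sparcv9" and of case differences; a tiny stand-alone comparison of the two checks (the sample values are illustrative):

    public class ArchCheckSketch {
      public static void main(String[] args) {
        String[] samples = {"sparc", "sparcv9", "SPARC", "amd64"};
        for (String arch : samples) {
          boolean oldCheck = arch.equals("sparc");
          boolean newCheck = arch.toLowerCase().startsWith("sparc");
          System.out.println(arch + ": old=" + oldCheck + ", new=" + newCheck);
        }
      }
    }
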
+ 29 - 84
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java

@@ -21,7 +21,6 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
@@ -36,115 +35,61 @@ public final class CodecUtil {
 
   /**
    * Create RS raw encoder according to configuration.
-   * @param conf configuration possibly with some items to configure the coder
-   * @param numDataUnits number of data units in a coding group
-   * @param numParityUnits number of parity units in a coding group
+   * @param conf configuration
+   * @param coderOptions coder options used to create the coder
    * @param codec the codec to use. If null, will use the default codec
    * @return raw encoder
    */
-  public static RawErasureEncoder createRSRawEncoder(
-      Configuration conf, int numDataUnits, int numParityUnits, String codec) {
+  public static RawErasureEncoder createRawEncoder(
+      Configuration conf, String codec, ErasureCoderOptions coderOptions) {
     Preconditions.checkNotNull(conf);
-    if (codec == null) {
-      codec = ErasureCodeConstants.RS_DEFAULT_CODEC_NAME;
-    }
-    RawErasureCoder rawCoder = createRawCoder(conf,
-        getFactNameFromCodec(conf, codec), true, numDataUnits, numParityUnits);
-    return (RawErasureEncoder) rawCoder;
-  }
+    Preconditions.checkNotNull(codec);
 
-  /**
-   * Create RS raw encoder using the default codec.
-   */
-  public static RawErasureEncoder createRSRawEncoder(
-      Configuration conf, int numDataUnits, int numParityUnits) {
-    return createRSRawEncoder(conf, numDataUnits, numParityUnits, null);
+    String rawCoderFactoryKey = getFactNameFromCodec(conf, codec);
+
+    RawErasureCoderFactory fact = createRawCoderFactory(conf,
+        rawCoderFactoryKey);
+
+    return fact.createEncoder(coderOptions);
   }
 
   /**
    * Create RS raw decoder according to configuration.
-   * @param conf configuration possibly with some items to configure the coder
-   * @param numDataUnits number of data units in a coding group
-   * @param numParityUnits number of parity units in a coding group
+   * @param conf configuration
+   * @param coderOptions coder options used to create the coder
    * @param codec the codec to use. If null, will use the default codec
    * @return raw decoder
    */
-  public static RawErasureDecoder createRSRawDecoder(
-      Configuration conf, int numDataUnits, int numParityUnits, String codec) {
+  public static RawErasureDecoder createRawDecoder(
+      Configuration conf, String codec, ErasureCoderOptions coderOptions) {
     Preconditions.checkNotNull(conf);
-    if (codec == null) {
-      codec = ErasureCodeConstants.RS_DEFAULT_CODEC_NAME;
-    }
-    RawErasureCoder rawCoder = createRawCoder(conf,
-        getFactNameFromCodec(conf, codec), false, numDataUnits, numParityUnits);
-    return (RawErasureDecoder) rawCoder;
-  }
+    Preconditions.checkNotNull(codec);
 
-  /**
-   * Create RS raw decoder using the default codec.
-   */
-  public static RawErasureDecoder createRSRawDecoder(
-      Configuration conf, int numDataUnits, int numParityUnits) {
-    return createRSRawDecoder(conf, numDataUnits, numParityUnits, null);
-  }
+    String rawCoderFactoryKey = getFactNameFromCodec(conf, codec);
 
-  /**
-   * Create XOR raw encoder according to configuration.
-   * @param conf configuration possibly with some items to configure the coder
-   * @param numDataUnits number of data units in a coding group
-   * @param numParityUnits number of parity units in a coding group
-   * @return raw encoder
-   */
-  public static RawErasureEncoder createXORRawEncoder(
-      Configuration conf, int numDataUnits, int numParityUnits) {
-    Preconditions.checkNotNull(conf);
-    RawErasureCoder rawCoder = createRawCoder(conf,
-        getFactNameFromCodec(conf, ErasureCodeConstants.XOR_CODEC_NAME),
-        true, numDataUnits, numParityUnits);
-    return (RawErasureEncoder) rawCoder;
-  }
+    RawErasureCoderFactory fact = createRawCoderFactory(conf,
+        rawCoderFactoryKey);
 
-  /**
-   * Create XOR raw decoder according to configuration.
-   * @param conf configuration possibly with some items to configure the coder
-   * @param numDataUnits number of data units in a coding group
-   * @param numParityUnits number of parity units in a coding group
-   * @return raw decoder
-   */
-  public static RawErasureDecoder createXORRawDecoder(
-      Configuration conf, int numDataUnits, int numParityUnits) {
-    Preconditions.checkNotNull(conf);
-    RawErasureCoder rawCoder = createRawCoder(conf,
-        getFactNameFromCodec(conf, ErasureCodeConstants.XOR_CODEC_NAME),
-        false, numDataUnits, numParityUnits);
-    return (RawErasureDecoder) rawCoder;
+    return fact.createDecoder(coderOptions);
   }
 
-  /**
-   * Create raw coder using specified conf and raw coder factory key.
-   * @param conf configuration possibly with some items to configure the coder
-   * @param rawCoderFactory name of the raw coder factory
-   * @param isEncoder is encoder or not we're going to create
-   * @param numDataUnits number of data units in a coding group
-   * @param numParityUnits number of parity units in a coding group
-   * @return raw coder
-   */
-  public static RawErasureCoder createRawCoder(Configuration conf,
-      String rawCoderFactory, boolean isEncoder, int numDataUnits,
-                                               int numParityUnits) {
-
+  private static RawErasureCoderFactory createRawCoderFactory(
+      Configuration conf, String rawCoderFactoryKey) {
     RawErasureCoderFactory fact;
     try {
       Class<? extends RawErasureCoderFactory> factClass = conf.getClassByName(
-          rawCoderFactory).asSubclass(RawErasureCoderFactory.class);
+          rawCoderFactoryKey).asSubclass(RawErasureCoderFactory.class);
       fact = factClass.newInstance();
     } catch (ClassNotFoundException | InstantiationException |
         IllegalAccessException e) {
-      throw new RuntimeException("Failed to create raw coder", e);
+      throw new RuntimeException("Failed to create raw coder factory", e);
+    }
+
+    if (fact == null) {
+      throw new RuntimeException("Failed to create raw coder factory");
     }
 
-    return isEncoder ? fact.createEncoder(numDataUnits, numParityUnits) :
-            fact.createDecoder(numDataUnits, numParityUnits);
+    return fact;
   }
 
   private static String getFactNameFromCodec(Configuration conf, String codec) {

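A minimal sketch of the consolidated factory API introduced above; the 6+3 Reed-Solomon layout is used purely as an example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.erasurecode.CodecUtil;
    import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
    import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
    import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
    import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

    public class CodecUtilSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // 6 data units + 3 parity units, default buffer-handling options.
        ErasureCoderOptions options = new ErasureCoderOptions(6, 3);

        RawErasureEncoder encoder = CodecUtil.createRawEncoder(
            conf, ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, options);
        RawErasureDecoder decoder = CodecUtil.createRawDecoder(
            conf, ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, options);

        System.out.println(encoder.getClass() + " / " + decoder.getClass());
      }
    }
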
+ 89 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCoderOptions.java

@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Erasure coder configuration that maintains schema info and coder options.
+ */
+@InterfaceAudience.Private
+public final class ErasureCoderOptions {
+
+  private final int numDataUnits;
+  private final int numParityUnits;
+  private final int numAllUnits;
+  private final boolean allowChangeInputs;
+  private final boolean allowVerboseDump;
+
+  public ErasureCoderOptions(int numDataUnits, int numParityUnits) {
+    this(numDataUnits, numParityUnits, false, false);
+  }
+
+  public ErasureCoderOptions(int numDataUnits, int numParityUnits,
+                        boolean allowChangeInputs, boolean allowVerboseDump) {
+    this.numDataUnits = numDataUnits;
+    this.numParityUnits = numParityUnits;
+    this.numAllUnits = numDataUnits + numParityUnits;
+    this.allowChangeInputs = allowChangeInputs;
+    this.allowVerboseDump = allowVerboseDump;
+  }
+
+  /**
+   * The number of data input units for the coding. A unit can be a byte,
+   * chunk, buffer or even a block.
+   * @return count of data input units
+   */
+  public int getNumDataUnits() {
+    return numDataUnits;
+  }
+
+  /**
+   * The number of parity output units for the coding. A unit can be a byte,
+   * chunk, buffer or even a block.
+   * @return count of parity output units
+   */
+  public int getNumParityUnits() {
+    return numParityUnits;
+  }
+
+  /**
+   * The number of all the involved units in the coding.
+   * @return count of all the data units and parity units
+   */
+  public int getNumAllUnits() {
+    return numAllUnits;
+  }
+
+  /**
+   * Whether to allow changing input buffer content (not positions).
+   * Performance may be better when changes are not allowed.
+   * @return true if allowing input content to be changed, false otherwise
+   */
+  public boolean allowChangeInputs() {
+    return allowChangeInputs;
+  }
+
+  /**
+   * Whether to allow dumping verbose debug info.
+   * @return true if verbose debug info is desired, false otherwise
+   */
+  public boolean allowVerboseDump() {
+    return allowVerboseDump;
+  }
+}

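A short sketch exercising the new options holder (the 10+4 layout and flag values are illustrative):

    import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;

    public class ErasureCoderOptionsSketch {
      public static void main(String[] args) {
        // 10 data units, 4 parity units; inputs may be changed, no verbose dump.
        ErasureCoderOptions options =
            new ErasureCoderOptions(10, 4, true, false);
        System.out.println(options.getNumAllUnits());    // 14
        System.out.println(options.allowChangeInputs()); // true
        System.out.println(options.allowVerboseDump());  // false
      }
    }
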
+ 12 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureDecoder.java

@@ -22,7 +22,10 @@ import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
 import org.apache.hadoop.io.erasurecode.ECSchema;
-import org.apache.hadoop.io.erasurecode.rawcoder.*;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 
 /**
  * Hitchhiker is a new erasure coding algorithm developed as a research project
@@ -68,17 +71,20 @@ public class HHXORErasureDecoder extends AbstractErasureDecoder {
 
   private RawErasureDecoder checkCreateRSRawDecoder() {
     if (rsRawDecoder == null) {
-      rsRawDecoder = CodecUtil.createRSRawDecoder(getConf(),
-              getNumDataUnits(), getNumParityUnits());
+      ErasureCoderOptions coderOptions = new ErasureCoderOptions(
+          getNumDataUnits(), getNumParityUnits());
+      rsRawDecoder = CodecUtil.createRawDecoder(getConf(),
+              ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
     }
     return rsRawDecoder;
   }
 
   private RawErasureEncoder checkCreateXorRawEncoder() {
     if (xorRawEncoder == null) {
-      xorRawEncoder = CodecUtil.createXORRawEncoder(getConf(),
-              getNumDataUnits(), getNumParityUnits());
-      xorRawEncoder.setCoderOption(CoderOption.ALLOW_CHANGE_INPUTS, false);
+      ErasureCoderOptions coderOptions = new ErasureCoderOptions(
+          getNumDataUnits(), getNumParityUnits());
+      xorRawEncoder = CodecUtil.createRawEncoder(getConf(),
+          ErasureCodeConstants.XOR_CODEC_NAME, coderOptions);
     }
     return xorRawEncoder;
   }

+ 10 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureEncoder.java

@@ -22,7 +22,8 @@ import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
 import org.apache.hadoop.io.erasurecode.ECSchema;
-import org.apache.hadoop.io.erasurecode.rawcoder.CoderOption;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 
 /**
@@ -64,17 +65,21 @@ public class HHXORErasureEncoder extends AbstractErasureEncoder {
 
   private RawErasureEncoder checkCreateRSRawEncoder() {
     if (rsRawEncoder == null) {
-      rsRawEncoder = CodecUtil.createRSRawEncoder(getConf(),
+      ErasureCoderOptions coderOptions = new ErasureCoderOptions(
           getNumDataUnits(), getNumParityUnits());
+      rsRawEncoder = CodecUtil.createRawEncoder(getConf(),
+          ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
     }
     return rsRawEncoder;
   }
 
   private RawErasureEncoder checkCreateXorRawEncoder() {
     if (xorRawEncoder == null) {
-      xorRawEncoder = CodecUtil.createXORRawEncoder(getConf(),
-              getNumDataUnits(), getNumParityUnits());
-      xorRawEncoder.setCoderOption(CoderOption.ALLOW_CHANGE_INPUTS, false);
+      ErasureCoderOptions erasureCoderOptions = new ErasureCoderOptions(
+          getNumDataUnits(), getNumParityUnits());
+      xorRawEncoder = CodecUtil.createRawEncoder(getConf(),
+          ErasureCodeConstants.XOR_CODEC_NAME,
+          erasureCoderOptions);
     }
     return xorRawEncoder;
   }

+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java

@@ -22,6 +22,8 @@ import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
 import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 
 /**
@@ -55,8 +57,10 @@ public class RSErasureDecoder extends AbstractErasureDecoder {
   private RawErasureDecoder checkCreateRSRawDecoder() {
     if (rsRawDecoder == null) {
       // TODO: we should create the raw coder according to codec.
-      rsRawDecoder = CodecUtil.createRSRawDecoder(getConf(),
+      ErasureCoderOptions coderOptions = new ErasureCoderOptions(
           getNumDataUnits(), getNumParityUnits());
+      rsRawDecoder = CodecUtil.createRawDecoder(getConf(),
+          ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
     }
     return rsRawDecoder;
   }

+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java

@@ -22,6 +22,8 @@ import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
 import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 
 /**
@@ -55,8 +57,10 @@ public class RSErasureEncoder extends AbstractErasureEncoder {
   private RawErasureEncoder checkCreateRSRawEncoder() {
     if (rawEncoder == null) {
       // TODO: we should create the raw coder according to codec.
-      rawEncoder = CodecUtil.createRSRawEncoder(getConf(),
+      ErasureCoderOptions coderOptions = new ErasureCoderOptions(
           getNumDataUnits(), getNumParityUnits());
+      rawEncoder = CodecUtil.createRawEncoder(getConf(),
+          ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
     }
     return rawEncoder;
   }

+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/XORErasureDecoder.java

@@ -22,6 +22,8 @@ import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
 import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 
 /**
@@ -43,8 +45,10 @@ public class XORErasureDecoder extends AbstractErasureDecoder {
   @Override
   protected ErasureCodingStep prepareDecodingStep(
       final ECBlockGroup blockGroup) {
-    RawErasureDecoder rawDecoder = CodecUtil.createXORRawDecoder(getConf(),
+    ErasureCoderOptions coderOptions = new ErasureCoderOptions(
         getNumDataUnits(), getNumParityUnits());
+    RawErasureDecoder rawDecoder = CodecUtil.createRawDecoder(getConf(),
+        ErasureCodeConstants.XOR_CODEC_NAME, coderOptions);
 
     ECBlock[] inputBlocks = getInputBlocks(blockGroup);
 

+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/XORErasureEncoder.java

@@ -22,6 +22,8 @@ import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
 import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 
 /**
@@ -43,8 +45,10 @@ public class XORErasureEncoder extends AbstractErasureEncoder {
   @Override
   protected ErasureCodingStep prepareEncodingStep(
       final ECBlockGroup blockGroup) {
-    RawErasureEncoder rawEncoder = CodecUtil.createXORRawEncoder(getConf(),
+    ErasureCoderOptions coderOptions = new ErasureCoderOptions(
         getNumDataUnits(), getNumParityUnits());
+    RawErasureEncoder rawEncoder = CodecUtil.createRawEncoder(getConf(),
+        ErasureCodeConstants.XOR_CODEC_NAME, coderOptions);
 
     ECBlock[] inputBlocks = getInputBlocks(blockGroup);
 

+ 0 - 220
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java

@@ -1,220 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.erasurecode.rawcoder;
-
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configured;
-
-import java.nio.ByteBuffer;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * A common class of basic facilities to be shared by encoder and decoder
- *
- * It implements the {@link RawErasureCoder} interface.
- */
-@InterfaceAudience.Private
-public abstract class AbstractRawErasureCoder
-    extends Configured implements RawErasureCoder {
-
-  private static byte[] emptyChunk = new byte[4096];
-  private final int numDataUnits;
-  private final int numParityUnits;
-  private final int numAllUnits;
-  private final Map<CoderOption, Object> coderOptions;
-
-  public AbstractRawErasureCoder(int numDataUnits, int numParityUnits) {
-    this.numDataUnits = numDataUnits;
-    this.numParityUnits = numParityUnits;
-    this.numAllUnits = numDataUnits + numParityUnits;
-    this.coderOptions = new HashMap<>(3);
-
-    coderOptions.put(CoderOption.PREFER_DIRECT_BUFFER, preferDirectBuffer());
-    coderOptions.put(CoderOption.ALLOW_CHANGE_INPUTS, false);
-    coderOptions.put(CoderOption.ALLOW_VERBOSE_DUMP, false);
-  }
-
-  @Override
-  public Object getCoderOption(CoderOption option) {
-    if (option == null) {
-      throw new HadoopIllegalArgumentException("Invalid option");
-    }
-    return coderOptions.get(option);
-  }
-
-  @Override
-  public void setCoderOption(CoderOption option, Object value) {
-    if (option == null || value == null) {
-      throw new HadoopIllegalArgumentException(
-          "Invalid option or option value");
-    }
-    if (option.isReadOnly()) {
-      throw new HadoopIllegalArgumentException(
-          "The option is read-only: " + option.name());
-    }
-
-    coderOptions.put(option, value);
-  }
-
-  /**
-   * Make sure to return an empty chunk buffer for the desired length.
-   * @param leastLength
-   * @return empty chunk of zero bytes
-   */
-  protected static byte[] getEmptyChunk(int leastLength) {
-    if (emptyChunk.length >= leastLength) {
-      return emptyChunk; // In most time
-    }
-
-    synchronized (AbstractRawErasureCoder.class) {
-      emptyChunk = new byte[leastLength];
-    }
-
-    return emptyChunk;
-  }
-
-  @Override
-  public int getNumDataUnits() {
-    return numDataUnits;
-  }
-
-  @Override
-  public int getNumParityUnits() {
-    return numParityUnits;
-  }
-
-  protected int getNumAllUnits() {
-    return numAllUnits;
-  }
-
-  @Override
-  public void release() {
-    // Nothing to do by default
-  }
-
-  /**
-   * Tell if direct buffer is preferred or not. It's for callers to
-   * decide how to allocate coding chunk buffers, using DirectByteBuffer or
-   * bytes array. It will return false by default.
-   * @return true if native buffer is preferred for performance consideration,
-   * otherwise false.
-   */
-  protected boolean preferDirectBuffer() {
-    return false;
-  }
-
-  protected boolean isAllowingChangeInputs() {
-    Object value = getCoderOption(CoderOption.ALLOW_CHANGE_INPUTS);
-    if (value != null && value instanceof Boolean) {
-      return (boolean) value;
-    }
-    return false;
-  }
-
-  protected boolean isAllowingVerboseDump() {
-    Object value = getCoderOption(CoderOption.ALLOW_VERBOSE_DUMP);
-    if (value != null && value instanceof Boolean) {
-      return (boolean) value;
-    }
-    return false;
-  }
-
-  /**
-   * Ensure a buffer filled with ZERO bytes from current readable/writable
-   * position.
-   * @param buffer a buffer ready to read / write certain size bytes
-   * @return the buffer itself, with ZERO bytes written, the position and limit
-   *         are not changed after the call
-   */
-  protected ByteBuffer resetBuffer(ByteBuffer buffer, int len) {
-    int pos = buffer.position();
-    buffer.put(getEmptyChunk(len), 0, len);
-    buffer.position(pos);
-
-    return buffer;
-  }
-
-  /**
-   * Ensure the buffer (either input or output) ready to read or write with ZERO
-   * bytes fully in specified length of len.
-   * @param buffer bytes array buffer
-   * @return the buffer itself
-   */
-  protected byte[] resetBuffer(byte[] buffer, int offset, int len) {
-    byte[] empty = getEmptyChunk(len);
-    System.arraycopy(empty, 0, buffer, offset, len);
-
-    return buffer;
-  }
-
-  /**
-   * Check and ensure the buffers are of the length specified by dataLen, also
-   * ensure the buffers are direct buffers or not according to isDirectBuffer.
-   * @param buffers the buffers to check
-   * @param allowNull whether to allow any element to be null or not
-   * @param dataLen the length of data available in the buffer to ensure with
-   * @param isDirectBuffer is direct buffer or not to ensure with
-   * @param isOutputs is output buffer or not
-   */
-  protected void checkParameterBuffers(ByteBuffer[] buffers, boolean
-      allowNull, int dataLen, boolean isDirectBuffer, boolean isOutputs) {
-    for (ByteBuffer buffer : buffers) {
-      if (buffer == null && !allowNull) {
-        throw new HadoopIllegalArgumentException(
-            "Invalid buffer found, not allowing null");
-      } else if (buffer != null) {
-        if (buffer.remaining() != dataLen) {
-          throw new HadoopIllegalArgumentException(
-              "Invalid buffer, not of length " + dataLen);
-        }
-        if (buffer.isDirect() != isDirectBuffer) {
-          throw new HadoopIllegalArgumentException(
-              "Invalid buffer, isDirect should be " + isDirectBuffer);
-        }
-        if (isOutputs) {
-          resetBuffer(buffer, dataLen);
-        }
-      }
-    }
-  }
-
-  /**
-   * Check and ensure the buffers are of the length specified by dataLen. If is
-   * output buffers, ensure they will be ZEROed.
-   * @param buffers the buffers to check
-   * @param allowNull whether to allow any element to be null or not
-   * @param dataLen the length of data available in the buffer to ensure with
-   * @param isOutputs is output buffer or not
-   */
-  protected void checkParameterBuffers(byte[][] buffers, boolean allowNull,
-                                       int dataLen, boolean isOutputs) {
-    for (byte[] buffer : buffers) {
-      if (buffer == null && !allowNull) {
-        throw new HadoopIllegalArgumentException(
-            "Invalid buffer found, not allowing null");
-      } else if (buffer != null && buffer.length != dataLen) {
-        throw new HadoopIllegalArgumentException(
-            "Invalid buffer not of length " + dataLen);
-      } else if (isOutputs) {
-        resetBuffer(buffer, 0, dataLen);
-      }
-    }
-  }
-}

+ 0 - 181
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureDecoder.java

@@ -1,181 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.erasurecode.rawcoder;
-
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.io.erasurecode.ECChunk;
-import org.apache.hadoop.io.erasurecode.rawcoder.util.CoderUtil;
-
-import java.nio.ByteBuffer;
-
-/**
- * An abstract raw erasure decoder that's to be inherited by new decoders.
- *
- * It implements the {@link RawErasureDecoder} interface.
- */
-@InterfaceAudience.Private
-public abstract class AbstractRawErasureDecoder extends AbstractRawErasureCoder
-    implements RawErasureDecoder {
-
-  public AbstractRawErasureDecoder(int numDataUnits, int numParityUnits) {
-    super(numDataUnits, numParityUnits);
-  }
-
-  @Override
-  public void decode(ByteBuffer[] inputs, int[] erasedIndexes,
-                     ByteBuffer[] outputs) {
-    checkParameters(inputs, erasedIndexes, outputs);
-
-    ByteBuffer validInput = CoderUtil.findFirstValidInput(inputs);
-    boolean usingDirectBuffer = validInput.isDirect();
-    int dataLen = validInput.remaining();
-    if (dataLen == 0) {
-      return;
-    }
-    checkParameterBuffers(inputs, true, dataLen, usingDirectBuffer, false);
-    checkParameterBuffers(outputs, false, dataLen, usingDirectBuffer, true);
-
-    int[] inputPositions = new int[inputs.length];
-    for (int i = 0; i < inputPositions.length; i++) {
-      if (inputs[i] != null) {
-        inputPositions[i] = inputs[i].position();
-      }
-    }
-
-    if (usingDirectBuffer) {
-      doDecode(inputs, erasedIndexes, outputs);
-    } else {
-      int[] inputOffsets = new int[inputs.length];
-      int[] outputOffsets = new int[outputs.length];
-      byte[][] newInputs = new byte[inputs.length][];
-      byte[][] newOutputs = new byte[outputs.length][];
-
-      ByteBuffer buffer;
-      for (int i = 0; i < inputs.length; ++i) {
-        buffer = inputs[i];
-        if (buffer != null) {
-          inputOffsets[i] = buffer.arrayOffset() + buffer.position();
-          newInputs[i] = buffer.array();
-        }
-      }
-
-      for (int i = 0; i < outputs.length; ++i) {
-        buffer = outputs[i];
-        outputOffsets[i] = buffer.arrayOffset() + buffer.position();
-        newOutputs[i] = buffer.array();
-      }
-
-      doDecode(newInputs, inputOffsets, dataLen,
-          erasedIndexes, newOutputs, outputOffsets);
-    }
-
-    for (int i = 0; i < inputs.length; i++) {
-      if (inputs[i] != null) {
-        // dataLen bytes consumed
-        inputs[i].position(inputPositions[i] + dataLen);
-      }
-    }
-  }
-
-  /**
-   * Perform the real decoding using Direct ByteBuffer.
-   * @param inputs Direct ByteBuffers expected
-   * @param erasedIndexes indexes of erased units in the inputs array
-   * @param outputs Direct ByteBuffers expected
-   */
-  protected abstract void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
-                                   ByteBuffer[] outputs);
-
-  @Override
-  public void decode(byte[][] inputs, int[] erasedIndexes, byte[][] outputs) {
-    checkParameters(inputs, erasedIndexes, outputs);
-
-    byte[] validInput = CoderUtil.findFirstValidInput(inputs);
-    int dataLen = validInput.length;
-    if (dataLen == 0) {
-      return;
-    }
-    checkParameterBuffers(inputs, true, dataLen, false);
-    checkParameterBuffers(outputs, false, dataLen, true);
-
-    int[] inputOffsets = new int[inputs.length]; // ALL ZERO
-    int[] outputOffsets = new int[outputs.length]; // ALL ZERO
-
-    doDecode(inputs, inputOffsets, dataLen, erasedIndexes, outputs,
-        outputOffsets);
-  }
-
-  /**
-   * Perform the real decoding using bytes array, supporting offsets and
-   * lengths.
-   * @param inputs the input byte arrays to read data from
-   * @param inputOffsets offsets for the input byte arrays to read data from
-   * @param dataLen how much data are to be read from
-   * @param erasedIndexes indexes of erased units in the inputs array
-   * @param outputs the output byte arrays to write resultant data into
-   * @param outputOffsets offsets from which to write resultant data into
-   */
-  protected abstract void doDecode(byte[][] inputs, int[] inputOffsets,
-                                   int dataLen, int[] erasedIndexes,
-                                   byte[][] outputs, int[] outputOffsets);
-
-  @Override
-  public void decode(ECChunk[] inputs, int[] erasedIndexes,
-                     ECChunk[] outputs) {
-    ByteBuffer[] newInputs = ECChunk.toBuffers(inputs);
-    ByteBuffer[] newOutputs = ECChunk.toBuffers(outputs);
-    decode(newInputs, erasedIndexes, newOutputs);
-  }
-
-  /**
-   * Check and validate decoding parameters, throw exception accordingly. The
-   * checking assumes it's a MDS code. Other code  can override this.
-   * @param inputs input buffers to check
-   * @param erasedIndexes indexes of erased units in the inputs array
-   * @param outputs output buffers to check
-   */
-  protected <T> void checkParameters(T[] inputs, int[] erasedIndexes,
-                                 T[] outputs) {
-    if (inputs.length != getNumParityUnits() + getNumDataUnits()) {
-      throw new IllegalArgumentException("Invalid inputs length");
-    }
-
-    if (erasedIndexes.length != outputs.length) {
-      throw new HadoopIllegalArgumentException(
-          "erasedIndexes and outputs mismatch in length");
-    }
-
-    if (erasedIndexes.length > getNumParityUnits()) {
-      throw new HadoopIllegalArgumentException(
-          "Too many erased, not recoverable");
-    }
-
-    int validInputs = 0;
-    for (T input : inputs) {
-      if (input != null) {
-        validInputs += 1;
-      }
-    }
-
-    if (validInputs < getNumDataUnits()) {
-      throw new HadoopIllegalArgumentException(
-          "No enough valid inputs are provided, not recoverable");
-    }
-  }
-}

+ 0 - 146
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureEncoder.java

@@ -1,146 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.erasurecode.rawcoder;
-
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.io.erasurecode.ECChunk;
-
-import java.nio.ByteBuffer;
-
-/**
- * An abstract raw erasure encoder that's to be inherited by new encoders.
- *
- * It implements the {@link RawErasureEncoder} interface.
- */
-@InterfaceAudience.Private
-public abstract class AbstractRawErasureEncoder extends AbstractRawErasureCoder
-    implements RawErasureEncoder {
-
-  public AbstractRawErasureEncoder(int numDataUnits, int numParityUnits) {
-    super(numDataUnits, numParityUnits);
-  }
-
-  @Override
-  public void encode(ByteBuffer[] inputs, ByteBuffer[] outputs) {
-    checkParameters(inputs, outputs);
-
-    boolean usingDirectBuffer = inputs[0].isDirect();
-    int dataLen = inputs[0].remaining();
-    if (dataLen == 0) {
-      return;
-    }
-    checkParameterBuffers(inputs, false, dataLen, usingDirectBuffer, false);
-    checkParameterBuffers(outputs, false, dataLen, usingDirectBuffer, true);
-
-    int[] inputPositions = new int[inputs.length];
-    for (int i = 0; i < inputPositions.length; i++) {
-      if (inputs[i] != null) {
-        inputPositions[i] = inputs[i].position();
-      }
-    }
-
-    if (usingDirectBuffer) {
-      doEncode(inputs, outputs);
-    } else {
-      int[] inputOffsets = new int[inputs.length];
-      int[] outputOffsets = new int[outputs.length];
-      byte[][] newInputs = new byte[inputs.length][];
-      byte[][] newOutputs = new byte[outputs.length][];
-
-      ByteBuffer buffer;
-      for (int i = 0; i < inputs.length; ++i) {
-        buffer = inputs[i];
-        inputOffsets[i] = buffer.arrayOffset() + buffer.position();
-        newInputs[i] = buffer.array();
-      }
-
-      for (int i = 0; i < outputs.length; ++i) {
-        buffer = outputs[i];
-        outputOffsets[i] = buffer.arrayOffset() + buffer.position();
-        newOutputs[i] = buffer.array();
-      }
-
-      doEncode(newInputs, inputOffsets, dataLen, newOutputs, outputOffsets);
-    }
-
-    for (int i = 0; i < inputs.length; i++) {
-      if (inputs[i] != null) {
-        // dataLen bytes consumed
-        inputs[i].position(inputPositions[i] + dataLen);
-      }
-    }
-  }
-
-  /**
-   * Perform the real encoding work using direct ByteBuffer
-   * @param inputs Direct ByteBuffers expected
-   * @param outputs Direct ByteBuffers expected
-   */
-  protected abstract void doEncode(ByteBuffer[] inputs, ByteBuffer[] outputs);
-
-  @Override
-  public void encode(byte[][] inputs, byte[][] outputs) {
-    checkParameters(inputs, outputs);
-    int dataLen = inputs[0].length;
-    if (dataLen == 0) {
-      return;
-    }
-    checkParameterBuffers(inputs, false, dataLen, false);
-    checkParameterBuffers(outputs, false, dataLen, true);
-
-    int[] inputOffsets = new int[inputs.length]; // ALL ZERO
-    int[] outputOffsets = new int[outputs.length]; // ALL ZERO
-
-    doEncode(inputs, inputOffsets, dataLen, outputs, outputOffsets);
-  }
-
-  /**
-   * Perform the real encoding work using bytes array, supporting offsets
-   * and lengths.
-   * @param inputs the input byte arrays to read data from
-   * @param inputOffsets offsets for the input byte arrays to read data from
-   * @param dataLen how much data are to be read from
-   * @param outputs the output byte arrays to write resultant data into
-   * @param outputOffsets offsets from which to write resultant data into
-   */
-  protected abstract void doEncode(byte[][] inputs, int[] inputOffsets,
-                                   int dataLen, byte[][] outputs,
-                                   int[] outputOffsets);
-
-  @Override
-  public void encode(ECChunk[] inputs, ECChunk[] outputs) {
-    ByteBuffer[] newInputs = ECChunk.toBuffers(inputs);
-    ByteBuffer[] newOutputs = ECChunk.toBuffers(outputs);
-    encode(newInputs, newOutputs);
-  }
-
-  /**
-   * Check and validate decoding parameters, throw exception accordingly.
-   * @param inputs input buffers to check
-   * @param outputs output buffers to check
-   */
-  protected <T> void checkParameters(T[] inputs, T[] outputs) {
-    if (inputs.length != getNumDataUnits()) {
-      throw new HadoopIllegalArgumentException("Invalid inputs length");
-    }
-    if (outputs.length != getNumParityUnits()) {
-      throw new HadoopIllegalArgumentException("Invalid outputs length");
-    }
-  }
-}

+ 111 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteArrayDecodingState.java

@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A utility class that maintains decoding state during a decode call using
+ * byte array inputs.
+ */
+@InterfaceAudience.Private
+class ByteArrayDecodingState extends DecodingState {
+  byte[][] inputs;
+  int[] inputOffsets;
+  int[] erasedIndexes;
+  byte[][] outputs;
+  int[] outputOffsets;
+
+  ByteArrayDecodingState(RawErasureDecoder decoder, byte[][] inputs,
+                         int[] erasedIndexes, byte[][] outputs) {
+    this.decoder = decoder;
+    this.inputs = inputs;
+    this.outputs = outputs;
+    this.erasedIndexes = erasedIndexes;
+    byte[] validInput = CoderUtil.findFirstValidInput(inputs);
+    this.decodeLength = validInput.length;
+
+    checkParameters(inputs, erasedIndexes, outputs);
+    checkInputBuffers(inputs);
+    checkOutputBuffers(outputs);
+
+    this.inputOffsets = new int[inputs.length]; // ALL ZERO
+    this.outputOffsets = new int[outputs.length]; // ALL ZERO
+  }
+
+  ByteArrayDecodingState(RawErasureDecoder decoder,
+                         int decodeLength,
+                         int[] erasedIndexes,
+                         byte[][] inputs,
+                         int[] inputOffsets,
+                         byte[][] outputs,
+                         int[] outputOffsets) {
+    this.decoder = decoder;
+    this.decodeLength = decodeLength;
+    this.erasedIndexes = erasedIndexes;
+    this.inputs = inputs;
+    this.outputs = outputs;
+    this.inputOffsets = inputOffsets;
+    this.outputOffsets = outputOffsets;
+  }
+
+  /**
+   * Check and ensure the buffers are of the desired length.
+   * @param buffers the buffers to check
+   */
+  void checkInputBuffers(byte[][] buffers) {
+    int validInputs = 0;
+
+    for (byte[] buffer : buffers) {
+      if (buffer == null) {
+        continue;
+      }
+
+      if (buffer.length != decodeLength) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer, not of length " + decodeLength);
+      }
+
+      validInputs++;
+    }
+
+    if (validInputs < decoder.getNumDataUnits()) {
+      throw new HadoopIllegalArgumentException(
+          "No enough valid inputs are provided, not recoverable");
+    }
+  }
+
+  /**
+   * Check and ensure the buffers are of the desired length.
+   * @param buffers the buffers to check
+   */
+  void checkOutputBuffers(byte[][] buffers) {
+    for (byte[] buffer : buffers) {
+      if (buffer == null) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer found, not allowing null");
+      }
+
+      if (buffer.length != decodeLength) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer not of length " + decodeLength);
+      }
+    }
+  }
+}
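As a point of reference for how a concrete coder consumes this state object, here is a minimal sketch of a doDecode(ByteArrayDecodingState) body for a single-parity XOR code; it is illustrative only (not part of this patch) and assumes exactly one erased unit is requested:

  @Override
  protected void doDecode(ByteArrayDecodingState decodingState) {
    int dataLen = decodingState.decodeLength;
    // Zero the output first; offsets index into the caller's backing arrays.
    CoderUtil.resetOutputBuffers(decodingState.outputs,
        decodingState.outputOffsets, dataLen);

    byte[] output = decodingState.outputs[0];
    int outOff = decodingState.outputOffsets[0];
    for (int i = 0; i < decodingState.inputs.length; i++) {
      byte[] input = decodingState.inputs[i];
      if (input == null) {
        continue;  // the erased (or not-to-read) position
      }
      int inOff = decodingState.inputOffsets[i];
      for (int j = 0; j < dataLen; j++) {
        output[outOff + j] ^= input[inOff + j];
      }
    }
  }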

+ 81 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteArrayEncodingState.java

@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A utility class that maintains encoding state during an encode call using
+ * byte array inputs.
+ */
+@InterfaceAudience.Private
+class ByteArrayEncodingState extends EncodingState {
+  byte[][] inputs;
+  byte[][] outputs;
+  int[] inputOffsets;
+  int[] outputOffsets;
+
+  ByteArrayEncodingState(RawErasureEncoder encoder,
+                         byte[][] inputs, byte[][] outputs) {
+    this.encoder = encoder;
+    byte[] validInput = CoderUtil.findFirstValidInput(inputs);
+    this.encodeLength = validInput.length;
+    this.inputs = inputs;
+    this.outputs = outputs;
+
+    checkParameters(inputs, outputs);
+    checkBuffers(inputs);
+    checkBuffers(outputs);
+
+    this.inputOffsets = new int[inputs.length]; // ALL ZERO
+    this.outputOffsets = new int[outputs.length]; // ALL ZERO
+  }
+
+  ByteArrayEncodingState(RawErasureEncoder encoder,
+                         int encodeLength,
+                         byte[][] inputs,
+                         int[] inputOffsets,
+                         byte[][] outputs,
+                         int[] outputOffsets) {
+    this.encoder = encoder;
+    this.encodeLength = encodeLength;
+    this.inputs = inputs;
+    this.outputs = outputs;
+    this.inputOffsets = inputOffsets;
+    this.outputOffsets = outputOffsets;
+  }
+
+  /**
+   * Check and ensure the buffers are of the desired length.
+   * @param buffers the buffers to check
+   */
+  void checkBuffers(byte[][] buffers) {
+    for (byte[] buffer : buffers) {
+      if (buffer == null) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer found, not allowing null");
+      }
+
+      if (buffer.length != encodeLength) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer not of length " + encodeLength);
+      }
+    }
+  }
+}
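A matching sketch for the encode side, again assuming a single-parity XOR code rather than anything shipped in this patch; the state carries the inputs, outputs, and per-array offsets that doEncode needs:

  @Override
  protected void doEncode(ByteArrayEncodingState encodingState) {
    int dataLen = encodingState.encodeLength;
    // Start from a zeroed parity buffer, then XOR every data unit into it.
    CoderUtil.resetOutputBuffers(encodingState.outputs,
        encodingState.outputOffsets, dataLen);

    byte[] parity = encodingState.outputs[0];
    int parityOff = encodingState.outputOffsets[0];
    for (int i = 0; i < encodingState.inputs.length; i++) {
      int inOff = encodingState.inputOffsets[i];
      for (int j = 0; j < dataLen; j++) {
        parity[parityOff + j] ^= encodingState.inputs[i][inOff + j];
      }
    }
  }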

+ 134 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteBufferDecodingState.java

@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A utility class that maintains decoding state during a decode call using
+ * ByteBuffer inputs.
+ */
+@InterfaceAudience.Private
+class ByteBufferDecodingState extends DecodingState {
+  ByteBuffer[] inputs;
+  ByteBuffer[] outputs;
+  int[] erasedIndexes;
+  boolean usingDirectBuffer;
+
+  ByteBufferDecodingState(RawErasureDecoder decoder, ByteBuffer[] inputs,
+                          int[] erasedIndexes, ByteBuffer[] outputs) {
+    this.decoder = decoder;
+    this.inputs = inputs;
+    this.outputs = outputs;
+    this.erasedIndexes = erasedIndexes;
+    ByteBuffer validInput = CoderUtil.findFirstValidInput(inputs);
+    this.decodeLength = validInput.remaining();
+    this.usingDirectBuffer = validInput.isDirect();
+
+    checkParameters(inputs, erasedIndexes, outputs);
+    checkInputBuffers(inputs);
+    checkOutputBuffers(outputs);
+  }
+
+  /**
+   * Convert to a ByteArrayDecodingState when it's backed by on-heap arrays.
+   */
+  ByteArrayDecodingState convertToByteArrayState() {
+    int[] inputOffsets = new int[inputs.length];
+    int[] outputOffsets = new int[outputs.length];
+    byte[][] newInputs = new byte[inputs.length][];
+    byte[][] newOutputs = new byte[outputs.length][];
+
+    ByteBuffer buffer;
+    for (int i = 0; i < inputs.length; ++i) {
+      buffer = inputs[i];
+      if (buffer != null) {
+        inputOffsets[i] = buffer.arrayOffset() + buffer.position();
+        newInputs[i] = buffer.array();
+      }
+    }
+
+    for (int i = 0; i < outputs.length; ++i) {
+      buffer = outputs[i];
+      outputOffsets[i] = buffer.arrayOffset() + buffer.position();
+      newOutputs[i] = buffer.array();
+    }
+
+    ByteArrayDecodingState baeState = new ByteArrayDecodingState(decoder,
+        decodeLength, erasedIndexes, newInputs,
+        inputOffsets, newOutputs, outputOffsets);
+    return baeState;
+  }
+
+  /**
+   * Check and ensure the buffers are of the desired length and type, direct
+   * buffers or not.
+   * @param buffers the buffers to check
+   */
+  void checkInputBuffers(ByteBuffer[] buffers) {
+    int validInputs = 0;
+
+    for (ByteBuffer buffer : buffers) {
+      if (buffer == null) {
+        continue;
+      }
+
+      if (buffer.remaining() != decodeLength) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer, not of length " + decodeLength);
+      }
+      if (buffer.isDirect() != usingDirectBuffer) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer, isDirect should be " + usingDirectBuffer);
+      }
+
+      validInputs++;
+    }
+
+    if (validInputs < decoder.getNumDataUnits()) {
+      throw new HadoopIllegalArgumentException(
+          "No enough valid inputs are provided, not recoverable");
+    }
+  }
+
+  /**
+   * Check and ensure the buffers are of the desired length and type, direct
+   * buffers or not.
+   * @param buffers the buffers to check
+   */
+  void checkOutputBuffers(ByteBuffer[] buffers) {
+    for (ByteBuffer buffer : buffers) {
+      if (buffer == null) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer found, not allowing null");
+      }
+
+      if (buffer.remaining() != decodeLength) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer, not of length " + decodeLength);
+      }
+      if (buffer.isDirect() != usingDirectBuffer) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer, isDirect should be " + usingDirectBuffer);
+      }
+    }
+  }
+}
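The usingDirectBuffer flag and convertToByteArrayState() together support a dispatch pattern along the following lines in the new RawErasureDecoder.decode; this is a simplified sketch only (the committed version also short-circuits on empty input and restores buffer positions afterwards):

  public void decode(ByteBuffer[] inputs, int[] erasedIndexes,
                     ByteBuffer[] outputs) {
    ByteBufferDecodingState decodingState =
        new ByteBufferDecodingState(this, inputs, erasedIndexes, outputs);

    if (decodingState.usingDirectBuffer) {
      // Direct buffers stay on the ByteBuffer code path.
      doDecode(decodingState);
    } else {
      // Heap buffers are unwrapped once into arrays plus offsets.
      doDecode(decodingState.convertToByteArrayState());
    }
  }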

+ 98 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/ByteBufferEncodingState.java

@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A utility class that maintains encoding state during an encode call using
+ * ByteBuffer inputs.
+ */
+@InterfaceAudience.Private
+class ByteBufferEncodingState extends EncodingState {
+  ByteBuffer[] inputs;
+  ByteBuffer[] outputs;
+  boolean usingDirectBuffer;
+
+  ByteBufferEncodingState(RawErasureEncoder encoder,
+                          ByteBuffer[] inputs, ByteBuffer[] outputs) {
+    this.encoder = encoder;
+    ByteBuffer validInput = CoderUtil.findFirstValidInput(inputs);
+    this.encodeLength = validInput.remaining();
+    this.usingDirectBuffer = validInput.isDirect();
+    this.inputs = inputs;
+    this.outputs = outputs;
+
+    checkParameters(inputs, outputs);
+    checkBuffers(inputs);
+    checkBuffers(outputs);
+  }
+
+  /**
+   * Convert to a ByteArrayEncodingState when it's backed by on-heap arrays.
+   */
+  ByteArrayEncodingState convertToByteArrayState() {
+    int[] inputOffsets = new int[inputs.length];
+    int[] outputOffsets = new int[outputs.length];
+    byte[][] newInputs = new byte[inputs.length][];
+    byte[][] newOutputs = new byte[outputs.length][];
+
+    ByteBuffer buffer;
+    for (int i = 0; i < inputs.length; ++i) {
+      buffer = inputs[i];
+      inputOffsets[i] = buffer.arrayOffset() + buffer.position();
+      newInputs[i] = buffer.array();
+    }
+
+    for (int i = 0; i < outputs.length; ++i) {
+      buffer = outputs[i];
+      outputOffsets[i] = buffer.arrayOffset() + buffer.position();
+      newOutputs[i] = buffer.array();
+    }
+
+    ByteArrayEncodingState baeState = new ByteArrayEncodingState(encoder,
+        encodeLength, newInputs, inputOffsets, newOutputs, outputOffsets);
+    return baeState;
+  }
+
+  /**
+   * Check and ensure the buffers are of the desired length and type, direct
+   * buffers or not.
+   * @param buffers the buffers to check
+   */
+  void checkBuffers(ByteBuffer[] buffers) {
+    for (ByteBuffer buffer : buffers) {
+      if (buffer == null) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer found, not allowing null");
+      }
+
+      if (buffer.remaining() != encodeLength) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer, not of length " + encodeLength);
+      }
+      if (buffer.isDirect() != usingDirectBuffer) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid buffer, isDirect should be " + usingDirectBuffer);
+      }
+    }
+  }
+}
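Because checkBuffers rejects any mix of direct and heap buffers (or mismatched remaining lengths), a caller has to prepare the two buffer arrays uniformly. A hedged caller-side sketch, where encoder is any RawErasureEncoder obtained elsewhere and chunkSize is purely illustrative:

  int chunkSize = 64 * 1024;
  ByteBuffer[] inputs = new ByteBuffer[encoder.getNumDataUnits()];
  ByteBuffer[] outputs = new ByteBuffer[encoder.getNumParityUnits()];
  for (int i = 0; i < inputs.length; i++) {
    inputs[i] = ByteBuffer.allocateDirect(chunkSize);
    inputs[i].put(new byte[chunkSize]);   // fill with real cell data here
    inputs[i].flip();                     // remaining() == chunkSize
  }
  for (int i = 0; i < outputs.length; i++) {
    outputs[i] = ByteBuffer.allocateDirect(chunkSize);
  }
  encoder.encode(inputs, outputs);        // parity lands in outputs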

+ 199 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/CoderUtil.java

@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ECChunk;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+
+/**
+ * Helpful utilities for implementing some raw erasure coders.
+ */
+@InterfaceAudience.Private
+final class CoderUtil {
+
+  private CoderUtil() {
+    // Never called; utility class only.
+  }
+
+  private static byte[] emptyChunk = new byte[4096];
+
+  /**
+   * Return a shared chunk of ZERO bytes at least as long as the desired length.
+   * @param leastLength the minimum length required
+   * @return empty chunk of zero bytes
+   */
+  static byte[] getEmptyChunk(int leastLength) {
+    if (emptyChunk.length >= leastLength) {
+      return emptyChunk; // In most time
+    }
+
+    synchronized (CoderUtil.class) {
+      emptyChunk = new byte[leastLength];
+    }
+
+    return emptyChunk;
+  }
+
+  /**
+   * Fill the buffer with ZERO bytes from its current readable/writable
+   * position for the given length.
+   * @param buffer a buffer ready to read / write at least len bytes
+   * @param len number of bytes to reset to ZERO
+   * @return the buffer itself, with ZERO bytes written, the position and limit
+   *         are not changed after the call
+   */
+  static ByteBuffer resetBuffer(ByteBuffer buffer, int len) {
+    int pos = buffer.position();
+    buffer.put(getEmptyChunk(len), 0, len);
+    buffer.position(pos);
+
+    return buffer;
+  }
+
+  /**
+   * Fill the given range of the byte array buffer (either input or output)
+   * with ZERO bytes.
+   * @param buffer bytes array buffer
+   * @param offset start position of the range to reset
+   * @param len number of bytes to reset to ZERO
+   * @return the buffer itself
+   */
+  static byte[] resetBuffer(byte[] buffer, int offset, int len) {
+    byte[] empty = getEmptyChunk(len);
+    System.arraycopy(empty, 0, buffer, offset, len);
+
+    return buffer;
+  }
+
+  /**
+   * Initialize the output buffers with ZERO bytes.
+   * @param buffers output buffers to reset
+   * @param dataLen number of bytes to reset in each buffer
+   */
+  static void resetOutputBuffers(ByteBuffer[] buffers, int dataLen) {
+    for (ByteBuffer buffer : buffers) {
+      resetBuffer(buffer, dataLen);
+    }
+  }
+
+  /**
+   * Initialize the output buffers with ZERO bytes.
+   * @param buffers output buffers to reset
+   * @param offsets start offsets within the output buffers
+   * @param dataLen number of bytes to reset in each buffer
+   */
+  static void resetOutputBuffers(byte[][] buffers, int[] offsets,
+                                 int dataLen) {
+    for (int i = 0; i < buffers.length; i++) {
+      resetBuffer(buffers[i], offsets[i], dataLen);
+    }
+  }
+
+  /**
+   * Convert an array of ECChunks to an array of ByteBuffers.
+   * @param chunks chunks to convert into buffers
+   * @return an array of ByteBuffers
+   */
+  static ByteBuffer[] toBuffers(ECChunk[] chunks) {
+    ByteBuffer[] buffers = new ByteBuffer[chunks.length];
+
+    ECChunk chunk;
+    for (int i = 0; i < chunks.length; i++) {
+      chunk = chunks[i];
+      if (chunk == null) {
+        buffers[i] = null;
+      } else {
+        buffers[i] = chunk.getBuffer();
+      }
+    }
+
+    return buffers;
+  }
+
+  /**
+   * Clone an input bytes array as direct ByteBuffer.
+   * @param input the bytes array to clone, possibly null
+   * @param offset start position within the input array
+   * @param len number of bytes to copy
+   * @return direct ByteBuffer
+   */
+  static ByteBuffer cloneAsDirectByteBuffer(byte[] input, int offset, int len) {
+    if (input == null) { // an input can be null, if erased or not to read
+      return null;
+    }
+
+    ByteBuffer directBuffer = ByteBuffer.allocateDirect(len);
+    directBuffer.put(input, offset, len);
+    directBuffer.flip();
+    return directBuffer;
+  }
+
+  /**
+   * Get indexes array for items marked as null, either erased or
+   * not to read.
+   * @param inputs input buffers to scan
+   * @return indexes array of null items
+   */
+  static <T> int[] getNullIndexes(T[] inputs) {
+    int[] nullIndexes = new int[inputs.length];
+    int idx = 0;
+    for (int i = 0; i < inputs.length; i++) {
+      if (inputs[i] == null) {
+        nullIndexes[idx++] = i;
+      }
+    }
+
+    return Arrays.copyOf(nullIndexes, idx);
+  }
+
+  /**
+   * Find the first valid input from all the inputs.
+   * @param inputs input buffers to look for valid input
+   * @return the first valid input
+   */
+  static <T> T findFirstValidInput(T[] inputs) {
+    if (inputs.length > 0 && inputs[0] != null) {
+      return inputs[0];
+    }
+
+    for (T input : inputs) {
+      if (input != null) {
+        return input;
+      }
+    }
+
+    throw new HadoopIllegalArgumentException(
+        "Invalid inputs are found, all being null");
+  }
+
+  /**
+   * Pick up the indexes of valid (non-null) inputs.
+   * @param inputs decoding input buffers
+   * @param <T> the buffer element type
+   * @return indexes array of valid inputs
+   */
+  static <T> int[] getValidIndexes(T[] inputs) {
+    int[] validIndexes = new int[inputs.length];
+    int idx = 0;
+    for (int i = 0; i < inputs.length; i++) {
+      if (inputs[i] != null) {
+        validIndexes[idx++] = i;
+      }
+    }
+
+    return Arrays.copyOf(validIndexes, idx);
+  }
+}
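The null/valid index helpers are what coders use to tell erased (or skipped) positions apart from readable ones. A small usage sketch, only meaningful from within the rawcoder package since CoderUtil is package-private; the unit names are hypothetical:

  byte[] d0 = new byte[1024], d2 = new byte[1024], p1 = new byte[1024];
  // A (3 data + 2 parity) layout; data unit 1 and parity unit 0 were not read.
  byte[][] inputs = new byte[][] {d0, null, d2, null, p1};

  int[] valid = CoderUtil.getValidIndexes(inputs);      // {0, 2, 4}
  int[] missing = CoderUtil.getNullIndexes(inputs);     // {1, 3}
  byte[] first = CoderUtil.findFirstValidInput(inputs); // d0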

+ 55 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DecodingState.java

@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A utility class that maintains decoding state during a decode call.
+ */
+@InterfaceAudience.Private
+class DecodingState {
+  RawErasureDecoder decoder;
+  int decodeLength;
+
+  /**
+   * Check and validate decoding parameters, throw exception accordingly. The
+   * checking assumes it's an MDS code. Other codes can override this.
+   * @param inputs input buffers to check
+   * @param erasedIndexes indexes of erased units in the inputs array
+   * @param outputs output buffers to check
+   */
+  <T> void checkParameters(T[] inputs, int[] erasedIndexes,
+                           T[] outputs) {
+    if (inputs.length != decoder.getNumParityUnits() +
+        decoder.getNumDataUnits()) {
+      throw new IllegalArgumentException("Invalid inputs length");
+    }
+
+    if (erasedIndexes.length != outputs.length) {
+      throw new HadoopIllegalArgumentException(
+          "erasedIndexes and outputs mismatch in length");
+    }
+
+    if (erasedIndexes.length > decoder.getNumParityUnits()) {
+      throw new HadoopIllegalArgumentException(
+          "Too many erased, not recoverable");
+    }
+  }
+}
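For a concrete sense of what checkParameters accepts, a hypothetical RS(6,3) decode call has to satisfy these shape constraints: inputs span all nine units, outputs pair up with erasedIndexes one-to-one, and at most three units may be erased. A sketch, with decoder being an RS(6,3) RawErasureDecoder obtained elsewhere:

  byte[][] inputs = new byte[9][];          // 6 data + 3 parity positions
  for (int i = 0; i < inputs.length; i++) {
    inputs[i] = new byte[4096];             // previously read / encoded cells
  }
  inputs[1] = null;                         // data unit 1 is erased
  inputs[7] = null;                         // parity unit at index 7 is erased

  int[] erasedIndexes = new int[] {1, 7};   // what we want reconstructed
  byte[][] outputs = new byte[2][4096];     // one output per erased index
  decoder.decode(inputs, erasedIndexes, outputs);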

+ 7 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawDecoder.java

@@ -18,8 +18,7 @@
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-
-import java.nio.ByteBuffer;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 
 /**
  * A dummy raw decoder that does no real computation.
@@ -28,20 +27,19 @@ import java.nio.ByteBuffer;
  * instead of codec, and is intended for test only.
  */
 @InterfaceAudience.Private
-public class DummyRawDecoder extends AbstractRawErasureDecoder {
-  public DummyRawDecoder(int numDataUnits, int numParityUnits) {
-    super(numDataUnits, numParityUnits);
+public class DummyRawDecoder extends RawErasureDecoder {
+
+  public DummyRawDecoder(ErasureCoderOptions coderOptions) {
+    super(coderOptions);
   }
 
   @Override
-  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
-      ByteBuffer[] outputs) {
+  protected void doDecode(ByteBufferDecodingState decodingState) {
     // Nothing to do. Output buffers have already been reset
   }
 
   @Override
-  protected void doDecode(byte[][] inputs, int[] inputOffsets, int dataLen,
-      int[] erasedIndexes, byte[][] outputs, int[] outputOffsets) {
+  protected void doDecode(ByteArrayDecodingState decodingState) {
     // Nothing to do. Output buffers have already been reset
   }
 }

+ 7 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawEncoder.java

@@ -18,8 +18,7 @@
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-
-import java.nio.ByteBuffer;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 
 /**
  * A dummy raw encoder that does no real computation.
@@ -28,19 +27,19 @@ import java.nio.ByteBuffer;
  * instead of codec, and is intended for test only.
  */
 @InterfaceAudience.Private
-public class DummyRawEncoder extends AbstractRawErasureEncoder {
-  public DummyRawEncoder(int numDataUnits, int numParityUnits) {
-    super(numDataUnits, numParityUnits);
+public class DummyRawEncoder extends RawErasureEncoder {
+
+  public DummyRawEncoder(ErasureCoderOptions coderOptions) {
+    super(coderOptions);
   }
 
   @Override
-  protected void doEncode(ByteBuffer[] inputs, ByteBuffer[] outputs) {
+  protected void doEncode(ByteArrayEncodingState encodingState) {
     // Nothing to do. Output buffers have already been reset
   }
 
   @Override
-  protected void doEncode(byte[][] inputs, int[] inputOffsets, int dataLen,
-      byte[][] outputs, int[] outputOffsets) {
+  protected void doEncode(ByteBufferEncodingState encodingState) {
     // Nothing to do. Output buffers have already been reset
   }
 }

+ 6 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/DummyRawErasureCoderFactory.java

@@ -18,19 +18,21 @@
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 
 /**
  * A raw erasure coder factory for dummy raw coders.
  */
 @InterfaceAudience.Private
 public class DummyRawErasureCoderFactory implements RawErasureCoderFactory {
+
   @Override
-  public RawErasureEncoder createEncoder(int numDataUnits, int numParityUnits) {
-    return new DummyRawEncoder(numDataUnits, numParityUnits);
+  public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
+    return new DummyRawEncoder(coderOptions);
   }
 
   @Override
-  public RawErasureDecoder createDecoder(int numDataUnits, int numParityUnits) {
-    return new DummyRawDecoder(numDataUnits, numParityUnits);
+  public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
+    return new DummyRawDecoder(coderOptions);
   }
 }
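The dummy factory above shows the signature change that every factory in this patch follows: coders are now built from a single ErasureCoderOptions object instead of two ints. A hedged end-to-end sketch using the real RS factory (values illustrative; assumes the conventional data-then-parity input ordering):

  ErasureCoderOptions options = new ErasureCoderOptions(6, 3);
  RawErasureCoderFactory factory = new RSRawErasureCoderFactory();
  RawErasureEncoder encoder = factory.createEncoder(options);
  RawErasureDecoder decoder = factory.createDecoder(options);

  int chunkSize = 1024;
  byte[][] data = new byte[6][chunkSize];        // fill with real data
  byte[][] parity = new byte[3][chunkSize];
  encoder.encode(data, parity);

  // Drop data unit 2 and reconstruct it from the surviving units.
  byte[][] decodeInputs = new byte[9][];
  System.arraycopy(data, 0, decodeInputs, 0, 6);
  System.arraycopy(parity, 0, decodeInputs, 6, 3);
  decodeInputs[2] = null;
  byte[][] recovered = new byte[1][chunkSize];
  decoder.decode(decodeInputs, new int[] {2}, recovered);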

+ 44 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/EncodingState.java

@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * A utility class that maintains encoding state during an encode call.
+ */
+@InterfaceAudience.Private
+abstract class EncodingState {
+  RawErasureEncoder encoder;
+  int encodeLength;
+
+  /**
+   * Check and validate encoding parameters, throw exception accordingly.
+   * @param inputs input buffers to check
+   * @param outputs output buffers to check
+   */
+  <T> void checkParameters(T[] inputs, T[] outputs) {
+    if (inputs.length != encoder.getNumDataUnits()) {
+      throw new HadoopIllegalArgumentException("Invalid inputs length");
+    }
+    if (outputs.length != encoder.getNumParityUnits()) {
+      throw new HadoopIllegalArgumentException("Invalid outputs length");
+    }
+  }
+}

+ 25 - 23
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawDecoder.java

@@ -19,7 +19,7 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.io.erasurecode.rawcoder.util.CoderUtil;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.util.DumpUtil;
 import org.apache.hadoop.io.erasurecode.rawcoder.util.GF256;
 import org.apache.hadoop.io.erasurecode.rawcoder.util.RSUtil;
@@ -34,7 +34,7 @@ import java.util.Arrays;
  * from HDFS-RAID, and also compatible with the native/ISA-L coder.
  */
 @InterfaceAudience.Private
-public class RSRawDecoder extends AbstractRawErasureDecoder {
+public class RSRawDecoder extends RawErasureDecoder {
   //relevant to schema and won't change during decode calls
   private byte[] encodeMatrix;
 
@@ -54,52 +54,54 @@ public class RSRawDecoder extends AbstractRawErasureDecoder {
   private int numErasedDataUnits;
   private boolean[] erasureFlags;
 
-  public RSRawDecoder(int numDataUnits, int numParityUnits) {
-    super(numDataUnits, numParityUnits);
-    if (numDataUnits + numParityUnits >= RSUtil.GF.getFieldSize()) {
+  public RSRawDecoder(ErasureCoderOptions coderOptions) {
+    super(coderOptions);
+
+    int numAllUnits = getNumAllUnits();
+    if (getNumAllUnits() >= RSUtil.GF.getFieldSize()) {
       throw new HadoopIllegalArgumentException(
               "Invalid getNumDataUnits() and numParityUnits");
     }
 
-    int numAllUnits = getNumDataUnits() + numParityUnits;
     encodeMatrix = new byte[numAllUnits * getNumDataUnits()];
     RSUtil.genCauchyMatrix(encodeMatrix, numAllUnits, getNumDataUnits());
-    if (isAllowingVerboseDump()) {
-      DumpUtil.dumpMatrix(encodeMatrix, numDataUnits, numAllUnits);
+    if (allowVerboseDump()) {
+      DumpUtil.dumpMatrix(encodeMatrix, getNumDataUnits(), numAllUnits);
     }
   }
 
   @Override
-  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
-                          ByteBuffer[] outputs) {
-    prepareDecoding(inputs, erasedIndexes);
+  protected void doDecode(ByteBufferDecodingState decodingState) {
+    CoderUtil.resetOutputBuffers(decodingState.outputs,
+        decodingState.decodeLength);
+    prepareDecoding(decodingState.inputs, decodingState.erasedIndexes);
 
     ByteBuffer[] realInputs = new ByteBuffer[getNumDataUnits()];
     for (int i = 0; i < getNumDataUnits(); i++) {
-      realInputs[i] = inputs[validIndexes[i]];
+      realInputs[i] = decodingState.inputs[validIndexes[i]];
     }
-    RSUtil.encodeData(gfTables, realInputs, outputs);
+    RSUtil.encodeData(gfTables, realInputs, decodingState.outputs);
   }
 
   @Override
-  protected void doDecode(byte[][] inputs, int[] inputOffsets,
-                          int dataLen, int[] erasedIndexes,
-                          byte[][] outputs, int[] outputOffsets) {
-    prepareDecoding(inputs, erasedIndexes);
+  protected void doDecode(ByteArrayDecodingState decodingState) {
+    int dataLen = decodingState.decodeLength;
+    CoderUtil.resetOutputBuffers(decodingState.outputs,
+        decodingState.outputOffsets, dataLen);
+    prepareDecoding(decodingState.inputs, decodingState.erasedIndexes);
 
     byte[][] realInputs = new byte[getNumDataUnits()][];
     int[] realInputOffsets = new int[getNumDataUnits()];
     for (int i = 0; i < getNumDataUnits(); i++) {
-      realInputs[i] = inputs[validIndexes[i]];
-      realInputOffsets[i] = inputOffsets[validIndexes[i]];
+      realInputs[i] = decodingState.inputs[validIndexes[i]];
+      realInputOffsets[i] = decodingState.inputOffsets[validIndexes[i]];
     }
     RSUtil.encodeData(gfTables, dataLen, realInputs, realInputOffsets,
-            outputs, outputOffsets);
+        decodingState.outputs, decodingState.outputOffsets);
   }
 
   private <T> void prepareDecoding(T[] inputs, int[] erasedIndexes) {
-    int[] tmpValidIndexes = new int[getNumDataUnits()];
-    CoderUtil.makeValidIndexes(inputs, tmpValidIndexes);
+    int[] tmpValidIndexes = CoderUtil.getValidIndexes(inputs);
     if (Arrays.equals(this.cachedErasedIndexes, erasedIndexes) &&
         Arrays.equals(this.validIndexes, tmpValidIndexes)) {
       return; // Optimization. Nothing to do
@@ -132,7 +134,7 @@ public class RSRawDecoder extends AbstractRawErasureDecoder {
 
     RSUtil.initTables(getNumDataUnits(), erasedIndexes.length,
         decodeMatrix, 0, gfTables);
-    if (isAllowingVerboseDump()) {
+    if (allowVerboseDump()) {
       System.out.println(DumpUtil.bytesToHex(gfTables, -1));
     }
   }
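One behavioural detail worth calling out: erasedIndexes only lists the units the caller wants back, so additional null inputs are treated as not-to-read rather than reconstructed, and the decoder caches the generated decode matrix for repeated calls with the same erasure pattern. A hedged sketch (decoder and chunkSize as in the factory example above):

  byte[][] inputs = new byte[9][];            // RS(6,3): 6 data + 3 parity
  for (int i = 0; i < inputs.length; i++) {
    inputs[i] = new byte[chunkSize];          // previously encoded units
  }
  inputs[1] = null;                           // erased and wanted back
  inputs[4] = null;                           // also missing, but not requested

  byte[][] outputs = new byte[][] {new byte[chunkSize]};
  decoder.decode(inputs, new int[] {1}, outputs);  // reconstructs unit 1 only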

+ 36 - 30
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawDecoderLegacy.java

@@ -19,7 +19,7 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.io.erasurecode.rawcoder.util.CoderUtil;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.util.RSUtil;
 
 import java.nio.ByteBuffer;
@@ -34,7 +34,7 @@ import java.nio.ByteBuffer;
  * addressed in HADOOP-11871.
  */
 @InterfaceAudience.Private
-public class RSRawDecoderLegacy extends AbstractRawErasureDecoder {
+public class RSRawDecoderLegacy extends RawErasureDecoder {
   // To describe and calculate the needed Vandermonde matrix
   private int[] errSignature;
   private int[] primitivePower;
@@ -61,16 +61,16 @@ public class RSRawDecoderLegacy extends AbstractRawErasureDecoder {
   private ByteBuffer[] adjustedDirectBufferOutputsParameter =
       new ByteBuffer[getNumParityUnits()];
 
-  public RSRawDecoderLegacy(int numDataUnits, int numParityUnits) {
-    super(numDataUnits, numParityUnits);
-    if (numDataUnits + numParityUnits >= RSUtil.GF.getFieldSize()) {
+  public RSRawDecoderLegacy(ErasureCoderOptions coderOptions) {
+    super(coderOptions);
+    if (getNumAllUnits() >= RSUtil.GF.getFieldSize()) {
       throw new HadoopIllegalArgumentException(
               "Invalid numDataUnits and numParityUnits");
     }
 
-    this.errSignature = new int[numParityUnits];
-    this.primitivePower = RSUtil.getPrimitivePower(numDataUnits,
-        numParityUnits);
+    this.errSignature = new int[getNumParityUnits()];
+    this.primitivePower = RSUtil.getPrimitivePower(getNumDataUnits(),
+        getNumParityUnits());
   }
 
   @Override
@@ -129,16 +129,18 @@ public class RSRawDecoderLegacy extends AbstractRawErasureDecoder {
   }
 
   @Override
-  protected void doDecode(byte[][] inputs, int[] inputOffsets,
-                          int dataLen, int[] erasedIndexes,
-                          byte[][] outputs, int[] outputOffsets) {
+  protected void doDecode(ByteArrayDecodingState decodingState) {
+    int dataLen = decodingState.decodeLength;
+    CoderUtil.resetOutputBuffers(decodingState.outputs,
+        decodingState.outputOffsets, dataLen);
+
     /**
      * As passed parameters are friendly to callers but not to the underlying
      * implementations, so we have to adjust them before calling doDecodeImpl.
      */
 
     int[] erasedOrNotToReadIndexes =
-        CoderUtil.getErasedOrNotToReadIndexes(inputs);
+        CoderUtil.getNullIndexes(decodingState.inputs);
 
     // Prepare for adjustedOutputsParameter
 
@@ -148,16 +150,18 @@ public class RSRawDecoderLegacy extends AbstractRawErasureDecoder {
       adjustedOutputOffsets[i] = 0;
     }
     // Use the caller passed buffers in erasedIndexes positions
-    for (int outputIdx = 0, i = 0; i < erasedIndexes.length; i++) {
+    for (int outputIdx = 0, i = 0;
+         i < decodingState.erasedIndexes.length; i++) {
       boolean found = false;
       for (int j = 0; j < erasedOrNotToReadIndexes.length; j++) {
         // If this index is one requested by the caller via erasedIndexes, then
         // we use the passed output buffer to avoid copying data thereafter.
-        if (erasedIndexes[i] == erasedOrNotToReadIndexes[j]) {
+        if (decodingState.erasedIndexes[i] == erasedOrNotToReadIndexes[j]) {
           found = true;
-          adjustedByteArrayOutputsParameter[j] = resetBuffer(
-                  outputs[outputIdx], outputOffsets[outputIdx], dataLen);
-          adjustedOutputOffsets[j] = outputOffsets[outputIdx];
+          adjustedByteArrayOutputsParameter[j] = CoderUtil.resetBuffer(
+              decodingState.outputs[outputIdx],
+              decodingState.outputOffsets[outputIdx], dataLen);
+          adjustedOutputOffsets[j] = decodingState.outputOffsets[outputIdx];
           outputIdx++;
         }
       }
@@ -169,22 +173,22 @@ public class RSRawDecoderLegacy extends AbstractRawErasureDecoder {
     // Use shared buffers for other positions (not set yet)
     for (int bufferIdx = 0, i = 0; i < erasedOrNotToReadIndexes.length; i++) {
       if (adjustedByteArrayOutputsParameter[i] == null) {
-        adjustedByteArrayOutputsParameter[i] = resetBuffer(
+        adjustedByteArrayOutputsParameter[i] = CoderUtil.resetBuffer(
             checkGetBytesArrayBuffer(bufferIdx, dataLen), 0, dataLen);
         adjustedOutputOffsets[i] = 0; // Always 0 for such temp output
         bufferIdx++;
       }
     }
 
-    doDecodeImpl(inputs, inputOffsets, dataLen, erasedOrNotToReadIndexes,
+    doDecodeImpl(decodingState.inputs, decodingState.inputOffsets,
+        dataLen, erasedOrNotToReadIndexes,
         adjustedByteArrayOutputsParameter, adjustedOutputOffsets);
   }
 
   @Override
-  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
-                          ByteBuffer[] outputs) {
-    ByteBuffer validInput = CoderUtil.findFirstValidInput(inputs);
-    int dataLen = validInput.remaining();
+  protected void doDecode(ByteBufferDecodingState decodingState) {
+    int dataLen = decodingState.decodeLength;
+    CoderUtil.resetOutputBuffers(decodingState.outputs, dataLen);
 
     /**
      * As passed parameters are friendly to callers but not to the underlying
@@ -192,7 +196,7 @@ public class RSRawDecoderLegacy extends AbstractRawErasureDecoder {
      */
 
     int[] erasedOrNotToReadIndexes =
-        CoderUtil.getErasedOrNotToReadIndexes(inputs);
+        CoderUtil.getNullIndexes(decodingState.inputs);
 
     // Prepare for adjustedDirectBufferOutputsParameter
 
@@ -201,15 +205,16 @@ public class RSRawDecoderLegacy extends AbstractRawErasureDecoder {
       adjustedDirectBufferOutputsParameter[i] = null;
     }
     // Use the caller passed buffers in erasedIndexes positions
-    for (int outputIdx = 0, i = 0; i < erasedIndexes.length; i++) {
+    for (int outputIdx = 0, i = 0;
+         i < decodingState.erasedIndexes.length; i++) {
       boolean found = false;
       for (int j = 0; j < erasedOrNotToReadIndexes.length; j++) {
         // If this index is one requested by the caller via erasedIndexes, then
         // we use the passed output buffer to avoid copying data thereafter.
-        if (erasedIndexes[i] == erasedOrNotToReadIndexes[j]) {
+        if (decodingState.erasedIndexes[i] == erasedOrNotToReadIndexes[j]) {
           found = true;
-          adjustedDirectBufferOutputsParameter[j] =
-              resetBuffer(outputs[outputIdx++], dataLen);
+          adjustedDirectBufferOutputsParameter[j] = CoderUtil.resetBuffer(
+              decodingState.outputs[outputIdx++], dataLen);
         }
       }
       if (!found) {
@@ -223,12 +228,13 @@ public class RSRawDecoderLegacy extends AbstractRawErasureDecoder {
         ByteBuffer buffer = checkGetDirectBuffer(bufferIdx, dataLen);
         buffer.position(0);
         buffer.limit(dataLen);
-        adjustedDirectBufferOutputsParameter[i] = resetBuffer(buffer, dataLen);
+        adjustedDirectBufferOutputsParameter[i] =
+            CoderUtil.resetBuffer(buffer, dataLen);
         bufferIdx++;
       }
     }
 
-    doDecodeImpl(inputs, erasedOrNotToReadIndexes,
+    doDecodeImpl(decodingState.inputs, erasedOrNotToReadIndexes,
         adjustedDirectBufferOutputsParameter);
   }
 

+ 25 - 20
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawEncoder.java

@@ -19,11 +19,10 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.util.DumpUtil;
 import org.apache.hadoop.io.erasurecode.rawcoder.util.RSUtil;
 
-import java.nio.ByteBuffer;
-
 /**
  * A raw erasure encoder in RS code scheme in pure Java in case native one
  * isn't available in some environment. Please always use native implementations
@@ -31,7 +30,7 @@ import java.nio.ByteBuffer;
  * from HDFS-RAID, and also compatible with the native/ISA-L coder.
  */
 @InterfaceAudience.Private
-public class RSRawEncoder extends AbstractRawErasureEncoder {
+public class RSRawEncoder extends RawErasureEncoder {
   // relevant to schema and won't change during encode calls.
   private byte[] encodeMatrix;
   /**
@@ -40,36 +39,42 @@ public class RSRawEncoder extends AbstractRawErasureEncoder {
    */
   private byte[] gfTables;
 
-  public RSRawEncoder(int numDataUnits, int numParityUnits) {
-    super(numDataUnits, numParityUnits);
+  public RSRawEncoder(ErasureCoderOptions coderOptions) {
+    super(coderOptions);
 
-    if (numDataUnits + numParityUnits >= RSUtil.GF.getFieldSize()) {
+    if (getNumAllUnits() >= RSUtil.GF.getFieldSize()) {
       throw new HadoopIllegalArgumentException(
           "Invalid numDataUnits and numParityUnits");
     }
 
-    encodeMatrix = new byte[getNumAllUnits() * numDataUnits];
-    RSUtil.genCauchyMatrix(encodeMatrix, getNumAllUnits(), numDataUnits);
-    if (isAllowingVerboseDump()) {
-      DumpUtil.dumpMatrix(encodeMatrix, numDataUnits, getNumAllUnits());
+    encodeMatrix = new byte[getNumAllUnits() * getNumDataUnits()];
+    RSUtil.genCauchyMatrix(encodeMatrix, getNumAllUnits(), getNumDataUnits());
+    if (allowVerboseDump()) {
+      DumpUtil.dumpMatrix(encodeMatrix, getNumDataUnits(), getNumAllUnits());
     }
-    gfTables = new byte[getNumAllUnits() * numDataUnits * 32];
-    RSUtil.initTables(numDataUnits, numParityUnits, encodeMatrix,
-        numDataUnits * numDataUnits, gfTables);
-    if (isAllowingVerboseDump()) {
+    gfTables = new byte[getNumAllUnits() * getNumDataUnits() * 32];
+    RSUtil.initTables(getNumDataUnits(), getNumParityUnits(), encodeMatrix,
+        getNumDataUnits() * getNumDataUnits(), gfTables);
+    if (allowVerboseDump()) {
       System.out.println(DumpUtil.bytesToHex(gfTables, -1));
     }
   }
 
   @Override
-  protected void doEncode(ByteBuffer[] inputs, ByteBuffer[] outputs) {
-    RSUtil.encodeData(gfTables, inputs, outputs);
+  protected void doEncode(ByteBufferEncodingState encodingState) {
+    CoderUtil.resetOutputBuffers(encodingState.outputs,
+        encodingState.encodeLength);
+    RSUtil.encodeData(gfTables, encodingState.inputs, encodingState.outputs);
   }
 
   @Override
-  protected void doEncode(byte[][] inputs, int[] inputOffsets,
-                          int dataLen, byte[][] outputs, int[] outputOffsets) {
-    RSUtil.encodeData(gfTables, dataLen, inputs, inputOffsets, outputs,
-        outputOffsets);
+  protected void doEncode(ByteArrayEncodingState encodingState) {
+    CoderUtil.resetOutputBuffers(encodingState.outputs,
+        encodingState.outputOffsets,
+        encodingState.encodeLength);
+    RSUtil.encodeData(gfTables, encodingState.encodeLength,
+        encodingState.inputs,
+        encodingState.inputOffsets, encodingState.outputs,
+        encodingState.outputOffsets);
   }
 }

+ 50 - 32
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawEncoderLegacy.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 import org.apache.hadoop.io.erasurecode.rawcoder.util.RSUtil;
 
 import java.nio.ByteBuffer;
@@ -29,20 +30,20 @@ import java.util.Arrays;
  * when possible.
  */
 @InterfaceAudience.Private
-public class RSRawEncoderLegacy extends AbstractRawErasureEncoder {
+public class RSRawEncoderLegacy extends RawErasureEncoder {
   private int[] generatingPolynomial;
 
-  public RSRawEncoderLegacy(int numDataUnits, int numParityUnits) {
-    super(numDataUnits, numParityUnits);
+  public RSRawEncoderLegacy(ErasureCoderOptions coderOptions) {
+    super(coderOptions);
 
     assert (getNumDataUnits() + getNumParityUnits() < RSUtil.GF.getFieldSize());
 
-    int[] primitivePower = RSUtil.getPrimitivePower(numDataUnits,
-        numParityUnits);
+    int[] primitivePower = RSUtil.getPrimitivePower(getNumDataUnits(),
+        getNumParityUnits());
     // compute generating polynomial
     int[] gen = {1};
     int[] poly = new int[2];
-    for (int i = 0; i < numParityUnits; i++) {
+    for (int i = 0; i < getNumParityUnits(); i++) {
       poly[0] = primitivePower[i];
       poly[1] = 1;
       gen = RSUtil.GF.multiply(gen, poly);
@@ -52,15 +53,21 @@ public class RSRawEncoderLegacy extends AbstractRawErasureEncoder {
   }
 
   @Override
-  protected void doEncode(ByteBuffer[] inputs, ByteBuffer[] outputs) {
+  protected void doEncode(ByteBufferEncodingState encodingState) {
+    CoderUtil.resetOutputBuffers(encodingState.outputs,
+        encodingState.encodeLength);
     // parity units + data units
-    ByteBuffer[] all = new ByteBuffer[outputs.length + inputs.length];
+    ByteBuffer[] all = new ByteBuffer[encodingState.outputs.length +
+        encodingState.inputs.length];
 
-    if (isAllowingChangeInputs()) {
-      System.arraycopy(outputs, 0, all, 0, outputs.length);
-      System.arraycopy(inputs, 0, all, outputs.length, inputs.length);
+    if (allowChangeInputs()) {
+      System.arraycopy(encodingState.outputs, 0, all, 0,
+          encodingState.outputs.length);
+      System.arraycopy(encodingState.inputs, 0, all,
+          encodingState.outputs.length, encodingState.inputs.length);
     } else {
-      System.arraycopy(outputs, 0, all, 0, outputs.length);
+      System.arraycopy(encodingState.outputs, 0, all, 0,
+          encodingState.outputs.length);
 
       /**
        * Note when this coder would be really (rarely) used in a production
@@ -68,11 +75,11 @@ public class RSRawEncoderLegacy extends AbstractRawErasureEncoder {
        * buffers avoiding reallocating.
        */
       ByteBuffer tmp;
-      for (int i = 0; i < inputs.length; i++) {
-        tmp = ByteBuffer.allocate(inputs[i].remaining());
-        tmp.put(inputs[i]);
+      for (int i = 0; i < encodingState.inputs.length; i++) {
+        tmp = ByteBuffer.allocate(encodingState.inputs[i].remaining());
+        tmp.put(encodingState.inputs[i]);
         tmp.flip();
-        all[outputs.length + i] = tmp;
+        all[encodingState.outputs.length + i] = tmp;
       }
     }
 
@@ -81,27 +88,38 @@ public class RSRawEncoderLegacy extends AbstractRawErasureEncoder {
   }
 
   @Override
-  protected void doEncode(byte[][] inputs, int[] inputOffsets,
-                          int dataLen, byte[][] outputs,
-                          int[] outputOffsets) {
+  protected void doEncode(ByteArrayEncodingState encodingState) {
+    int dataLen = encodingState.encodeLength;
+    CoderUtil.resetOutputBuffers(encodingState.outputs,
+        encodingState.outputOffsets, dataLen);
     // parity units + data units
-    byte[][] all = new byte[outputs.length + inputs.length][];
-    int[] allOffsets = new int[outputOffsets.length + inputOffsets.length];
+    byte[][] all = new byte[encodingState.outputs.length +
+        encodingState.inputs.length][];
+    int[] allOffsets = new int[encodingState.outputOffsets.length +
+        encodingState.inputOffsets.length];
 
-    if (isAllowingChangeInputs()) {
-      System.arraycopy(outputs, 0, all, 0, outputs.length);
-      System.arraycopy(inputs, 0, all, outputs.length, inputs.length);
+    if (allowChangeInputs()) {
+      System.arraycopy(encodingState.outputs, 0, all, 0,
+          encodingState.outputs.length);
+      System.arraycopy(encodingState.inputs, 0, all,
+          encodingState.outputs.length, encodingState.inputs.length);
 
-      System.arraycopy(outputOffsets, 0, allOffsets, 0, outputOffsets.length);
-      System.arraycopy(inputOffsets, 0, allOffsets,
-          outputOffsets.length, inputOffsets.length);
+      System.arraycopy(encodingState.outputOffsets, 0, allOffsets, 0,
+          encodingState.outputOffsets.length);
+      System.arraycopy(encodingState.inputOffsets, 0, allOffsets,
+          encodingState.outputOffsets.length,
+          encodingState.inputOffsets.length);
     } else {
-      System.arraycopy(outputs, 0, all, 0, outputs.length);
-      System.arraycopy(outputOffsets, 0, allOffsets, 0, outputOffsets.length);
+      System.arraycopy(encodingState.outputs, 0, all, 0,
+          encodingState.outputs.length);
+      System.arraycopy(encodingState.outputOffsets, 0, allOffsets, 0,
+          encodingState.outputOffsets.length);
 
-      for (int i = 0; i < inputs.length; i++) {
-        all[outputs.length + i] = Arrays.copyOfRange(inputs[i],
-            inputOffsets[i], inputOffsets[i] + dataLen);
+      for (int i = 0; i < encodingState.inputs.length; i++) {
+        all[encodingState.outputs.length + i] =
+            Arrays.copyOfRange(encodingState.inputs[i],
+            encodingState.inputOffsets[i],
+                encodingState.inputOffsets[i] + dataLen);
       }
     }
 

+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawErasureCoderFactory.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 
 /**
  * A raw coder factory for the new raw Reed-Solomon coder in Java.
@@ -26,12 +27,12 @@ import org.apache.hadoop.classification.InterfaceAudience;
 public class RSRawErasureCoderFactory implements RawErasureCoderFactory {
 
   @Override
-  public RawErasureEncoder createEncoder(int numDataUnits, int numParityUnits) {
-    return new RSRawEncoder(numDataUnits, numParityUnits);
+  public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
+    return new RSRawEncoder(coderOptions);
   }
 
   @Override
-  public RawErasureDecoder createDecoder(int numDataUnits, int numParityUnits) {
-    return new RSRawDecoder(numDataUnits, numParityUnits);
+  public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
+    return new RSRawDecoder(coderOptions);
   }
 }

+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RSRawErasureCoderFactoryLegacy.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 
 /**
  * A raw coder factory for the legacy raw Reed-Solomon coder in Java.
@@ -26,12 +27,12 @@ import org.apache.hadoop.classification.InterfaceAudience;
 public class RSRawErasureCoderFactoryLegacy implements RawErasureCoderFactory {
 
   @Override
-  public RawErasureEncoder createEncoder(int numDataUnits, int numParityUnits) {
-    return new RSRawEncoderLegacy(numDataUnits, numParityUnits);
+  public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
+    return new RSRawEncoderLegacy(coderOptions);
   }
 
   @Override
-  public RawErasureDecoder createDecoder(int numDataUnits, int numParityUnits) {
-    return new RSRawDecoderLegacy(numDataUnits, numParityUnits);
+  public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
+    return new RSRawDecoderLegacy(coderOptions);
   }
 }

+ 0 - 73
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java

@@ -1,73 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.erasurecode.rawcoder;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configurable;
-
-/**
- * RawErasureCoder is a common interface for {@link RawErasureEncoder} and
- * {@link RawErasureDecoder} as both encoder and decoder share some properties.
- *
- * RawErasureCoder is part of ErasureCodec framework, where ErasureCoder is
- * used to encode/decode a group of blocks (BlockGroup) according to the codec
- * specific BlockGroup layout and logic. An ErasureCoder extracts chunks of
- * data from the blocks and can employ various low level RawErasureCoders to
- * perform encoding/decoding against the chunks.
- *
- * To distinguish from ErasureCoder, here RawErasureCoder is used to mean the
- * low level constructs, since it only takes care of the math calculation with
- * a group of byte buffers.
- */
-@InterfaceAudience.Private
-public interface RawErasureCoder extends Configurable {
-
-  /**
-   * Get a coder option value.
-   * @param option
-   * @return option value
-   */
-  public Object getCoderOption(CoderOption option);
-
-  /**
-   * Set a coder option value.
-   * @param option
-   * @param value
-   */
-  public void setCoderOption(CoderOption option, Object value);
-
-  /**
-   * The number of data input units for the coding. A unit can be a byte,
-   * chunk or buffer or even a block.
-   * @return count of data input units
-   */
-  public int getNumDataUnits();
-
-  /**
-   * The number of parity output units for the coding. A unit can be a byte,
-   * chunk, buffer or even a block.
-   * @return count of parity output units
-   */
-  public int getNumParityUnits();
-
-  /**
-   * Should be called when release this coder. Good chance to release encoding
-   * or decoding buffers
-   */
-  public void release();
-}

+ 5 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 
 /**
  * Raw erasure coder factory that can be used to create raw encoder and decoder.
@@ -29,17 +30,15 @@ public interface RawErasureCoderFactory {
 
   /**
    * Create raw erasure encoder.
-   * @param numDataUnits number of data units in a coding group
-   * @param numParityUnits number of parity units in a coding group
+   * @param coderOptions the options used to create the encoder
    * @return raw erasure encoder
    */
-  public RawErasureEncoder createEncoder(int numDataUnits, int numParityUnits);
+  RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions);
 
   /**
    * Create raw erasure decoder.
-   * @param numDataUnits number of data units in a coding group
-   * @param numParityUnits number of parity units in a coding group
+   * @param coderOptions the options used to create the decoder
    * @return raw erasure decoder
    */
-  public RawErasureDecoder createDecoder(int numDataUnits, int numParityUnits);
+  RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions);
 }
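
With this change, the factory methods take a single ErasureCoderOptions instead of the two unit counts. A minimal usage sketch, assuming the two-argument ErasureCoderOptions(int, int) constructor introduced elsewhere in this patch; the RS(6, 3) layout is chosen purely for illustration:

    import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
    import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory;
    import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
    import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
    import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

    public class RsCoderFactorySketch {
      public static void main(String[] args) {
        // 6 data units + 3 parity units; the counts are arbitrary here.
        ErasureCoderOptions options = new ErasureCoderOptions(6, 3);

        RawErasureCoderFactory factory = new RSRawErasureCoderFactory();
        RawErasureEncoder encoder = factory.createEncoder(options);
        RawErasureDecoder decoder = factory.createDecoder(options);

        System.out.println(encoder.getNumDataUnits() + " data / "
            + encoder.getNumParityUnits() + " parity units");

        // Let the coders release any buffers once they are no longer needed.
        encoder.release();
        decoder.release();
      }
    }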

+ 128 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureDecoder.java

@@ -19,18 +19,34 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.erasurecode.ECChunk;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 
 import java.nio.ByteBuffer;
 
 /**
- * RawErasureDecoder performs decoding given chunks of input data and generates
- * missing data that corresponds to an erasure code scheme, like XOR and
- * Reed-Solomon.
+ * An abstract raw erasure decoder that's to be inherited by new decoders.
  *
- * It extends the {@link RawErasureCoder} interface.
+ * Raw erasure coder is part of erasure codec framework, where erasure coder is
+ * used to encode/decode a group of blocks (BlockGroup) according to the codec
+ * specific BlockGroup layout and logic. An erasure coder extracts chunks of
+ * data from the blocks and can employ various low level raw erasure coders to
+ * perform encoding/decoding against the chunks.
+ *
+ * To distinguish it from an erasure coder, raw erasure coder here refers to
+ * the low-level construct, since it only takes care of the math calculation
+ * with a group of byte buffers.
+ *
+ * Note it mainly provides decode() calls, which should be stateless and may be
+ * made thread-safe in the future.
  */
 @InterfaceAudience.Private
-public interface RawErasureDecoder extends RawErasureCoder {
+public abstract class RawErasureDecoder {
+
+  private final ErasureCoderOptions coderOptions;
+
+  public RawErasureDecoder(ErasureCoderOptions coderOptions) {
+    this.coderOptions = coderOptions;
+  }
 
   /**
    * Decode with inputs and erasedIndexes, generates outputs.
@@ -64,8 +80,44 @@ public interface RawErasureDecoder extends RawErasureCoder {
    * @param outputs output buffers to put decoded data into according to
    *                erasedIndexes, ready for read after the call
    */
-  void decode(ByteBuffer[] inputs, int[] erasedIndexes,
-                     ByteBuffer[] outputs);
+  public void decode(ByteBuffer[] inputs, int[] erasedIndexes,
+                     ByteBuffer[] outputs) {
+    ByteBufferDecodingState decodingState = new ByteBufferDecodingState(this,
+        inputs, erasedIndexes, outputs);
+
+    boolean usingDirectBuffer = decodingState.usingDirectBuffer;
+    int dataLen = decodingState.decodeLength;
+    if (dataLen == 0) {
+      return;
+    }
+
+    int[] inputPositions = new int[inputs.length];
+    for (int i = 0; i < inputPositions.length; i++) {
+      if (inputs[i] != null) {
+        inputPositions[i] = inputs[i].position();
+      }
+    }
+
+    if (usingDirectBuffer) {
+      doDecode(decodingState);
+    } else {
+      ByteArrayDecodingState badState = decodingState.convertToByteArrayState();
+      doDecode(badState);
+    }
+
+    for (int i = 0; i < inputs.length; i++) {
+      if (inputs[i] != null) {
+        // dataLen bytes consumed
+        inputs[i].position(inputPositions[i] + dataLen);
+      }
+    }
+  }
+
+  /**
+   * Perform the real decoding using direct ByteBuffer.
+   * @param decodingState the decoding state
+   */
+  protected abstract void doDecode(ByteBufferDecodingState decodingState);
 
   /**
    * Decode with inputs and erasedIndexes, generates outputs. More see above.
@@ -75,7 +127,23 @@ public interface RawErasureDecoder extends RawErasureCoder {
    * @param outputs output buffers to put decoded data into according to
    *                erasedIndexes, ready for read after the call
    */
-  void decode(byte[][] inputs, int[] erasedIndexes, byte[][] outputs);
+  public void decode(byte[][] inputs, int[] erasedIndexes, byte[][] outputs) {
+    ByteArrayDecodingState decodingState = new ByteArrayDecodingState(this,
+        inputs, erasedIndexes, outputs);
+
+    if (decodingState.decodeLength == 0) {
+      return;
+    }
+
+    doDecode(decodingState);
+  }
+
+  /**
+   * Perform the real decoding using byte arrays, supporting offsets and
+   * lengths.
+   * @param decodingState the decoding state
+   */
+  protected abstract void doDecode(ByteArrayDecodingState decodingState);
 
   /**
    * Decode with inputs and erasedIndexes, generates outputs. More see above.
@@ -88,6 +156,57 @@ public interface RawErasureDecoder extends RawErasureCoder {
    * @param outputs output buffers to put decoded data into according to
    *                erasedIndexes, ready for read after the call
    */
-  void decode(ECChunk[] inputs, int[] erasedIndexes, ECChunk[] outputs);
+  public void decode(ECChunk[] inputs, int[] erasedIndexes,
+                     ECChunk[] outputs) {
+    ByteBuffer[] newInputs = CoderUtil.toBuffers(inputs);
+    ByteBuffer[] newOutputs = CoderUtil.toBuffers(outputs);
+    decode(newInputs, erasedIndexes, newOutputs);
+  }
+
+  public int getNumDataUnits() {
+    return coderOptions.getNumDataUnits();
+  }
+
+  public int getNumParityUnits() {
+    return coderOptions.getNumParityUnits();
+  }
+
+  protected int getNumAllUnits() {
+    return coderOptions.getNumAllUnits();
+  }
+
+  /**
+   * Tell if direct buffer is preferred or not. It's for callers to
+   * decide how to allocate coding chunk buffers, using DirectByteBuffer or
+   * byte arrays. It returns false by default.
+   * @return true if native buffer is preferred for performance consideration,
+   * otherwise false.
+   */
+  public boolean preferDirectBuffer() {
+    return false;
+  }
 
+  /**
+   * Tell if changing the input buffers is allowed while performing
+   * encoding/decoding.
+   * @return true if it's allowed to change inputs, false otherwise
+   */
+  public boolean allowChangeInputs() {
+    return coderOptions.allowChangeInputs();
+  }
+
+  /**
+   * Tell if dumping verbose info during encoding/decoding is allowed.
+   * @return true if it's allowed to do verbose dump, false otherwise.
+   */
+  public boolean allowVerboseDump() {
+    return coderOptions.allowVerboseDump();
+  }
+
+  /**
+   * Should be called when releasing this coder. A good chance to release
+   * encoding or decoding buffers.
+   */
+  public void release() {
+    // Nothing to do here.
+  }
 }
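
The byte-array decode() above expects inputs to hold every data and parity unit, with each erased unit passed as null and named in erasedIndexes, and one output buffer per erased index. A minimal sketch of that contract against the XOR decoder from this patch; the cell size and the data-units-then-parity layout are illustrative assumptions:

    import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
    import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
    import org.apache.hadoop.io.erasurecode.rawcoder.XORRawDecoder;

    public class XorDecodeSketch {
      public static void main(String[] args) {
        final int cellSize = 8;
        RawErasureDecoder decoder =
            new XORRawDecoder(new ErasureCoderOptions(3, 1));

        // Three data units plus one parity unit; unit 1 is erased (null).
        byte[][] inputs = {
            new byte[cellSize], null, new byte[cellSize], new byte[cellSize]};
        int[] erasedIndexes = {1};
        byte[][] outputs = {new byte[cellSize]};

        decoder.decode(inputs, erasedIndexes, outputs);
        // outputs[0] now holds the reconstructed content of unit 1.
      }
    }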

+ 127 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureEncoder.java

@@ -19,18 +19,34 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.erasurecode.ECChunk;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 
 import java.nio.ByteBuffer;
 
 /**
- * RawErasureEncoder performs encoding given chunks of input data and generates
- * parity outputs that corresponds to an erasure code scheme, like XOR and
- * Reed-Solomon.
+ * An abstract raw erasure encoder that's to be inherited by new encoders.
  *
- * It extends the {@link RawErasureCoder} interface.
+ * Raw erasure coder is part of erasure codec framework, where erasure coder is
+ * used to encode/decode a group of blocks (BlockGroup) according to the codec
+ * specific BlockGroup layout and logic. An erasure coder extracts chunks of
+ * data from the blocks and can employ various low level raw erasure coders to
+ * perform encoding/decoding against the chunks.
+ *
+ * To distinguish it from an erasure coder, raw erasure coder here refers to
+ * the low-level construct, since it only takes care of the math calculation
+ * with a group of byte buffers.
+ *
+ * Note it mainly provides encode() calls, which should be stateless and may be
+ * made thread-safe in the future.
  */
 @InterfaceAudience.Private
-public interface RawErasureEncoder extends RawErasureCoder {
+public abstract class RawErasureEncoder {
+
+  private final ErasureCoderOptions coderOptions;
+
+  public RawErasureEncoder(ErasureCoderOptions coderOptions) {
+    this.coderOptions = coderOptions;
+  }
 
   /**
    * Encode with inputs and generates outputs.
@@ -47,7 +63,43 @@ public interface RawErasureEncoder extends RawErasureCoder {
    * @param outputs output buffers to put the encoded data into, ready to read
    *                after the call
    */
-  void encode(ByteBuffer[] inputs, ByteBuffer[] outputs);
+  public void encode(ByteBuffer[] inputs, ByteBuffer[] outputs) {
+    ByteBufferEncodingState bbeState = new ByteBufferEncodingState(
+        this, inputs, outputs);
+
+    boolean usingDirectBuffer = bbeState.usingDirectBuffer;
+    int dataLen = bbeState.encodeLength;
+    if (dataLen == 0) {
+      return;
+    }
+
+    int[] inputPositions = new int[inputs.length];
+    for (int i = 0; i < inputPositions.length; i++) {
+      if (inputs[i] != null) {
+        inputPositions[i] = inputs[i].position();
+      }
+    }
+
+    if (usingDirectBuffer) {
+      doEncode(bbeState);
+    } else {
+      ByteArrayEncodingState baeState = bbeState.convertToByteArrayState();
+      doEncode(baeState);
+    }
+
+    for (int i = 0; i < inputs.length; i++) {
+      if (inputs[i] != null) {
+        // dataLen bytes consumed
+        inputs[i].position(inputPositions[i] + dataLen);
+      }
+    }
+  }
+
+  /**
+   * Perform the real encoding work using direct ByteBuffer.
+   * @param encodingState the encoding state
+   */
+  protected abstract void doEncode(ByteBufferEncodingState encodingState);
 
   /**
    * Encode with inputs and generates outputs. More see above.
@@ -56,7 +108,24 @@ public interface RawErasureEncoder extends RawErasureCoder {
   * @param outputs output buffers to put the encoded data into, ready to read
    *                after the call
    */
-  void encode(byte[][] inputs, byte[][] outputs);
+  public void encode(byte[][] inputs, byte[][] outputs) {
+    ByteArrayEncodingState baeState = new ByteArrayEncodingState(
+        this, inputs, outputs);
+
+    int dataLen = baeState.encodeLength;
+    if (dataLen == 0) {
+      return;
+    }
+
+    doEncode(baeState);
+  }
+
+  /**
+   * Perform the real encoding work using byte arrays, supporting offsets
+   * and lengths.
+   * @param encodingState the encoding state
+   */
+  protected abstract void doEncode(ByteArrayEncodingState encodingState);
 
   /**
    * Encode with inputs and generates outputs. More see above.
@@ -65,6 +134,56 @@ public interface RawErasureEncoder extends RawErasureCoder {
   * @param outputs output buffers to put the encoded data into, ready to read
    *                after the call
    */
-  void encode(ECChunk[] inputs, ECChunk[] outputs);
+  public void encode(ECChunk[] inputs, ECChunk[] outputs) {
+    ByteBuffer[] newInputs = ECChunk.toBuffers(inputs);
+    ByteBuffer[] newOutputs = ECChunk.toBuffers(outputs);
+    encode(newInputs, newOutputs);
+  }
+
+  public int getNumDataUnits() {
+    return coderOptions.getNumDataUnits();
+  }
+
+  public int getNumParityUnits() {
+    return coderOptions.getNumParityUnits();
+  }
+
+  public int getNumAllUnits() {
+    return coderOptions.getNumAllUnits();
+  }
+
+  /**
+   * Tell if direct buffer is preferred or not. It's for callers to
+   * decide how to allocate coding chunk buffers, using DirectByteBuffer or
+   * byte arrays. It returns false by default.
+   * @return true if native buffer is preferred for performance consideration,
+   * otherwise false.
+   */
+  public boolean preferDirectBuffer() {
+    return false;
+  }
 
+  /**
+   * Tell if changing the input buffers is allowed while performing
+   * encoding/decoding.
+   * @return true if it's allowed to change inputs, false otherwise
+   */
+  public boolean allowChangeInputs() {
+    return coderOptions.allowChangeInputs();
+  }
+
+  /**
+   * Tell if dumping verbose info during encoding/decoding is allowed.
+   * @return true if it's allowed to do verbose dump, false otherwise.
+   */
+  public boolean allowVerboseDump() {
+    return coderOptions.allowVerboseDump();
+  }
+
+  /**
+   * Should be called when releasing this coder. A good chance to release
+   * encoding or decoding buffers.
+   */
+  public void release() {
+    // Nothing to do here.
+  }
 }
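
Note the buffer contract in encode() above: input positions are recorded, heap buffers are routed through the byte-array path, each non-null input is advanced by the encode length on return, and outputs are left ready for reading. A small sketch of that contract, assuming the RSRawEncoder(ErasureCoderOptions) constructor that the RS factory in this patch calls:

    import java.nio.ByteBuffer;
    import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
    import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoder;
    import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

    public class ByteBufferEncodeSketch {
      public static void main(String[] args) {
        final int cellSize = 1024;
        RawErasureEncoder encoder =
            new RSRawEncoder(new ErasureCoderOptions(6, 3));

        ByteBuffer[] inputs = new ByteBuffer[6];
        for (int i = 0; i < inputs.length; i++) {
          inputs[i] = ByteBuffer.allocate(cellSize); // heap buffers are fine
        }
        ByteBuffer[] outputs = new ByteBuffer[3];
        for (int i = 0; i < outputs.length; i++) {
          outputs[i] = ByteBuffer.allocate(cellSize);
        }

        encoder.encode(inputs, outputs);

        // Every input position has advanced by the encode length (one cell);
        // every output holds one parity cell, ready to be read.
        System.out.println(inputs[0].position()); // expected: 1024
      }
    }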

+ 27 - 24
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawDecoder.java

@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
-import java.nio.ByteBuffer;
-
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
+
+import java.nio.ByteBuffer;
 
 /**
  * A raw decoder in XOR code scheme in pure Java, adapted from HDFS-RAID.
@@ -29,55 +30,57 @@ import org.apache.hadoop.classification.InterfaceAudience;
  * deployed independently.
  */
 @InterfaceAudience.Private
-public class XORRawDecoder extends AbstractRawErasureDecoder {
+public class XORRawDecoder extends RawErasureDecoder {
 
-  public XORRawDecoder(int numDataUnits, int numParityUnits) {
-    super(numDataUnits, numParityUnits);
+  public XORRawDecoder(ErasureCoderOptions coderOptions) {
+    super(coderOptions);
   }
 
   @Override
-  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
-                          ByteBuffer[] outputs) {
-    ByteBuffer output = outputs[0];
+  protected void doDecode(ByteBufferDecodingState decodingState) {
+    CoderUtil.resetOutputBuffers(decodingState.outputs,
+        decodingState.decodeLength);
+    ByteBuffer output = decodingState.outputs[0];
 
-    int erasedIdx = erasedIndexes[0];
+    int erasedIdx = decodingState.erasedIndexes[0];
 
     // Process the inputs.
     int iIdx, oIdx;
-    for (int i = 0; i < inputs.length; i++) {
+    for (int i = 0; i < decodingState.inputs.length; i++) {
       // Skip the erased location.
       if (i == erasedIdx) {
         continue;
       }
 
-      for (iIdx = inputs[i].position(), oIdx = output.position();
-           iIdx < inputs[i].limit();
+      for (iIdx = decodingState.inputs[i].position(), oIdx = output.position();
+           iIdx < decodingState.inputs[i].limit();
            iIdx++, oIdx++) {
-        output.put(oIdx, (byte) (output.get(oIdx) ^ inputs[i].get(iIdx)));
+        output.put(oIdx, (byte) (output.get(oIdx) ^
+            decodingState.inputs[i].get(iIdx)));
       }
     }
   }
 
   @Override
-  protected void doDecode(byte[][] inputs, int[] inputOffsets, int dataLen,
-                          int[] erasedIndexes, byte[][] outputs,
-                          int[] outputOffsets) {
-    byte[] output = outputs[0];
-    resetBuffer(output, outputOffsets[0], dataLen);
-
-    int erasedIdx = erasedIndexes[0];
+  protected void doDecode(ByteArrayDecodingState decodingState) {
+    byte[] output = decodingState.outputs[0];
+    int dataLen = decodingState.decodeLength;
+    CoderUtil.resetOutputBuffers(decodingState.outputs,
+        decodingState.outputOffsets, dataLen);
+    int erasedIdx = decodingState.erasedIndexes[0];
 
     // Process the inputs.
     int iIdx, oIdx;
-    for (int i = 0; i < inputs.length; i++) {
+    for (int i = 0; i < decodingState.inputs.length; i++) {
       // Skip the erased location.
       if (i == erasedIdx) {
         continue;
       }
 
-      for (iIdx = inputOffsets[i], oIdx = outputOffsets[0];
-           iIdx < inputOffsets[i] + dataLen; iIdx++, oIdx++) {
-        output[oIdx] ^= inputs[i][iIdx];
+      for (iIdx = decodingState.inputOffsets[i],
+               oIdx = decodingState.outputOffsets[0];
+           iIdx < decodingState.inputOffsets[i] + dataLen; iIdx++, oIdx++) {
+        output[oIdx] ^= decodingState.inputs[i][iIdx];
       }
     }
   }

+ 32 - 25
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawEncoder.java

@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
-import java.nio.ByteBuffer;
-
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
+
+import java.nio.ByteBuffer;
 
 /**
  * A raw encoder in XOR code scheme in pure Java, adapted from HDFS-RAID.
@@ -29,50 +30,56 @@ import org.apache.hadoop.classification.InterfaceAudience;
  * deployed independently.
  */
 @InterfaceAudience.Private
-public class XORRawEncoder extends AbstractRawErasureEncoder {
+public class XORRawEncoder extends RawErasureEncoder {
 
-  public XORRawEncoder(int numDataUnits, int numParityUnits) {
-    super(numDataUnits, numParityUnits);
+  public XORRawEncoder(ErasureCoderOptions coderOptions) {
+    super(coderOptions);
   }
 
-  protected void doEncode(ByteBuffer[] inputs, ByteBuffer[] outputs) {
-    ByteBuffer output = outputs[0];
+  protected void doEncode(ByteBufferEncodingState encodingState) {
+    CoderUtil.resetOutputBuffers(encodingState.outputs,
+        encodingState.encodeLength);
+    ByteBuffer output = encodingState.outputs[0];
 
     // Get the first buffer's data.
     int iIdx, oIdx;
-    for (iIdx = inputs[0].position(), oIdx = output.position();
-         iIdx < inputs[0].limit(); iIdx++, oIdx++) {
-      output.put(oIdx, inputs[0].get(iIdx));
+    for (iIdx = encodingState.inputs[0].position(), oIdx = output.position();
+         iIdx < encodingState.inputs[0].limit(); iIdx++, oIdx++) {
+      output.put(oIdx, encodingState.inputs[0].get(iIdx));
     }
 
     // XOR with everything else.
-    for (int i = 1; i < inputs.length; i++) {
-      for (iIdx = inputs[i].position(), oIdx = output.position();
-           iIdx < inputs[i].limit();
+    for (int i = 1; i < encodingState.inputs.length; i++) {
+      for (iIdx = encodingState.inputs[i].position(), oIdx = output.position();
+           iIdx < encodingState.inputs[i].limit();
            iIdx++, oIdx++) {
-        output.put(oIdx, (byte) (output.get(oIdx) ^ inputs[i].get(iIdx)));
+        output.put(oIdx, (byte) (output.get(oIdx) ^
+            encodingState.inputs[i].get(iIdx)));
       }
     }
   }
 
   @Override
-  protected void doEncode(byte[][] inputs, int[] inputOffsets, int dataLen,
-                          byte[][] outputs, int[] outputOffsets) {
-    byte[] output = outputs[0];
-    resetBuffer(output, outputOffsets[0], dataLen);
+  protected void doEncode(ByteArrayEncodingState encodingState) {
+    int dataLen = encodingState.encodeLength;
+    CoderUtil.resetOutputBuffers(encodingState.outputs,
+        encodingState.outputOffsets, dataLen);
+    byte[] output = encodingState.outputs[0];
 
     // Get the first buffer's data.
     int iIdx, oIdx;
-    for (iIdx = inputOffsets[0], oIdx = outputOffsets[0];
-         iIdx < inputOffsets[0] + dataLen; iIdx++, oIdx++) {
-      output[oIdx] = inputs[0][iIdx];
+    for (iIdx = encodingState.inputOffsets[0],
+             oIdx = encodingState.outputOffsets[0];
+         iIdx < encodingState.inputOffsets[0] + dataLen; iIdx++, oIdx++) {
+      output[oIdx] = encodingState.inputs[0][iIdx];
     }
 
     // XOR with everything else.
-    for (int i = 1; i < inputs.length; i++) {
-      for (iIdx = inputOffsets[i], oIdx = outputOffsets[0];
-           iIdx < inputOffsets[i] + dataLen; iIdx++, oIdx++) {
-        output[oIdx] ^= inputs[i][iIdx];
+    for (int i = 1; i < encodingState.inputs.length; i++) {
+      for (iIdx = encodingState.inputOffsets[i],
+               oIdx = encodingState.outputOffsets[0];
+           iIdx < encodingState.inputOffsets[i] + dataLen; iIdx++, oIdx++) {
+        output[oIdx] ^= encodingState.inputs[i][iIdx];
       }
     }
   }
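
The encoder above copies the first input into the single parity output and XORs the remaining inputs into it; the decoder recovers an erased unit by XOR-ing every surviving unit, parity included. A small round-trip sketch over byte arrays; the sizes, values, and data-units-then-parity ordering are illustrative assumptions:

    import java.util.Arrays;
    import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
    import org.apache.hadoop.io.erasurecode.rawcoder.XORRawDecoder;
    import org.apache.hadoop.io.erasurecode.rawcoder.XORRawEncoder;

    public class XorRoundTripSketch {
      public static void main(String[] args) {
        ErasureCoderOptions options = new ErasureCoderOptions(3, 1);

        byte[] d0 = {1, 2, 3, 4};
        byte[] d1 = {5, 6, 7, 8};
        byte[] d2 = {9, 10, 11, 12};
        byte[] parity = new byte[4];

        // parity = d0 ^ d1 ^ d2
        new XORRawEncoder(options).encode(
            new byte[][] {d0, d1, d2}, new byte[][] {parity});

        // Pretend d1 was lost: pass it as null, name index 1 as erased, and
        // supply the parity unit so the decoder can XOR the loss back.
        byte[] recovered = new byte[4];
        new XORRawDecoder(options).decode(
            new byte[][] {d0, null, d2, parity},
            new int[] {1},
            new byte[][] {recovered});

        System.out.println(Arrays.equals(d1, recovered)); // expected: true
      }
    }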

+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XORRawErasureCoderFactory.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
 
 /**
  * A raw coder factory for raw XOR coder.
@@ -26,12 +27,12 @@ import org.apache.hadoop.classification.InterfaceAudience;
 public class XORRawErasureCoderFactory implements RawErasureCoderFactory {
 
   @Override
-  public RawErasureEncoder createEncoder(int numDataUnits, int numParityUnits) {
-    return new XORRawEncoder(numDataUnits, numParityUnits);
+  public RawErasureEncoder createEncoder(ErasureCoderOptions coderOptions) {
+    return new XORRawEncoder(coderOptions);
   }
 
   @Override
-  public RawErasureDecoder createDecoder(int numDataUnits, int numParityUnits) {
-    return new XORRawDecoder(numDataUnits, numParityUnits);
+  public RawErasureDecoder createDecoder(ErasureCoderOptions coderOptions) {
+    return new XORRawDecoder(coderOptions);
   }
 }

+ 38 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/package-info.java

@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ *
+ * Raw erasure coders.
+ *
+ * Raw erasure coder is part of erasure codec framework, where erasure coder is
+ * used to encode/decode a group of blocks (BlockGroup) according to the codec
+ * specific BlockGroup layout and logic. An erasure coder extracts chunks of
+ * data from the blocks and can employ various low level raw erasure coders to
+ * perform encoding/decoding against the chunks.
+ *
+ * To distinguish it from an erasure coder, raw erasure coder here refers to
+ * the low-level construct, since it only takes care of the math calculation
+ * with a group of byte buffers.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

+ 0 - 83
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/CoderUtil.java

@@ -1,83 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.io.erasurecode.rawcoder.util;
-
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.classification.InterfaceAudience;
-
-import java.util.Arrays;
-
-/**
- * Helpful utilities for implementing some raw erasure coders.
- */
-@InterfaceAudience.Private
-public final class CoderUtil {
-
-  private CoderUtil() {
-    // No called
-  }
-
-
-  /**
-   * Get indexes into inputs array for items marked as null, either erased or
-   * not to read.
-   * @return indexes into inputs array
-   */
-  public static <T> int[] getErasedOrNotToReadIndexes(T[] inputs) {
-    int[] invalidIndexes = new int[inputs.length];
-    int idx = 0;
-    for (int i = 0; i < inputs.length; i++) {
-      if (inputs[i] == null) {
-        invalidIndexes[idx++] = i;
-      }
-    }
-
-    return Arrays.copyOf(invalidIndexes, idx);
-  }
-
-  /**
-   * Find the valid input from all the inputs.
-   * @param inputs input buffers to look for valid input
-   * @return the first valid input
-   */
-  public static <T> T findFirstValidInput(T[] inputs) {
-    for (T input : inputs) {
-      if (input != null) {
-        return input;
-      }
-    }
-
-    throw new HadoopIllegalArgumentException(
-        "Invalid inputs are found, all being null");
-  }
-
-  /**
-   * Picking up indexes of valid inputs.
-   * @param inputs actually decoding input buffers
-   * @param validIndexes an array to be filled and returned
-   * @param <T>
-   */
-  public static <T> void makeValidIndexes(T[] inputs, int[] validIndexes) {
-    int idx = 0;
-    for (int i = 0; i < inputs.length && idx < validIndexes.length; i++) {
-      if (inputs[i] != null) {
-        validIndexes[idx++] = i;
-      }
-    }
-  }
-}

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/util/GaloisField.java

@@ -17,12 +17,12 @@
  */
 package org.apache.hadoop.io.erasurecode.rawcoder.util;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
 import java.nio.ByteBuffer;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.hadoop.classification.InterfaceAudience;
-
 /**
  * Implementation of Galois field arithmetic with 2^p elements. The input must
  * be unsigned integers. It's ported from HDFS-RAID, slightly adapted.

+ 321 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java

@@ -0,0 +1,321 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.retry;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.AsyncGet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.reflect.Method;
+import java.util.LinkedList;
+import java.util.Queue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
+
+/** Handle async calls. */
+@InterfaceAudience.Private
+public class AsyncCallHandler {
+  static final Logger LOG = LoggerFactory.getLogger(AsyncCallHandler.class);
+
+  private static final ThreadLocal<AsyncGet<?, Exception>>
+      LOWER_LAYER_ASYNC_RETURN = new ThreadLocal<>();
+  private static final ThreadLocal<AsyncGet<Object, Throwable>>
+      ASYNC_RETURN = new ThreadLocal<>();
+
+  /** @return the async return value from {@link AsyncCallHandler}. */
+  @InterfaceStability.Unstable
+  @SuppressWarnings("unchecked")
+  public static <R, T extends  Throwable> AsyncGet<R, T> getAsyncReturn() {
+    final AsyncGet<R, T> asyncGet = (AsyncGet<R, T>)ASYNC_RETURN.get();
+    if (asyncGet != null) {
+      ASYNC_RETURN.set(null);
+      return asyncGet;
+    } else {
+      return (AsyncGet<R, T>) getLowerLayerAsyncReturn();
+    }
+  }
+
+  /** For the lower rpc layers to set the async return value. */
+  @InterfaceStability.Unstable
+  public static void setLowerLayerAsyncReturn(
+      AsyncGet<?, Exception> asyncReturn) {
+    LOWER_LAYER_ASYNC_RETURN.set(asyncReturn);
+  }
+
+  private static AsyncGet<?, Exception> getLowerLayerAsyncReturn() {
+    final AsyncGet<?, Exception> asyncGet = LOWER_LAYER_ASYNC_RETURN.get();
+    Preconditions.checkNotNull(asyncGet);
+    LOWER_LAYER_ASYNC_RETURN.set(null);
+    return asyncGet;
+  }
+
+  /** A simple concurrent queue which keeps track of the empty start time. */
+  static class ConcurrentQueue<T> {
+    private final Queue<T> queue = new LinkedList<>();
+    private long emptyStartTime = Time.monotonicNow();
+
+    synchronized int size() {
+      return queue.size();
+    }
+
+    /** Is the queue empty for more than the given time in milliseconds? */
+    synchronized boolean isEmpty(long time) {
+      return queue.isEmpty() && Time.monotonicNow() - emptyStartTime > time;
+    }
+
+    synchronized void offer(T c) {
+      final boolean added = queue.offer(c);
+      Preconditions.checkState(added);
+    }
+
+    synchronized T poll() {
+      Preconditions.checkState(!queue.isEmpty());
+      final T t = queue.poll();
+      if (queue.isEmpty()) {
+        emptyStartTime = Time.monotonicNow();
+      }
+      return t;
+    }
+  }
+
+  /** A queue for handling async calls. */
+  static class AsyncCallQueue {
+    private final ConcurrentQueue<AsyncCall> queue = new ConcurrentQueue<>();
+    private final Processor processor = new Processor();
+
+    void addCall(AsyncCall call) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("add " + call);
+      }
+      queue.offer(call);
+      processor.tryStart();
+    }
+
+    void checkCalls() {
+      final int size = queue.size();
+      for (int i = 0; i < size; i++) {
+        final AsyncCall c = queue.poll();
+        if (!c.isDone()) {
+          queue.offer(c); // the call is not done yet, add it back.
+        }
+      }
+    }
+
+    /** Process the async calls in the queue. */
+    private class Processor {
+      static final long GRACE_PERIOD = 10*1000L;
+      static final long SLEEP_PERIOD = 100L;
+
+      private final AtomicReference<Thread> running = new AtomicReference<>();
+
+      boolean isRunning(Daemon d) {
+        return d == running.get();
+      }
+
+      void tryStart() {
+        final Thread current = Thread.currentThread();
+        if (running.compareAndSet(null, current)) {
+          final Daemon daemon = new Daemon() {
+            @Override
+            public void run() {
+              for (; isRunning(this);) {
+                try {
+                  Thread.sleep(SLEEP_PERIOD);
+                } catch (InterruptedException e) {
+                  kill(this);
+                  return;
+                }
+
+                checkCalls();
+                tryStop(this);
+              }
+            }
+          };
+
+          final boolean set = running.compareAndSet(current, daemon);
+          Preconditions.checkState(set);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Starting AsyncCallQueue.Processor " + daemon);
+          }
+          daemon.start();
+        }
+      }
+
+      void tryStop(Daemon d) {
+        if (queue.isEmpty(GRACE_PERIOD)) {
+          kill(d);
+        }
+      }
+
+      void kill(Daemon d) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Killing " + d);
+        }
+        final boolean set = running.compareAndSet(d, null);
+        Preconditions.checkState(set);
+      }
+    }
+  }
+
+  static class AsyncValue<V> {
+    private V value;
+
+    synchronized V waitAsyncValue(long timeout, TimeUnit unit)
+        throws InterruptedException, TimeoutException {
+      if (value != null) {
+        return value;
+      }
+      AsyncGet.Util.wait(this, timeout, unit);
+      if (value != null) {
+        return value;
+      }
+
+      throw new TimeoutException("waitCallReturn timed out "
+          + timeout + " " + unit);
+    }
+
+    synchronized void set(V v) {
+      Preconditions.checkNotNull(v);
+      Preconditions.checkState(value == null);
+      value = v;
+      notify();
+    }
+
+    synchronized boolean isDone() {
+      return value != null;
+    }
+  }
+
+  static class AsyncCall extends RetryInvocationHandler.Call {
+    private final AsyncCallHandler asyncCallHandler;
+
+    private final AsyncValue<CallReturn> asyncCallReturn = new AsyncValue<>();
+    private AsyncGet<?, Exception> lowerLayerAsyncGet;
+
+    AsyncCall(Method method, Object[] args, boolean isRpc, int callId,
+              RetryInvocationHandler.Counters counters,
+              RetryInvocationHandler<?> retryInvocationHandler,
+              AsyncCallHandler asyncCallHandler) {
+      super(method, args, isRpc, callId, counters, retryInvocationHandler);
+
+      this.asyncCallHandler = asyncCallHandler;
+    }
+
+    /** @return true if the call is done; otherwise, return false. */
+    boolean isDone() {
+      final CallReturn r = invokeOnce();
+      switch (r.getState()) {
+        case RETURNED:
+        case EXCEPTION:
+          asyncCallReturn.set(r); // the async call is done
+          return true;
+        case RETRY:
+          invokeOnce();
+          break;
+        case ASYNC_CALL_IN_PROGRESS:
+        case ASYNC_INVOKED:
+          // nothing to do
+          break;
+        default:
+          Preconditions.checkState(false);
+      }
+      return false;
+    }
+
+    @Override
+    CallReturn invoke() throws Throwable {
+      LOG.debug("{}.invoke {}", getClass().getSimpleName(), this);
+      if (lowerLayerAsyncGet != null) {
+        // async call was submitted early, check the lower level async call
+        final boolean isDone = lowerLayerAsyncGet.isDone();
+        LOG.trace("invoke: lowerLayerAsyncGet.isDone()? {}", isDone);
+        if (!isDone) {
+          return CallReturn.ASYNC_CALL_IN_PROGRESS;
+        }
+        try {
+          return new CallReturn(lowerLayerAsyncGet.get(0, TimeUnit.SECONDS));
+        } finally {
+          lowerLayerAsyncGet = null;
+        }
+      }
+
+      // submit a new async call
+      LOG.trace("invoke: ASYNC_INVOKED");
+      final boolean mode = Client.isAsynchronousMode();
+      try {
+        Client.setAsynchronousMode(true);
+        final Object r = invokeMethod();
+        // invokeMethod should set LOWER_LAYER_ASYNC_RETURN and return null.
+        Preconditions.checkState(r == null);
+        lowerLayerAsyncGet = getLowerLayerAsyncReturn();
+
+        if (counters.isZeros()) {
+          // first async attempt, initialize
+          LOG.trace("invoke: initAsyncCall");
+          asyncCallHandler.initAsyncCall(this, asyncCallReturn);
+        }
+        return CallReturn.ASYNC_INVOKED;
+      } finally {
+        Client.setAsynchronousMode(mode);
+      }
+    }
+  }
+
+  private final AsyncCallQueue asyncCalls = new AsyncCallQueue();
+  private volatile boolean hasSuccessfulCall = false;
+
+  AsyncCall newAsyncCall(Method method, Object[] args, boolean isRpc,
+                         int callId, RetryInvocationHandler.Counters counters,
+                         RetryInvocationHandler<?> retryInvocationHandler) {
+    return new AsyncCall(method, args, isRpc, callId, counters,
+        retryInvocationHandler, this);
+  }
+
+  boolean hasSuccessfulCall() {
+    return hasSuccessfulCall;
+  }
+
+  private void initAsyncCall(final AsyncCall asyncCall,
+                             final AsyncValue<CallReturn> asyncCallReturn) {
+    asyncCalls.addCall(asyncCall);
+
+    final AsyncGet<Object, Throwable> asyncGet
+        = new AsyncGet<Object, Throwable>() {
+      @Override
+      public Object get(long timeout, TimeUnit unit) throws Throwable {
+        final CallReturn c = asyncCallReturn.waitAsyncValue(timeout, unit);
+        final Object r = c.getReturnValue();
+        hasSuccessfulCall = true;
+        return r;
+      }
+
+      @Override
+      public boolean isDone() {
+        return asyncCallReturn.isDone();
+      }
+    };
+    ASYNC_RETURN.set(asyncGet);
+  }
+}
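
At the retry layer, an asynchronous invocation returns null from the proxy and the result is obtained later through the AsyncGet handle published here. A rough sketch of that flow; MyProtocol and its retry-wrapped proxy are hypothetical placeholders, while Client.setAsynchronousMode/isAsynchronousMode, AsyncCallHandler.getAsyncReturn and AsyncGet.get come from this patch:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.io.retry.AsyncCallHandler;
    import org.apache.hadoop.ipc.Client;
    import org.apache.hadoop.util.concurrent.AsyncGet;

    public class AsyncProxyCallSketch {
      // Hypothetical RPC interface; the proxy is assumed to be a retry-wrapped
      // proxy created elsewhere (e.g. via RetryProxy.create).
      interface MyProtocol {
        String echo(String message);
      }

      static String echoAsync(MyProtocol proxy, String message)
          throws Throwable {
        final boolean saved = Client.isAsynchronousMode();
        Client.setAsynchronousMode(true);
        try {
          // In asynchronous mode the proxy call returns immediately (null).
          proxy.echo(message);
          AsyncGet<String, Throwable> asyncGet =
              AsyncCallHandler.getAsyncReturn();
          // Block for up to 10 seconds waiting for the async result.
          return asyncGet.get(10, TimeUnit.SECONDS);
        } finally {
          Client.setAsynchronousMode(saved);
        }
      }
    }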

+ 75 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/CallReturn.java

@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.retry;
+
+import com.google.common.base.Preconditions;
+
+/** The call return from a method invocation. */
+class CallReturn {
+  /** The return state. */
+  enum State {
+    /** Call is returned successfully. */
+    RETURNED,
+    /** Call throws an exception. */
+    EXCEPTION,
+    /** Call should be retried according to the {@link RetryPolicy}. */
+    RETRY,
+    /** Call, which is async, is still in progress. */
+    ASYNC_CALL_IN_PROGRESS,
+    /** Call, which is async, just has been invoked. */
+    ASYNC_INVOKED
+  }
+
+  static final CallReturn ASYNC_CALL_IN_PROGRESS = new CallReturn(
+      State.ASYNC_CALL_IN_PROGRESS);
+  static final CallReturn ASYNC_INVOKED = new CallReturn(State.ASYNC_INVOKED);
+  static final CallReturn RETRY = new CallReturn(State.RETRY);
+
+  private final Object returnValue;
+  private final Throwable thrown;
+  private final State state;
+
+  CallReturn(Object r) {
+    this(r, null, State.RETURNED);
+  }
+  CallReturn(Throwable t) {
+    this(null, t, State.EXCEPTION);
+    Preconditions.checkNotNull(t);
+  }
+  private CallReturn(State s) {
+    this(null, null, s);
+  }
+  private CallReturn(Object r, Throwable t, State s) {
+    Preconditions.checkArgument(r == null || t == null);
+    returnValue = r;
+    thrown = t;
+    state = s;
+  }
+
+  State getState() {
+    return state;
+  }
+
+  Object getReturnValue() throws Throwable {
+    if (state == State.EXCEPTION) {
+      throw thrown;
+    }
+    Preconditions.checkState(state == State.RETURNED, "state == %s", state);
+    return returnValue;
+  }
+}

+ 104 - 30
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java

@@ -42,11 +42,83 @@ import java.util.Map;
 public class RetryInvocationHandler<T> implements RpcInvocationHandler {
   public static final Log LOG = LogFactory.getLog(RetryInvocationHandler.class);
 
-  private static class Counters {
+  static class Call {
+    private final Method method;
+    private final Object[] args;
+    private final boolean isRpc;
+    private final int callId;
+    final Counters counters;
+
+    private final RetryPolicy retryPolicy;
+    private final RetryInvocationHandler<?> retryInvocationHandler;
+
+    Call(Method method, Object[] args, boolean isRpc, int callId,
+         Counters counters, RetryInvocationHandler<?> retryInvocationHandler) {
+      this.method = method;
+      this.args = args;
+      this.isRpc = isRpc;
+      this.callId = callId;
+      this.counters = counters;
+
+      this.retryPolicy = retryInvocationHandler.getRetryPolicy(method);
+      this.retryInvocationHandler = retryInvocationHandler;
+    }
+
+    /** Invoke the call once without retrying. */
+    synchronized CallReturn invokeOnce() {
+      try {
+        // The number of times this invocation handler has ever been failed over
+        // before this method invocation attempt. Used to prevent concurrent
+        // failed method invocations from triggering multiple failover attempts.
+        final long failoverCount = retryInvocationHandler.getFailoverCount();
+        try {
+          return invoke();
+        } catch (Exception e) {
+          if (LOG.isTraceEnabled()) {
+            LOG.trace(this, e);
+          }
+          if (Thread.currentThread().isInterrupted()) {
+            // If interrupted, do not retry.
+            throw e;
+          }
+          retryInvocationHandler.handleException(
+              method, retryPolicy, failoverCount, counters, e);
+          return CallReturn.RETRY;
+        }
+      } catch(Throwable t) {
+        return new CallReturn(t);
+      }
+    }
+
+    CallReturn invoke() throws Throwable {
+      return new CallReturn(invokeMethod());
+    }
+
+    Object invokeMethod() throws Throwable {
+      if (isRpc) {
+        Client.setCallIdAndRetryCount(callId, counters.retries);
+      }
+      return retryInvocationHandler.invokeMethod(method, args);
+    }
+
+    @Override
+    public String toString() {
+      return getClass().getSimpleName() + "#" + callId + ": "
+          + method.getDeclaringClass().getSimpleName() + "." + method.getName()
+          + "(" + (args == null || args.length == 0? "": Arrays.toString(args))
+          +  ")";
+    }
+  }
+
+  static class Counters {
     /** Counter for retries. */
     private int retries;
     /** Counter for method invocation has been failed over. */
     private int failovers;
+
+    boolean isZeros() {
+      return retries == 0 && failovers == 0;
+    }
   }
 
   private static class ProxyDescriptor<T> {
@@ -144,11 +216,13 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
 
   private final ProxyDescriptor<T> proxyDescriptor;
 
-  private volatile boolean hasMadeASuccessfulCall = false;
-  
+  private volatile boolean hasSuccessfulCall = false;
+
   private final RetryPolicy defaultPolicy;
   private final Map<String,RetryPolicy> methodNameToPolicyMap;
 
+  private final AsyncCallHandler asyncCallHandler = new AsyncCallHandler();
+
   protected RetryInvocationHandler(FailoverProxyProvider<T> proxyProvider,
       RetryPolicy retryPolicy) {
     this(proxyProvider, retryPolicy, Collections.<String, RetryPolicy>emptyMap());
@@ -167,38 +241,35 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
     return policy != null? policy: defaultPolicy;
   }
 
+  private long getFailoverCount() {
+    return proxyDescriptor.getFailoverCount();
+  }
+
+  private Call newCall(Method method, Object[] args, boolean isRpc, int callId,
+                       Counters counters) {
+    if (Client.isAsynchronousMode()) {
+      return asyncCallHandler.newAsyncCall(method, args, isRpc, callId,
+          counters, this);
+    } else {
+      return new Call(method, args, isRpc, callId, counters, this);
+    }
+  }
+
   @Override
   public Object invoke(Object proxy, Method method, Object[] args)
       throws Throwable {
     final boolean isRpc = isRpcInvocation(proxyDescriptor.getProxy());
     final int callId = isRpc? Client.nextCallId(): RpcConstants.INVALID_CALL_ID;
-    return invoke(method, args, isRpc, callId, new Counters());
-  }
-
-  private Object invoke(final Method method, final Object[] args,
-      final boolean isRpc, final int callId, final Counters counters)
-      throws Throwable {
-    final RetryPolicy policy = getRetryPolicy(method);
+    final Counters counters = new Counters();
 
+    final Call call = newCall(method, args, isRpc, callId, counters);
     while (true) {
-      // The number of times this invocation handler has ever been failed over,
-      // before this method invocation attempt. Used to prevent concurrent
-      // failed method invocations from triggering multiple failover attempts.
-      final long failoverCount = proxyDescriptor.getFailoverCount();
-
-      if (isRpc) {
-        Client.setCallIdAndRetryCount(callId, counters.retries);
-      }
-      try {
-        final Object ret = invokeMethod(method, args);
-        hasMadeASuccessfulCall = true;
-        return ret;
-      } catch (Exception ex) {
-        if (Thread.currentThread().isInterrupted()) {
-          // If interrupted, do not retry.
-          throw ex;
-        }
-        handleException(method, policy, failoverCount, counters, ex);
+      final CallReturn c = call.invokeOnce();
+      final CallReturn.State state = c.getState();
+      if (state == CallReturn.State.ASYNC_INVOKED) {
+        return null; // return null for async calls
+      } else if (c.getState() != CallReturn.State.RETRY) {
+        return c.getReturnValue();
       }
     }
   }
@@ -239,7 +310,8 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
       final int failovers, final long delay, final Exception ex) {
     // log info if this has made some successful calls or
     // this is not the first failover
-    final boolean info = hasMadeASuccessfulCall || failovers != 0;
+    final boolean info = hasSuccessfulCall || failovers != 0
+        || asyncCallHandler.hasSuccessfulCall();
     if (!info && !LOG.isDebugEnabled()) {
       return;
     }
@@ -265,7 +337,9 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
       if (!method.isAccessible()) {
         method.setAccessible(true);
       }
-      return method.invoke(proxyDescriptor.getProxy(), args);
+      final Object r = method.invoke(proxyDescriptor.getProxy(), args);
+      hasSuccessfulCall = true;
+      return r;
     } catch (InvocationTargetException e) {
       throw e.getCause();
     }
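
Callers normally reach this handler through RetryProxy rather than constructing it directly, and the refactoring above keeps that entry point unchanged. A brief sketch of wrapping an implementation with a retry policy; UnreliableService is a hypothetical interface and the policy parameters are arbitrary:

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryProxy;

    public class RetryProxySketch {
      // Hypothetical service interface; in real protocols, methods that are
      // safe to retry are typically annotated @Idempotent.
      interface UnreliableService {
        String fetch(String key) throws IOException;
      }

      static UnreliableService wrap(UnreliableService impl) {
        RetryPolicy policy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            3, 500, TimeUnit.MILLISECONDS);
        return (UnreliableService) RetryProxy.create(
            UnreliableService.class, impl, policy);
      }
    }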

+ 3 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.io.retry;
 
+import java.io.EOFException;
 import java.io.IOException;
 import java.net.ConnectException;
 import java.net.NoRouteToHostException;
@@ -647,8 +648,9 @@ public class RetryPolicies {
         return new RetryAction(RetryAction.RetryDecision.FAIL, 0, "retries ("
             + retries + ") exceeded maximum allowed (" + maxRetries + ")");
       }
-      
+
       if (e instanceof ConnectException ||
+          e instanceof EOFException ||
           e instanceof NoRouteToHostException ||
           e instanceof UnknownHostException ||
           e instanceof StandbyException ||
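
This hunk adds EOFException to the connection-level failures that the failover-on-network-exception policy treats like a refused or unreachable connection, so a stream cut mid-call can now be retried or failed over instead of surfacing immediately. A sketch of building such a policy; the fallback policy and the limits below are arbitrary choices:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    public class FailoverPolicySketch {
      static RetryPolicy newFailoverPolicy() {
        // Fall back to a bounded exponential backoff between attempts.
        RetryPolicy fallback = RetryPolicies.exponentialBackoffRetry(
            5, 1000, TimeUnit.MILLISECONDS);
        // Allow up to 15 failovers; EOFException now also takes this path.
        return RetryPolicies.failoverOnNetworkException(fallback, 15);
      }
    }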

+ 60 - 64
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -18,46 +18,10 @@
 
 package org.apache.hadoop.ipc;
 
-import static org.apache.hadoop.ipc.RpcConstants.*;
-
-import java.io.BufferedInputStream;
-import java.io.BufferedOutputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.EOFException;
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InterruptedIOException;
-import java.io.OutputStream;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.net.SocketTimeoutException;
-import java.net.UnknownHostException;
-import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
-import java.util.Hashtable;
-import java.util.Iterator;
-import java.util.Map.Entry;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.RejectedExecutionException;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
-
-import javax.net.SocketFactory;
-import javax.security.sasl.Sasl;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.protobuf.CodedOutputStream;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -93,14 +57,24 @@ import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.AsyncGet;
 import org.apache.htrace.core.Span;
 import org.apache.htrace.core.Tracer;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.AbstractFuture;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import com.google.protobuf.CodedOutputStream;
+import javax.net.SocketFactory;
+import javax.security.sasl.Sasl;
+import java.io.*;
+import java.net.*;
+import java.security.PrivilegedExceptionAction;
+import java.util.*;
+import java.util.Map.Entry;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.apache.hadoop.ipc.RpcConstants.CONNECTION_CONTEXT_CALL_ID;
+import static org.apache.hadoop.ipc.RpcConstants.PING_CALL_ID;
 
 /** A client for an IPC service.  IPC calls take a single {@link Writable} as a
  * parameter, and return a {@link Writable} as their value.  A service runs on
@@ -119,8 +93,8 @@ public class Client implements AutoCloseable {
 
   private static final ThreadLocal<Integer> callId = new ThreadLocal<Integer>();
   private static final ThreadLocal<Integer> retryCount = new ThreadLocal<Integer>();
-  private static final ThreadLocal<Future<?>>
-      RETURN_RPC_RESPONSE = new ThreadLocal<>();
+  private static final ThreadLocal<AsyncGet<? extends Writable, IOException>>
+      ASYNC_RPC_RESPONSE = new ThreadLocal<>();
   private static final ThreadLocal<Boolean> asynchronousMode =
       new ThreadLocal<Boolean>() {
         @Override
@@ -131,8 +105,9 @@ public class Client implements AutoCloseable {
 
   @SuppressWarnings("unchecked")
   @Unstable
-  public static <T> Future<T> getReturnRpcResponse() {
-    return (Future<T>) RETURN_RPC_RESPONSE.get();
+  public static <T extends Writable> AsyncGet<T, IOException>
+      getAsyncRpcResponse() {
+    return (AsyncGet<T, IOException>) ASYNC_RPC_RESPONSE.get();
   }
 
   /** Set call id and retry count for the next call. */
@@ -379,6 +354,11 @@ public class Client implements AutoCloseable {
       }
     }
 
+    @Override
+    public String toString() {
+      return getClass().getSimpleName() + id;
+    }
+
     /** Indicate when the call is complete and the
      * value or error are available.  Notifies by default.  */
     protected synchronized void callComplete() {
@@ -1413,27 +1393,39 @@ public class Client implements AutoCloseable {
     }
 
     if (isAsynchronousMode()) {
-      Future<Writable> returnFuture = new AbstractFuture<Writable>() {
-        private final AtomicBoolean callled = new AtomicBoolean(false);
+      final AsyncGet<Writable, IOException> asyncGet
+          = new AsyncGet<Writable, IOException>() {
         @Override
-        public Writable get() throws InterruptedException, ExecutionException {
-          if (callled.compareAndSet(false, true)) {
-            try {
-              set(getRpcResponse(call, connection));
-            } catch (IOException ie) {
-              setException(ie);
-            } finally {
+        public Writable get(long timeout, TimeUnit unit)
+            throws IOException, TimeoutException {
+          boolean done = true;
+          try {
+            final Writable w = getRpcResponse(call, connection, timeout, unit);
+            if (w == null) {
+              done = false;
+              throw new TimeoutException(call + " timed out "
+                  + timeout + " " + unit);
+            }
+            return w;
+          } finally {
+            if (done) {
               releaseAsyncCall();
             }
           }
-          return super.get();
+        }
+
+        @Override
+        public boolean isDone() {
+          synchronized (call) {
+            return call.done;
+          }
         }
       };
 
-      RETURN_RPC_RESPONSE.set(returnFuture);
+      ASYNC_RPC_RESPONSE.set(asyncGet);
       return null;
     } else {
-      return getRpcResponse(call, connection);
+      return getRpcResponse(call, connection, -1, null);
     }
   }
 
@@ -1469,12 +1461,16 @@ public class Client implements AutoCloseable {
     return asyncCallCounter.get();
   }
 
-  private Writable getRpcResponse(final Call call, final Connection connection)
-      throws IOException {
+  /** @return the rpc response or, in case of timeout, null. */
+  private Writable getRpcResponse(final Call call, final Connection connection,
+      final long timeout, final TimeUnit unit) throws IOException {
     synchronized (call) {
       while (!call.done) {
         try {
-          call.wait();                           // wait for the result
+          AsyncGet.Util.wait(call, timeout, unit);
+          if (timeout >= 0 && !call.done) {
+            return null;
+          }
         } catch (InterruptedException ie) {
           Thread.currentThread().interrupt();
           throw new InterruptedIOException("Call interrupted");
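
Illustrative only (not part of the patch): a minimal caller-side sketch of how the new thread-local handle is consumed after issuing a call in asynchronous mode. Only getAsyncRpcResponse(), AsyncGet#get(timeout, unit) and AsyncGet#isDone() are taken from the change; the 100 ms poll interval and the wrapper method are assumptions.

import java.io.IOException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.util.concurrent.AsyncGet;

public class AsyncRpcResponseSketch {
  // Assumes an RPC was already issued on this thread while the client was in
  // asynchronous mode, so Client stored the AsyncGet handle thread-locally.
  static Writable awaitResponse() throws IOException, InterruptedException {
    final AsyncGet<Writable, IOException> handle = Client.getAsyncRpcResponse();
    while (true) {
      try {
        // A bounded get() doubles as a poll: a call that has not completed
        // within the timeout surfaces as TimeoutException rather than null.
        return handle.get(100, TimeUnit.MILLISECONDS);
      } catch (TimeoutException e) {
        // Not done yet (handle.isDone() reports the same without blocking);
        // other work could be interleaved here before retrying.
      }
    }
  }
}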

+ 80 - 50
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.ipc;
 
 import java.lang.ref.WeakReference;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -125,12 +126,17 @@ public class DecayRpcScheduler implements RpcScheduler,
   public static final Logger LOG =
       LoggerFactory.getLogger(DecayRpcScheduler.class);
 
-  // Track the number of calls for each schedulable identity
-  private final ConcurrentHashMap<Object, AtomicLong> callCounts =
-    new ConcurrentHashMap<Object, AtomicLong>();
+  // Track the decayed and raw (no decay) number of calls for each schedulable
+  // identity from all previous decay windows: idx 0 for decayed call count and
+  // idx 1 for the raw call count
+  private final ConcurrentHashMap<Object, List<AtomicLong>> callCounts =
+      new ConcurrentHashMap<Object, List<AtomicLong>>();
+
+  // Should be the sum of all AtomicLongs in decayed callCounts
+  private final AtomicLong totalDecayedCallCount = new AtomicLong();
+  // The sum of all AtomicLongs in raw callCounts
+  private final AtomicLong totalRawCallCount = new AtomicLong();
 
-  // Should be the sum of all AtomicLongs in callCounts
-  private final AtomicLong totalCalls = new AtomicLong();
 
   // Track total call count and response time in current decay window
   private final AtomicLongArray responseTimeCountInCurrWindow;
@@ -155,6 +161,7 @@ public class DecayRpcScheduler implements RpcScheduler,
   private final long[] backOffResponseTimeThresholds;
   private final String namespace;
   private final int topUsersCount; // e.g., report top 10 users' metrics
+  private static final double PRECISION = 0.0001;
 
   /**
    * This TimerTask will call decayCurrentCounts until
@@ -380,19 +387,23 @@ public class DecayRpcScheduler implements RpcScheduler,
    */
   private void decayCurrentCounts() {
     try {
-      long total = 0;
-      Iterator<Map.Entry<Object, AtomicLong>> it =
+      long totalDecayedCount = 0;
+      long totalRawCount = 0;
+      Iterator<Map.Entry<Object, List<AtomicLong>>> it =
           callCounts.entrySet().iterator();
 
       while (it.hasNext()) {
-        Map.Entry<Object, AtomicLong> entry = it.next();
-        AtomicLong count = entry.getValue();
+        Map.Entry<Object, List<AtomicLong>> entry = it.next();
+        AtomicLong decayedCount = entry.getValue().get(0);
+        AtomicLong rawCount = entry.getValue().get(1);
+
 
         // Compute the next value by reducing it by the decayFactor
-        long currentValue = count.get();
+        totalRawCount += rawCount.get();
+        long currentValue = decayedCount.get();
         long nextValue = (long) (currentValue * decayFactor);
-        total += nextValue;
-        count.set(nextValue);
+        totalDecayedCount += nextValue;
+        decayedCount.set(nextValue);
 
         if (nextValue == 0) {
           // We will clean up unused keys here. An interesting optimization
@@ -403,7 +414,8 @@ public class DecayRpcScheduler implements RpcScheduler,
       }
 
       // Update the total so that we remain in sync
-      totalCalls.set(total);
+      totalDecayedCallCount.set(totalDecayedCount);
+      totalRawCallCount.set(totalRawCount);
 
       // Now refresh the cache of scheduling decisions
       recomputeScheduleCache();
@@ -423,9 +435,9 @@ public class DecayRpcScheduler implements RpcScheduler,
   private void recomputeScheduleCache() {
     Map<Object, Integer> nextCache = new HashMap<Object, Integer>();
 
-    for (Map.Entry<Object, AtomicLong> entry : callCounts.entrySet()) {
+    for (Map.Entry<Object, List<AtomicLong>> entry : callCounts.entrySet()) {
       Object id = entry.getKey();
-      AtomicLong value = entry.getValue();
+      AtomicLong value = entry.getValue().get(0);
 
       long snapshot = value.get();
       int computedLevel = computePriorityLevel(snapshot);
@@ -442,27 +454,34 @@ public class DecayRpcScheduler implements RpcScheduler,
    * @param identity the identity of the user to increment
    * @return the value before incrementation
    */
-  private long getAndIncrement(Object identity) throws InterruptedException {
+  private long getAndIncrementCallCounts(Object identity)
+      throws InterruptedException {
     // We will increment the count, or create it if no such count exists
-    AtomicLong count = this.callCounts.get(identity);
+    List<AtomicLong> count = this.callCounts.get(identity);
     if (count == null) {
-      // Create the count since no such count exists.
-      count = new AtomicLong(0);
+      // Create the counts since no such count exists.
+      // idx 0 for decayed call count
+      // idx 1 for the raw call count
+      count = new ArrayList<AtomicLong>(2);
+      count.add(new AtomicLong(0));
+      count.add(new AtomicLong(0));
 
       // Put it in, or get the AtomicInteger that was put in by another thread
-      AtomicLong otherCount = callCounts.putIfAbsent(identity, count);
+      List<AtomicLong> otherCount = callCounts.putIfAbsent(identity, count);
       if (otherCount != null) {
         count = otherCount;
       }
     }
 
     // Update the total
-    totalCalls.getAndIncrement();
+    totalDecayedCallCount.getAndIncrement();
+    totalRawCallCount.getAndIncrement();
 
     // At this point value is guaranteed to be not null. It may however have
     // been clobbered from callCounts. Nonetheless, we return what
     // we have.
-    return count.getAndIncrement();
+    count.get(1).getAndIncrement();
+    return count.get(0).getAndIncrement();
   }
 
   /**
@@ -471,7 +490,7 @@ public class DecayRpcScheduler implements RpcScheduler,
    * @return scheduling decision from 0 to numLevels - 1
    */
   private int computePriorityLevel(long occurrences) {
-    long totalCallSnapshot = totalCalls.get();
+    long totalCallSnapshot = totalDecayedCallCount.get();
 
     double proportion = 0;
     if (totalCallSnapshot > 0) {
@@ -497,7 +516,7 @@ public class DecayRpcScheduler implements RpcScheduler,
    */
   private int cachedOrComputedPriorityLevel(Object identity) {
     try {
-      long occurrences = this.getAndIncrement(identity);
+      long occurrences = this.getAndIncrementCallCounts(identity);
 
       // Try the cache
       Map<Object, Integer> scheduleCache = scheduleCacheRef.get();
@@ -580,7 +599,7 @@ public class DecayRpcScheduler implements RpcScheduler,
     }
   }
 
-  // Update the cached average response time at the end of decay window
+  // Update the cached average response time at the end of the decay window
   void updateAverageResponseTime(boolean enableDecay) {
     for (int i = 0; i < numLevels; i++) {
       double averageResponseTime = 0;
@@ -590,11 +609,13 @@ public class DecayRpcScheduler implements RpcScheduler,
         averageResponseTime = (double) totalResponseTime / responseTimeCount;
       }
       final double lastAvg = responseTimeAvgInLastWindow.get(i);
-      if (enableDecay && lastAvg > 0.0) {
-        final double decayed = decayFactor * lastAvg + averageResponseTime;
-        responseTimeAvgInLastWindow.set(i, decayed);
-      } else {
-        responseTimeAvgInLastWindow.set(i, averageResponseTime);
+      if (lastAvg > PRECISION || averageResponseTime > PRECISION) {
+        if (enableDecay) {
+          final double decayed = decayFactor * lastAvg + averageResponseTime;
+          responseTimeAvgInLastWindow.set(i, decayed);
+        } else {
+          responseTimeAvgInLastWindow.set(i, averageResponseTime);
+        }
       }
       responseTimeCountInLastWindow.set(i, responseTimeCount);
       if (LOG.isDebugEnabled()) {
@@ -624,8 +645,8 @@ public class DecayRpcScheduler implements RpcScheduler,
   public Map<Object, Long> getCallCountSnapshot() {
     HashMap<Object, Long> snapshot = new HashMap<Object, Long>();
 
-    for (Map.Entry<Object, AtomicLong> entry : callCounts.entrySet()) {
-      snapshot.put(entry.getKey(), entry.getValue().get());
+    for (Map.Entry<Object, List<AtomicLong>> entry : callCounts.entrySet()) {
+      snapshot.put(entry.getKey(), entry.getValue().get(0).get());
     }
 
     return Collections.unmodifiableMap(snapshot);
@@ -633,7 +654,7 @@ public class DecayRpcScheduler implements RpcScheduler,
 
   @VisibleForTesting
   public long getTotalCallSnapshot() {
-    return totalCalls.get();
+    return totalDecayedCallCount.get();
   }
 
   /**
@@ -750,7 +771,11 @@ public class DecayRpcScheduler implements RpcScheduler,
   }
 
   public long getTotalCallVolume() {
-    return totalCalls.get();
+    return totalDecayedCallCount.get();
+  }
+
+  public long getTotalRawCallVolume() {
+    return totalRawCallCount.get();
   }
 
   public long[] getResponseTimeCountInLastWindow() {
@@ -776,11 +801,12 @@ public class DecayRpcScheduler implements RpcScheduler,
     try {
       MetricsRecordBuilder rb = collector.addRecord(getClass().getName())
           .setContext(namespace);
-      addTotalCallVolume(rb);
+      addDecayedCallVolume(rb);
       addUniqueIdentityCount(rb);
       addTopNCallerSummary(rb);
       addAvgResponseTimePerPriority(rb);
       addCallVolumePerPriority(rb);
+      addRawCallVolume(rb);
     } catch (Exception e) {
       LOG.warn("Exception thrown while metric collection. Exception : "
           + e.getMessage());
@@ -793,16 +819,22 @@ public class DecayRpcScheduler implements RpcScheduler,
         getUniqueIdentityCount());
   }
 
-  // Key: CallVolume
-  private void addTotalCallVolume(MetricsRecordBuilder rb) {
-    rb.addCounter(Interns.info("CallVolume", "Total Call Volume"),
-        getTotalCallVolume());
+  // Key: DecayedCallVolume
+  private void addDecayedCallVolume(MetricsRecordBuilder rb) {
+    rb.addCounter(Interns.info("DecayedCallVolume", "Decayed Total " +
+        "incoming Call Volume"), getTotalCallVolume());
+  }
+
+  private void addRawCallVolume(MetricsRecordBuilder rb) {
+    rb.addCounter(Interns.info("CallVolume", "Raw Total " +
+        "incoming Call Volume"), getTotalRawCallVolume());
   }
 
-  // Key: Priority.0.CallVolume
+  // Key: Priority.0.CompletedCallVolume
   private void addCallVolumePerPriority(MetricsRecordBuilder rb) {
     for (int i = 0; i < responseTimeCountInLastWindow.length(); i++) {
-      rb.addGauge(Interns.info("Priority." + i + ".CallVolume", "Call volume " +
+      rb.addGauge(Interns.info("Priority." + i + ".CompletedCallVolume",
+          "Completed Call volume " +
           "of priority "+ i), responseTimeCountInLastWindow.get(i));
     }
   }
@@ -816,16 +848,14 @@ public class DecayRpcScheduler implements RpcScheduler,
     }
   }
 
-  // Key: Top.0.Caller(xyz).Volume and Top.0.Caller(xyz).Priority
+  // Key: Caller(xyz).Volume and Caller(xyz).Priority
   private void addTopNCallerSummary(MetricsRecordBuilder rb) {
-    final int topCallerCount = 10;
-    TopN topNCallers = getTopCallers(topCallerCount);
+    TopN topNCallers = getTopCallers(topUsersCount);
     Map<Object, Integer> decisions = scheduleCacheRef.get();
     final int actualCallerCount = topNCallers.size();
     for (int i = 0; i < actualCallerCount; i++) {
       NameValuePair entry =  topNCallers.poll();
-      String topCaller = "Top." + (actualCallerCount - i) + "." +
-          "Caller(" + entry.getName() + ")";
+      String topCaller = "Caller(" + entry.getName() + ")";
       String topCallerVolume = topCaller + ".Volume";
       String topCallerPriority = topCaller + ".Priority";
       rb.addCounter(Interns.info(topCallerVolume, topCallerVolume),
@@ -838,15 +868,15 @@ public class DecayRpcScheduler implements RpcScheduler,
     }
   }
 
-  // Get the top N callers' call count and scheduler decision
+  // Get the top N callers' raw call count and scheduler decision
   private TopN getTopCallers(int n) {
     TopN topNCallers = new TopN(n);
-    Iterator<Map.Entry<Object, AtomicLong>> it =
+    Iterator<Map.Entry<Object, List<AtomicLong>>> it =
         callCounts.entrySet().iterator();
     while (it.hasNext()) {
-      Map.Entry<Object, AtomicLong> entry = it.next();
+      Map.Entry<Object, List<AtomicLong>> entry = it.next();
       String caller = entry.getKey().toString();
-      Long count = entry.getValue().get();
+      Long count = entry.getValue().get(1).get();
       if (count > 0) {
         topNCallers.offer(new NameValuePair(caller, count));
       }
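
Illustrative arithmetic only (not scheduler code): how the two per-identity counters introduced above diverge across decay windows. The decayFactor of 0.5 is an assumed example value.

public final class DecayCountExample {
  public static void main(String[] args) {
    final double decayFactor = 0.5;   // assumed for the example
    long decayed = 0;                 // idx 0 in callCounts
    long raw = 0;                     // idx 1 in callCounts

    decayed += 8; raw += 8;                    // window 1: 8 calls arrive
    decayed = (long) (decayed * decayFactor);  // decay at window end: 4, raw stays 8

    decayed += 2; raw += 2;                    // window 2: 2 more calls
    // The decayed value (6) drives priority decisions and DecayedCallVolume;
    // the raw value (10) feeds the new raw CallVolume metric.
    System.out.println("decayed=" + decayed + ", raw=" + raw);
  }
}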

+ 32 - 33
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java

@@ -18,21 +18,9 @@
 
 package org.apache.hadoop.ipc;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.lang.reflect.Method;
-import java.lang.reflect.Proxy;
-import java.net.InetSocketAddress;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Future;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import javax.net.SocketFactory;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.*;
+import com.google.protobuf.Descriptors.MethodDescriptor;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -52,17 +40,22 @@ import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.concurrent.AsyncGet;
 import org.apache.htrace.core.TraceScope;
 import org.apache.htrace.core.Tracer;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.protobuf.BlockingService;
-import com.google.protobuf.CodedOutputStream;
-import com.google.protobuf.Descriptors.MethodDescriptor;
-import com.google.protobuf.GeneratedMessage;
-import com.google.protobuf.Message;
-import com.google.protobuf.ServiceException;
-import com.google.protobuf.TextFormat;
+import javax.net.SocketFactory;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.net.InetSocketAddress;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 /**
  * RPC Engine for protobuf based RPCs.
@@ -70,8 +63,8 @@ import com.google.protobuf.TextFormat;
 @InterfaceStability.Evolving
 public class ProtobufRpcEngine implements RpcEngine {
   public static final Log LOG = LogFactory.getLog(ProtobufRpcEngine.class);
-  private static final ThreadLocal<Callable<?>>
-      RETURN_MESSAGE_CALLBACK = new ThreadLocal<>();
+  private static final ThreadLocal<AsyncGet<Message, Exception>>
+      ASYNC_RETURN_MESSAGE = new ThreadLocal<>();
 
   static { // Register the rpcRequest deserializer for WritableRpcEngine 
     org.apache.hadoop.ipc.Server.registerProtocolEngine(
@@ -81,10 +74,9 @@ public class ProtobufRpcEngine implements RpcEngine {
 
   private static final ClientCache CLIENTS = new ClientCache();
 
-  @SuppressWarnings("unchecked")
   @Unstable
-  public static <T> Callable<T> getReturnMessageCallback() {
-    return (Callable<T>) RETURN_MESSAGE_CALLBACK.get();
+  public static AsyncGet<Message, Exception> getAsyncReturnMessage() {
+    return ASYNC_RETURN_MESSAGE.get();
   }
 
   public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
@@ -263,14 +255,21 @@ public class ProtobufRpcEngine implements RpcEngine {
       }
       
       if (Client.isAsynchronousMode()) {
-        final Future<RpcResponseWrapper> frrw = Client.getReturnRpcResponse();
-        Callable<Message> callback = new Callable<Message>() {
+        final AsyncGet<RpcResponseWrapper, IOException> arr
+            = Client.getAsyncRpcResponse();
+        final AsyncGet<Message, Exception> asyncGet
+            = new AsyncGet<Message, Exception>() {
+          @Override
+          public Message get(long timeout, TimeUnit unit) throws Exception {
+            return getReturnMessage(method, arr.get(timeout, unit));
+          }
+
           @Override
-          public Message call() throws Exception {
-            return getReturnMessage(method, frrw.get());
+          public boolean isDone() {
+            return arr.isDone();
           }
         };
-        RETURN_MESSAGE_CALLBACK.set(callback);
+        ASYNC_RETURN_MESSAGE.set(asyncGet);
         return null;
       } else {
         return getReturnMessage(method, val);
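
Illustrative only: a hedged consumer sketch for the Message-typed handle the engine now publishes. Only getAsyncReturnMessage(), get(timeout, unit) and isDone() come from the change; the one-second timeout and the wrapper method are assumptions, and the returned Message still has to be handled as the concrete protobuf response type.

import java.util.concurrent.TimeUnit;

import com.google.protobuf.Message;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.util.concurrent.AsyncGet;

public class AsyncProtobufSketch {
  // Assumes a protobuf proxy method was just invoked on this thread while
  // Client.isAsynchronousMode() was true, so the call itself returned null.
  static Message fetchAsyncResult() throws Exception {
    AsyncGet<Message, Exception> asyncReturn =
        ProtobufRpcEngine.getAsyncReturnMessage();
    // asyncReturn.isDone() can be consulted first to avoid blocking; here we
    // simply wait up to one second. An unfinished call surfaces as a
    // TimeoutException from the underlying Client handle.
    return asyncReturn.get(1, TimeUnit.SECONDS);
  }
}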

+ 256 - 29
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java

@@ -17,20 +17,38 @@
  */
 package org.apache.hadoop.log;
 
-import java.io.*;
-import java.net.*;
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.PrintWriter;
+import java.net.URL;
+import java.net.URLConnection;
 import java.util.regex.Pattern;
 
-import javax.servlet.*;
-import javax.servlet.http.*;
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLSocketFactory;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
-import org.apache.commons.logging.*;
-import org.apache.commons.logging.impl.*;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Jdk14Logger;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
+import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.util.ServletUtil;
+import org.apache.hadoop.util.Tool;
 
 /**
  * Change log level in runtime.
@@ -38,43 +56,252 @@ import org.apache.hadoop.util.ServletUtil;
 @InterfaceStability.Evolving
 public class LogLevel {
   public static final String USAGES = "\nUsage: General options are:\n"
-      + "\t[-getlevel <host:httpPort> <classname>]\n"
-      + "\t[-setlevel <host:httpPort> <classname> <level>]\n";
+      + "\t[-getlevel <host:port> <classname> [-protocol (http|https)]]\n"
+      + "\t[-setlevel <host:port> <classname> <level> "
+      + "[-protocol (http|https)]]\n";
 
+  public static final String PROTOCOL_HTTP = "http";
+  public static final String PROTOCOL_HTTPS = "https";
   /**
    * A command line implementation
    */
-  public static void main(String[] args) {
-    if (args.length == 3 && "-getlevel".equals(args[0])) {
-      process("http://" + args[1] + "/logLevel?log=" + args[2]);
-      return;
-    }
-    else if (args.length == 4 && "-setlevel".equals(args[0])) {
-      process("http://" + args[1] + "/logLevel?log=" + args[2]
-              + "&level=" + args[3]);
-      return;
-    }
+  public static void main(String[] args) throws Exception {
+    CLI cli = new CLI(new Configuration());
+    System.exit(cli.run(args));
+  }
+
+  /**
+   * Valid command line options.
+   */
+  private enum Operations {
+    GETLEVEL,
+    SETLEVEL,
+    UNKNOWN
+  }
 
+  private static void printUsage() {
     System.err.println(USAGES);
-    System.exit(-1);
   }
 
-  private static void process(String urlstring) {
-    try {
-      URL url = new URL(urlstring);
-      System.out.println("Connecting to " + url);
-      URLConnection connection = url.openConnection();
+  public static boolean isValidProtocol(String protocol) {
+    return ((protocol.equals(PROTOCOL_HTTP) ||
+      protocol.equals(PROTOCOL_HTTPS)));
+  }
+
+  @VisibleForTesting
+  static class CLI extends Configured implements Tool {
+    private Operations operation = Operations.UNKNOWN;
+    private String protocol;
+    private String hostName;
+    private String className;
+    private String level;
+
+    CLI(Configuration conf) {
+      setConf(conf);
+    }
+
+    @Override
+    public int run(String[] args) throws Exception {
+      try {
+        parseArguments(args);
+        sendLogLevelRequest();
+      } catch (HadoopIllegalArgumentException e) {
+        printUsage();
+        throw e;
+      }
+      return 0;
+    }
+
+    /**
+     * Send HTTP/HTTPS request to the daemon.
+     * @throws HadoopIllegalArgumentException if arguments are invalid.
+     * @throws Exception if unable to connect
+     */
+    private void sendLogLevelRequest()
+        throws HadoopIllegalArgumentException, Exception {
+      switch (operation) {
+      case GETLEVEL:
+        doGetLevel();
+        break;
+      case SETLEVEL:
+        doSetLevel();
+        break;
+      default:
+        throw new HadoopIllegalArgumentException(
+          "Expect either -getlevel or -setlevel");
+      }
+    }
+
+    public void parseArguments(String[] args) throws
+        HadoopIllegalArgumentException {
+      if (args.length == 0) {
+        throw new HadoopIllegalArgumentException("No arguments specified");
+      }
+      int nextArgIndex = 0;
+      while (nextArgIndex < args.length) {
+        if (args[nextArgIndex].equals("-getlevel")) {
+          nextArgIndex = parseGetLevelArgs(args, nextArgIndex);
+        } else if (args[nextArgIndex].equals("-setlevel")) {
+          nextArgIndex = parseSetLevelArgs(args, nextArgIndex);
+        } else if (args[nextArgIndex].equals("-protocol")) {
+          nextArgIndex = parseProtocolArgs(args, nextArgIndex);
+        } else {
+          throw new HadoopIllegalArgumentException(
+              "Unexpected argument " + args[nextArgIndex]);
+        }
+      }
+
+      // if operation is never specified in the arguments
+      if (operation == Operations.UNKNOWN) {
+        throw new HadoopIllegalArgumentException(
+            "Must specify either -getlevel or -setlevel");
+      }
+
+      // if protocol is unspecified, set it as http.
+      if (protocol == null) {
+        protocol = PROTOCOL_HTTP;
+      }
+    }
+
+    private int parseGetLevelArgs(String[] args, int index) throws
+        HadoopIllegalArgumentException {
+      // fail if multiple operations are specified in the arguments
+      if (operation != Operations.UNKNOWN) {
+        throw new HadoopIllegalArgumentException(
+            "Redundant -getlevel command");
+      }
+      // check number of arguments is sufficient
+      if (index+2 >= args.length) {
+        throw new HadoopIllegalArgumentException(
+            "-getlevel needs two parameters");
+      }
+      operation = Operations.GETLEVEL;
+      hostName = args[index+1];
+      className = args[index+2];
+      return index+3;
+    }
+
+    private int parseSetLevelArgs(String[] args, int index) throws
+        HadoopIllegalArgumentException {
+      // fail if multiple operations are specified in the arguments
+      if (operation != Operations.UNKNOWN) {
+        throw new HadoopIllegalArgumentException(
+            "Redundant -setlevel command");
+      }
+      // check number of arguments is sufficient
+      if (index+3 >= args.length) {
+        throw new HadoopIllegalArgumentException(
+            "-setlevel needs three parameters");
+      }
+      operation = Operations.SETLEVEL;
+      hostName = args[index+1];
+      className = args[index+2];
+      level = args[index+3];
+      return index+4;
+    }
+
+    private int parseProtocolArgs(String[] args, int index) throws
+        HadoopIllegalArgumentException {
+      // make sure only -protocol is specified
+      if (protocol != null) {
+        throw new HadoopIllegalArgumentException(
+            "Redundant -protocol command");
+      }
+      // check number of arguments is sufficient
+      if (index+1 >= args.length) {
+        throw new HadoopIllegalArgumentException(
+            "-protocol needs one parameter");
+      }
+      // check protocol is valid
+      protocol = args[index+1];
+      if (!isValidProtocol(protocol)) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid protocol: " + protocol);
+      }
+      return index+2;
+    }
+
+    /**
+     * Send HTTP/HTTPS request to get log level.
+     *
+     * @throws HadoopIllegalArgumentException if arguments are invalid.
+     * @throws Exception if unable to connect
+     */
+    private void doGetLevel() throws Exception {
+      process(protocol + "://" + hostName + "/logLevel?log=" + className);
+    }
+
+    /**
+     * Send HTTP/HTTPS request to set log level.
+     *
+     * @throws HadoopIllegalArgumentException if arguments are invalid.
+     * @throws Exception if unable to connect
+     */
+    private void doSetLevel() throws Exception {
+      process(protocol + "://" + hostName + "/logLevel?log=" + className
+          + "&level=" + level);
+    }
+
+    /**
+     * Connect to the URL. Supports HTTP/HTTPS and supports SPNEGO
+     * authentication. It falls back to simple authentication if it fails to
+     * initiate SPNEGO.
+     *
+     * @param url the URL address of the daemon servlet
+     * @return a connected connection
+     * @throws Exception if it can not establish a connection.
+     */
+    private URLConnection connect(URL url) throws Exception {
+      AuthenticatedURL.Token token = new AuthenticatedURL.Token();
+      AuthenticatedURL aUrl;
+      SSLFactory clientSslFactory;
+      URLConnection connection;
+      // If https is chosen, configures SSL client.
+      if (PROTOCOL_HTTPS.equals(url.getProtocol())) {
+        clientSslFactory = new SSLFactory(
+            SSLFactory.Mode.CLIENT, this.getConf());
+        clientSslFactory.init();
+        SSLSocketFactory sslSocketF = clientSslFactory.createSSLSocketFactory();
+
+        aUrl = new AuthenticatedURL(
+            new KerberosAuthenticator(), clientSslFactory);
+        connection = aUrl.openConnection(url, token);
+        HttpsURLConnection httpsConn = (HttpsURLConnection) connection;
+        httpsConn.setSSLSocketFactory(sslSocketF);
+      } else {
+        aUrl = new AuthenticatedURL(new KerberosAuthenticator());
+        connection = aUrl.openConnection(url, token);
+      }
+
       connection.connect();
+      return connection;
+    }
+
+    /**
+     * Configures the client to send HTTP/HTTPS request to the URL.
+     * Supports SPNEGO for authentication.
+     * @param urlString URL and query string to the daemon's web UI
+     * @throws Exception if unable to connect
+     */
+    private void process(String urlString) throws Exception {
+      URL url = new URL(urlString);
+      System.out.println("Connecting to " + url);
 
-      BufferedReader in = new BufferedReader(new InputStreamReader(
-          connection.getInputStream(), Charsets.UTF_8));
-      for(String line; (line = in.readLine()) != null; )
+      URLConnection connection = connect(url);
+
+      // read from the servlet
+      BufferedReader in = new BufferedReader(
+          new InputStreamReader(connection.getInputStream(), Charsets.UTF_8));
+      for (String line;;) {
+        line = in.readLine();
+        if (line == null) {
+          break;
+        }
         if (line.startsWith(MARKER)) {
           System.out.println(TAG.matcher(line).replaceAll(""));
         }
+      }
       in.close();
-    } catch (IOException ioe) {
-      System.err.println("" + ioe);
     }
   }
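
Illustrative only: a hypothetical invocation of the extended CLI (normally reached through the "hadoop daemonlog" command). The host, port and logger name are placeholders; HTTPS plus SPNEGO behaviour depends on the ssl-client and security settings in the effective Configuration.

public class DaemonLogExample {
  public static void main(String[] args) throws Exception {
    // Equivalent to: hadoop daemonlog -setlevel nn.example.com:9871
    //     org.apache.hadoop.hdfs.server.namenode.NameNode DEBUG -protocol https
    org.apache.hadoop.log.LogLevel.main(new String[] {
        "-setlevel", "nn.example.com:9871",
        "org.apache.hadoop.hdfs.server.namenode.NameNode", "DEBUG",
        "-protocol", "https"});
  }
}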
 

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java

@@ -255,6 +255,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
     if (namedCallbacks.containsKey(name)) {
       namedCallbacks.remove(name);
     }
+    DefaultMetricsSystem.removeSourceName(name);
   }
 
   synchronized

+ 9 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java

@@ -115,6 +115,11 @@ public enum DefaultMetricsSystem {
     INSTANCE.removeObjectName(name.toString());
   }
 
+  @InterfaceAudience.Private
+  public static void removeSourceName(String name) {
+    INSTANCE.removeSource(name);
+  }
+
   @InterfaceAudience.Private
   public static String sourceName(String name, boolean dupOK) {
     return INSTANCE.newSourceName(name, dupOK);
@@ -135,6 +140,10 @@ public enum DefaultMetricsSystem {
     mBeanNames.map.remove(name);
   }
 
+  synchronized void removeSource(String name) {
+    sourceNames.map.remove(name);
+  }
+
   synchronized String newSourceName(String name, boolean dupOK) {
     if (sourceNames.map.containsKey(name)) {
       if (dupOK) {
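
Why the new removeSourceName hook matters, as a hedged sketch (it assumes the pre-existing MetricsSystem#register and MetricsSystem#unregisterSource methods): once unregistering a source also drops its cached name, the same name can be registered again without tripping the duplicate-name check.

import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

public class ReRegisterSketch {
  static void reRegister(MetricsSource source) {
    MetricsSystem ms = DefaultMetricsSystem.instance();
    ms.register("MySource", "example source", source);  // hypothetical name
    ms.unregisterSource("MySource");
    ms.register("MySource", "example source", source);  // no longer rejected
  }
}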

+ 291 - 62
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/RollingFileSystemSink.java

@@ -31,6 +31,10 @@ import java.util.Date;
 import java.util.TimeZone;
 import java.util.Timer;
 import java.util.TimerTask;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.commons.configuration.SubsetConfiguration;
 import org.apache.commons.lang.time.FastDateFormat;
@@ -53,14 +57,14 @@ import org.apache.hadoop.security.UserGroupInformation;
 /**
  * <p>This class is a metrics sink that uses
  * {@link org.apache.hadoop.fs.FileSystem} to write the metrics logs.  Every
- * hour a new directory will be created under the path specified by the
+ * roll interval a new directory will be created under the path specified by the
  * <code>basepath</code> property. All metrics will be logged to a file in the
- * current hour's directory in a file named &lt;hostname&gt;.log, where
+ * current interval's directory in a file named &lt;hostname&gt;.log, where
  * &lt;hostname&gt; is the name of the host on which the metrics logging
  * process is running. The base path is set by the
  * <code>&lt;prefix&gt;.sink.&lt;instance&gt;.basepath</code> property.  The
- * time zone used to create the current hour's directory name is GMT.  If the
- * <code>basepath</code> property isn't specified, it will default to
+ * time zone used to create the current interval's directory name is GMT.  If
+ * the <code>basepath</code> property isn't specified, it will default to
  * &quot;/tmp&quot;, which is the temp directory on whatever default file
  * system is configured for the cluster.</p>
  *
@@ -69,6 +73,26 @@ import org.apache.hadoop.security.UserGroupInformation;
  * writing a log file.  The default value is <code>true</code>.  When set to
  * <code>false</code>, file errors are quietly swallowed.</p>
  *
+ * <p>The <code>roll-interval</code> property sets the amount of time before
+ * rolling the directory. The default value is 1 hour. The roll interval may
+ * not be less than 1 minute. The property's value should be given as
+ * <i>number unit</i>, where <i>number</i> is an integer value, and
+ * <i>unit</i> is a valid unit.  Valid units are <i>minute</i>, <i>hour</i>,
+ * and <i>day</i>.  The units are case insensitive and may be abbreviated or
+ * plural. If no units are specified, hours are assumed. For example,
+ * &quot;2&quot;, &quot;2h&quot;, &quot;2 hour&quot;, and
+ * &quot;2 hours&quot; are all valid ways to specify two hours.</p>
+ *
+ * <p>The <code>roll-offset-interval-millis</code> property sets the upper
+ * bound on a random time interval (in milliseconds) that is used to delay
+ * before the initial roll.  All subsequent rolls will happen an integer
+ * number of roll intervals after the initial roll, hence retaining the original
+ * offset. The purpose of this property is to insert some variance in the roll
+ * times so that large clusters using this sink on every node don't cause a
+ * performance impact on HDFS by rolling simultaneously.  The default value is
+ * 30000 (30s).  When writing to HDFS, as a rule of thumb, the roll offset in
+ * millis should be no less than the number of sink instances times 5.
+ *
  * <p>The primary use of this class is for logging to HDFS.  As it uses
  * {@link org.apache.hadoop.fs.FileSystem} to access the target file system,
  * however, it can be used to write to the local file system, Amazon S3, or any
@@ -79,7 +103,8 @@ import org.apache.hadoop.security.UserGroupInformation;
  * <p>Not all file systems support the ability to append to files.  In file
  * systems without the ability to append to files, only one writer can write to
  * a file at a time.  To allow for concurrent writes from multiple daemons on a
- * single host, the <code>source</code> property should be set to the name of
+ * single host, the <code>source</code> property is used to set unique headers
+ * for the log files.  The property should be set to the name of
  * the source daemon, e.g. <i>namenode</i>.  The value of the
  * <code>source</code> property should typically be the same as the property's
  * prefix.  If this property is not set, the source is taken to be
@@ -105,7 +130,7 @@ import org.apache.hadoop.security.UserGroupInformation;
  * 3.</p>
  *
  * <p>Note also that when writing to HDFS, the file size information is not
- * updated until the file is closed (e.g. at the top of the hour) even though
+ * updated until the file is closed (at the end of the interval) even though
  * the data is being written successfully. This is a known HDFS limitation that
  * exists because of the performance cost of updating the metadata.  See
  * <a href="https://issues.apache.org/jira/browse/HDFS-5478">HDFS-5478</a>.</p>
@@ -124,21 +149,32 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
   private static final String BASEPATH_KEY = "basepath";
   private static final String SOURCE_KEY = "source";
   private static final String IGNORE_ERROR_KEY = "ignore-error";
+  private static final boolean DEFAULT_IGNORE_ERROR = false;
   private static final String ALLOW_APPEND_KEY = "allow-append";
+  private static final boolean DEFAULT_ALLOW_APPEND = false;
   private static final String KEYTAB_PROPERTY_KEY = "keytab-key";
   private static final String USERNAME_PROPERTY_KEY = "principal-key";
+  private static final String ROLL_INTERVAL_KEY = "roll-interval";
+  private static final String DEFAULT_ROLL_INTERVAL = "1h";
+  private static final String ROLL_OFFSET_INTERVAL_MILLIS_KEY =
+      "roll-offset-interval-millis";
+  private static final int DEFAULT_ROLL_OFFSET_INTERVAL_MILLIS = 30000;
   private static final String SOURCE_DEFAULT = "unknown";
   private static final String BASEPATH_DEFAULT = "/tmp";
   private static final FastDateFormat DATE_FORMAT =
-      FastDateFormat.getInstance("yyyyMMddHH", TimeZone.getTimeZone("GMT"));
+      FastDateFormat.getInstance("yyyyMMddHHmm", TimeZone.getTimeZone("GMT"));
   private final Object lock = new Object();
   private boolean initialized = false;
   private SubsetConfiguration properties;
   private Configuration conf;
-  private String source;
-  private boolean ignoreError;
-  private boolean allowAppend;
-  private Path basePath;
+  @VisibleForTesting
+  protected String source;
+  @VisibleForTesting
+  protected boolean ignoreError;
+  @VisibleForTesting
+  protected boolean allowAppend;
+  @VisibleForTesting
+  protected Path basePath;
   private FileSystem fileSystem;
   // The current directory path into which we're writing files
   private Path currentDirPath;
@@ -149,11 +185,21 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
   // We keep this only to be able to call hsynch() on it.
   private FSDataOutputStream currentFSOutStream;
   private Timer flushTimer;
-
-  // This flag is used during testing to make the flusher thread run after only
-  // a short pause instead of waiting for the top of the hour.
+  // The amount of time between rolls
+  @VisibleForTesting
+  protected long rollIntervalMillis;
+  // The maximum amount of random time to add to the initial roll
+  @VisibleForTesting
+  protected long rollOffsetIntervalMillis;
+  // The time for the nextFlush
+  @VisibleForTesting
+  protected Calendar nextFlush = null;
+  // This flag when true causes a metrics write to schedule a flush thread to
+  // run immediately, but only if a flush thread is already scheduled. (It's a
+  // timing thing.  If the first write forces the flush, it will strand the
+  // second write.)
   @VisibleForTesting
-  protected static boolean flushQuickly = false;
+  protected static boolean forceFlush = false;
   // This flag is used by the flusher thread to indicate that it has run. Used
   // only for testing purposes.
   @VisibleForTesting
@@ -165,13 +211,36 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
   @VisibleForTesting
   protected static FileSystem suppliedFilesystem = null;
 
+  /**
+   * Create an empty instance.  Required for reflection.
+   */
+  public RollingFileSystemSink() {
+  }
+
+  /**
+   * Create an instance for testing.
+   *
+   * @param flushIntervalMillis the roll interval in millis
+   * @param flushOffsetIntervalMillis the roll offset interval in millis
+   */
+  @VisibleForTesting
+  protected RollingFileSystemSink(long flushIntervalMillis,
+      long flushOffsetIntervalMillis) {
+    this.rollIntervalMillis = flushIntervalMillis;
+    this.rollOffsetIntervalMillis = flushOffsetIntervalMillis;
+  }
+
   @Override
   public void init(SubsetConfiguration metrics2Properties) {
     properties = metrics2Properties;
     basePath = new Path(properties.getString(BASEPATH_KEY, BASEPATH_DEFAULT));
     source = properties.getString(SOURCE_KEY, SOURCE_DEFAULT);
-    ignoreError = properties.getBoolean(IGNORE_ERROR_KEY, false);
-    allowAppend = properties.getBoolean(ALLOW_APPEND_KEY, false);
+    ignoreError = properties.getBoolean(IGNORE_ERROR_KEY, DEFAULT_IGNORE_ERROR);
+    allowAppend = properties.getBoolean(ALLOW_APPEND_KEY, DEFAULT_ALLOW_APPEND);
+    rollOffsetIntervalMillis =
+        getNonNegative(ROLL_OFFSET_INTERVAL_MILLIS_KEY,
+          DEFAULT_ROLL_OFFSET_INTERVAL_MILLIS);
+    rollIntervalMillis = getRollInterval();
 
     conf = loadConf();
     UserGroupInformation.setConfiguration(conf);
@@ -179,8 +248,8 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
     // Don't do secure setup if it's not needed.
     if (UserGroupInformation.isSecurityEnabled()) {
       // Validate config so that we don't get an NPE
-      checkForProperty(properties, KEYTAB_PROPERTY_KEY);
-      checkForProperty(properties, USERNAME_PROPERTY_KEY);
+      checkIfPropertyExists(KEYTAB_PROPERTY_KEY);
+      checkIfPropertyExists(USERNAME_PROPERTY_KEY);
 
 
       try {
@@ -228,6 +297,7 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
       }
 
       flushTimer = new Timer("RollingFileSystemSink Flusher", true);
+      setInitialFlushTime(new Date());
     }
 
     return success;
@@ -238,8 +308,6 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
    * strings, allowing for either the property or the configuration not to be
    * set.
    *
-   * @param properties the sink properties
-   * @param conf the conf
    * @param property the property to stringify
    * @return the stringified property
    */
@@ -264,15 +332,98 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
     return securityProperty;
   }
 
+  /**
+   * Extract the roll interval from the configuration and return it in
+   * milliseconds.
+   *
+   * @return the roll interval in millis
+   */
+  @VisibleForTesting
+  protected long getRollInterval() {
+    String rollInterval =
+        properties.getString(ROLL_INTERVAL_KEY, DEFAULT_ROLL_INTERVAL);
+    Pattern pattern = Pattern.compile("^\\s*(\\d+)\\s*([A-Za-z]*)\\s*$");
+    Matcher match = pattern.matcher(rollInterval);
+    long millis;
+
+    if (match.matches()) {
+      String flushUnit = match.group(2);
+      int rollIntervalInt;
+
+      try {
+        rollIntervalInt = Integer.parseInt(match.group(1));
+      } catch (NumberFormatException ex) {
+        throw new MetricsException("Unrecognized flush interval: "
+            + rollInterval + ". Must be a number followed by an optional "
+            + "unit. The unit must be one of: minute, hour, day", ex);
+      }
+
+      if ("".equals(flushUnit)) {
+        millis = TimeUnit.HOURS.toMillis(rollIntervalInt);
+      } else {
+        switch (flushUnit.toLowerCase()) {
+        case "m":
+        case "min":
+        case "minute":
+        case "minutes":
+          millis = TimeUnit.MINUTES.toMillis(rollIntervalInt);
+          break;
+        case "h":
+        case "hr":
+        case "hour":
+        case "hours":
+          millis = TimeUnit.HOURS.toMillis(rollIntervalInt);
+          break;
+        case "d":
+        case "day":
+        case "days":
+          millis = TimeUnit.DAYS.toMillis(rollIntervalInt);
+          break;
+        default:
+          throw new MetricsException("Unrecognized unit for flush interval: "
+              + flushUnit + ". Must be one of: minute, hour, day");
+        }
+      }
+    } else {
+      throw new MetricsException("Unrecognized flush interval: "
+          + rollInterval + ". Must be a number followed by an optional unit."
+          + " The unit must be one of: minute, hour, day");
+    }
+
+    if (millis < 60000) {
+      throw new MetricsException("The flush interval property must be "
+          + "at least 1 minute. Value was " + rollInterval);
+    }
+
+    return millis;
+  }
+
+  /**
+   * Return the property value if it's non-negative and throw an exception if
+   * it's not.
+   *
+   * @param key the property key
+   * @param defaultValue the default value
+   */
+  private long getNonNegative(String key, int defaultValue) {
+    int flushOffsetIntervalMillis = properties.getInt(key, defaultValue);
+
+    if (flushOffsetIntervalMillis < 0) {
+      throw new MetricsException("The " + key + " property must be "
+          + "non-negative. Value was " + flushOffsetIntervalMillis);
+    }
+
+    return flushOffsetIntervalMillis;
+  }
+
   /**
    * Throw a {@link MetricsException} if the given property is not set.
    *
-   * @param conf the configuration to test
    * @param key the key to validate
    */
-  private static void checkForProperty(SubsetConfiguration conf, String key) {
-    if (!conf.containsKey(key)) {
-      throw new MetricsException("Configuration is missing " + key
+  private void checkIfPropertyExists(String key) {
+    if (!properties.containsKey(key)) {
+      throw new MetricsException("Metrics2 configuration is missing " + key
           + " property");
     }
   }
@@ -301,7 +452,6 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
    * Return the supplied file system for testing or otherwise get a new file
    * system.
    *
-   * @param conf the configuration
    * @return the file system to use
    * @throws MetricsException thrown if the file system could not be retrieved
    */
@@ -327,6 +477,7 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
 
   /**
    * Test whether the file system supports append and return the answer.
+   *
    * @param fs the target file system
    */
   private boolean checkAppend(FileSystem fs) {
@@ -351,14 +502,14 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
    * new directory or new log file
    */
   private void rollLogDirIfNeeded() throws MetricsException {
+    // Because we're working relative to the clock, we use a Date instead
+    // of Time.monotonicNow().
     Date now = new Date();
-    String currentDir = DATE_FORMAT.format(now);
-    Path path = new Path(basePath, currentDir);
 
     // We check whether currentOutStream is null instead of currentDirPath,
     // because if currentDirPath is null, then currentOutStream is null, but
-    // currentOutStream can be null for other reasons.
-    if ((currentOutStream == null) || !path.equals(currentDirPath)) {
+    // currentOutStream can be null for other reasons.  Same for nextFlush.
+    if ((currentOutStream == null) || now.after(nextFlush.getTime())) {
       // If we're not yet connected to HDFS, create the connection
       if (!initialized) {
         initialized = initFs();
@@ -372,7 +523,7 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
           currentOutStream.close();
         }
 
-        currentDirPath = path;
+        currentDirPath = findCurrentDirectory(now);
 
         try {
           rollLogDir();
@@ -380,34 +531,41 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
           throwMetricsException("Failed to create new log file", ex);
         }
 
-        scheduleFlush(now);
+        // Update the time of the next flush
+        updateFlushTime(now);
+        // Schedule the next flush at that time
+        scheduleFlush(nextFlush.getTime());
       }
+    } else if (forceFlush) {
+      scheduleFlush(new Date());
     }
   }
 
   /**
-   * Schedule the current hour's directory to be flushed at the top of the next
-   * hour. If this ends up running after the top of the next hour, it will
-   * execute immediately.
+   * Use the given time to determine the current directory. The current
+   * directory will be based on the {@link #rollIntervalMillis}.
    *
    * @param now the current time
+   * @return the current directory
    */
-  private void scheduleFlush(Date now) {
-    // Store the current currentDirPath to close later
-    final PrintStream toClose = currentOutStream;
-    Calendar next = Calendar.getInstance();
+  private Path findCurrentDirectory(Date now) {
+    long offset = ((now.getTime() - nextFlush.getTimeInMillis())
+        / rollIntervalMillis) * rollIntervalMillis;
+    String currentDir =
+        DATE_FORMAT.format(new Date(nextFlush.getTimeInMillis() + offset));
 
-    next.setTime(now);
+    return new Path(basePath, currentDir);
+  }
 
-    if (flushQuickly) {
-      // If we're running unit tests, flush after a short pause
-      next.add(Calendar.MILLISECOND, 400);
-    } else {
-      // Otherwise flush at the top of the hour
-      next.set(Calendar.SECOND, 0);
-      next.set(Calendar.MINUTE, 0);
-      next.add(Calendar.HOUR, 1);
-    }
+  /**
+   * Schedule the current interval's directory to be flushed. If this ends up
+   * running after the top of the next interval, it will execute immediately.
+   *
+   * @param when the time the thread should run
+   */
+  private void scheduleFlush(Date when) {
+    // Store the current currentDirPath to close later
+    final PrintStream toClose = currentOutStream;
 
     flushTimer.schedule(new TimerTask() {
       @Override
@@ -420,11 +578,81 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
 
         hasFlushed = true;
       }
-    }, next.getTime());
+    }, when);
+  }
+
+  /**
+   * Update the {@link #nextFlush} variable to the next flush time. Add
+   * an integer number of flush intervals, preserving the initial random offset.
+   *
+   * @param now the current time
+   */
+  @VisibleForTesting
+  protected void updateFlushTime(Date now) {
+    // In non-initial rounds, add an integer number of intervals to the last
+    // flush until a time in the future is achieved, thus preserving the
+    // original random offset.
+    int millis =
+        (int) (((now.getTime() - nextFlush.getTimeInMillis())
+        / rollIntervalMillis + 1) * rollIntervalMillis);
+
+    nextFlush.add(Calendar.MILLISECOND, millis);
+  }
+
+  /**
+   * Set the {@link #nextFlush} variable to the initial flush time. The initial
+   * flush will be an integer number of flush intervals past the beginning of
+   * the current hour and will have a random offset added, up to
+   * {@link #rollOffsetIntervalMillis}. The initial flush will be a time in
+   * the past from which future flush times can be calculated.
+   *
+   * @param now the current time
+   */
+  @VisibleForTesting
+  protected void setInitialFlushTime(Date now) {
+    // Start with the beginning of the current hour
+    nextFlush = Calendar.getInstance();
+    nextFlush.setTime(now);
+    nextFlush.set(Calendar.MILLISECOND, 0);
+    nextFlush.set(Calendar.SECOND, 0);
+    nextFlush.set(Calendar.MINUTE, 0);
+
+    // In the first round, calculate the first flush as the largest number of
+    // intervals from the beginning of the current hour that's not in the
+    // future by:
+    // 1. Subtract the beginning of the hour from the current time
+    // 2. Divide by the roll interval and round down to get the number of whole
+    //    intervals that have passed since the beginning of the hour
+    // 3. Multiply by the roll interval to get the number of millis between
+    //    the beginning of the current hour and the beginning of the current
+    //    interval.
+    int millis = (int) (((now.getTime() - nextFlush.getTimeInMillis())
+        / rollIntervalMillis) * rollIntervalMillis);
+
+    // Then add some noise to help prevent all the nodes from
+    // closing their files at the same time.
+    if (rollOffsetIntervalMillis > 0) {
+      millis += ThreadLocalRandom.current().nextLong(rollOffsetIntervalMillis);
+
+      // If the added time puts us into the future, step back one roll interval
+      // because the code to increment nextFlush to the next flush expects that
+      // nextFlush is the next flush from the previous interval.  There wasn't
+      // a previous interval, so we just fake it with the time in the past that
+      // would have been the previous interval if there had been one.
+      //
+      // It's OK if millis comes out negative.
+      while (nextFlush.getTimeInMillis() + millis > now.getTime()) {
+        millis -= rollIntervalMillis;
+      }
+    }
+
+    // Adjust the next flush time by millis to get the time of our fictitious
+    // previous next flush
+    nextFlush.add(Calendar.MILLISECOND, millis);
   }
 
   /**
-   * Create a new directory based on the current hour and a new log file in
+   * Create a new directory based on the current interval and a new log file in
    * that directory.
    *
    * @throws IOException thrown if an error occurs while creating the
@@ -451,7 +679,8 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
    * path is found.
    *
    * Once the file is open, update {@link #currentFSOutStream},
-   * {@link #currentOutStream}, and {@#link #currentFile} are set appropriately.
+   * {@link #currentOutStream}, and {@link #currentFilePath} are set
+   * appropriately.
    *
    * @param initial the target path
    * @throws IOException thrown if the call to see if the exists fails
@@ -552,7 +781,7 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
    * instead.
    *
    * Once the file is open, update {@link #currentFSOutStream},
-   * {@link #currentOutStream}, and {@#link #currentFile} are set appropriately.
+   * {@link #currentOutStream}, and {@link #currentFilePath}.
    *
    * @param initial the target path
    * @throws IOException thrown if the call to see the append operation fails.
@@ -615,9 +844,9 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
         currentOutStream.println();
 
         // If we don't hflush(), the data may not be written until the file is
-        // closed. The file won't be closed until the top of the hour *AND*
+        // closed. The file won't be closed until the end of the interval *AND*
         // another record is received. Calling hflush() makes sure that the data
-        // is complete at the top of the hour.
+        // is complete at the end of the interval.
         try {
           currentFSOutStream.hflush();
         } catch (IOException ex) {
@@ -668,8 +897,8 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
    * as the new exception's message with the current file name
    * ({@link #currentFilePath}) appended to it.
    *
-   * @param message the exception message. The message will have the current
-   * file name ({@link #currentFilePath}) appended to it.
+   * @param message the exception message. The message will have a colon and
+   * the current file name ({@link #currentFilePath}) appended to it.
    * @throws MetricsException thrown if there was an error and the sink isn't
    * ignoring errors
    */
@@ -687,9 +916,9 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
    * ({@link #currentFilePath}) and the Throwable's string representation
    * appended to it.
    *
-   * @param message the exception message. The message will have the current
-   * file name ({@link #currentFilePath}) and the Throwable's string
-   * representation appended to it.
+   * @param message the exception message. The message will have a colon, the
+   * current file name ({@link #currentFilePath}), and the Throwable's string
+   * representation (wrapped in square brackets) appended to it.
    * @param t the Throwable to wrap
    */
   private void throwMetricsException(String message, Throwable t) {
@@ -705,8 +934,8 @@ public class RollingFileSystemSink implements MetricsSink, Closeable {
    * new exception's message with the current file name
    * ({@link #currentFilePath}) appended to it.
    *
-   * @param message the exception message. The message will have the current
-   * file name ({@link #currentFilePath}) appended to it.
+   * @param message the exception message. The message will have a colon and
+   * the current file name ({@link #currentFilePath}) appended to it.
    */
   private void throwMetricsException(String message) {
     if (!ignoreError) {
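
A worked numeric example of the rolling arithmetic in findCurrentDirectory() and updateFlushTime(), with assumed values: a 10-minute roll interval and an initial nextFlush 3 minutes past the hour (i.e. a 3-minute random offset).

import java.util.concurrent.TimeUnit;

public final class RollMathExample {
  public static void main(String[] args) {
    long interval = TimeUnit.MINUTES.toMillis(10);
    long hour = 0L;                                        // stand-in for 12:00
    long nextFlush = hour + TimeUnit.MINUTES.toMillis(3);  // 12:03
    long now = hour + TimeUnit.MINUTES.toMillis(27);       // 12:27

    // findCurrentDirectory(): two whole intervals have passed since nextFlush,
    // so the current directory is stamped 12:03 + 20 min = 12:23.
    long dirStamp = nextFlush + ((now - nextFlush) / interval) * interval;

    // updateFlushTime(): one interval beyond that, i.e. 12:33.
    long newNextFlush =
        nextFlush + ((now - nextFlush) / interval + 1) * interval;

    System.out.println(TimeUnit.MILLISECONDS.toMinutes(dirStamp));      // 23
    System.out.println(TimeUnit.MILLISECONDS.toMinutes(newNextFlush));  // 33
  }
}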

+ 22 - 12
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KDiag.java

@@ -19,9 +19,6 @@
 package org.apache.hadoop.security;
 
 import org.apache.commons.io.IOUtils;
-import org.apache.directory.server.kerberos.shared.keytab.Keytab;
-import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry;
-import org.apache.directory.shared.kerberos.components.EncryptionKey;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.io.Text;
@@ -33,6 +30,10 @@ import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.kerby.kerberos.kerb.keytab.Keytab;
+import org.apache.kerby.kerberos.kerb.keytab.KeytabEntry;
+import org.apache.kerby.kerberos.kerb.type.base.EncryptionKey;
+import org.apache.kerby.kerberos.kerb.type.base.PrincipalName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -544,16 +545,25 @@ public class KDiag extends Configured implements Tool, Closeable {
     title("Examining keytab %s", keytabFile);
     File kt = keytabFile.getCanonicalFile();
     verifyFileIsValid(kt, CAT_KERBEROS, "keytab");
-    List<KeytabEntry> entries = Keytab.read(kt).getEntries();
-    println("keytab entry count: %d", entries.size());
-    for (KeytabEntry entry : entries) {
-      EncryptionKey key = entry.getKey();
-      println(" %s: version=%d expires=%s encryption=%s",
-          entry.getPrincipalName(),
-          entry.getKeyVersion(),
-          entry.getTimeStamp(),
-          key.getKeyType());
+
+    Keytab loadKeytab = Keytab.loadKeytab(kt);
+    List<PrincipalName> principals = loadKeytab.getPrincipals();
+    println("keytab principal count: %d", principals.size());
+    int entrySize = 0;
+    for (PrincipalName princ : principals) {
+      List<KeytabEntry> entries = loadKeytab.getKeytabEntries(princ);
+      entrySize = entrySize + entries.size();
+      for (KeytabEntry entry : entries) {
+        EncryptionKey key = entry.getKey();
+        println(" %s: version=%d expires=%s encryption=%s",
+                entry.getPrincipal(),
+                entry.getKvno(),
+                entry.getTimestamp(),
+                key.getKeyType());
+      }
     }
+    println("keytab entry count: %d", entrySize);
+
     endln();
   }
 

+ 12 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java

@@ -179,6 +179,13 @@ public class LdapGroupsMapping
     LDAP_CONFIG_PREFIX + ".directory.search.timeout";
   public static final int DIRECTORY_SEARCH_TIMEOUT_DEFAULT = 10000; // 10s
 
+  public static final String CONNECTION_TIMEOUT =
+      LDAP_CONFIG_PREFIX + ".connection.timeout.ms";
+  public static final int CONNECTION_TIMEOUT_DEFAULT = 60 * 1000; // 60 seconds
+  public static final String READ_TIMEOUT =
+      LDAP_CONFIG_PREFIX + ".read.timeout.ms";
+  public static final int READ_TIMEOUT_DEFAULT = 60 * 1000; // 60 seconds
+
   private static final Log LOG = LogFactory.getLog(LdapGroupsMapping.class);
 
   private static final SearchControls SEARCH_CONTROLS = new SearchControls();
@@ -432,6 +439,11 @@ public class LdapGroupsMapping
       env.put(Context.SECURITY_PRINCIPAL, bindUser);
       env.put(Context.SECURITY_CREDENTIALS, bindPassword);
 
+      env.put("com.sun.jndi.ldap.connect.timeout", conf.get(CONNECTION_TIMEOUT,
+          String.valueOf(CONNECTION_TIMEOUT_DEFAULT)));
+      env.put("com.sun.jndi.ldap.read.timeout", conf.get(READ_TIMEOUT,
+          String.valueOf(READ_TIMEOUT_DEFAULT)));
+
       ctx = new InitialDirContext(env);
     }
 

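A minimal sketch of wiring up the two new LDAP timeout keys from client code; the 30-second values and the standalone setup below are illustrative only (the defaults stay at 60s, per the hunk above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.LdapGroupsMapping;

    public class LdapTimeoutExample {
      public static void main(String[] args) {
        // Illustrative only: tighten the new LDAP timeouts to 30s.
        Configuration conf = new Configuration();
        conf.set(LdapGroupsMapping.CONNECTION_TIMEOUT, "30000"); // connect timeout, ms
        conf.set(LdapGroupsMapping.READ_TIMEOUT, "30000");       // read timeout, ms

        LdapGroupsMapping mapping = new LdapGroupsMapping();
        // The timeouts are picked up when the mapping builds its JNDI context.
        mapping.setConf(conf);
      }
    }
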
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedIdMapping.java

@@ -85,7 +85,7 @@ public class ShellBasedIdMapping implements IdMappingServiceProvider {
   private static final Pattern EMPTY_LINE = Pattern.compile("^\\s*$");
   private static final Pattern COMMENT_LINE = Pattern.compile("^\\s*#.*$");
   private static final Pattern MAPPING_LINE =
-      Pattern.compile("^(uid|gid)\\s+(\\d+)\\s+(\\d+)\\s*(#.*)?$");
+      Pattern.compile("^(uid|gid)\\s+(\\d+)\\s+(0|-?[1-9]\\d*)\\s*(#.*)?$");
 
   final private long timeout;
   

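A small self-contained check of what the widened mapping pattern now accepts; the static-mapping lines below are made up, and the pattern is copied verbatim from the hunk above:

    import java.util.regex.Pattern;

    public class MappingLineCheck {
      // Pattern copied from the patch above.
      private static final Pattern MAPPING_LINE =
          Pattern.compile("^(uid|gid)\\s+(\\d+)\\s+(0|-?[1-9]\\d*)\\s*(#.*)?$");

      public static void main(String[] args) {
        // Accepted after the change: negative remote ids such as -2.
        System.out.println(MAPPING_LINE.matcher("uid 10001 -2   # made-up line").matches()); // true
        // Still accepted: plain non-negative ids.
        System.out.println(MAPPING_LINE.matcher("gid 0 0").matches());                       // true
        // Rejected: leading zeros on the third field.
        System.out.println(MAPPING_LINE.matcher("uid 10001 007").matches());                 // false
      }
    }
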
+ 84 - 119
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java

@@ -20,23 +20,24 @@ package org.apache.hadoop.security.alias;
 
 import java.io.Console;
 import java.io.IOException;
-import java.io.PrintStream;
 import java.security.InvalidParameterException;
 import java.security.NoSuchAlgorithmException;
 import java.util.Arrays;
 import java.util.List;
 
 import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.commons.lang.StringUtils;
+
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.tools.CommandShell;
 import org.apache.hadoop.util.ToolRunner;
 
 /**
- * This program is the CLI utility for the CredentialProvider facilities in 
+ * This program is the CLI utility for the CredentialProvider facilities in
  * Hadoop.
  */
-public class CredentialShell extends Configured implements Tool {
+public class CredentialShell extends CommandShell {
   final static private String USAGE_PREFIX = "Usage: hadoop credential " +
       "[generic options]\n";
   final static private String COMMANDS =
@@ -52,44 +53,13 @@ public class CredentialShell extends Configured implements Tool {
       "MUST use the -provider argument.";
 
   private boolean interactive = true;
-  private Command command = null;
 
   /** If true, fail if the provider requires a password and none is given. */
   private boolean strict = false;
 
-  /** Allows stdout to be captured if necessary. */
-  @VisibleForTesting
-  public PrintStream out = System.out;
-  /** Allows stderr to be captured if necessary. */
-  @VisibleForTesting
-  public PrintStream err = System.err;
-
   private boolean userSuppliedProvider = false;
   private String value = null;
   private PasswordReader passwordReader;
-  private boolean isHelp = false;
-
-  @Override
-  public int run(String[] args) throws Exception {
-    int exitCode = 0;
-    try {
-      exitCode = init(args);
-      if (exitCode != 0) {
-        return exitCode;
-      }
-      if (!isHelp) {
-        if (command.validate()) {
-          command.execute();
-        } else {
-          exitCode = 1;
-        }
-      }
-    } catch (Exception e) {
-      e.printStackTrace(err);
-      return 1;
-    }
-    return exitCode;
-  }
 
   /**
    * Parse the command line arguments and initialize the data.
@@ -102,46 +72,33 @@ public class CredentialShell extends Configured implements Tool {
    * @return 0 if the argument(s) were recognized, 1 otherwise
    * @throws IOException
    */
+  @Override
   protected int init(String[] args) throws IOException {
     // no args should print the help message
     if (0 == args.length) {
-      printCredShellUsage();
-      ToolRunner.printGenericCommandUsage(System.err);
+      ToolRunner.printGenericCommandUsage(getErr());
       return 1;
     }
 
     for (int i = 0; i < args.length; i++) { // parse command line
       if (args[i].equals("create")) {
         if (i == args.length - 1) {
-          printCredShellUsage();
           return 1;
         }
-        String alias = args[++i];
-        command = new CreateCommand(alias);
-        if (alias.equals("-help")) {
-          printCredShellUsage();
-          return 0;
-        }
+        setSubCommand(new CreateCommand(args[++i]));
       } else if (args[i].equals("delete")) {
         if (i == args.length - 1) {
-          printCredShellUsage();
           return 1;
         }
-        String alias = args[++i];
-        command = new DeleteCommand(alias);
-        if (alias.equals("-help")) {
-          printCredShellUsage();
-          return 0;
-        }
+        setSubCommand(new DeleteCommand(args[++i]));
       } else if (args[i].equals("list")) {
-        command = new ListCommand();
+        setSubCommand(new ListCommand());
       } else if (args[i].equals("-provider")) {
         if (i == args.length - 1) {
-          printCredShellUsage();
           return 1;
         }
         userSuppliedProvider = true;
-        getConf().set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, 
+        getConf().set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
             args[++i]);
       } else if (args[i].equals("-f") || (args[i].equals("-force"))) {
         interactive = false;
@@ -150,42 +107,32 @@ public class CredentialShell extends Configured implements Tool {
       } else if (args[i].equals("-v") || (args[i].equals("-value"))) {
         value = args[++i];
       } else if (args[i].equals("-help")) {
-        printCredShellUsage();
+        printShellUsage();
         return 0;
       } else {
-        printCredShellUsage();
-        ToolRunner.printGenericCommandUsage(System.err);
+        ToolRunner.printGenericCommandUsage(getErr());
         return 1;
       }
     }
     return 0;
   }
 
-  private void printCredShellUsage() {
-    isHelp = true;
-    out.println(USAGE_PREFIX + COMMANDS);
-    if (command != null) {
-      out.println(command.getUsage());
-    } else {
-      out.println("=========================================================" +
-          "======");
-      out.println(CreateCommand.USAGE + ":\n\n" + CreateCommand.DESC);
-      out.println("=========================================================" +
-          "======");
-      out.println(DeleteCommand.USAGE + ":\n\n" + DeleteCommand.DESC);
-      out.println("=========================================================" +
-          "======");
-      out.println(ListCommand.USAGE + ":\n\n" + ListCommand.DESC);
-    }
+  @Override
+  public String getCommandUsage() {
+    StringBuffer sbuf = new StringBuffer(USAGE_PREFIX + COMMANDS);
+    String banner = StringUtils.repeat("=", 66);
+    sbuf.append(banner + "\n");
+    sbuf.append(CreateCommand.USAGE + ":\n\n" + CreateCommand.DESC + "\n");
+    sbuf.append(banner + "\n");
+    sbuf.append(DeleteCommand.USAGE + ":\n\n" + DeleteCommand.DESC + "\n");
+    sbuf.append(banner + "\n");
+    sbuf.append(ListCommand.USAGE + ":\n\n" + ListCommand.DESC + "\n");
+    return sbuf.toString();
   }
 
-  private abstract class Command {
+  private abstract class Command extends SubCommand {
     protected CredentialProvider provider = null;
 
-    public boolean validate() {
-      return true;
-    }
-
     protected CredentialProvider getCredentialProvider() {
       CredentialProvider prov = null;
       List<CredentialProvider> providers;
@@ -202,24 +149,29 @@ public class CredentialShell extends Configured implements Tool {
           }
         }
       } catch (IOException e) {
-        e.printStackTrace(err);
+        e.printStackTrace(getErr());
       }
       if (prov == null) {
-        out.println(NO_VALID_PROVIDERS);
+        getOut().println(NO_VALID_PROVIDERS);
       }
       return prov;
     }
 
     protected void printProviderWritten() {
-      out.println("Provider " + provider.toString() + " has been updated.");
+      getOut().println("Provider " + provider.toString() + " was updated.");
     }
 
     protected void warnIfTransientProvider() {
       if (provider.isTransient()) {
-        out.println("WARNING: you are modifying a transient provider.");
+        getOut().println("WARNING: you are modifying a transient provider.");
       }
     }
 
+    protected void doHelp() {
+      getOut().println(USAGE_PREFIX + COMMANDS);
+      printShellUsage();
+    }
+
     public abstract void execute() throws Exception;
 
     public abstract String getUsage();
@@ -244,13 +196,13 @@ public class CredentialShell extends Configured implements Tool {
       List<String> aliases;
       try {
         aliases = provider.getAliases();
-        out.println("Listing aliases for CredentialProvider: " +
+        getOut().println("Listing aliases for CredentialProvider: " +
             provider.toString());
         for (String alias : aliases) {
-          out.println(alias);
+          getOut().println(alias);
         }
       } catch (IOException e) {
-        out.println("Cannot list aliases for CredentialProvider: " +
+        getOut().println("Cannot list aliases for CredentialProvider: " +
             provider.toString()
             + ": " + e.getMessage());
         throw e;
@@ -283,15 +235,18 @@ public class CredentialShell extends Configured implements Tool {
 
     @Override
     public boolean validate() {
-      provider = getCredentialProvider();
-      if (provider == null) {
-        return false;
-      }
       if (alias == null) {
-        out.println("There is no alias specified. Please provide the" +
+        getOut().println("There is no alias specified. Please provide the" +
             "mandatory <alias>. See the usage description with -help.");
         return false;
       }
+      if (alias.equals("-help")) {
+        return true;
+      }
+      provider = getCredentialProvider();
+      if (provider == null) {
+        return false;
+      }
       if (interactive) {
         try {
           cont = ToolRunner
@@ -299,30 +254,34 @@ public class CredentialShell extends Configured implements Tool {
                   alias + " from CredentialProvider " + provider.toString() +
                   ". Continue? ");
           if (!cont) {
-            out.println("Nothing has been deleted.");
+            getOut().println("Nothing has been deleted.");
           }
           return cont;
         } catch (IOException e) {
-          out.println(alias + " will not be deleted.");
-          e.printStackTrace(err);
+          getOut().println(alias + " will not be deleted.");
+          e.printStackTrace(getErr());
         }
       }
       return true;
     }
 
     public void execute() throws IOException {
+      if (alias.equals("-help")) {
+        doHelp();
+        return;
+      }
       warnIfTransientProvider();
-      out.println("Deleting credential: " + alias +
+      getOut().println("Deleting credential: " + alias +
           " from CredentialProvider: " + provider.toString());
       if (cont) {
         try {
           provider.deleteCredentialEntry(alias);
-          out.println("Credential " + alias +
+          getOut().println("Credential " + alias +
               " has been successfully deleted.");
           provider.flush();
           printProviderWritten();
         } catch (IOException e) {
-          out.println("Credential " + alias + " has NOT been deleted.");
+          getOut().println("Credential " + alias + " has NOT been deleted.");
           throw e;
         }
       }
@@ -352,31 +311,37 @@ public class CredentialShell extends Configured implements Tool {
     }
 
     public boolean validate() {
-      boolean rc = true;
+      if (alias == null) {
+        getOut().println("There is no alias specified. Please provide the" +
+            "mandatory <alias>. See the usage description with -help.");
+        return false;
+      }
+      if (alias.equals("-help")) {
+        return true;
+      }
       try {
         provider = getCredentialProvider();
         if (provider == null) {
-          rc = false;
+          return false;
         } else if (provider.needsPassword()) {
           if (strict) {
-            out.println(provider.noPasswordError());
-            rc = false;
+            getOut().println(provider.noPasswordError());
+            return false;
           } else {
-            out.println(provider.noPasswordWarning());
+            getOut().println(provider.noPasswordWarning());
           }
         }
       } catch (IOException e) {
-        e.printStackTrace(err);
-      }
-      if (alias == null) {
-        out.println("There is no alias specified. Please provide the" +
-            "mandatory <alias>. See the usage description with -help.");
-        rc = false;
+        e.printStackTrace(getErr());
       }
-      return rc;
+      return true;
     }
 
     public void execute() throws IOException, NoSuchAlgorithmException {
+      if (alias.equals("-help")) {
+        doHelp();
+        return;
+      }
       warnIfTransientProvider();
       try {
         char[] credential = null;
@@ -388,14 +353,14 @@ public class CredentialShell extends Configured implements Tool {
         }
         provider.createCredentialEntry(alias, credential);
         provider.flush();
-        out.println(alias + " has been successfully created.");
+        getOut().println(alias + " has been successfully created.");
         printProviderWritten();
       } catch (InvalidParameterException e) {
-        out.println("Credential " + alias + " has NOT been created. " +
+        getOut().println("Credential " + alias + " has NOT been created. " +
             e.getMessage());
         throw e;
       } catch (IOException e) {
-        out.println("Credential " + alias + " has NOT been created. " +
+        getOut().println("Credential " + alias + " has NOT been created. " +
             e.getMessage());
         throw e;
       }
@@ -406,13 +371,13 @@ public class CredentialShell extends Configured implements Tool {
       return USAGE + ":\n\n" + DESC;
     }
   }
-  
+
   protected char[] promptForCredential() throws IOException {
     PasswordReader c = getPasswordReader();
     if (c == null) {
       throw new IOException("No console available for prompting user.");
     }
-    
+
     char[] cred = null;
 
     boolean noMatch;
@@ -434,18 +399,18 @@ public class CredentialShell extends Configured implements Tool {
     } while (noMatch);
     return cred;
   }
-  
+
   public PasswordReader getPasswordReader() {
     if (passwordReader == null) {
       passwordReader = new PasswordReader();
     }
     return passwordReader;
   }
-  
+
   public void setPasswordReader(PasswordReader reader) {
     passwordReader = reader;
   }
-  
+
   /** To facilitate testing since Console is a final class. */
   public static class PasswordReader {
     public char[] readPassword(String prompt) {
@@ -459,8 +424,8 @@ public class CredentialShell extends Configured implements Tool {
       console.format(message);
     }
   }
-  
-  
+
+
   /**
    * Main program.
    *

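With the shell now built on CommandShell it is still driven through ToolRunner; a hedged sketch of invoking it programmatically (the alias, value and jceks path are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.alias.CredentialShell;
    import org.apache.hadoop.util.ToolRunner;

    public class CredentialShellExample {
      public static void main(String[] args) throws Exception {
        // Illustrative only: create a credential entry non-interactively.
        // "db.password", "secret" and the jceks path are hypothetical.
        String[] shellArgs = {
            "create", "db.password",
            "-value", "secret",
            "-provider", "jceks://file/tmp/example.jceks"
        };
        int rc = ToolRunner.run(new Configuration(), new CredentialShell(), shellArgs);
        System.exit(rc);
      }
    }
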
+ 23 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java

@@ -199,6 +199,29 @@ public final class DtFileOperations {
     doFormattedWrite(tokenFile, fileFormat, creds, conf);
   }
 
+  /** Alias a token from a file and save back to file in the local filesystem.
+   *  @param tokenFile a local File object to hold the input and output.
+   *  @param fileFormat a string equal to FORMAT_PB or FORMAT_JAVA, for output
+   *  @param alias overwrite service field of fetched token with this text.
+   *  @param service only apply alias to tokens matching this service text.
+   *  @param conf Configuration object passed along.
+   *  @throws IOException
+   */
+  public static void aliasTokenFile(File tokenFile, String fileFormat,
+      Text alias, Text service, Configuration conf) throws Exception {
+    Credentials newCreds = new Credentials();
+    Credentials creds = Credentials.readTokenStorageFile(tokenFile, conf);
+    for (Token<?> token : creds.getAllTokens()) {
+      newCreds.addToken(token.getService(), token);
+      if (token.getService().equals(service)) {
+        Token<?> aliasedToken = token.copyToken();
+        aliasedToken.setService(alias);
+        newCreds.addToken(alias, aliasedToken);
+      }
+    }
+    doFormattedWrite(tokenFile, fileFormat, newCreds, conf);
+  }
+
   /** Append tokens from list of files in local filesystem, saving to last file.
    *  @param tokenFiles list of local File objects.  Last file holds the output.
    *  @param fileFormat a string equal to FORMAT_PB or FORMAT_JAVA, for output

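A hedged sketch of calling the new aliasTokenFile helper directly; the token file name, alias and service strings are invented, and the sketch sits in the same package only so it compiles regardless of the constant's visibility:

    package org.apache.hadoop.security.token;

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;

    public class AliasTokenExample {
      public static void main(String[] args) throws Exception {
        // Copy any token whose service equals "ha-hdfs:mycluster" under the
        // additional alias "hdfs://nn1:8020" and rewrite the file in place.
        // All three names are made up for this sketch.
        DtFileOperations.aliasTokenFile(
            new File("tokens.dt"),
            DtFileOperations.FORMAT_PB,    // keep the protobuf on-disk format
            new Text("hdfs://nn1:8020"),   // alias to add
            new Text("ha-hdfs:mycluster"), // service to match
            new Configuration());
      }
    }
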
+ 42 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtUtilShell.java

@@ -41,7 +41,7 @@ public class DtUtilShell extends CommandShell {
       DtFileOperations.FORMAT_PB + ")]";
   public static final String DT_USAGE = "hadoop dtutil " +
       "[-keytab <keytab_file> -principal <principal_name>] " +
-      "subcommand (help|print|get|append|cancel|remove|renew) " +
+      "subcommand (help|print|get|edit|append|cancel|remove|renew) " +
          FORMAT_SUBSTRING + " [-alias <alias>] filename...";
 
   // command line options
@@ -50,6 +50,7 @@ public class DtUtilShell extends CommandShell {
   private static final String PRINCIPAL = "-principal";
   private static final String PRINT = "print";
   private static final String GET = "get";
+  private static final String EDIT = "edit";
   private static final String APPEND = "append";
   private static final String CANCEL = "cancel";
   private static final String REMOVE = "remove";
@@ -127,6 +128,8 @@ public class DtUtilShell extends CommandShell {
           setSubCommand(new Print());
         } else if (command.equals(GET)) {
           setSubCommand(new Get(args[++i]));
+        } else if (command.equals(EDIT)) {
+          setSubCommand(new Edit());
         } else if (command.equals(APPEND)) {
           setSubCommand(new Append());
         } else if (command.equals(CANCEL)) {
@@ -172,10 +175,12 @@ public class DtUtilShell extends CommandShell {
 
   @Override
   public String getCommandUsage() {
-    return String.format("%n%s%n   %s%n   %s%n   %s%n   %s%n   %s%n   %s%n%n",
-                  DT_USAGE, (new Print()).getUsage(), (new Get()).getUsage(),
-                  (new Append()).getUsage(), (new Remove(true)).getUsage(),
-                  (new Remove(false)).getUsage(), (new Renew()).getUsage());
+    return String.format(
+        "%n%s%n   %s%n   %s%n   %s%n   %s%n   %s%n   %s%n   %s%n%n",
+        DT_USAGE, (new Print()).getUsage(), (new Get()).getUsage(),
+        (new Edit()).getUsage(), (new Append()).getUsage(),
+        (new Remove(true)).getUsage(), (new Remove(false)).getUsage(),
+        (new Renew()).getUsage());
   }
 
   private class Print extends SubCommand {
@@ -242,6 +247,38 @@ public class DtUtilShell extends CommandShell {
     }
   }
 
+  private class Edit extends SubCommand {
+    public static final String EDIT_USAGE =
+        "dtutil edit -service <service> -alias <alias> " +
+        FORMAT_SUBSTRING + "filename...";
+
+    @Override
+    public boolean validate() {
+      if (service == null) {
+        LOG.error("must pass -service field with dtutil edit command");
+        return false;
+      }
+      if (alias == null) {
+        LOG.error("must pass -alias field with dtutil edit command");
+        return false;
+      }
+      return true;
+    }
+
+    @Override
+    public void execute() throws Exception {
+      for (File tokenFile : tokenFiles) {
+        DtFileOperations.aliasTokenFile(
+            tokenFile, format, alias, service, getConf());
+      }
+    }
+
+    @Override
+    public String getUsage() {
+      return EDIT_USAGE;
+    }
+  }
+
   private class Append extends SubCommand {
     public static final String APPEND_USAGE =
         "dtutil append " + FORMAT_SUBSTRING + "filename...";

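The same path is reachable end to end through the shell; a hedged sketch, equivalent to running `hadoop dtutil edit -service <service> -alias <alias> <file>`, with all argument values made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.token.DtUtilShell;
    import org.apache.hadoop.util.ToolRunner;

    public class DtUtilEditExample {
      public static void main(String[] args) throws Exception {
        // Illustrative arguments only; the service, alias and token file are invented.
        String[] shellArgs = {
            "edit",
            "-service", "ha-hdfs:mycluster",
            "-alias", "hdfs://nn1:8020",
            "tokens.dt"
        };
        System.exit(ToolRunner.run(new Configuration(), new DtUtilShell(), shellArgs));
      }
    }
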
+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java

@@ -51,6 +51,8 @@ import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism
@@ -78,6 +80,9 @@ import com.google.common.annotations.VisibleForTesting;
 public abstract class DelegationTokenAuthenticationHandler
     implements AuthenticationHandler {
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DelegationTokenAuthenticationHandler.class);
+
   protected static final String TYPE_POSTFIX = "-dt";
 
   public static final String PREFIX = "delegation-token.";
@@ -327,6 +332,8 @@ public abstract class DelegationTokenAuthenticationHandler
       throws IOException, AuthenticationException {
     AuthenticationToken token;
     String delegationParam = getDelegationToken(request);
+    LOG.debug("Authenticating with delegationParam: {}, query string: {}",
+        delegationParam, request.getQueryString());
     if (delegationParam != null) {
       try {
         Token<AbstractDelegationTokenIdentifier> dt = new Token();

+ 19 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java

@@ -121,6 +121,24 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
     return hasDt;
   }
 
+  /**
+   * Append the delegation token to the request header if needed.
+   */
+  private void appendDelegationToken(final AuthenticatedURL.Token token,
+      final Token<?> dToken, final HttpURLConnection conn) throws IOException {
+    if (token.isSet()) {
+      LOG.debug("Auth token is set, not appending delegation token.");
+      return;
+    }
+    if (dToken == null) {
+      LOG.warn("Delegation token is null, cannot set on request header.");
+      return;
+    }
+    conn.setRequestProperty(
+        DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER,
+        dToken.encodeToUrlString());
+  }
+
   @Override
   public void authenticate(URL url, AuthenticatedURL.Token token)
       throws IOException, AuthenticationException {
@@ -283,6 +301,7 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
     url = new URL(sb.toString());
     AuthenticatedURL aUrl = new AuthenticatedURL(this, connConfigurator);
     HttpURLConnection conn = aUrl.openConnection(url, token);
+    appendDelegationToken(token, dToken, conn);
     conn.setRequestMethod(operation.getHttpMethod());
     HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
     if (hasResponse) {

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/CommandShell.java

@@ -64,7 +64,7 @@ public abstract class CommandShell extends Configured implements Tool {
     int exitCode = 0;
     try {
       exitCode = init(args);
-      if (exitCode != 0) {
+      if (exitCode != 0 || subcommand == null) {
         printShellUsage();
         return exitCode;
       }
@@ -89,7 +89,7 @@ public abstract class CommandShell extends Configured implements Tool {
    */
   protected abstract int init(String[] args) throws Exception;
 
-  private void printShellUsage() {
+  protected final void printShellUsage() {
     if (subcommand != null) {
       out.println(subcommand.getUsage());
     } else {

+ 15 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/TraceAdmin.java

@@ -29,6 +29,7 @@ import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
@@ -36,6 +37,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A command-line tool for viewing and modifying tracing settings.
@@ -44,6 +47,7 @@ import org.apache.hadoop.util.Tool;
 public class TraceAdmin extends Configured implements Tool {
   private TraceAdminProtocolPB proxy;
   private TraceAdminProtocolTranslatorPB remote;
+  private static final Logger LOG = LoggerFactory.getLogger(TraceAdmin.class);
 
   private void usage() {
     PrintStream err = System.err;
@@ -61,7 +65,9 @@ public class TraceAdmin extends Configured implements Tool {
             "  -list: List the current span receivers.\n" +
             "  -remove [id]\n" +
             "    Remove the span receiver with the specified id.  Use -list to\n" +
-            "    find the id of each receiver.\n"
+            "    find the id of each receiver.\n" +
+            "  -principal: If the daemon is Kerberized, specify the service\n" +
+            "    principal name."
     );
   }
 
@@ -166,6 +172,14 @@ public class TraceAdmin extends Configured implements Tool {
       System.err.println("You must specify an operation.");
       return 1;
     }
+    String servicePrincipal = StringUtils.popOptionWithArgument("-principal",
+        args);
+    if (servicePrincipal != null) {
+      LOG.debug("Set service principal: {}", servicePrincipal);
+      getConf().set(
+          CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
+          servicePrincipal);
+    }
     RPC.setProtocolEngine(getConf(), TraceAdminProtocolPB.class,
         ProtobufRpcEngine.class);
     InetSocketAddress address = NetUtils.createSocketAddr(hostPort);

+ 108 - 51
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java

@@ -21,6 +21,9 @@ package org.apache.hadoop.util;
 import java.io.*;
 import java.util.Set;
 import java.util.HashSet;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 
 import org.apache.commons.io.Charsets;
 import org.apache.commons.logging.LogFactory;
@@ -38,6 +41,8 @@ public class HostsFileReader {
   private Set<String> excludes;
   private String includesFile;
   private String excludesFile;
+  private WriteLock writeLock;
+  private ReadLock readLock;
   
   private static final Log LOG = LogFactory.getLog(HostsFileReader.class);
 
@@ -47,6 +52,9 @@ public class HostsFileReader {
     excludes = new HashSet<String>();
     includesFile = inFile;
     excludesFile = exFile;
+    ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
+    this.writeLock = rwLock.writeLock();
+    this.readLock = rwLock.readLock();
     refresh();
   }
 
@@ -57,6 +65,9 @@ public class HostsFileReader {
     excludes = new HashSet<String>();
     this.includesFile = includesFile;
     this.excludesFile = excludesFile;
+    ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
+    this.writeLock = rwLock.writeLock();
+    this.readLock = rwLock.readLock();
     refresh(inFileInputStream, exFileInputStream);
   }
 
@@ -101,80 +112,126 @@ public class HostsFileReader {
     }
   }
 
-  public synchronized void refresh() throws IOException {
-    LOG.info("Refreshing hosts (include/exclude) list");
-    Set<String> newIncludes = new HashSet<String>();
-    Set<String> newExcludes = new HashSet<String>();
-    boolean switchIncludes = false;
-    boolean switchExcludes = false;
-    if (!includesFile.isEmpty()) {
-      readFileToSet("included", includesFile, newIncludes);
-      switchIncludes = true;
-    }
-    if (!excludesFile.isEmpty()) {
-      readFileToSet("excluded", excludesFile, newExcludes);
-      switchExcludes = true;
+  public void refresh() throws IOException {
+    this.writeLock.lock();
+    try {
+      refresh(includesFile, excludesFile);
+    } finally {
+      this.writeLock.unlock();
     }
+  }
 
-    if (switchIncludes) {
-      // switch the new hosts that are to be included
-      includes = newIncludes;
-    }
-    if (switchExcludes) {
-      // switch the excluded hosts
-      excludes = newExcludes;
+  public void refresh(String includeFiles, String excludeFiles)
+      throws IOException {
+    LOG.info("Refreshing hosts (include/exclude) list");
+    this.writeLock.lock();
+    try {
+      // update instance variables
+      updateFileNames(includeFiles, excludeFiles);
+      Set<String> newIncludes = new HashSet<String>();
+      Set<String> newExcludes = new HashSet<String>();
+      boolean switchIncludes = false;
+      boolean switchExcludes = false;
+      if (includeFiles != null && !includeFiles.isEmpty()) {
+        readFileToSet("included", includeFiles, newIncludes);
+        switchIncludes = true;
+      }
+      if (excludeFiles != null && !excludeFiles.isEmpty()) {
+        readFileToSet("excluded", excludeFiles, newExcludes);
+        switchExcludes = true;
+      }
+
+      if (switchIncludes) {
+        // switch the new hosts that are to be included
+        includes = newIncludes;
+      }
+      if (switchExcludes) {
+        // switch the excluded hosts
+        excludes = newExcludes;
+      }
+    } finally {
+      this.writeLock.unlock();
     }
   }
 
   @Private
-  public synchronized void refresh(InputStream inFileInputStream,
+  public void refresh(InputStream inFileInputStream,
       InputStream exFileInputStream) throws IOException {
     LOG.info("Refreshing hosts (include/exclude) list");
-    Set<String> newIncludes = new HashSet<String>();
-    Set<String> newExcludes = new HashSet<String>();
-    boolean switchIncludes = false;
-    boolean switchExcludes = false;
-    if (inFileInputStream != null) {
-      readFileToSetWithFileInputStream("included", includesFile,
-          inFileInputStream, newIncludes);
-      switchIncludes = true;
-    }
-    if (exFileInputStream != null) {
-      readFileToSetWithFileInputStream("excluded", excludesFile,
-          exFileInputStream, newExcludes);
-      switchExcludes = true;
-    }
-    if (switchIncludes) {
-      // switch the new hosts that are to be included
-      includes = newIncludes;
+    this.writeLock.lock();
+    try {
+      Set<String> newIncludes = new HashSet<String>();
+      Set<String> newExcludes = new HashSet<String>();
+      boolean switchIncludes = false;
+      boolean switchExcludes = false;
+      if (inFileInputStream != null) {
+        readFileToSetWithFileInputStream("included", includesFile,
+            inFileInputStream, newIncludes);
+        switchIncludes = true;
+      }
+      if (exFileInputStream != null) {
+        readFileToSetWithFileInputStream("excluded", excludesFile,
+            exFileInputStream, newExcludes);
+        switchExcludes = true;
+      }
+      if (switchIncludes) {
+        // switch the new hosts that are to be included
+        includes = newIncludes;
+      }
+      if (switchExcludes) {
+        // switch the excluded hosts
+        excludes = newExcludes;
+      }
+    } finally {
+      this.writeLock.unlock();
     }
-    if (switchExcludes) {
-      // switch the excluded hosts
-      excludes = newExcludes;
+  }
+
+  public Set<String> getHosts() {
+    this.readLock.lock();
+    try {
+      return includes;
+    } finally {
+      this.readLock.unlock();
     }
   }
 
-  public synchronized Set<String> getHosts() {
-    return includes;
+  public Set<String> getExcludedHosts() {
+    this.readLock.lock();
+    try {
+      return excludes;
+    } finally {
+      this.readLock.unlock();
+    }
   }
 
-  public synchronized Set<String> getExcludedHosts() {
-    return excludes;
+  public void getHostDetails(Set<String> includes, Set<String> excludes) {
+    this.readLock.lock();
+    try {
+      includes.addAll(this.includes);
+      excludes.addAll(this.excludes);
+    } finally {
+      this.readLock.unlock();
+    }
   }
 
-  public synchronized void setIncludesFile(String includesFile) {
+  public void setIncludesFile(String includesFile) {
     LOG.info("Setting the includes file to " + includesFile);
     this.includesFile = includesFile;
   }
   
-  public synchronized void setExcludesFile(String excludesFile) {
+  public void setExcludesFile(String excludesFile) {
     LOG.info("Setting the excludes file to " + excludesFile);
     this.excludesFile = excludesFile;
   }
 
-  public synchronized void updateFileNames(String includesFile,
-      String excludesFile) {
-    setIncludesFile(includesFile);
-    setExcludesFile(excludesFile);
+  public void updateFileNames(String includeFiles, String excludeFiles) {
+    this.writeLock.lock();
+    try {
+      setIncludesFile(includeFiles);
+      setExcludesFile(excludeFiles);
+    } finally {
+      this.writeLock.unlock();
+    }
   }
 }

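With the monitor lock replaced by a read/write lock, two separate getHosts()/getExcludedHosts() calls may observe different refreshes; the new getHostDetails copies both sets under one read lock. A hedged sketch (the file paths are invented and assumed to exist):

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.hadoop.util.HostsFileReader;

    public class HostsSnapshotExample {
      public static void main(String[] args) throws Exception {
        // Hypothetical include/exclude file paths.
        HostsFileReader reader =
            new HostsFileReader("/etc/hadoop/dfs.include", "/etc/hadoop/dfs.exclude");

        // Take a consistent snapshot of both lists under a single read lock,
        // instead of calling getHosts() and getExcludedHosts() separately.
        Set<String> includes = new HashSet<>();
        Set<String> excludes = new HashSet<>();
        reader.getHostDetails(includes, excludes);

        System.out.println("included hosts: " + includes);
        System.out.println("excluded hosts: " + excludes);
      }
    }
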
+ 76 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/KMSUtil.java

@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProviderFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+/**
+ * Utils for KMS.
+ */
+@InterfaceAudience.Private
+public final class KMSUtil {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(KMSUtil.class);
+
+  private KMSUtil() { /* Hidden constructor */ }
+
+  /**
+   * Creates a new KeyProvider from the given Configuration
+   * and configuration key name.
+   *
+   * @param conf Configuration
+   * @param configKeyName The configuration key name
+   * @return new KeyProvider, or null if no provider was found.
+   * @throws IOException if the KeyProvider is improperly specified in
+   *                             the Configuration
+   */
+  public static KeyProvider createKeyProvider(final Configuration conf,
+      final String configKeyName) throws IOException {
+    LOG.debug("Creating key provider with config key {}", configKeyName);
+    final String providerUriStr = conf.getTrimmed(configKeyName, "");
+    // No provider set in conf
+    if (providerUriStr.isEmpty()) {
+      return null;
+    }
+    final URI providerUri;
+    try {
+      providerUri = new URI(providerUriStr);
+    } catch (URISyntaxException e) {
+      throw new IOException(e);
+    }
+    KeyProvider keyProvider = KeyProviderFactory.get(providerUri, conf);
+    if (keyProvider == null) {
+      throw new IOException("Could not instantiate KeyProvider from " +
+          configKeyName + " setting of '" + providerUriStr + "'");
+    }
+    if (keyProvider.isTransient()) {
+      throw new IOException("KeyProvider " + keyProvider.toString()
+          + " was found but it is a transient provider.");
+    }
+    return keyProvider;
+  }
+}

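A hedged sketch of a caller using the new helper; the configuration key name and the kms URI below are illustrative assumptions, not something this patch defines:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.util.KMSUtil;

    public class KMSUtilExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Hypothetical config key and provider URI, for illustration only.
        conf.set("dfs.encryption.key.provider.uri", "kms://http@kms-host:9600/kms");

        KeyProvider provider =
            KMSUtil.createKeyProvider(conf, "dfs.encryption.key.provider.uri");
        if (provider == null) {
          System.out.println("No key provider configured.");
        } else {
          System.out.println("Key provider: " + provider);
        }
      }
    }
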
+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCrc32.java

@@ -33,7 +33,11 @@ class NativeCrc32 {
    * Return true if the JNI-based native CRC extensions are available.
    */
   public static boolean isAvailable() {
-    return NativeCodeLoader.isNativeCodeLoaded();
+    if (System.getProperty("os.arch").toLowerCase().startsWith("sparc")) {
+      return false;
+    } else {
+      return NativeCodeLoader.isNativeCodeLoaded();
+    }
   }
 
   /**

+ 65 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/AsyncGet.java

@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util.concurrent;
+
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * This interface defines an asynchronous {@link #get(long, TimeUnit)} method.
+ *
+ * When the return value is still being computed, invoking
+ * {@link #get(long, TimeUnit)} will result in a {@link TimeoutException}.
+ * The method should be invoked again and again
+ * until the underlying computation is completed.
+ *
+ * @param <R> The type of the return value.
+ * @param <E> The exception type that the underlying implementation may throw.
+ */
+public interface AsyncGet<R, E extends Throwable> {
+  /**
+   * Get the result.
+   *
+   * @param timeout The maximum time period to wait.
+   *                When timeout == 0, it does not wait at all.
+   *                When timeout < 0, it waits indefinitely.
+   * @param unit The unit of the timeout value
+   * @return the result, which is possibly null.
+   * @throws E an exception thrown by the underlying implementation.
+   * @throws TimeoutException if it cannot return after the given time period.
+   * @throws InterruptedException if the thread is interrupted.
+   */
+  R get(long timeout, TimeUnit unit)
+      throws E, TimeoutException, InterruptedException;
+
+  /** @return true if the underlying computation is done; false, otherwise. */
+  boolean isDone();
+
+  /** Utility */
+  class Util {
+    /** Use {@link #get(long, TimeUnit)} timeout parameters to wait. */
+    public static void wait(Object obj, long timeout, TimeUnit unit)
+        throws InterruptedException {
+      if (timeout < 0) {
+        obj.wait();
+      } else if (timeout > 0) {
+        obj.wait(unit.toMillis(timeout));
+      }
+    }
+  }
+}

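A minimal hedged implementation of the interface, backed by a plain object monitor, meant only to show the polling contract (TimeoutException while the value is pending, then the result):

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;
    import org.apache.hadoop.util.concurrent.AsyncGet;

    /** Toy AsyncGet whose value is supplied later by another thread. */
    public class SettableAsyncGet implements AsyncGet<String, Exception> {
      private String value;
      private boolean done;

      public synchronized void setValue(String v) {
        value = v;
        done = true;
        notifyAll();
      }

      @Override
      public synchronized String get(long timeout, TimeUnit unit)
          throws Exception, TimeoutException, InterruptedException {
        if (!done) {
          // Reuse the helper added above to honor the 0 / negative timeout contract.
          AsyncGet.Util.wait(this, timeout, unit);
        }
        if (!done) {
          throw new TimeoutException("value not ready yet");
        }
        return value;
      }

      @Override
      public synchronized boolean isDone() {
        return done;
      }
    }
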
+ 73 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/concurrent/AsyncGetFuture.java

@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util.concurrent;
+
+import com.google.common.util.concurrent.AbstractFuture;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/** A {@link Future} implemented using an {@link AsyncGet} object. */
+public class AsyncGetFuture<T, E extends Throwable> extends AbstractFuture<T> {
+  public static final Log LOG = LogFactory.getLog(AsyncGetFuture.class);
+
+  private final AtomicBoolean called = new AtomicBoolean(false);
+  private final AsyncGet<T, E> asyncGet;
+
+  public AsyncGetFuture(AsyncGet<T, E> asyncGet) {
+    this.asyncGet = asyncGet;
+  }
+
+  private void callAsyncGet(long timeout, TimeUnit unit) {
+    if (!isCancelled() && called.compareAndSet(false, true)) {
+      try {
+        set(asyncGet.get(timeout, unit));
+      } catch (TimeoutException te) {
+        LOG.trace("TRACE", te);
+        called.compareAndSet(true, false);
+      } catch (Throwable e) {
+        LOG.trace("TRACE", e);
+        setException(e);
+      }
+    }
+  }
+
+  @Override
+  public T get() throws InterruptedException, ExecutionException {
+    callAsyncGet(-1, TimeUnit.MILLISECONDS);
+    return super.get();
+  }
+
+  @Override
+  public T get(long timeout, TimeUnit unit)
+      throws InterruptedException, TimeoutException, ExecutionException {
+    callAsyncGet(timeout, unit);
+    return super.get(0, TimeUnit.MILLISECONDS);
+  }
+
+  @Override
+  public boolean isDone() {
+    callAsyncGet(0, TimeUnit.MILLISECONDS);
+    return super.isDone();
+  }
+}

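And a hedged sketch of wrapping such an AsyncGet in the new future; it reuses the toy SettableAsyncGet from the previous sketch, so it is illustrative only:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.util.concurrent.AsyncGetFuture;

    public class AsyncGetFutureExample {
      public static void main(String[] args) throws Exception {
        SettableAsyncGet asyncGet = new SettableAsyncGet(); // from the previous sketch

        AsyncGetFuture<String, Exception> future = new AsyncGetFuture<>(asyncGet);
        System.out.println("done yet? " + future.isDone()); // false, value still pending

        // Supply the value from another thread, as an RPC response handler would.
        new Thread(() -> asyncGet.setValue("hello")).start();

        // Blocks (via the wrapped AsyncGet.get call) until the value arrives.
        System.out.println(future.get(5, TimeUnit.SECONDS)); // prints "hello"
      }
    }
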
+ 14 - 0
hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer

@@ -0,0 +1,14 @@
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+org.apache.hadoop.crypto.key.kms.KMSClientProvider$KMSTokenRenewer

+ 107 - 2
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -165,6 +165,30 @@
   </description>
 </property>
 
+<property>
+  <name>hadoop.security.group.mapping.ldap.connection.timeout.ms</name>
+  <value>60000</value>
+  <description>
+    This property is the connection timeout (in milliseconds) for LDAP
+    operations. If the LDAP provider doesn't establish a connection within the
+    specified period, it will abort the connect attempt. A non-positive value
+    means no LDAP connection timeout is specified, in which case it waits for
+    the connection to be established until the underlying network times out.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.group.mapping.ldap.read.timeout.ms</name>
+  <value>60000</value>
+  <description>
+    This property is the read timeout (in milliseconds) for LDAP
+    operations. If the LDAP provider doesn't get an LDAP response within the
+    specified period, it will abort the read attempt. A non-positive value
+    means no read timeout is specified, in which case it waits for the
+    response indefinitely.
+  </description>
+</property>
+
 <property>
   <name>hadoop.security.group.mapping.ldap.url</name>
   <value></value>
@@ -757,12 +781,33 @@
 
 <property>
   <name>fs.s3a.access.key</name>
-  <description>AWS access key ID used by S3A file system. Omit for Role-based authentication.</description>
+  <description>AWS access key ID used by S3A file system. Omit for IAM role-based or provider-based authentication.</description>
 </property>
 
 <property>
   <name>fs.s3a.secret.key</name>
-  <description>AWS secret key used by S3A file system. Omit for Role-based authentication.</description>
+  <description>AWS secret key used by S3A file system. Omit for IAM role-based or provider-based authentication.</description>
+</property>
+
+<property>
+  <name>fs.s3a.aws.credentials.provider</name>
+  <description>
+    Class name of a credentials provider that implements
+    com.amazonaws.auth.AWSCredentialsProvider.  Omit if using access/secret keys
+    or another authentication mechanism.  The specified class must provide an
+    accessible constructor accepting java.net.URI and
+    org.apache.hadoop.conf.Configuration, or an accessible default constructor.
+    Specifying org.apache.hadoop.fs.s3a.AnonymousAWSCredentialsProvider allows
+    anonymous access to a publicly accessible S3 bucket without any credentials.
+    Please note that allowing anonymous access to an S3 bucket compromises
+    security and therefore is unsuitable for most use cases.  It can be useful
+    for accessing public data sets without requiring AWS credentials.
+  </description>
+</property>
+
+<property>
+  <name>fs.s3a.session.token</name>
+  <description>The session token used with temporary credentials. Used only with provider org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider.</description>
 </property>
 
 <property>
@@ -2168,4 +2213,64 @@
       needs to be specified in net.topology.script.file.name.
     </description>
   </property>
+
+
+  <!-- Azure Data Lake File System Configurations -->
+
+  <property>
+    <name>adl.feature.override.readahead</name>
+    <value>true</value>
+    <description>
+      Enables read ahead in the ADL client; the feature is used to
+      improve read throughput.
+      This works in conjunction with the value set in
+      adl.feature.override.readahead.max.buffersize.
+      When set to false, the read ahead feature is turned off.
+      Default: true if not configured.
+    </description>
+  </property>
+
+  <property>
+    <name>adl.feature.override.readahead.max.buffersize</name>
+    <value>8388608</value>
+    <description>
+      Defines the maximum buffer size used to cache read ahead data; the
+      buffer is allocated per process. Applicable only when
+      adl.feature.override.readahead is set to true.
+      Default: 8388608 bytes (8 MB) if not configured.
+    </description>
+  </property>
+
+  <property>
+    <name>adl.feature.override.readahead.max.concurrent.connection</name>
+    <value>2</value>
+    <description>
+      Define maximum concurrent connection can be established to
+      read ahead. If the data size is less than 4MB then only 1 read n/w
+      connection
+      is set. If the data size is less than 4MB but less than 8MB then 2 read
+      n/w connection
+      is set. Data greater than 8MB then value set under the property would
+      take
+      effect. Applicable only when adl.feature.override.readahead is set
+      to true and buffer size is greater than 8MB.
+      It is recommended to reset this property if the
+      adl.feature.override.readahead.max.buffersize
+      is less than 8MB to gain performance. Application has to consider
+      throttling limit for the account as well before configuring large
+      buffer size.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.adl.impl</name>
+    <value>org.apache.hadoop.fs.adl.AdlFileSystem</value>
+  </property>
+
+  <property>
+    <name>fs.AbstractFileSystem.adl.impl</name>
+    <value>org.apache.hadoop.fs.adl.Adl</value>
+  </property>
+
 </configuration>

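A hedged sketch of wiring the new S3A credential settings programmatically; the provider class name is taken from the description above, and the key/token values are placeholders:

    import org.apache.hadoop.conf.Configuration;

    public class S3ASessionCredentialsExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Use the provider that understands session (temporary) credentials.
        conf.set("fs.s3a.aws.credentials.provider",
            "org.apache.hadoop.fs.s3a.TemporaryAWSCredentialsProvider");
        // Placeholder values; real STS credentials would come from your environment.
        conf.set("fs.s3a.access.key", "ASIA...EXAMPLE");
        conf.set("fs.s3a.secret.key", "EXAMPLE-SECRET");
        conf.set("fs.s3a.session.token", "EXAMPLE-SESSION-TOKEN");
        // A FileSystem obtained with this conf authenticates with the
        // temporary credentials above.
      }
    }
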
+ 26 - 6
hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md

@@ -229,17 +229,37 @@ Commands useful for administrators of a hadoop cluster.
 
 Usage:
 
-    hadoop daemonlog -getlevel <host:httpport> <classname>
-    hadoop daemonlog -setlevel <host:httpport> <classname> <level>
+    hadoop daemonlog -getlevel <host:port> <classname> [-protocol (http|https)]
+    hadoop daemonlog -setlevel <host:port> <classname> <level> [-protocol (http|https)]
 
 | COMMAND\_OPTION | Description |
 |:---- |:---- |
-| `-getlevel` *host:httpport* *classname* | Prints the log level of the log identified by a qualified *classname*, in the daemon running at *host:httpport*. This command internally connects to `http://<host:httpport>/logLevel?log=<classname>` |
-| `-setlevel` *host:httpport* *classname* *level* | Sets the log level of the log identified by a qualified *classname*, in the daemon running at *host:httpport*. This command internally connects to `http://<host:httpport>/logLevel?log=<classname>&level=<level>` |
+| `-getlevel` *host:port* *classname* [-protocol (http|https)] | Prints the log level of the log identified by a qualified *classname*, in the daemon running at *host:port*. The `-protocol` flag specifies the protocol for connection. |
+| `-setlevel` *host:port* *classname* *level* [-protocol (http|https)] | Sets the log level of the log identified by a qualified *classname*, in the daemon running at *host:port*.  The `-protocol` flag specifies the protocol for connection. |
 
-Get/Set the log level for a Log identified by a qualified class name in the daemon.
+Get/Set the log level for a Log identified by a qualified class name in the daemon dynamically.
+By default, the command sends an HTTP request, but this can be overridden by using the argument `-protocol https` to send an HTTPS request.
+
+Example:
+
+    $ bin/hadoop daemonlog -setlevel 127.0.0.1:9870 org.apache.hadoop.hdfs.server.namenode.NameNode DEBUG
+    $ bin/hadoop daemonlog -getlevel 127.0.0.1:9871 org.apache.hadoop.hdfs.server.namenode.NameNode DEBUG -protocol https
+
+Note that the setting is not permanent and will be reset when the daemon is restarted.
+This command works by sending an HTTP/HTTPS request to the daemon's internal Jetty servlet, so it supports the following daemons:
+
+* HDFS
+    * name node
+    * secondary name node
+    * data node
+    * journal node
+* YARN
+    * resource manager
+    * node manager
+    * Timeline server
+
+However, the command does not support the KMS server, because its web interface is based on Tomcat, which does not support the servlet.
 
-	Example: $ bin/hadoop daemonlog -setlevel 127.0.0.1:9870 org.apache.hadoop.hdfs.server.namenode.NameNode DEBUG
 
 Files
 -----

+ 1 - 0
hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md

@@ -218,6 +218,7 @@ Each metrics record contains tags such as HAState and Hostname as additional inf
 | `TotalSyncCount` | Total number of sync operations performed by edit log |
 | `TotalSyncTimes` | Total number of milliseconds spent by various edit logs in sync operation|
 | `NameDirSize` | NameNode name directories size in bytes |
+| `NumTimedOutPendingReconstructions` | The number of timed out reconstructions. Not the number of unique blocks that timed out. |
 
 JournalNode
 -----------

+ 13 - 2
hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm

@@ -153,7 +153,8 @@ The following instructions are to run a MapReduce job locally. If you want to ex
 
 5.  Copy the input files into the distributed filesystem:
 
-          $ bin/hdfs dfs -put etc/hadoop input
+          $ bin/hdfs dfs -mkdir input
+          $ bin/hdfs dfs -put etc/hadoop/*.xml input
 
 6.  Run some of the examples provided:
 
@@ -180,13 +181,23 @@ You can run a MapReduce job on YARN in a pseudo-distributed mode by setting a fe
 
 The following instructions assume that 1. ~ 4. steps of [the above instructions](#Execution) are already executed.
 
-1.  Configure parameters as follows:`etc/hadoop/mapred-site.xml`:
+1.  Configure parameters as follows:
+
+    `etc/hadoop/mapred-site.xml`:
 
         <configuration>
             <property>
                 <name>mapreduce.framework.name</name>
                 <value>yarn</value>
             </property>
+            <property>
+                <name>mapreduce.admin.user.env</name>
+                <value>HADOOP_MAPRED_HOME=$HADOOP_COMMON_HOME</value>
+            </property>
+            <property>
+                <name>yarn.app.mapreduce.am.env</name>
+                <value>HADOOP_MAPRED_HOME=$HADOOP_COMMON_HOME</value>
+            </property>
         </configuration>
 
     `etc/hadoop/yarn-site.xml`:

+ 9 - 0
hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md

@@ -84,6 +84,15 @@ You can specify the configuration associated with span receiver by `-Ckey=value`
       ID  CLASS
       2   org.apache.htrace.core.LocalFileSpanReceiver
 
+If the cluster is Kerberized, the service principal name must be specified using the `-principal` option.
+For example, to show the list of span receivers of a namenode:
+
+    $ hadoop trace -list -host NN1:8020 -principal namenode/NN1@EXAMPLE.COM
+
+Or, for a datanode:
+
+    $ hadoop trace -list -host DN2:9867 -principal datanode/DN2@EXAMPLE.COM
+
 ### Starting tracing spans by HTrace API
 
 In order to trace, you will need to wrap the traced logic with **tracing span** as shown below.

+ 6 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java

@@ -102,6 +102,12 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
     xmlPrefixToSkipCompare.add("s3.");
     xmlPrefixToSkipCompare.add("s3native.");
 
+    // ADL properties are in a different subtree
+    // - org.apache.hadoop.hdfs.web.ADLConfKeys
+    xmlPrefixToSkipCompare.add("adl.");
+    xmlPropsToSkipCompare.add("fs.adl.impl");
+    xmlPropsToSkipCompare.add("fs.AbstractFileSystem.adl.impl");
+
     // Deprecated properties.  These should eventually be removed from the
     // class.
     configurationPropsToSkipCompare

+ 94 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/kms/TestLoadBalancingKMSClientProvider.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.crypto.key.kms;
 
+import static org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -25,11 +26,14 @@ import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.net.URI;
+import java.security.GeneralSecurityException;
 import java.security.NoSuchAlgorithmException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProvider.Options;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -163,4 +167,94 @@ public class TestLoadBalancingKMSClientProvider {
       assertTrue(e instanceof IOException);
     }
   }
+
+  // copied from HttpExceptionUtils:
+
+  // trick, riding on generics to throw an undeclared exception
+
+  private static void throwEx(Throwable ex) {
+    TestLoadBalancingKMSClientProvider.<RuntimeException>throwException(ex);
+  }
+
+  @SuppressWarnings("unchecked")
+  private static <E extends Throwable> void throwException(Throwable ex)
+      throws E {
+    throw (E) ex;
+  }
+
+  private class MyKMSClientProvider extends KMSClientProvider {
+    public MyKMSClientProvider(URI uri, Configuration conf) throws IOException {
+      super(uri, conf);
+    }
+
+    @Override
+    public EncryptedKeyVersion generateEncryptedKey(
+        final String encryptionKeyName)
+        throws IOException, GeneralSecurityException {
+      throwEx(new AuthenticationException("bar"));
+      return null;
+    }
+
+    @Override
+    public KeyVersion decryptEncryptedKey(
+        final EncryptedKeyVersion encryptedKeyVersion) throws IOException,
+        GeneralSecurityException {
+      throwEx(new AuthenticationException("bar"));
+      return null;
+    }
+
+    @Override
+    public KeyVersion createKey(final String name, final Options options)
+        throws NoSuchAlgorithmException, IOException {
+      throwEx(new AuthenticationException("bar"));
+      return null;
+    }
+
+    @Override
+    public KeyVersion rollNewVersion(final String name)
+        throws NoSuchAlgorithmException, IOException {
+      throwEx(new AuthenticationException("bar"));
+      return null;
+    }
+  }
+
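+  // Verify that a non-IOException (here an AuthenticationException) thrown
+  // by the underlying provider is wrapped in an IOException by the
+  // load-balancing provider rather than surfacing as a ClassCastException.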
+  @Test
+  public void testClassCastException() throws Exception {
+    Configuration conf = new Configuration();
+    KMSClientProvider p1 = new MyKMSClientProvider(
+        new URI("kms://http@host1/kms/foo"), conf);
+    LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
+        new KMSClientProvider[] {p1}, 0, conf);
+    try {
+      kp.generateEncryptedKey("foo");
+    } catch (IOException ioe) {
+      assertTrue(ioe.getCause().getClass().getName().contains(
+          "AuthenticationException"));
+    }
+
+    try {
+      final KeyProviderCryptoExtension.EncryptedKeyVersion
+          encryptedKeyVersion =
+          mock(KeyProviderCryptoExtension.EncryptedKeyVersion.class);
+      kp.decryptEncryptedKey(encryptedKeyVersion);
+    } catch (IOException ioe) {
+      assertTrue(ioe.getCause().getClass().getName().contains(
+          "AuthenticationException"));
+    }
+
+    try {
+      final KeyProvider.Options options = KeyProvider.options(conf);
+      kp.createKey("foo", options);
+    } catch (IOException ioe) {
+      assertTrue(ioe.getCause().getClass().getName().contains(
+          "AuthenticationException"));
+    }
+
+    try {
+      kp.rollNewVersion("foo");
+    } catch (IOException ioe) {
+      assertTrue(ioe.getCause().getClass().getName().contains(
+          "AuthenticationException"));
+    }
+  }
 }
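The generics trick above works because the unchecked cast to `E` is erased at compile time, so the checked exception escapes without appearing in any `throws` clause. A self-contained sketch of the idiom (class name hypothetical):

    public class SneakyThrowDemo {
      @SuppressWarnings("unchecked")
      private static <E extends Throwable> void sneakyThrow(Throwable t)
          throws E {
        // The cast is erased, so no runtime check occurs and t is rethrown
        // as-is, even if it is a checked exception.
        throw (E) t;
      }

      public static void main(String[] args) {
        // Compiles without a throws clause, yet throws a checked IOException.
        SneakyThrowDemo.<RuntimeException>sneakyThrow(
            new java.io.IOException("undeclared"));
      }
    }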

+ 70 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextCreateMkdirBaseTest.java

@@ -20,14 +20,15 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
-import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
-import org.apache.commons.logging.impl.Log4JLogger;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsDirectory;
+import static org.apache.hadoop.fs.contract.ContractTestUtils.assertIsFile;
+
 import org.apache.hadoop.test.GenericTestUtils;
 
 /**
@@ -116,7 +117,73 @@ public abstract class FileContextCreateMkdirBaseTest {
     fc.mkdir(f, FileContext.DEFAULT_PERM, true);
     Assert.assertTrue(isDir(fc, f));
   }
- 
+
+  @Test
+  public void testMkdirsRecursiveWithExistingDir() throws IOException {
+    Path f = getTestRootPath(fc, "aDir/bDir/cDir");
+    fc.mkdir(f, FileContext.DEFAULT_PERM, true);
+    assertIsDirectory(fc.getFileStatus(f));
+    assertIsDirectory(fc.getFileStatus(f.getParent()));
+    assertIsDirectory(fc.getFileStatus(f.getParent().getParent()));
+  }
+
+  @Test
+  public void testMkdirRecursiveWithExistingFile() throws IOException {
+    Path f = getTestRootPath(fc, "NonExistent3/aDir");
+    fc.mkdir(f, FileContext.DEFAULT_PERM, true);
+    assertIsDirectory(fc.getFileStatus(f));
+    assertIsDirectory(fc.getFileStatus(f.getParent()));
+
+    // create a sample file
+    Path filePath = new Path(f.getParent(), "test.txt");
+    createFile(fc, filePath);
+    assertIsFile(filePath, fc.getFileStatus(filePath));
+
+    // try creating another folder which conflicts with filePath
+    Path dirPath = new Path(filePath, "bDir/cDir");
+    try {
+      fc.mkdir(dirPath, FileContext.DEFAULT_PERM, true);
+      Assert.fail("Mkdir for " + dirPath
+          + " should have failed as a file was present");
+    } catch(IOException e) {
+      // failed as expected
+    }
+  }
+
+  @Test
+  public void testWithRename() throws IOException, InterruptedException {
+    Path root = getTestRootPath(fc);
+    Path f = new Path(root, "d1/d2/d3");
+    fc.mkdir(f, FileContext.DEFAULT_PERM, true);
+    assertIsDirectory(fc.getFileStatus(new Path(root, "d1")));
+    assertIsDirectory(fc.getFileStatus(new Path(root, "d1/d2")));
+    assertIsDirectory(fc.getFileStatus(new Path(root, "d1/d2/d3")));
+
+    // create a sample file f.txt
+    Path fPath = new Path(root, "d1/d2/f.txt");
+    createFile(fc, fPath);
+    assertIsFile(fPath, fc.getFileStatus(fPath));
+    assertIsDirectory(fc.getFileStatus(new Path(root, "d1")));
+    assertIsDirectory(fc.getFileStatus(new Path(root, "d1/d2")));
+    assertIsDirectory(fc.getFileStatus(new Path(root, "d1/d2/d3")));
+
+    // create a sample file f2.txt
+    Path f2Path = new Path(getTestRootPath(fc), "d1/d2/d3/f2.txt");
+    createFile(fc, f2Path);
+    assertIsFile(f2Path, fc.getFileStatus(f2Path));
+    assertIsDirectory(fc.getFileStatus(new Path(root, "d1")));
+    assertIsDirectory(fc.getFileStatus(new Path(root, "d1/d2")));
+    assertIsDirectory(fc.getFileStatus(new Path(root, "d1/d2/d3")));
+
+    // rename d1/d2/d3 to d1/d4
+    fc.rename(new Path(root, "d1/d2/d3"), new Path(root, "d1/d4"));
+    assertIsDirectory(fc.getFileStatus(new Path(root, "d1")));
+    assertIsDirectory(fc.getFileStatus(new Path(root, "d1/d4")));
+    Path f2NewPath = new Path(root, "d1/d4/f2.txt");
+    assertIsFile(f2NewPath, fc.getFileStatus(f2NewPath));
+  }
+
+
   ///////////////////////
   //      Test Create
   ////////////////////////

+ 420 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java

@@ -22,7 +22,9 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Assert;
 import org.junit.internal.AssumptionViolatedException;
@@ -34,8 +36,14 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
 import java.util.Properties;
+import java.util.Set;
 import java.util.UUID;
 
 /**
@@ -892,4 +900,416 @@ public class ContractTestUtils extends Assert {
       fs.delete(objectPath, false);
     }
   }
+
+  /**
+   * Make times more readable, by adding a "," every three digits.
+   * @param nanos nanos or other large number
+   * @return a string for logging
+   */
+  public static String toHuman(long nanos) {
+    return String.format(Locale.ENGLISH, "%,d", nanos);
+  }
+
+  /**
+   * Log the bandwidth of a timer as inferred from the number of
+   * bytes processed.
+   * @param timer timer
+   * @param bytes bytes processed in the time period
+   */
+  public static void bandwidth(NanoTimer timer, long bytes) {
+    LOG.info("Bandwidth = {}  MB/S",
+        timer.bandwidthDescription(bytes));
+  }
+
+  /**
+   * Work out the bandwidth in MB/s.
+   * @param bytes bytes
+   * @param durationNS duration in nanos
+   * @return the number of megabytes/second of the recorded operation
+   */
+  public static double bandwidthMBs(long bytes, long durationNS) {
+    return (bytes * 1000.0) / durationNS;
+  }
+
+  /**
+   * Recursively create a directory tree.
+   * Return the details about the created tree. The files and directories
+   * are those created under the path, not the base directory created. That
+   * is retrievable via {@link TreeScanResults#getBasePath()}.
+   * @param fs filesystem
+   * @param current parent dir
+   * @param depth depth of directory tree
+   * @param width width: subdirs per entry
+   * @param files number of files per entry
+   * @param filesize size of files to create in bytes.
+   * @return the details about the created tree.
+   * @throws IOException IO Problems
+   */
+  public static TreeScanResults createSubdirs(FileSystem fs,
+      Path current,
+      int depth,
+      int width,
+      int files,
+      int filesize) throws IOException {
+    return createSubdirs(fs, current, depth, width, files,
+        filesize, "dir-", "file-", "0");
+  }
+
+  /**
+   * Recursively create a directory tree.
+   * @param fs filesystem
+   * @param current the current dir in the walk
+   * @param depth depth of directory tree
+   * @param width width: subdirs per entry
+   * @param files number of files per entry
+   * @param filesize size of files to create in bytes.
+   * @param dirPrefix prefix for directory entries
+   * @param filePrefix prefix for file entries
+   * @param marker string which is slowly built up to uniquely name things
+   * @return the details about the created tree.
+   * @throws IOException IO Problems
+   */
+  public static TreeScanResults createSubdirs(FileSystem fs,
+      Path current,
+      int depth,
+      int width,
+      int files,
+      int filesize,
+      String dirPrefix,
+      String filePrefix,
+      String marker) throws IOException {
+    fs.mkdirs(current);
+    TreeScanResults results = new TreeScanResults(current);
+    if (depth > 0) {
+      byte[] data = dataset(filesize, 'a', 'z');
+      for (int i = 0; i < files; i++) {
+        String name = String.format("%s-%s-%04d.txt", filePrefix, marker, i);
+        Path path = new Path(current, name);
+        createFile(fs, path, true, data);
+        results.add(fs, path);
+      }
+      for (int w = 0; w < width; w++) {
+        String marker2 = String.format("%s-%04d", marker, w);
+        Path child = new Path(current, dirPrefix + marker2);
+        results.add(createSubdirs(fs, child, depth - 1, width, files,
+            filesize, dirPrefix, filePrefix, marker2));
+        results.add(fs, child);
+      }
+    }
+    return results;
+  }
+
+  /**
+   * Predicate to determine if two lists are equivalent, that is, they
+   * contain the same entries.
+   * @param left first collection of paths
+   * @param right second collection of paths
+   * @return true if each collection contains every entry of the other.
+   */
+  public static boolean collectionsEquivalent(Collection<Path> left,
+      Collection<Path> right) {
+    Set<Path> leftSet = new HashSet<>(left);
+    Set<Path> rightSet = new HashSet<>(right);
+    return leftSet.containsAll(right) && rightSet.containsAll(left);
+  }
+
+  /**
+   * Predicate to determine if two lists are equivalent, that is, they
+   * contain the same entries and neither collection contains duplicates.
+   * @param left first collection of paths
+   * @param right second collection of paths
+   * @return true if the collections are equivalent and duplicate-free.
+   */
+  public static boolean collectionsEquivalentNoDuplicates(Collection<Path> left,
+      Collection<Path> right) {
+    return collectionsEquivalent(left, right) &&
+        !containsDuplicates(left) && !containsDuplicates(right);
+  }
+
+
+  /**
+   * Predicate to test for a collection of paths containing duplicate entries.
+   * @param paths collection of paths
+   * @return true if there are duplicates.
+   */
+  public static boolean containsDuplicates(Collection<Path> paths) {
+    return new HashSet<>(paths).size() != paths.size();
+  }
+
+  /**
+   * Recursively list all entries, with a depth first traversal of the
+   * directory tree.
+   * @param fs filesystem to scan
+   * @param path base path of the scan
+   * @return the directories and files found in the walk
+   * @throws IOException IO problems
+   */
+  public static TreeScanResults treeWalk(FileSystem fs, Path path)
+      throws IOException {
+    TreeScanResults dirsAndFiles = new TreeScanResults();
+
+    FileStatus[] statuses = fs.listStatus(path);
+    for (FileStatus status : statuses) {
+      LOG.info("{}{}", status.getPath(), status.isDirectory() ? "*" : "");
+    }
+    for (FileStatus status : statuses) {
+      dirsAndFiles.add(status);
+      if (status.isDirectory()) {
+        dirsAndFiles.add(treeWalk(fs, status.getPath()));
+      }
+    }
+    return dirsAndFiles;
+  }
+
+  /**
+   * Results of recursive directory creation/scan operations.
+   */
+  public static final class TreeScanResults {
+
+    private Path basePath;
+    private final List<Path> files = new ArrayList<>();
+    private final List<Path> directories = new ArrayList<>();
+    private final List<Path> other = new ArrayList<>();
+
+
+    public TreeScanResults() {
+    }
+
+    public TreeScanResults(Path basePath) {
+      this.basePath = basePath;
+    }
+
+    /**
+     * Build from a located file status iterator.
+     * @param results results of the listFiles/listStatus call.
+     * @throws IOException IO problems during the iteration.
+     */
+    public TreeScanResults(RemoteIterator<LocatedFileStatus> results)
+        throws IOException {
+      while (results.hasNext()) {
+        add(results.next());
+      }
+    }
+
+    /**
+     * Construct results from an array of statistics.
+     * @param stats statistics array. Must not be null.
+     */
+    public TreeScanResults(FileStatus[] stats) {
+      assertNotNull("Null file status array", stats);
+      for (FileStatus stat : stats) {
+        add(stat);
+      }
+    }
+
+    /**
+     * Add all paths in the other set of results to this instance.
+     * @param that the other instance
+     * @return this instance
+     */
+    public TreeScanResults add(TreeScanResults that) {
+      files.addAll(that.files);
+      directories.addAll(that.directories);
+      other.addAll(that.other);
+      return this;
+    }
+
+    /**
+     * Add the status's path to the file, directory or "other" list,
+     * depending on its type.
+     * @param status path status to record.
+     */
+    public void add(FileStatus status) {
+      if (status.isFile()) {
+        files.add(status.getPath());
+      } else if (status.isDirectory()) {
+        directories.add(status.getPath());
+      } else {
+        other.add(status.getPath());
+      }
+    }
+
+    public void add(FileSystem fs, Path path) throws IOException {
+      add(fs.getFileStatus(path));
+    }
+
+    @Override
+    public String toString() {
+      return String.format("%d director%s and %d file%s",
+          getDirCount(),
+          getDirCount() == 1 ? "y" : "ies",
+          getFileCount(),
+          getFileCount() == 1 ? "" : "s");
+    }
+
+    /**
+     * Assert that the state of a listing has the specific number of files,
+     * directories and other entries. The error text will include
+     * the {@code text} param, the field in question, and the entire object's
+     * string value.
+     * @param text text prefix for assertions.
+     * @param f expected file count
+     * @param d expected directory count
+     * @param o expected other entries.
+     */
+    public void assertSizeEquals(String text, long f, long d, long o) {
+      String self = toString();
+      Assert.assertEquals(text + ": file count in " + self,
+          f, getFileCount());
+      Assert.assertEquals(text + ": directory count in " + self,
+          d, getDirCount());
+      Assert.assertEquals(text + ": 'other' count in " + self,
+          o, getOtherCount());
+    }
+
+    /**
+     * Assert that the trees are equivalent: that every list matches (and
+     * that neither has any duplicates).
+     * @param that the other entry
+     */
+    public void assertEquivalent(TreeScanResults that) {
+      String details = "this= " + this + "; that=" + that;
+      assertFieldsEquivalent("files", that, files, that.files);
+      assertFieldsEquivalent("directories", that,
+          directories, that.directories);
+      assertFieldsEquivalent("other", that, other, that.other);
+    }
+
+    /**
+     * Assert that a field in two instances are equivalent.
+     * @param fieldname field name for error messages
+     * @param that the other instance to scan
+     * @param ours our field's contents
+     * @param theirs the other instance's field contents
+     */
+    public void assertFieldsEquivalent(String fieldname,
+        TreeScanResults that,
+        List<Path> ours, List<Path> theirs) {
+      assertFalse("Duplicate  " + files + " in " + this,
+          containsDuplicates(ours));
+      assertFalse("Duplicate  " + files + " in other " + that,
+          containsDuplicates(theirs));
+      assertTrue(fieldname + " mismatch: between {" + this + "}" +
+              " and {" + that + "}",
+          collectionsEquivalent(files, that.files));
+    }
+
+    public List<Path> getFiles() {
+      return files;
+    }
+
+    public List<Path> getDirectories() {
+      return directories;
+    }
+
+    public List<Path> getOther() {
+      return other;
+    }
+
+    public Path getBasePath() {
+      return basePath;
+    }
+
+    public long getFileCount() {
+      return files.size();
+    }
+
+    public long getDirCount() {
+      return directories.size();
+    }
+
+    public long getOtherCount() {
+      return other.size();
+    }
+
+    /**
+     * Total count of entries.
+     * @return the total number of entries
+     */
+    public long totalCount() {
+      return getFileCount() + getDirCount() + getOtherCount();
+    }
+
+  }
+
+  /**
+   * A simple class for timing operations in nanoseconds, and for
+   * printing some useful results in the process.
+   */
+  public static final class NanoTimer {
+    private final long startTime;
+    private long endTime;
+
+    public NanoTimer() {
+      startTime = now();
+    }
+
+    /**
+     * End the operation.
+     * @return the duration of the operation
+     */
+    public long end() {
+      endTime = now();
+      return duration();
+    }
+
+    /**
+     * End the operation; log the duration.
+     * @param format message
+     * @param args any arguments
+     * @return the duration of the operation
+     */
+    public long end(String format, Object... args) {
+      long d = end();
+      LOG.info("Duration of {}: {} nS",
+          String.format(format, args), toHuman(d));
+      return d;
+    }
+
+    public long now() {
+      return System.nanoTime();
+    }
+
+    public long duration() {
+      return endTime - startTime;
+    }
+
+    public double bandwidth(long bytes) {
+      return bandwidthMBs(bytes, duration());
+    }
+
+    /**
+     * Bandwidth as bytes processed per nanosecond of the operation.
+     * @param bytes bytes processed
+     * @return bytes processed per nanosecond of the timed operation.
+     */
+    public double bandwidthBytes(long bytes) {
+      return (bytes * 1.0) / duration();
+    }
+
+    /**
+     * How many nanoseconds per IOP, byte, etc.
+     * @param operations operations processed in this time period
+     * @return the average number of nanoseconds per operation
+     */
+    public long nanosPerOperation(long operations) {
+      return duration() / operations;
+    }
+
+    /**
+     * Get a description of the bandwidth, even down to fractions of
+     * a MB.
+     * @param bytes bytes processed
+     * @return bandwidth
+     */
+    public String bandwidthDescription(long bytes) {
+      return String.format("%,.6f", bandwidth(bytes));
+    }
+
+    public long getStartTime() {
+      return startTime;
+    }
+
+    public long getEndTime() {
+      return endTime;
+    }
+  }
 }
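Taken together, these helpers let a contract test time a bulk directory creation and then verify that a recursive listing sees exactly what was created. A sketch of such a use, assuming `fs` and `basePath` are supplied by the enclosing test (class and method names below are illustrative):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.contract.ContractTestUtils;

    public class TreeScanExample {
      public static void createAndVerifyTree(FileSystem fs, Path basePath)
          throws java.io.IOException {
        ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
        // Depth 3, 2 subdirectories per level, 4 files per directory, 1 KB each.
        ContractTestUtils.TreeScanResults created =
            ContractTestUtils.createSubdirs(fs, basePath, 3, 2, 4, 1024);
        timer.end("creating tree under %s", basePath);
        // A depth-first walk should find exactly the entries just created.
        ContractTestUtils.TreeScanResults walked =
            ContractTestUtils.treeWalk(fs, basePath);
        created.assertEquivalent(walked);
      }
    }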

+ 17 - 12
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRawCoderMapping.java

@@ -46,37 +46,42 @@ public class TestCodecRawCoderMapping {
 
   @Test
   public void testRSDefaultRawCoder() {
+    ErasureCoderOptions coderOptions = new ErasureCoderOptions(
+        numDataUnit, numParityUnit);
     // should return default raw coder of rs-default codec
-    RawErasureEncoder encoder = CodecUtil.createRSRawEncoder(
-        conf, numDataUnit, numParityUnit);
+    RawErasureEncoder encoder = CodecUtil.createRawEncoder(
+        conf, ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
     Assert.assertTrue(encoder instanceof RSRawEncoder);
-    RawErasureDecoder decoder = CodecUtil.createRSRawDecoder(
-        conf, numDataUnit, numParityUnit);
+    RawErasureDecoder decoder = CodecUtil.createRawDecoder(
+        conf, ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
     Assert.assertTrue(decoder instanceof RSRawDecoder);
 
     // should return default raw coder of rs-legacy codec
-    encoder = CodecUtil.createRSRawEncoder(conf, numDataUnit, numParityUnit,
-        ErasureCodeConstants.RS_LEGACY_CODEC_NAME);
+    encoder = CodecUtil.createRawEncoder(conf,
+        ErasureCodeConstants.RS_LEGACY_CODEC_NAME, coderOptions);
     Assert.assertTrue(encoder instanceof RSRawEncoderLegacy);
-    decoder = CodecUtil.createRSRawDecoder(conf, numDataUnit, numParityUnit,
-        ErasureCodeConstants.RS_LEGACY_CODEC_NAME);
+    decoder = CodecUtil.createRawDecoder(conf,
+        ErasureCodeConstants.RS_LEGACY_CODEC_NAME, coderOptions);
     Assert.assertTrue(decoder instanceof RSRawDecoderLegacy);
   }
 
   @Test
   public void testDedicatedRawCoderKey() {
+    ErasureCoderOptions coderOptions = new ErasureCoderOptions(
+        numDataUnit, numParityUnit);
+
     String dummyFactName = "DummyNoneExistingFactory";
     // set the dummy factory to rs-legacy and create a raw coder
     // with rs-default, which is OK as the raw coder key is not used
     conf.set(CommonConfigurationKeys.
         IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_KEY, dummyFactName);
-    RawErasureEncoder encoder = CodecUtil.createRSRawEncoder(conf, numDataUnit,
-        numParityUnit, ErasureCodeConstants.RS_DEFAULT_CODEC_NAME);
+    RawErasureEncoder encoder = CodecUtil.createRawEncoder(conf,
+        ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
     Assert.assertTrue(encoder instanceof RSRawEncoder);
     // now create the raw coder with rs-legacy, which should throw exception
     try {
-      CodecUtil.createRSRawEncoder(conf, numDataUnit, numParityUnit,
-          ErasureCodeConstants.RS_LEGACY_CODEC_NAME);
+      CodecUtil.createRawEncoder(conf,
+          ErasureCodeConstants.RS_LEGACY_CODEC_NAME, coderOptions);
       Assert.fail();
     } catch (Exception e) {
       GenericTestUtils.assertExceptionContains("Failed to create raw coder", e);
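For reference, the construction pattern the test now exercises, using the `ErasureCoderOptions` and `CodecUtil.createRawEncoder`/`createRawDecoder` names introduced by this patch (the wrapper class below is illustrative, not part of the change):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.erasurecode.CodecUtil;
    import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
    import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
    import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
    import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

    public class RawCoderFactoryExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // 6 data units + 3 parity units, as in an RS(6,3) layout.
        ErasureCoderOptions coderOptions = new ErasureCoderOptions(6, 3);
        RawErasureEncoder encoder = CodecUtil.createRawEncoder(
            conf, ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
        RawErasureDecoder decoder = CodecUtil.createRawDecoder(
            conf, ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
        System.out.println(encoder.getClass() + " / " + decoder.getClass());
      }
    }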

+ 5 - 9
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java

@@ -35,7 +35,7 @@ import static org.junit.Assert.assertTrue;
 public abstract class TestCoderBase {
   protected static Random RAND = new Random();
 
-  private boolean allowDump = true;
+  protected boolean allowDump = true;
 
   private Configuration conf;
   protected int numDataUnits;
@@ -90,13 +90,8 @@ public abstract class TestCoderBase {
     }
   }
 
-  /**
-   * Set true during setup if want to dump test settings and coding data,
-   * useful in debugging.
-   * @param allowDump
-   */
-  protected void setAllowDump(boolean allowDump) {
-    this.allowDump = allowDump;
+  protected boolean isAllowDump() {
+    return allowDump;
   }
 
   /**
@@ -502,7 +497,8 @@ public abstract class TestCoderBase {
       sb.append(" erasedParityIndexes=").
               append(Arrays.toString(erasedParityIndexes));
       sb.append(" usingDirectBuffer=").append(usingDirectBuffer);
-      sb.append(" isAllowingChangeInputs=").append(allowChangeInputs);
+      sb.append(" allowChangeInputs=").append(allowChangeInputs);
+      sb.append(" allowVerboseDump=").append(allowDump);
       sb.append("\n");
 
       System.out.println(sb.toString());

Some files were not shown because too many files were changed in this diff