
Merge branch 'trunk' into HDFS-7240

Anu Engineer, 8 years ago
parent
commit
595257e3f4
100 changed files with 3017 additions and 1841 deletions
  1. + 9 - 9  .gitattributes
  2. + 9 - 1  hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml
  3. + 0 - 0  hadoop-build-tools/src/main/resources/checkstyle/suppressions.xml
  4. + 64 - 0  hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/IncludePublicAnnotationsJDiffDoclet.java
  5. + 3 - 0  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
  6. + 2 - 8  hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  7. + 1 - 1  hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.7.2.xml
  8. + 8 - 5  hadoop-common-project/hadoop-common/src/main/bin/hadoop
  9. + 139 - 5  hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
  10. + 16 - 15  hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
  11. + 13 - 8  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
  12. + 20 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
  13. + 2 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderFactory.java
  14. + 10 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  15. + 8 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  16. + 109 - 19  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  17. + 7 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataOutputStream.java
  18. + 3 - 3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
  19. + 4 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
  20. + 1 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
  21. + 9 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java
  22. + 1 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java
  23. + 7 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
  24. + 7 - 3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
  25. + 13 - 3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
  26. + 7 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java
  27. + 7 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java
  28. + 6 - 3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
  29. + 22 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
  30. + 3 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/CoderUtil.java
  31. + 10 - 4  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java
  32. + 14 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
  33. + 78 - 40  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
  34. + 128 - 50  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  35. + 3 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
  36. + 7 - 6  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
  37. + 24 - 5  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  38. + 3 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java
  39. + 9 - 16  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java
  40. + 2 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  41. + 1 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
  42. + 8 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AutoCloseableLock.java
  43. + 4 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
  44. + 4 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
  45. + 31 - 27  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java
  46. + 5 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/Hash.java
  47. + 21 - 10  hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  48. + 1 - 0  hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md
  49. + 6 - 13  hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
  50. + 1 - 0  hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md
  51. + 30 - 4  hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md
  52. + 44 - 0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/ExactLineComparator.java
  53. + 24 - 0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
  54. + 57 - 0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoOutputStreamClosing.java
  55. + 1 - 2  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java
  56. + 1 - 0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
  57. + 1 - 0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
  58. + 1 - 1  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java
  59. + 5 - 4  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java
  60. + 154 - 0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestConnectionRetryPolicy.java
  61. + 10 - 28  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
  62. + 84 - 2  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
  63. + 6 - 230  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java
  64. + 0 - 13  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCallBenchmark.java
  65. + 23 - 219  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java
  66. + 29 - 15  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java
  67. + 166 - 0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestReuseRpcConnections.java
  68. + 59 - 18  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java
  69. + 48 - 26  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
  70. + 23 - 0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java
  71. + 103 - 188  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java
  72. + 23 - 5  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
  73. + 6 - 1  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java
  74. + 2 - 2  hadoop-common-project/hadoop-common/src/test/proto/test.proto
  75. + 7 - 1  hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto
  76. + 40 - 0  hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_client_opts.bats
  77. + 68 - 0  hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_opts.bats
  78. + 52 - 0  hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_secure_opts.bats
  79. + 53 - 0  hadoop-common-project/hadoop-common/src/test/scripts/hadoop_verify_user.bats
  80. + 2 - 0  hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
  81. + 5 - 5  hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
  82. + 40 - 36  hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
  83. + 4 - 0  hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
  84. + 5 - 0  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  85. + 11 - 9  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
  86. + 104 - 550  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
  87. + 7 - 6  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
  88. + 109 - 37  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
  89. + 3 - 0  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
  90. + 3 - 3  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java
  91. + 104 - 0  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PositionStripeReader.java
  92. + 95 - 0  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java
  93. + 463 - 0  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java
  94. + 0 - 1  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java
  95. + 24 - 25  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
  96. + 0 - 4  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
  97. + 6 - 6  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
  98. + 0 - 0  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/package-info.java
  99. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
  100. + 41 - 117  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java

+ 9 - 9
.gitattributes

@@ -1,15 +1,15 @@
 # Auto detect text files and perform LF normalization
 *        text=auto
 
-*.cs     text diff=csharp
-*.java   text diff=java
-*.html   text diff=html
-*.py     text diff=python
-*.pl     text diff=perl
-*.pm     text diff=perl
-*.css    text
-*.js     text
-*.sql    text
+*.cs     text diff=csharp eol=lf
+*.java   text diff=java eol=lf
+*.html   text diff=html eol=lf
+*.py     text diff=python eol=lf
+*.pl     text diff=perl eol=lf
+*.pm     text diff=perl eol=lf
+*.css    text eol=lf
+*.js     text eol=lf
+*.sql    text eol=lf
 
 *.sh     text eol=lf
 

+ 9 - 1
hadoop-build-tools/src/main/resources/checkstyle/checkstyle.xml

@@ -49,6 +49,10 @@
 
 <module name="Checker">
 
+    <module name="SuppressWarningsFilter"/>
+    <module name="SuppressionCommentFilter"/>
+    <module name="SuppressWithNearbyCommentFilter"/>
+
     <!-- Checks that a package.html file exists for each package.     -->
     <!-- See http://checkstyle.sf.net/config_javadoc.html#PackageHtml -->
     <module name="JavadocPackage"/>
@@ -67,6 +71,10 @@
 
     <module name="TreeWalker">
 
+        <module name="SuppressWarningsHolder"/>
+        <module name="FileContentsHolder"/>
+
+
         <!-- Checks for Javadoc comments.                     -->
         <!-- See http://checkstyle.sf.net/config_javadoc.html -->
         <module name="JavadocType">
@@ -180,7 +188,7 @@
         <module name="Indentation">
             <property name="basicOffset" value="2" />
             <property name="caseIndent" value="0" />
-        </module> 
+        </module>
         <!--<module name="TodoComment"/>-->
         <module name="UpperEll"/>
 

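Note: the checkstyle.xml hunk above wires in three suppression filters plus the holder modules they require. As a rough illustration only, assuming the filters' default annotation and comment formats, suppressions can then be written in source like this; the check names below are placeholders, not part of this commit:

// Illustration only: suppression styles the new filters recognize with their
// default settings. The check names are placeholders, not part of this commit.
public class SuppressionExamples {

  // Honored by SuppressWarningsFilter together with SuppressWarningsHolder.
  @SuppressWarnings("checkstyle:parameternumber")
  void manyArgs(int a, int b, int c, int d, int e, int f, int g, int h) {
  }

  // Honored by SuppressionCommentFilter (paired OFF/ON comments).
  // CHECKSTYLE:OFF
  int badly_named_field;
  // CHECKSTYLE:ON
}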
+ 0 - 0
dev-support/checkstyle/suppressions.xml → hadoop-build-tools/src/main/resources/checkstyle/suppressions.xml


+ 64 - 0
hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/IncludePublicAnnotationsJDiffDoclet.java

@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.classification.tools;
+
+import com.sun.javadoc.DocErrorReporter;
+import com.sun.javadoc.LanguageVersion;
+import com.sun.javadoc.RootDoc;
+
+import jdiff.JDiff;
+
+/**
+ * A <a href="http://java.sun.com/javase/6/docs/jdk/api/javadoc/doclet/">Doclet</a>
+ * that only includes class-level elements that are annotated with
+ * {@link org.apache.hadoop.classification.InterfaceAudience.Public}.
+ * Class-level elements with no annotation are excluded.
+ * In addition, all elements that are annotated with
+ * {@link org.apache.hadoop.classification.InterfaceAudience.Private} or
+ * {@link org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate}
+ * are also excluded.
+ * It delegates to the JDiff Doclet, and takes the same options.
+ */
+public class IncludePublicAnnotationsJDiffDoclet {
+
+  public static LanguageVersion languageVersion() {
+    return LanguageVersion.JAVA_1_5;
+  }
+
+  public static boolean start(RootDoc root) {
+    System.out.println(
+        IncludePublicAnnotationsJDiffDoclet.class.getSimpleName());
+    RootDocProcessor.treatUnannotatedClassesAsPrivate = true;
+    return JDiff.start(RootDocProcessor.process(root));
+  }
+
+  public static int optionLength(String option) {
+    Integer length = StabilityOptions.optionLength(option);
+    if (length != null) {
+      return length;
+    }
+    return JDiff.optionLength(option);
+  }
+
+  public static boolean validOptions(String[][] options,
+      DocErrorReporter reporter) {
+    StabilityOptions.validOptions(options, reporter);
+    String[][] filteredOptions = StabilityOptions.filterOptions(options);
+    return JDiff.validOptions(filteredOptions, reporter);
+  }
+}

+ 3 - 0
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java

@@ -438,6 +438,9 @@ public class AuthenticationFilter implements Filter {
       for (Cookie cookie : cookies) {
         if (cookie.getName().equals(AuthenticatedURL.AUTH_COOKIE)) {
           tokenStr = cookie.getValue();
+          if (tokenStr.isEmpty()) {
+            throw new AuthenticationException("Unauthorized access");
+          }
           try {
             tokenStr = signer.verifyAndExtract(tokenStr);
           } catch (SignerException ex) {

+ 2 - 8
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -44,7 +44,7 @@
      --> 
      <Match>
        <Class name="org.apache.hadoop.ipc.Client$Connection" />
-       <Field name="out" />
+       <Field name="ipcStreams" />
        <Bug pattern="IS2_INCONSISTENT_SYNC" />
      </Match>
     <!--
@@ -341,13 +341,7 @@
        <Method name="removeRenewAction" />
        <Bug pattern="BC_UNCONFIRMED_CAST" />
      </Match>
-     
-     <!-- Inconsistent synchronization flagged by findbugs is not valid. -->
-     <Match>
-       <Class name="org.apache.hadoop.ipc.Client$Connection" />
-       <Field name="in" />
-       <Bug pattern="IS2_INCONSISTENT_SYNC" />
-     </Match>
+
      <!-- 
        The switch condition for INITIATE is expected to fallthru to RESPONSE
        to process initial sasl response token included in the INITIATE

File diff suppressed because it is too large

+ 1 - 1
hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.7.2.xml


+ 8 - 5
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -161,10 +161,6 @@ function hadoopcmd_case
       fi
     ;;
   esac
-
-  # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
-  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 }
 
 # This script runs the hadoop core commands.
@@ -194,6 +190,8 @@ fi
 HADOOP_SUBCMD=$1
 shift
 
+hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
 HADOOP_SUBCMD_ARGS=("$@")
 
 if declare -f hadoop_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then
@@ -203,15 +201,20 @@ else
   hadoopcmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
 fi
 
-hadoop_verify_user "${HADOOP_SUBCMD}"
+hadoop_add_client_opts
 
 if [[ ${HADOOP_WORKER_MODE} = true ]]; then
   hadoop_common_worker_mode_execute "${HADOOP_COMMON_HOME}/bin/hadoop" "${HADOOP_USER_PARAMS[@]}"
   exit $?
 fi
 
+hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
 if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
   HADOOP_SECURE_USER="${HADOOP_SUBCMD_SECUREUSER}"
+
+  hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
+
   hadoop_verify_secure_prereq
   hadoop_setup_secure_service
   priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"

+ 139 - 5
hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh

@@ -306,6 +306,13 @@ function hadoop_bootstrap
   HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
   HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}
 
+  # by default, whatever we are about to run doesn't support
+  # daemonization
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION=false
+
+  # shellcheck disable=SC2034
+  HADOOP_SUBCMD_SECURESERVICE=false
+
   # usage output set to zero
   hadoop_reset_usage
 
@@ -1230,6 +1237,20 @@ function hadoop_translate_cygwin_path
   fi
 }
 
+## @description  Adds the HADOOP_CLIENT_OPTS variable to
+## @description  HADOOP_OPTS if HADOOP_SUBCMD_SUPPORTDAEMONIZATION is false
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+function hadoop_add_client_opts
+{
+  if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = false
+     || -z "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" ]]; then
+    hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+  fi
+}
+
 ## @description  Finish configuring Hadoop specific system properties
 ## @description  prior to executing Java
 ## @audience     private
@@ -1963,17 +1984,130 @@ function hadoop_secure_daemon_handler
 ## @return       will exit on failure conditions
 function hadoop_verify_user
 {
-  local command=$1
-  local uservar="HADOOP_${command}_USER"
+  declare program=$1
+  declare command=$2
+  declare uprogram
+  declare ucommand
+  declare uvar
+
+  if [[ -z "${BASH_VERSINFO[0]}" ]] \
+     || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
+    uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
+    ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
+  else
+    uprogram=${program^^}
+    ucommand=${command^^}
+  fi
+
+  uvar="${uprogram}_${ucommand}_USER"
 
-  if [[ -n ${!uservar} ]]; then
-    if [[ ${!uservar} !=  "${USER}" ]]; then
-      hadoop_error "ERROR: ${command} can only be executed by ${!uservar}."
+  if [[ -n ${!uvar} ]]; then
+    if [[ ${!uvar} !=  "${USER}" ]]; then
+      hadoop_error "ERROR: ${command} can only be executed by ${!uvar}."
       exit 1
     fi
   fi
 }
 
+## @description  Add custom (program)_(command)_OPTS to HADOOP_OPTS.
+## @description  Also handles the deprecated cases from pre-3.x.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        program
+## @param        subcommand
+## @return       will exit on failure conditions
+function hadoop_subcommand_opts
+{
+  declare program=$1
+  declare command=$2
+  declare uvar
+  declare depvar
+  declare uprogram
+  declare ucommand
+
+  if [[ -z "${program}" || -z "${command}" ]]; then
+    return 1
+  fi
+
+  # bash 4 and up have built-in ways to upper and lower
+  # case the contents of vars.  This is faster than
+  # calling tr.
+
+  if [[ -z "${BASH_VERSINFO[0]}" ]] \
+     || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
+    uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
+    ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
+  else
+    uprogram=${program^^}
+    ucommand=${command^^}
+  fi
+
+  uvar="${uprogram}_${ucommand}_OPTS"
+
+  # Let's handle all of the deprecation cases early
+  # HADOOP_NAMENODE_OPTS -> HDFS_NAMENODE_OPTS
+
+  depvar="HADOOP_${ucommand}_OPTS"
+
+  if [[ "${depvar}" != "${uvar}" ]]; then
+    if [[ -n "${!depvar}" ]]; then
+      hadoop_deprecate_envvar "${depvar}" "${uvar}"
+    fi
+  fi
+
+  if [[ -n ${!uvar} ]]; then
+    hadoop_debug "Appending ${uvar} onto HADOOP_OPTS"
+    HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}"
+    return 0
+  fi
+}
+
+## @description  Add custom (program)_(command)_SECURE_EXTRA_OPTS to HADOOP_OPTS.
+## @description  This *does not* handle the pre-3.x deprecated cases
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        program
+## @param        subcommand
+## @return       will exit on failure conditions
+function hadoop_subcommand_secure_opts
+{
+  declare program=$1
+  declare command=$2
+  declare uvar
+  declare uprogram
+  declare ucommand
+
+  if [[ -z "${program}" || -z "${command}" ]]; then
+    return 1
+  fi
+
+  # bash 4 and up have built-in ways to upper and lower
+  # case the contents of vars.  This is faster than
+  # calling tr.
+
+  if [[ -z "${BASH_VERSINFO[0]}" ]] \
+     || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
+    uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
+    ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
+  else
+    uprogram=${program^^}
+    ucommand=${command^^}
+  fi
+
+  # HDFS_DATANODE_SECURE_EXTRA_OPTS
+  # HDFS_NFS3_SECURE_EXTRA_OPTS
+  # ...
+  uvar="${uprogram}_${ucommand}_SECURE_EXTRA_OPTS"
+
+  if [[ -n ${!uvar} ]]; then
+    hadoop_debug "Appending ${uvar} onto HADOOP_OPTS"
+    HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}"
+    return 0
+  fi
+}
+
 ## @description  Perform the 'hadoop classpath', etc subcommand with the given
 ## @description  parameters
 ## @audience     private

+ 16 - 15
hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh

@@ -294,16 +294,16 @@ esac
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
 # a) Set JMX options
-# export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026"
+# export HDFS_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026"
 #
 # b) Set garbage collection logs
-# export HADOOP_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
+# export HDFS_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
 #
 # c) ... or set them directly
-# export HADOOP_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
+# export HDFS_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
 
 # this is the default:
-# export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
+# export HDFS_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
 
 ###
 # SecondaryNameNode specific parameters
@@ -313,7 +313,7 @@ esac
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
 # This is the default:
-# export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
+# export HDFS_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
 
 ###
 # DataNode specific parameters
@@ -323,7 +323,7 @@ esac
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
 # This is the default:
-# export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
+# export HDFS_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
 
 # On secure datanodes, user to run the datanode as after dropping privileges.
 # This **MUST** be uncommented to enable secure HDFS if using privileged ports
@@ -336,7 +336,7 @@ esac
 # Supplemental options for secure datanodes
 # By default, Hadoop uses jsvc which needs to know to launch a
 # server jvm.
-# export HADOOP_DN_SECURE_EXTRA_OPTS="-jvm server"
+# export HDFS_DATANODE_SECURE_EXTRA_OPTS="-jvm server"
 
 # Where datanode log files are stored in the secure data environment.
 # This will replace the hadoop.log.dir Java property in secure mode.
@@ -352,18 +352,18 @@ esac
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
-# export HADOOP_NFS3_OPTS=""
+# export HDFS_NFS3_OPTS=""
 
 # Specify the JVM options to be used when starting the Hadoop portmapper.
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
-# export HADOOP_PORTMAP_OPTS="-Xmx512m"
+# export HDFS_PORTMAP_OPTS="-Xmx512m"
 
 # Supplemental options for priviliged gateways
 # By default, Hadoop uses jsvc which needs to know to launch a
 # server jvm.
-# export HADOOP_NFS3_SECURE_EXTRA_OPTS="-jvm server"
+# export HDFS_NFS3_SECURE_EXTRA_OPTS="-jvm server"
 
 # On privileged gateways, user to run the gateway as after dropping privileges
 # This will replace the hadoop.id.str Java property in secure mode.
@@ -376,7 +376,7 @@ esac
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
-# export HADOOP_ZKFC_OPTS=""
+# export HDFS_ZKFC_OPTS=""
 
 ###
 # QuorumJournalNode specific parameters
@@ -385,7 +385,7 @@ esac
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
-# export HADOOP_JOURNALNODE_OPTS=""
+# export HDFS_JOURNALNODE_OPTS=""
 
 ###
 # HDFS Balancer specific parameters
@@ -394,7 +394,7 @@ esac
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
-# export HADOOP_BALANCER_OPTS=""
+# export HDFS_BALANCER_OPTS=""
 
 ###
 # HDFS Mover specific parameters
@@ -403,7 +403,7 @@ esac
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
-# export HADOOP_MOVER_OPTS=""
+# export HDFS_MOVER_OPTS=""
 
 ###
 # Advanced Users Only!
@@ -417,6 +417,7 @@ esac
 #
 # To prevent accidents, shell commands be (superficially) locked
 # to only allow certain users to execute certain subcommands.
+# It uses the format of (command)_(subcommand)_USER.
 #
 # For example, to limit who can execute the namenode command,
-# export HADOOP_namenode_USER=hdfs
+# export HDFS_NAMENODE_USER=hdfs

+ 13 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java

@@ -24,11 +24,14 @@ import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.HttpHeaders;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.http.HttpServer2;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * A servlet to print out the running configuration data.
  */
@@ -37,9 +40,8 @@ import org.apache.hadoop.http.HttpServer2;
 public class ConfServlet extends HttpServlet {
   private static final long serialVersionUID = 1L;
 
-  private static final String FORMAT_JSON = "json";
-  private static final String FORMAT_XML = "xml";
-  private static final String FORMAT_PARAM = "format";
+  protected static final String FORMAT_JSON = "json";
+  protected static final String FORMAT_XML = "xml";
 
   /**
    * Return the Configuration of the daemon hosting this servlet.
@@ -61,11 +63,7 @@ public class ConfServlet extends HttpServlet {
       return;
     }
 
-    String format = request.getParameter(FORMAT_PARAM);
-    if (null == format) {
-      format = FORMAT_XML;
-    }
-
+    String format = parseAccecptHeader(request);
     if (FORMAT_XML.equals(format)) {
       response.setContentType("text/xml; charset=utf-8");
     } else if (FORMAT_JSON.equals(format)) {
@@ -81,6 +79,13 @@ public class ConfServlet extends HttpServlet {
     out.close();
   }
 
+  @VisibleForTesting
+  static String parseAccecptHeader(HttpServletRequest request) {
+    String format = request.getHeader(HttpHeaders.ACCEPT);
+    return format != null && format.contains(FORMAT_JSON) ?
+        FORMAT_JSON : FORMAT_XML;
+  }
+
   /**
    * Guts of the servlet - extracted for easy testing.
    */

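Note: with the ConfServlet change above, the /conf endpoint picks JSON or XML from the HTTP Accept header instead of a ?format= query parameter. A minimal client-side sketch; the daemon host and port are placeholders, not taken from this commit:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class ConfClientSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder address; point this at any daemon that serves /conf.
    URL url = new URL("http://namenode.example.com:9870/conf");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // The Accept header now selects the format: anything containing "json"
    // yields JSON, everything else falls back to XML.
    conn.setRequestProperty("Accept", "application/json");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
      in.lines().forEach(System.out::println);
    }
  }
}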
+ 20 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java

@@ -76,6 +76,7 @@ public class CryptoOutputStream extends FilterOutputStream implements
   private final byte[] key;
   private final byte[] initIV;
   private byte[] iv;
+  private boolean closeOutputStream;
   
   public CryptoOutputStream(OutputStream out, CryptoCodec codec, 
       int bufferSize, byte[] key, byte[] iv) throws IOException {
@@ -85,6 +86,13 @@ public class CryptoOutputStream extends FilterOutputStream implements
   public CryptoOutputStream(OutputStream out, CryptoCodec codec, 
       int bufferSize, byte[] key, byte[] iv, long streamOffset) 
       throws IOException {
+    this(out, codec, bufferSize, key, iv, streamOffset, true);
+  }
+
+  public CryptoOutputStream(OutputStream out, CryptoCodec codec,
+      int bufferSize, byte[] key, byte[] iv, long streamOffset,
+      boolean closeOutputStream)
+      throws IOException {
     super(out);
     CryptoStreamUtils.checkCodec(codec);
     this.bufferSize = CryptoStreamUtils.checkBufferSize(codec, bufferSize);
@@ -95,6 +103,7 @@ public class CryptoOutputStream extends FilterOutputStream implements
     inBuffer = ByteBuffer.allocateDirect(this.bufferSize);
     outBuffer = ByteBuffer.allocateDirect(this.bufferSize);
     this.streamOffset = streamOffset;
+    this.closeOutputStream = closeOutputStream;
     try {
       encryptor = codec.createEncryptor();
     } catch (GeneralSecurityException e) {
@@ -110,8 +119,14 @@ public class CryptoOutputStream extends FilterOutputStream implements
   
   public CryptoOutputStream(OutputStream out, CryptoCodec codec, 
       byte[] key, byte[] iv, long streamOffset) throws IOException {
+    this(out, codec, key, iv, streamOffset, true);
+  }
+
+  public CryptoOutputStream(OutputStream out, CryptoCodec codec,
+      byte[] key, byte[] iv, long streamOffset, boolean closeOutputStream)
+      throws IOException {
     this(out, codec, CryptoStreamUtils.getBufferSize(codec.getConf()), 
-        key, iv, streamOffset);
+        key, iv, streamOffset, closeOutputStream);
   }
   
   public OutputStream getWrappedStream() {
@@ -221,7 +236,10 @@ public class CryptoOutputStream extends FilterOutputStream implements
       return;
     }
     try {
-      super.close();
+      flush();
+      if (closeOutputStream) {
+        super.close();
+      }
       freeBuffers();
     } finally {
       closed = true;

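Note: the new closeOutputStream flag lets a CryptoOutputStream flush and release its buffers on close() without closing the stream it wraps. A minimal sketch of the new constructor, assuming the default codec configuration and a throwaway all-zero key/IV used purely for illustration:

import java.io.ByteArrayOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.CryptoOutputStream;

public class CryptoStreamSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    CryptoCodec codec = CryptoCodec.getInstance(conf); // default AES/CTR codec
    byte[] key = new byte[16]; // all-zero demo key, illustration only
    byte[] iv = new byte[16];  // all-zero demo IV, illustration only

    ByteArrayOutputStream underlying = new ByteArrayOutputStream();
    // closeOutputStream = false: close() flushes and frees the crypto buffers
    // but leaves `underlying` open for the caller.
    try (CryptoOutputStream out =
        new CryptoOutputStream(underlying, codec, key, iv, 0L, false)) {
      out.write("hello".getBytes("UTF-8"));
    }
    underlying.write('!'); // still usable after the wrapper is closed
  }
}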
+ 2 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderFactory.java

@@ -29,6 +29,7 @@ import java.util.ServiceLoader;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 
 /**
  * A factory to create a list of KeyProvider based on the path given in a
@@ -39,7 +40,7 @@ import org.apache.hadoop.conf.Configuration;
 @InterfaceStability.Unstable
 public abstract class KeyProviderFactory {
   public static final String KEY_PROVIDER_PATH =
-      "hadoop.security.key.provider.path";
+      CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
 
   public abstract KeyProvider createProvider(URI providerName,
                                              Configuration conf

+ 10 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -78,12 +78,20 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   /** Default value for IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE */
   public static final int IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_DEFAULT =
       100;
-      
+
+  /** Max request size a server will accept. */
   public static final String IPC_MAXIMUM_DATA_LENGTH =
       "ipc.maximum.data.length";
-  
+  /** Default value for IPC_MAXIMUM_DATA_LENGTH. */
   public static final int IPC_MAXIMUM_DATA_LENGTH_DEFAULT = 64 * 1024 * 1024;
 
+  /** Max response size a client will accept. */
+  public static final String IPC_MAXIMUM_RESPONSE_LENGTH =
+      "ipc.maximum.response.length";
+  /** Default value for IPC_MAXIMUM_RESPONSE_LENGTH. */
+  public static final int IPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT =
+      128 * 1024 * 1024;
+
   /** How many calls per handler are allowed in the queue. */
   public static final String  IPC_SERVER_HANDLER_QUEUE_SIZE_KEY =
     "ipc.server.handler.queue.size";

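Note: the hunk above introduces ipc.maximum.response.length, a client-side cap on RPC response size that mirrors the existing server-side ipc.maximum.data.length cap. A minimal sketch of setting both through the constants:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class IpcLimitsSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Server side: largest request accepted (pre-existing key, 64 MB default).
    conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH,
        CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
    // Client side: largest response accepted (key added here, 128 MB default).
    conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_RESPONSE_LENGTH,
        CommonConfigurationKeys.IPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT);
  }
}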
+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -628,6 +628,14 @@ public class CommonConfigurationKeysPublic {
   public static final String  HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS =
     "hadoop.security.impersonation.provider.class";
 
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final String HADOOP_SECURITY_KEY_PROVIDER_PATH =
+      "hadoop.security.key.provider.path";
+
   //  <!-- KMSClientProvider configurations -->
   /**
    * @see

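Note: the new HADOOP_SECURITY_KEY_PROVIDER_PATH constant carries the same string KeyProviderFactory hard-coded before ("hadoop.security.key.provider.path"), so callers can reference it without repeating the literal. A minimal sketch; the KMS address is a placeholder, not something defined by this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

public class KeyProviderPathSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Same property string as before, now referenced through the constant.
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
        "kms://http@kms.example.com:9600/kms"); // placeholder KMS host
  }
}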
+ 109 - 19
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -73,6 +73,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.htrace.core.Tracer;
 import org.apache.htrace.core.TraceScope;
 
+import com.google.common.base.Preconditions;
 import com.google.common.annotations.VisibleForTesting;
 
 import static com.google.common.base.Preconditions.checkArgument;
@@ -1530,7 +1531,68 @@ public abstract class FileSystem extends Configured implements Closeable {
    */
   public abstract FileStatus[] listStatus(Path f) throws FileNotFoundException, 
                                                          IOException;
-    
+
+  /**
+   * Represents a batch of directory entries when iteratively listing a
+   * directory. This is a private API not meant for use by end users.
+   * <p>
+   * For internal use by FileSystem subclasses that override
+   * {@link FileSystem#listStatusBatch(Path, byte[])} to implement iterative
+   * listing.
+   */
+  @InterfaceAudience.Private
+  public static class DirectoryEntries {
+    private final FileStatus[] entries;
+    private final byte[] token;
+    private final boolean hasMore;
+
+    public DirectoryEntries(FileStatus[] entries, byte[] token, boolean
+        hasMore) {
+      this.entries = entries;
+      if (token != null) {
+        this.token = token.clone();
+      } else {
+        this.token = null;
+      }
+      this.hasMore = hasMore;
+    }
+
+    public FileStatus[] getEntries() {
+      return entries;
+    }
+
+    public byte[] getToken() {
+      return token;
+    }
+
+    public boolean hasMore() {
+      return hasMore;
+    }
+  }
+
+  /**
+   * Given an opaque iteration token, return the next batch of entries in a
+   * directory. This is a private API not meant for use by end users.
+   * <p>
+   * This method should be overridden by FileSystem subclasses that want to
+   * use the generic {@link FileSystem#listStatusIterator(Path)} implementation.
+   * @param f Path to list
+   * @param token opaque iteration token returned by previous call, or null
+   *              if this is the first call.
+   * @return
+   * @throws FileNotFoundException
+   * @throws IOException
+   */
+  @InterfaceAudience.Private
+  protected DirectoryEntries listStatusBatch(Path f, byte[] token) throws
+      FileNotFoundException, IOException {
+    // The default implementation returns the entire listing as a single batch.
+    // Thus, there is never a second batch, and no need to respect the passed
+    // token or set a token in the returned DirectoryEntries.
+    FileStatus[] listing = listStatus(f);
+    return new DirectoryEntries(listing, null, false);
+  }
+
   /*
    * Filter files/directories in the given path using the user-supplied path
    * filter. Results are added to the given array <code>results</code>.
@@ -1766,6 +1828,49 @@ public abstract class FileSystem extends Configured implements Closeable {
     };
   }
 
+  /**
+   * Generic iterator for implementing {@link #listStatusIterator(Path)}.
+   */
+  private class DirListingIterator<T extends FileStatus> implements
+      RemoteIterator<T> {
+
+    private final Path path;
+    private DirectoryEntries entries;
+    private int i = 0;
+
+    DirListingIterator(Path path) {
+      this.path = path;
+    }
+
+    @Override
+    public boolean hasNext() throws IOException {
+      if (entries == null) {
+        fetchMore();
+      }
+      return i < entries.getEntries().length ||
+          entries.hasMore();
+    }
+
+    private void fetchMore() throws IOException {
+      byte[] token = null;
+      if (entries != null) {
+        token = entries.getToken();
+      }
+      entries = listStatusBatch(path, token);
+      i = 0;
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public T next() throws IOException {
+      Preconditions.checkState(hasNext(), "No more items in iterator");
+      if (i == entries.getEntries().length) {
+        fetchMore();
+      }
+      return (T)entries.getEntries()[i++];
+    }
+  }
+
   /**
    * Returns a remote iterator so that followup calls are made on demand
    * while consuming the entries. Each file system implementation should
@@ -1779,23 +1884,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    */
   public RemoteIterator<FileStatus> listStatusIterator(final Path p)
   throws FileNotFoundException, IOException {
-    return new RemoteIterator<FileStatus>() {
-      private final FileStatus[] stats = listStatus(p);
-      private int i = 0;
-
-      @Override
-      public boolean hasNext() {
-        return i<stats.length;
-      }
-
-      @Override
-      public FileStatus next() throws IOException {
-        if (!hasNext()) {
-          throw new NoSuchElementException("No more entry in " + p);
-        }
-        return stats[i++];
-      }
-    };
+    return new DirListingIterator<>(p);
   }
 
   /**
@@ -2855,7 +2944,8 @@ public abstract class FileSystem extends Configured implements Closeable {
         }
         fs.key = key;
         map.put(key, fs);
-        if (conf.getBoolean("fs.automatic.close", true)) {
+        if (conf.getBoolean(
+            FS_AUTOMATIC_CLOSE_KEY, FS_AUTOMATIC_CLOSE_DEFAULT)) {
           toAutoClose.add(key);
         }
         return fs;

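Note: the FileSystem changes above replace the eager anonymous iterator with DirListingIterator, which pulls entries in batches through the new listStatusBatch hook (the default implementation still returns everything in one batch). Caller code is unchanged; a minimal usage sketch with a placeholder path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListingSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Entries now arrive batch by batch via listStatusBatch(); subclasses that
    // override it can page large directories instead of listing them at once.
    RemoteIterator<FileStatus> it = fs.listStatusIterator(new Path("/tmp"));
    while (it.hasNext()) {
      FileStatus status = it.next();
      System.out.println(status.getPath() + "\t" + status.getLen());
    }
  }
}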
+ 7 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataOutputStream.java

@@ -28,8 +28,14 @@ public class CryptoFSDataOutputStream extends FSDataOutputStream {
   
   public CryptoFSDataOutputStream(FSDataOutputStream out, CryptoCodec codec,
       int bufferSize, byte[] key, byte[] iv) throws IOException {
+    this(out, codec, bufferSize, key, iv, true);
+  }
+
+  public CryptoFSDataOutputStream(FSDataOutputStream out, CryptoCodec codec,
+      int bufferSize, byte[] key, byte[] iv, boolean closeOutputStream)
+      throws IOException {
     super(new CryptoOutputStream(out, codec, bufferSize, key, iv, 
-        out.getPos()), null, out.getPos()); 
+        out.getPos(), closeOutputStream), null, out.getPos());
     this.fsOut = out;
   }
   

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java

@@ -105,13 +105,13 @@ public class FTPFileSystem extends FileSystem {
     // get port information from uri, (overrides info in conf)
     int port = uri.getPort();
     port = (port == -1) ? FTP.DEFAULT_PORT : port;
-    conf.setInt("fs.ftp.host.port", port);
+    conf.setInt(FS_FTP_HOST_PORT, port);
 
     // get user/password information from URI (overrides info in conf)
     String userAndPassword = uri.getUserInfo();
     if (userAndPassword == null) {
-      userAndPassword = (conf.get("fs.ftp.user." + host, null) + ":" + conf
-          .get("fs.ftp.password." + host, null));
+      userAndPassword = (conf.get(FS_FTP_USER_PREFIX + host, null) + ":" + conf
+          .get(FS_FTP_PASSWORD_PREFIX + host, null));
     }
     String[] userPasswdInfo = userAndPassword.split(":");
     Preconditions.checkState(userPasswdInfo.length > 1,

+ 4 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java

@@ -46,6 +46,8 @@ import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
 import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
 
@@ -497,7 +499,8 @@ abstract class CommandWithDestination extends FsCommand {
                         FsPermission.getFileDefault().applyUMask(
                             FsPermission.getUMask(getConf())),
                         createFlags,
-                        getConf().getInt("io.file.buffer.size", 4096),
+                        getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
+                            IO_FILE_BUFFER_SIZE_DEFAULT),
                         lazyPersist ? 1 : getDefaultReplication(item.path),
                         getDefaultBlockSize(),
                         null,

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -83,7 +83,7 @@ import org.apache.hadoop.util.Time;
  * ViewFs is specified with the following URI: <b>viewfs:///</b> 
  * <p>
  * To use viewfs one would typically set the default file system in the
- * config  (i.e. fs.default.name< = viewfs:///) along with the
+ * config  (i.e. fs.defaultFS < = viewfs:///) along with the
  * mount table config variables as described below. 
  * 
  * <p>

+ 9 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BloomMapFile.java

@@ -37,6 +37,11 @@ import org.apache.hadoop.util.bloom.Filter;
 import org.apache.hadoop.util.bloom.Key;
 import org.apache.hadoop.util.hash.Hash;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_ERROR_RATE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_ERROR_RATE_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAPFILE_BLOOM_SIZE_KEY;
+
 /**
  * This class extends {@link MapFile} and provides very much the same
  * functionality. However, it uses dynamic Bloom filters to provide
@@ -159,13 +164,15 @@ public class BloomMapFile {
     }
 
     private synchronized void initBloomFilter(Configuration conf) {
-      numKeys = conf.getInt("io.mapfile.bloom.size", 1024 * 1024);
+      numKeys = conf.getInt(
+          IO_MAPFILE_BLOOM_SIZE_KEY, IO_MAPFILE_BLOOM_SIZE_DEFAULT);
       // vector size should be <code>-kn / (ln(1 - c^(1/k)))</code> bits for
       // single key, where <code> is the number of hash functions,
       // <code>n</code> is the number of keys and <code>c</code> is the desired
       // max. error rate.
       // Our desired error rate is by default 0.005, i.e. 0.5%
-      float errorRate = conf.getFloat("io.mapfile.bloom.error.rate", 0.005f);
+      float errorRate = conf.getFloat(
+          IO_MAPFILE_BLOOM_ERROR_RATE_KEY, IO_MAPFILE_BLOOM_ERROR_RATE_DEFAULT);
       vectorSize = (int)Math.ceil((double)(-HASH_COUNT * numKeys) /
           Math.log(1.0 - Math.pow(errorRate, 1.0/HASH_COUNT)));
       bloomFilter = new DynamicBloomFilter(vectorSize, HASH_COUNT,

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ElasticByteBufferPool.java

@@ -85,7 +85,7 @@ public final class ElasticByteBufferPool implements ByteBufferPool {
   private final TreeMap<Key, ByteBuffer> getBufferTree(boolean direct) {
     return direct ? directBuffers : buffers;
   }
-  
+
   @Override
   public synchronized ByteBuffer getBuffer(boolean direct, int length) {
     TreeMap<Key, ByteBuffer> tree = getBufferTree(direct);

+ 7 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java

@@ -38,6 +38,9 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ChunkedArrayList;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
 /**
  * An utility class for I/O related functionality. 
  */
@@ -105,7 +108,8 @@ public class IOUtils {
    */
   public static void copyBytes(InputStream in, OutputStream out, Configuration conf)
     throws IOException {
-    copyBytes(in, out, conf.getInt("io.file.buffer.size", 4096), true);
+    copyBytes(in, out, conf.getInt(
+        IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT), true);
   }
   
   /**
@@ -119,7 +123,8 @@ public class IOUtils {
    */
   public static void copyBytes(InputStream in, OutputStream out, Configuration conf, boolean close)
     throws IOException {
-    copyBytes(in, out, conf.getInt("io.file.buffer.size", 4096),  close);
+    copyBytes(in, out, conf.getInt(
+        IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT),  close);
   }
 
   /**

+ 7 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java

@@ -38,6 +38,9 @@ import org.apache.hadoop.util.Options;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_KEY;
+
 /** A file-based map from keys to values.
  * 
  * <p>A map is a directory containing two files, the <code>data</code> file,
@@ -395,7 +398,8 @@ public class MapFile {
         Options.getOption(ComparatorOption.class, opts);
       WritableComparator comparator =
         comparatorOption == null ? null : comparatorOption.getValue();
-      INDEX_SKIP = conf.getInt("io.map.index.skip", 0);
+      INDEX_SKIP = conf.getInt(
+          IO_MAP_INDEX_SKIP_KEY, IO_MAP_INDEX_SKIP_DEFAULT);
       open(dir, comparator, conf, opts);
     }
  
@@ -990,8 +994,8 @@ public class MapFile {
             reader.getKeyClass().asSubclass(WritableComparable.class),
             reader.getValueClass());
 
-      WritableComparable key = ReflectionUtils.newInstance(reader.getKeyClass()
-        .asSubclass(WritableComparable.class), conf);
+      WritableComparable<?> key = ReflectionUtils.newInstance(
+          reader.getKeyClass().asSubclass(WritableComparable.class), conf);
       Writable value = ReflectionUtils.newInstance(reader.getValueClass()
         .asSubclass(Writable.class), conf);
 

+ 13 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java

@@ -51,6 +51,13 @@ import org.apache.hadoop.util.MergeSort;
 import org.apache.hadoop.util.PriorityQueue;
 import org.apache.hadoop.util.Time;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SEQFILE_COMPRESS_BLOCKSIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SEQFILE_COMPRESS_BLOCKSIZE_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSUM_ERRORS_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_SKIP_CHECKSUM_ERRORS_KEY;
+
 /** 
  * <code>SequenceFile</code>s are flat files consisting of binary key/value 
  * pairs.
@@ -1513,7 +1520,9 @@ public class SequenceFile {
                         Option... options) throws IOException {
       super(conf, options);
       compressionBlockSize = 
-        conf.getInt("io.seqfile.compress.blocksize", 1000000);
+        conf.getInt(IO_SEQFILE_COMPRESS_BLOCKSIZE_KEY,
+            IO_SEQFILE_COMPRESS_BLOCKSIZE_DEFAULT
+        );
       keySerializer.close();
       keySerializer.open(keyBuffer);
       uncompressedValSerializer.close();
@@ -1637,7 +1646,7 @@ public class SequenceFile {
 
   /** Get the configured buffer size */
   private static int getBufferSize(Configuration conf) {
-    return conf.getInt("io.file.buffer.size", 4096);
+    return conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
   }
 
   /** Reads key/value pairs from a sequence-format file. */
@@ -2655,7 +2664,8 @@ public class SequenceFile {
 
     private void handleChecksumException(ChecksumException e)
       throws IOException {
-      if (this.conf.getBoolean("io.skip.checksum.errors", false)) {
+      if (this.conf.getBoolean(
+          IO_SKIP_CHECKSUM_ERRORS_KEY, IO_SKIP_CHECKSUM_ERRORS_DEFAULT)) {
         LOG.warn("Bad checksum at "+getPosition()+". Skipping entries.");
         sync(getPosition()+this.conf.getInt("io.bytes.per.checksum", 512));
       } else {

+ 7 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/BZip2Codec.java

@@ -35,6 +35,9 @@ import org.apache.hadoop.io.compress.bzip2.CBZip2InputStream;
 import org.apache.hadoop.io.compress.bzip2.CBZip2OutputStream;
 import org.apache.hadoop.io.compress.bzip2.Bzip2Factory;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
 /**
  * This class provides output and input streams for bzip2 compression
  * and decompression.  It uses the native bzip2 library on the system
@@ -120,7 +123,8 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
       Compressor compressor) throws IOException {
     return Bzip2Factory.isNativeBzip2Loaded(conf) ?
       new CompressorStream(out, compressor, 
-                           conf.getInt("io.file.buffer.size", 4*1024)) :
+                           conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+                                   IO_FILE_BUFFER_SIZE_DEFAULT)) :
       new BZip2CompressionOutputStream(out);
   }
 
@@ -174,7 +178,8 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
       Decompressor decompressor) throws IOException {
     return Bzip2Factory.isNativeBzip2Loaded(conf) ? 
       new DecompressorStream(in, decompressor,
-                             conf.getInt("io.file.buffer.size", 4*1024)) :
+                             conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+                                 IO_FILE_BUFFER_SIZE_DEFAULT)) :
       new BZip2CompressionInputStream(in);
   }
 

+ 7 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/DefaultCodec.java

@@ -31,6 +31,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
 import org.apache.hadoop.io.compress.zlib.ZlibFactory;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class DefaultCodec implements Configurable, CompressionCodec, DirectDecompressionCodec {
@@ -60,7 +63,8 @@ public class DefaultCodec implements Configurable, CompressionCodec, DirectDecom
                                                     Compressor compressor) 
   throws IOException {
     return new CompressorStream(out, compressor, 
-                                conf.getInt("io.file.buffer.size", 4*1024));
+                                conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+                                        IO_FILE_BUFFER_SIZE_DEFAULT));
   }
 
   @Override
@@ -85,7 +89,8 @@ public class DefaultCodec implements Configurable, CompressionCodec, DirectDecom
                                                   Decompressor decompressor) 
   throws IOException {
     return new DecompressorStream(in, decompressor, 
-                                  conf.getInt("io.file.buffer.size", 4*1024));
+                                  conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+                                      IO_FILE_BUFFER_SIZE_DEFAULT));
   }
 
   @Override

+ 6 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java

@@ -27,6 +27,8 @@ import org.apache.hadoop.io.compress.DefaultCodec;
 import org.apache.hadoop.io.compress.zlib.*;
 import org.apache.hadoop.io.compress.zlib.ZlibDecompressor.ZlibDirectDecompressor;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
 /**
@@ -172,8 +174,8 @@ public class GzipCodec extends DefaultCodec {
   throws IOException {
     return (compressor != null) ?
                new CompressorStream(out, compressor,
-                                    conf.getInt("io.file.buffer.size", 
-                                                4*1024)) :
+                                    conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+                                            IO_FILE_BUFFER_SIZE_DEFAULT)) :
                createOutputStream(out);
   }
 
@@ -206,7 +208,8 @@ public class GzipCodec extends DefaultCodec {
       decompressor = createDecompressor();  // always succeeds (or throws)
     }
     return new DecompressorStream(in, decompressor,
-                                  conf.getInt("io.file.buffer.size", 4*1024));
+                                  conf.getInt(IO_FILE_BUFFER_SIZE_KEY,
+                                      IO_FILE_BUFFER_SIZE_DEFAULT));
   }
 
   @Override

+ 22 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java

@@ -29,6 +29,9 @@ public class ECChunk {
 
   private ByteBuffer chunkBuffer;
 
+  // TODO: should be in a more general flags
+  private boolean allZero = false;
+
   /**
    * Wrapping a ByteBuffer
    * @param buffer buffer to be wrapped by the chunk
@@ -37,6 +40,13 @@ public class ECChunk {
     this.chunkBuffer = buffer;
   }
 
+  public ECChunk(ByteBuffer buffer, int offset, int len) {
+    ByteBuffer tmp = buffer.duplicate();
+    tmp.position(offset);
+    tmp.limit(offset + len);
+    this.chunkBuffer = tmp.slice();
+  }
+
   /**
    * Wrapping a bytes array
    * @param buffer buffer to be wrapped by the chunk
@@ -45,6 +55,18 @@ public class ECChunk {
     this.chunkBuffer = ByteBuffer.wrap(buffer);
   }
 
+  public ECChunk(byte[] buffer, int offset, int len) {
+    this.chunkBuffer = ByteBuffer.wrap(buffer, offset, len);
+  }
+
+  public boolean isAllZero() {
+    return allZero;
+  }
+
+  public void setAllZero(boolean allZero) {
+    this.allZero = allZero;
+  }
+
   /**
    * Convert to ByteBuffer
    * @return ByteBuffer

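A short, hedged sketch of the new ECChunk API introduced above: the (buffer, offset, len) constructors wrap a slice without copying, and the allZero flag lets a decoder treat the chunk as zeros (CoderUtil resets such buffers, as the following hunk shows). The class name and sizes here are illustrative only:

```java
import java.nio.ByteBuffer;

import org.apache.hadoop.io.erasurecode.ECChunk;

public class ECChunkSliceExample {
  public static void main(String[] args) {
    byte[] cell = new byte[64 * 1024];
    // Wrap the middle 16 KB of the array as a chunk without copying it.
    ECChunk chunk = new ECChunk(cell, 16 * 1024, 16 * 1024);

    // Mark the chunk as all-zero so a decoder can reset the backing buffer
    // instead of trusting stale contents.
    chunk.setAllZero(true);

    ByteBuffer bb = chunk.getBuffer();
    System.out.println("remaining=" + bb.remaining()
        + " allZero=" + chunk.isAllZero());
  }
}
```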
+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/CoderUtil.java

@@ -115,6 +115,9 @@ final class CoderUtil {
         buffers[i] = null;
       } else {
         buffers[i] = chunk.getBuffer();
+        if (chunk.isAllZero()) {
+          CoderUtil.resetBuffer(buffers[i], buffers[i].remaining());
+        }
       }
     }
 

+ 10 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/Compression.java

@@ -36,6 +36,10 @@ import org.apache.hadoop.io.compress.Decompressor;
 import org.apache.hadoop.io.compress.DefaultCodec;
 import org.apache.hadoop.util.ReflectionUtils;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
 /**
  * Compression related stuff.
  */
@@ -124,7 +128,8 @@ final class Compression {
         } else {
           bis1 = downStream;
         }
-        conf.setInt("io.compression.codec.lzo.buffersize", 64 * 1024);
+        conf.setInt(IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY,
+            IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT);
         CompressionInputStream cis =
             codec.createInputStream(bis1, decompressor);
         BufferedInputStream bis2 = new BufferedInputStream(cis, DATA_IBUF_SIZE);
@@ -146,7 +151,8 @@ final class Compression {
         } else {
           bos1 = downStream;
         }
-        conf.setInt("io.compression.codec.lzo.buffersize", 64 * 1024);
+        conf.setInt(IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_KEY,
+            IO_COMPRESSION_CODEC_LZO_BUFFERSIZE_DEFAULT);
         CompressionOutputStream cos =
             codec.createOutputStream(bos1, compressor);
         BufferedOutputStream bos2 =
@@ -175,7 +181,7 @@ final class Compression {
           int downStreamBufferSize) throws IOException {
         // Set the internal buffer size to read from down stream.
         if (downStreamBufferSize > 0) {
-          codec.getConf().setInt("io.file.buffer.size", downStreamBufferSize);
+          codec.getConf().setInt(IO_FILE_BUFFER_SIZE_KEY, downStreamBufferSize);
         }
         CompressionInputStream cis =
             codec.createInputStream(downStream, decompressor);
@@ -193,7 +199,7 @@ final class Compression {
         } else {
           bos1 = downStream;
         }
-        codec.getConf().setInt("io.file.buffer.size", 32 * 1024);
+        codec.getConf().setInt(IO_FILE_BUFFER_SIZE_KEY, 32 * 1024);
         CompressionOutputStream cos =
             codec.createOutputStream(bos1, compressor);
         BufferedOutputStream bos2 =

+ 14 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java

@@ -183,6 +183,20 @@ public class RetryPolicies {
       return new RetryAction(RetryAction.RetryDecision.FAIL, 0, "try once " +
           "and fail.");
     }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (obj == this) {
+        return true;
+      } else {
+        return obj != null && obj.getClass() == this.getClass();
+      }
+    }
+
+    @Override
+    public int hashCode() {
+      return this.getClass().hashCode();
+    }
   }
 
   static class RetryForever implements RetryPolicy {

+ 78 - 40
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java

@@ -22,6 +22,7 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.retry.RetryPolicies.MultipleLinearRandomRetry;
 import org.apache.hadoop.ipc.RemoteException;
 
 import com.google.protobuf.ServiceException;
@@ -79,48 +80,85 @@ public class RetryUtils {
       //no retry
       return RetryPolicies.TRY_ONCE_THEN_FAIL;
     } else {
-      return new RetryPolicy() {
-        @Override
-        public RetryAction shouldRetry(Exception e, int retries, int failovers,
-            boolean isMethodIdempotent) throws Exception {
-          if (e instanceof ServiceException) {
-            //unwrap ServiceException
-            final Throwable cause = e.getCause();
-            if (cause != null && cause instanceof Exception) {
-              e = (Exception)cause;
-            }
-          }
-
-          //see (1) and (2) in the javadoc of this method.
-          final RetryPolicy p;
-          if (e instanceof RetriableException
-              || RetryPolicies.getWrappedRetriableException(e) != null) {
-            // RetriableException or RetriableException wrapped
-            p = multipleLinearRandomRetry;
-          } else if (e instanceof RemoteException) {
-            final RemoteException re = (RemoteException)e;
-            p = remoteExceptionToRetry.equals(re.getClassName())?
-                multipleLinearRandomRetry: RetryPolicies.TRY_ONCE_THEN_FAIL;
-          } else if (e instanceof IOException || e instanceof ServiceException) {
-            p = multipleLinearRandomRetry;
-          } else { //non-IOException
-            p = RetryPolicies.TRY_ONCE_THEN_FAIL;
-          }
-
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("RETRY " + retries + ") policy="
-                + p.getClass().getSimpleName() + ", exception=" + e);
-          }
-          return p.shouldRetry(e, retries, failovers, isMethodIdempotent);
-        }
+      return new WrapperRetryPolicy(
+          (MultipleLinearRandomRetry) multipleLinearRandomRetry,
+          remoteExceptionToRetry);
+    }
+  }
+
+  private static final class WrapperRetryPolicy implements RetryPolicy {
+    private MultipleLinearRandomRetry multipleLinearRandomRetry;
+    private String remoteExceptionToRetry;
 
-        @Override
-        public String toString() {
-          return "RetryPolicy[" + multipleLinearRandomRetry + ", "
-              + RetryPolicies.TRY_ONCE_THEN_FAIL.getClass().getSimpleName()
-              + "]";
+    private WrapperRetryPolicy(
+        final MultipleLinearRandomRetry multipleLinearRandomRetry,
+        final String remoteExceptionToRetry) {
+      this.multipleLinearRandomRetry = multipleLinearRandomRetry;
+      this.remoteExceptionToRetry = remoteExceptionToRetry;
+    }
+
+    @Override
+    public RetryAction shouldRetry(Exception e, int retries, int failovers,
+        boolean isMethodIdempotent) throws Exception {
+      if (e instanceof ServiceException) {
+        //unwrap ServiceException
+        final Throwable cause = e.getCause();
+        if (cause != null && cause instanceof Exception) {
+          e = (Exception)cause;
         }
-      };
+      }
+
+      //see (1) and (2) in the javadoc of this method.
+      final RetryPolicy p;
+      if (e instanceof RetriableException
+          || RetryPolicies.getWrappedRetriableException(e) != null) {
+        // RetriableException or RetriableException wrapped
+        p = multipleLinearRandomRetry;
+      } else if (e instanceof RemoteException) {
+        final RemoteException re = (RemoteException)e;
+        p = re.getClassName().equals(remoteExceptionToRetry)
+            ? multipleLinearRandomRetry : RetryPolicies.TRY_ONCE_THEN_FAIL;
+      } else if (e instanceof IOException || e instanceof ServiceException) {
+        p = multipleLinearRandomRetry;
+      } else { //non-IOException
+        p = RetryPolicies.TRY_ONCE_THEN_FAIL;
+      }
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("RETRY " + retries + ") policy="
+            + p.getClass().getSimpleName() + ", exception=" + e);
+      }
+      return p.shouldRetry(e, retries, failovers, isMethodIdempotent);
+    }
+
+    /**
+     * remoteExceptionToRetry is ignored as part of equals since it does not
+     * affect connection failure handling.
+     */
+    @Override
+    public boolean equals(final Object obj) {
+      if (obj == this) {
+        return true;
+      } else {
+        return (obj instanceof WrapperRetryPolicy)
+            && this.multipleLinearRandomRetry
+                .equals(((WrapperRetryPolicy) obj).multipleLinearRandomRetry);
+      }
+    }
+
+    /**
+     * Similarly, remoteExceptionToRetry is ignored as part of hashCode since it
+     * does not affect connection failure handling.
+     */
+    @Override
+    public int hashCode() {
+      return multipleLinearRandomRetry.hashCode();
+    }
+
+    @Override
+    public String toString() {
+      return "RetryPolicy[" + multipleLinearRandomRetry + ", "
+          + RetryPolicies.TRY_ONCE_THEN_FAIL.getClass().getSimpleName() + "]";
     }
   }
 

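The equals()/hashCode() additions in RetryPolicies and the named WrapperRetryPolicy above exist so that equivalent retry policies can compare equal; IPC connections are cached by a key that includes the retry policy, and the old anonymous class only had identity equality. A hedged sketch, assuming the six-argument RetryUtils.getDefaultRetryPolicy overload; the keys, spec and exception name are placeholders:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryUtils;

public class RetryPolicyEqualityExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical keys, spec and exception name, purely for illustration.
    RetryPolicy p1 = RetryUtils.getDefaultRetryPolicy(conf,
        "example.retry.policy.enabled", true,
        "example.retry.policy.spec", "10000,6,60000,10",
        "org.example.SafeModeException");
    RetryPolicy p2 = RetryUtils.getDefaultRetryPolicy(conf,
        "example.retry.policy.enabled", true,
        "example.retry.policy.spec", "10000,6,60000,10",
        "org.example.SafeModeException");
    // With value-based equality on the wrapped policy, equivalent policies
    // are expected to compare equal, allowing cached connections keyed
    // (in part) on the retry policy to be reused.
    System.out.println(p1.equals(p2));
    System.out.println(p1.hashCode() == p2.hashCode());
  }
}
```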
+ 128 - 50
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
@@ -413,8 +414,8 @@ public class Client implements AutoCloseable {
     private SaslRpcClient saslRpcClient;
     
     private Socket socket = null;                 // connected socket
-    private DataInputStream in;
-    private DataOutputStream out;
+    private IpcStreams ipcStreams;
+    private final int maxResponseLength;
     private final int rpcTimeout;
     private int maxIdleTime; //connections will be culled if it was idle for 
     //maxIdleTime msecs
@@ -426,8 +427,8 @@ public class Client implements AutoCloseable {
     private final boolean doPing; //do we need to send ping message
     private final int pingInterval; // how often sends ping to the server
     private final int soTimeout; // used by ipc ping and rpc timeout
-    private ResponseBuffer pingRequest; // ping message
-    
+    private byte[] pingRequest; // ping message
+
     // currently active calls
     private Hashtable<Integer, Call> calls = new Hashtable<Integer, Call>();
     private AtomicLong lastActivity = new AtomicLong();// last I/O activity time
@@ -446,6 +447,9 @@ public class Client implements AutoCloseable {
             0,
             new UnknownHostException());
       }
+      this.maxResponseLength = remoteId.conf.getInt(
+          CommonConfigurationKeys.IPC_MAXIMUM_RESPONSE_LENGTH,
+          CommonConfigurationKeys.IPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT);
       this.rpcTimeout = remoteId.getRpcTimeout();
       this.maxIdleTime = remoteId.getMaxIdleTime();
       this.connectionRetryPolicy = remoteId.connectionRetryPolicy;
@@ -456,12 +460,13 @@ public class Client implements AutoCloseable {
       this.doPing = remoteId.getDoPing();
       if (doPing) {
         // construct a RPC header with the callId as the ping callId
-        pingRequest = new ResponseBuffer();
+        ResponseBuffer buf = new ResponseBuffer();
         RpcRequestHeaderProto pingHeader = ProtoUtil
             .makeRpcRequestHeader(RpcKind.RPC_PROTOCOL_BUFFER,
                 OperationProto.RPC_FINAL_PACKET, PING_CALL_ID,
                 RpcConstants.INVALID_RETRY_COUNT, clientId);
-        pingHeader.writeDelimitedTo(pingRequest);
+        pingHeader.writeDelimitedTo(buf);
+        pingRequest = buf.toByteArray();
       }
       this.pingInterval = remoteId.getPingInterval();
       if (rpcTimeout > 0) {
@@ -596,15 +601,15 @@ public class Client implements AutoCloseable {
       }
       return false;
     }
-    
-    private synchronized AuthMethod setupSaslConnection(final InputStream in2, 
-        final OutputStream out2) throws IOException {
+
+    private synchronized AuthMethod setupSaslConnection(IpcStreams streams)
+        throws IOException {
       // Do not use Client.conf here! We must use ConnectionId.conf, since the
       // Client object is cached and shared between all RPC clients, even those
       // for separate services.
       saslRpcClient = new SaslRpcClient(remoteId.getTicket(),
           remoteId.getProtocol(), remoteId.getAddress(), remoteId.conf);
-      return saslRpcClient.saslConnect(in2, out2);
+      return saslRpcClient.saslConnect(streams);
     }
 
     /**
@@ -770,12 +775,9 @@ public class Client implements AutoCloseable {
         Random rand = null;
         while (true) {
           setupConnection();
-          InputStream inStream = NetUtils.getInputStream(socket);
-          OutputStream outStream = NetUtils.getOutputStream(socket);
-          writeConnectionHeader(outStream);
+          ipcStreams = new IpcStreams(socket, maxResponseLength);
+          writeConnectionHeader(ipcStreams);
           if (authProtocol == AuthProtocol.SASL) {
-            final InputStream in2 = inStream;
-            final OutputStream out2 = outStream;
             UserGroupInformation ticket = remoteId.getTicket();
             if (ticket.getRealUser() != null) {
               ticket = ticket.getRealUser();
@@ -786,7 +788,7 @@ public class Client implements AutoCloseable {
                     @Override
                     public AuthMethod run()
                         throws IOException, InterruptedException {
-                      return setupSaslConnection(in2, out2);
+                      return setupSaslConnection(ipcStreams);
                     }
                   });
             } catch (IOException ex) {
@@ -805,8 +807,7 @@ public class Client implements AutoCloseable {
             }
             if (authMethod != AuthMethod.SIMPLE) {
               // Sasl connect is successful. Let's set up Sasl i/o streams.
-              inStream = saslRpcClient.getInputStream(inStream);
-              outStream = saslRpcClient.getOutputStream(outStream);
+              ipcStreams.setSaslClient(saslRpcClient);
               // for testing
               remoteId.saslQop =
                   (String)saslRpcClient.getNegotiatedProperty(Sasl.QOP);
@@ -825,18 +826,11 @@ public class Client implements AutoCloseable {
               }
             }
           }
-        
+
           if (doPing) {
-            inStream = new PingInputStream(inStream);
+            ipcStreams.setInputStream(new PingInputStream(ipcStreams.in));
           }
-          this.in = new DataInputStream(new BufferedInputStream(inStream));
 
-          // SASL may have already buffered the stream
-          if (!(outStream instanceof BufferedOutputStream)) {
-            outStream = new BufferedOutputStream(outStream);
-          }
-          this.out = new DataOutputStream(outStream);
-          
           writeConnectionContext(remoteId, authMethod);
 
           // update last activity time
@@ -950,17 +944,28 @@ public class Client implements AutoCloseable {
      * |  AuthProtocol (1 byte)           |      
      * +----------------------------------+
      */
-    private void writeConnectionHeader(OutputStream outStream)
+    private void writeConnectionHeader(IpcStreams streams)
         throws IOException {
-      DataOutputStream out = new DataOutputStream(new BufferedOutputStream(outStream));
-      // Write out the header, version and authentication method
-      out.write(RpcConstants.HEADER.array());
-      out.write(RpcConstants.CURRENT_VERSION);
-      out.write(serviceClass);
-      out.write(authProtocol.callId);
-      out.flush();
+      // Write out the header, version and authentication method.
+      // The output stream is buffered but we must not flush it yet.  The
+      // connection setup protocol requires the client to send multiple
+      // messages before reading a response.
+      //
+      //   insecure: send header+context+call, read
+      //   secure  : send header+negotiate, read, (sasl), context+call, read
+      //
+      // The client must flush only when it's prepared to read.  Otherwise
+      // "broken pipe" exceptions occur if the server closes the connection
+      // before all messages are sent.
+      final DataOutputStream out = streams.out;
+      synchronized (out) {
+        out.write(RpcConstants.HEADER.array());
+        out.write(RpcConstants.CURRENT_VERSION);
+        out.write(serviceClass);
+        out.write(authProtocol.callId);
+      }
     }
-    
+
     /* Write the connection context header for each connection
      * Out is not synchronized because only the first thread does this.
      */
@@ -976,12 +981,17 @@ public class Client implements AutoCloseable {
           .makeRpcRequestHeader(RpcKind.RPC_PROTOCOL_BUFFER,
               OperationProto.RPC_FINAL_PACKET, CONNECTION_CONTEXT_CALL_ID,
               RpcConstants.INVALID_RETRY_COUNT, clientId);
+      // do not flush.  the context and first ipc call request must be sent
+      // together to avoid possibility of broken pipes upon authz failure.
+      // see writeConnectionHeader
       final ResponseBuffer buf = new ResponseBuffer();
       connectionContextHeader.writeDelimitedTo(buf);
       message.writeDelimitedTo(buf);
-      buf.writeTo(out);
+      synchronized (ipcStreams.out) {
+        ipcStreams.sendRequest(buf.toByteArray());
+      }
     }
-    
+
     /* wait till someone signals us to start reading RPC response or
      * it is idle too long, it is marked as to be closed, 
      * or the client is marked as not running.
@@ -1024,9 +1034,9 @@ public class Client implements AutoCloseable {
       long curTime = Time.now();
       if ( curTime - lastActivity.get() >= pingInterval) {
         lastActivity.set(curTime);
-        synchronized (out) {
-          pingRequest.writeTo(out);
-          out.flush();
+        synchronized (ipcStreams.out) {
+          ipcStreams.sendRequest(pingRequest);
+          ipcStreams.flush();
         }
       }
     }
@@ -1092,15 +1102,16 @@ public class Client implements AutoCloseable {
           @Override
           public void run() {
             try {
-              synchronized (Connection.this.out) {
+              synchronized (ipcStreams.out) {
                 if (shouldCloseConnection.get()) {
                   return;
                 }
                 if (LOG.isDebugEnabled()) {
                   LOG.debug(getName() + " sending #" + call.id);
                 }
-                buf.writeTo(out); // RpcRequestHeader + RpcRequest
-                out.flush();
+                // RpcRequestHeader + RpcRequest
+                ipcStreams.sendRequest(buf.toByteArray());
+                ipcStreams.flush();
               }
             } catch (IOException e) {
               // exception at this point would leave the connection in an
@@ -1141,10 +1152,7 @@ public class Client implements AutoCloseable {
       touch();
       
       try {
-        int totalLen = in.readInt();
-        ByteBuffer bb = ByteBuffer.allocate(totalLen);
-        in.readFully(bb.array());
-
+        ByteBuffer bb = ipcStreams.readResponse();
         RpcWritable.Buffer packet = RpcWritable.Buffer.wrap(bb);
         RpcResponseHeaderProto header =
             packet.getValue(RpcResponseHeaderProto.getDefaultInstance());
@@ -1209,8 +1217,7 @@ public class Client implements AutoCloseable {
       connections.remove(remoteId, this);
 
       // close the streams and therefore the socket
-      IOUtils.closeStream(out);
-      IOUtils.closeStream(in);
+      IOUtils.closeStream(ipcStreams);
       disposeSasl();
 
       // clean up all calls
@@ -1739,4 +1746,75 @@ public class Client implements AutoCloseable {
   public void close() throws Exception {
     stop();
   }
+
+  /** Manages the input and output streams for an IPC connection.
+   *  Only exposed for use by SaslRpcClient.
+   */
+  @InterfaceAudience.Private
+  public static class IpcStreams implements Closeable, Flushable {
+    private DataInputStream in;
+    public DataOutputStream out;
+    private int maxResponseLength;
+    private boolean firstResponse = true;
+
+    IpcStreams(Socket socket, int maxResponseLength) throws IOException {
+      this.maxResponseLength = maxResponseLength;
+      setInputStream(
+          new BufferedInputStream(NetUtils.getInputStream(socket)));
+      setOutputStream(
+          new BufferedOutputStream(NetUtils.getOutputStream(socket)));
+    }
+
+    void setSaslClient(SaslRpcClient client) throws IOException {
+      setInputStream(client.getInputStream(in));
+      setOutputStream(client.getOutputStream(out));
+    }
+
+    private void setInputStream(InputStream is) {
+      this.in = (is instanceof DataInputStream)
+          ? (DataInputStream)is : new DataInputStream(is);
+    }
+
+    private void setOutputStream(OutputStream os) {
+      this.out = (os instanceof DataOutputStream)
+          ? (DataOutputStream)os : new DataOutputStream(os);
+    }
+
+    public ByteBuffer readResponse() throws IOException {
+      int length = in.readInt();
+      if (firstResponse) {
+        firstResponse = false;
+        // pre-rpcv9 exception, almost certainly a version mismatch.
+        if (length == -1) {
+          in.readInt(); // ignore fatal/error status, it's fatal for us.
+          throw new RemoteException(WritableUtils.readString(in),
+                                    WritableUtils.readString(in));
+        }
+      }
+      if (length <= 0) {
+        throw new RpcException("RPC response has invalid length");
+      }
+      if (maxResponseLength > 0 && length > maxResponseLength) {
+        throw new RpcException("RPC response exceeds maximum data length");
+      }
+      ByteBuffer bb = ByteBuffer.allocate(length);
+      in.readFully(bb.array());
+      return bb;
+    }
+
+    public void sendRequest(byte[] buf) throws IOException {
+      out.write(buf);
+    }
+
+    @Override
+    public void flush() throws IOException {
+      out.flush();
+    }
+
+    @Override
+    public void close() {
+      IOUtils.closeStream(out);
+      IOUtils.closeStream(in);
+    }
+  }
 }

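A standalone sketch (plain JDK, no Hadoop types) of the framing check that IpcStreams.readResponse now performs before allocating a response buffer; the class name is invented and the constant simply mirrors the new ipc.maximum.response.length default:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class ResponseLengthGuardSketch {
  // Mirrors the new ipc.maximum.response.length default (128 MB).
  static final int MAX_RESPONSE_LENGTH = 128 * 1024 * 1024;

  static ByteBuffer readFrame(DataInputStream in) throws IOException {
    int length = in.readInt();                    // length-prefixed frame
    if (length <= 0) {
      throw new IOException("RPC response has invalid length");
    }
    if (length > MAX_RESPONSE_LENGTH) {           // reject before allocating
      throw new IOException("RPC response exceeds maximum data length");
    }
    ByteBuffer bb = ByteBuffer.allocate(length);
    in.readFully(bb.array());
    return bb;
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(baos);
    byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
    out.writeInt(payload.length);
    out.write(payload);
    out.flush();
    ByteBuffer bb = readFrame(new DataInputStream(
        new ByteArrayInputStream(baos.toByteArray())));
    System.out.println(new String(bb.array(), StandardCharsets.UTF_8));
  }
}
```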
+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java

@@ -60,7 +60,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   private static final ThreadLocal<AsyncGet<Message, Exception>>
       ASYNC_RETURN_MESSAGE = new ThreadLocal<>();
 
-  static { // Register the rpcRequest deserializer for WritableRpcEngine 
+  static { // Register the rpcRequest deserializer for ProtobufRpcEngine
     org.apache.hadoop.ipc.Server.registerProtocolEngine(
         RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcProtobufRequest.class,
         new Server.ProtoBufRpcInvoker());
@@ -194,7 +194,8 @@ public class ProtobufRpcEngine implements RpcEngine {
       }
       
       if (args.length != 2) { // RpcController + Message
-        throw new ServiceException("Too many parameters for request. Method: ["
+        throw new ServiceException(
+            "Too many or few parameters for request. Method: ["
             + method.getName() + "]" + ", Expected: 2, Actual: "
             + args.length);
       }

+ 7 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.ipc;
 
+import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.lang.reflect.Field;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Proxy;
@@ -26,7 +28,6 @@ import java.net.ConnectException;
 import java.net.InetSocketAddress;
 import java.net.NoRouteToHostException;
 import java.net.SocketTimeoutException;
-import java.io.*;
 import java.io.Closeable;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -37,11 +38,12 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import javax.net.SocketFactory;
 
-import org.apache.commons.logging.*;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.io.*;
+import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolInfoService;
@@ -54,7 +56,6 @@ import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.*;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Time;
 
@@ -87,7 +88,7 @@ public class RPC {
     RPC_WRITABLE ((short) 2),        // Use WritableRpcEngine 
     RPC_PROTOCOL_BUFFER ((short) 3); // Use ProtobufRpcEngine
     final static short MAX_INDEX = RPC_PROTOCOL_BUFFER.value; // used for array size
-    public final short value; //TODO make it private
+    private final short value;
 
     RpcKind(short val) {
       this.value = val;

+ 24 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -110,6 +110,7 @@ import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
@@ -237,14 +238,14 @@ public abstract class Server {
   static class RpcKindMapValue {
     final Class<? extends Writable> rpcRequestWrapperClass;
     final RpcInvoker rpcInvoker;
+
     RpcKindMapValue (Class<? extends Writable> rpcRequestWrapperClass,
           RpcInvoker rpcInvoker) {
       this.rpcInvoker = rpcInvoker;
       this.rpcRequestWrapperClass = rpcRequestWrapperClass;
     }   
   }
-  static Map<RPC.RpcKind, RpcKindMapValue> rpcKindMap = new
-      HashMap<RPC.RpcKind, RpcKindMapValue>(4);
+  static Map<RPC.RpcKind, RpcKindMapValue> rpcKindMap = new HashMap<>(4);
   
   
 
@@ -956,10 +957,16 @@ public abstract class Server {
             while (iter.hasNext()) {
               key = iter.next();
               iter.remove();
-              if (key.isValid()) {
+              try {
                 if (key.isReadable()) {
                   doRead(key);
                 }
+              } catch (CancelledKeyException cke) {
+                // something else closed the connection, ex. responder or
+                // the listener doing an idle scan.  ignore it and let them
+                // clean up.
+                LOG.info(Thread.currentThread().getName() +
+                    ": connection aborted from " + key.attachment());
               }
               key = null;
             }
@@ -969,6 +976,9 @@ public abstract class Server {
             }
           } catch (IOException ex) {
             LOG.error("Error in Reader", ex);
+          } catch (Throwable re) {
+            LOG.fatal("Bug in read selector!", re);
+            ExitUtil.terminate(1, "Bug in read selector!");
           }
         }
       }
@@ -1187,8 +1197,17 @@ public abstract class Server {
             SelectionKey key = iter.next();
             iter.remove();
             try {
-              if (key.isValid() && key.isWritable()) {
-                  doAsyncWrite(key);
+              if (key.isWritable()) {
+                doAsyncWrite(key);
+              }
+            } catch (CancelledKeyException cke) {
+              // something else closed the connection, ex. reader or the
+              // listener doing an idle scan.  ignore it and let them clean
+              // up
+              RpcCall call = (RpcCall)key.attachment();
+              if (call != null) {
+                LOG.info(Thread.currentThread().getName() +
+                    ": connection aborted from " + call.connection);
               }
             } catch (IOException e) {
               LOG.info(Thread.currentThread().getName() + ": doAsyncWrite threw exception " + e);

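The Reader and Responder changes above catch CancelledKeyException instead of pre-checking key.isValid(), because another thread can cancel the key between the check and the use. A small illustrative NIO sketch of why the catch is needed, using only JDK classes:

```java
import java.nio.channels.CancelledKeyException;
import java.nio.channels.Pipe;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;

public class CancelledKeySketch {
  public static void main(String[] args) throws Exception {
    Selector selector = Selector.open();
    Pipe pipe = Pipe.open();
    pipe.source().configureBlocking(false);
    SelectionKey key = pipe.source().register(selector, SelectionKey.OP_READ);

    key.cancel();                  // e.g. another thread closed the connection
    try {
      boolean readable = key.isReadable();   // the call the Reader loop makes
      System.out.println("readable=" + readable);
    } catch (CancelledKeyException cke) {
      // Matches the new handling above: log the aborted connection and let
      // whoever cancelled the key finish the cleanup.
      System.out.println("connection aborted: " + cke);
    }
    pipe.source().close();
    pipe.sink().close();
    selector.close();
  }
}
```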
+ 3 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocksSocketFactory.java

@@ -31,6 +31,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SOCKS_SERVER_KEY;
+
 /**
  * Specialized SocketFactory to create sockets with a SOCKS proxy
  */
@@ -133,7 +135,7 @@ public class SocksSocketFactory extends SocketFactory implements
   @Override
   public void setConf(Configuration conf) {
     this.conf = conf;
-    String proxyStr = conf.get("hadoop.socks.server");
+    String proxyStr = conf.get(HADOOP_SOCKS_SERVER_KEY);
     if ((proxyStr != null) && (proxyStr.length() > 0)) {
       setProxy(proxyStr);
     }

+ 9 - 16
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java

@@ -18,11 +18,9 @@
 
 package org.apache.hadoop.security;
 
-import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
-import java.io.DataOutputStream;
 import java.io.FilterInputStream;
 import java.io.FilterOutputStream;
 import java.io.IOException;
@@ -53,6 +51,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.GlobPattern;
+import org.apache.hadoop.ipc.Client.IpcStreams;
 import org.apache.hadoop.ipc.RPC.RpcKind;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.ResponseBuffer;
@@ -353,24 +352,16 @@ public class SaslRpcClient {
    * @return AuthMethod used to negotiate the connection
    * @throws IOException
    */
-  public AuthMethod saslConnect(InputStream inS, OutputStream outS)
-      throws IOException {
-    DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS));
-    DataOutputStream outStream = new DataOutputStream(new BufferedOutputStream(
-        outS));
-    
+  public AuthMethod saslConnect(IpcStreams ipcStreams) throws IOException {
     // redefined if/when a SASL negotiation starts, can be queried if the
     // negotiation fails
     authMethod = AuthMethod.SIMPLE;
 
-    sendSaslMessage(outStream, negotiateRequest);
-
+    sendSaslMessage(ipcStreams.out, negotiateRequest);
     // loop until sasl is complete or a rpc error occurs
     boolean done = false;
     do {
-      int rpcLen = inStream.readInt();
-      ByteBuffer bb = ByteBuffer.allocate(rpcLen);
-      inStream.readFully(bb.array());
+      ByteBuffer bb = ipcStreams.readResponse();
 
       RpcWritable.Buffer saslPacket = RpcWritable.Buffer.wrap(bb);
       RpcResponseHeaderProto header =
@@ -447,7 +438,7 @@ public class SaslRpcClient {
         }
       }
       if (response != null) {
-        sendSaslMessage(outStream, response.build());
+        sendSaslMessage(ipcStreams.out, response.build());
       }
     } while (!done);
     return authMethod;
@@ -461,8 +452,10 @@ public class SaslRpcClient {
     ResponseBuffer buf = new ResponseBuffer();
     saslHeader.writeDelimitedTo(buf);
     message.writeDelimitedTo(buf);
-    buf.writeTo(out);
-    out.flush();
+    synchronized (out) {
+      buf.writeTo(out);
+      out.flush();
+    }
   }
 
   /**

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -730,7 +730,7 @@ public class UserGroupInformation {
    * 
    * @param user                The principal name to load from the ticket
    *                            cache
-   * @param ticketCachePath     the path to the ticket cache file
+   * @param ticketCache     the path to the ticket cache file
    *
    * @throws IOException        if the kerberos login fails
    */
@@ -790,7 +790,7 @@ public class UserGroupInformation {
   /**
    * Create a UserGroupInformation from a Subject with Kerberos principal.
    *
-   * @param user                The KerberosPrincipal to use in UGI
+   * @param subject             The KerberosPrincipal to use in UGI
    *
    * @throws IOException        if the kerberos login fails
    */

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java

@@ -528,7 +528,7 @@ extends AbstractDelegationTokenIdentifier>
     DataInputStream in = new DataInputStream(buf);
     TokenIdent id = createIdentifier();
     id.readFields(in);
-    LOG.info("Token cancelation requested for identifier: "+id);
+    LOG.info("Token cancellation requested for identifier: " + id);
     
     if (id.getUser() == null) {
       throw new InvalidToken("Token with no owner");

+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/AutoCloseableLock.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.util;
 
 import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -135,4 +136,11 @@ public class AutoCloseableLock implements AutoCloseable {
     throw new UnsupportedOperationException();
   }
 
+  /**
+   * See {@link ReentrantLock#newCondition()}.
+   * @return the Condition object
+   */
+  public Condition newCondition() {
+    return lock.newCondition();
+  }
 }

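A hedged usage sketch for the new AutoCloseableLock#newCondition() above, assuming the class's existing no-argument constructor and acquire()/try-with-resources idiom; the names are illustrative:

```java
import java.util.concurrent.locks.Condition;

import org.apache.hadoop.util.AutoCloseableLock;

public class AutoCloseableLockConditionSketch {
  private final AutoCloseableLock lock = new AutoCloseableLock();
  private final Condition ready = lock.newCondition();
  private boolean done = false;

  void awaitDone() throws InterruptedException {
    try (AutoCloseableLock l = lock.acquire()) {
      while (!done) {
        ready.await();          // atomically releases the lock while waiting
      }
    }
  }

  void markDone() {
    try (AutoCloseableLock l = lock.acquire()) {
      done = true;
      ready.signalAll();
    }
  }
}
```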
+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java

@@ -27,6 +27,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
 /**
  * A class that provides a line reader from an input stream.
  * Depending on the constructor used, lines will either be terminated by:
@@ -89,7 +91,7 @@ public class LineReader implements Closeable {
    * @throws IOException
    */
   public LineReader(InputStream in, Configuration conf) throws IOException {
-    this(in, conf.getInt("io.file.buffer.size", DEFAULT_BUFFER_SIZE));
+    this(in, conf.getInt(IO_FILE_BUFFER_SIZE_KEY, DEFAULT_BUFFER_SIZE));
   }
 
   /**
@@ -136,7 +138,7 @@ public class LineReader implements Closeable {
   public LineReader(InputStream in, Configuration conf,
       byte[] recordDelimiterBytes) throws IOException {
     this.in = in;
-    this.bufferSize = conf.getInt("io.file.buffer.size", DEFAULT_BUFFER_SIZE);
+    this.bufferSize = conf.getInt(IO_FILE_BUFFER_SIZE_KEY, DEFAULT_BUFFER_SIZE);
     this.buffer = new byte[this.bufferSize];
     this.recordDelimiterBytes = recordDelimiterBytes;
   }

+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java

@@ -226,7 +226,7 @@ public class RunJar {
 
     unJar(file, workDir);
 
-    ClassLoader loader = createClassLoader(workDir);
+    ClassLoader loader = createClassLoader(file, workDir);
 
     Thread.currentThread().setContextClassLoader(loader);
     Class<?> mainClass = Class.forName(mainClassName, true, loader);
@@ -250,13 +250,14 @@ public class RunJar {
    * the user jar as well as the HADOOP_CLASSPATH. Otherwise, it creates a
    * classloader that simply adds the user jar to the classpath.
    */
-  private ClassLoader createClassLoader(final File workDir)
+  private ClassLoader createClassLoader(File file, final File workDir)
       throws MalformedURLException {
     ClassLoader loader;
     // see if the client classloader is enabled
     if (useClientClassLoader()) {
       StringBuilder sb = new StringBuilder();
       sb.append(workDir).append("/").
+          append(File.pathSeparator).append(file).
           append(File.pathSeparator).append(workDir).append("/classes/").
           append(File.pathSeparator).append(workDir).append("/lib/*");
       // HADOOP_CLASSPATH is added to the client classpath
@@ -276,6 +277,7 @@ public class RunJar {
     } else {
       List<URL> classPath = new ArrayList<>();
       classPath.add(new File(workDir + "/").toURI().toURL());
+      classPath.add(file.toURI().toURL());
       classPath.add(new File(workDir, "classes/").toURI().toURL());
       File[] libs = new File(workDir, "lib").listFiles();
       if (libs != null) {

+ 31 - 27
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoWindows.java

@@ -100,36 +100,40 @@ public class SysInfoWindows extends SysInfo {
       String sysInfoStr = getSystemInfoInfoFromShell();
       if (sysInfoStr != null) {
         final int sysInfoSplitCount = 11;
-        String[] sysInfo = sysInfoStr.substring(0, sysInfoStr.indexOf("\r\n"))
-            .split(",");
-        if (sysInfo.length == sysInfoSplitCount) {
-          try {
-            vmemSize = Long.parseLong(sysInfo[0]);
-            memSize = Long.parseLong(sysInfo[1]);
-            vmemAvailable = Long.parseLong(sysInfo[2]);
-            memAvailable = Long.parseLong(sysInfo[3]);
-            numProcessors = Integer.parseInt(sysInfo[4]);
-            cpuFrequencyKhz = Long.parseLong(sysInfo[5]);
-            cumulativeCpuTimeMs = Long.parseLong(sysInfo[6]);
-            storageBytesRead = Long.parseLong(sysInfo[7]);
-            storageBytesWritten = Long.parseLong(sysInfo[8]);
-            netBytesRead = Long.parseLong(sysInfo[9]);
-            netBytesWritten = Long.parseLong(sysInfo[10]);
-            if (lastCumCpuTimeMs != -1) {
-              /**
-               * This number will be the aggregated usage across all cores in
-               * [0.0, 100.0]. For example, it will be 400.0 if there are 8
-               * cores and each of them is running at 50% utilization.
-               */
-              cpuUsage = (cumulativeCpuTimeMs - lastCumCpuTimeMs)
-                  * 100F / refreshInterval;
+        int index = sysInfoStr.indexOf("\r\n");
+        if (index >= 0) {
+          String[] sysInfo = sysInfoStr.substring(0, index).split(",");
+          if (sysInfo.length == sysInfoSplitCount) {
+            try {
+              vmemSize = Long.parseLong(sysInfo[0]);
+              memSize = Long.parseLong(sysInfo[1]);
+              vmemAvailable = Long.parseLong(sysInfo[2]);
+              memAvailable = Long.parseLong(sysInfo[3]);
+              numProcessors = Integer.parseInt(sysInfo[4]);
+              cpuFrequencyKhz = Long.parseLong(sysInfo[5]);
+              cumulativeCpuTimeMs = Long.parseLong(sysInfo[6]);
+              storageBytesRead = Long.parseLong(sysInfo[7]);
+              storageBytesWritten = Long.parseLong(sysInfo[8]);
+              netBytesRead = Long.parseLong(sysInfo[9]);
+              netBytesWritten = Long.parseLong(sysInfo[10]);
+              if (lastCumCpuTimeMs != -1) {
+                /**
+                 * This number will be the aggregated usage across all cores in
+                 * [0.0, 100.0]. For example, it will be 400.0 if there are 8
+                 * cores and each of them is running at 50% utilization.
+                 */
+                cpuUsage = (cumulativeCpuTimeMs - lastCumCpuTimeMs)
+                    * 100F / refreshInterval;
+              }
+            } catch (NumberFormatException nfe) {
+              LOG.warn("Error parsing sysInfo", nfe);
             }
-          } catch (NumberFormatException nfe) {
-            LOG.warn("Error parsing sysInfo", nfe);
+          } else {
+            LOG.warn("Expected split length of sysInfo to be "
+                + sysInfoSplitCount + ". Got " + sysInfo.length);
           }
         } else {
-          LOG.warn("Expected split length of sysInfo to be "
-              + sysInfoSplitCount + ". Got " + sysInfo.length);
+          LOG.warn("Wrong output from sysInfo: " + sysInfoStr);
         }
       }
     }

+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/hash/Hash.java

@@ -22,6 +22,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_UTIL_HASH_TYPE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_UTIL_HASH_TYPE_KEY;
+
 /**
  * This class represents a common API for hashing functions.
  */
@@ -59,7 +62,8 @@ public abstract class Hash {
    * @return one of the predefined constants
    */
   public static int getHashType(Configuration conf) {
-    String name = conf.get("hadoop.util.hash.type", "murmur");
+    String name = conf.get(HADOOP_UTIL_HASH_TYPE_KEY,
+        HADOOP_UTIL_HASH_TYPE_DEFAULT);
     return parseHashType(name);
   }
   

+ 21 - 10
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -798,12 +798,6 @@
   </description>
 </property>
 
-<property>
-  <name>fs.swift.impl</name>
-  <value>org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem</value>
-  <description>The implementation class of the OpenStack Swift Filesystem</description>
-</property>
-
 <property>
   <name>fs.automatic.close</name>
   <value>true</value>
@@ -1313,10 +1307,19 @@
   <name>ipc.maximum.data.length</name>
   <value>67108864</value>
   <description>This indicates the maximum IPC message length (bytes) that can be
-    accepted by the server. Messages larger than this value are rejected by
-    server immediately. This setting should rarely need to be changed. It merits
-    investigating whether the cause of long RPC messages can be fixed instead,
-    e.g. by splitting into smaller messages.
+    accepted by the server. Messages larger than this value are rejected by
+    the server immediately to avoid possible OOMs. This setting should rarely
+    need to be changed.
+  </description>
+</property>
+
+<property>
+  <name>ipc.maximum.response.length</name>
+  <value>134217728</value>
+  <description>This indicates the maximum IPC message length (bytes) that can be
+    accepted by the client. Messages larger than this value are rejected
+    immediately to avoid possible OOMs. This setting should rarely need to be
+    changed.  Set to 0 to disable.
   </description>
 </property>
 
@@ -2034,6 +2037,14 @@
   </description>
 </property>
 
+<property>
+  <name>hadoop.security.key.provider.path</name>
+  <description>
+    The KeyProvider to use when managing zone keys, and interacting with
+    encryption keys when reading and writing to an encryption zone.
+  </description>
+</property>
+
 <property>
   <name>fs.har.impl.disable.cache</name>
   <value>true</value>

+ 1 - 0
hadoop-common-project/hadoop-common/src/site/markdown/Benchmarking.md

@@ -91,6 +91,7 @@ When running benchmarks with the above operation(s), please provide operation-sp
 The benchmark measures the number of operations performed by the name-node per second. Specifically, for each operation tested, it reports the total running time in seconds (_Elapsed Time_), operation throughput (_Ops per sec_), and average time for the operations (_Average Time_). The higher, the better.
 
 Following is a sample report from running the following command, which opens 100K files with 1K threads against a remote name-node. See [HDFS scalability: the limits to growth](https://www.usenix.org/legacy/publications/login/2010-04/openpdfs/shvachko.pdf) for real-world benchmark stats.
+
 ```
 $ hadoop org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark -fs hdfs://nameservice:9000 -op open -threads 1000 -files 100000
 

+ 6 - 13
hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md

@@ -64,17 +64,17 @@ Administrators can configure individual daemons using the configuration options
 
 | Daemon | Environment Variable |
 |:---- |:---- |
-| NameNode | HADOOP\_NAMENODE\_OPTS |
-| DataNode | HADOOP\_DATANODE\_OPTS |
-| Secondary NameNode | HADOOP\_SECONDARYNAMENODE\_OPTS |
+| NameNode | HDFS\_NAMENODE\_OPTS |
+| DataNode | HDFS\_DATANODE\_OPTS |
+| Secondary NameNode | HDFS\_SECONDARYNAMENODE\_OPTS |
 | ResourceManager | YARN\_RESOURCEMANAGER\_OPTS |
 | NodeManager | YARN\_NODEMANAGER\_OPTS |
 | WebAppProxy | YARN\_PROXYSERVER\_OPTS |
-| Map Reduce Job History Server | HADOOP\_JOB\_HISTORYSERVER\_OPTS |
+| Map Reduce Job History Server | MAPRED\_HISTORYSERVER\_OPTS |
 
-For example, To configure Namenode to use parallelGC, the following statement should be added in hadoop-env.sh :
+For example, to configure the Namenode to use parallelGC and a 4GB Java heap, the following statement should be added in hadoop-env.sh:
 
-      export HADOOP_NAMENODE_OPTS="-XX:+UseParallelGC"
+      export HDFS_NAMENODE_OPTS="-XX:+UseParallelGC -Xmx4g"
 
 See `etc/hadoop/hadoop-env.sh` for other examples.
 
@@ -91,13 +91,6 @@ It is also traditional to configure `HADOOP_HOME` in the system-wide shell envir
       HADOOP_HOME=/path/to/hadoop
       export HADOOP_HOME
 
-| Daemon | Environment Variable |
-|:---- |:---- |
-| ResourceManager | YARN\_RESOURCEMANAGER\_HEAPSIZE |
-| NodeManager | YARN\_NODEMANAGER\_HEAPSIZE |
-| WebAppProxy | YARN\_PROXYSERVER\_HEAPSIZE |
-| Map Reduce Job History Server | HADOOP\_JOB\_HISTORYSERVER\_HEAPSIZE |
-
 ### Configuring the Hadoop Daemons
 
 This section deals with important parameters to be specified in the given configuration files:

+ 1 - 0
hadoop-common-project/hadoop-common/src/site/markdown/DeprecatedProperties.md

@@ -28,6 +28,7 @@ The following table lists the configuration property names that are deprecated i
 | dfs.data.dir | dfs.datanode.data.dir |
 | dfs.datanode.max.xcievers | dfs.datanode.max.transfer.threads |
 | dfs.df.interval | fs.df.interval |
+| dfs.encryption.key.provider.uri | hadoop.security.key.provider.path |
 | dfs.federation.nameservice.id | dfs.nameservice.id |
 | dfs.federation.nameservices | dfs.nameservices |
 | dfs.http.address | dfs.namenode.http-address |

+ 30 - 4
hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md

@@ -24,7 +24,7 @@ Apache Hadoop has many environment variables that control various aspects of the
 
 ### `HADOOP_CLIENT_OPTS`
 
-This environment variable is used for almost all end-user operations.  It can be used to set any Java options as well as any Apache Hadoop options via a system property definition. For example:
+This environment variable is used for all end-user, non-daemon operations.  It can be used to set any Java options as well as any Apache Hadoop options via a system property definition. For example:
 
 ```bash
 HADOOP_CLIENT_OPTS="-Xmx1g -Dhadoop.socks.server=localhost:4000" hadoop fs -ls /tmp
@@ -32,6 +32,18 @@ HADOOP_CLIENT_OPTS="-Xmx1g -Dhadoop.socks.server=localhost:4000" hadoop fs -ls /
 
 will increase the memory and send this command via a SOCKS proxy server.
 
+### `(command)_(subcommand)_OPTS`
+
+It is also possible to set options on a per-subcommand basis.  This allows one to create special options for particular cases.  The first part of the pattern is the command being used, but all uppercase.  The second part of the pattern is the subcommand being used, finally followed by the string `_OPTS`.
+
+For example, to configure `mapred distcp` to use a 2GB heap, one would use:
+
+```bash
+MAPRED_DISTCP_OPTS="-Xmx2g"
+```
+
+These options will appear *after* `HADOOP_CLIENT_OPTS` during execution and will generally take precedence.
+
 ### `HADOOP_CLASSPATH`
 
   NOTE: Site-wide settings should be configured via a shellprofile entry and permanent user-wide settings should be configured via ${HOME}/.hadooprc using the `hadoop_add_classpath` function. See below for more information.
@@ -56,6 +68,8 @@ For example:
 #
 
 HADOOP_CLIENT_OPTS="-Xmx1g"
+MAPRED_DISTCP_OPTS="-Xmx2g"
+HADOOP_DISTCP_OPTS="-Xmx2g"
 ```
 
 The `.hadoop-env` file can also be used to extend functionality and teach Apache Hadoop new tricks.  For example, to run hadoop commands accessing the server referenced in the environment variable `${HADOOP_SERVER}`, the following in the `.hadoop-env` will do just that:
@@ -71,11 +85,23 @@ One word of warning:  not all of Unix Shell API routines are available or work c
 
 ## Administrator Environment
 
-There are many environment variables that impact how the system operates.  By far, the most important are the series of `_OPTS` variables that control how daemons work.  These variables should contain all of the relevant settings for those daemons.
+In addition to the various XML files, there are two key capabilities for administrators to configure Apache Hadoop when using the Unix Shell:
+
+  * Many environment variables that impact how the system operates.  This guide will only highlight some key ones.  There is generally more information in the various `*-env.sh` files.
+
+  * Supplementing or making platform-specific changes to the existing scripts.  Apache Hadoop provides the capability to do function overrides so that the existing code base may be changed in place without copying and patching the shipped scripts.  Replacing functions is covered later under the Shell API documentation.
+
+### `(command)_(subcommand)_OPTS`
+
+By far, the most important are the series of `_OPTS` variables that control how daemons work.  These variables should contain all of the relevant settings for those daemons.
+
+Similar to the user commands above, all daemons will honor the `(command)_(subcommand)_OPTS` pattern.  It is generally recommended that these be set in `hadoop-env.sh` to guarantee that the system will know which settings it should use on restart.  Unlike user-facing subcommands, daemons will *NOT* honor `HADOOP_CLIENT_OPTS`.
+
+In addition, daemons that run in an extra security mode also support `(command)_(subcommand)_SECURE_EXTRA_OPTS`.  These options are *supplemental* to the generic `*_OPTS` and will appear after, therefore generally taking precedence.
 
-More, detailed information is contained in `hadoop-env.sh` and the other env.sh files.
+### `(command)_(subcommand)_USER`
 
-Advanced administrators may wish to supplement or do some platform-specific fixes to the existing scripts.  In some systems, this means copying the errant script or creating a custom build with these changes.  Apache Hadoop provides the capabilities to do function overrides so that the existing code base may be changed in place without all of that work.  Replacing functions is covered later under the Shell API documentation.
+Apache Hadoop provides a way to do a per-subcommand user check.  While this method is easily circumvented and should not be considered a security feature, it does provide a mechanism by which to prevent accidents.  For example, setting `HDFS_NAMENODE_USER=hdfs` will make the `hdfs namenode` and `hdfs --daemon start namenode` commands verify that the user running the commands is the hdfs user by checking the `USER` environment variable.  This also works for non-daemons.  Setting `HADOOP_DISTCP_USER=jane` will verify that `USER` is set to `jane` before being allowed to execute the `hadoop distcp` command.
 
 ## Developer and Advanced Administrator Environment
 

+ 44 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/ExactLineComparator.java

@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.cli.util;
+
+import java.util.StringTokenizer;
+
+/**
+ * Comparator for the Command line tests.
+ *
+ * This comparator searches for an exact line as 'expected'
+ * in the string 'actual' and returns true if found
+ *
+ */
+public class ExactLineComparator extends ComparatorBase {
+
+  @Override
+  public boolean compare(String actual, String expected) {
+    boolean success = false;
+    StringTokenizer tokenizer = new StringTokenizer(actual, "\n\r");
+    while (tokenizer.hasMoreTokens() && !success) {
+      String actualToken = tokenizer.nextToken();
+      success = actualToken.equals(expected);
+    }
+
+    return success;
+  }
+
+}

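A brief usage sketch for the new test comparator above (test-scope code, shown only to illustrate the exact-line semantics described in its javadoc):

```java
import org.apache.hadoop.cli.util.ExactLineComparator;

public class ExactLineComparatorDemo {
  public static void main(String[] args) {
    ExactLineComparator cmp = new ExactLineComparator();
    String actual = "Found 2 items\n-rw-r--r--   1 user group 0 /tmp/a\n";

    // true: one whole line of 'actual' equals the expected string
    System.out.println(cmp.compare(actual, "Found 2 items"));
    // false: a partial match within a line is not enough
    System.out.println(cmp.compare(actual, "Found 2"));
  }
}
```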
+ 24 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java

@@ -19,7 +19,11 @@ package org.apache.hadoop.conf;
 
 import java.io.StringWriter;
 import java.io.StringReader;
+import java.util.HashMap;
 import java.util.Map;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.ws.rs.core.HttpHeaders;
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
 
@@ -32,6 +36,7 @@ import org.xml.sax.InputSource;
 
 import junit.framework.TestCase;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 /**
  * Basic test case that the ConfServlet can write configuration
@@ -47,6 +52,25 @@ public class TestConfServlet extends TestCase {
     return testConf;
   }
 
+  @Test
+  public void testParseHeaders() throws Exception {
+    HashMap<String, String> verifyMap = new HashMap<String, String>();
+    verifyMap.put("text/plain", ConfServlet.FORMAT_XML);
+    verifyMap.put(null, ConfServlet.FORMAT_XML);
+    verifyMap.put("text/xml", ConfServlet.FORMAT_XML);
+    verifyMap.put("application/xml", ConfServlet.FORMAT_XML);
+    verifyMap.put("application/json", ConfServlet.FORMAT_JSON);
+
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    for(String contentTypeExpected : verifyMap.keySet()) {
+      String contenTypeActual = verifyMap.get(contentTypeExpected);
+      Mockito.when(request.getHeader(HttpHeaders.ACCEPT))
+          .thenReturn(contentTypeExpected);
+      assertEquals(contenTypeActual,
+          ConfServlet.parseAccecptHeader(request));
+    }
+  }
+
   @Test
   @SuppressWarnings("unchecked")
   public void testWriteJson() throws Exception {

+ 57 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/TestCryptoOutputStreamClosing.java

@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.OutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+import static org.mockito.Mockito.*;
+
+/**
+ * To test proper closing of underlying stream of CryptoOutputStream.
+ */
+public class TestCryptoOutputStreamClosing {
+  private static CryptoCodec codec;
+
+  @BeforeClass
+  public static void init() throws Exception {
+    codec = CryptoCodec.getInstance(new Configuration());
+  }
+
+  @Test
+  public void testOutputStreamClosing() throws Exception {
+    OutputStream outputStream = mock(OutputStream.class);
+    CryptoOutputStream cos = new CryptoOutputStream(outputStream, codec,
+        new byte[16], new byte[16], 0L, true);
+    cos.close();
+    verify(outputStream).close();
+  }
+
+  @Test
+  public void testOutputStreamNotClosing() throws Exception {
+    OutputStream outputStream = mock(OutputStream.class);
+    CryptoOutputStream cos = new CryptoOutputStream(outputStream, codec,
+        new byte[16], new byte[16], 0L, false);
+    cos.close();
+    verify(outputStream, never()).close();
+  }
+
+}
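
The two cases above exercise the boolean closeOutputStream flag taken by this CryptoOutputStream constructor: when it is false, closing the crypto layer leaves the wrapped stream open and the caller keeps ownership of it. A minimal sketch of that split, assuming the constructor is accessible from the calling package and using an all-zero key/IV purely for illustration:

    // assumes imports of org.apache.hadoop.conf.Configuration, org.apache.hadoop.crypto.*,
    // java.io.*, and java.nio.charset.StandardCharsets
    Configuration conf = new Configuration();
    CryptoCodec codec = CryptoCodec.getInstance(conf);
    byte[] key = new byte[16];   // illustrative only; use real key material
    byte[] iv = new byte[16];
    try (OutputStream raw = new FileOutputStream("/tmp/enc.bin")) {
      CryptoOutputStream cos =
          new CryptoOutputStream(raw, codec, key, iv, 0L, false);
      cos.write("hello".getBytes(StandardCharsets.UTF_8));
      cos.close();   // closes only the crypto layer
      // raw is still open here and is closed by the try-with-resources block
    }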

+ 1 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java

@@ -49,7 +49,6 @@ public class TestDelegationTokenRenewer {
     renewer = DelegationTokenRenewer.getInstance();
   }
   
-  @SuppressWarnings("unchecked")
   @Test
   public void testAddRemoveRenewAction() throws IOException,
       InterruptedException {
@@ -81,7 +80,7 @@ public class TestDelegationTokenRenewer {
     verify(token).cancel(eq(conf));
 
     verify(fs, never()).getDelegationToken(null);
-    verify(fs, never()).setDelegationToken(any(Token.class));
+    verify(fs, never()).setDelegationToken(any());
     
     assertEquals("FileSystem not removed from DelegationTokenRenewer", 0,
         renewer.getRenewQueueLength());
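
Replacing the raw-typed any(Token.class) matcher with any() lets Mockito infer the generic Token<?> parameter of setDelegationToken, which is what makes the @SuppressWarnings("unchecked") annotation above removable. A side-by-side sketch of the two matcher forms (the first line is the one that produced the unchecked warning):

    verify(fs, never()).setDelegationToken(any(Token.class)); // raw type: unchecked warning
    verify(fs, never()).setDelegationToken(any());            // element type inferred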

+ 1 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java

@@ -103,6 +103,7 @@ public class TestFilterFileSystem {
     public void processDeleteOnExit();
     public FsStatus getStatus();
     public FileStatus[] listStatus(Path f, PathFilter filter);
+    public FileStatus[] listStatusBatch(Path f, byte[] token);
     public FileStatus[] listStatus(Path[] files);
     public FileStatus[] listStatus(Path[] files, PathFilter filter);
     public FileStatus[] globStatus(Path pathPattern);

+ 1 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java

@@ -115,6 +115,7 @@ public class TestHarFileSystem {
     public QuotaUsage getQuotaUsage(Path f);
     public FsStatus getStatus();
     public FileStatus[] listStatus(Path f, PathFilter filter);
+    public FileStatus[] listStatusBatch(Path f, byte[] token);
     public FileStatus[] listStatus(Path[] files);
     public FileStatus[] listStatus(Path[] files, PathFilter filter);
     public FileStatus[] globStatus(Path pathPattern);

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractFSContractTestBase.java

@@ -359,7 +359,7 @@ public abstract class AbstractFSContractTestBase extends Assert
     assertEquals(text + " wrong read result " + result, -1, result);
   }
 
-  boolean rename(Path src, Path dst) throws IOException {
+  protected boolean rename(Path src, Path dst) throws IOException {
     return getFileSystem().rename(src, dst);
   }
 

+ 5 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java

@@ -47,6 +47,9 @@ import java.util.Properties;
 import java.util.Set;
 import java.util.UUID;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
 /**
  * Utilities used across test cases.
  */
@@ -55,8 +58,6 @@ public class ContractTestUtils extends Assert {
   private static final Logger LOG =
       LoggerFactory.getLogger(ContractTestUtils.class);
 
-  public static final String IO_FILE_BUFFER_SIZE = "io.file.buffer.size";
-
   // For scale testing, we can repeatedly write small chunk data to generate
   // a large file.
   public static final String IO_CHUNK_BUFFER_SIZE = "io.chunk.buffer.size";
@@ -150,8 +151,8 @@ public class ContractTestUtils extends Assert {
     FSDataOutputStream out = fs.create(path,
                                        overwrite,
                                        fs.getConf()
-                                         .getInt(IO_FILE_BUFFER_SIZE,
-                                                 4096),
+                                         .getInt(IO_FILE_BUFFER_SIZE_KEY,
+                                             IO_FILE_BUFFER_SIZE_DEFAULT),
                                        (short) 1,
                                        buffersize);
     out.write(src, 0, len);
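
Dropping the local IO_FILE_BUFFER_SIZE string in favour of the shared constants means the contract utilities now read the same key and default as production code. A minimal sketch of the lookup the changed hunk performs, using only the statically imported names shown above (IO_FILE_BUFFER_SIZE_DEFAULT ships as 4096):

    // assumes org.apache.hadoop.conf.Configuration plus the two static imports above
    Configuration conf = new Configuration();
    int bufferSize = conf.getInt(IO_FILE_BUFFER_SIZE_KEY,      // "io.file.buffer.size"
                                 IO_FILE_BUFFER_SIZE_DEFAULT); // 4096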

+ 154 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestConnectionRetryPolicy.java

@@ -0,0 +1,154 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.retry;
+
+import static org.junit.Assert.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.PathIOException;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RetriableException;
+import org.apache.hadoop.ipc.RpcNoSuchMethodException;
+import org.junit.Test;
+
+/**
+ * This class mainly tests the behavior of various retry policies at the
+ * connection level.
+ */
+public class TestConnectionRetryPolicy {
+  private static RetryPolicy getDefaultRetryPolicy(
+      final boolean defaultRetryPolicyEnabled,
+      final String defaultRetryPolicySpec,
+      final String remoteExceptionToRetry) {
+    return getDefaultRetryPolicy(
+        new Configuration(),
+        defaultRetryPolicyEnabled,
+        defaultRetryPolicySpec,
+        remoteExceptionToRetry);
+  }
+
+  private static RetryPolicy getDefaultRetryPolicy(
+      final boolean defaultRetryPolicyEnabled,
+      final String defaultRetryPolicySpec) {
+    return getDefaultRetryPolicy(
+        new Configuration(),
+        defaultRetryPolicyEnabled,
+        defaultRetryPolicySpec,
+        "");
+  }
+
+  public static RetryPolicy getDefaultRetryPolicy(
+      final Configuration conf,
+      final boolean defaultRetryPolicyEnabled,
+      final String defaultRetryPolicySpec,
+      final String remoteExceptionToRetry) {
+    return RetryUtils.getDefaultRetryPolicy(
+        conf,
+        "org.apache.hadoop.io.retry.TestConnectionRetryPolicy.No.Such.Key",
+        defaultRetryPolicyEnabled,
+        "org.apache.hadoop.io.retry.TestConnectionRetryPolicy.No.Such.Key",
+        defaultRetryPolicySpec,
+        "");
+  }
+
+  @Test(timeout = 60000)
+  public void testDefaultRetryPolicyEquivalence() {
+    RetryPolicy rp1 = null;
+    RetryPolicy rp2 = null;
+    RetryPolicy rp3 = null;
+
+    /* test the same setting */
+    rp1 = getDefaultRetryPolicy(true, "10000,2");
+    rp2 = getDefaultRetryPolicy(true, "10000,2");
+    rp3 = getDefaultRetryPolicy(true, "10000,2");
+    verifyRetryPolicyEquivalence(new RetryPolicy[] {rp1, rp2, rp3});
+
+    /* test different remoteExceptionToRetry */
+    rp1 = getDefaultRetryPolicy(
+        true,
+        "10000,2",
+        new RemoteException(
+            PathIOException.class.getName(),
+            "path IO exception").getClassName());
+    rp2 = getDefaultRetryPolicy(
+        true,
+        "10000,2",
+        new RemoteException(
+            RpcNoSuchMethodException.class.getName(),
+            "no such method exception").getClassName());
+    rp3 = getDefaultRetryPolicy(
+        true,
+        "10000,2",
+        new RemoteException(
+            RetriableException.class.getName(),
+            "retriable exception").getClassName());
+    verifyRetryPolicyEquivalence(new RetryPolicy[] {rp1, rp2, rp3});
+
+    /* test enabled and different specifications */
+    rp1 = getDefaultRetryPolicy(true, "20000,3");
+    rp2 = getDefaultRetryPolicy(true, "30000,4");
+    assertNotEquals("should not be equal", rp1, rp2);
+    assertNotEquals(
+        "should not have the same hash code",
+        rp1.hashCode(),
+        rp2.hashCode());
+
+    /* test disabled and the same specifications */
+    rp1 = getDefaultRetryPolicy(false, "40000,5");
+    rp2 = getDefaultRetryPolicy(false, "40000,5");
+    assertEquals("should be equal", rp1, rp2);
+    assertEquals(
+        "should have the same hash code",
+        rp1, rp2);
+
+    /* test the disabled and different specifications */
+    rp1 = getDefaultRetryPolicy(false, "50000,6");
+    rp2 = getDefaultRetryPolicy(false, "60000,7");
+    assertEquals("should be equal", rp1, rp2);
+    assertEquals(
+        "should have the same hash code",
+        rp1, rp2);
+  }
+
+  public static RetryPolicy newTryOnceThenFail() {
+    return new RetryPolicies.TryOnceThenFail();
+  }
+
+  @Test(timeout = 60000)
+  public void testTryOnceThenFailEquivalence() throws Exception {
+    final RetryPolicy rp1 = newTryOnceThenFail();
+    final RetryPolicy rp2 = newTryOnceThenFail();
+    final RetryPolicy rp3 = newTryOnceThenFail();
+    verifyRetryPolicyEquivalence(new RetryPolicy[] {rp1, rp2, rp3});
+  }
+
+  private void verifyRetryPolicyEquivalence(RetryPolicy[] polices) {
+    for (int i = 0; i < polices.length; i++) {
+      for (int j = 0; j < polices.length; j++) {
+        if (i != j) {
+          assertEquals("should be equal", polices[i], polices[j]);
+          assertEquals(
+              "should have the same hash code",
+              polices[i].hashCode(),
+              polices[j].hashCode());
+        }
+      }
+    }
+  }
+}
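
The "10000,2"-style strings above are retry specifications: comma-separated pairs of sleep time in milliseconds and retry count that RetryUtils.getDefaultRetryPolicy honours only when the enable flag is true (which is why the disabled policies compare equal regardless of spec). As a rough sketch, an enabled "10000,2" behaves like the fixed-sleep policy below; this is an approximation for illustration, not the exact object RetryUtils builds, which can also chain several pairs such as "10000,2,60000,5":

    // assumes java.util.concurrent.TimeUnit and org.apache.hadoop.io.retry.*
    RetryPolicy roughly = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        2, 10000, TimeUnit.MILLISECONDS);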

+ 10 - 28
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java

@@ -17,13 +17,8 @@
  */
 package org.apache.hadoop.ipc;
 
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadMXBean;
-import java.net.InetSocketAddress;
-import java.security.PrivilegedExceptionAction;
-import java.util.concurrent.atomic.AtomicLong;
-
+import com.google.common.base.Joiner;
+import com.google.protobuf.BlockingService;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.GnuParser;
@@ -34,7 +29,6 @@ import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.RPC.Server;
-import org.apache.hadoop.ipc.TestRPC.TestProtocol;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
 import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
@@ -45,8 +39,12 @@ import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
-import com.google.common.base.Joiner;
-import com.google.protobuf.BlockingService;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadMXBean;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.atomic.AtomicLong;
 
 /**
  * Benchmark for protobuf RPC.
@@ -68,7 +66,7 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
     public int secondsToRun = 15;
     private int msgSize = 1024;
     public Class<? extends RpcEngine> rpcEngine =
-      WritableRpcEngine.class;
+        ProtobufRpcEngine.class;
     
     private MyOptions(String args[]) {
       try {
@@ -135,7 +133,7 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
       
       opts.addOption(
           OptionBuilder.withLongOpt("engine").hasArg(true)
-          .withArgName("writable|protobuf")
+          .withArgName("protobuf")
           .withDescription("engine to use")
           .create('e'));
       
@@ -184,8 +182,6 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
         String eng = line.getOptionValue('e');
         if ("protobuf".equals(eng)) {
           rpcEngine = ProtobufRpcEngine.class;
-        } else if ("writable".equals(eng)) {
-          rpcEngine = WritableRpcEngine.class;
         } else {
           throw new ParseException("invalid engine: " + eng);
         }
@@ -237,11 +233,6 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
       server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
           .setInstance(service).setBindAddress(opts.host).setPort(opts.getPort())
           .setNumHandlers(opts.serverThreads).setVerbose(false).build();
-    } else if (opts.rpcEngine == WritableRpcEngine.class) {
-      server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-          .setInstance(new TestRPC.TestImpl()).setBindAddress(opts.host)
-          .setPort(opts.getPort()).setNumHandlers(opts.serverThreads)
-          .setVerbose(false).build();
     } else {
       throw new RuntimeException("Bad engine: " + opts.rpcEngine);
     }
@@ -399,15 +390,6 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
           return responseProto.getMessage();
         }
       };
-    } else if (opts.rpcEngine == WritableRpcEngine.class) {
-      final TestProtocol proxy = RPC.getProxy(
-          TestProtocol.class, TestProtocol.versionID, addr, conf);
-      return new RpcServiceWrapper() {
-        @Override
-        public String doEcho(String msg) throws Exception {
-          return proxy.echo(msg);
-        }
-      };
     } else {
       throw new RuntimeException("unsupported engine: " + opts.rpcEngine);
     }

+ 84 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java

@@ -40,6 +40,7 @@ import java.io.OutputStream;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.net.InetSocketAddress;
+import java.net.ServerSocket;
 import java.net.Socket;
 import java.net.SocketTimeoutException;
 import java.util.ArrayList;
@@ -49,6 +50,8 @@ import java.util.Random;
 import java.util.concurrent.BrokenBarrierException;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -76,6 +79,9 @@ import org.apache.hadoop.ipc.Server.Connection;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
 import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -112,6 +118,8 @@ public class TestIPC {
   public void setupConf() {
     conf = new Configuration();
     Client.setPingInterval(conf, PING_INTERVAL);
+    // tests may enable security, so disable before each test
+    UserGroupInformation.setConfiguration(conf);
   }
 
   static final Random RANDOM = new Random();
@@ -123,8 +131,8 @@ public class TestIPC {
 
   static ConnectionId getConnectionId(InetSocketAddress addr, int rpcTimeout,
       Configuration conf) throws IOException {
-    return ConnectionId.getConnectionId(addr, null, null, rpcTimeout, null,
-        conf);
+    return ConnectionId.getConnectionId(addr, null,
+        UserGroupInformation.getCurrentUser(), rpcTimeout, null, conf);
   }
 
   static Writable call(Client client, InetSocketAddress addr,
@@ -1402,6 +1410,80 @@ public class TestIPC {
     client.stop();
   }
   
+  @Test(timeout=4000)
+  public void testInsecureVersionMismatch() throws IOException {
+    checkVersionMismatch();
+  }
+
+  @Test(timeout=4000)
+  public void testSecureVersionMismatch() throws IOException {
+    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
+    UserGroupInformation.setConfiguration(conf);
+    checkVersionMismatch();
+  }
+
+  private void checkVersionMismatch() throws IOException {
+    try (final ServerSocket listenSocket = new ServerSocket()) {
+      listenSocket.bind(null);
+      InetSocketAddress addr =
+          (InetSocketAddress) listenSocket.getLocalSocketAddress();
+
+      // open a socket that accepts a client and immediately returns
+      // a version mismatch exception.
+      ExecutorService executor = Executors.newSingleThreadExecutor();
+      executor.submit(new Runnable(){
+        @Override
+        public void run() {
+          try {
+            Socket socket = listenSocket.accept();
+            socket.getOutputStream().write(
+                NetworkTraces.RESPONSE_TO_HADOOP_0_20_3_RPC);
+            socket.close();
+          } catch (Throwable t) {
+            // ignore.
+          }
+        }
+      });
+
+      try {
+        Client client = new Client(LongWritable.class, conf);
+        call(client, 0, addr, conf);
+      } catch (RemoteException re) {
+        Assert.assertEquals(RPC.VersionMismatch.class.getName(),
+            re.getClassName());
+        Assert.assertEquals(NetworkTraces.HADOOP0_20_ERROR_MSG,
+            re.getMessage());
+        return;
+      }
+      Assert.fail("didn't get version mismatch");
+    }
+  }
+
+  @Test
+  public void testRpcResponseLimit() throws Throwable {
+    Server server = new TestServer(1, false);
+    InetSocketAddress addr = NetUtils.getConnectAddress(server);
+    server.start();
+
+    conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_RESPONSE_LENGTH, 0);
+    Client client = new Client(LongWritable.class, conf);
+    call(client, 0, addr, conf);
+
+    conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_RESPONSE_LENGTH, 4);
+    client = new Client(LongWritable.class, conf);
+    try {
+      call(client, 0, addr, conf);
+    } catch (IOException ioe) {
+      Throwable t = ioe.getCause();
+      Assert.assertNotNull(t);
+      Assert.assertEquals(RpcException.class, t.getClass());
+      Assert.assertEquals("RPC response exceeds maximum data length",
+          t.getMessage());
+      return;
+    }
+    Assert.fail("didn't get limit exceeded");
+  }
+
   private void doIpcVersionTest(
       byte[] requestData,
       byte[] expectedResponse) throws IOException {

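testRpcResponseLimit above drives the new client-side cap on RPC response size: a value of 0 for IPC_MAXIMUM_RESPONSE_LENGTH disables the check, while a tiny limit makes the call fail with an RpcException ("RPC response exceeds maximum data length") wrapped in an IOException. A configuration sketch; the 128 MB cap here is illustrative:

    // assumes org.apache.hadoop.conf.Configuration and org.apache.hadoop.fs.CommonConfigurationKeys
    Configuration conf = new Configuration();
    conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_RESPONSE_LENGTH, 0);                 // disable the check
    conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_RESPONSE_LENGTH, 128 * 1024 * 1024); // or cap at ~128 MB
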
+ 6 - 230
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java

@@ -17,252 +17,28 @@
  */
 package org.apache.hadoop.ipc;
 
-import java.io.IOException;
-import java.net.InetSocketAddress;
-
-import org.junit.Assert;
-
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
-import org.apache.hadoop.net.NetUtils;
-import org.junit.Before;
 import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
-import com.google.protobuf.BlockingService;
 
 public class TestMultipleProtocolServer extends TestRpcBase {
-  private static InetSocketAddress addr;
-  private static RPC.Server server;
-
-  private static Configuration conf = new Configuration();
-  
-  
-  @ProtocolInfo(protocolName="Foo")
-  interface Foo0 extends VersionedProtocol {
-    public static final long versionID = 0L;
-    String ping() throws IOException;
-    
-  }
-  
-  @ProtocolInfo(protocolName="Foo")
-  interface Foo1 extends VersionedProtocol {
-    public static final long versionID = 1L;
-    String ping() throws IOException;
-    String ping2() throws IOException;
-  }
-  
-  @ProtocolInfo(protocolName="Foo")
-  interface FooUnimplemented extends VersionedProtocol {
-    public static final long versionID = 2L;
-    String ping() throws IOException;  
-  }
-  
-  interface Mixin extends VersionedProtocol{
-    public static final long versionID = 0L;
-    void hello() throws IOException;
-  }
-
-  interface Bar extends Mixin {
-    public static final long versionID = 0L;
-    int echo(int i) throws IOException;
-  }
-  
-  class Foo0Impl implements Foo0 {
-
-    @Override
-    public long getProtocolVersion(String protocol, long clientVersion)
-        throws IOException {
-      return Foo0.versionID;
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public ProtocolSignature getProtocolSignature(String protocol,
-        long clientVersion, int clientMethodsHash) throws IOException {
-      Class<? extends VersionedProtocol> inter;
-      try {
-        inter = (Class<? extends VersionedProtocol>)getClass().
-                                          getGenericInterfaces()[0];
-      } catch (Exception e) {
-        throw new IOException(e);
-      }
-      return ProtocolSignature.getProtocolSignature(clientMethodsHash, 
-          getProtocolVersion(protocol, clientVersion), inter);
-    }
-
-    @Override
-    public String ping() {
-      return "Foo0";     
-    }
-    
-  }
-  
-  class Foo1Impl implements Foo1 {
-
-    @Override
-    public long getProtocolVersion(String protocol, long clientVersion)
-        throws IOException {
-      return Foo1.versionID;
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public ProtocolSignature getProtocolSignature(String protocol,
-        long clientVersion, int clientMethodsHash) throws IOException {
-      Class<? extends VersionedProtocol> inter;
-      try {
-        inter = (Class<? extends VersionedProtocol>)getClass().
-                                        getGenericInterfaces()[0];
-      } catch (Exception e) {
-        throw new IOException(e);
-      }
-      return ProtocolSignature.getProtocolSignature(clientMethodsHash, 
-          getProtocolVersion(protocol, clientVersion), inter);
-    }
-
-    @Override
-    public String ping() {
-      return "Foo1";
-    }
 
-    @Override
-    public String ping2() {
-      return "Foo1";
-      
-    }
-    
-  }
-
-  
-  class BarImpl implements Bar {
-
-    @Override
-    public long getProtocolVersion(String protocol, long clientVersion)
-        throws IOException {
-      return Bar.versionID;
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public ProtocolSignature getProtocolSignature(String protocol,
-        long clientVersion, int clientMethodsHash) throws IOException {
-      Class<? extends VersionedProtocol> inter;
-      try {
-        inter = (Class<? extends VersionedProtocol>)getClass().
-                                          getGenericInterfaces()[0];
-      } catch (Exception e) {
-        throw new IOException(e);
-      }
-      return ProtocolSignature.getProtocolSignature(clientMethodsHash, 
-          getProtocolVersion(protocol, clientVersion), inter);
-    }
-
-    @Override
-    public int echo(int i) {
-      return i;
-    }
-
-    @Override
-    public void hello() {
+  private static RPC.Server server;
 
-      
-    }
-  }
   @Before
   public void setUp() throws Exception {
-    // create a server with two handlers
-    server = new RPC.Builder(conf).setProtocol(Foo0.class)
-        .setInstance(new Foo0Impl()).setBindAddress(ADDRESS).setPort(0)
-        .setNumHandlers(2).setVerbose(false).build();
-    server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Foo1.class, new Foo1Impl());
-    server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Bar.class, new BarImpl());
-    server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Mixin.class, new BarImpl());
-    
-    
-    // Add Protobuf server
-    // Create server side implementation
-    PBServerImpl pbServerImpl = new PBServerImpl();
-    BlockingService service = TestProtobufRpcProto
-        .newReflectiveBlockingService(pbServerImpl);
-    server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService.class,
-        service);
-    server.start();
-    addr = NetUtils.getConnectAddress(server);
+    super.setupConf();
+
+    server = setupTestServer(conf, 2);
   }
-  
+
   @After
   public void tearDown() throws Exception {
     server.stop();
   }
 
-  @Test
-  public void test1() throws IOException {
-    ProtocolProxy<?> proxy;
-    proxy = RPC.getProtocolProxy(Foo0.class, Foo0.versionID, addr, conf);
 
-    Foo0 foo0 = (Foo0)proxy.getProxy(); 
-    Assert.assertEquals("Foo0", foo0.ping());
-    
-    
-    proxy = RPC.getProtocolProxy(Foo1.class, Foo1.versionID, addr, conf);
-    
-    
-    Foo1 foo1 = (Foo1)proxy.getProxy(); 
-    Assert.assertEquals("Foo1", foo1.ping());
-    Assert.assertEquals("Foo1", foo1.ping());
-    
-    
-    proxy = RPC.getProtocolProxy(Bar.class, Foo1.versionID, addr, conf);
-    
-    
-    Bar bar = (Bar)proxy.getProxy(); 
-    Assert.assertEquals(99, bar.echo(99));
-    
-    // Now test Mixin class method
-    
-    Mixin mixin = bar;
-    mixin.hello();
-  }
-  
-  
-  // Server does not implement the FooUnimplemented version of protocol Foo.
-  // See that calls to it fail.
-  @Test(expected=IOException.class)
-  public void testNonExistingProtocol() throws IOException {
-    ProtocolProxy<?> proxy;
-    proxy = RPC.getProtocolProxy(FooUnimplemented.class, 
-        FooUnimplemented.versionID, addr, conf);
-
-    FooUnimplemented foo = (FooUnimplemented)proxy.getProxy(); 
-    foo.ping();
-  }
-
-  /**
-   * getProtocolVersion of an unimplemented version should return highest version
-   * Similarly getProtocolSignature should work.
-   * @throws IOException
-   */
-  @Test
-  public void testNonExistingProtocol2() throws IOException {
-    ProtocolProxy<?> proxy;
-    proxy = RPC.getProtocolProxy(FooUnimplemented.class, 
-        FooUnimplemented.versionID, addr, conf);
-
-    FooUnimplemented foo = (FooUnimplemented)proxy.getProxy(); 
-    Assert.assertEquals(Foo1.versionID, 
-        foo.getProtocolVersion(RPC.getProtocolName(FooUnimplemented.class), 
-        FooUnimplemented.versionID));
-    foo.getProtocolSignature(RPC.getProtocolName(FooUnimplemented.class), 
-        FooUnimplemented.versionID, 0);
-  }
-  
-  @Test(expected=IOException.class)
-  public void testIncorrectServerCreation() throws IOException {
-    new RPC.Builder(conf).setProtocol(Foo1.class).setInstance(new Foo0Impl())
-        .setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false)
-        .build();
-  } 
-  
   // Now test a PB service - a server  hosts both PB and Writable Rpcs.
   @Test
   public void testPBService() throws Exception {

+ 0 - 13
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCallBenchmark.java

@@ -25,19 +25,6 @@ import org.junit.Test;
 
 public class TestRPCCallBenchmark {
 
-  @Test(timeout=20000)
-  public void testBenchmarkWithWritable() throws Exception {
-    int rc = ToolRunner.run(new RPCCallBenchmark(),
-        new String[] {
-      "--clientThreads", "30",
-      "--serverThreads", "30",
-      "--time", "5",
-      "--serverReaderThreads", "4",
-      "--messageSize", "1024",
-      "--engine", "writable"});
-    assertEquals(0, rc);
-  }
-  
   @Test(timeout=20000)
   public void testBenchmarkWithProto() throws Exception {
     int rc = ToolRunner.run(new RPCCallBenchmark(),

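With the writable engine removed, protobuf is the only engine RPCCallBenchmark accepts, so only the protobuf benchmark test remains. A sketch of invoking the benchmark programmatically, mirroring the surviving test; the thread counts and duration are illustrative:

    // assumes org.apache.hadoop.util.ToolRunner and org.apache.hadoop.ipc.RPCCallBenchmark
    int rc = ToolRunner.run(new RPCCallBenchmark(), new String[] {
        "--clientThreads", "8",
        "--serverThreads", "8",
        "--time", "5",
        "--messageSize", "1024",
        "--engine", "protobuf"});
    System.out.println("benchmark exit code: " + rc);
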
+ 23 - 219
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java

@@ -18,28 +18,20 @@
 
 package org.apache.hadoop.ipc;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.net.InetSocketAddress;
-
-import org.junit.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureRequestProto;
-import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureResponseProto;
-import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolSignatureProto;
-import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
-import org.apache.hadoop.net.NetUtils;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.net.InetSocketAddress;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
 /** Unit test for supporting method-name based compatible RPCs. */
 public class TestRPCCompatibility {
   private static final String ADDRESS = "0.0.0.0";
@@ -49,7 +41,7 @@ public class TestRPCCompatibility {
 
   public static final Log LOG =
     LogFactory.getLog(TestRPCCompatibility.class);
-  
+
   private static Configuration conf = new Configuration();
 
   public interface TestProtocol0 extends VersionedProtocol {
@@ -120,6 +112,21 @@ public class TestRPCCompatibility {
   @Before
   public void setUp() {
     ProtocolSignature.resetCache();
+
+    RPC.setProtocolEngine(conf,
+        TestProtocol0.class, ProtobufRpcEngine.class);
+
+    RPC.setProtocolEngine(conf,
+        TestProtocol1.class, ProtobufRpcEngine.class);
+
+    RPC.setProtocolEngine(conf,
+        TestProtocol2.class, ProtobufRpcEngine.class);
+
+    RPC.setProtocolEngine(conf,
+        TestProtocol3.class, ProtobufRpcEngine.class);
+
+    RPC.setProtocolEngine(conf,
+        TestProtocol4.class, ProtobufRpcEngine.class);
   }
   
   @After
@@ -133,117 +140,7 @@ public class TestRPCCompatibility {
       server = null;
     }
   }
-  
-  @Test  // old client vs new server
-  public void testVersion0ClientVersion1Server() throws Exception {
-    // create a server with two handlers
-    TestImpl1 impl = new TestImpl1();
-    server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
-        .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
-        .setVerbose(false).build();
-    server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
-    server.start();
-    addr = NetUtils.getConnectAddress(server);
-
-    proxy = RPC.getProtocolProxy(
-        TestProtocol0.class, TestProtocol0.versionID, addr, conf);
-
-    TestProtocol0 proxy0 = (TestProtocol0)proxy.getProxy();
-    proxy0.ping();
-  }
-  
-  @Test  // old client vs new server
-  public void testVersion1ClientVersion0Server() throws Exception {
-    // create a server with two handlers
-    server = new RPC.Builder(conf).setProtocol(TestProtocol0.class)
-        .setInstance(new TestImpl0()).setBindAddress(ADDRESS).setPort(0)
-        .setNumHandlers(2).setVerbose(false).build();
-    server.start();
-    addr = NetUtils.getConnectAddress(server);
-
-    proxy = RPC.getProtocolProxy(
-        TestProtocol1.class, TestProtocol1.versionID, addr, conf);
-
-    TestProtocol1 proxy1 = (TestProtocol1)proxy.getProxy();
-    proxy1.ping();
-    try {
-      proxy1.echo("hello");
-      fail("Echo should fail");
-    } catch(IOException e) {
-    }
-  }
-  
-  private class Version2Client {
 
-    private TestProtocol2 proxy2;
-    private ProtocolProxy<TestProtocol2> serverInfo;
-    
-    private Version2Client() throws IOException {
-      serverInfo =  RPC.getProtocolProxy(
-          TestProtocol2.class, TestProtocol2.versionID, addr, conf);
-      proxy2 = serverInfo.getProxy();
-    }
-    
-    public int echo(int value) throws IOException, NumberFormatException {
-      if (serverInfo.isMethodSupported("echo", int.class)) {
-System.out.println("echo int is supported");
-        return -value;  // use version 3 echo long
-      } else { // server is version 2
-System.out.println("echo int is NOT supported");
-        return Integer.parseInt(proxy2.echo(String.valueOf(value)));
-      }
-    }
-
-    public String echo(String value) throws IOException {
-      return proxy2.echo(value);
-    }
-
-    public void ping() throws IOException {
-      proxy2.ping();
-    }
-  }
-
-  @Test // Compatible new client & old server
-  public void testVersion2ClientVersion1Server() throws Exception {
-    // create a server with two handlers
-    TestImpl1 impl = new TestImpl1();
-    server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
-        .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
-        .setVerbose(false).build();
-    server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
-    server.start();
-    addr = NetUtils.getConnectAddress(server);
-
-
-    Version2Client client = new Version2Client();
-    client.ping();
-    assertEquals("hello", client.echo("hello"));
-    
-    // echo(int) is not supported by server, so returning 3
-    // This verifies that echo(int) and echo(String)'s hash codes are different
-    assertEquals(3, client.echo(3));
-  }
-  
-  @Test // equal version client and server
-  public void testVersion2ClientVersion2Server() throws Exception {
-    // create a server with two handlers
-    TestImpl2 impl = new TestImpl2();
-    server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
-        .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
-        .setVerbose(false).build();
-    server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
-    server.start();
-    addr = NetUtils.getConnectAddress(server);
-
-    Version2Client client = new Version2Client();
-
-    client.ping();
-    assertEquals("hello", client.echo("hello"));
-    
-    // now that echo(int) is supported by the server, echo(int) should return -3
-    assertEquals(-3, client.echo(3));
-  }
-  
   public interface TestProtocol3 {
     int echo(String value);
     int echo(int value);
@@ -297,97 +194,4 @@ System.out.println("echo int is NOT supported");
     @Override
     int echo(int value)  throws IOException;
   }
-  
-  @Test
-  public void testVersionMismatch() throws IOException {
-    server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
-        .setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0)
-        .setNumHandlers(2).setVerbose(false).build();
-    server.start();
-    addr = NetUtils.getConnectAddress(server);
-
-    TestProtocol4 proxy = RPC.getProxy(TestProtocol4.class,
-        TestProtocol4.versionID, addr, conf);
-    try {
-      proxy.echo(21);
-      fail("The call must throw VersionMismatch exception");
-    } catch (RemoteException ex) {
-      Assert.assertEquals(RPC.VersionMismatch.class.getName(), 
-          ex.getClassName());
-      Assert.assertTrue(ex.getErrorCode().equals(
-          RpcErrorCodeProto.ERROR_RPC_VERSION_MISMATCH));
-    }  catch (IOException ex) {
-      fail("Expected version mismatch but got " + ex);
-    }
-  }
-  
-  @Test
-  public void testIsMethodSupported() throws IOException {
-    server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
-        .setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0)
-        .setNumHandlers(2).setVerbose(false).build();
-    server.start();
-    addr = NetUtils.getConnectAddress(server);
-
-    TestProtocol2 proxy = RPC.getProxy(TestProtocol2.class,
-        TestProtocol2.versionID, addr, conf);
-    boolean supported = RpcClientUtil.isMethodSupported(proxy,
-        TestProtocol2.class, RPC.RpcKind.RPC_WRITABLE,
-        RPC.getProtocolVersion(TestProtocol2.class), "echo");
-    Assert.assertTrue(supported);
-    supported = RpcClientUtil.isMethodSupported(proxy,
-        TestProtocol2.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
-        RPC.getProtocolVersion(TestProtocol2.class), "echo");
-    Assert.assertFalse(supported);
-  }
-
-  /**
-   * Verify that ProtocolMetaInfoServerSideTranslatorPB correctly looks up
-   * the server registry to extract protocol signatures and versions.
-   */
-  @Test
-  public void testProtocolMetaInfoSSTranslatorPB() throws Exception {
-    TestImpl1 impl = new TestImpl1();
-    server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
-        .setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
-        .setVerbose(false).build();
-    server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
-    server.start();
-
-    ProtocolMetaInfoServerSideTranslatorPB xlator = 
-        new ProtocolMetaInfoServerSideTranslatorPB(server);
-
-    GetProtocolSignatureResponseProto resp = xlator.getProtocolSignature(
-        null,
-        createGetProtocolSigRequestProto(TestProtocol1.class,
-            RPC.RpcKind.RPC_PROTOCOL_BUFFER));
-    //No signatures should be found
-    Assert.assertEquals(0, resp.getProtocolSignatureCount());
-    resp = xlator.getProtocolSignature(
-        null,
-        createGetProtocolSigRequestProto(TestProtocol1.class,
-            RPC.RpcKind.RPC_WRITABLE));
-    Assert.assertEquals(1, resp.getProtocolSignatureCount());
-    ProtocolSignatureProto sig = resp.getProtocolSignatureList().get(0);
-    Assert.assertEquals(TestProtocol1.versionID, sig.getVersion());
-    boolean found = false;
-    int expected = ProtocolSignature.getFingerprint(TestProtocol1.class
-        .getMethod("echo", String.class));
-    for (int m : sig.getMethodsList()) {
-      if (expected == m) {
-        found = true;
-        break;
-      }
-    }
-    Assert.assertTrue(found);
-  }
-  
-  private GetProtocolSignatureRequestProto createGetProtocolSigRequestProto(
-      Class<?> protocol, RPC.RpcKind rpcKind) {
-    GetProtocolSignatureRequestProto.Builder builder = 
-        GetProtocolSignatureRequestProto.newBuilder();
-    builder.setProtocol(protocol.getName());
-    builder.setRpcKind(rpcKind.toString());
-    return builder.build();
-  }
 }

+ 29 - 15
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCWaitForProxy.java

@@ -18,9 +18,8 @@
 package org.apache.hadoop.ipc;
 
 import org.apache.hadoop.conf.Configuration;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
-import org.apache.hadoop.ipc.TestRPC.TestProtocol;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -30,30 +29,39 @@ import java.net.ConnectException;
 import java.net.InetSocketAddress;
 import java.nio.channels.ClosedByInterruptException;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY;
+
 /**
  * tests that the proxy can be interrupted
  */
-public class TestRPCWaitForProxy extends Assert {
-  private static final String ADDRESS = "0.0.0.0";
+public class TestRPCWaitForProxy extends TestRpcBase {
   private static final Logger
       LOG = LoggerFactory.getLogger(TestRPCWaitForProxy.class);
 
   private static final Configuration conf = new Configuration();
 
+  @Before
+  public void setupProtocolEngine() {
+    RPC.setProtocolEngine(conf, TestRpcService.class,
+        ProtobufRpcEngine.class);
+  }
+
   /**
    * This tests that the time-bounded wait for a proxy operation works, and
    * times out.
    *
    * @throws Throwable any exception other than that which was expected
    */
-  @Test(timeout = 10000)
+  @Test(timeout = 50000)
   public void testWaitForProxy() throws Throwable {
     RpcThread worker = new RpcThread(0);
     worker.start();
     worker.join();
     Throwable caught = worker.getCaught();
-    assertNotNull("No exception was raised", caught);
-    if (!(caught instanceof ConnectException)) {
+    Throwable cause = caught.getCause();
+    Assert.assertNotNull("No exception was raised", cause);
+    if (!(cause instanceof ConnectException)) {
       throw caught;
     }
   }
@@ -69,11 +77,11 @@ public class TestRPCWaitForProxy extends Assert {
     RpcThread worker = new RpcThread(100);
     worker.start();
     Thread.sleep(1000);
-    assertTrue("worker hasn't started", worker.waitStarted);
+    Assert.assertTrue("worker hasn't started", worker.waitStarted);
     worker.interrupt();
     worker.join();
     Throwable caught = worker.getCaught();
-    assertNotNull("No exception was raised", caught);
+    Assert.assertNotNull("No exception was raised", caught);
     // looking for the root cause here, which can be wrapped
     // as part of the NetUtils work. Having this test look
     // a the type of exception there would be brittle to improvements
@@ -82,6 +90,8 @@ public class TestRPCWaitForProxy extends Assert {
     if (cause == null) {
       // no inner cause, use outer exception as root cause.
       cause = caught;
+    } else if (cause.getCause() != null) {
+      cause = cause.getCause();
     }
     if (!(cause instanceof InterruptedIOException)
         && !(cause instanceof ClosedByInterruptException)) {
@@ -112,12 +122,16 @@ public class TestRPCWaitForProxy extends Assert {
             IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
             connectRetries);
         waitStarted = true;
-        TestProtocol proxy = RPC.waitForProxy(TestProtocol.class,
-            TestProtocol.versionID,
-            new InetSocketAddress(ADDRESS, 20),
-            config,
-            15000L);
-        proxy.echo("");
+
+        short invalidPort = 20;
+        InetSocketAddress invalidAddress = new InetSocketAddress(ADDRESS,
+            invalidPort);
+        TestRpcBase.TestRpcService proxy = RPC.getProxy(
+            TestRpcBase.TestRpcService.class,
+            1L, invalidAddress, conf);
+        // Test echo method
+        proxy.echo(null, newEchoRequest("hello"));
+
       } catch (Throwable throwable) {
         caught = throwable;
       }

+ 166 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestReuseRpcConnections.java

@@ -0,0 +1,166 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+import static org.junit.Assert.*;
+import java.util.Set;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.PathIOException;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.TestConnectionRetryPolicy;
+import org.apache.hadoop.ipc.Client.ConnectionId;
+import org.apache.hadoop.ipc.TestRpcBase.TestRpcService;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * This class mainly tests the behavior of reusing RPC connections under
+ * various retry policies.
+ */
+public class TestReuseRpcConnections extends TestRpcBase {
+  @Before
+  public void setup() {
+    setupConf();
+  }
+
+  private static RetryPolicy getDefaultRetryPolicy(
+      final boolean defaultRetryPolicyEnabled,
+      final String defaultRetryPolicySpec) {
+    return TestConnectionRetryPolicy.getDefaultRetryPolicy(
+        conf,
+        defaultRetryPolicyEnabled,
+        defaultRetryPolicySpec,
+        "");
+  }
+
+  private static RetryPolicy getDefaultRetryPolicy(
+      final boolean defaultRetryPolicyEnabled,
+      final String defaultRetryPolicySpec,
+      final String remoteExceptionToRetry) {
+    return TestConnectionRetryPolicy.getDefaultRetryPolicy(
+        conf,
+        defaultRetryPolicyEnabled,
+        defaultRetryPolicySpec,
+        remoteExceptionToRetry);
+  }
+
+  @Test(timeout = 60000)
+  public void testDefaultRetryPolicyReuseConnections() throws Exception {
+    RetryPolicy rp1 = null;
+    RetryPolicy rp2 = null;
+    RetryPolicy rp3 = null;
+
+    /* test the same setting */
+    rp1 = getDefaultRetryPolicy(true, "10000,2");
+    rp2 = getDefaultRetryPolicy(true, "10000,2");
+    verifyRetryPolicyReuseConnections(rp1, rp2, RetryPolicies.RETRY_FOREVER);
+
+    /* test enabled and different specifications */
+    rp1 = getDefaultRetryPolicy(true, "20000,3");
+    rp2 = getDefaultRetryPolicy(true, "20000,3");
+    rp3 = getDefaultRetryPolicy(true, "30000,4");
+    verifyRetryPolicyReuseConnections(rp1, rp2, rp3);
+
+    /* test disabled and the same specifications */
+    rp1 = getDefaultRetryPolicy(false, "40000,5");
+    rp2 = getDefaultRetryPolicy(false, "40000,5");
+    verifyRetryPolicyReuseConnections(rp1, rp2, RetryPolicies.RETRY_FOREVER);
+
+    /* test disabled and different specifications */
+    rp1 = getDefaultRetryPolicy(false, "50000,6");
+    rp2 = getDefaultRetryPolicy(false, "60000,7");
+    verifyRetryPolicyReuseConnections(rp1, rp2, RetryPolicies.RETRY_FOREVER);
+
+    /* test different remoteExceptionToRetry */
+    rp1 = getDefaultRetryPolicy(
+        true,
+        "70000,8",
+        new RemoteException(
+            RpcNoSuchMethodException.class.getName(),
+            "no such method exception").getClassName());
+    rp2 = getDefaultRetryPolicy(
+        true,
+        "70000,8",
+        new RemoteException(
+            PathIOException.class.getName(),
+            "path IO exception").getClassName());
+    verifyRetryPolicyReuseConnections(rp1, rp2, RetryPolicies.RETRY_FOREVER);
+  }
+
+  @Test(timeout = 60000)
+  public void testRetryPolicyTryOnceThenFail() throws Exception {
+    final RetryPolicy rp1 = TestConnectionRetryPolicy.newTryOnceThenFail();
+    final RetryPolicy rp2 = TestConnectionRetryPolicy.newTryOnceThenFail();
+    verifyRetryPolicyReuseConnections(rp1, rp2, RetryPolicies.RETRY_FOREVER);
+  }
+
+  private void verifyRetryPolicyReuseConnections(
+      final RetryPolicy retryPolicy1,
+      final RetryPolicy retryPolicy2,
+      final RetryPolicy anotherRetryPolicy) throws Exception {
+    final Server server = setupTestServer(conf, 2);
+    final Configuration newConf = new Configuration(conf);
+    newConf.set(
+        CommonConfigurationKeysPublic
+          .HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY,
+        "");
+    Client client = null;
+    TestRpcService proxy1 = null;
+    TestRpcService proxy2 = null;
+    TestRpcService proxy3 = null;
+
+    try {
+      proxy1 = getClient(addr, newConf, retryPolicy1);
+      proxy1.ping(null, newEmptyRequest());
+      client = ProtobufRpcEngine.getClient(newConf);
+      final Set<ConnectionId> conns = client.getConnectionIds();
+      assertEquals("number of connections in cache is wrong", 1, conns.size());
+
+      /*
+       * another equivalent retry policy, reuse connection
+       */
+      proxy2 = getClient(addr, newConf, retryPolicy2);
+      proxy2.ping(null, newEmptyRequest());
+      assertEquals("number of connections in cache is wrong", 1, conns.size());
+
+      /*
+       * different retry policy, create a new connection
+       */
+      proxy3 = getClient(addr, newConf, anotherRetryPolicy);
+      proxy3.ping(null, newEmptyRequest());
+      assertEquals("number of connections in cache is wrong", 2, conns.size());
+    } finally {
+      server.stop();
+      // this is dirty, but clear out connection cache for next run
+      if (client != null) {
+        client.getConnectionIds().clear();
+      }
+      if (proxy1 != null) {
+        RPC.stopProxy(proxy1);
+      }
+      if (proxy2 != null) {
+        RPC.stopProxy(proxy2);
+      }
+      if (proxy3 != null) {
+        RPC.stopProxy(proxy3);
+      }
+    }
+  }
+}

+ 59 - 18
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRpcBase.java

@@ -30,18 +30,13 @@ import org.apache.hadoop.security.token.SecretManager;
 import org.junit.Assert;
 
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.protobuf.TestProtos;
-import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos;
-import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenInfo;
 import org.apache.hadoop.security.token.TokenSelector;
-import org.junit.Assert;
 
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -112,7 +107,8 @@ public class TestRpcBase {
     return setupTestServer(builder);
   }
 
-  protected static RPC.Server setupTestServer(RPC.Builder builder) throws IOException {
+  protected static RPC.Server setupTestServer(
+      RPC.Builder builder) throws IOException {
     RPC.Server server = builder.build();
 
     server.start();
@@ -132,6 +128,24 @@ public class TestRpcBase {
     }
   }
 
+  protected static TestRpcService getClient(InetSocketAddress serverAddr,
+      Configuration clientConf, final RetryPolicy connectionRetryPolicy)
+      throws ServiceException {
+    try {
+      return RPC.getProtocolProxy(
+          TestRpcService.class,
+          0,
+          serverAddr,
+          UserGroupInformation.getCurrentUser(),
+          clientConf,
+          NetUtils.getDefaultSocketFactory(clientConf),
+          RPC.getRpcTimeout(clientConf),
+          connectionRetryPolicy, null).getProxy();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
   protected static void stop(Server server, TestRpcService proxy) {
     if (proxy != null) {
       try {
@@ -175,17 +189,21 @@ public class TestRpcBase {
     public TestTokenIdentifier() {
       this(new Text(), new Text());
     }
+
     public TestTokenIdentifier(Text tokenid) {
       this(tokenid, new Text());
     }
+
     public TestTokenIdentifier(Text tokenid, Text realUser) {
       this.tokenid = tokenid == null ? new Text() : tokenid;
       this.realUser = realUser == null ? new Text() : realUser;
     }
+
     @Override
     public Text getKind() {
       return KIND_NAME;
     }
+
     @Override
     public UserGroupInformation getUser() {
       if (realUser.toString().isEmpty()) {
@@ -203,6 +221,7 @@ public class TestRpcBase {
       tokenid.readFields(in);
       realUser.readFields(in);
     }
+
     @Override
     public void write(DataOutput out) throws IOException {
       tokenid.write(out);
@@ -234,7 +253,7 @@ public class TestRpcBase {
     @SuppressWarnings("unchecked")
     @Override
     public Token<TestTokenIdentifier> selectToken(Text service,
-                                                  Collection<Token<? extends TokenIdentifier>> tokens) {
+                      Collection<Token<? extends TokenIdentifier>> tokens) {
       if (service == null) {
         return null;
       }
@@ -388,19 +407,17 @@ public class TestRpcBase {
     }
 
     @Override
-    public TestProtos.AuthUserResponseProto getAuthUser(
+    public TestProtos.UserResponseProto getAuthUser(
         RpcController controller, TestProtos.EmptyRequestProto request)
         throws ServiceException {
-      UserGroupInformation authUser = null;
+      UserGroupInformation authUser;
       try {
         authUser = UserGroupInformation.getCurrentUser();
       } catch (IOException e) {
         throw new ServiceException(e);
       }
 
-      return TestProtos.AuthUserResponseProto.newBuilder()
-          .setAuthUser(authUser.getUserName())
-          .build();
+      return newUserResponse(authUser.getUserName());
     }
 
     @Override
@@ -432,9 +449,37 @@ public class TestRpcBase {
 
       return TestProtos.EmptyResponseProto.newBuilder().build();
     }
+
+    @Override
+    public TestProtos.UserResponseProto getCurrentUser(
+        RpcController controller,
+        TestProtos.EmptyRequestProto request) throws ServiceException {
+      String user;
+      try {
+        user = UserGroupInformation.getCurrentUser().toString();
+      } catch (IOException e) {
+        throw new ServiceException("Failed to get current user", e);
+      }
+
+      return newUserResponse(user);
+    }
+
+    @Override
+    public TestProtos.UserResponseProto getServerRemoteUser(
+        RpcController controller,
+        TestProtos.EmptyRequestProto request) throws ServiceException {
+      String serverRemoteUser = Server.getRemoteUser().toString();
+      return newUserResponse(serverRemoteUser);
+    }
+
+    private TestProtos.UserResponseProto newUserResponse(String user) {
+      return TestProtos.UserResponseProto.newBuilder()
+          .setUser(user)
+          .build();
+    }
   }
 
-  protected static TestProtos.EmptyRequestProto newEmptyRequest() {
+  public static TestProtos.EmptyRequestProto newEmptyRequest() {
     return TestProtos.EmptyRequestProto.newBuilder().build();
   }
 
@@ -478,8 +523,4 @@ public class TestRpcBase {
     }
     return null;
   }
-
-  protected static String convert(TestProtos.AuthUserResponseProto response) {
-    return response.getAuthUser();
-  }
 }

+ 48 - 26
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java

@@ -45,30 +45,55 @@ import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
-import javax.security.auth.callback.*;
-import javax.security.sasl.*;
+import javax.security.auth.callback.Callback;
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.auth.callback.NameCallback;
+import javax.security.auth.callback.PasswordCallback;
+import javax.security.auth.callback.UnsupportedCallbackException;
+import javax.security.sasl.AuthorizeCallback;
+import javax.security.sasl.Sasl;
+import javax.security.sasl.SaslClient;
+import javax.security.sasl.SaslException;
+import javax.security.sasl.SaslServer;
 import java.io.IOException;
 import java.lang.annotation.Annotation;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
 import java.security.Security;
-import java.util.*;
-import java.util.concurrent.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.regex.Pattern;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
-import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.*;
-import static org.junit.Assert.*;
+import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.KERBEROS;
+import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.SIMPLE;
+import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.TOKEN;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /** Unit tests for using Sasl over RPC. */
 @RunWith(Parameterized.class)
 public class TestSaslRPC extends TestRpcBase {
   @Parameters
   public static Collection<Object[]> data() {
-    Collection<Object[]> params = new ArrayList<Object[]>();
+    Collection<Object[]> params = new ArrayList<>();
     for (QualityOfProtection qop : QualityOfProtection.values()) {
       params.add(new Object[]{ new QualityOfProtection[]{qop},qop, null });
     }
@@ -114,7 +139,7 @@ public class TestSaslRPC extends TestRpcBase {
     NONE(),
     VALID(),
     INVALID(),
-    OTHER();
+    OTHER()
   }
   
   @BeforeClass
@@ -230,7 +255,7 @@ public class TestSaslRPC extends TestRpcBase {
       final Server server = setupTestServer(conf, 5, sm);
       doDigestRpc(server, sm);
     } finally {
-      SecurityUtil.setSecurityInfoProviders(new SecurityInfo[0]);
+      SecurityUtil.setSecurityInfoProviders();
     }
   }
 
@@ -259,7 +284,7 @@ public class TestSaslRPC extends TestRpcBase {
     addr = NetUtils.getConnectAddress(server);
     TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
         .getUserName()));
-    Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId, sm);
+    Token<TestTokenIdentifier> token = new Token<>(tokenId, sm);
     SecurityUtil.setTokenService(token, addr);
     current.addToken(token);
 
@@ -296,8 +321,8 @@ public class TestSaslRPC extends TestRpcBase {
 
     // set doPing to true
     newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
-    ConnectionId remoteId = ConnectionId.getConnectionId(new InetSocketAddress(0),
-        TestRpcService.class, null, 0, null, newConf);
+    ConnectionId remoteId = ConnectionId.getConnectionId(
+        new InetSocketAddress(0), TestRpcService.class, null, 0, null, newConf);
     assertEquals(CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT,
         remoteId.getPingInterval());
     // set doPing to false
@@ -806,13 +831,13 @@ public class TestSaslRPC extends TestRpcBase {
     final TestTokenSecretManager sm = new TestTokenSecretManager();
     boolean useSecretManager = (serverAuth != SIMPLE);
     if (enableSecretManager != null) {
-      useSecretManager &= enableSecretManager.booleanValue();
+      useSecretManager &= enableSecretManager;
     }
     if (forceSecretManager != null) {
-      useSecretManager |= forceSecretManager.booleanValue();
+      useSecretManager |= forceSecretManager;
     }
     final SecretManager<?> serverSm = useSecretManager ? sm : null;
-    
+
     Server server = serverUgi.doAs(new PrivilegedExceptionAction<Server>() {
       @Override
       public Server run() throws IOException {
@@ -867,13 +892,13 @@ public class TestSaslRPC extends TestRpcBase {
             proxy.ping(null, newEmptyRequest());
             // make sure the other side thinks we are who we said we are!!!
             assertEquals(clientUgi.getUserName(),
-                convert(proxy.getAuthUser(null, newEmptyRequest())));
+                proxy.getAuthUser(null, newEmptyRequest()).getUser());
             AuthMethod authMethod =
                 convert(proxy.getAuthMethod(null, newEmptyRequest()));
             // verify sasl completed with correct QOP
             assertEquals((authMethod != SIMPLE) ? expectedQop.saslQop : null,
-                RPC.getConnectionIdForProxy(proxy).getSaslQop());
-            return authMethod.toString();
+                         RPC.getConnectionIdForProxy(proxy).getSaslQop());
+            return authMethod != null ? authMethod.toString() : null;
           } catch (ServiceException se) {
             if (se.getCause() instanceof RemoteException) {
               throw (RemoteException) se.getCause();
@@ -898,21 +923,18 @@ public class TestSaslRPC extends TestRpcBase {
       String actual) {
     assertEquals(expect.toString(), actual);
   }
-  
-  private static void assertAuthEquals(Pattern expect,
-      String actual) {
+
+  private static void assertAuthEquals(Pattern expect, String actual) {
     // this allows us to see the regexp and the value it didn't match
     if (!expect.matcher(actual).matches()) {
-      assertEquals(expect, actual); // it failed
-    } else {
-      assertTrue(true); // it matched
+      fail(); // it failed
     }
   }
 
   /*
    * Class used to test overriding QOP values using SaslPropertiesResolver
    */
-  static class AuthSaslPropertiesResolver extends SaslPropertiesResolver{
+  static class AuthSaslPropertiesResolver extends SaslPropertiesResolver {
 
     @Override
     public Map<String, String> getServerProperties(InetAddress address) {
@@ -921,7 +943,7 @@ public class TestSaslRPC extends TestRpcBase {
       return newPropertes;
     }
   }
-  
+
   public static void main(String[] args) throws Exception {
     System.out.println("Testing Kerberos authentication over RPC");
     if (args.length != 2) {

+ 23 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/ServerSocketUtil.java

@@ -102,4 +102,27 @@ public class ServerSocketUtil {
       }
     }
   }
+
+  /**
+   * Find the specified number of unique ports available.
+   * The ports are all closed afterwards,
+   * so other network services started may grab those same ports.
+   *
+   * @param numPorts number of required port numbers
+   * @return array of available port numbers
+   * @throws IOException
+   */
+  public static int[] getPorts(int numPorts) throws IOException {
+    ServerSocket[] sockets = new ServerSocket[numPorts];
+    int[] ports = new int[numPorts];
+    for (int i = 0; i < numPorts; i++) {
+      ServerSocket sock = new ServerSocket(0);
+      sockets[i] = sock;
+      ports[i] = sock.getLocalPort();
+    }
+    for (ServerSocket sock : sockets) {
+      sock.close();
+    }
+    return ports;
+  }
 }
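For illustration only, a minimal usage sketch (not part of the patch) for the new getPorts() helper; the class name and port assignments are hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.net.ServerSocketUtil;

    public class PortsSketch {
      public static void main(String[] args) throws IOException {
        // Reserve three currently free ports. They are closed again inside
        // getPorts(), so a race with another process grabbing one of them
        // before the test binds it remains possible, as the Javadoc notes.
        int[] ports = ServerSocketUtil.getPorts(3);
        System.out.println("rpc=" + ports[0]
            + " http=" + ports[1] + " https=" + ports[2]);
      }
    }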

+ 103 - 188
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java

@@ -17,40 +17,35 @@
  */
 package org.apache.hadoop.security;
 
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.net.NetworkInterface;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Enumeration;
-
-import org.junit.Assert;
-
+import com.google.protobuf.ServiceException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.ipc.VersionedProtocol;
-import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ipc.TestRpcBase;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenInfo;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.apache.hadoop.ipc.TestRpcBase.TestTokenSecretManager;
-import org.apache.hadoop.ipc.TestRpcBase.TestTokenIdentifier;
-import org.apache.hadoop.ipc.TestRpcBase.TestTokenSelector;
-import org.apache.commons.logging.*;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Enumeration;
 
 /**
- *
+ * Test do as effective user.
  */
-public class TestDoAsEffectiveUser {
+public class TestDoAsEffectiveUser extends TestRpcBase {
   final private static String REAL_USER_NAME = "realUser1@HADOOP.APACHE.ORG";
   final private static String REAL_USER_SHORT_NAME = "realUser1";
   final private static String PROXY_USER_NAME = "proxyUser";
@@ -58,8 +53,8 @@ public class TestDoAsEffectiveUser {
   final private static String GROUP2_NAME = "group2";
   final private static String[] GROUP_NAMES = new String[] { GROUP1_NAME,
       GROUP2_NAME };
-  private static final String ADDRESS = "0.0.0.0";
-  private TestProtocol proxy;
+
+  private TestRpcService client;
   private static final Configuration masterConf = new Configuration();
   
   
@@ -82,7 +77,7 @@ public class TestDoAsEffectiveUser {
 
   private void configureSuperUserIPAddresses(Configuration conf,
       String superUserShortName) throws IOException {
-    ArrayList<String> ipList = new ArrayList<String>();
+    ArrayList<String> ipList = new ArrayList<>();
     Enumeration<NetworkInterface> netInterfaceList = NetworkInterface
         .getNetworkInterfaces();
     while (netInterfaceList.hasMoreElements()) {
@@ -130,50 +125,19 @@ public class TestDoAsEffectiveUser {
         curUGI.toString());
   }
 
-  @TokenInfo(TestTokenSelector.class)
-  public interface TestProtocol extends VersionedProtocol {
-    public static final long versionID = 1L;
-
-    String aMethod() throws IOException;
-    String getServerRemoteUser() throws IOException;
-  }
-
-  public class TestImpl implements TestProtocol {
-
-    @Override
-    public String aMethod() throws IOException {
-      return UserGroupInformation.getCurrentUser().toString();
-    }
-
-    @Override
-    public String getServerRemoteUser() throws IOException {
-      return Server.getRemoteUser().toString();
-    }
-    
-    @Override
-    public long getProtocolVersion(String protocol, long clientVersion)
-        throws IOException {
-      return TestProtocol.versionID;
-    }
-
-    @Override
-    public ProtocolSignature getProtocolSignature(String protocol,
-        long clientVersion, int clientMethodsHash) throws IOException {
-      return new ProtocolSignature(TestProtocol.versionID, null);
-    }
-  }
-
-  private void checkRemoteUgi(final Server server,
-      final UserGroupInformation ugi, final Configuration conf)
-          throws Exception {
+  private void checkRemoteUgi(final UserGroupInformation ugi,
+                              final Configuration conf) throws Exception {
     ugi.doAs(new PrivilegedExceptionAction<Void>() {
       @Override
-      public Void run() throws IOException {
-        proxy = RPC.getProxy(
-            TestProtocol.class, TestProtocol.versionID,
-            NetUtils.getConnectAddress(server), conf);
-        Assert.assertEquals(ugi.toString(), proxy.aMethod());
-        Assert.assertEquals(ugi.toString(), proxy.getServerRemoteUser());
+      public Void run() throws ServiceException {
+        client = getClient(addr, conf);
+        String currentUser = client.getCurrentUser(null,
+            newEmptyRequest()).getUser();
+        String serverRemoteUser = client.getServerRemoteUser(null,
+            newEmptyRequest()).getUser();
+
+        Assert.assertEquals(ugi.toString(), currentUser);
+        Assert.assertEquals(ugi.toString(), serverRemoteUser);
         return null;
       }
     });    
@@ -185,29 +149,27 @@ public class TestDoAsEffectiveUser {
     conf.setStrings(DefaultImpersonationProvider.getTestProvider().
         getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
     configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
-    Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-        .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
-        .setNumHandlers(5).setVerbose(true).build();
+    // Set RPC engine to protobuf RPC engine
+    RPC.setProtocolEngine(conf, TestRpcService.class,
+        ProtobufRpcEngine.class);
+    UserGroupInformation.setConfiguration(conf);
+    final Server server = setupTestServer(conf, 5);
 
     refreshConf(conf);
     try {
-      server.start();
-
       UserGroupInformation realUserUgi = UserGroupInformation
           .createRemoteUser(REAL_USER_NAME);
-      checkRemoteUgi(server, realUserUgi, conf);
+      checkRemoteUgi(realUserUgi, conf);
       
-      UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
+      UserGroupInformation proxyUserUgi =
+          UserGroupInformation.createProxyUserForTesting(
           PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
-      checkRemoteUgi(server, proxyUserUgi, conf);
+      checkRemoteUgi(proxyUserUgi, conf);
     } catch (Exception e) {
       e.printStackTrace();
       Assert.fail();
     } finally {
-      server.stop();
-      if (proxy != null) {
-        RPC.stopProxy(proxy);
-      }
+      stop(server, client);
     }
   }
 
@@ -218,29 +180,25 @@ public class TestDoAsEffectiveUser {
     conf.setStrings(DefaultImpersonationProvider.getTestProvider().
             getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
         "group1");
-    Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-        .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
-        .setNumHandlers(2).setVerbose(false).build();
+    RPC.setProtocolEngine(conf, TestRpcService.class,
+        ProtobufRpcEngine.class);
+    UserGroupInformation.setConfiguration(conf);
+    final Server server = setupTestServer(conf, 5);
 
     refreshConf(conf);
     try {
-      server.start();
-
       UserGroupInformation realUserUgi = UserGroupInformation
           .createRemoteUser(REAL_USER_NAME);
-      checkRemoteUgi(server, realUserUgi, conf);
+      checkRemoteUgi(realUserUgi, conf);
 
       UserGroupInformation proxyUserUgi = UserGroupInformation
           .createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
-      checkRemoteUgi(server, proxyUserUgi, conf);
+      checkRemoteUgi(proxyUserUgi, conf);
     } catch (Exception e) {
       e.printStackTrace();
       Assert.fail();
     } finally {
-      server.stop();
-      if (proxy != null) {
-        RPC.stopProxy(proxy);
-      }
+      stop(server, client);
     }
   }
 
@@ -256,17 +214,14 @@ public class TestDoAsEffectiveUser {
     conf.setStrings(DefaultImpersonationProvider.getTestProvider().
             getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
         "group1");
-    Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-        .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
-        .setNumHandlers(2).setVerbose(false).build();
+    RPC.setProtocolEngine(conf, TestRpcService.class,
+        ProtobufRpcEngine.class);
+    UserGroupInformation.setConfiguration(conf);
+    final Server server = setupTestServer(conf, 5);
 
     refreshConf(conf);
     
     try {
-      server.start();
-
-      final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-
       UserGroupInformation realUserUgi = UserGroupInformation
           .createRemoteUser(REAL_USER_NAME);
 
@@ -275,11 +230,10 @@ public class TestDoAsEffectiveUser {
       String retVal = proxyUserUgi
           .doAs(new PrivilegedExceptionAction<String>() {
             @Override
-            public String run() throws IOException {
-              proxy = RPC.getProxy(TestProtocol.class,
-                  TestProtocol.versionID, addr, conf);
-              String ret = proxy.aMethod();
-              return ret;
+            public String run() throws ServiceException {
+              client = getClient(addr, conf);
+              return client.getCurrentUser(null,
+                  newEmptyRequest()).getUser();
             }
           });
 
@@ -287,10 +241,7 @@ public class TestDoAsEffectiveUser {
     } catch (Exception e) {
       e.printStackTrace();
     } finally {
-      server.stop();
-      if (proxy != null) {
-        RPC.stopProxy(proxy);
-      }
+      stop(server, client);
     }
   }
   
@@ -299,17 +250,14 @@ public class TestDoAsEffectiveUser {
     final Configuration conf = new Configuration();
     conf.setStrings(DefaultImpersonationProvider.getTestProvider().
         getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
-    Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-        .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
-        .setNumHandlers(2).setVerbose(false).build();
+    RPC.setProtocolEngine(conf, TestRpcService.class,
+        ProtobufRpcEngine.class);
+    UserGroupInformation.setConfiguration(conf);
+    final Server server = setupTestServer(conf, 2);
 
     refreshConf(conf);
 
     try {
-      server.start();
-
-      final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-
       UserGroupInformation realUserUgi = UserGroupInformation
           .createRemoteUser(REAL_USER_NAME);
 
@@ -318,11 +266,10 @@ public class TestDoAsEffectiveUser {
       String retVal = proxyUserUgi
           .doAs(new PrivilegedExceptionAction<String>() {
             @Override
-            public String run() throws IOException {
-              proxy = RPC.getProxy(TestProtocol.class,
-                  TestProtocol.versionID, addr, conf);
-              String ret = proxy.aMethod();
-              return ret;
+            public String run() throws ServiceException {
+              client = getClient(addr, conf);
+              return client.getCurrentUser(null,
+                  newEmptyRequest()).getUser();
             }
           });
 
@@ -330,10 +277,7 @@ public class TestDoAsEffectiveUser {
     } catch (Exception e) {
       e.printStackTrace();
     } finally {
-      server.stop();
-      if (proxy != null) {
-        RPC.stopProxy(proxy);
-      }
+      stop(server, client);
     }
   }
 
@@ -341,15 +285,12 @@ public class TestDoAsEffectiveUser {
   public void testRealUserGroupNotSpecified() throws IOException {
     final Configuration conf = new Configuration();
     configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
-    Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-        .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
-        .setNumHandlers(2).setVerbose(false).build();
+    RPC.setProtocolEngine(conf, TestRpcService.class,
+        ProtobufRpcEngine.class);
+    UserGroupInformation.setConfiguration(conf);
+    final Server server = setupTestServer(conf, 2);
 
     try {
-      server.start();
-
-      final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-
       UserGroupInformation realUserUgi = UserGroupInformation
           .createRemoteUser(REAL_USER_NAME);
 
@@ -358,11 +299,10 @@ public class TestDoAsEffectiveUser {
       String retVal = proxyUserUgi
           .doAs(new PrivilegedExceptionAction<String>() {
             @Override
-            public String run() throws IOException {
-              proxy = (TestProtocol) RPC.getProxy(TestProtocol.class,
-                  TestProtocol.versionID, addr, conf);
-              String ret = proxy.aMethod();
-              return ret;
+            public String run() throws ServiceException {
+              client = getClient(addr, conf);
+              return client.getCurrentUser(null,
+                  newEmptyRequest()).getUser();
             }
           });
 
@@ -370,10 +310,7 @@ public class TestDoAsEffectiveUser {
     } catch (Exception e) {
       e.printStackTrace();
     } finally {
-      server.stop();
-      if (proxy != null) {
-        RPC.stopProxy(proxy);
-      }
+      stop(server, client);
     }
   }
   
@@ -384,17 +321,14 @@ public class TestDoAsEffectiveUser {
     conf.setStrings(DefaultImpersonationProvider.getTestProvider().
             getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
         "group3");
-    Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-        .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
-        .setNumHandlers(2).setVerbose(false).build();
+    RPC.setProtocolEngine(conf, TestRpcService.class,
+        ProtobufRpcEngine.class);
+    UserGroupInformation.setConfiguration(conf);
+    final Server server = setupTestServer(conf, 2);
     
     refreshConf(conf);
 
     try {
-      server.start();
-
-      final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-
       UserGroupInformation realUserUgi = UserGroupInformation
           .createRemoteUser(REAL_USER_NAME);
 
@@ -403,11 +337,10 @@ public class TestDoAsEffectiveUser {
       String retVal = proxyUserUgi
           .doAs(new PrivilegedExceptionAction<String>() {
             @Override
-            public String run() throws IOException {
-              proxy = RPC.getProxy(TestProtocol.class,
-                  TestProtocol.versionID, addr, conf);
-              String ret = proxy.aMethod();
-              return ret;
+            public String run() throws ServiceException {
+              client = getClient(addr, conf);
+              return client.getCurrentUser(null,
+                  newEmptyRequest()).getUser();
             }
           });
 
@@ -415,10 +348,7 @@ public class TestDoAsEffectiveUser {
     } catch (Exception e) {
       e.printStackTrace();
     } finally {
-      server.stop();
-      if (proxy != null) {
-        RPC.stopProxy(proxy);
-      }
+      stop(server, client);
     }
   }
 
@@ -432,20 +362,17 @@ public class TestDoAsEffectiveUser {
     final Configuration conf = new Configuration(masterConf);
     TestTokenSecretManager sm = new TestTokenSecretManager();
     SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
+    RPC.setProtocolEngine(conf, TestRpcService.class,
+        ProtobufRpcEngine.class);
     UserGroupInformation.setConfiguration(conf);
-    final Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
-        .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
-        .setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
-
-    server.start();
+    final Server server = setupTestServer(conf, 5, sm);
 
     final UserGroupInformation current = UserGroupInformation
         .createRemoteUser(REAL_USER_NAME);    
-    
-    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
+
     TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
         .getUserName()), new Text("SomeSuperUser"));
-    Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
+    Token<TestTokenIdentifier> token = new Token<>(tokenId,
         sm);
     SecurityUtil.setTokenService(token, addr);
     UserGroupInformation proxyUserUgi = UserGroupInformation
@@ -453,23 +380,19 @@ public class TestDoAsEffectiveUser {
     proxyUserUgi.addToken(token);
     
     refreshConf(conf);
-    
+
     String retVal = proxyUserUgi.doAs(new PrivilegedExceptionAction<String>() {
       @Override
       public String run() throws Exception {
         try {
-          proxy = RPC.getProxy(TestProtocol.class,
-              TestProtocol.versionID, addr, conf);
-          String ret = proxy.aMethod();
-          return ret;
+          client = getClient(addr, conf);
+          return client.getCurrentUser(null,
+              newEmptyRequest()).getUser();
         } catch (Exception e) {
           e.printStackTrace();
           throw e;
         } finally {
-          server.stop();
-          if (proxy != null) {
-            RPC.stopProxy(proxy);
-          }
+          stop(server, client);
         }
       }
     });
@@ -486,42 +409,34 @@ public class TestDoAsEffectiveUser {
     TestTokenSecretManager sm = new TestTokenSecretManager();
     final Configuration newConf = new Configuration(masterConf);
     SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, newConf);
+    // Set RPC engine to protobuf RPC engine
+    RPC.setProtocolEngine(newConf, TestRpcService.class,
+        ProtobufRpcEngine.class);
     UserGroupInformation.setConfiguration(newConf);
-    final Server server = new RPC.Builder(newConf)
-        .setProtocol(TestProtocol.class).setInstance(new TestImpl())
-        .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
-        .setSecretManager(sm).build();
-
-    server.start();
+    final Server server = setupTestServer(newConf, 5, sm);
 
     final UserGroupInformation current = UserGroupInformation
         .createUserForTesting(REAL_USER_NAME, GROUP_NAMES);
     
     refreshConf(newConf);
-    
-    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
+
     TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
         .getUserName()), new Text("SomeSuperUser"));
-    Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
-        sm);
+    Token<TestTokenIdentifier> token = new Token<>(tokenId, sm);
     SecurityUtil.setTokenService(token, addr);
     current.addToken(token);
     String retVal = current.doAs(new PrivilegedExceptionAction<String>() {
       @Override
       public String run() throws Exception {
         try {
-          proxy = RPC.getProxy(TestProtocol.class,
-              TestProtocol.versionID, addr, newConf);
-          String ret = proxy.aMethod();
-          return ret;
+          client = getClient(addr, newConf);
+          return client.getCurrentUser(null,
+              newEmptyRequest()).getUser();
         } catch (Exception e) {
           e.printStackTrace();
           throw e;
         } finally {
-          server.stop();
-          if (proxy != null) {
-            RPC.stopProxy(proxy);
-          }
+          stop(server, client);
         }
       }
     });
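Most of the tests above now share the same setup; the following is a hedged, condensed sketch of that recurring pattern, assuming it runs as a TestRpcBase subclass (setupTestServer, getClient, newEmptyRequest, stop and the shared addr field all appear in the diff; the class name and the assertion are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.ProtobufRpcEngine;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.ipc.Server;
    import org.apache.hadoop.ipc.TestRpcBase;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.junit.Assert;
    import org.junit.Test;

    public class ProtobufRpcSetupSketch extends TestRpcBase {
      @Test
      public void currentUserRoundTrip() throws Exception {
        Configuration conf = new Configuration();
        // The three steps the patch repeats before every server start.
        RPC.setProtocolEngine(conf, TestRpcService.class,
            ProtobufRpcEngine.class);
        UserGroupInformation.setConfiguration(conf);
        final Server server = setupTestServer(conf, 2); // also sets 'addr'
        TestRpcService client = null;
        try {
          client = getClient(addr, conf);
          String user = client.getCurrentUser(null,
              newEmptyRequest()).getUser();
          Assert.assertEquals(
              UserGroupInformation.getCurrentUser().toString(), user);
        } finally {
          stop(server, client);
        }
      }
    }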

+ 23 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java

@@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.TestRpcBase.TestTokenIdentifier;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
@@ -28,7 +29,11 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
-import org.junit.*;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
 
 import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;
@@ -50,9 +55,22 @@ import java.util.Set;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
-import static org.apache.hadoop.ipc.TestSaslRPC.*;
-import static org.apache.hadoop.test.MetricsAsserts.*;
-import static org.junit.Assert.*;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
+import static org.apache.hadoop.test.MetricsAsserts.assertGaugeGt;
+import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
+import static org.apache.hadoop.test.MetricsAsserts.getDoubleGauge;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -109,7 +127,7 @@ public class TestUserGroupInformation {
     UserGroupInformation.setLoginUser(null);
   }
 
-  @Test (timeout = 30000)
+  @Test(timeout = 30000)
   public void testSimpleLogin() throws IOException {
     tryLoginAuthenticationMethod(AuthenticationMethod.SIMPLE, true);
   }

+ 6 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSysInfoWindows.java

@@ -141,10 +141,15 @@ public class TestSysInfoWindows {
   @Test(timeout = 10000)
   public void errorInGetSystemInfo() {
     SysInfoWindowsMock tester = new SysInfoWindowsMock();
-    // info str derived from windows shell command has \r\n termination
+    // info str derived from windows shell command is null
     tester.setSysinfoString(null);
     // call a method to refresh values
     tester.getAvailablePhysicalMemorySize();
+
+    // info str derived from windows shell command with no \r\n termination
+    tester.setSysinfoString("");
+    // call a method to refresh values
+    tester.getAvailablePhysicalMemorySize();
   }
 
 }

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/proto/test.proto

@@ -88,6 +88,6 @@ message AuthMethodResponseProto {
   required string mechanismName = 2;
 }
 
-message AuthUserResponseProto {
-  required string authUser = 1;
+message UserResponseProto {
+  required string user = 1;
 }

+ 7 - 1
hadoop-common-project/hadoop-common/src/test/proto/test_rpc_service.proto

@@ -40,9 +40,11 @@ service TestProtobufRpcProto {
   rpc exchange(ExchangeRequestProto) returns (ExchangeResponseProto);
   rpc sleep(SleepRequestProto) returns (EmptyResponseProto);
   rpc getAuthMethod(EmptyRequestProto) returns (AuthMethodResponseProto);
-  rpc getAuthUser(EmptyRequestProto) returns (AuthUserResponseProto);
+  rpc getAuthUser(EmptyRequestProto) returns (UserResponseProto);
   rpc echoPostponed(EchoRequestProto) returns (EchoResponseProto);
   rpc sendPostponed(EmptyRequestProto) returns (EmptyResponseProto);
+  rpc getCurrentUser(EmptyRequestProto) returns (UserResponseProto);
+  rpc getServerRemoteUser(EmptyRequestProto) returns (UserResponseProto);
 }
 
 service TestProtobufRpc2Proto {
@@ -65,3 +67,7 @@ service NewerProtobufRpcProto {
   rpc ping(EmptyRequestProto) returns (EmptyResponseProto);
   rpc echo(EmptyRequestProto) returns (EmptyResponseProto);
 }
+
+service CustomProto {
+  rpc ping(EmptyRequestProto) returns (EmptyResponseProto);
+}
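The two new RPCs return the renamed UserResponseProto. Below is a hedged sketch of what server-side handlers could look like; only the message type, Server.getRemoteUser() and UserGroupInformation come from the surrounding diffs, while the enclosing class, the java package of the generated TestProtos and the omitted service wiring are assumptions:

    import java.io.IOException;
    import org.apache.hadoop.ipc.Server;
    import org.apache.hadoop.ipc.protobuf.TestProtos;
    import org.apache.hadoop.security.UserGroupInformation;

    public class UserRpcHandlerSketch {
      TestProtos.UserResponseProto getCurrentUser() throws IOException {
        // The user the server-side handler is currently executing as.
        return TestProtos.UserResponseProto.newBuilder()
            .setUser(UserGroupInformation.getCurrentUser().toString())
            .build();
      }

      TestProtos.UserResponseProto getServerRemoteUser() {
        // Only meaningful when called from inside an RPC handler thread.
        return TestProtos.UserResponseProto.newBuilder()
            .setUser(Server.getRemoteUser().toString())
            .build();
      }
    }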

+ 40 - 0
hadoop-common-project/hadoop-common/src/test/scripts/hadoop_add_client_opts.bats

@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_subcommand_opts (daemonization false)" {
+  HADOOP_OPTS="1"
+  HADOOP_CLIENT_OPTS="2"
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION="false"
+  hadoop_add_client_opts
+  [ "${HADOOP_OPTS}" = "1 2" ]
+}
+
+@test "hadoop_subcommand_opts (daemonization true)" {
+  HADOOP_OPTS="1"
+  HADOOP_CLIENT_OPTS="2"
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+  hadoop_add_client_opts
+  [ "${HADOOP_OPTS}" = "1" ]
+}
+
+@test "hadoop_subcommand_opts (daemonization empty)" {
+  HADOOP_OPTS="1"
+  HADOOP_CLIENT_OPTS="2"
+  unset HADOOP_SUBCMD_SUPPORTDAEMONIZATION
+  hadoop_add_client_opts
+  [ "${HADOOP_OPTS}" = "1 2" ]
+}

+ 68 - 0
hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_opts.bats

@@ -0,0 +1,68 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_subcommand_opts (missing param)" {
+  HADOOP_OPTS="x"
+  run hadoop_subcommand_opts testvar
+  [ "${status}" = "1" ]
+}
+
+@test "hadoop_subcommand_opts (simple not exist)" {
+  HADOOP_OPTS="x"
+  hadoop_subcommand_opts hadoop subcommand
+  [ "${HADOOP_OPTS}" = "x" ]
+}
+
+@test "hadoop_subcommand_opts (hadoop simple exist)" {
+  HADOOP_OPTS="x"
+  HADOOP_TEST_OPTS="y"
+  hadoop_subcommand_opts hadoop test
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y" ]
+}
+
+@test "hadoop_subcommand_opts (hadoop complex exist)" {
+  HADOOP_OPTS="x"
+  HADOOP_TEST_OPTS="y z"
+  hadoop_subcommand_opts hadoop test
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y z" ]
+}
+
+@test "hadoop_subcommand_opts (hdfs simple exist)" {
+  HADOOP_OPTS="x"
+  HDFS_TEST_OPTS="y"
+  hadoop_subcommand_opts hdfs test
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y" ]
+}
+
+@test "hadoop_subcommand_opts (yarn simple exist)" {
+  HADOOP_OPTS="x"
+  YARN_TEST_OPTS="y"
+  hadoop_subcommand_opts yarn test
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y" ]
+}
+
+@test "hadoop_subcommand_opts (deprecation case)" {
+  HADOOP_OPTS="x"
+  HADOOP_NAMENODE_OPTS="y"
+  hadoop_subcommand_opts hdfs namenode
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y" ]
+}

+ 52 - 0
hadoop-common-project/hadoop-common/src/test/scripts/hadoop_subcommand_secure_opts.bats

@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_subcommand_secure_opts (missing param)" {
+  HADOOP_OPTS="x"
+  run hadoop_subcommand_secure_opts testvar
+  [ "${status}" = "1" ]
+}
+
+@test "hadoop_subcommand_secure_opts (simple not exist)" {
+  HADOOP_OPTS="x"
+  hadoop_subcommand_secure_opts hadoop subcommand
+  [ "${HADOOP_OPTS}" = "x" ]
+}
+
+@test "hadoop_subcommand_secure_opts (hadoop simple exist)" {
+  HADOOP_OPTS="x"
+  HADOOP_TEST_SECURE_EXTRA_OPTS="y"
+  hadoop_subcommand_secure_opts hadoop test
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y" ]
+}
+
+@test "hadoop_subcommand_secure_opts (hadoop complex exist)" {
+  HADOOP_OPTS="x"
+  HADOOP_TEST_SECURE_EXTRA_OPTS="y z"
+  hadoop_subcommand_secure_opts hadoop test
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y z" ]
+}
+
+@test "hadoop_subcommand_secure_opts (hdfs simple exist)" {
+  HADOOP_OPTS="x"
+  HDFS_TEST_SECURE_EXTRA_OPTS="y"
+  hadoop_subcommand_secure_opts hdfs test
+  echo "${HADOOP_OPTS}"
+  [ "${HADOOP_OPTS}" = "x y" ]
+}

+ 53 - 0
hadoop-common-project/hadoop-common/src/test/scripts/hadoop_verify_user.bats

@@ -0,0 +1,53 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+@test "hadoop_verify_user (hadoop: no setting)" {
+  run hadoop_verify_user hadoop test
+  [ "${status}" = "0" ]
+}
+
+@test "hadoop_verify_user (yarn: no setting)" {
+  run hadoop_verify_user yarn test
+  [ "${status}" = "0" ]
+}
+
+@test "hadoop_verify_user (hadoop: allow)" {
+  HADOOP_TEST_USER=${USER}
+  run hadoop_verify_user hadoop test
+  [ "${status}" = "0" ]
+}
+
+@test "hadoop_verify_user (yarn: allow)" {
+  YARN_TEST_USER=${USER}
+  run hadoop_verify_user yarn test
+  [ "${status}" = "0" ]
+}
+
+# colon isn't a valid username, so let's use it
+# this should fail regardless of who the user is
+# that is running the test code
+@test "hadoop_verify_user (hadoop: disallow)" {
+  HADOOP_TEST_USER=:
+  run hadoop_verify_user hadoop test
+  [ "${status}" = "1" ]
+}
+
+@test "hadoop_verify_user (yarn: disallow)" {
+  YARN_TEST_USER=:
+  run hadoop_verify_user yarn test
+  [ "${status}" = "1" ]
+}

+ 2 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.http.HttpServer2;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.PropertyConfigurator;
@@ -121,6 +122,7 @@ public class KMSWebApp implements ServletContextListener {
       }
       kmsConf = KMSConfiguration.getKMSConf();
       initLogging(confDir);
+      UserGroupInformation.setConfiguration(kmsConf);
       LOG.info("-------------------------------------------------------------");
       LOG.info("  Java runtime version : {}", System.getProperty(
           "java.runtime.version"));

+ 5 - 5
hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm

@@ -37,10 +37,10 @@ KMS Client Configuration
 The KMS client `KeyProvider` uses the **kms** scheme, and the embedded URL must be the URL of the KMS. For example, for a KMS running on `http://localhost:9600/kms`, the KeyProvider URI is `kms://http@localhost:9600/kms`. And, for a KMS running on `https://localhost:9600/kms`, the KeyProvider URI is `kms://https@localhost:9600/kms`
 
 The following is an example to configure HDFS NameNode as a KMS client in
-`hdfs-site.xml`:
+`core-site.xml`:
 
     <property>
-      <name>dfs.encryption.key.provider.uri</name>
+      <name>hadoop.security.key.provider.path</name>
       <value>kms://http@localhost:9600/kms</value>
       <description>
         The KeyProvider to use when interacting with encryption keys used
@@ -664,15 +664,15 @@ is to use LoadBalancingKMSClientProvider. Using this approach, a KMS client
 (for example, a HDFS NameNode) is aware of multiple KMS instances, and it sends
 requests to them in a round-robin fashion. LoadBalancingKMSClientProvider is
 implicitly used when more than one URI is specified in
-`dfs.encryption.key.provider.uri`.
+`hadoop.security.key.provider.path`.
 
-The following example in `hdfs-site.xml` configures two KMS
+The following example in `core-site.xml` configures two KMS
 instances, `kms01.example.com` and `kms02.example.com`.
 The hostnames are separated by semi-colons, and all KMS instances must run
 on the same port.
 
     <property>
-      <name>dfs.encryption.key.provider.uri</name>
+      <name>hadoop.security.key.provider.path</name>
       <value>kms://https@kms01.example.com;kms02.example.com:9600/kms</value>
       <description>
         The KeyProvider to use when interacting with encryption keys used
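The documentation change only swaps the property name and the file it lives in. As a hedged illustration (not from the patch) of how a client resolves that setting, KeyProviderFactory reads hadoop.security.key.provider.path and instantiates the kms:// provider; the URL and the printing loop below are illustrative:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class KmsProviderSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The renamed, core-site.xml based setting from the doc change.
        conf.set("hadoop.security.key.provider.path",
            "kms://http@localhost:9600/kms");
        // With several semicolon-separated hosts this is expected to resolve
        // to a LoadBalancingKMSClientProvider, as described above.
        List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
        for (KeyProvider provider : providers) {
          System.out.println(provider);
        }
      }
    }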

+ 40 - 36
hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java

@@ -143,11 +143,31 @@ public class TestKMS {
   }
 
   protected Configuration createBaseKMSConf(File keyStoreDir) throws Exception {
-    Configuration conf = new Configuration(false);
-    conf.set(KMSConfiguration.KEY_PROVIDER_URI,
+    return createBaseKMSConf(keyStoreDir, null);
+  }
+
+  /**
+   * The Configuration object is shared by both KMS client and server in unit
+   * tests because UGI gets/sets it to a static variable.
+   * As a workaround, make sure the client configurations are copied to server
+   * so that client can read them.
+   * @param keyStoreDir where keystore is located.
+   * @param conf KMS client configuration
+   * @return KMS server configuration based on client.
+   * @throws Exception
+   */
+  protected Configuration createBaseKMSConf(File keyStoreDir,
+      Configuration conf) throws Exception {
+    Configuration newConf;
+    if (conf == null) {
+      newConf = new Configuration(false);
+    } else {
+      newConf = new Configuration(conf);
+    }
+    newConf.set(KMSConfiguration.KEY_PROVIDER_URI,
         "jceks://file@" + new Path(keyStoreDir.getAbsolutePath(), "kms.keystore").toUri());
-    conf.set("hadoop.kms.authentication.type", "simple");
-    return conf;
+    newConf.set("hadoop.kms.authentication.type", "simple");
+    return newConf;
   }
 
   public static void writeConf(File confDir, Configuration conf)
@@ -280,9 +300,8 @@ public class TestKMS {
     if (kerberos) {
       conf.set("hadoop.security.authentication", "kerberos");
     }
-    UserGroupInformation.setConfiguration(conf);
     File testDir = getTestDir();
-    conf = createBaseKMSConf(testDir);
+    conf = createBaseKMSConf(testDir, conf);
 
     final String keystore;
     final String password;
@@ -404,9 +423,8 @@ public class TestKMS {
     final String specialKey = "key %^[\n{]}|\"<>\\";
     Configuration conf = new Configuration();
     conf.set("hadoop.security.authentication", "kerberos");
-    UserGroupInformation.setConfiguration(conf);
     File confDir = getTestDir();
-    conf = createBaseKMSConf(confDir);
+    conf = createBaseKMSConf(confDir, conf);
     conf.set(KeyAuthorizationKeyProvider.KEY_ACL + specialKey + ".ALL", "*");
     writeConf(confDir, conf);
 
@@ -439,9 +457,8 @@ public class TestKMS {
   public void testKMSProvider() throws Exception {
     Configuration conf = new Configuration();
     conf.set("hadoop.security.authentication", "kerberos");
-    UserGroupInformation.setConfiguration(conf);
     File confDir = getTestDir();
-    conf = createBaseKMSConf(confDir);
+    conf = createBaseKMSConf(confDir, conf);
     conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
     conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.MANAGEMENT", "*");
     conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.READ", "*");
@@ -699,9 +716,8 @@ public class TestKMS {
   public void testKeyACLs() throws Exception {
     Configuration conf = new Configuration();
     conf.set("hadoop.security.authentication", "kerberos");
-    UserGroupInformation.setConfiguration(conf);
     final File testDir = getTestDir();
-    conf = createBaseKMSConf(testDir);
+    conf = createBaseKMSConf(testDir, conf);
     conf.set("hadoop.kms.authentication.type", "kerberos");
     conf.set("hadoop.kms.authentication.kerberos.keytab",
         keytab.getAbsolutePath());
@@ -977,9 +993,8 @@ public class TestKMS {
   public void doKMSRestart(boolean useKrb) throws Exception {
     Configuration conf = new Configuration();
     conf.set("hadoop.security.authentication", "kerberos");
-    UserGroupInformation.setConfiguration(conf);
     final File testDir = getTestDir();
-    conf = createBaseKMSConf(testDir);
+    conf = createBaseKMSConf(testDir, conf);
     if (useKrb) {
       conf.set("hadoop.kms.authentication.type", "kerberos");
     }
@@ -1057,9 +1072,8 @@ public class TestKMS {
   public void testKMSAuthFailureRetry() throws Exception {
     Configuration conf = new Configuration();
     conf.set("hadoop.security.authentication", "kerberos");
-    UserGroupInformation.setConfiguration(conf);
     final File testDir = getTestDir();
-    conf = createBaseKMSConf(testDir);
+    conf = createBaseKMSConf(testDir, conf);
     conf.set("hadoop.kms.authentication.kerberos.keytab",
         keytab.getAbsolutePath());
     conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
@@ -1151,9 +1165,8 @@ public class TestKMS {
   public void testACLs() throws Exception {
     Configuration conf = new Configuration();
     conf.set("hadoop.security.authentication", "kerberos");
-    UserGroupInformation.setConfiguration(conf);
     final File testDir = getTestDir();
-    conf = createBaseKMSConf(testDir);
+    conf = createBaseKMSConf(testDir, conf);
     conf.set("hadoop.kms.authentication.type", "kerberos");
     conf.set("hadoop.kms.authentication.kerberos.keytab",
         keytab.getAbsolutePath());
@@ -1461,9 +1474,8 @@ public class TestKMS {
   public void testKMSBlackList() throws Exception {
     Configuration conf = new Configuration();
     conf.set("hadoop.security.authentication", "kerberos");
-    UserGroupInformation.setConfiguration(conf);
     File testDir = getTestDir();
-    conf = createBaseKMSConf(testDir);
+    conf = createBaseKMSConf(testDir, conf);
     conf.set("hadoop.kms.authentication.type", "kerberos");
     conf.set("hadoop.kms.authentication.kerberos.keytab",
         keytab.getAbsolutePath());
@@ -1550,9 +1562,8 @@ public class TestKMS {
   public void testServicePrincipalACLs() throws Exception {
     Configuration conf = new Configuration();
     conf.set("hadoop.security.authentication", "kerberos");
-    UserGroupInformation.setConfiguration(conf);
     File testDir = getTestDir();
-    conf = createBaseKMSConf(testDir);
+    conf = createBaseKMSConf(testDir, conf);
     conf.set("hadoop.kms.authentication.type", "kerberos");
     conf.set("hadoop.kms.authentication.kerberos.keytab",
         keytab.getAbsolutePath());
@@ -1676,9 +1687,8 @@ public class TestKMS {
   public void testDelegationTokenAccess() throws Exception {
     Configuration conf = new Configuration();
     conf.set("hadoop.security.authentication", "kerberos");
-    UserGroupInformation.setConfiguration(conf);
     final File testDir = getTestDir();
-    conf = createBaseKMSConf(testDir);
+    conf = createBaseKMSConf(testDir, conf);
     conf.set("hadoop.kms.authentication.type", "kerberos");
     conf.set("hadoop.kms.authentication.kerberos.keytab",
         keytab.getAbsolutePath());
@@ -1759,9 +1769,8 @@ public class TestKMS {
 
   private void testDelegationTokensOps(Configuration conf,
       final boolean useKrb) throws Exception {
-    UserGroupInformation.setConfiguration(conf);
     File confDir = getTestDir();
-    conf = createBaseKMSConf(confDir);
+    conf = createBaseKMSConf(confDir, conf);
     if (useKrb) {
       conf.set("hadoop.kms.authentication.type", "kerberos");
       conf.set("hadoop.kms.authentication.kerberos.keytab",
@@ -1885,9 +1894,8 @@ public class TestKMS {
   @Test
   public void testDelegationTokensUpdatedInUGI() throws Exception {
     Configuration conf = new Configuration();
-    UserGroupInformation.setConfiguration(conf);
     File confDir = getTestDir();
-    conf = createBaseKMSConf(confDir);
+    conf = createBaseKMSConf(confDir, conf);
     conf.set(
         "hadoop.kms.authentication.delegation-token.max-lifetime.sec", "5");
     conf.set(
@@ -2024,9 +2032,8 @@ public class TestKMS {
 
       Configuration conf = new Configuration();
       conf.set("hadoop.security.authentication", "kerberos");
-      UserGroupInformation.setConfiguration(conf);
       final File testDir = getTestDir();
-      conf = createBaseKMSConf(testDir);
+      conf = createBaseKMSConf(testDir, conf);
       conf.set("hadoop.kms.authentication.type", "kerberos");
       conf.set("hadoop.kms.authentication.kerberos.keytab", keytab.getAbsolutePath());
       conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
@@ -2114,9 +2121,8 @@ public class TestKMS {
   public void doProxyUserTest(final boolean kerberos) throws Exception {
     Configuration conf = new Configuration();
     conf.set("hadoop.security.authentication", "kerberos");
-    UserGroupInformation.setConfiguration(conf);
     final File testDir = getTestDir();
-    conf = createBaseKMSConf(testDir);
+    conf = createBaseKMSConf(testDir, conf);
     if (kerberos) {
       conf.set("hadoop.kms.authentication.type", "kerberos");
     }
@@ -2226,9 +2232,8 @@ public class TestKMS {
 
     Configuration conf = new Configuration();
     conf.set("hadoop.security.authentication", "kerberos");
-    UserGroupInformation.setConfiguration(conf);
     final File testDir = getTestDir();
-    conf = createBaseKMSConf(testDir);
+    conf = createBaseKMSConf(testDir, conf);
     conf.set("hadoop.kms.authentication.type", "kerberos");
     conf.set("hadoop.kms.authentication.kerberos.keytab",
         keytab.getAbsolutePath());
@@ -2286,9 +2291,8 @@ public class TestKMS {
   public void doWebHDFSProxyUserTest(final boolean kerberos) throws Exception {
     Configuration conf = new Configuration();
     conf.set("hadoop.security.authentication", "kerberos");
-    UserGroupInformation.setConfiguration(conf);
     final File testDir = getTestDir();
-    conf = createBaseKMSConf(testDir);
+    conf = createBaseKMSConf(testDir, conf);
     if (kerberos) {
       conf.set("hadoop.kms.authentication.type", "kerberos");
     }
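A hedged, condensed sketch of the configuration pattern TestKMS converges on (createBaseKMSConf, getTestDir and writeConf come from the diff; the wrapper class is illustrative): client settings go into conf first, are copied into the server configuration, and UserGroupInformation.setConfiguration() is left to KMSWebApp at server start-up instead of being called in every test.

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;

    public class KmsConfSketch extends TestKMS {
      void sketch() throws Exception {
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");
        File testDir = getTestDir();
        // Copies the client settings into the server config so both sides,
        // which share one Configuration via UGI in these tests, see them.
        conf = createBaseKMSConf(testDir, conf);
        writeConf(testDir, conf);
      }
    }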

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs-client/pom.xml

@@ -107,6 +107,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>test</scope>
       <type>test-jar</type>
     </dependency>
+      <dependency>
+          <groupId>com.fasterxml.jackson.core</groupId>
+          <artifactId>jackson-annotations</artifactId>
+      </dependency>
   </dependencies>
 
   <build>

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -1710,6 +1710,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
+  @VisibleForTesting
+  public DataEncryptionKey getEncryptionKey() {
+    return encryptionKey;
+  }
+
   /**
    * Get the checksum of the whole file or a range of the file. Note that the
    * range always starts from the beginning of the file. The file can be

+ 11 - 9
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java

@@ -240,7 +240,7 @@ public class DFSInputStream extends FSInputStream
       Iterator<LocatedBlock> oldIter = locatedBlocks.getLocatedBlocks().iterator();
       Iterator<LocatedBlock> newIter = newInfo.getLocatedBlocks().iterator();
       while (oldIter.hasNext() && newIter.hasNext()) {
-        if (! oldIter.next().getBlock().equals(newIter.next().getBlock())) {
+        if (!oldIter.next().getBlock().equals(newIter.next().getBlock())) {
           throw new IOException("Blocklist for " + src + " has changed!");
         }
       }
@@ -677,8 +677,8 @@ public class DFSInputStream extends FSInputStream
     if (oneByteBuf == null) {
       oneByteBuf = new byte[1];
     }
-    int ret = read( oneByteBuf, 0, 1 );
-    return ( ret <= 0 ) ? -1 : (oneByteBuf[0] & 0xff);
+    int ret = read(oneByteBuf, 0, 1);
+    return (ret <= 0) ? -1 : (oneByteBuf[0] & 0xff);
   }
 
   /* This is a used by regular read() and handles ChecksumExceptions.
@@ -702,7 +702,7 @@ public class DFSInputStream extends FSInputStream
       // retry as many times as seekToNewSource allows.
       try {
         return reader.readFromBlock(blockReader, len);
-      } catch ( ChecksumException ce ) {
+      } catch (ChecksumException ce) {
         DFSClient.LOG.warn("Found Checksum error for "
             + getCurrentBlock() + " from " + currentNode
             + " at " + ce.getPos());
@@ -710,7 +710,7 @@ public class DFSInputStream extends FSInputStream
         retryCurrentNode = false;
         // we want to remember which block replicas we have tried
         corruptedBlocks.addCorruptedBlock(getCurrentBlock(), currentNode);
-      } catch ( IOException e ) {
+      } catch (IOException e) {
         if (!retryCurrentNode) {
           DFSClient.LOG.warn("Exception while reading from "
               + getCurrentBlock() + " of " + src + " from "
@@ -779,7 +779,9 @@ public class DFSInputStream extends FSInputStream
             DFSClient.LOG.warn("DFS Read", e);
           }
           blockEnd = -1;
-          if (currentNode != null) { addToDeadNodes(currentNode); }
+          if (currentNode != null) {
+            addToDeadNodes(currentNode);
+          }
           if (--retries == 0) {
             throw e;
           }
@@ -1397,10 +1399,10 @@ public class DFSInputStream extends FSInputStream
 
   @Override
   public long skip(long n) throws IOException {
-    if ( n > 0 ) {
+    if (n > 0) {
       long curPos = getPos();
       long fileLen = getFileLength();
-      if( n+curPos > fileLen ) {
+      if (n+curPos > fileLen) {
         n = fileLen - curPos;
       }
       seek(curPos+n);
@@ -1550,7 +1552,7 @@ public class DFSInputStream extends FSInputStream
    * Get statistics about the reads which this DFSInputStream has done.
    */
   public ReadStatistics getReadStatistics() {
-    return new ReadStatistics(readStatistics);
+    return readStatistics;
   }
 
   /**

+ 104 - 550
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java

@@ -17,24 +17,21 @@
  */
 package org.apache.hadoop.hdfs;
 
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
 import org.apache.hadoop.hdfs.DFSUtilClient.CorruptedBlocks;
+import org.apache.hadoop.hdfs.StripeReader.BlockReaderInfo;
+import org.apache.hadoop.hdfs.StripeReader.ReaderRetryPolicy;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil.AlignedStripe;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil.StripeRange;
 import org.apache.hadoop.io.ByteBufferPool;
 
-import static org.apache.hadoop.hdfs.util.StripedBlockUtil.AlignedStripe;
-import static org.apache.hadoop.hdfs.util.StripedBlockUtil.StripingChunk;
-import static org.apache.hadoop.hdfs.util.StripedBlockUtil.StripingChunkReadResult;
-
 import org.apache.hadoop.io.ElasticByteBufferPool;
 import org.apache.hadoop.io.erasurecode.CodecUtil;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -44,7 +41,6 @@ import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 
 import java.io.EOFException;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -53,111 +49,32 @@ import java.util.EnumSet;
 import java.util.List;
 import java.util.Set;
 import java.util.Collection;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.concurrent.CompletionService;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorCompletionService;
-import java.util.concurrent.Callable;
-import java.util.concurrent.Future;
+import java.util.concurrent.ThreadPoolExecutor;
 
 /**
- * DFSStripedInputStream reads from striped block groups
+ * DFSStripedInputStream reads from striped block groups.
  */
 @InterfaceAudience.Private
 public class DFSStripedInputStream extends DFSInputStream {
 
-  private static class ReaderRetryPolicy {
-    private int fetchEncryptionKeyTimes = 1;
-    private int fetchTokenTimes = 1;
-
-    void refetchEncryptionKey() {
-      fetchEncryptionKeyTimes--;
-    }
-
-    void refetchToken() {
-      fetchTokenTimes--;
-    }
-
-    boolean shouldRefetchEncryptionKey() {
-      return fetchEncryptionKeyTimes > 0;
-    }
-
-    boolean shouldRefetchToken() {
-      return fetchTokenTimes > 0;
-    }
-  }
-
-  /** Used to indicate the buffered data's range in the block group */
-  private static class StripeRange {
-    /** start offset in the block group (inclusive) */
-    final long offsetInBlock;
-    /** length of the stripe range */
-    final long length;
-
-    StripeRange(long offsetInBlock, long length) {
-      Preconditions.checkArgument(offsetInBlock >= 0 && length >= 0);
-      this.offsetInBlock = offsetInBlock;
-      this.length = length;
-    }
-
-    boolean include(long pos) {
-      return pos >= offsetInBlock && pos < offsetInBlock + length;
-    }
-  }
-
-  private static class BlockReaderInfo {
-    final BlockReader reader;
-    final DatanodeInfo datanode;
-    /**
-     * when initializing block readers, their starting offsets are set to the same
-     * number: the smallest internal block offsets among all the readers. This is
-     * because it is possible that for some internal blocks we have to read
-     * "backwards" for decoding purpose. We thus use this offset array to track
-     * offsets for all the block readers so that we can skip data if necessary.
-     */
-    long blockReaderOffset;
-    /**
-     * We use this field to indicate whether we should use this reader. In case
-     * we hit any issue with this reader, we set this field to true and avoid
-     * using it for the next stripe.
-     */
-    boolean shouldSkip = false;
-
-    BlockReaderInfo(BlockReader reader, DatanodeInfo dn, long offset) {
-      this.reader = reader;
-      this.datanode = dn;
-      this.blockReaderOffset = offset;
-    }
-
-    void setOffset(long offset) {
-      this.blockReaderOffset = offset;
-    }
-
-    void skip() {
-      this.shouldSkip = true;
-    }
-  }
-
   private static final ByteBufferPool BUFFER_POOL = new ElasticByteBufferPool();
-
   private final BlockReaderInfo[] blockReaders;
   private final int cellSize;
   private final short dataBlkNum;
   private final short parityBlkNum;
   private final int groupSize;
-  /** the buffer for a complete stripe */
+  /** the buffer for a complete stripe. */
   private ByteBuffer curStripeBuf;
   private ByteBuffer parityBuf;
   private final ErasureCodingPolicy ecPolicy;
   private final RawErasureDecoder decoder;
 
   /**
-   * indicate the start/end offset of the current buffered stripe in the
-   * block group
+   * Indicate the start/end offset of the current buffered stripe in the
+   * block group.
    */
   private StripeRange curStripeRange;
-  private final CompletionService<Void> readingService;
 
   /**
    * When warning the user of a lost block in striping mode, we remember the
@@ -167,8 +84,8 @@ public class DFSStripedInputStream extends DFSInputStream {
    *
    * To minimize the overhead, we only store the datanodeUuid in this set
    */
-  private final Set<String> warnedNodes = Collections.newSetFromMap(
-      new ConcurrentHashMap<String, Boolean>());
+  private final Set<String> warnedNodes =
+      Collections.newSetFromMap(new ConcurrentHashMap<>());
 
   DFSStripedInputStream(DFSClient dfsClient, String src,
       boolean verifyChecksum, ErasureCodingPolicy ecPolicy,
@@ -183,8 +100,6 @@ public class DFSStripedInputStream extends DFSInputStream {
     groupSize = dataBlkNum + parityBlkNum;
     blockReaders = new BlockReaderInfo[groupSize];
     curStripeRange = new StripeRange(0, 0);
-    readingService =
-        new ExecutorCompletionService<>(dfsClient.getStripedReadsThreadPool());
     ErasureCoderOptions coderOptions = new ErasureCoderOptions(
         dataBlkNum, parityBlkNum);
     decoder = CodecUtil.createRawDecoder(dfsClient.getConfiguration(),
@@ -198,7 +113,7 @@ public class DFSStripedInputStream extends DFSInputStream {
     return decoder.preferDirectBuffer();
   }
 
-  private void resetCurStripeBuffer() {
+  void resetCurStripeBuffer() {
     if (curStripeBuf == null) {
       curStripeBuf = BUFFER_POOL.getBuffer(useDirectBuffer(),
           cellSize * dataBlkNum);
@@ -207,7 +122,7 @@ public class DFSStripedInputStream extends DFSInputStream {
     curStripeRange = new StripeRange(0, 0);
   }
 
-  private ByteBuffer getParityBuffer() {
+  protected ByteBuffer getParityBuffer() {
     if (parityBuf == null) {
       parityBuf = BUFFER_POOL.getBuffer(useDirectBuffer(),
           cellSize * parityBlkNum);
@@ -216,6 +131,29 @@ public class DFSStripedInputStream extends DFSInputStream {
     return parityBuf;
   }
 
+  protected ByteBuffer getCurStripeBuf() {
+    return curStripeBuf;
+  }
+
+  protected String getSrc() {
+    return src;
+  }
+
+  protected DFSClient getDFSClient() {
+    return dfsClient;
+  }
+
+  protected LocatedBlocks getLocatedBlocks() {
+    return locatedBlocks;
+  }
+
+  protected ByteBufferPool getBufferPool() {
+    return BUFFER_POOL;
+  }
+
+  protected ThreadPoolExecutor getStripedReadsThreadPool() {
+    return dfsClient.getStripedReadsThreadPool();
+  }
+
   /**
    * When seeking into a new block group, create blockReader for each internal
    * block in the group.
@@ -268,7 +206,7 @@ public class DFSStripedInputStream extends DFSInputStream {
     blockEnd = -1;
   }
 
-  private void closeReader(BlockReaderInfo readerInfo) {
+  protected void closeReader(BlockReaderInfo readerInfo) {
     if (readerInfo != null) {
       if (readerInfo.reader != null) {
         try {
@@ -288,6 +226,59 @@ public class DFSStripedInputStream extends DFSInputStream {
     return pos - currentLocatedBlock.getStartOffset();
   }
 
+  boolean createBlockReader(LocatedBlock block, long offsetInBlock,
+      LocatedBlock[] targetBlocks, BlockReaderInfo[] readerInfos,
+      int chunkIndex) throws IOException {
+    BlockReader reader = null;
+    final ReaderRetryPolicy retry = new ReaderRetryPolicy();
+    DFSInputStream.DNAddrPair dnInfo =
+        new DFSInputStream.DNAddrPair(null, null, null);
+
+    while (true) {
+      try {
+        // the cached block location might have been re-fetched, so always
+        // get it from cache.
+        block = refreshLocatedBlock(block);
+        targetBlocks[chunkIndex] = block;
+
+        // internal block has one location, just rule out the deadNodes
+        dnInfo = getBestNodeDNAddrPair(block, null);
+        if (dnInfo == null) {
+          break;
+        }
+        reader = getBlockReader(block, offsetInBlock,
+            block.getBlockSize() - offsetInBlock,
+            dnInfo.addr, dnInfo.storageType, dnInfo.info);
+      } catch (IOException e) {
+        if (e instanceof InvalidEncryptionKeyException &&
+            retry.shouldRefetchEncryptionKey()) {
+          DFSClient.LOG.info("Will fetch a new encryption key and retry, "
+              + "encryption key was invalid when connecting to " + dnInfo.addr
+              + " : " + e);
+          dfsClient.clearDataEncryptionKey();
+          retry.refetchEncryptionKey();
+        } else if (retry.shouldRefetchToken() &&
+            tokenRefetchNeeded(e, dnInfo.addr)) {
+          fetchBlockAt(block.getStartOffset());
+          retry.refetchToken();
+        } else {
+          // TODO: handle connection issues
+          DFSClient.LOG.warn("Failed to connect to " + dnInfo.addr + " for " +
+              "block " + block.getBlock(), e);
+          // re-fetch the block in case the block has been moved
+          fetchBlockAt(block.getStartOffset());
+          addToDeadNodes(dnInfo.info);
+        }
+      }
+      if (reader != null) {
+        readerInfos[chunkIndex] =
+            new BlockReaderInfo(reader, dnInfo.info, offsetInBlock);
+        return true;
+      }
+    }
+    return false;
+  }
+
   /**
    * Read a new stripe covering the current position, and store the data in the
    * {@link #curStripeBuf}.
@@ -303,20 +294,20 @@ public class DFSStripedInputStream extends DFSInputStream {
     final int stripeBufOffset = (int) (offsetInBlockGroup % stripeLen);
     final int stripeLimit = (int) Math.min(currentLocatedBlock.getBlockSize()
         - (stripeIndex * stripeLen), stripeLen);
-    StripeRange stripeRange = new StripeRange(offsetInBlockGroup,
-        stripeLimit - stripeBufOffset);
+    StripeRange stripeRange =
+        new StripeRange(offsetInBlockGroup, stripeLimit - stripeBufOffset);
 
     LocatedStripedBlock blockGroup = (LocatedStripedBlock) currentLocatedBlock;
     AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(ecPolicy,
         cellSize, blockGroup, offsetInBlockGroup,
-        offsetInBlockGroup + stripeRange.length - 1, curStripeBuf);
+        offsetInBlockGroup + stripeRange.getLength() - 1, curStripeBuf);
     final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
         blockGroup, cellSize, dataBlkNum, parityBlkNum);
     // read the whole stripe
     for (AlignedStripe stripe : stripes) {
       // Parse group to get chosen DN location
-      StripeReader sreader = new StatefulStripeReader(readingService, stripe,
-          blks, blockReaders, corruptedBlocks);
+      StripeReader sreader = new StatefulStripeReader(stripe, ecPolicy, blks,
+          blockReaders, corruptedBlocks, decoder, this);
       sreader.readStripe();
     }
     curStripeBuf.position(stripeBufOffset);
@@ -324,69 +315,8 @@ public class DFSStripedInputStream extends DFSInputStream {
     curStripeRange = stripeRange;
   }
 
-  private Callable<Void> readCells(final BlockReader reader,
-      final DatanodeInfo datanode, final long currentReaderOffset,
-      final long targetReaderOffset, final ByteBufferStrategy[] strategies,
-      final ExtendedBlock currentBlock,
-      final CorruptedBlocks corruptedBlocks) {
-    return new Callable<Void>() {
-      @Override
-      public Void call() throws Exception {
-        // reader can be null if getBlockReaderWithRetry failed or
-        // the reader hit exception before
-        if (reader == null) {
-          throw new IOException("The BlockReader is null. " +
-              "The BlockReader creation failed or the reader hit exception.");
-        }
-        Preconditions.checkState(currentReaderOffset <= targetReaderOffset);
-        if (currentReaderOffset < targetReaderOffset) {
-          long skipped = reader.skip(targetReaderOffset - currentReaderOffset);
-          Preconditions.checkState(
-              skipped == targetReaderOffset - currentReaderOffset);
-        }
-        int result = 0;
-        for (ByteBufferStrategy strategy : strategies) {
-          result += readToBuffer(reader, datanode, strategy, currentBlock,
-              corruptedBlocks);
-        }
-        return null;
-      }
-    };
-  }
-
-  private int readToBuffer(BlockReader blockReader,
-      DatanodeInfo currentNode, ByteBufferStrategy strategy,
-      ExtendedBlock currentBlock,
-      CorruptedBlocks corruptedBlocks)
-      throws IOException {
-    final int targetLength = strategy.getTargetLength();
-    int length = 0;
-    try {
-      while (length < targetLength) {
-        int ret = strategy.readFromBlock(blockReader);
-        if (ret < 0) {
-          throw new IOException("Unexpected EOS from the reader");
-        }
-        length += ret;
-      }
-      return length;
-    } catch (ChecksumException ce) {
-      DFSClient.LOG.warn("Found Checksum error for "
-          + currentBlock + " from " + currentNode
-          + " at " + ce.getPos());
-      // we want to remember which block replicas we have tried
-      corruptedBlocks.addCorruptedBlock(currentBlock, currentNode);
-      throw ce;
-    } catch (IOException e) {
-      DFSClient.LOG.warn("Exception while reading from "
-          + currentBlock + " of " + src + " from "
-          + currentNode, e);
-      throw e;
-    }
-  }
-
   /**
-   * Seek to a new arbitrary location
+   * Seek to a new arbitrary location.
    */
   @Override
   public synchronized void seek(long targetPos) throws IOException {
@@ -469,7 +399,7 @@ public class DFSStripedInputStream extends DFSInputStream {
   }
 
   /**
-   * Copy the data from {@link #curStripeBuf} into the given buffer
+   * Copy the data from {@link #curStripeBuf} into the given buffer.
    * @param strategy the ReaderStrategy containing the given buffer
    * @param length target length
    * @return number of bytes copied
@@ -530,17 +460,19 @@ public class DFSStripedInputStream extends DFSInputStream {
 
     AlignedStripe[] stripes = StripedBlockUtil.divideByteRangeIntoStripes(
         ecPolicy, cellSize, blockGroup, start, end, buf);
-    CompletionService<Void> readService = new ExecutorCompletionService<>(
-        dfsClient.getStripedReadsThreadPool());
     final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
         blockGroup, cellSize, dataBlkNum, parityBlkNum);
     final BlockReaderInfo[] preaderInfos = new BlockReaderInfo[groupSize];
     try {
       for (AlignedStripe stripe : stripes) {
         // Parse group to get chosen DN location
-        StripeReader preader = new PositionStripeReader(readService, stripe,
-            blks, preaderInfos, corruptedBlocks);
-        preader.readStripe();
+        StripeReader preader = new PositionStripeReader(stripe, ecPolicy, blks,
+            preaderInfos, corruptedBlocks, decoder, this);
+        try {
+          preader.readStripe();
+        } finally {
+          preader.close();
+        }
       }
       buf.position(buf.position() + (int)(end - start + 1));
     } finally {
@@ -570,376 +502,6 @@ public class DFSStripedInputStream extends DFSInputStream {
     }
   }
 
-  /**
-   * The reader for reading a complete {@link AlignedStripe}. Note that an
-   * {@link AlignedStripe} may cross multiple stripes with cellSize width.
-   */
-  private abstract class StripeReader {
-    final Map<Future<Void>, Integer> futures = new HashMap<>();
-    final AlignedStripe alignedStripe;
-    final CompletionService<Void> service;
-    final LocatedBlock[] targetBlocks;
-    final CorruptedBlocks corruptedBlocks;
-    final BlockReaderInfo[] readerInfos;
-
-    StripeReader(CompletionService<Void> service, AlignedStripe alignedStripe,
-        LocatedBlock[] targetBlocks, BlockReaderInfo[] readerInfos,
-                 CorruptedBlocks corruptedBlocks) {
-      this.service = service;
-      this.alignedStripe = alignedStripe;
-      this.targetBlocks = targetBlocks;
-      this.readerInfos = readerInfos;
-      this.corruptedBlocks = corruptedBlocks;
-    }
-
-    /** prepare all the data chunks */
-    abstract void prepareDecodeInputs();
-
-    /** prepare the parity chunk and block reader if necessary */
-    abstract boolean prepareParityChunk(int index);
-
-    abstract void decode();
-
-    void updateState4SuccessRead(StripingChunkReadResult result) {
-      Preconditions.checkArgument(
-          result.state == StripingChunkReadResult.SUCCESSFUL);
-      readerInfos[result.index].setOffset(alignedStripe.getOffsetInBlock()
-          + alignedStripe.getSpanInBlock());
-    }
-
-    private void checkMissingBlocks() throws IOException {
-      if (alignedStripe.missingChunksNum > parityBlkNum) {
-        clearFutures(futures.keySet());
-        throw new IOException(alignedStripe.missingChunksNum
-            + " missing blocks, the stripe is: " + alignedStripe
-            + "; locatedBlocks is: " + locatedBlocks);
-      }
-    }
-
-    /**
-     * We need decoding. Thus go through all the data chunks and make sure we
-     * submit read requests for all of them.
-     */
-    private void readDataForDecoding() throws IOException {
-      prepareDecodeInputs();
-      for (int i = 0; i < dataBlkNum; i++) {
-        Preconditions.checkNotNull(alignedStripe.chunks[i]);
-        if (alignedStripe.chunks[i].state == StripingChunk.REQUESTED) {
-          if (!readChunk(targetBlocks[i], i)) {
-            alignedStripe.missingChunksNum++;
-          }
-        }
-      }
-      checkMissingBlocks();
-    }
-
-    void readParityChunks(int num) throws IOException {
-      for (int i = dataBlkNum, j = 0; i < dataBlkNum + parityBlkNum && j < num;
-           i++) {
-        if (alignedStripe.chunks[i] == null) {
-          if (prepareParityChunk(i) && readChunk(targetBlocks[i], i)) {
-            j++;
-          } else {
-            alignedStripe.missingChunksNum++;
-          }
-        }
-      }
-      checkMissingBlocks();
-    }
-
-    boolean createBlockReader(LocatedBlock block, int chunkIndex)
-        throws IOException {
-      BlockReader reader = null;
-      final ReaderRetryPolicy retry = new ReaderRetryPolicy();
-      DNAddrPair dnInfo = new DNAddrPair(null, null, null);
-
-      while(true) {
-        try {
-          // the cached block location might have been re-fetched, so always
-          // get it from cache.
-          block = refreshLocatedBlock(block);
-          targetBlocks[chunkIndex] = block;
-
-          // internal block has one location, just rule out the deadNodes
-          dnInfo = getBestNodeDNAddrPair(block, null);
-          if (dnInfo == null) {
-            break;
-          }
-          reader = getBlockReader(block, alignedStripe.getOffsetInBlock(),
-              block.getBlockSize() - alignedStripe.getOffsetInBlock(),
-              dnInfo.addr, dnInfo.storageType, dnInfo.info);
-        } catch (IOException e) {
-          if (e instanceof InvalidEncryptionKeyException &&
-              retry.shouldRefetchEncryptionKey()) {
-            DFSClient.LOG.info("Will fetch a new encryption key and retry, "
-                + "encryption key was invalid when connecting to " + dnInfo.addr
-                + " : " + e);
-            dfsClient.clearDataEncryptionKey();
-            retry.refetchEncryptionKey();
-          } else if (retry.shouldRefetchToken() &&
-              tokenRefetchNeeded(e, dnInfo.addr)) {
-            fetchBlockAt(block.getStartOffset());
-            retry.refetchToken();
-          } else {
-            //TODO: handles connection issues
-            DFSClient.LOG.warn("Failed to connect to " + dnInfo.addr + " for " +
-                "block" + block.getBlock(), e);
-            // re-fetch the block in case the block has been moved
-            fetchBlockAt(block.getStartOffset());
-            addToDeadNodes(dnInfo.info);
-          }
-        }
-        if (reader != null) {
-          readerInfos[chunkIndex] = new BlockReaderInfo(reader, dnInfo.info,
-              alignedStripe.getOffsetInBlock());
-          return true;
-        }
-      }
-      return false;
-    }
-
-    private ByteBufferStrategy[] getReadStrategies(StripingChunk chunk) {
-      if (chunk.useByteBuffer()) {
-        ByteBufferStrategy strategy = new ByteBufferStrategy(
-            chunk.getByteBuffer(), readStatistics, dfsClient);
-        return new ByteBufferStrategy[]{strategy};
-      } else {
-        ByteBufferStrategy[] strategies =
-            new ByteBufferStrategy[chunk.getChunkBuffer().getSlices().size()];
-        for (int i = 0; i < strategies.length; i++) {
-          ByteBuffer buffer = chunk.getChunkBuffer().getSlice(i);
-          strategies[i] =
-              new ByteBufferStrategy(buffer, readStatistics, dfsClient);
-        }
-        return strategies;
-      }
-    }
-
-    boolean readChunk(final LocatedBlock block, int chunkIndex)
-        throws IOException {
-      final StripingChunk chunk = alignedStripe.chunks[chunkIndex];
-      if (block == null) {
-        chunk.state = StripingChunk.MISSING;
-        return false;
-      }
-      if (readerInfos[chunkIndex] == null) {
-        if (!createBlockReader(block, chunkIndex)) {
-          chunk.state = StripingChunk.MISSING;
-          return false;
-        }
-      } else if (readerInfos[chunkIndex].shouldSkip) {
-        chunk.state = StripingChunk.MISSING;
-        return false;
-      }
-
-      chunk.state = StripingChunk.PENDING;
-      Callable<Void> readCallable = readCells(readerInfos[chunkIndex].reader,
-          readerInfos[chunkIndex].datanode,
-          readerInfos[chunkIndex].blockReaderOffset,
-          alignedStripe.getOffsetInBlock(), getReadStrategies(chunk),
-          block.getBlock(), corruptedBlocks);
-
-      Future<Void> request = service.submit(readCallable);
-      futures.put(request, chunkIndex);
-      return true;
-    }
-
-    /** read the whole stripe. do decoding if necessary */
-    void readStripe() throws IOException {
-      for (int i = 0; i < dataBlkNum; i++) {
-        if (alignedStripe.chunks[i] != null &&
-            alignedStripe.chunks[i].state != StripingChunk.ALLZERO) {
-          if (!readChunk(targetBlocks[i], i)) {
-            alignedStripe.missingChunksNum++;
-          }
-        }
-      }
-      // There are missing block locations at this stage. Thus we need to read
-      // the full stripe and one more parity block.
-      if (alignedStripe.missingChunksNum > 0) {
-        checkMissingBlocks();
-        readDataForDecoding();
-        // read parity chunks
-        readParityChunks(alignedStripe.missingChunksNum);
-      }
-      // TODO: for a full stripe we can start reading (dataBlkNum + 1) chunks
-
-      // Input buffers for potential decode operation, which remains null until
-      // first read failure
-      while (!futures.isEmpty()) {
-        try {
-          StripingChunkReadResult r = StripedBlockUtil
-              .getNextCompletedStripedRead(service, futures, 0);
-          if (DFSClient.LOG.isDebugEnabled()) {
-            DFSClient.LOG.debug("Read task returned: " + r + ", for stripe "
-                + alignedStripe);
-          }
-          StripingChunk returnedChunk = alignedStripe.chunks[r.index];
-          Preconditions.checkNotNull(returnedChunk);
-          Preconditions.checkState(returnedChunk.state == StripingChunk.PENDING);
-
-          if (r.state == StripingChunkReadResult.SUCCESSFUL) {
-            returnedChunk.state = StripingChunk.FETCHED;
-            alignedStripe.fetchedChunksNum++;
-            updateState4SuccessRead(r);
-            if (alignedStripe.fetchedChunksNum == dataBlkNum) {
-              clearFutures(futures.keySet());
-              break;
-            }
-          } else {
-            returnedChunk.state = StripingChunk.MISSING;
-            // close the corresponding reader
-            closeReader(readerInfos[r.index]);
-
-            final int missing = alignedStripe.missingChunksNum;
-            alignedStripe.missingChunksNum++;
-            checkMissingBlocks();
-
-            readDataForDecoding();
-            readParityChunks(alignedStripe.missingChunksNum - missing);
-          }
-        } catch (InterruptedException ie) {
-          String err = "Read request interrupted";
-          DFSClient.LOG.error(err);
-          clearFutures(futures.keySet());
-          // Don't decode if read interrupted
-          throw new InterruptedIOException(err);
-        }
-      }
-
-      if (alignedStripe.missingChunksNum > 0) {
-        decode();
-      }
-    }
-  }
-
-  class PositionStripeReader extends StripeReader {
-    private ByteBuffer[] decodeInputs = null;
-
-    PositionStripeReader(CompletionService<Void> service,
-        AlignedStripe alignedStripe, LocatedBlock[] targetBlocks,
-        BlockReaderInfo[] readerInfos, CorruptedBlocks corruptedBlocks) {
-      super(service, alignedStripe, targetBlocks, readerInfos,
-          corruptedBlocks);
-    }
-
-    @Override
-    void prepareDecodeInputs() {
-      if (decodeInputs == null) {
-        decodeInputs = StripedBlockUtil.initDecodeInputs(alignedStripe,
-            dataBlkNum, parityBlkNum);
-      }
-    }
-
-    @Override
-    boolean prepareParityChunk(int index) {
-      Preconditions.checkState(index >= dataBlkNum &&
-          alignedStripe.chunks[index] == null);
-      alignedStripe.chunks[index] = new StripingChunk(decodeInputs[index]);
-      return true;
-    }
-
-    @Override
-    void decode() {
-      StripedBlockUtil.finalizeDecodeInputs(decodeInputs, alignedStripe);
-      StripedBlockUtil.decodeAndFillBuffer(decodeInputs, alignedStripe,
-          dataBlkNum, parityBlkNum, decoder);
-    }
-  }
-
-  class StatefulStripeReader extends StripeReader {
-    ByteBuffer[] decodeInputs;
-
-    StatefulStripeReader(CompletionService<Void> service,
-        AlignedStripe alignedStripe, LocatedBlock[] targetBlocks,
-        BlockReaderInfo[] readerInfos, CorruptedBlocks corruptedBlocks) {
-      super(service, alignedStripe, targetBlocks, readerInfos,
-          corruptedBlocks);
-    }
-
-    @Override
-    void prepareDecodeInputs() {
-      if (decodeInputs == null) {
-        decodeInputs = new ByteBuffer[dataBlkNum + parityBlkNum];
-        final ByteBuffer cur;
-        synchronized (DFSStripedInputStream.this) {
-          cur = curStripeBuf.duplicate();
-        }
-        StripedBlockUtil.VerticalRange range = alignedStripe.range;
-        for (int i = 0; i < dataBlkNum; i++) {
-          cur.limit(cur.capacity());
-          int pos = (int) (range.offsetInBlock % cellSize + cellSize * i);
-          cur.position(pos);
-          cur.limit((int) (pos + range.spanInBlock));
-          decodeInputs[i] = cur.slice();
-          if (alignedStripe.chunks[i] == null) {
-            alignedStripe.chunks[i] = new StripingChunk(decodeInputs[i]);
-          }
-        }
-      }
-    }
-
-    @Override
-    boolean prepareParityChunk(int index) {
-      Preconditions.checkState(index >= dataBlkNum
-          && alignedStripe.chunks[index] == null);
-      if (blockReaders[index] != null && blockReaders[index].shouldSkip) {
-        alignedStripe.chunks[index] = new StripingChunk(StripingChunk.MISSING);
-        // we have failed the block reader before
-        return false;
-      }
-      final int parityIndex = index - dataBlkNum;
-      ByteBuffer buf = getParityBuffer().duplicate();
-      buf.position(cellSize * parityIndex);
-      buf.limit(cellSize * parityIndex + (int) alignedStripe.range.spanInBlock);
-      decodeInputs[index] = buf.slice();
-      alignedStripe.chunks[index] = new StripingChunk(decodeInputs[index]);
-      return true;
-    }
-
-    @Override
-    void decode() {
-      final int span = (int) alignedStripe.getSpanInBlock();
-      for (int i = 0; i < alignedStripe.chunks.length; i++) {
-        if (alignedStripe.chunks[i] != null &&
-            alignedStripe.chunks[i].state == StripingChunk.ALLZERO) {
-          for (int j = 0; j < span; j++) {
-            decodeInputs[i].put((byte) 0);
-          }
-          decodeInputs[i].flip();
-        } else if (alignedStripe.chunks[i] != null &&
-            alignedStripe.chunks[i].state == StripingChunk.FETCHED) {
-          decodeInputs[i].position(0);
-          decodeInputs[i].limit(span);
-        }
-      }
-      int[] decodeIndices = new int[parityBlkNum];
-      int pos = 0;
-      for (int i = 0; i < alignedStripe.chunks.length; i++) {
-        if (alignedStripe.chunks[i] != null &&
-            alignedStripe.chunks[i].state == StripingChunk.MISSING) {
-          if (i < dataBlkNum) {
-            decodeIndices[pos++] = i;
-          } else {
-            decodeInputs[i] = null;
-          }
-        }
-      }
-      decodeIndices = Arrays.copyOf(decodeIndices, pos);
-
-      final int decodeChunkNum = decodeIndices.length;
-      ByteBuffer[] outputs = new ByteBuffer[decodeChunkNum];
-      for (int i = 0; i < decodeChunkNum; i++) {
-        outputs[i] = decodeInputs[decodeIndices[i]];
-        outputs[i].position(0);
-        outputs[i].limit((int) alignedStripe.range.spanInBlock);
-        decodeInputs[decodeIndices[i]] = null;
-      }
-
-      decoder.decode(decodeInputs, decodeIndices, outputs);
-    }
-  }
-
   /**
    * May need online read recovery, zero-copy read doesn't make
    * sense, so don't support it.
@@ -957,12 +519,4 @@ public class DFSStripedInputStream extends DFSInputStream {
     throw new UnsupportedOperationException(
         "Not support enhanced byte buffer access.");
   }
-
-  /** A variation to {@link DFSInputStream#cancelAll} */
-  private void clearFutures(Collection<Future<Void>> futures) {
-    for (Future<Void> future : futures) {
-      future.cancel(false);
-    }
-    futures.clear();
-  }
 }
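The stripe-boundary arithmetic in readOneStripe() above (stripeLen, stripeIndex, stripeBufOffset, stripeLimit) is easy to sanity-check in isolation. The sketch below assumes an RS-6-3 layout with 64 KB cells and a 10 MB block group; these numbers are illustrative only and are not taken from this change.

// Minimal sketch of the offset math in readOneStripe(); the RS-6-3 layout,
// 64 KB cells and 10 MB block group are assumptions for illustration.
public class StripeOffsetMathSketch {
  public static void main(String[] args) {
    final int dataBlkNum = 6;
    final int cellSize = 64 * 1024;
    final long blockGroupSize = 10L * 1024 * 1024;  // stands in for getBlockSize()
    final long offsetInBlockGroup = 500L * 1024;    // current position in the group

    final int stripeLen = cellSize * dataBlkNum;                         // 393216
    final int stripeIndex = (int) (offsetInBlockGroup / stripeLen);      // 1
    final int stripeBufOffset = (int) (offsetInBlockGroup % stripeLen);  // 118784
    final int stripeLimit = (int) Math.min(
        blockGroupSize - (long) stripeIndex * stripeLen, stripeLen);     // 393216
    // The StripeRange read by this call covers stripeLimit - stripeBufOffset
    // bytes starting at offsetInBlockGroup.
    System.out.println(stripeLen + " " + stripeIndex + " "
        + stripeBufOffset + " " + stripeLimit);
  }
}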

+ 7 - 6
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java

@@ -526,7 +526,7 @@ public class DFSUtilClient {
   }
 
   private static String keyProviderUriKeyName =
-      HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI;
+      CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH;
 
   /**
    * Set the key provider uri configuration key name for creating key providers.
@@ -616,16 +616,17 @@ public class DFSUtilClient {
   }
 
   /**
-   * Probe for HDFS Encryption being enabled; this uses the value of
-   * the option {@link HdfsClientConfigKeys#DFS_ENCRYPTION_KEY_PROVIDER_URI},
-   * returning true if that property contains a non-empty, non-whitespace
+   * Probe for HDFS Encryption being enabled; this uses the value of the option
+   * {@link CommonConfigurationKeysPublic#HADOOP_SECURITY_KEY_PROVIDER_PATH},
+   * returning true if that property contains a non-empty, non-whitespace
    * string.
    * @param conf configuration to probe
    * @return true if encryption is considered enabled.
    */
   public static boolean isHDFSEncryptionEnabled(Configuration conf) {
-    return !conf.getTrimmed(
-        HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "").isEmpty();
+    return !(conf.getTrimmed(
+        CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, "")
+        .isEmpty());
   }
 
   public static InetSocketAddress getNNAddress(String address) {
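The probe above only checks that hadoop.security.key.provider.path is set to a non-empty, non-whitespace value; the HdfsConfiguration hunk further down maps the old dfs.encryption.key.provider.uri key onto the same property. A minimal usage sketch, with a placeholder KMS address:

// Usage sketch for the probe above; the KMS URI is a placeholder value.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.DFSUtilClient;

public class EncryptionProbeSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // No key provider configured: encryption is considered disabled.
    System.out.println(DFSUtilClient.isHDFSEncryptionEnabled(conf));  // false

    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
        "kms://http@kms.example.com:9600/kms");
    // Any non-empty, non-whitespace value enables the probe.
    System.out.println(DFSUtilClient.isHDFSEncryptionEnabled(conf));  // true
  }
}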

+ 109 - 37
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java

@@ -116,6 +116,89 @@ import javax.annotation.Nonnull;
 class DataStreamer extends Daemon {
   static final Logger LOG = LoggerFactory.getLogger(DataStreamer.class);
 
+  private class RefetchEncryptionKeyPolicy {
+    private int fetchEncryptionKeyTimes = 0;
+    private InvalidEncryptionKeyException lastException;
+    private final DatanodeInfo src;
+
+    RefetchEncryptionKeyPolicy(DatanodeInfo src) {
+      this.src = src;
+    }
+
+    boolean continueRetryingOrThrow() throws InvalidEncryptionKeyException {
+      if (fetchEncryptionKeyTimes >= 2) {
+        // hit the same exception twice connecting to the node, so
+        // throw the exception and exclude the node.
+        throw lastException;
+      }
+      // Don't exclude this node just yet.
+      // Try again with a new encryption key.
+      LOG.info("Will fetch a new encryption key and retry, "
+          + "encryption key was invalid when connecting to "
+          + this.src + ": ", lastException);
+      // The encryption key used is invalid.
+      dfsClient.clearDataEncryptionKey();
+      return true;
+    }
+
+    /**
+     * Record a connection failure caused by an invalid encryption key.
+     * @param e the InvalidEncryptionKeyException from the failed attempt
+     * @throws InvalidEncryptionKeyException never actually thrown by this
+     *         implementation
+     */
+    void recordFailure(final InvalidEncryptionKeyException e)
+        throws InvalidEncryptionKeyException {
+      fetchEncryptionKeyTimes++;
+      lastException = e;
+    }
+  }
+
+  private class StreamerStreams implements java.io.Closeable {
+    private Socket sock = null;
+    private DataOutputStream out = null;
+    private DataInputStream in = null;
+
+    StreamerStreams(final DatanodeInfo src,
+        final long writeTimeout, final long readTimeout,
+        final Token<BlockTokenIdentifier> blockToken)
+        throws IOException {
+      sock = createSocketForPipeline(src, 2, dfsClient);
+
+      OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
+      InputStream unbufIn = NetUtils.getInputStream(sock, readTimeout);
+      IOStreamPair saslStreams = dfsClient.saslClient
+          .socketSend(sock, unbufOut, unbufIn, dfsClient, blockToken, src);
+      unbufOut = saslStreams.out;
+      unbufIn = saslStreams.in;
+      out = new DataOutputStream(new BufferedOutputStream(unbufOut,
+          DFSUtilClient.getSmallBufferSize(dfsClient.getConfiguration())));
+      in = new DataInputStream(unbufIn);
+    }
+
+    void sendTransferBlock(final DatanodeInfo[] targets,
+        final StorageType[] targetStorageTypes,
+        final Token<BlockTokenIdentifier> blockToken) throws IOException {
+      //send the TRANSFER_BLOCK request
+      new Sender(out)
+          .transferBlock(block, blockToken, dfsClient.clientName, targets,
+              targetStorageTypes);
+      out.flush();
+      //ack
+      BlockOpResponseProto transferResponse = BlockOpResponseProto
+          .parseFrom(PBHelperClient.vintPrefixed(in));
+      if (SUCCESS != transferResponse.getStatus()) {
+        throw new IOException("Failed to add a datanode. Response status: "
+            + transferResponse.getStatus());
+      }
+    }
+
+    @Override
+    public void close() throws IOException {
+      IOUtils.closeStream(in);
+      IOUtils.closeStream(out);
+      IOUtils.closeSocket(sock);
+    }
+  }
+
   /**
    * Create a socket for a write pipeline
    *
@@ -1270,50 +1353,39 @@ class DataStreamer extends Daemon {
         new IOException("Failed to add a node");
   }
 
+  private long computeTransferWriteTimeout() {
+    return dfsClient.getDatanodeWriteTimeout(2);
+  }
+
+  private long computeTransferReadTimeout() {
+    // transfer timeout multiplier based on the transfer size
+    // One per 200 packets = 12.8MB. Minimum is 2.
+    int multi = 2
+        + (int) (bytesSent / dfsClient.getConf().getWritePacketSize()) / 200;
+    return dfsClient.getDatanodeReadTimeout(multi);
+  }
+
   private void transfer(final DatanodeInfo src, final DatanodeInfo[] targets,
                         final StorageType[] targetStorageTypes,
                         final Token<BlockTokenIdentifier> blockToken)
       throws IOException {
     //transfer replica to the new datanode
-    Socket sock = null;
-    DataOutputStream out = null;
-    DataInputStream in = null;
-    try {
-      sock = createSocketForPipeline(src, 2, dfsClient);
-      final long writeTimeout = dfsClient.getDatanodeWriteTimeout(2);
-
-      // transfer timeout multiplier based on the transfer size
-      // One per 200 packets = 12.8MB. Minimum is 2.
-      int multi = 2 + (int)(bytesSent /dfsClient.getConf().getWritePacketSize())
-          / 200;
-      final long readTimeout = dfsClient.getDatanodeReadTimeout(multi);
-
-      OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
-      InputStream unbufIn = NetUtils.getInputStream(sock, readTimeout);
-      IOStreamPair saslStreams = dfsClient.saslClient.socketSend(sock,
-          unbufOut, unbufIn, dfsClient, blockToken, src);
-      unbufOut = saslStreams.out;
-      unbufIn = saslStreams.in;
-      out = new DataOutputStream(new BufferedOutputStream(unbufOut,
-          DFSUtilClient.getSmallBufferSize(dfsClient.getConfiguration())));
-      in = new DataInputStream(unbufIn);
-
-      //send the TRANSFER_BLOCK request
-      new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
-          targets, targetStorageTypes);
-      out.flush();
+    RefetchEncryptionKeyPolicy policy = new RefetchEncryptionKeyPolicy(src);
+    do {
+      StreamerStreams streams = null;
+      try {
+        final long writeTimeout = computeTransferWriteTimeout();
+        final long readTimeout = computeTransferReadTimeout();
 
-      //ack
-      BlockOpResponseProto response =
-          BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in));
-      if (SUCCESS != response.getStatus()) {
-        throw new IOException("Failed to add a datanode");
+        streams = new StreamerStreams(src, writeTimeout, readTimeout,
+            blockToken);
+        streams.sendTransferBlock(targets, targetStorageTypes, blockToken);
+        return;
+      } catch (InvalidEncryptionKeyException e) {
+        policy.recordFailure(e);
+      } finally {
+        IOUtils.closeStream(streams);
       }
-    } finally {
-      IOUtils.closeStream(in);
-      IOUtils.closeStream(out);
-      IOUtils.closeSocket(sock);
-    }
+    } while (policy.continueRetryingOrThrow());
   }
 
   /**
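The multiplier computed by computeTransferReadTimeout() above grows with the amount of data already sent: one extra unit per 200 packets, with a floor of 2. A small sketch of the arithmetic, assuming the customary 64 KB write packet size (an assumption, not a value from this change):

// Sketch of the transfer read-timeout multiplier: 2 + (bytesSent / packetSize) / 200.
public class TransferTimeoutSketch {
  public static void main(String[] args) {
    final long packetSize = 64 * 1024;           // assumed dfs.client-write-packet-size
    final long bytesSent = 100L * 1024 * 1024;   // 100 MB already sent

    int multi = 2 + (int) (bytesSent / packetSize) / 200;  // 1600 packets / 200 = 8
    System.out.println(multi);                             // 10
    // 200 packets * 64 KB = 12.8 MB, matching the comment in the hunk above;
    // for small transfers the multiplier stays at the minimum of 2.
  }
}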

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DeprecatedKeys;
@@ -141,6 +142,8 @@ public class HdfsConfiguration extends Configuration {
             HdfsClientConfigKeys.DFS_NAMESERVICES),
         new DeprecationDelta("dfs.federation.nameservice.id",
             DeprecatedKeys.DFS_NAMESERVICE_ID),
+        new DeprecationDelta("dfs.encryption.key.provider.uri",
+            CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH),
     });
   }
 

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java

@@ -25,7 +25,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.cache.Cache;
@@ -86,11 +86,11 @@ public class KeyProviderCache {
 
   private URI createKeyProviderURI(Configuration conf) {
     final String providerUriStr = conf.getTrimmed(
-        HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "");
+        CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, "");
     // No provider set in conf
     if (providerUriStr.isEmpty()) {
       LOG.error("Could not find uri with key ["
-          + HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI
+          + CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH
           + "] to create a keyProvider !!");
       return null;
     }

+ 104 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PositionStripeReader.java

@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil.StripingChunk;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil.AlignedStripe;
+import org.apache.hadoop.io.erasurecode.ECChunk;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
+import org.apache.hadoop.hdfs.DFSUtilClient.CorruptedBlocks;
+
+import java.nio.ByteBuffer;
+
+/**
+ * The reader for reading a complete {@link StripedBlockUtil.AlignedStripe}
+ * which may cross multiple stripes with cellSize width.
+ */
+class PositionStripeReader extends StripeReader {
+  private ByteBuffer codingBuffer;
+
+  PositionStripeReader(AlignedStripe alignedStripe,
+      ErasureCodingPolicy ecPolicy, LocatedBlock[] targetBlocks,
+      BlockReaderInfo[] readerInfos, CorruptedBlocks corruptedBlocks,
+      RawErasureDecoder decoder, DFSStripedInputStream dfsStripedInputStream) {
+    super(alignedStripe, ecPolicy, targetBlocks, readerInfos,
+        corruptedBlocks, decoder, dfsStripedInputStream);
+  }
+
+  @Override
+  void prepareDecodeInputs() {
+    if (codingBuffer == null) {
+      this.decodeInputs = new ECChunk[dataBlkNum + parityBlkNum];
+      initDecodeInputs(alignedStripe);
+    }
+  }
+
+  @Override
+  boolean prepareParityChunk(int index) {
+    Preconditions.checkState(index >= dataBlkNum &&
+        alignedStripe.chunks[index] == null);
+
+    alignedStripe.chunks[index] =
+        new StripingChunk(decodeInputs[index].getBuffer());
+
+    return true;
+  }
+
+  @Override
+  void decode() {
+    finalizeDecodeInputs();
+    decodeAndFillBuffer(true);
+  }
+
+  void initDecodeInputs(AlignedStripe alignedStripe) {
+    int bufLen = (int) alignedStripe.getSpanInBlock();
+    int bufCount = dataBlkNum + parityBlkNum;
+    codingBuffer = dfsStripedInputStream.getBufferPool().
+        getBuffer(useDirectBuffer(), bufLen * bufCount);
+    ByteBuffer buffer;
+    for (int i = 0; i < decodeInputs.length; i++) {
+      buffer = codingBuffer.duplicate();
+      decodeInputs[i] = new ECChunk(buffer, i * bufLen, bufLen);
+    }
+
+    for (int i = 0; i < dataBlkNum; i++) {
+      if (alignedStripe.chunks[i] == null) {
+        alignedStripe.chunks[i] =
+            new StripingChunk(decodeInputs[i].getBuffer());
+      }
+    }
+  }
+
+  @Override
+  void close() {
+    if (decodeInputs != null) {
+      for (int i = 0; i < decodeInputs.length; i++) {
+        decodeInputs[i] = null;
+      }
+    }
+
+    if (codingBuffer != null) {
+      dfsStripedInputStream.getBufferPool().putBuffer(codingBuffer);
+      codingBuffer = null;
+    }
+  }
+}
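initDecodeInputs() above backs all decode inputs with a single pooled coding buffer and hands each ECChunk a fixed window into it, which is what lets close() release everything with one putBuffer() call. A layout sketch under assumed RS-6-3 parameters and a 64 KB span:

// Sketch of the coding-buffer layout built by initDecodeInputs(); the RS-6-3
// layout and 64 KB span are illustrative assumptions, not values from the diff.
public class CodingBufferLayoutSketch {
  public static void main(String[] args) {
    final int dataBlkNum = 6;
    final int parityBlkNum = 3;
    final int bufLen = 64 * 1024;                  // alignedStripe.getSpanInBlock()
    final int bufCount = dataBlkNum + parityBlkNum;

    // One pooled buffer of bufLen * bufCount bytes backs every decode input;
    // chunk i is the window [i * bufLen, (i + 1) * bufLen).
    for (int i = 0; i < bufCount; i++) {
      System.out.println("chunk " + i + " -> offset " + (i * bufLen)
          + ", length " + bufLen);
    }
  }
}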

+ 95 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java

@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil.StripingChunk;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil.AlignedStripe;
+import org.apache.hadoop.io.erasurecode.ECChunk;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
+import org.apache.hadoop.hdfs.DFSUtilClient.CorruptedBlocks;
+
+import java.nio.ByteBuffer;
+
+/**
+ * The reader for reading a complete {@link StripedBlockUtil.AlignedStripe}
+ * which belongs to a single stripe.
+ * Reading across multiple stripes is not supported by this reader.
+ */
+class StatefulStripeReader extends StripeReader {
+
+  StatefulStripeReader(AlignedStripe alignedStripe,
+      ErasureCodingPolicy ecPolicy, LocatedBlock[] targetBlocks,
+      BlockReaderInfo[] readerInfos, CorruptedBlocks corruptedBlocks,
+      RawErasureDecoder decoder, DFSStripedInputStream dfsStripedInputStream) {
+    super(alignedStripe, ecPolicy, targetBlocks, readerInfos,
+        corruptedBlocks, decoder, dfsStripedInputStream);
+  }
+
+  @Override
+  void prepareDecodeInputs() {
+    final ByteBuffer cur;
+    synchronized (dfsStripedInputStream) {
+      cur = dfsStripedInputStream.getCurStripeBuf().duplicate();
+    }
+
+    this.decodeInputs = new ECChunk[dataBlkNum + parityBlkNum];
+    int bufLen = (int) alignedStripe.getSpanInBlock();
+    int bufOff = (int) alignedStripe.getOffsetInBlock();
+    for (int i = 0; i < dataBlkNum; i++) {
+      cur.limit(cur.capacity());
+      int pos = bufOff % cellSize + cellSize * i;
+      cur.position(pos);
+      cur.limit(pos + bufLen);
+      decodeInputs[i] = new ECChunk(cur.slice(), 0, bufLen);
+      if (alignedStripe.chunks[i] == null) {
+        alignedStripe.chunks[i] =
+            new StripingChunk(decodeInputs[i].getBuffer());
+      }
+    }
+  }
+
+  @Override
+  boolean prepareParityChunk(int index) {
+    Preconditions.checkState(index >= dataBlkNum
+        && alignedStripe.chunks[index] == null);
+    if (readerInfos[index] != null && readerInfos[index].shouldSkip) {
+      alignedStripe.chunks[index] = new StripingChunk(StripingChunk.MISSING);
+      // we have failed the block reader before
+      return false;
+    }
+    final int parityIndex = index - dataBlkNum;
+    ByteBuffer buf = dfsStripedInputStream.getParityBuffer().duplicate();
+    buf.position(cellSize * parityIndex);
+    buf.limit(cellSize * parityIndex + (int) alignedStripe.range.spanInBlock);
+    decodeInputs[index] =
+        new ECChunk(buf.slice(), 0, (int) alignedStripe.range.spanInBlock);
+    alignedStripe.chunks[index] =
+        new StripingChunk(decodeInputs[index].getBuffer());
+    return true;
+  }
+
+  @Override
+  void decode() {
+    finalizeDecodeInputs();
+    decodeAndFillBuffer(false);
+  }
+}
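prepareDecodeInputs() above slices the shared stripe buffer rather than allocating new memory: data unit i lives in cell i, offset by the aligned stripe's position within its cell. A small sketch of that offset math, with illustrative cell size and span values:

// Sketch of the slicing positions used by prepareDecodeInputs(); the 64 KB
// cells, 6 data units, and 16 KB span are assumptions for illustration.
public class StatefulSliceSketch {
  public static void main(String[] args) {
    final int cellSize = 64 * 1024;
    final int dataBlkNum = 6;
    final int bufLen = 16 * 1024;       // alignedStripe.getSpanInBlock()
    final int bufOff = 10 * 1024;       // alignedStripe.getOffsetInBlock()

    for (int i = 0; i < dataBlkNum; i++) {
      // Data unit i occupies cell i of the stripe buffer; the aligned stripe
      // starts bufOff % cellSize bytes into that cell.
      int pos = bufOff % cellSize + cellSize * i;
      System.out.println("unit " + i + ": [" + pos + ", " + (pos + bufLen) + ")");
    }
  }
}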

+ 463 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java

@@ -0,0 +1,463 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil.StripingChunk;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil.AlignedStripe;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil.StripingChunkReadResult;
+import org.apache.hadoop.io.erasurecode.ECChunk;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
+import org.apache.hadoop.hdfs.DFSUtilClient.CorruptedBlocks;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionService;
+import java.util.concurrent.ExecutorCompletionService;
+import java.util.concurrent.Future;
+
+/**
+ * The reader for reading a complete {@link StripedBlockUtil.AlignedStripe}.
+ * Note that an {@link StripedBlockUtil.AlignedStripe} may cross multiple
+ * stripes with cellSize width.
+ */
+abstract class StripeReader {
+
+  static class ReaderRetryPolicy {
+    private int fetchEncryptionKeyTimes = 1;
+    private int fetchTokenTimes = 1;
+
+    void refetchEncryptionKey() {
+      fetchEncryptionKeyTimes--;
+    }
+
+    void refetchToken() {
+      fetchTokenTimes--;
+    }
+
+    boolean shouldRefetchEncryptionKey() {
+      return fetchEncryptionKeyTimes > 0;
+    }
+
+    boolean shouldRefetchToken() {
+      return fetchTokenTimes > 0;
+    }
+  }
+
+  static class BlockReaderInfo {
+    final BlockReader reader;
+    final DatanodeInfo datanode;
+    /**
+     * when initializing block readers, their starting offsets are set to the
+     * same number: the smallest internal block offsets among all the readers.
+     * This is because it is possible that for some internal blocks we have to
+     * read "backwards" for decoding purpose. We thus use this offset array to
+     * track offsets for all the block readers so that we can skip data if
+     * necessary.
+     */
+    long blockReaderOffset;
+    /**
+     * We use this field to indicate whether we should use this reader. In case
+     * we hit any issue with this reader, we set this field to true and avoid
+     * using it for the next stripe.
+     */
+    boolean shouldSkip = false;
+
+    BlockReaderInfo(BlockReader reader, DatanodeInfo dn, long offset) {
+      this.reader = reader;
+      this.datanode = dn;
+      this.blockReaderOffset = offset;
+    }
+
+    void setOffset(long offset) {
+      this.blockReaderOffset = offset;
+    }
+
+    void skip() {
+      this.shouldSkip = true;
+    }
+  }
+
+  protected final Map<Future<Void>, Integer> futures = new HashMap<>();
+  protected final AlignedStripe alignedStripe;
+  protected final CompletionService<Void> service;
+  protected final LocatedBlock[] targetBlocks;
+  protected final CorruptedBlocks corruptedBlocks;
+  protected final BlockReaderInfo[] readerInfos;
+  protected final ErasureCodingPolicy ecPolicy;
+  protected final short dataBlkNum;
+  protected final short parityBlkNum;
+  protected final int cellSize;
+  protected final RawErasureDecoder decoder;
+  protected final DFSStripedInputStream dfsStripedInputStream;
+
+  protected ECChunk[] decodeInputs;
+
+  StripeReader(AlignedStripe alignedStripe,
+      ErasureCodingPolicy ecPolicy, LocatedBlock[] targetBlocks,
+      BlockReaderInfo[] readerInfos, CorruptedBlocks corruptedBlocks,
+      RawErasureDecoder decoder,
+      DFSStripedInputStream dfsStripedInputStream) {
+    this.alignedStripe = alignedStripe;
+    this.ecPolicy = ecPolicy;
+    this.dataBlkNum = (short)ecPolicy.getNumDataUnits();
+    this.parityBlkNum = (short)ecPolicy.getNumParityUnits();
+    this.cellSize = ecPolicy.getCellSize();
+    this.targetBlocks = targetBlocks;
+    this.readerInfos = readerInfos;
+    this.corruptedBlocks = corruptedBlocks;
+    this.decoder = decoder;
+    this.dfsStripedInputStream = dfsStripedInputStream;
+
+    service = new ExecutorCompletionService<>(
+            dfsStripedInputStream.getStripedReadsThreadPool());
+  }
+
+  /**
+   * Prepare all the data chunks.
+   */
+  abstract void prepareDecodeInputs();
+
+  /**
+   * Prepare the parity chunk and block reader if necessary.
+   */
+  abstract boolean prepareParityChunk(int index);
+
+  /**
+   * Decode to get the missing data.
+   */
+  abstract void decode();
+
+  /**
+   * Default close does nothing.
+   */
+  void close() {
+  }
+
+  void updateState4SuccessRead(StripingChunkReadResult result) {
+    Preconditions.checkArgument(
+        result.state == StripingChunkReadResult.SUCCESSFUL);
+    readerInfos[result.index].setOffset(alignedStripe.getOffsetInBlock()
+        + alignedStripe.getSpanInBlock());
+  }
+
+  private void checkMissingBlocks() throws IOException {
+    if (alignedStripe.missingChunksNum > parityBlkNum) {
+      clearFutures();
+      throw new IOException(alignedStripe.missingChunksNum
+          + " missing blocks, the stripe is: " + alignedStripe
+          + "; locatedBlocks is: " + dfsStripedInputStream.getLocatedBlocks());
+    }
+  }
+
+  /**
+   * We need decoding. Thus go through all the data chunks and make sure we
+   * submit read requests for all of them.
+   */
+  private void readDataForDecoding() throws IOException {
+    prepareDecodeInputs();
+    for (int i = 0; i < dataBlkNum; i++) {
+      Preconditions.checkNotNull(alignedStripe.chunks[i]);
+      if (alignedStripe.chunks[i].state == StripingChunk.REQUESTED) {
+        if (!readChunk(targetBlocks[i], i)) {
+          alignedStripe.missingChunksNum++;
+        }
+      }
+    }
+    checkMissingBlocks();
+  }
+
+  void readParityChunks(int num) throws IOException {
+    for (int i = dataBlkNum, j = 0; i < dataBlkNum + parityBlkNum && j < num;
+         i++) {
+      if (alignedStripe.chunks[i] == null) {
+        if (prepareParityChunk(i) && readChunk(targetBlocks[i], i)) {
+          j++;
+        } else {
+          alignedStripe.missingChunksNum++;
+        }
+      }
+    }
+    checkMissingBlocks();
+  }
+
+  private ByteBufferStrategy[] getReadStrategies(StripingChunk chunk) {
+    if (chunk.useByteBuffer()) {
+      ByteBufferStrategy strategy = new ByteBufferStrategy(
+          chunk.getByteBuffer(), dfsStripedInputStream.getReadStatistics(),
+          dfsStripedInputStream.getDFSClient());
+      return new ByteBufferStrategy[]{strategy};
+    }
+
+    ByteBufferStrategy[] strategies =
+        new ByteBufferStrategy[chunk.getChunkBuffer().getSlices().size()];
+    for (int i = 0; i < strategies.length; i++) {
+      ByteBuffer buffer = chunk.getChunkBuffer().getSlice(i);
+      strategies[i] = new ByteBufferStrategy(buffer,
+              dfsStripedInputStream.getReadStatistics(),
+              dfsStripedInputStream.getDFSClient());
+    }
+    return strategies;
+  }
+
+  private int readToBuffer(BlockReader blockReader,
+      DatanodeInfo currentNode, ByteBufferStrategy strategy,
+      ExtendedBlock currentBlock) throws IOException {
+    final int targetLength = strategy.getTargetLength();
+    int length = 0;
+    try {
+      while (length < targetLength) {
+        int ret = strategy.readFromBlock(blockReader);
+        if (ret < 0) {
+          throw new IOException("Unexpected EOS from the reader");
+        }
+        length += ret;
+      }
+      return length;
+    } catch (ChecksumException ce) {
+      DFSClient.LOG.warn("Found Checksum error for "
+          + currentBlock + " from " + currentNode
+          + " at " + ce.getPos());
+      // we want to remember which block replicas we have tried
+      corruptedBlocks.addCorruptedBlock(currentBlock, currentNode);
+      throw ce;
+    } catch (IOException e) {
+      DFSClient.LOG.warn("Exception while reading from "
+          + currentBlock + " of " + dfsStripedInputStream.getSrc() + " from "
+          + currentNode, e);
+      throw e;
+    }
+  }
+
+  private Callable<Void> readCells(final BlockReader reader,
+      final DatanodeInfo datanode, final long currentReaderOffset,
+      final long targetReaderOffset, final ByteBufferStrategy[] strategies,
+      final ExtendedBlock currentBlock) {
+    return () -> {
+      // reader can be null if getBlockReaderWithRetry failed or
+      // the reader hit exception before
+      if (reader == null) {
+        throw new IOException("The BlockReader is null. " +
+            "The BlockReader creation failed or the reader hit exception.");
+      }
+      Preconditions.checkState(currentReaderOffset <= targetReaderOffset);
+      if (currentReaderOffset < targetReaderOffset) {
+        long skipped = reader.skip(targetReaderOffset - currentReaderOffset);
+        Preconditions.checkState(
+            skipped == targetReaderOffset - currentReaderOffset);
+      }
+
+      for (ByteBufferStrategy strategy : strategies) {
+        readToBuffer(reader, datanode, strategy, currentBlock);
+      }
+      return null;
+    };
+  }
+
+  boolean readChunk(final LocatedBlock block, int chunkIndex)
+      throws IOException {
+    final StripingChunk chunk = alignedStripe.chunks[chunkIndex];
+    if (block == null) {
+      chunk.state = StripingChunk.MISSING;
+      return false;
+    }
+
+    if (readerInfos[chunkIndex] == null) {
+      if (!dfsStripedInputStream.createBlockReader(block,
+          alignedStripe.getOffsetInBlock(), targetBlocks,
+          readerInfos, chunkIndex)) {
+        chunk.state = StripingChunk.MISSING;
+        return false;
+      }
+    } else if (readerInfos[chunkIndex].shouldSkip) {
+      chunk.state = StripingChunk.MISSING;
+      return false;
+    }
+
+    chunk.state = StripingChunk.PENDING;
+    Callable<Void> readCallable = readCells(readerInfos[chunkIndex].reader,
+        readerInfos[chunkIndex].datanode,
+        readerInfos[chunkIndex].blockReaderOffset,
+        alignedStripe.getOffsetInBlock(), getReadStrategies(chunk),
+        block.getBlock());
+
+    Future<Void> request = service.submit(readCallable);
+    futures.put(request, chunkIndex);
+    return true;
+  }
+
+  /**
+   * Read the whole stripe. Do decoding if necessary.
+   */
+  void readStripe() throws IOException {
+    for (int i = 0; i < dataBlkNum; i++) {
+      if (alignedStripe.chunks[i] != null &&
+          alignedStripe.chunks[i].state != StripingChunk.ALLZERO) {
+        if (!readChunk(targetBlocks[i], i)) {
+          alignedStripe.missingChunksNum++;
+        }
+      }
+    }
+    // If some block locations are missing at this stage, read the full data
+    // stripe plus one parity chunk per missing chunk so decoding is possible.
+    if (alignedStripe.missingChunksNum > 0) {
+      checkMissingBlocks();
+      readDataForDecoding();
+      // read parity chunks
+      readParityChunks(alignedStripe.missingChunksNum);
+    }
+    // TODO: for a full stripe we can start reading (dataBlkNum + 1) chunks
+
+    // Wait for the scheduled reads to complete. The decode input buffers
+    // remain null until the first read failure, when they are prepared for
+    // decoding.
+    while (!futures.isEmpty()) {
+      try {
+        StripingChunkReadResult r = StripedBlockUtil
+            .getNextCompletedStripedRead(service, futures, 0);
+        if (DFSClient.LOG.isDebugEnabled()) {
+          DFSClient.LOG.debug("Read task returned: " + r + ", for stripe "
+              + alignedStripe);
+        }
+        StripingChunk returnedChunk = alignedStripe.chunks[r.index];
+        Preconditions.checkNotNull(returnedChunk);
+        Preconditions.checkState(returnedChunk.state == StripingChunk.PENDING);
+
+        if (r.state == StripingChunkReadResult.SUCCESSFUL) {
+          returnedChunk.state = StripingChunk.FETCHED;
+          alignedStripe.fetchedChunksNum++;
+          updateState4SuccessRead(r);
+          if (alignedStripe.fetchedChunksNum == dataBlkNum) {
+            clearFutures();
+            break;
+          }
+        } else {
+          returnedChunk.state = StripingChunk.MISSING;
+          // close the corresponding reader
+          dfsStripedInputStream.closeReader(readerInfos[r.index]);
+
+          final int missing = alignedStripe.missingChunksNum;
+          alignedStripe.missingChunksNum++;
+          checkMissingBlocks();
+
+          readDataForDecoding();
+          readParityChunks(alignedStripe.missingChunksNum - missing);
+        }
+      } catch (InterruptedException ie) {
+        String err = "Read request interrupted";
+        DFSClient.LOG.error(err);
+        clearFutures();
+        // Don't decode if read interrupted
+        throw new InterruptedIOException(err);
+      }
+    }
+
+    if (alignedStripe.missingChunksNum > 0) {
+      decode();
+    }
+  }
+
+  /**
+   * Some fetched {@link StripingChunk}s might be stored in the original
+   * application buffer instead of the prepared decode input buffers; others
+   * are beyond the range of the internal blocks and should correspond to all
+   * zero bytes. Once all pending requests have returned, this method should
+   * be called to finalize the decode input buffers.
+   */
+  void finalizeDecodeInputs() {
+    for (int i = 0; i < alignedStripe.chunks.length; i++) {
+      final StripingChunk chunk = alignedStripe.chunks[i];
+      if (chunk != null && chunk.state == StripingChunk.FETCHED) {
+        if (chunk.useChunkBuffer()) {
+          chunk.getChunkBuffer().copyTo(decodeInputs[i].getBuffer());
+        } else {
+          chunk.getByteBuffer().flip();
+        }
+      } else if (chunk != null && chunk.state == StripingChunk.ALLZERO) {
+        decodeInputs[i].setAllZero(true);
+      }
+    }
+  }
+
+  /**
+   * Decode based on the given input buffers and erasure coding policy.
+   */
+  void decodeAndFillBuffer(boolean fillBuffer) {
+    // Step 1: prepare indices and output buffers for missing data units
+    int[] decodeIndices = prepareErasedIndices();
+
+    final int decodeChunkNum = decodeIndices.length;
+    ECChunk[] outputs = new ECChunk[decodeChunkNum];
+    for (int i = 0; i < decodeChunkNum; i++) {
+      outputs[i] = decodeInputs[decodeIndices[i]];
+      decodeInputs[decodeIndices[i]] = null;
+    }
+    // Step 2: decode into prepared output buffers
+    decoder.decode(decodeInputs, decodeIndices, outputs);
+
+    // Step 3: fill original application buffer with decoded data
+    if (fillBuffer) {
+      for (int i = 0; i < decodeIndices.length; i++) {
+        int missingBlkIdx = decodeIndices[i];
+        StripingChunk chunk = alignedStripe.chunks[missingBlkIdx];
+        if (chunk.state == StripingChunk.MISSING && chunk.useChunkBuffer()) {
+          chunk.getChunkBuffer().copyFrom(outputs[i].getBuffer());
+        }
+      }
+    }
+  }
+
+  /**
+   * Collect the indices of the missing chunks that the decoder needs to
+   * reconstruct.
+   */
+  int[] prepareErasedIndices() {
+    int[] decodeIndices = new int[parityBlkNum];
+    int pos = 0;
+    for (int i = 0; i < alignedStripe.chunks.length; i++) {
+      if (alignedStripe.chunks[i] != null &&
+          alignedStripe.chunks[i].state == StripingChunk.MISSING){
+        decodeIndices[pos++] = i;
+      }
+    }
+
+    int[] erasedIndices = Arrays.copyOf(decodeIndices, pos);
+    return erasedIndices;
+  }
+
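+  /** Cancel all pending read futures without interrupting running tasks. */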
+  void clearFutures() {
+    for (Future<Void> future : futures.keySet()) {
+      future.cancel(false);
+    }
+    futures.clear();
+  }
+
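+  /** Whether the decoder prefers direct (off-heap) ByteBuffers. */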
+  boolean useDirectBuffer() {
+    return decoder.preferDirectBuffer();
+  }
+}

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/CreateEncryptionZoneFlag.java

@@ -68,4 +68,3 @@ public enum CreateEncryptionZoneFlag {
     return mode;
   }
 }
-

+ 24 - 25
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java

@@ -45,29 +45,28 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 /**
  * The public API for performing administrative functions on HDFS. Those writing
  * applications against HDFS should prefer this interface to directly accessing
  * functionality in DistributedFileSystem or DFSClient.
- * 
- * Note that this is distinct from the similarly-named {@link DFSAdmin}, which
+ *
+ * Note that this is distinct from the similarly-named DFSAdmin, which
  * is a class that provides the functionality for the CLI `hdfs dfsadmin ...'
  * commands.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class HdfsAdmin {
-  
+
   private DistributedFileSystem dfs;
   private static final FsPermission TRASH_PERMISSION = new FsPermission(
       FsAction.ALL, FsAction.ALL, FsAction.ALL, true);
-  
+
   /**
    * Create a new HdfsAdmin client.
-   * 
+   *
    * @param uri the unique URI of the HDFS file system to administer
    * @param conf configuration
    * @throws IOException in the event the file system could not be created
@@ -80,11 +79,11 @@ public class HdfsAdmin {
       dfs = (DistributedFileSystem)fs;
     }
   }
-  
+
   /**
    * Set the namespace quota (count of files, directories, and sym links) for a
    * directory.
-   * 
+   *
    * @param src the path to set the quota for
    * @param quota the value to set for the quota
    * @throws IOException in the event of error
@@ -92,22 +91,22 @@ public class HdfsAdmin {
   public void setQuota(Path src, long quota) throws IOException {
     dfs.setQuota(src, quota, HdfsConstants.QUOTA_DONT_SET);
   }
-  
+
   /**
    * Clear the namespace quota (count of files, directories and sym links) for a
    * directory.
-   * 
+   *
    * @param src the path to clear the quota of
    * @throws IOException in the event of error
    */
   public void clearQuota(Path src) throws IOException {
     dfs.setQuota(src, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
   }
-  
+
   /**
    * Set the storage space quota (size of files) for a directory. Note that
    * directories and sym links do not occupy storage space.
-   * 
+   *
    * @param src the path to set the space quota of
    * @param spaceQuota the value to set for the space quota
    * @throws IOException in the event of error
@@ -115,11 +114,11 @@ public class HdfsAdmin {
   public void setSpaceQuota(Path src, long spaceQuota) throws IOException {
     dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, spaceQuota);
   }
-  
+
   /**
    * Clear the storage space quota (size of files) for a directory. Note that
    * directories and sym links do not occupy storage space.
-   * 
+   *
    * @param src the path to clear the space quota of
    * @throws IOException in the event of error
    */
@@ -152,7 +151,7 @@ public class HdfsAdmin {
   public void clearQuotaByStorageType(Path src, StorageType type) throws IOException {
     dfs.setQuotaByStorageType(src, type, HdfsConstants.QUOTA_RESET);
   }
-  
+
   /**
    * Allow snapshot on a directory.
    * @param path The path of the directory where snapshots will be taken.
@@ -160,7 +159,7 @@ public class HdfsAdmin {
   public void allowSnapshot(Path path) throws IOException {
     dfs.allowSnapshot(path);
   }
-  
+
   /**
    * Disallow snapshot on a directory.
    * @param path The path of the snapshottable directory.
@@ -171,7 +170,7 @@ public class HdfsAdmin {
 
   /**
    * Add a new CacheDirectiveInfo.
-   * 
+   *
    * @param info Information about a directive to add.
    * @param flags {@link CacheFlag}s to use for this operation.
    * @return the ID of the directive that was created.
@@ -181,10 +180,10 @@ public class HdfsAdmin {
       EnumSet<CacheFlag> flags) throws IOException {
   return dfs.addCacheDirective(info, flags);
   }
-  
+
   /**
    * Modify a CacheDirective.
-   * 
+   *
    * @param info Information about the directive to modify. You must set the ID
    *          to indicate which CacheDirective you want to modify.
    * @param flags {@link CacheFlag}s to use for this operation.
@@ -197,7 +196,7 @@ public class HdfsAdmin {
 
   /**
    * Remove a CacheDirective.
-   * 
+   *
    * @param id identifier of the CacheDirectiveInfo to remove
    * @throws IOException if the directive could not be removed
    */
@@ -208,7 +207,7 @@ public class HdfsAdmin {
 
   /**
    * List cache directives. Incrementally fetches results from the server.
-   * 
+   *
    * @param filter Filter parameters to use when listing the directives, null to
    *               list all directives visible to us.
    * @return A RemoteIterator which returns CacheDirectiveInfo objects.
@@ -223,7 +222,7 @@ public class HdfsAdmin {
    *
    * @param info
    *          The request to add a cache pool.
-   * @throws IOException 
+   * @throws IOException
    *          If the request could not be completed.
    */
   public void addCachePool(CachePoolInfo info) throws IOException {
@@ -235,19 +234,19 @@ public class HdfsAdmin {
    *
    * @param info
    *          The request to modify a cache pool.
-   * @throws IOException 
+   * @throws IOException
    *          If the request could not be completed.
    */
   public void modifyCachePool(CachePoolInfo info) throws IOException {
     dfs.modifyCachePool(info);
   }
-    
+
   /**
    * Remove a cache pool.
    *
    * @param poolName
    *          Name of the cache pool to remove.
-   * @throws IOException 
+   * @throws IOException
    *          if the cache pool did not exist, or could not be removed.
    */
   public void removeCachePool(String poolName) throws IOException {

+ 0 - 4
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java

@@ -131,15 +131,11 @@ public interface HdfsClientConfigKeys {
           "dfs.client.key.provider.cache.expiry";
   long    DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT =
               TimeUnit.DAYS.toMillis(10); // 10 days
-  String  DFS_HDFS_BLOCKS_METADATA_ENABLED =
-      "dfs.datanode.hdfs-blocks-metadata.enabled";
-  boolean DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT = false;
 
   String  DFS_DATANODE_KERBEROS_PRINCIPAL_KEY =
       "dfs.datanode.kerberos.principal";
   String  DFS_DATANODE_READAHEAD_BYTES_KEY = "dfs.datanode.readahead.bytes";
   long    DFS_DATANODE_READAHEAD_BYTES_DEFAULT = 4 * 1024 * 1024; // 4MB
-  String  DFS_ENCRYPTION_KEY_PROVIDER_URI = "dfs.encryption.key.provider.uri";
 
   String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY =
       "dfs.encrypt.data.transfer.cipher.suites";

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java

@@ -20,8 +20,7 @@ package org.apache.hadoop.hdfs.client;
 import java.io.IOException;
 import java.net.URI;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -30,7 +29,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The public utility API for HDFS.
@@ -38,7 +38,7 @@ import org.apache.hadoop.io.IOUtils;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class HdfsUtils {
-  private static final Log LOG = LogFactory.getLog(HdfsUtils.class);
+  public static final Logger LOG = LoggerFactory.getLogger(HdfsUtils.class);
 
   /**
    * Is the HDFS healthy?
@@ -54,7 +54,7 @@ public class HdfsUtils {
       throw new IllegalArgumentException("The scheme is not "
           + HdfsConstants.HDFS_URI_SCHEME + ", uri=" + uri);
     }
-    
+
     final Configuration conf = new Configuration();
     //disable FileSystem cache
     conf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
@@ -80,7 +80,7 @@ public class HdfsUtils {
       }
       return false;
     } finally {
-      IOUtils.cleanup(LOG, fs);
+      IOUtils.closeQuietly(fs);
     }
   }
 }

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/package-info.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/package-info.java


+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java

@@ -19,10 +19,10 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
+import com.fasterxml.jackson.annotation.JsonInclude;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.htrace.fasterxml.jackson.annotation.JsonInclude;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.map.ObjectReader;
 

+ 41 - 117
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java

@@ -22,7 +22,6 @@ import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.DFSStripedOutputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -32,7 +31,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
+import org.apache.hadoop.hdfs.DFSStripedOutputStream;
 import org.apache.hadoop.security.token.Token;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -76,18 +75,6 @@ public class StripedBlockUtil {
   public static final Logger LOG =
       LoggerFactory.getLogger(StripedBlockUtil.class);
 
-  /**
-   * Parses a striped block group into individual blocks.
-   * @param bg The striped block group
-   * @param ecPolicy The erasure coding policy
-   * @return An array of the blocks in the group
-   */
-  public static LocatedBlock[] parseStripedBlockGroup(LocatedStripedBlock bg,
-                                               ErasureCodingPolicy ecPolicy) {
-    return parseStripedBlockGroup(bg, ecPolicy.getCellSize(),
-        ecPolicy.getNumDataUnits(), ecPolicy.getNumParityUnits());
-  }
-
   /**
    * This method parses a striped block group into individual blocks.
    *
@@ -112,7 +99,7 @@ public class StripedBlockUtil {
   }
 
   /**
-   * This method creates an internal block at the given index of a block group
+   * This method creates an internal block at the given index of a block group.
    *
    * @param idxInReturnedLocs The index in the stored locations in the
    *                          {@link LocatedStripedBlock} object
@@ -169,7 +156,7 @@ public class StripedBlockUtil {
   }
 
   /**
-   * Get the size of an internal block at the given index of a block group
+   * Get the size of an internal block at the given index of a block group.
    *
    * @param dataSize Size of the block group only counting data blocks
    * @param cellSize The size of a striping cell
@@ -237,7 +224,7 @@ public class StripedBlockUtil {
 
   /**
    * Given a byte's offset in an internal block, calculate the offset in
-   * the block group
+   * the block group.
    */
   public static long offsetInBlkToOffsetInBG(int cellSize, int dataBlkNum,
       long offsetInBlk, int idxInBlockGroup) {
@@ -248,12 +235,12 @@ public class StripedBlockUtil {
   }
 
   /**
-   * Get the next completed striped read task
+   * Get the next completed striped read task.
    *
-   * @return {@link StripingChunkReadResult} indicating the status of the read task
-   *          succeeded, and the block index of the task. If the method times
-   *          out without getting any completed read tasks, -1 is returned as
-   *          block index.
+   * @return {@link StripingChunkReadResult} indicating the status of the read
+   *          task and the block index of the task. If the method times out
+   *          without getting any completed read tasks, -1 is returned as the
+   *          block index.
    * @throws InterruptedException
    */
   public static StripingChunkReadResult getNextCompletedStripedRead(
@@ -287,7 +274,7 @@ public class StripedBlockUtil {
 
   /**
    * Get the total usage of the striped blocks, which is the total of data
-   * blocks and parity blocks
+   * blocks and parity blocks.
    *
    * @param numDataBlkBytes
    *          Size of the block group only counting data blocks
@@ -307,91 +294,6 @@ public class StripedBlockUtil {
     return numDataBlkBytes + numParityBlkBytes;
   }
 
-  /**
-   * Initialize the decoding input buffers based on the chunk states in an
-   * {@link AlignedStripe}. For each chunk that was not initially requested,
-   * schedule a new fetch request with the decoding input buffer as transfer
-   * destination.
-   */
-  public static ByteBuffer[] initDecodeInputs(AlignedStripe alignedStripe,
-      int dataBlkNum, int parityBlkNum) {
-    ByteBuffer[] decodeInputs = new ByteBuffer[dataBlkNum + parityBlkNum];
-    for (int i = 0; i < decodeInputs.length; i++) {
-      decodeInputs[i] = ByteBuffer.allocate(
-          (int) alignedStripe.getSpanInBlock());
-    }
-    // read the full data aligned stripe
-    for (int i = 0; i < dataBlkNum; i++) {
-      if (alignedStripe.chunks[i] == null) {
-        alignedStripe.chunks[i] = new StripingChunk(decodeInputs[i]);
-      }
-    }
-    return decodeInputs;
-  }
-
-  /**
-   * Some fetched {@link StripingChunk} might be stored in original application
-   * buffer instead of prepared decode input buffers. Some others are beyond
-   * the range of the internal blocks and should correspond to all zero bytes.
-   * When all pending requests have returned, this method should be called to
-   * finalize decode input buffers.
-   */
-  public static void finalizeDecodeInputs(final ByteBuffer[] decodeInputs,
-                                          AlignedStripe alignedStripe) {
-    for (int i = 0; i < alignedStripe.chunks.length; i++) {
-      final StripingChunk chunk = alignedStripe.chunks[i];
-      if (chunk != null && chunk.state == StripingChunk.FETCHED) {
-        if (chunk.useChunkBuffer()) {
-          chunk.getChunkBuffer().copyTo(decodeInputs[i]);
-        } else {
-          chunk.getByteBuffer().flip();
-        }
-      } else if (chunk != null && chunk.state == StripingChunk.ALLZERO) {
-        //ZERO it. Will be better handled in other following issue.
-        byte[] emptyBytes = new byte[decodeInputs[i].limit()];
-        decodeInputs[i].put(emptyBytes);
-        decodeInputs[i].flip();
-      } else {
-        decodeInputs[i] = null;
-      }
-    }
-  }
-
-  /**
-   * Decode based on the given input buffers and erasure coding policy.
-   */
-  public static void decodeAndFillBuffer(final ByteBuffer[] decodeInputs,
-      AlignedStripe alignedStripe, int dataBlkNum, int parityBlkNum,
-      RawErasureDecoder decoder) {
-    // Step 1: prepare indices and output buffers for missing data units
-    int[] decodeIndices = new int[parityBlkNum];
-    int pos = 0;
-    for (int i = 0; i < dataBlkNum; i++) {
-      if (alignedStripe.chunks[i] != null &&
-          alignedStripe.chunks[i].state == StripingChunk.MISSING){
-        decodeIndices[pos++] = i;
-      }
-    }
-    decodeIndices = Arrays.copyOf(decodeIndices, pos);
-    ByteBuffer[] decodeOutputs = new ByteBuffer[decodeIndices.length];
-    for (int i = 0; i < decodeOutputs.length; i++) {
-      decodeOutputs[i] = ByteBuffer.allocate(
-          (int) alignedStripe.getSpanInBlock());
-    }
-
-    // Step 2: decode into prepared output buffers
-    decoder.decode(decodeInputs, decodeIndices, decodeOutputs);
-
-    // Step 3: fill original application buffer with decoded data
-    for (int i = 0; i < decodeIndices.length; i++) {
-      int missingBlkIdx = decodeIndices[i];
-      StripingChunk chunk = alignedStripe.chunks[missingBlkIdx];
-      if (chunk.state == StripingChunk.MISSING && chunk.useChunkBuffer()) {
-        chunk.getChunkBuffer().copyFrom(decodeOutputs[i]);
-      }
-    }
-  }
-
   /**
    * Similar functionality with {@link #divideByteRangeIntoStripes}, but is used
    * by stateful read and uses ByteBuffer as reading target buffer. Besides the
@@ -485,7 +387,7 @@ public class StripedBlockUtil {
   /**
    * Map the logical byte range to a set of inclusive {@link StripingCell}
    * instances, each representing the overlap of the byte range to a cell
-   * used by {@link DFSStripedOutputStream} in encoding
+   * used by {@link DFSStripedOutputStream} in encoding.
    */
   @VisibleForTesting
   private static StripingCell[] getStripingCellsOfByteRange(
@@ -530,7 +432,7 @@ public class StripedBlockUtil {
     int dataBlkNum = ecPolicy.getNumDataUnits();
     int parityBlkNum = ecPolicy.getNumParityUnits();
 
-    VerticalRange ranges[] = new VerticalRange[dataBlkNum + parityBlkNum];
+    VerticalRange[] ranges = new VerticalRange[dataBlkNum + parityBlkNum];
 
     long earliestStart = Long.MAX_VALUE;
     long latestEnd = -1;
@@ -675,7 +577,7 @@ public class StripedBlockUtil {
   @VisibleForTesting
   static class StripingCell {
     final ErasureCodingPolicy ecPolicy;
-    /** Logical order in a block group, used when doing I/O to a block group */
+    /** Logical order in a block group, used when doing I/O to a block group. */
     final int idxInBlkGroup;
     final int idxInInternalBlk;
     final int idxInStripe;
@@ -738,7 +640,7 @@ public class StripedBlockUtil {
    */
   public static class AlignedStripe {
     public VerticalRange range;
-    /** status of each chunk in the stripe */
+    /** status of each chunk in the stripe. */
     public final StripingChunk[] chunks;
     public int fetchedChunksNum = 0;
     public int missingChunksNum = 0;
@@ -790,9 +692,9 @@ public class StripedBlockUtil {
    * +-----+
    */
   public static class VerticalRange {
-    /** start offset in the block group (inclusive) */
+    /** start offset in the block group (inclusive). */
     public long offsetInBlock;
-    /** length of the stripe range */
+    /** length of the stripe range. */
     public long spanInBlock;
 
     public VerticalRange(long offsetInBlock, long length) {
@@ -801,7 +703,7 @@ public class StripedBlockUtil {
       this.spanInBlock = length;
     }
 
-    /** whether a position is in the range */
+    /** whether a position is in the range. */
     public boolean include(long pos) {
       return pos >= offsetInBlock && pos < offsetInBlock + spanInBlock;
     }
@@ -915,7 +817,7 @@ public class StripedBlockUtil {
     /**
      *  Note: target will be ready-to-read state after the call.
      */
-    void copyTo(ByteBuffer target) {
+    public void copyTo(ByteBuffer target) {
       for (ByteBuffer slice : slices) {
         slice.flip();
         target.put(slice);
@@ -923,7 +825,7 @@ public class StripedBlockUtil {
       target.flip();
     }
 
-    void copyFrom(ByteBuffer src) {
+    public void copyFrom(ByteBuffer src) {
       ByteBuffer tmp;
       int len;
       for (ByteBuffer slice : slices) {
@@ -970,6 +872,28 @@ public class StripedBlockUtil {
     }
   }
 
+  /** Used to indicate the buffered data's range in the block group. */
+  public static class StripeRange {
+    /** start offset in the block group (inclusive). */
+    final long offsetInBlock;
+    /** length of the stripe range. */
+    final long length;
+
+    public StripeRange(long offsetInBlock, long length) {
+      Preconditions.checkArgument(offsetInBlock >= 0 && length >= 0);
+      this.offsetInBlock = offsetInBlock;
+      this.length = length;
+    }
+
+    public boolean include(long pos) {
+      return pos >= offsetInBlock && pos < offsetInBlock + length;
+    }
+
+    public long getLength() {
+      return length;
+    }
+  }
+
   /**
    * Check if the information such as IDs and generation stamps in block-i
    * match the block group.

Some files were not shown because too many files changed in this diff.