
Merge remote-tracking branch 'origin/trunk' into MR-2841

Conflicts:
	hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
Todd Lipcon, 10 years ago
parent
commit cce7d1e2f9
100 files changed, 8329 additions and 1376 deletions
  1. .gitignore (+1, -0)
  2. BUILDING.txt (+22, -0)
  3. dev-support/test-patch.sh (+14, -12)
  4. hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml (+7, -0)
  5. hadoop-common-project/hadoop-auth/dev-support/findbugsExcludeFile.xml (+38, -0)
  6. hadoop-common-project/hadoop-auth/pom.xml (+26, -0)
  7. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java (+10, -26)
  8. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java (+96, -13)
  9. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java (+20, -1)
  10. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java (+20, -1)
  11. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java (+49, -0)
  12. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java (+139, -0)
  13. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java (+32, -14)
  14. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerSecretProvider.java (+62, -0)
  15. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java (+49, -0)
  16. hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java (+133, -4)
  17. hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java (+8, -30)
  18. hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java (+53, -5)
  19. hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java (+110, -8)
  20. hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java (+63, -0)
  21. hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRolloverSignerSecretProvider.java (+79, -0)
  22. hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java (+69, -16)
  23. hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestStringSignerSecretProvider.java (+33, -0)
  24. hadoop-common-project/hadoop-common/CHANGES.txt (+388, -114)
  25. hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml (+5, -0)
  26. hadoop-common-project/hadoop-common/pom.xml (+33, -1)
  27. hadoop-common-project/hadoop-common/src/CMakeLists.txt (+34, -0)
  28. hadoop-common-project/hadoop-common/src/JNIFlags.cmake (+6, -0)
  29. hadoop-common-project/hadoop-common/src/config.h.cmake (+1, -0)
  30. hadoop-common-project/hadoop-common/src/contrib/bash-tab-completion/hadoop.sh (+13, -15)
  31. hadoop-common-project/hadoop-common/src/main/bin/hadoop (+135, -94)
  32. hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd (+6, -4)
  33. hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh (+158, -251)
  34. hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh (+28, -186)
  35. hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh (+26, -11)
  36. hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh (+1066, -0)
  37. hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example (+93, -0)
  38. hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd (+30, -3)
  39. hadoop-common-project/hadoop-common/src/main/bin/rcc (+17, -35)
  40. hadoop-common-project/hadoop-common/src/main/bin/slaves.sh (+23, -28)
  41. hadoop-common-project/hadoop-common/src/main/bin/start-all.sh (+26, -12)
  42. hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh (+25, -11)
  43. hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh (+383, -49)
  44. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java (+73, -5)
  45. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java (+2, -0)
  46. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java (+67, -0)
  47. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java (+115, -0)
  48. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java (+179, -0)
  49. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java (+680, -0)
  50. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java (+280, -0)
  51. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java (+70, -0)
  52. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java (+72, -0)
  53. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java (+71, -0)
  54. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java (+165, -0)
  55. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java (+164, -0)
  56. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java (+289, -0)
  57. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java (+229, -19)
  58. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java (+31, -1)
  59. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java (+74, -17)
  60. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java (+6, -2)
  61. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderExtension.java (+1, -0)
  62. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java (+45, -44)
  63. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/UserProvider.java (+3, -2)
  64. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java (+87, -30)
  65. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java (+1, -1)
  66. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java (+122, -0)
  67. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java (+115, -0)
  68. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java (+28, -75)
  69. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java (+5, -3)
  70. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java (+5, -3)
  71. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java (+3, -1)
  72. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java (+32, -2)
  73. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java (+37, -9)
  74. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java (+1, -1)
  75. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java (+70, -37)
  76. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java (+65, -76)
  77. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java (+102, -0)
  78. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java (+82, -77)
  79. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java (+7, -0)
  80. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java (+8, -0)
  81. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java (+6, -2)
  82. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataInputStream.java (+37, -0)
  83. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataOutputStream.java (+47, -0)
  84. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java (+68, -7)
  85. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java (+5, -1)
  86. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java (+31, -5)
  87. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java (+5, -1)
  88. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java (+11, -1)
  89. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java (+8, -0)
  90. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java (+7, -0)
  91. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java (+9, -1)
  92. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java (+9, -0)
  93. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java (+14, -2)
  94. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java (+5, -4)
  95. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java (+16, -3)
  96. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java (+8, -0)
  97. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java (+522, -0)
  98. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcSchedulerMXBean.java (+30, -0)
  99. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java (+449, -0)
  100. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueueMXBean.java (+27, -0)

+ 1 - 0
.gitignore

@@ -9,6 +9,7 @@
 .project
 .settings
 target
+build
 hadoop-common-project/hadoop-kms/downloads/
 hadoop-hdfs-project/hadoop-hdfs/downloads
 hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads

+ 22 - 0
BUILDING.txt

@@ -81,6 +81,27 @@ Maven build goals:
     the final tar file. This option requires that -Dsnappy.lib is also given,
     and it ignores the -Dsnappy.prefix option.
 
+ OpenSSL build options:
+
+   OpenSSL includes a crypto library that can be utilized by the native code.
+   It is currently an optional component, meaning that Hadoop can be built with
+   or without this dependency.
+
+  * Use -Drequire.openssl to fail the build if libcrypto.so is not found.
+    If this option is not specified and the openssl library is missing,
+    we silently build a version of libhadoop.so that cannot make use of
+    openssl. This option is recommended if you plan on making use of openssl 
+    and want to get more repeatable builds.
+  * Use -Dopenssl.prefix to specify a nonstandard location for the libcrypto
+    header files and library files. You do not need this option if you have
+    installed openssl using a package manager.
+  * Use -Dopenssl.lib to specify a nonstandard location for the libcrypto library
+    files. Similarly to openssl.prefix, you do not need this option if you have
+    installed openssl using a package manager.
+  * Use -Dbundle.openssl to copy the contents of the openssl.lib directory into
+    the final tar file. This option requires that -Dopenssl.lib is also given,
+    and it ignores the -Dopenssl.prefix option.
+
    Tests options:
 
   * Use -DskipTests to skip tests when running the following Maven goals:
@@ -189,6 +210,7 @@ Requirements:
 * Maven 3.0 or later
 * Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.5.0
+* CMake 2.6 or newer
 * Windows SDK or Visual Studio 2010 Professional
 * Unix command-line tools from GnuWin32 or Cygwin: sh, mkdir, rm, cp, tar, gzip
 * zlib headers (if building native code bindings for zlib)

+ 14 - 12
dev-support/test-patch.sh

@@ -16,7 +16,7 @@
 ulimit -n 1024
 
 ### Setup some variables.  
-### SVN_REVISION and BUILD_URL are set by Hudson if it is run by patch process
+### BUILD_URL is set by Hudson if it is run by patch process
 ### Read variables from properties file
 bindir=$(dirname $0)
 
@@ -36,7 +36,7 @@ BUILD_NATIVE=true
 PS=${PS:-ps}
 AWK=${AWK:-awk}
 WGET=${WGET:-wget}
-SVN=${SVN:-svn}
+GIT=${GIT:-git}
 GREP=${GREP:-grep}
 PATCH=${PATCH:-patch}
 DIFF=${DIFF:-diff}
@@ -59,13 +59,13 @@ printUsage() {
   echo "--mvn-cmd=<cmd>        The 'mvn' command to use (default \$MAVEN_HOME/bin/mvn, or 'mvn')"
   echo "--ps-cmd=<cmd>         The 'ps' command to use (default 'ps')"
   echo "--awk-cmd=<cmd>        The 'awk' command to use (default 'awk')"
-  echo "--svn-cmd=<cmd>        The 'svn' command to use (default 'svn')"
+  echo "--git-cmd=<cmd>        The 'git' command to use (default 'git')"
   echo "--grep-cmd=<cmd>       The 'grep' command to use (default 'grep')"
   echo "--patch-cmd=<cmd>      The 'patch' command to use (default 'patch')"
   echo "--diff-cmd=<cmd>       The 'diff' command to use (default 'diff')"
   echo "--findbugs-home=<path> Findbugs home directory (default FINDBUGS_HOME environment variable)"
   echo "--forrest-home=<path>  Forrest home directory (default FORREST_HOME environment variable)"
-  echo "--dirty-workspace      Allow the local SVN workspace to have uncommitted changes"
+  echo "--dirty-workspace      Allow the local git workspace to have uncommitted changes"
   echo "--run-tests            Run all tests below the base directory"
   echo "--build-native=<bool>  If true, then build native components (default 'true')"
   echo
@@ -107,8 +107,8 @@ parseArgs() {
     --wget-cmd=*)
       WGET=${i#*=}
       ;;
-    --svn-cmd=*)
-      SVN=${i#*=}
+    --git-cmd=*)
+      GIT=${i#*=}
       ;;
     --grep-cmd=*)
       GREP=${i#*=}
@@ -197,7 +197,7 @@ checkout () {
   echo ""
   ### When run by a developer, if the workspace contains modifications, do not continue
   ### unless the --dirty-workspace option was set
-  status=`$SVN stat --ignore-externals | sed -e '/^X[ ]*/D'`
+  status=`$GIT status --porcelain`
   if [[ $JENKINS == "false" ]] ; then
     if [[ "$status" != "" && -z $DIRTY_WORKSPACE ]] ; then
       echo "ERROR: can't run in a workspace that contains the following modifications"
@@ -207,10 +207,12 @@ checkout () {
     echo
   else   
     cd $BASEDIR
-    $SVN revert -R .
-    rm -rf `$SVN status --no-ignore`
-    $SVN update
+    $GIT reset --hard
+    $GIT clean -xdf
+    $GIT checkout trunk
+    $GIT pull --rebase
   fi
+  GIT_REVISION=`git rev-parse --verify --short HEAD`
   return $?
 }
 
@@ -229,10 +231,10 @@ downloadPatch () {
     echo "$defect patch is being downloaded at `date` from"
     echo "$patchURL"
     $WGET -q -O $PATCH_DIR/patch $patchURL
-    VERSION=${SVN_REVISION}_${defect}_PATCH-${patchNum}
+    VERSION=${GIT_REVISION}_${defect}_PATCH-${patchNum}
     JIRA_COMMENT="Here are the results of testing the latest attachment 
   $patchURL
-  against trunk revision ${SVN_REVISION}."
+  against trunk revision ${GIT_REVISION}."
 
     ### Copy in any supporting files needed by this process
     cp -r $SUPPORT_DIR/lib/* ./lib

+ 7 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml

@@ -29,6 +29,7 @@
         <exclude>*-config.cmd</exclude>
         <exclude>start-*.cmd</exclude>
         <exclude>stop-*.cmd</exclude>
+        <exclude>hadoop-layout.sh.example</exclude>
       </excludes>
       <fileMode>0755</fileMode>
     </fileSet>
@@ -42,6 +43,8 @@
       <includes>
         <include>*-config.sh</include>
         <include>*-config.cmd</include>
+        <include>*-functions.sh</include>
+        <include>hadoop-layout.sh.example</include>
       </includes>
       <fileMode>0755</fileMode>
     </fileSet>
@@ -57,6 +60,10 @@
         <exclude>hadoop.cmd</exclude>
         <exclude>hdfs.cmd</exclude>
         <exclude>hadoop-config.cmd</exclude>
+        <exclude>hadoop-functions.sh</exclude>
+        <exclude>hadoop-layout.sh.example</exclude>
+        <exclude>hdfs-config.cmd</exclude>
+        <exclude>hdfs-config.sh</exclude>
       </excludes>
       <fileMode>0755</fileMode>
     </fileSet>

+ 38 - 0
hadoop-common-project/hadoop-auth/dev-support/findbugsExcludeFile.xml

@@ -0,0 +1,38 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<FindBugsFilter>
+  <!--
+    Caller is not supposed to modify returned values even though there's nothing
+    stopping them; we do this for performance reasons.
+  -->
+  <Match>
+    <Class name="org.apache.hadoop.security.authentication.util.RolloverSignerSecretProvider" />
+    <Method name="getAllSecrets" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.security.authentication.util.StringSignerSecretProvider" />
+    <Method name="getAllSecrets" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+  <Match>
+    <Class name="org.apache.hadoop.security.authentication.util.StringSignerSecretProvider" />
+    <Method name="getCurrentSecret" />
+    <Bug pattern="EI_EXPOSE_REP" />
+  </Match>
+
+</FindBugsFilter>

+ 26 - 0
hadoop-common-project/hadoop-auth/pom.xml

@@ -61,6 +61,16 @@
       <groupId>org.mortbay.jetty</groupId>
       <artifactId>jetty</artifactId>
       <scope>test</scope>
+    </dependency>
+     <dependency>
+      <groupId>org.apache.tomcat.embed</groupId>
+      <artifactId>tomcat-embed-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.tomcat.embed</groupId>
+      <artifactId>tomcat-embed-logging-juli</artifactId>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>javax.servlet</groupId>
@@ -144,12 +154,28 @@
         <artifactId>maven-jar-plugin</artifactId>
         <executions>
           <execution>
+            <id>prepare-jar</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>jar</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>prepare-test-jar</id>
+            <phase>prepare-package</phase>
             <goals>
               <goal>test-jar</goal>
             </goals>
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>findbugs-maven-plugin</artifactId>
+        <configuration>
+          <excludeFilterFile>${basedir}/dev-support/findbugsExcludeFile.xml</excludeFilterFile>
+        </configuration>
+      </plugin>
     </plugins>
   </build>
 

+ 10 - 26
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java

@@ -120,32 +120,6 @@ public class AuthenticatedURL {
       return token;
     }
 
-    /**
-     * Return the hashcode for the token.
-     *
-     * @return the hashcode for the token.
-     */
-    @Override
-    public int hashCode() {
-      return (token != null) ? token.hashCode() : 0;
-    }
-
-    /**
-     * Return if two token instances are equal.
-     *
-     * @param o the other token instance.
-     *
-     * @return if this instance and the other instance are equal.
-     */
-    @Override
-    public boolean equals(Object o) {
-      boolean eq = false;
-      if (o instanceof Token) {
-        Token other = (Token) o;
-        eq = (token == null && other.token == null) || (token != null && this.token.equals(other.token));
-      }
-      return eq;
-    }
   }
 
   private static Class<? extends Authenticator> DEFAULT_AUTHENTICATOR = KerberosAuthenticator.class;
@@ -208,6 +182,16 @@ public class AuthenticatedURL {
     this.authenticator.setConnectionConfigurator(connConfigurator);
   }
 
+  /**
+   * Returns the {@link Authenticator} instance used by the
+   * <code>AuthenticatedURL</code>.
+   *
+   * @return the {@link Authenticator} instance
+   */
+  protected Authenticator getAuthenticator() {
+    return authenticator;
+  }
+
   /**
    * Returns an authenticated {@link HttpURLConnection}.
    *

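Note: the getAuthenticator() accessor added above is protected, so it is only reachable from subclasses. A minimal illustrative sketch, not part of the patch (class name and logging are hypothetical):

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.Authenticator;

// Hypothetical subclass: only demonstrates that the new protected
// getAuthenticator() accessor is usable from extensions.
public class LoggingAuthenticatedURL extends AuthenticatedURL {

  public LoggingAuthenticatedURL(Authenticator authenticator) {
    super(authenticator);
  }

  @Override
  public HttpURLConnection openConnection(URL url, Token token)
      throws IOException, AuthenticationException {
    // Log which Authenticator implementation will negotiate the connection.
    System.out.println("Authenticating with "
        + getAuthenticator().getClass().getName());
    return super.openConnection(url, token);
  }
}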
+ 96 - 13
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java

@@ -19,6 +19,9 @@ import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.util.Signer;
 import org.apache.hadoop.security.authentication.util.SignerException;
+import org.apache.hadoop.security.authentication.util.RandomSignerSecretProvider;
+import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
+import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -107,11 +110,29 @@ public class AuthenticationFilter implements Filter {
    */
   public static final String COOKIE_PATH = "cookie.path";
 
-  private static final Random RAN = new Random();
+  /**
+   * Constant for the configuration property that indicates the name of the
+   * SignerSecretProvider class to use.  If not specified, SIGNATURE_SECRET
+   * will be used or a random secret.
+   */
+  public static final String SIGNER_SECRET_PROVIDER_CLASS =
+          "signer.secret.provider";
+
+  /**
+   * Constant for the attribute that can be used for providing a custom
+   * object that subclasses the SignerSecretProvider.  Note that this should be
+   * set in the ServletContext and the class should already be initialized.  
+   * If not specified, SIGNER_SECRET_PROVIDER_CLASS will be used.
+   */
+  public static final String SIGNATURE_PROVIDER_ATTRIBUTE =
+      "org.apache.hadoop.security.authentication.util.SignerSecretProvider";
 
 
+  private Properties config;
   private Signer signer;
+  private SignerSecretProvider secretProvider;
   private AuthenticationHandler authHandler;
   private boolean randomSecret;
+  private boolean customSecretProvider;
   private long validity;
   private String cookieDomain;
   private String cookiePath;
@@ -130,7 +151,7 @@ public class AuthenticationFilter implements Filter {
   public void init(FilterConfig filterConfig) throws ServletException {
     String configPrefix = filterConfig.getInitParameter(CONFIG_PREFIX);
     configPrefix = (configPrefix != null) ? configPrefix + "." : "";
-    Properties config = getConfiguration(configPrefix, filterConfig);
+    config = getConfiguration(configPrefix, filterConfig);
     String authHandlerName = config.getProperty(AUTH_TYPE, null);
     String authHandlerClassName;
     if (authHandlerName == null) {
@@ -159,19 +180,62 @@ public class AuthenticationFilter implements Filter {
     } catch (IllegalAccessException ex) {
       throw new ServletException(ex);
     }
-    String signatureSecret = config.getProperty(configPrefix + SIGNATURE_SECRET);
-    if (signatureSecret == null) {
-      signatureSecret = Long.toString(RAN.nextLong());
-      randomSecret = true;
-      LOG.warn("'signature.secret' configuration not set, using a random value as secret");
+
+    validity = Long.parseLong(config.getProperty(AUTH_TOKEN_VALIDITY, "36000"))
+        * 1000; //10 hours
+    secretProvider = (SignerSecretProvider) filterConfig.getServletContext().
+        getAttribute(SIGNATURE_PROVIDER_ATTRIBUTE);
+    if (secretProvider == null) {
+      String signerSecretProviderClassName =
+          config.getProperty(configPrefix + SIGNER_SECRET_PROVIDER_CLASS, null);
+      if (signerSecretProviderClassName == null) {
+        String signatureSecret =
+            config.getProperty(configPrefix + SIGNATURE_SECRET, null);
+        if (signatureSecret != null) {
+          secretProvider = new StringSignerSecretProvider(signatureSecret);
+        } else {
+          secretProvider = new RandomSignerSecretProvider();
+          randomSecret = true;
+        }
+      } else {
+        try {
+          Class<?> klass = Thread.currentThread().getContextClassLoader().
+              loadClass(signerSecretProviderClassName);
+          secretProvider = (SignerSecretProvider) klass.newInstance();
+          customSecretProvider = true;
+        } catch (ClassNotFoundException ex) {
+          throw new ServletException(ex);
+        } catch (InstantiationException ex) {
+          throw new ServletException(ex);
+        } catch (IllegalAccessException ex) {
+          throw new ServletException(ex);
+        }
+      }
+      try {
+        secretProvider.init(config, validity);
+      } catch (Exception ex) {
+        throw new ServletException(ex);
+      }
+    } else {
+      customSecretProvider = true;
     }
-    signer = new Signer(signatureSecret.getBytes());
-    validity = Long.parseLong(config.getProperty(AUTH_TOKEN_VALIDITY, "36000")) * 1000; //10 hours
+    signer = new Signer(secretProvider);
 
     cookieDomain = config.getProperty(COOKIE_DOMAIN, null);
     cookiePath = config.getProperty(COOKIE_PATH, null);
   }
 
+  /**
+   * Returns the configuration properties of the {@link AuthenticationFilter}
+   * without the prefix. The returned properties are the same that the
+   * {@link #getConfiguration(String, FilterConfig)} method returned.
+   *
+   * @return the configuration properties.
+   */
+  protected Properties getConfiguration() {
+    return config;
+  }
+
   /**
    * Returns the authentication handler being used.
    *
@@ -190,6 +254,15 @@ public class AuthenticationFilter implements Filter {
     return randomSecret;
   }
 
+  /**
+   * Returns if a custom implementation of a SignerSecretProvider is being used.
+   *
+   * @return if a custom implementation of a SignerSecretProvider is being used.
+   */
+  protected boolean isCustomSignerSecretProvider() {
+    return customSecretProvider;
+  }
+
   /**
    * Returns the validity time of the generated tokens.
    *
@@ -228,6 +301,9 @@ public class AuthenticationFilter implements Filter {
       authHandler.destroy();
       authHandler = null;
     }
+    if (secretProvider != null) {
+      secretProvider.destroy();
+    }
   }
 
   /**
@@ -393,7 +469,7 @@ public class AuthenticationFilter implements Filter {
             createAuthCookie(httpResponse, signedToken, getCookieDomain(),
                     getCookiePath(), token.getExpires(), isHttps);
           }
-          filterChain.doFilter(httpRequest, httpResponse);
+          doFilter(filterChain, httpRequest, httpResponse);
         }
       } else {
         unauthorizedResponse = false;
@@ -417,6 +493,15 @@ public class AuthenticationFilter implements Filter {
     }
   }
 
+  /**
+   * Delegates call to the servlet filter chain. Sub-classes my override this
+   * method to perform pre and post tasks.
+   */
+  protected void doFilter(FilterChain filterChain, HttpServletRequest request,
+      HttpServletResponse response) throws IOException, ServletException {
+    filterChain.doFilter(request, response);
+  }
+
   /**
    * Creates the Hadoop authentication HTTP cookie.
    *
@@ -434,9 +519,7 @@ public class AuthenticationFilter implements Filter {
     StringBuilder sb = new StringBuilder(AuthenticatedURL.AUTH_COOKIE)
                            .append("=");
     if (token != null && token.length() > 0) {
-      sb.append("\"")
-          .append(token)
-          .append("\"");
+      sb.append(token);
     }
     sb.append("; Version=1");
 

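One way to use the new SIGNATURE_PROVIDER_ATTRIBUTE hook from the filter above is to have a ServletContextListener create and initialize a provider before the filter starts. A rough sketch under that assumption (the listener class is hypothetical; the attribute key matches the constant in the patch):

import java.util.Properties;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import org.apache.hadoop.security.authentication.util.RandomSignerSecretProvider;

// Hypothetical listener that publishes an already-initialized provider in the
// ServletContext so AuthenticationFilter picks it up instead of creating one.
public class SignerSecretProviderInitializer implements ServletContextListener {

  @Override
  public void contextInitialized(ServletContextEvent sce) {
    try {
      RandomSignerSecretProvider provider = new RandomSignerSecretProvider();
      provider.init(new Properties(), 10 * 60 * 60 * 1000L); // 10-hour validity
      // Same key as AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE above.
      sce.getServletContext().setAttribute(
          "org.apache.hadoop.security.authentication.util.SignerSecretProvider",
          provider);
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }

  @Override
  public void contextDestroyed(ServletContextEvent sce) {
    // The provider's destroy() could be called here on shutdown.
  }
}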
+ 20 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java

@@ -142,11 +142,30 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
    */
   public static final String NAME_RULES = TYPE + ".name.rules";
 
+  private String type;
   private String keytab;
   private GSSManager gssManager;
   private Subject serverSubject = new Subject();
   private List<LoginContext> loginContexts = new ArrayList<LoginContext>();
 
+  /**
+   * Creates a Kerberos SPNEGO authentication handler with the default
+   * auth-token type, <code>kerberos</code>.
+   */
+  public KerberosAuthenticationHandler() {
+    this(TYPE);
+  }
+
+  /**
+   * Creates a Kerberos SPNEGO authentication handler with a custom auth-token
+   * type.
+   *
+   * @param type auth-token type.
+   */
+  public KerberosAuthenticationHandler(String type) {
+    this.type = type;
+  }
+
   /**
    * Initializes the authentication handler instance.
    * <p/>
@@ -249,7 +268,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
    */
   @Override
   public String getType() {
-    return TYPE;
+    return type;
   }
 
   /**

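The new single-argument constructor above lets a subclass reuse the SPNEGO logic while advertising a different auth-token type. A minimal sketch (handler and type name are made up for illustration):

import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;

// Hypothetical handler: same Kerberos/SPNEGO behavior, custom token type,
// which is what the new KerberosAuthenticationHandler(String type) enables.
public class CompanyKerberosHandler extends KerberosAuthenticationHandler {

  public static final String AUTH_TYPE = "company-kerberos";

  public CompanyKerberosHandler() {
    super(AUTH_TYPE); // getType() now returns "company-kerberos"
  }
}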
+ 20 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java

@@ -55,6 +55,25 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
 
 
   private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
   private boolean acceptAnonymous;
+  private String type;
+
+  /**
+   * Creates a Hadoop pseudo authentication handler with the default auth-token
+   * type, <code>simple</code>.
+   */
+  public PseudoAuthenticationHandler() {
+    this(TYPE);
+  }
+
+  /**
+   * Creates a Hadoop pseudo authentication handler with a custom auth-token
+   * type.
+   *
+   * @param type auth-token type.
+   */
+  public PseudoAuthenticationHandler(String type) {
+    this.type = type;
+  }
 
 
   /**
    * Initializes the authentication handler instance.
@@ -96,7 +115,7 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
    */
   @Override
   public String getType() {
-    return TYPE;
+    return type;
   }
 
   /**

+ 49 - 0
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java

@@ -0,0 +1,49 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import java.util.Random;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A SignerSecretProvider that uses a random number as it's secret.  It rolls
+ * the secret at a regular interval.
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.Private
+public class RandomSignerSecretProvider extends RolloverSignerSecretProvider {
+
+  private final Random rand;
+
+  public RandomSignerSecretProvider() {
+    super();
+    rand = new Random();
+  }
+
+  /**
+   * This constructor lets you set the seed of the Random Number Generator and
+   * is meant for testing.
+   * @param seed the seed for the random number generator
+   */
+  public RandomSignerSecretProvider(long seed) {
+    super();
+    rand = new Random(seed);
+  }
+
+  @Override
+  protected byte[] generateNewSecret() {
+    return Long.toString(rand.nextLong()).getBytes();
+  }
+}

+ 139 - 0
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RolloverSignerSecretProvider.java

@@ -0,0 +1,139 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import java.util.Properties;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An abstract SignerSecretProvider that can be use used as the base for a
+ * rolling secret.  The secret will roll over at the same interval as the token
+ * validity, so there are only ever a maximum of two valid secrets at any
+ * given time.  This class handles storing and returning the secrets, as well
+ * as the rolling over.  At a minimum, subclasses simply need to implement the
+ * generateNewSecret() method.  More advanced implementations can override
+ * other methods to provide more advanced behavior, but should be careful when
+ * doing so.
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.Private
+public abstract class RolloverSignerSecretProvider
+    extends SignerSecretProvider {
+
+  private static Logger LOG = LoggerFactory.getLogger(
+    RolloverSignerSecretProvider.class);
+  /**
+   * Stores the currently valid secrets.  The current secret is the 0th element
+   * in the array.
+   */
+  private volatile byte[][] secrets;
+  private ScheduledExecutorService scheduler;
+  private boolean schedulerRunning;
+  private boolean isDestroyed;
+
+  public RolloverSignerSecretProvider() {
+    schedulerRunning = false;
+    isDestroyed = false;
+  }
+
+  /**
+   * Initialize the SignerSecretProvider.  It initializes the current secret
+   * and starts the scheduler for the rollover to run at an interval of
+   * tokenValidity.
+   * @param config filter configuration
+   * @param tokenValidity The amount of time a token is valid for
+   * @throws Exception
+   */
+  @Override
+  public void init(Properties config, long tokenValidity) throws Exception {
+    initSecrets(generateNewSecret(), null);
+    startScheduler(tokenValidity, tokenValidity);
+  }
+
+  /**
+   * Initializes the secrets array.  This should typically be called only once,
+   * during init but some implementations may wish to call it other times.
+   * previousSecret can be null if there isn't a previous secret, but
+   * currentSecret should never be null.
+   * @param currentSecret The current secret
+   * @param previousSecret The previous secret
+   */
+  protected void initSecrets(byte[] currentSecret, byte[] previousSecret) {
+    secrets = new byte[][]{currentSecret, previousSecret};
+  }
+
+  /**
+   * Starts the scheduler for the rollover to run at an interval.
+   * @param initialDelay The initial delay in the rollover in milliseconds
+   * @param period The interval for the rollover in milliseconds
+   */
+  protected synchronized void startScheduler(long initialDelay, long period) {
+    if (!schedulerRunning) {
+      schedulerRunning = true;
+      scheduler = Executors.newSingleThreadScheduledExecutor();
+      scheduler.scheduleAtFixedRate(new Runnable() {
+        @Override
+        public void run() {
+          rollSecret();
+        }
+      }, initialDelay, period, TimeUnit.MILLISECONDS);
+    }
+  }
+
+  @Override
+  public synchronized void destroy() {
+    if (!isDestroyed) {
+      isDestroyed = true;
+      if (scheduler != null) {
+        scheduler.shutdown();
+      }
+      schedulerRunning = false;
+      super.destroy();
+    }
+  }
+
+  /**
+   * Rolls the secret.  It is called automatically at the rollover interval.
+   */
+  protected synchronized void rollSecret() {
+    if (!isDestroyed) {
+      LOG.debug("rolling secret");
+      byte[] newSecret = generateNewSecret();
+      secrets = new byte[][]{newSecret, secrets[0]};
+    }
+  }
+
+  /**
+   * Subclasses should implement this to return a new secret.  It will be called
+   * automatically at the secret rollover interval. It should never return null.
+   * @return a new secret
+   */
+  protected abstract byte[] generateNewSecret();
+
+  @Override
+  public byte[] getCurrentSecret() {
+    return secrets[0];
+  }
+
+  @Override
+  public byte[][] getAllSecrets() {
+    return secrets;
+  }
+}

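Per the class comment above, a concrete provider only needs to supply generateNewSecret(); the base class handles storage and rollover. A minimal sketch under that assumption (class name and secret size are illustrative, not part of the patch):

import java.security.SecureRandom;
import org.apache.hadoop.security.authentication.util.RolloverSignerSecretProvider;

// Hypothetical provider: generates a 32-byte random secret; the base class
// calls this once at init() and again at every rollover interval.
public class SecureRandomSignerSecretProvider extends RolloverSignerSecretProvider {

  private final SecureRandom random = new SecureRandom();

  @Override
  protected byte[] generateNewSecret() {
    byte[] secret = new byte[32];
    random.nextBytes(secret);
    return secret;
  }
}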
+ 32 - 14
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java

@@ -24,18 +24,19 @@ import java.security.NoSuchAlgorithmException;
 public class Signer {
   private static final String SIGNATURE = "&s=";
 
-  private byte[] secret;
+  private SignerSecretProvider secretProvider;
 
   /**
-   * Creates a Signer instance using the specified secret.
+   * Creates a Signer instance using the specified SignerSecretProvider.  The
+   * SignerSecretProvider should already be initialized.
    *
-   * @param secret secret to use for creating the digest.
+   * @param secretProvider The SignerSecretProvider to use
    */
-  public Signer(byte[] secret) {
-    if (secret == null) {
-      throw new IllegalArgumentException("secret cannot be NULL");
+  public Signer(SignerSecretProvider secretProvider) {
+    if (secretProvider == null) {
+      throw new IllegalArgumentException("secretProvider cannot be NULL");
     }
-    this.secret = secret.clone();
+    this.secretProvider = secretProvider;
   }
 
   /**
@@ -47,11 +48,12 @@ public class Signer {
    *
    * @return the signed string.
    */
-  public String sign(String str) {
+  public synchronized String sign(String str) {
     if (str == null || str.length() == 0) {
       throw new IllegalArgumentException("NULL or empty string to sign");
     }
-    String signature = computeSignature(str);
+    byte[] secret = secretProvider.getCurrentSecret();
+    String signature = computeSignature(secret, str);
     return str + SIGNATURE + signature;
   }
 
@@ -71,21 +73,19 @@ public class Signer {
     }
     String originalSignature = signedStr.substring(index + SIGNATURE.length());
     String rawValue = signedStr.substring(0, index);
-    String currentSignature = computeSignature(rawValue);
-    if (!originalSignature.equals(currentSignature)) {
-      throw new SignerException("Invalid signature");
-    }
+    checkSignatures(rawValue, originalSignature);
     return rawValue;
   }
 
   /**
    * Returns then signature of a string.
    *
+   * @param secret The secret to use
    * @param str string to sign.
    *
    * @return the signature for the string.
    */
-  protected String computeSignature(String str) {
+  protected String computeSignature(byte[] secret, String str) {
     try {
       MessageDigest md = MessageDigest.getInstance("SHA");
       md.update(str.getBytes());
@@ -97,4 +97,22 @@ public class Signer {
     }
   }
 
+  protected void checkSignatures(String rawValue, String originalSignature)
+      throws SignerException {
+    boolean isValid = false;
+    byte[][] secrets = secretProvider.getAllSecrets();
+    for (int i = 0; i < secrets.length; i++) {
+      byte[] secret = secrets[i];
+      if (secret != null) {
+        String currentSignature = computeSignature(secret, rawValue);
+        if (originalSignature.equals(currentSignature)) {
+          isValid = true;
+          break;
+        }
+      }
+    }
+    if (!isValid) {
+      throw new SignerException("Invalid signature");
+    }
+  }
 }

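A small usage sketch of the reworked Signer API shown above, using the StringSignerSecretProvider from this patch (the secret and payload strings are illustrative):

import org.apache.hadoop.security.authentication.util.Signer;
import org.apache.hadoop.security.authentication.util.SignerException;
import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;

public class SignerRoundTrip {
  public static void main(String[] args) throws SignerException {
    // The Signer no longer takes a raw byte[]; it asks the provider for the
    // current secret on each sign() call.
    Signer signer = new Signer(new StringSignerSecretProvider("illustrative-secret"));

    String signed = signer.sign("u=alice&t=kerberos");
    // verifyAndExtract() checks the signature against every secret the
    // provider still considers valid and returns the original string.
    String original = signer.verifyAndExtract(signed);
    System.out.println(original);
  }
}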
+ 62 - 0
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/SignerSecretProvider.java

@@ -0,0 +1,62 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import java.util.Properties;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * The SignerSecretProvider is an abstract way to provide a secret to be used
+ * by the Signer so that we can have different implementations that potentially
+ * do more complicated things in the backend.
+ * See the RolloverSignerSecretProvider class for an implementation that
+ * supports rolling over the secret at a regular interval.
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.Private
+public abstract class SignerSecretProvider {
+
+  /**
+   * Initialize the SignerSecretProvider
+   * @param config filter configuration
+   * @param tokenValidity The amount of time a token is valid for
+   * @throws Exception
+   */
+  public abstract void init(Properties config, long tokenValidity)
+      throws Exception;
+
+  /**
+   * Will be called on shutdown; subclasses should perform any cleanup here.
+   */
+  public void destroy() {}
+
+  /**
+   * Returns the current secret to be used by the Signer for signing new
+   * cookies.  This should never return null.
+   * <p>
+   * Callers should be careful not to modify the returned value.
+   * @return the current secret
+   */
+  public abstract byte[] getCurrentSecret();
+
+  /**
+   * Returns all secrets that a cookie could have been signed with and are still
+   * valid; this should include the secret returned by getCurrentSecret().
+   * <p>
+   * Callers should be careful not to modify the returned value.
+   * @return the secrets
+   */
+  public abstract byte[][] getAllSecrets();
+}

+ 49 - 0
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java

@@ -0,0 +1,49 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import java.util.Properties;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A SignerSecretProvider that simply creates a secret based on a given String.
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.Private
+public class StringSignerSecretProvider extends SignerSecretProvider {
+
+  private byte[] secret;
+  private byte[][] secrets;
+
+  public StringSignerSecretProvider(String secretStr) {
+    secret = secretStr.getBytes();
+    secrets = new byte[][]{secret};
+  }
+
+  @Override
+  public void init(Properties config, long tokenValidity) throws Exception {
+    // do nothing
+  }
+
+  @Override
+  public byte[] getCurrentSecret() {
+    return secret;
+  }
+
+  @Override
+  public byte[][] getAllSecrets() {
+    return secrets;
+  }
+}

+ 133 - 4
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java

@@ -13,7 +13,22 @@
  */
 package org.apache.hadoop.security.authentication.client;
 
+import org.apache.catalina.deploy.FilterDef;
+import org.apache.catalina.deploy.FilterMap;
+import org.apache.catalina.startup.Tomcat;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.http.HttpResponse;
+import org.apache.http.auth.AuthScope;
+import org.apache.http.auth.Credentials;
+import org.apache.http.client.HttpClient;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.client.params.AuthPolicy;
+import org.apache.http.entity.InputStreamEntity;
+import org.apache.http.impl.auth.SPNegoSchemeFactory;
+import org.apache.http.impl.client.SystemDefaultHttpClient;
+import org.apache.http.util.EntityUtils;
 import org.mortbay.jetty.Server;
 import org.mortbay.jetty.servlet.Context;
 import org.mortbay.jetty.servlet.FilterHolder;
@@ -24,16 +39,19 @@ import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+import java.io.BufferedReader;
+import java.io.ByteArrayInputStream;
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.OutputStreamWriter;
-import java.io.BufferedReader;
 import java.io.InputStreamReader;
 import java.io.Writer;
 import java.net.HttpURLConnection;
 import java.net.ServerSocket;
 import java.net.URL;
+import java.security.Principal;
 import java.util.Properties;
 import org.junit.Assert;
 
@@ -41,10 +59,18 @@ public class AuthenticatorTestCase {
   private Server server;
   private String host = null;
   private int port = -1;
+  private boolean useTomcat = false;
+  private Tomcat tomcat = null;
   Context context;
 
   private static Properties authenticatorConfig;
 
+  public AuthenticatorTestCase() {}
+
+  public AuthenticatorTestCase(boolean useTomcat) {
+    this.useTomcat = useTomcat;
+  }
+
   protected static void setAuthenticationHandlerConfig(Properties config) {
     authenticatorConfig = config;
   }
@@ -80,7 +106,19 @@ public class AuthenticatorTestCase {
     }
   }
 
+  protected int getLocalPort() throws Exception {
+    ServerSocket ss = new ServerSocket(0);
+    int ret = ss.getLocalPort();
+    ss.close();
+    return ret;
+  }
+
   protected void start() throws Exception {
+    if (useTomcat) startTomcat();
+    else startJetty();
+  }
+
+  protected void startJetty() throws Exception {
     server = new Server(0);
     context = new Context();
     context.setContextPath("/foo");
@@ -88,16 +126,42 @@ public class AuthenticatorTestCase {
     context.addFilter(new FilterHolder(TestFilter.class), "/*", 0);
     context.addServlet(new ServletHolder(TestServlet.class), "/bar");
     host = "localhost";
-    ServerSocket ss = new ServerSocket(0);
-    port = ss.getLocalPort();
-    ss.close();
+    port = getLocalPort();
     server.getConnectors()[0].setHost(host);
     server.getConnectors()[0].setPort(port);
     server.start();
     System.out.println("Running embedded servlet container at: http://" + host + ":" + port);
   }
 
 
+  protected void startTomcat() throws Exception {
+    tomcat = new Tomcat();
+    File base = new File(System.getProperty("java.io.tmpdir"));
+    org.apache.catalina.Context ctx =
+      tomcat.addContext("/foo",base.getAbsolutePath());
+    FilterDef fd = new FilterDef();
+    fd.setFilterClass(TestFilter.class.getName());
+    fd.setFilterName("TestFilter");
+    FilterMap fm = new FilterMap();
+    fm.setFilterName("TestFilter");
+    fm.addURLPattern("/*");
+    fm.addServletName("/bar");
+    ctx.addFilterDef(fd);
+    ctx.addFilterMap(fm);
+    tomcat.addServlet(ctx, "/bar", TestServlet.class.getName());
+    ctx.addServletMapping("/bar", "/bar");
+    host = "localhost";
+    port = getLocalPort();
+    tomcat.setHostname(host);
+    tomcat.setPort(port);
+    tomcat.start();
+  }
+
   protected void stop() throws Exception {
+    if (useTomcat) stopTomcat();
+    else stopJetty();
+  }
+
+  protected void stopJetty() throws Exception {
     try {
       server.stop();
     } catch (Exception e) {
@@ -109,6 +173,18 @@ public class AuthenticatorTestCase {
     }
   }
 
 
+  protected void stopTomcat() throws Exception {
+    try {
+      tomcat.stop();
+    } catch (Exception e) {
+    }
+
+    try {
+      tomcat.destroy();
+    } catch (Exception e) {
+    }
+  }
+
   protected String getBaseURL() {
     return "http://" + host + ":" + port + "/foo/bar";
   }
@@ -165,4 +241,57 @@ public class AuthenticatorTestCase {
     }
   }
 
 
+  private SystemDefaultHttpClient getHttpClient() {
+    final SystemDefaultHttpClient httpClient = new SystemDefaultHttpClient();
+    httpClient.getAuthSchemes().register(AuthPolicy.SPNEGO, new SPNegoSchemeFactory(true));
+     Credentials use_jaas_creds = new Credentials() {
+       public String getPassword() {
+         return null;
+       }
+
+       public Principal getUserPrincipal() {
+         return null;
+       }
+     };
+
+     httpClient.getCredentialsProvider().setCredentials(
+       AuthScope.ANY, use_jaas_creds);
+     return httpClient;
+  }
+
+  private void doHttpClientRequest(HttpClient httpClient, HttpUriRequest request) throws Exception {
+    HttpResponse response = null;
+    try {
+      response = httpClient.execute(request);
+      final int httpStatus = response.getStatusLine().getStatusCode();
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, httpStatus);
+    } finally {
+      if (response != null) EntityUtils.consumeQuietly(response.getEntity());
+    }
+  }
+
+  protected void _testAuthenticationHttpClient(Authenticator authenticator, boolean doPost) throws Exception {
+    start();
+    try {
+      SystemDefaultHttpClient httpClient = getHttpClient();
+      doHttpClientRequest(httpClient, new HttpGet(getBaseURL()));
+
+      // Always do a GET before POST to trigger the SPNego negotiation
+      if (doPost) {
+        HttpPost post = new HttpPost(getBaseURL());
+        byte [] postBytes = POST.getBytes();
+        ByteArrayInputStream bis = new ByteArrayInputStream(postBytes);
+        InputStreamEntity entity = new InputStreamEntity(bis, postBytes.length);
+
+        // Important that the entity is not repeatable -- this means if
+        // we have to renegotiate (e.g. b/c the cookie wasn't handled properly)
+        // the test will fail.
+        Assert.assertFalse(entity.isRepeatable());
+        post.setEntity(entity);
+        doHttpClientRequest(httpClient, post);
+      }
+    } finally {
+      stop();
+    }
+  }
 }
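Aside (illustrative only, not part of the patch): the comment in _testAuthenticationHttpClient above relies on the fact that an Apache HttpClient 4.x InputStreamEntity cannot be replayed on an authentication retry, while a ByteArrayEntity can. A minimal, self-contained sketch of that distinction; the class name EntityRepeatabilityDemo is made up for illustration:

import java.io.ByteArrayInputStream;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.InputStreamEntity;

public class EntityRepeatabilityDemo {
  public static void main(String[] args) {
    byte[] body = "POST body".getBytes();
    // A ByteArrayEntity can be re-sent if the client has to renegotiate auth.
    ByteArrayEntity repeatable = new ByteArrayEntity(body);
    // An InputStreamEntity is consumed once; a forced renegotiation would fail,
    // which is exactly why the test asserts the entity is NOT repeatable.
    InputStreamEntity oneShot =
        new InputStreamEntity(new ByteArrayInputStream(body), body.length);
    System.out.println(repeatable.isRepeatable()); // true
    System.out.println(oneShot.isRepeatable());    // false
  }
}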

+ 8 - 30
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java

@@ -33,36 +33,6 @@ public class TestAuthenticatedURL {
     token = new AuthenticatedURL.Token("foo");
     token = new AuthenticatedURL.Token("foo");
     Assert.assertTrue(token.isSet());
     Assert.assertTrue(token.isSet());
     Assert.assertEquals("foo", token.toString());
     Assert.assertEquals("foo", token.toString());
-
-    AuthenticatedURL.Token token1 = new AuthenticatedURL.Token();
-    AuthenticatedURL.Token token2 = new AuthenticatedURL.Token();
-    Assert.assertEquals(token1.hashCode(), token2.hashCode());
-    Assert.assertTrue(token1.equals(token2));
-
-    token1 = new AuthenticatedURL.Token();
-    token2 = new AuthenticatedURL.Token("foo");
-    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
-    Assert.assertFalse(token1.equals(token2));
-
-    token1 = new AuthenticatedURL.Token("foo");
-    token2 = new AuthenticatedURL.Token();
-    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
-    Assert.assertFalse(token1.equals(token2));
-
-    token1 = new AuthenticatedURL.Token("foo");
-    token2 = new AuthenticatedURL.Token("foo");
-    Assert.assertEquals(token1.hashCode(), token2.hashCode());
-    Assert.assertTrue(token1.equals(token2));
-
-    token1 = new AuthenticatedURL.Token("bar");
-    token2 = new AuthenticatedURL.Token("foo");
-    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
-    Assert.assertFalse(token1.equals(token2));
-
-    token1 = new AuthenticatedURL.Token("foo");
-    token2 = new AuthenticatedURL.Token("bar");
-    Assert.assertNotSame(token1.hashCode(), token2.hashCode());
-    Assert.assertFalse(token1.equals(token2));
   }

   @Test
@@ -137,4 +107,12 @@ public class TestAuthenticatedURL {
     Mockito.verify(connConf).configure(Mockito.<HttpURLConnection>any());
   }
 
 
+  @Test
+  public void testGetAuthenticator() throws Exception {
+    Authenticator authenticator = Mockito.mock(Authenticator.class);
+
+    AuthenticatedURL aURL = new AuthenticatedURL(authenticator);
+    Assert.assertEquals(authenticator, aURL.getAuthenticator());
+  }
+
 }

+ 53 - 5
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java

@@ -20,16 +20,36 @@ import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHand
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.junit.runner.RunWith;
 import org.junit.Test;
 
 
 import java.io.File;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.Properties;
 import java.util.concurrent.Callable;
 
 
+@RunWith(Parameterized.class)
 public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 
+  private boolean useTomcat = false;
+
+  public TestKerberosAuthenticator(boolean useTomcat) {
+    this.useTomcat = useTomcat;
+  }
+
+  @Parameterized.Parameters
+  public static Collection booleans() {
+    return Arrays.asList(new Object[][] {
+      { false },
+      { true }
+    });
+  }
+
   @Before
   public void setup() throws Exception {
     // create keytab
@@ -53,7 +73,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 
   @Test(timeout=60000)
   public void testFallbacktoPseudoAuthenticator() throws Exception {
-    AuthenticatorTestCase auth = new AuthenticatorTestCase();
+    AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
     Properties props = new Properties();
     props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple");
     props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false");
@@ -63,7 +83,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 
   @Test(timeout=60000)
   public void testFallbacktoPseudoAuthenticatorAnonymous() throws Exception {
-    AuthenticatorTestCase auth = new AuthenticatorTestCase();
+    AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
     Properties props = new Properties();
     props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple");
     props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true");
@@ -73,7 +93,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 
   @Test(timeout=60000)
   public void testNotAuthenticated() throws Exception {
-    AuthenticatorTestCase auth = new AuthenticatorTestCase();
+    AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
     AuthenticatorTestCase.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration());
     auth.start();
     try {
@@ -89,7 +109,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 
   @Test(timeout=60000)
   public void testAuthentication() throws Exception {
-    final AuthenticatorTestCase auth = new AuthenticatorTestCase();
+    final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
     AuthenticatorTestCase.setAuthenticationHandlerConfig(
             getAuthenticationHandlerConfiguration());
     KerberosTestUtils.doAsClient(new Callable<Void>() {
@@ -103,7 +123,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 
   @Test(timeout=60000)
   public void testAuthenticationPost() throws Exception {
-    final AuthenticatorTestCase auth = new AuthenticatorTestCase();
+    final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
     AuthenticatorTestCase.setAuthenticationHandlerConfig(
             getAuthenticationHandlerConfiguration());
     KerberosTestUtils.doAsClient(new Callable<Void>() {
@@ -114,4 +134,32 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
       }
     });
   }
+
+  @Test(timeout=60000)
+  public void testAuthenticationHttpClient() throws Exception {
+    final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
+    AuthenticatorTestCase.setAuthenticationHandlerConfig(
+            getAuthenticationHandlerConfiguration());
+    KerberosTestUtils.doAsClient(new Callable<Void>() {
+      @Override
+      public Void call() throws Exception {
+        auth._testAuthenticationHttpClient(new KerberosAuthenticator(), false);
+        return null;
+      }
+    });
+  }
+
+  @Test(timeout=60000)
+  public void testAuthenticationHttpClientPost() throws Exception {
+    final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
+    AuthenticatorTestCase.setAuthenticationHandlerConfig(
+            getAuthenticationHandlerConfiguration());
+    KerberosTestUtils.doAsClient(new Callable<Void>() {
+      @Override
+      public Void call() throws Exception {
+        auth._testAuthenticationHttpClient(new KerberosAuthenticator(), true);
+        return null;
+      }
+    });
+  }
 }
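Aside (illustrative only): the @RunWith(Parameterized.class) setup above makes JUnit 4 run every test method twice, once with useTomcat=false (Jetty) and once with true (Tomcat). A stripped-down sketch of the same runner pattern; the class name ContainerFlagTest is hypothetical:

import java.util.Arrays;
import java.util.Collection;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
public class ContainerFlagTest {
  private final boolean useTomcat;

  public ContainerFlagTest(boolean useTomcat) {
    this.useTomcat = useTomcat;  // JUnit passes one parameter row per run
  }

  @Parameters
  public static Collection<Object[]> flags() {
    return Arrays.asList(new Object[][]{{false}, {true}});
  }

  @Test
  public void runsOncePerFlag() {
    System.out.println("useTomcat=" + useTomcat);
  }
}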

+ 110 - 8
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java

@@ -23,6 +23,7 @@ import java.util.Vector;
 
 
 import javax.servlet.FilterChain;
 import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
 import javax.servlet.ServletRequest;
 import javax.servlet.ServletResponse;
@@ -33,6 +34,8 @@ import javax.servlet.http.HttpServletResponse;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.util.Signer;
+import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
+import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -157,9 +160,14 @@ public class TestAuthenticationFilter {
       Mockito.when(config.getInitParameterNames()).thenReturn(
         new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                  AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       Assert.assertEquals(PseudoAuthenticationHandler.class, filter.getAuthenticationHandler().getClass());
       Assert.assertTrue(filter.isRandomSecret());
+      Assert.assertFalse(filter.isCustomSignerSecretProvider());
       Assert.assertNull(filter.getCookieDomain());
       Assert.assertNull(filter.getCookiePath());
       Assert.assertEquals(TOKEN_VALIDITY_SEC, filter.getValidity());
@@ -167,6 +175,26 @@ public class TestAuthenticationFilter {
       filter.destroy();
     }
 
 
+    // string secret
+    filter = new AuthenticationFilter();
+    try {
+      FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
+      Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
+      Mockito.when(config.getInitParameterNames()).thenReturn(
+        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
+      filter.init(config);
+      Assert.assertFalse(filter.isRandomSecret());
+      Assert.assertFalse(filter.isCustomSignerSecretProvider());
+    } finally {
+      filter.destroy();
+    }
+
     // custom secret
     filter = new AuthenticationFilter();
     try {
@@ -176,8 +204,26 @@ public class TestAuthenticationFilter {
       Mockito.when(config.getInitParameterNames()).thenReturn(
         new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                  AuthenticationFilter.SIGNATURE_SECRET)).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(
+            new SignerSecretProvider() {
+              @Override
+              public void init(Properties config, long tokenValidity) {
+              }
+              @Override
+              public byte[] getCurrentSecret() {
+                return null;
+              }
+              @Override
+              public byte[][] getAllSecrets() {
+                return null;
+              }
+            });
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       Assert.assertFalse(filter.isRandomSecret());
+      Assert.assertTrue(filter.isCustomSignerSecretProvider());
     } finally {
       filter.destroy();
     }
@@ -193,6 +239,10 @@ public class TestAuthenticationFilter {
         new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                  AuthenticationFilter.COOKIE_DOMAIN,
                                  AuthenticationFilter.COOKIE_PATH)).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       Assert.assertEquals(".foo.com", filter.getCookieDomain());
       Assert.assertEquals("/bar", filter.getCookiePath());
@@ -213,6 +263,10 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       Assert.assertTrue(DummyAuthenticationHandler.init);
     } finally {
@@ -248,6 +302,10 @@ public class TestAuthenticationFilter {
       Mockito.when(config.getInitParameterNames()).thenReturn(
           new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
               AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
 
 
       filter.init(config);
       Assert.assertEquals(PseudoAuthenticationHandler.class, 
@@ -270,6 +328,10 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);

       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -297,11 +359,15 @@ public class TestAuthenticationFilter {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);

       AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer("secret".getBytes());
+      Signer signer = new Signer(new StringSignerSecretProvider("secret"));
       String tokenSigned = signer.sign(token.toString());

       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -330,12 +396,16 @@ public class TestAuthenticationFilter {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);

       AuthenticationToken token =
           new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer("secret".getBytes());
+      Signer signer = new Signer(new StringSignerSecretProvider("secret"));
       String tokenSigned = signer.sign(token.toString());

       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -371,11 +441,15 @@ public class TestAuthenticationFilter {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);

       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer("secret".getBytes());
+      Signer signer = new Signer(new StringSignerSecretProvider("secret"));
       String tokenSigned = signer.sign(token.toString());

       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -409,6 +483,10 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);

       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -458,6 +536,10 @@ public class TestAuthenticationFilter {
             AuthenticationFilter.AUTH_TOKEN_VALIDITY,
             AuthenticationFilter.SIGNATURE_SECRET, "management.operation" +
             ".return", "expired.token")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
 
 
     if (withDomainPath) {
       Mockito.when(config.getInitParameter(AuthenticationFilter
@@ -511,7 +593,7 @@ public class TestAuthenticationFilter {
         Mockito.verify(chain).doFilter(Mockito.any(ServletRequest.class),
                 Mockito.any(ServletResponse.class));
 
 
-        Signer signer = new Signer("secret".getBytes());
+        Signer signer = new Signer(new StringSignerSecretProvider("secret"));
         String value = signer.verifyAndExtract(v);
         AuthenticationToken token = AuthenticationToken.parse(value);
         assertThat(token.getExpires(), not(0L));
@@ -578,6 +660,10 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);

       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -585,7 +671,7 @@ public class TestAuthenticationFilter {
 
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "t");
       AuthenticationToken token = new AuthenticationToken("u", "p", "t");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer("secret".getBytes());
+      Signer signer = new Signer(new StringSignerSecretProvider("secret"));
       String tokenSigned = signer.sign(token.toString());

       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -628,6 +714,10 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);

       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -691,6 +781,10 @@ public class TestAuthenticationFilter {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);

       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -698,7 +792,7 @@ public class TestAuthenticationFilter {
 
 
       AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
       token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer(secret.getBytes());
+      Signer signer = new Signer(new StringSignerSecretProvider(secret));
       String tokenSigned = signer.sign(token.toString());

       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -758,6 +852,10 @@ public class TestAuthenticationFilter {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);

       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -765,7 +863,7 @@ public class TestAuthenticationFilter {
 
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer(secret.getBytes());
+      Signer signer = new Signer(new StringSignerSecretProvider(secret));
       String tokenSigned = signer.sign(token.toString());

       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
@@ -793,6 +891,10 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNATURE_PROVIDER_ATTRIBUTE)).thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);

       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -812,7 +914,7 @@ public class TestAuthenticationFilter {
 
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "t");
       AuthenticationToken token = new AuthenticationToken("u", "p", "t");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      Signer signer = new Signer("secret".getBytes());
+      Signer signer = new Signer(new StringSignerSecretProvider("secret"));
       String tokenSigned = signer.sign(token.toString());
       Cookie cookie = new Cookie(AuthenticatedURL.AUTH_COOKIE, tokenSigned);
       Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});

+ 63 - 0
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRandomSignerSecretProvider.java

@@ -0,0 +1,63 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import java.util.Random;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestRandomSignerSecretProvider {
+
+  @Test
+  public void testGetAndRollSecrets() throws Exception {
+    long rolloverFrequency = 15 * 1000; // rollover every 15 sec
+    // use the same seed so we can predict the RNG
+    long seed = System.currentTimeMillis();
+    Random rand = new Random(seed);
+    byte[] secret1 = Long.toString(rand.nextLong()).getBytes();
+    byte[] secret2 = Long.toString(rand.nextLong()).getBytes();
+    byte[] secret3 = Long.toString(rand.nextLong()).getBytes();
+    RandomSignerSecretProvider secretProvider =
+        new RandomSignerSecretProvider(seed);
+    try {
+      secretProvider.init(null, rolloverFrequency);
+
+      byte[] currentSecret = secretProvider.getCurrentSecret();
+      byte[][] allSecrets = secretProvider.getAllSecrets();
+      Assert.assertArrayEquals(secret1, currentSecret);
+      Assert.assertEquals(2, allSecrets.length);
+      Assert.assertArrayEquals(secret1, allSecrets[0]);
+      Assert.assertNull(allSecrets[1]);
+      Thread.sleep(rolloverFrequency + 2000);
+
+      currentSecret = secretProvider.getCurrentSecret();
+      allSecrets = secretProvider.getAllSecrets();
+      Assert.assertArrayEquals(secret2, currentSecret);
+      Assert.assertEquals(2, allSecrets.length);
+      Assert.assertArrayEquals(secret2, allSecrets[0]);
+      Assert.assertArrayEquals(secret1, allSecrets[1]);
+      Thread.sleep(rolloverFrequency + 2000);
+
+      currentSecret = secretProvider.getCurrentSecret();
+      allSecrets = secretProvider.getAllSecrets();
+      Assert.assertArrayEquals(secret3, currentSecret);
+      Assert.assertEquals(2, allSecrets.length);
+      Assert.assertArrayEquals(secret3, allSecrets[0]);
+      Assert.assertArrayEquals(secret2, allSecrets[1]);
+      Thread.sleep(rolloverFrequency + 2000);
+    } finally {
+      secretProvider.destroy();
+    }
+  }
+}
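Aside (illustrative only): the test above can pre-compute the provider's secrets because java.util.Random is deterministic for a given seed. A tiny standalone sketch of that property; the class name SeededRandomDemo is made up:

import java.util.Random;

public class SeededRandomDemo {
  public static void main(String[] args) {
    long seed = 42L;
    Random a = new Random(seed);
    Random b = new Random(seed);
    for (int i = 0; i < 3; i++) {
      // identical seeds yield identical sequences, hence predictable secrets
      System.out.println(a.nextLong() == b.nextLong()); // true
    }
  }
}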

+ 79 - 0
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestRolloverSignerSecretProvider.java

@@ -0,0 +1,79 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestRolloverSignerSecretProvider {
+
+  @Test
+  public void testGetAndRollSecrets() throws Exception {
+    long rolloverFrequency = 15 * 1000; // rollover every 15 sec
+    byte[] secret1 = "doctor".getBytes();
+    byte[] secret2 = "who".getBytes();
+    byte[] secret3 = "tardis".getBytes();
+    TRolloverSignerSecretProvider secretProvider =
+        new TRolloverSignerSecretProvider(
+            new byte[][]{secret1, secret2, secret3});
+    try {
+      secretProvider.init(null, rolloverFrequency);
+
+      byte[] currentSecret = secretProvider.getCurrentSecret();
+      byte[][] allSecrets = secretProvider.getAllSecrets();
+      Assert.assertArrayEquals(secret1, currentSecret);
+      Assert.assertEquals(2, allSecrets.length);
+      Assert.assertArrayEquals(secret1, allSecrets[0]);
+      Assert.assertNull(allSecrets[1]);
+      Thread.sleep(rolloverFrequency + 2000);
+
+      currentSecret = secretProvider.getCurrentSecret();
+      allSecrets = secretProvider.getAllSecrets();
+      Assert.assertArrayEquals(secret2, currentSecret);
+      Assert.assertEquals(2, allSecrets.length);
+      Assert.assertArrayEquals(secret2, allSecrets[0]);
+      Assert.assertArrayEquals(secret1, allSecrets[1]);
+      Thread.sleep(rolloverFrequency + 2000);
+
+      currentSecret = secretProvider.getCurrentSecret();
+      allSecrets = secretProvider.getAllSecrets();
+      Assert.assertArrayEquals(secret3, currentSecret);
+      Assert.assertEquals(2, allSecrets.length);
+      Assert.assertArrayEquals(secret3, allSecrets[0]);
+      Assert.assertArrayEquals(secret2, allSecrets[1]);
+      Thread.sleep(rolloverFrequency + 2000);
+    } finally {
+      secretProvider.destroy();
+    }
+  }
+
+  class TRolloverSignerSecretProvider extends RolloverSignerSecretProvider {
+
+    private byte[][] newSecretSequence;
+    private int newSecretSequenceIndex;
+
+    public TRolloverSignerSecretProvider(byte[][] newSecretSequence)
+        throws Exception {
+      super();
+      this.newSecretSequence = newSecretSequence;
+      this.newSecretSequenceIndex = 0;
+    }
+
+    @Override
+    protected byte[] generateNewSecret() {
+      return newSecretSequence[newSecretSequenceIndex++];
+    }
+
+  }
+}
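Aside (illustrative only, not the patch's RolloverSignerSecretProvider): the behavior this test exercises -- keep a current and a previous secret and rotate them on a fixed schedule -- can be sketched with a ScheduledExecutorService as follows; all names here are made up:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public abstract class RolloverSketch {
  private volatile byte[][] secrets;   // [0] = current, [1] = previous
  private ScheduledExecutorService scheduler;

  protected abstract byte[] generateNewSecret();

  public void init(long rolloverMillis) {
    secrets = new byte[][]{generateNewSecret(), null};
    scheduler = Executors.newSingleThreadScheduledExecutor();
    // on every tick, the current secret becomes the previous one
    scheduler.scheduleAtFixedRate(() -> {
      secrets = new byte[][]{generateNewSecret(), secrets[0]};
    }, rolloverMillis, rolloverMillis, TimeUnit.MILLISECONDS);
  }

  public byte[] getCurrentSecret() { return secrets[0]; }
  public byte[][] getAllSecrets() { return secrets; }

  public void destroy() {
    if (scheduler != null) scheduler.shutdown();
  }
}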

+ 69 - 16
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java

@@ -13,24 +13,15 @@
  */
 package org.apache.hadoop.security.authentication.util;
 
 
+import java.util.Properties;
 import org.junit.Assert;
 import org.junit.Test;
 
 
 public class TestSigner {
 
 
-  @Test
-  public void testNoSecret() throws Exception {
-    try {
-      new Signer(null);
-      Assert.fail();
-    }
-    catch (IllegalArgumentException ex) {
-    }
-  }
-
   @Test
   public void testNullAndEmptyString() throws Exception {
-    Signer signer = new Signer("secret".getBytes());
+    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
     try {
       signer.sign(null);
       Assert.fail();
@@ -51,17 +42,17 @@ public class TestSigner {
 
 
   @Test
   public void testSignature() throws Exception {
-    Signer signer = new Signer("secret".getBytes());
+    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
     String s1 = signer.sign("ok");
     String s1 = signer.sign("ok");
     String s2 = signer.sign("ok");
     String s2 = signer.sign("ok");
     String s3 = signer.sign("wrong");
     String s3 = signer.sign("wrong");
     Assert.assertEquals(s1, s2);
     Assert.assertEquals(s1, s2);
-    Assert.assertNotSame(s1, s3);
+    Assert.assertNotEquals(s1, s3);
   }
 
 
   @Test
   public void testVerify() throws Exception {
-    Signer signer = new Signer("secret".getBytes());
+    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
     String t = "test";
     String t = "test";
     String s = signer.sign(t);
     String s = signer.sign(t);
     String e = signer.verifyAndExtract(s);
     String e = signer.verifyAndExtract(s);
@@ -70,7 +61,7 @@ public class TestSigner {
 
 
   @Test
   public void testInvalidSignedText() throws Exception {
-    Signer signer = new Signer("secret".getBytes());
+    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
     try {
       signer.verifyAndExtract("test");
       Assert.fail();
@@ -83,7 +74,7 @@ public class TestSigner {
 
 
   @Test
   public void testTampering() throws Exception {
-    Signer signer = new Signer("secret".getBytes());
+    Signer signer = new Signer(new StringSignerSecretProvider("secret"));
     String t = "test";
     String t = "test";
     String s = signer.sign(t);
     String s = signer.sign(t);
     s += "x";
     s += "x";
@@ -96,4 +87,66 @@ public class TestSigner {
       Assert.fail();
     }
   }
+
+  @Test
+  public void testMultipleSecrets() throws Exception {
+    TestSignerSecretProvider secretProvider = new TestSignerSecretProvider();
+    Signer signer = new Signer(secretProvider);
+    secretProvider.setCurrentSecret("secretB");
+    String t1 = "test";
+    String s1 = signer.sign(t1);
+    String e1 = signer.verifyAndExtract(s1);
+    Assert.assertEquals(t1, e1);
+    secretProvider.setPreviousSecret("secretA");
+    String t2 = "test";
+    String s2 = signer.sign(t2);
+    String e2 = signer.verifyAndExtract(s2);
+    Assert.assertEquals(t2, e2);
+    Assert.assertEquals(s1, s2); //check is using current secret for signing
+    secretProvider.setCurrentSecret("secretC");
+    secretProvider.setPreviousSecret("secretB");
+    String t3 = "test";
+    String s3 = signer.sign(t3);
+    String e3 = signer.verifyAndExtract(s3);
+    Assert.assertEquals(t3, e3);
+    Assert.assertNotEquals(s1, s3); //check not using current secret for signing
+    String e1b = signer.verifyAndExtract(s1);
+    Assert.assertEquals(t1, e1b); // previous secret still valid
+    secretProvider.setCurrentSecret("secretD");
+    secretProvider.setPreviousSecret("secretC");
+    try {
+      signer.verifyAndExtract(s1);  // previous secret no longer valid
+      Assert.fail();
+    } catch (SignerException ex) {
+      // Expected
+    }
+  }
+
+  class TestSignerSecretProvider extends SignerSecretProvider {
+
+    private byte[] currentSecret;
+    private byte[] previousSecret;
+
+    @Override
+    public void init(Properties config, long tokenValidity) {
+    }
+
+    @Override
+    public byte[] getCurrentSecret() {
+      return currentSecret;
+    }
+
+    @Override
+    public byte[][] getAllSecrets() {
+      return new byte[][]{currentSecret, previousSecret};
+    }
+
+    public void setCurrentSecret(String secretStr) {
+      currentSecret = secretStr.getBytes();
+    }
+
+    public void setPreviousSecret(String previousSecretStr) {
+      previousSecret = previousSecretStr.getBytes();
+    }
+  }
 }
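Aside (illustrative only, not the Signer implementation in this patch): testMultipleSecrets above relies on the convention of signing with the provider's current secret while accepting any secret returned by getAllSecrets() during verification. A self-contained sketch of that idea, with made-up names and a SHA-256 digest standing in for whatever digest the real Signer uses:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Arrays;

public class RollingSignerSketch {
  private final byte[][] secrets;   // [0] = current, [1] = previous (may be null)

  public RollingSignerSketch(byte[][] secrets) { this.secrets = secrets; }

  private byte[] digest(String text, byte[] secret) throws Exception {
    MessageDigest md = MessageDigest.getInstance("SHA-256");
    md.update(text.getBytes(StandardCharsets.UTF_8));
    md.update(secret);
    return md.digest();
  }

  public String sign(String text) throws Exception {
    // always sign with the current secret
    return text + "&sig=" + Arrays.toString(digest(text, secrets[0]));
  }

  public String verifyAndExtract(String signed) throws Exception {
    int idx = signed.lastIndexOf("&sig=");
    String text = signed.substring(0, idx);
    String sig = signed.substring(idx + "&sig=".length());
    for (byte[] secret : secrets) {   // accept the current or a previous secret
      if (secret != null && Arrays.toString(digest(text, secret)).equals(sig)) {
        return text;
      }
    }
    throw new IllegalArgumentException("Invalid signature");
  }

  public static void main(String[] args) throws Exception {
    byte[][] secrets = {"current".getBytes(), "previous".getBytes()};
    RollingSignerSketch signer = new RollingSignerSketch(secrets);
    System.out.println(signer.verifyAndExtract(signer.sign("test"))); // test
  }
}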

+ 33 - 0
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestStringSignerSecretProvider.java

@@ -0,0 +1,33 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestStringSignerSecretProvider {
+
+  @Test
+  public void testGetSecrets() throws Exception {
+    String secretStr = "secret";
+    StringSignerSecretProvider secretProvider
+        = new StringSignerSecretProvider(secretStr);
+    secretProvider.init(null, -1);
+    byte[] secretBytes = secretStr.getBytes();
+    Assert.assertArrayEquals(secretBytes, secretProvider.getCurrentSecret());
+    byte[][] allSecrets = secretProvider.getAllSecrets();
+    Assert.assertEquals(1, allSecrets.length);
+    Assert.assertArrayEquals(secretBytes, allSecrets[0]);
+  }
+}

+ 388 - 114
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -9,9 +9,9 @@ Trunk (Unreleased)
 
 
     HADOOP-10474 Move o.a.h.record to hadoop-streaming. (wheat9)
 
 
-  NEW FEATURES
+    HADOOP-9902. Shell script rewrite (aw)
 
 
-    HADOOP-10433. Key Management Server based on KeyProvider API. (tucu)
+  NEW FEATURES
 
 
     HADOOP-9629. Support Windows Azure Storage - Blob as a file system in Hadoop.
     (Dexter Bradshaw, Mostafa Elhemali, Xi Fang, Johannes Klein, David Lao,
@@ -23,9 +23,6 @@ Trunk (Unreleased)
     Mike Liddell, Chuan Liu, Lengning Liu, Ivan Mitic, Michael Rys,
     Alexander Stojanovich, Brian Swan, and Min Wei via cnauroth)
     
     
-    HADOOP-10719. Add generateEncryptedKey and decryptEncryptedKey 
-    methods to KeyProvider. (asuresh via tucu)
-
   IMPROVEMENTS
 
 
     HADOOP-8017. Configure hadoop-main pom to get rid of M2E plugin execution
@@ -119,75 +116,20 @@ Trunk (Unreleased)
 
 
     HADOOP-9833 move slf4j to version 1.7.5 (Kousuke Saruta via stevel)
 
 
-    HADOOP-10141. Create KeyProvider API to separate encryption key storage
-    from the applications. (omalley)
-
-    HADOOP-10201. Add listing to KeyProvider API. (Larry McCay via omalley)
-
-    HADOOP-10177. Create CLI tools for managing keys. (Larry McCay via omalley)
-
-    HADOOP-10244. TestKeyShell improperly tests the results of delete (Larry
-    McCay via omalley)
-
     HADOOP-10325. Improve jenkins javadoc warnings from test-patch.sh (cmccabe)
 
 
     HADOOP-10342. Add a new method to UGI to use a Kerberos login subject to
     build a new UGI. (Larry McCay via omalley)
 
 
-    HADOOP-10237. JavaKeyStoreProvider needs to set keystore permissions 
-    correctly. (Larry McCay via omalley)
-
-    HADOOP-10432. Refactor SSLFactory to expose static method to determine
-    HostnameVerifier. (tucu)
-
-    HADOOP-10427. KeyProvider implementations should be thread safe. (tucu)
-
-    HADOOP-10429. KeyStores should have methods to generate the materials 
-    themselves, KeyShell should use them. (tucu)
-
-    HADOOP-10428. JavaKeyStoreProvider should accept keystore password via 
-    configuration falling back to ENV VAR. (tucu)
-
-    HADOOP-10430. KeyProvider Metadata should have an optional description, 
-    there should be a method to retrieve the metadata from all keys. (tucu)
-
-    HADOOP-10534. KeyProvider getKeysMetadata should take a list of names 
-    rather than returning all keys. (omalley)
-
     HADOOP-10563. Remove the dependency of jsp in trunk. (wheat9)
 
 
     HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)
 
 
-    HADOOP-10696. Add optional attributes to KeyProvider Options and Metadata. 
-    (tucu)
-
-    HADOOP-10695. KMSClientProvider should respect a configurable timeout. 
-    (yoderme via tucu)
-
-    HADOOP-10757. KeyProvider KeyVersion should provide the key name. 
-    (asuresh via tucu)
+    HADOOP-11013. CLASSPATH handling should be consolidated, debuggable (aw)
 
 
-    HADOOP-10769. Create KeyProvider extension to handle delegation tokens.
-    (Arun Suresh via atm)
+    HADOOP-11041. VersionInfo specifies subversion (Tsuyoshi OZAWA via aw)
 
 
-    HADOOP-10812. Delegate KeyProviderExtension#toString to underlying
-    KeyProvider. (wang)
-
-    HADOOP-10736. Add key attributes to the key shell. (Mike Yoder via wang)
-
-    HADOOP-10824. Refactor KMSACLs to avoid locking. (Benoy Antony via umamahesh)
-
-    HADOOP-10841. EncryptedKeyVersion should have a key name property. 
-    (asuresh via tucu)
-
-    HADOOP-10842. CryptoExtension generateEncryptedKey method should 
-    receive the key name. (asuresh via tucu)
-
-    HADOOP-10750. KMSKeyProviderCache should be in hadoop-common. 
-    (asuresh via tucu)
-
-    HADOOP-10720. KMS: Implement generateEncryptedKey and decryptEncryptedKey
-    in the REST API. (asuresh via tucu)
+    HADOOP-10373 create tools/hadoop-amazon for aws/EMR support (stevel)
 
 
   BUG FIXES
 
 
@@ -296,9 +238,6 @@ Trunk (Unreleased)
     HADOOP-8813. Add InterfaceAudience and InterfaceStability annotations
     to RPC Server and Client classes. (Brandon Li via suresh)
 
 
-    HADOOP-8815. RandomDatum needs to override hashCode().
-    (Brandon Li via suresh)
-
     HADOOP-8436. NPE In getLocalPathForWrite ( path, conf ) when the
     required context item is not configured
     (Brahma Reddy Battula via harsh)
@@ -360,22 +299,9 @@ Trunk (Unreleased)
 
 
     HADOOP-10044 Improve the javadoc of rpc code (sanjay Radia)
 
 
-    HADOOP-10488. TestKeyProviderFactory fails randomly. (tucu)
-
-    HADOOP-10431. Change visibility of KeyStore.Options getter methods to public. (tucu)
-
-    HADOOP-10583. bin/hadoop key throws NPE with no args and assorted other fixups. (clamb via tucu)
-
-    HADOOP-10586. KeyShell doesn't allow setting Options via CLI. (clamb via tucu)
-
     HADOOP-10625. Trim configuration names when putting/getting them
     to properties. (Wangda Tan via xgong)
 
 
-    HADOOP-10645. TestKMS fails because race condition writing acl files. (tucu)
-
-    HADOOP-10611. KMS, keyVersion name should not be assumed to be 
-    keyName@versionNumber. (tucu)
-
     HADOOP-10717. HttpServer2 should load jsp DTD from local jars instead of
     going remote. (Dapeng Sun via wheat9)
 
 
@@ -390,17 +316,19 @@ Trunk (Unreleased)

    HADOOP-10834. Typo in CredentialShell usage. (Benoy Antony via umamahesh)

-    HADOOP-10816. KeyShell returns -1 on error to the shell, should be 1.
-    (Mike Yoder via wang)
-
    HADOOP-10840. Fix OutOfMemoryError caused by metrics system in Azure File
    System. (Shanyu Zhao via cnauroth)

-    HADOOP-10826. Iteration on KeyProviderFactory.serviceLoader is 
-    thread-unsafe. (benoyantony viat tucu)
+    HADOOP-10925. Compilation fails in native link0 function on Windows.
+    (cnauroth)

-    HADOOP-10881. Clarify usage of encryption and encrypted encryption
-    key in KeyProviderCryptoExtension. (wang)
+    HADOOP-11002. shell escapes are incompatible with previous releases (aw)
+
+    HADOOP-10996. Stop violence in the *_HOME (aw)
+
+    HADOOP-10748. HttpServer2 should not load JspServlet. (wheat9)
+
+    HADOOP-11033. shell scripts ignore JAVA_HOME on OS X. (aw)

  OPTIMIZATIONS

@@ -414,6 +342,11 @@ Release 2.6.0 - UNRELEASED

  NEW FEATURES

+    HADOOP-10433. Key Management Server based on KeyProvider API. (tucu)
+
+    HADOOP-10893. isolated classloader on the client side (Sangjin Lee via
+    jlowe)
+
  IMPROVEMENTS

    HADOOP-10808. Remove unused native code for munlock. (cnauroth)
@@ -457,8 +390,163 @@ Release 2.6.0 - UNRELEASED

    HADOOP-10882. Move DirectBufferPool into common util. (todd)

+    HADOOP-8069. Enable TCP_NODELAY by default for IPC. (Todd Lipcon via
+    Arpit Agarwal)
+
+    HADOOP-10902. Deletion of directories with snapshots will not output
+    reason for trash move failure. (Stephen Chu via wang)
+
+    HADOOP-10900. CredentialShell args should use single-dash style. (wang)
+
+    HADOOP-10903. Enhance hadoop classpath command to expand wildcards or write
+    classpath into jar manifest. (cnauroth)
+
+    HADOOP-10791. AuthenticationFilter should support externalizing the 
+    secret for signing and provide rotation support. (rkanter via tucu)
+
+    HADOOP-10771. Refactor HTTP delegation support out of httpfs to common. 
+    (tucu)
+
+    HADOOP-10835. Implement HTTP proxyuser support in HTTP authentication 
+    client/server libraries. (tucu)
+
+    HADOOP-10820. Throw an exception in GenericOptionsParser when passed
+    an empty Path. (Alex Holmes and Zhihai Xu via wang)
+
+    HADOOP-10281. Create a scheduler, which assigns schedulables a priority
+    level. (Chris Li via Arpit Agarwal)
+
+    HADOOP-8944. Shell command fs -count should include human readable option 
+    (Jonathan Allen via aw)
+
+    HADOOP-10231. Add some components in Native Libraries document (Akira 
+    AJISAKA via aw)
+
+    HADOOP-10650. Add ability to specify a reverse ACL (black list) of users
+    and groups. (Benoy Antony via Arpit Agarwal)
+
+    HADOOP-10335. An ip whilelist based implementation to resolve Sasl
+    properties per connection. (Benoy Antony via Arpit Agarwal)
+
+    HADOOP-10975. org.apache.hadoop.util.DataChecksum should support calculating
+    checksums in native code (James Thomas via Colin Patrick McCabe)
+
+    HADOOP-10201. Add listing to KeyProvider API. (Larry McCay via omalley)
+
+    HADOOP-10177. Create CLI tools for managing keys. (Larry McCay via omalley)
+
+    HADOOP-10432. Refactor SSLFactory to expose static method to determine
+    HostnameVerifier. (tucu)
+
+    HADOOP-10429. KeyStores should have methods to generate the materials
+    themselves, KeyShell should use them. (tucu)
+
+    HADOOP-10427. KeyProvider implementations should be thread safe. (tucu)
+
+    HADOOP-10428. JavaKeyStoreProvider should accept keystore password via
+    configuration falling back to ENV VAR. (tucu)
+
+    HADOOP-10430. KeyProvider Metadata should have an optional description,
+    there should be a method to retrieve the metadata from all keys. (tucu)
+
+    HADOOP-10431. Change visibility of KeyStore.Options getter methods to
+    public. (tucu)
+
+    HADOOP-10534. KeyProvider getKeysMetadata should take a list of names
+    rather than returning all keys. (omalley)
+
+    HADOOP-10719. Add generateEncryptedKey and decryptEncryptedKey
+    methods to KeyProvider. (asuresh via tucu)
+
+    HADOOP-10817. ProxyUsers configuration should support configurable
+    prefixes. (tucu)
+
+    HADOOP-10881. Clarify usage of encryption and encrypted encryption
+    key in KeyProviderCryptoExtension. (wang)
+
+    HADOOP-10770. KMS add delegation token support. (tucu)
+
+    HADOOP-10698. KMS, add proxyuser support. (tucu)
+
+    HADOOP-8896. Javadoc points to Wrong Reader and Writer classes 
+    in SequenceFile (Ray Chiang via aw)
+
+    HADOOP-10998. Fix bash tab completion code to work (Jim Hester via aw)
+
+    HADOOP-10880. Move HTTP delegation tokens out of URL querystring to 
+    a header. (tucu)
+
+    HADOOP-11005. Fix HTTP content type for ReconfigurationServlet.
+    (Lei Xu via wang)
+
+    HADOOP-10814. Update Tomcat version used by HttpFS and KMS to latest
+    6.x version. (rkanter via tucu)
+
+    HADOOP-10994. KeyProviderCryptoExtension should use CryptoCodec for 
+    generation/decryption of keys. (tucu)
+
+    HADOOP-11021. Configurable replication factor in the hadoop archive
+    command. (Zhe Zhang via wang)
+
+    HADOOP-11030. Define a variable jackson.version instead of using constant 
+    at multiple places. (Juan Yu via kasha)
+
+    HADOOP-10990. Add missed NFSv3 request and response classes (brandonli)
+
  OPTIMIZATIONS

+    HADOOP-10838. Byte array native checksumming. (James Thomas via todd)
+
+    HADOOP-10696. Add optional attributes to KeyProvider Options and Metadata.
+    (tucu)
+
+    HADOOP-10695. KMSClientProvider should respect a configurable timeout.
+    (yoderme via tucu)
+
+    HADOOP-10757. KeyProvider KeyVersion should provide the key name.
+    (asuresh via tucu)
+
+    HADOOP-10769. Create KeyProvider extension to handle delegation tokens.
+    (Arun Suresh via atm)
+
+    HADOOP-10812. Delegate KeyProviderExtension#toString to underlying
+    KeyProvider. (wang)
+
+    HADOOP-10736. Add key attributes to the key shell. (Mike Yoder via wang)
+
+    HADOOP-10824. Refactor KMSACLs to avoid locking. (Benoy Antony via umamahesh)
+
+    HADOOP-10841. EncryptedKeyVersion should have a key name property.
+    (asuresh via tucu)
+
+    HADOOP-10842. CryptoExtension generateEncryptedKey method should
+    receive the key name. (asuresh via tucu)
+
+    HADOOP-10750. KMSKeyProviderCache should be in hadoop-common.
+    (asuresh via tucu)
+
+    HADOOP-10720. KMS: Implement generateEncryptedKey and decryptEncryptedKey
+    in the REST API. (asuresh via tucu)
+
+    HADOOP-10891. Add EncryptedKeyVersion factory method to
+    KeyProviderCryptoExtension. (wang)
+
+    HADOOP-10756. KMS audit log should consolidate successful similar requests.
+    (asuresh via tucu)
+
+    HADOOP-10793. KeyShell args should use single-dash style. (wang)
+
+    HADOOP-10936. Change default KeyProvider bitlength to 128. (wang)
+
+    HADOOP-10224. JavaKeyStoreProvider has to protect against corrupting
+    underlying store. (asuresh via tucu)
+
+    HADOOP-10282. Create a FairCallQueue: a multi-level call queue which
+    schedules incoming calls and multiplexes outgoing calls. (Chris Li via
+    Arpit Agarwal)
+
+    HADOOP-10833. Remove unused cache in UserProvider. (Benoy Antony)
+
  BUG FIXES

    HADOOP-10781. Unportable getgrouplist() usage breaks FreeBSD (Dmitry
@@ -488,7 +576,185 @@ Release 2.6.0 - UNRELEASED
    HADOOP-10830. Missing lock in JavaKeyStoreProvider.createCredentialEntry.
    (Benoy Antony via umamahesh)

-Release 2.5.0 - UNRELEASED
+    HADOOP-10928. Incorrect usage on `hadoop credential list`.
+    (Josh Elser via wang)
+
+    HADOOP-10927. Fix CredentialShell help behavior and error codes.
+    (Josh Elser via wang)
+
+    HADOOP-10933. FileBasedKeyStoresFactory Should use Configuration.getPassword 
+    for SSL Passwords. (lmccay via tucu)
+
+    HADOOP-10759. Remove hardcoded JAVA_HEAP_MAX. (Sam Liu via Eric Yang)
+
+    HADOOP-10905. LdapGroupsMapping Should use configuration.getPassword for SSL
+    and LDAP Passwords. (lmccay via brandonli)
+
+    HADOOP-10931 compile error on tools/hadoop-openstack (xukun via stevel)
+
+    HADOOP-10929. Typo in Configuration.getPasswordFromCredentialProviders
+    (lmccay via brandonli)
+
+    HADOOP-10402. Configuration.getValByRegex does not substitute for
+    variables. (Robert Kanter via kasha)
+
+    HADOOP-10851. NetgroupCache does not remove group memberships. (Benoy
+    Antony via Arpit Agarwal)
+
+    HADOOP-10962. Flags for posix_fadvise are not valid in some architectures
+    (David Villegas via Colin Patrick McCabe)
+
+    HADOOP-10966. Hadoop Common native compilation broken in windows.
+    (David Villegas via Arpit Agarwal)
+
+    HADOOP-10843. TestGridmixRecord unit tests failure on PowerPC (Jinghui Wang
+    via Colin Patrick McCabe)
+
+    HADOOP-10121. Fix javadoc spelling for HadoopArchives#writeTopLevelDirs
+    (Akira AJISAKA via aw)
+
+    HADOOP-10964. Small fix for NetworkTopologyWithNodeGroup#sortByDistance.
+    (Yi Liu via wang)
+
+    HADOOP-10059. RPC authentication and authorization metrics overflow to
+    negative values on busy clusters (Tsuyoshi OZAWA and Akira AJISAKA
+    via jlowe)
+
+    HADOOP-10973. Native Libraries Guide contains format error. (Peter Klavins
+    via Arpit Agarwal)
+
+    HADOOP-10972. Native Libraries Guide contains mis-spelt build line (Peter
+    Klavins via aw)
+
+    HADOOP-10873. Fix dead link in Configuration javadoc (Akira AJISAKA 
+    via aw)
+
+    HADOOP-10968. hadoop native build fails to detect java_libarch on
+    ppc64le (Dinar Valeev via Colin Patrick McCabe)
+
+    HADOOP-10141. Create KeyProvider API to separate encryption key storage
+    from the applications. (omalley)
+
+    HADOOP-10237. JavaKeyStoreProvider needs to set keystore permissions
+    correctly. (Larry McCay via omalley)
+
+    HADOOP-10244. TestKeyShell improperly tests the results of delete (Larry
+    McCay via omalley)
+
+    HADOOP-10583. bin/hadoop key throws NPE with no args and assorted other fixups. (clamb via tucu)
+
+    HADOOP-10586. KeyShell doesn't allow setting Options via CLI. (clamb via tucu)
+
+    HADOOP-10645. TestKMS fails because race condition writing acl files. (tucu)
+
+    HADOOP-10611. KMS, keyVersion name should not be assumed to be
+    keyName@versionNumber. (tucu)
+
+    HADOOP-10816. KeyShell returns -1 on error to the shell, should be 1.
+    (Mike Yoder via wang)
+
+    HADOOP-10826. Iteration on KeyProviderFactory.serviceLoader is
+    thread-unsafe. (benoyantony viat tucu)
+
+    HADOOP-10920. site plugin couldn't parse hadoop-kms index.apt.vm.
+    (Akira Ajisaka via wang)
+
+    HADOOP-10937. Need to set version name correctly before decrypting EEK.
+    (Arun Suresh via wang)
+
+    HADOOP-10918. JMXJsonServlet fails when used within Tomcat. (tucu)
+
+    HADOOP-10939. Fix TestKeyProviderFactory testcases to use default 128 bit
+    length keys. (Arun Suresh via wang)
+
+    HADOOP-10862. Miscellaneous trivial corrections to KMS classes.
+    (asuresh via tucu)
+
+    HADOOP-10967. Improve DefaultCryptoExtension#generateEncryptedKey
+    performance. (hitliuyi via tucu)
+
+    HADOOP-10488. TestKeyProviderFactory fails randomly. (tucu)
+
+    HADOOP-10989. Work around buggy getgrouplist() implementations on Linux that
+    return 0 on failure. (cnauroth)
+
+    HADOOP-8815. RandomDatum needs to override hashCode().
+    (Brandon Li via suresh)
+
+    BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
+  
+      HADOOP-10734. Implement high-performance secure random number sources.
+      (Yi Liu via Colin Patrick McCabe)
+  
+      HADOOP-10603. Crypto input and output streams implementing Hadoop stream
+      interfaces. (Yi Liu and Charles Lamb)
+  
+      HADOOP-10628. Javadoc and few code style improvement for Crypto
+      input and output streams. (Yi Liu via clamb)
+  
+      HADOOP-10632. Minor improvements to Crypto input and output streams. 
+      (Yi Liu)
+  
+      HADOOP-10635. Add a method to CryptoCodec to generate SRNs for IV. (Yi Liu)
+  
+      HADOOP-10653. Add a new constructor for CryptoInputStream that 
+      receives current position of wrapped stream. (Yi Liu)
+  
+      HADOOP-10662. NullPointerException in CryptoInputStream while wrapped
+      stream is not ByteBufferReadable. Add tests using normal stream. (Yi Liu)
+  
+      HADOOP-10713. Refactor CryptoCodec#generateSecureRandom to take a byte[]. 
+      (wang via yliu)
+  
+      HADOOP-10693. Implementation of AES-CTR CryptoCodec using JNI to OpenSSL. 
+      (Yi Liu via cmccabe)
+  
+      HADOOP-10803. Update OpensslCipher#getInstance to accept CipherSuite#name
+      format. (Yi Liu)
+  
+      HADOOP-10735. Fall back AesCtrCryptoCodec implementation from OpenSSL to
+      JCE if non native support. (Yi Liu)
+  
+      HADOOP-10870. Failed to load OpenSSL cipher error logs on systems with old
+      openssl versions (cmccabe)
+  
+      HADOOP-10853. Refactor get instance of CryptoCodec and support create via
+      algorithm/mode/padding. (Yi Liu)
+  
+      HADOOP-10919. Copy command should preserve raw.* namespace
+      extended attributes. (clamb)
+  
+      HDFS-6873. Constants in CommandWithDestination should be static. (clamb)
+  
+      HADOOP-10871. incorrect prototype in OpensslSecureRandom.c (cmccabe)
+  
+      HADOOP-10886. CryptoCodec#getCodecclasses throws NPE when configurations not 
+      loaded. (umamahesh)      
+    --
+
+    HADOOP-10911. hadoop.auth cookie after HADOOP-10710 still not proper
+    according to RFC2109. (gchanan via tucu)
+
+    HADOOP-11036. Add build directory to .gitignore (Tsuyoshi OZAWA via aw)
+
+    HADOOP-11012. hadoop fs -text of zero-length file causes EOFException
+    (Eric Payne via jlowe)
+
+Release 2.5.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HADOOP-11001. Fix test-patch to work with the git repo. (kasha)
+
+Release 2.5.0 - 2014-08-11

  INCOMPATIBLE CHANGES

@@ -500,6 +766,9 @@ Release 2.5.0 - UNRELEASED

    HADOOP-8943. Support multiple group mapping providers. (Kai Zheng via brandonli)

+    HADOOP-9361 Strictly define the expected behavior of filesystem APIs and
+    write tests to verify compliance (stevel)
+
  IMPROVEMENTS

    HADOOP-10451. Remove unused field and imports from SaslRpcServer.
@@ -594,9 +863,6 @@ Release 2.5.0 - UNRELEASED
    HADOOP-10747. Support configurable retries on SASL connection failures in
    RPC client. (cnauroth)

-    HADOOP-10674. Improve PureJavaCrc32 performance and use java.util.zip.CRC32
-    for Java 7 and above. (szetszwo)
-
    HADOOP-10754. Reenable several HA ZooKeeper-related tests on Windows.
    (cnauroth)

@@ -608,9 +874,6 @@ Release 2.5.0 - UNRELEASED

    HADOOP-10767. Clean up unused code in Ls shell command. (cnauroth)

-    HADOOP-9361 Strictly define the expected behavior of filesystem APIs and
-    write tests to verify compliance (stevel)
-
    HADOOP-9651 Filesystems to throw FileAlreadyExistsException in
    createFile(path, overwrite=false) when the file exists (stevel)

@@ -621,8 +884,14 @@ Release 2.5.0 - UNRELEASED

    HADOOP-10782. Fix typo in DataChecksum class. (Jingguo Yao via suresh)

+    HADOOP-10896. Update compatibility doc to capture visibility of 
+    un-annotated classes/ methods. (kasha)
+
  OPTIMIZATIONS

+    HADOOP-10674. Improve PureJavaCrc32 performance and use java.util.zip.CRC32
+    for Java 7 and above. (szetszwo)
+
  BUG FIXES

    HADOOP-10378. Typo in help printed by hdfs dfs -help.
@@ -777,27 +1046,6 @@ Release 2.5.0 - UNRELEASED

    HADOOP-10801 dead link in site.xml (Akira AJISAKA via stevel)

-  BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS
-
-    HADOOP-10520. Extended attributes definition and FileSystem APIs for
-    extended attributes. (Yi Liu via wang)
-
-    HADOOP-10546. Javadoc and other small fixes for extended attributes in
-    hadoop-common. (Charles Lamb via wang)
-
-    HADOOP-10521. FsShell commands for extended attributes. (Yi Liu via wang)
-
-    HADOOP-10548. Improve FsShell xattr error handling and other fixes. (Charles Lamb via umamahesh)
-
-    HADOOP-10567. Shift XAttr value encoding code out for reuse. (Yi Liu via umamahesh)
-
-    HADOOP-10621. Remove CRLF for xattr value base64 encoding for better display.(Yi Liu via umamahesh)
-
-    HADOOP-10575. Small fixes for XAttrCommands and test. (Yi Liu via umamahesh)
-
-    HADOOP-10561. Copy command with preserve option should handle Xattrs.
-    (Yi Liu via cnauroth)
-
    HADOOP-10590. ServiceAuthorizationManager is not threadsafe. (Benoy Antony via vinayakumarb)

    HADOOP-10711. Cleanup some extra dependencies from hadoop-auth. (rkanter via tucu)
@@ -819,6 +1067,32 @@ Release 2.5.0 - UNRELEASED
    HADOOP-10890. TestDFVariations.testMount fails intermittently. (Yongjun
    Zhang via Arpit Agarwal)

+    HADOOP-10894. Fix dead link in ToolRunner documentation. (Akira Ajisaka
+    via Arpit Agarwal)
+
+    HADOOP-10910. Increase findbugs maxHeap size. (wang)
+
+  BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS
+
+    HADOOP-10520. Extended attributes definition and FileSystem APIs for
+    extended attributes. (Yi Liu via wang)
+
+    HADOOP-10546. Javadoc and other small fixes for extended attributes in
+    hadoop-common. (Charles Lamb via wang)
+
+    HADOOP-10521. FsShell commands for extended attributes. (Yi Liu via wang)
+
+    HADOOP-10548. Improve FsShell xattr error handling and other fixes. (Charles Lamb via umamahesh)
+
+    HADOOP-10567. Shift XAttr value encoding code out for reuse. (Yi Liu via umamahesh)
+
+    HADOOP-10621. Remove CRLF for xattr value base64 encoding for better display.(Yi Liu via umamahesh)
+
+    HADOOP-10575. Small fixes for XAttrCommands and test. (Yi Liu via umamahesh)
+
+    HADOOP-10561. Copy command with preserve option should handle Xattrs.
+    (Yi Liu via cnauroth)
+
Release 2.4.1 - 2014-06-23

  INCOMPATIBLE CHANGES

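Several of the 2.6.0 entries above converge the key and credential CLIs on single-dash option style (HADOOP-10793, HADOOP-10900). A minimal usage sketch, for illustration only; what gets listed depends on the key/credential providers configured for the cluster:

    hadoop key list          # KeyShell, single-dash args after HADOOP-10793
    hadoop credential list   # CredentialShell, single-dash args after HADOOP-10900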
+ 5 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -108,6 +108,11 @@
       <Method name="driver" />
       <Bug pattern="DM_EXIT" />
     </Match>
+     <Match>
+       <Class name="org.apache.hadoop.util.RunJar" />
+       <Method name="run" />
+       <Bug pattern="DM_EXIT" />
+     </Match>
     <!--
       We need to cast objects between old and new api objects
     -->

+ 33 - 1
hadoop-common-project/hadoop-common/pom.xml

@@ -203,6 +203,17 @@
      <artifactId>hadoop-auth</artifactId>
      <scope>compile</scope>
    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-auth</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-minikdc</artifactId>
+      <scope>test</scope>
+    </dependency>
    <dependency>
      <groupId>com.jcraft</groupId>
      <artifactId>jsch</artifactId>
@@ -213,6 +224,10 @@
      <scope>compile</scope>
    </dependency>

+    <dependency>
+      <groupId>org.htrace</groupId>
+      <artifactId>htrace-core</artifactId>
+    </dependency>
    <dependency>
      <groupId>org.apache.zookeeper</groupId>
      <artifactId>zookeeper</artifactId>
@@ -488,6 +503,10 @@
        <snappy.lib></snappy.lib>
        <snappy.include></snappy.include>
        <require.snappy>false</require.snappy>
+        <openssl.prefix></openssl.prefix>
+        <openssl.lib></openssl.lib>
+        <openssl.include></openssl.include>
+        <require.openssl>false</require.openssl>
      </properties>
      <build>
        <plugins>
@@ -537,6 +556,8 @@
                    <javahClassName>org.apache.hadoop.io.compress.snappy.SnappyDecompressor</javahClassName>
                    <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Compressor</javahClassName>
                    <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Decompressor</javahClassName>
+                    <javahClassName>org.apache.hadoop.crypto.OpensslCipher</javahClassName>
+                    <javahClassName>org.apache.hadoop.crypto.random.OpensslSecureRandom</javahClassName>
                    <javahClassName>org.apache.hadoop.util.NativeCrc32</javahClassName>
                    <javahClassName>org.apache.hadoop.net.unix.DomainSocket</javahClassName>
                    <javahClassName>org.apache.hadoop.net.unix.DomainSocketWatcher</javahClassName>
@@ -557,7 +578,7 @@
                <configuration>
                  <target>
                    <exec executable="cmake" dir="${project.build.directory}/native" failonerror="true">
-                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_BZIP2=${require.bzip2} -DREQUIRE_SNAPPY=${require.snappy} -DCUSTOM_SNAPPY_PREFIX=${snappy.prefix} -DCUSTOM_SNAPPY_LIB=${snappy.lib} -DCUSTOM_SNAPPY_INCLUDE=${snappy.include}"/>
+                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_BZIP2=${require.bzip2} -DREQUIRE_SNAPPY=${require.snappy} -DCUSTOM_SNAPPY_PREFIX=${snappy.prefix} -DCUSTOM_SNAPPY_LIB=${snappy.lib} -DCUSTOM_SNAPPY_INCLUDE=${snappy.include} -DREQUIRE_OPENSSL=${require.openssl} -DCUSTOM_OPENSSL_PREFIX=${openssl.prefix} -DCUSTOM_OPENSSL_LIB=${openssl.lib} -DCUSTOM_OPENSSL_INCLUDE=${openssl.include}"/>
                    </exec>
                    <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
                      <arg line="VERBOSE=1"/>
@@ -601,6 +622,11 @@
        <snappy.include></snappy.include>
        <require.snappy>false</require.snappy>
        <bundle.snappy.in.bin>true</bundle.snappy.in.bin>
+        <openssl.prefix></openssl.prefix>
+        <openssl.lib></openssl.lib>
+        <openssl.include></openssl.include>
+        <require.openssl>false</require.openssl>
+        <bundle.openssl.in.bin>true</bundle.openssl.in.bin>
      </properties>
      <build>
        <plugins>
@@ -646,6 +672,8 @@
                    <javahClassName>org.apache.hadoop.io.compress.snappy.SnappyDecompressor</javahClassName>
                    <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Compressor</javahClassName>
                    <javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Decompressor</javahClassName>
+                    <javahClassName>org.apache.hadoop.crypto.OpensslCipher</javahClassName>
+                    <javahClassName>org.apache.hadoop.crypto.random.OpensslSecureRandom</javahClassName>
                    <javahClassName>org.apache.hadoop.util.NativeCrc32</javahClassName>
                  </javahClassNames>
                  <javahOutputDirectory>${project.build.directory}/native/javah</javahOutputDirectory>
@@ -690,6 +718,10 @@
                    <argument>/p:CustomSnappyLib=${snappy.lib}</argument>
                    <argument>/p:CustomSnappyInclude=${snappy.include}</argument>
                    <argument>/p:RequireSnappy=${require.snappy}</argument>
+                    <argument>/p:CustomOpensslPrefix=${openssl.prefix}</argument>
+                    <argument>/p:CustomOpensslLib=${openssl.lib}</argument>
+                    <argument>/p:CustomOpensslInclude=${openssl.include}</argument>
+                    <argument>/p:RequireOpenssl=${require.openssl}</argument>
                  </arguments>
                </configuration>
              </execution>

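The openssl.* properties introduced above mirror the existing snappy.* build knobs. A minimal sketch of a native build that insists on OpenSSL support, assuming a non-default install prefix (the path, and the -Pnative invocation, are illustrative):

    mvn package -Pnative -DskipTests \
      -Drequire.openssl=true -Dopenssl.prefix=/usr/local/ssl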
+ 34 - 0
hadoop-common-project/hadoop-common/src/CMakeLists.txt

@@ -145,6 +145,38 @@ else (SNAPPY_LIBRARY AND SNAPPY_INCLUDE_DIR)
    ENDIF(REQUIRE_SNAPPY)
endif (SNAPPY_LIBRARY AND SNAPPY_INCLUDE_DIR)

+SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES CMAKE_FIND_LIBRARY_SUFFIXES)
+set_find_shared_library_version("1.0.0")
+SET(OPENSSL_NAME "crypto")
+IF(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
+    SET(OPENSSL_NAME "eay32")
+ENDIF()
+find_library(OPENSSL_LIBRARY
+    NAMES ${OPENSSL_NAME}
+    PATHS ${CUSTOM_OPENSSL_PREFIX} ${CUSTOM_OPENSSL_PREFIX}/lib
+          ${CUSTOM_OPENSSL_PREFIX}/lib64 ${CUSTOM_OPENSSL_LIB} NO_DEFAULT_PATH)
+find_library(OPENSSL_LIBRARY
+    NAMES ${OPENSSL_NAME})
+SET(CMAKE_FIND_LIBRARY_SUFFIXES STORED_CMAKE_FIND_LIBRARY_SUFFIXES)
+find_path(OPENSSL_INCLUDE_DIR 
+    NAMES openssl/evp.h
+    PATHS ${CUSTOM_OPENSSL_PREFIX} ${CUSTOM_OPENSSL_PREFIX}/include
+          ${CUSTOM_OPENSSL_INCLUDE} NO_DEFAULT_PATH)
+find_path(OPENSSL_INCLUDE_DIR 
+    NAMES openssl/evp.h)
+if (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR)
+    GET_FILENAME_COMPONENT(HADOOP_OPENSSL_LIBRARY ${OPENSSL_LIBRARY} NAME)
+    SET(OPENSSL_SOURCE_FILES
+        "${D}/crypto/OpensslCipher.c"
+        "${D}/crypto/random/OpensslSecureRandom.c")
+else (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR)
+    SET(OPENSSL_INCLUDE_DIR "")
+    SET(OPENSSL_SOURCE_FILES "")
+    IF(REQUIRE_OPENSSL)
+        MESSAGE(FATAL_ERROR "Required openssl library could not be found.  OPENSSL_LIBRARY=${OPENSSL_LIBRARY}, OPENSSL_INCLUDE_DIR=${OPENSSL_INCLUDE_DIR}, CUSTOM_OPENSSL_INCLUDE_DIR=${CUSTOM_OPENSSL_INCLUDE_DIR}, CUSTOM_OPENSSL_PREFIX=${CUSTOM_OPENSSL_PREFIX}, CUSTOM_OPENSSL_INCLUDE=${CUSTOM_OPENSSL_INCLUDE}")
+    ENDIF(REQUIRE_OPENSSL)
+endif (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR)
+
include_directories(
    ${GENERATED_JAVAH}
    main/native/src
@@ -155,6 +187,7 @@ include_directories(
    ${ZLIB_INCLUDE_DIRS}
    ${BZIP2_INCLUDE_DIR}
    ${SNAPPY_INCLUDE_DIR}
+    ${OPENSSL_INCLUDE_DIR}
    ${D}/util
)
CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/config.h.cmake ${CMAKE_BINARY_DIR}/config.h)
@@ -172,6 +205,7 @@ add_dual_library(hadoop
    ${D}/io/compress/lz4/lz4.c
    ${D}/io/compress/lz4/lz4hc.c
    ${SNAPPY_SOURCE_FILES}
+    ${OPENSSL_SOURCE_FILES}
    ${D}/io/compress/zlib/ZlibCompressor.c
    ${D}/io/compress/zlib/ZlibDecompressor.c
    ${BZIP2_SOURCE_FILES}

+ 6 - 0
hadoop-common-project/hadoop-common/src/JNIFlags.cmake

@@ -78,6 +78,12 @@ IF("${CMAKE_SYSTEM}" MATCHES "Linux")
        SET(_java_libarch "amd64")
    ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm")
        SET(_java_libarch "arm")
+    ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64le")
+        IF(EXISTS "${_JAVA_HOME}/jre/lib/ppc64le")
+                SET(_java_libarch "ppc64le")
+        ELSE()
+                SET(_java_libarch "ppc64")
+        ENDIF()
    ELSE()
        SET(_java_libarch ${CMAKE_SYSTEM_PROCESSOR})
    ENDIF()

+ 1 - 0
hadoop-common-project/hadoop-common/src/config.h.cmake

@@ -21,6 +21,7 @@
#cmakedefine HADOOP_ZLIB_LIBRARY "@HADOOP_ZLIB_LIBRARY@"
#cmakedefine HADOOP_BZIP2_LIBRARY "@HADOOP_BZIP2_LIBRARY@"
#cmakedefine HADOOP_SNAPPY_LIBRARY "@HADOOP_SNAPPY_LIBRARY@"
+#cmakedefine HADOOP_OPENSSL_LIBRARY "@HADOOP_OPENSSL_LIBRARY@"
#cmakedefine HAVE_SYNC_FILE_RANGE
#cmakedefine HAVE_POSIX_FADVISE


+ 13 - 15
hadoop-common-project/hadoop-common/src/contrib/bash-tab-completion/hadoop.sh

@@ -26,7 +26,7 @@ _hadoop() {
  COMPREPLY=()
  cur=${COMP_WORDS[COMP_CWORD]}
  prev=${COMP_WORDS[COMP_CWORD-1]}
-  script=`which ${COMP_WORDS[0]}`
+  script=$(which ${COMP_WORDS[0]})

  # Bash lets you tab complete things even if the script doesn't
  # exist (or isn't executable). Check to make sure it is, as we
@@ -36,9 +36,9 @@ _hadoop() {
    1)
      # Completing the first argument (the command).

-      temp=`$script | grep -n "^\s*or"`;
-      temp=`$script | head -n $((${temp%%:*} - 1)) | awk '/^ / {print $1}' | sort | uniq`;
-      COMPREPLY=(`compgen -W "${temp}" -- ${cur}`);
+      temp=$($script | grep -n "^\s*or");
+      temp=$($script | head -n $((${temp%%:*} - 1)) | awk '/^ / {print $1}' | sort | uniq);
+      COMPREPLY=($(compgen -W "${temp}" -- ${cur}));
      return 0;;

    2)
@@ -51,21 +51,21 @@ _hadoop() {
      dfs | dfsadmin | fs | job | pipes)
        # One option per line, enclosed in square brackets

-        temp=`$script ${COMP_WORDS[1]} 2>&1 | awk '/^[ \t]*\[/ {gsub("[[\\]]", ""); print $1}'`;
-        COMPREPLY=(`compgen -W "${temp}" -- ${cur}`);
+        temp=$($script ${COMP_WORDS[1]} 2>&1 | awk '/^[ \t]*\[/ {gsub("[[\\]]", ""); print $1}');
+        COMPREPLY=($(compgen -W "${temp}" -- ${cur}));
        return 0;;

      jar)
        # Any (jar) file

-        COMPREPLY=(`compgen -A file -- ${cur}`);
+        COMPREPLY=($(compgen -A file -- ${cur}));
        return 0;;

      namenode)
        # All options specified in one line,
        # enclosed in [] and separated with |
-        temp=`$script ${COMP_WORDS[1]} -help 2>&1 | grep Usage: | cut -d '[' -f 2- | awk '{gsub("] \\| \\[|]", " "); print $0}'`;
-        COMPREPLY=(`compgen -W "${temp}" -- ${cur}`);
+        temp=$($script ${COMP_WORDS[1]} -help 2>&1 | grep Usage: | cut -d '[' -f 2- | awk '{gsub("] \\| \\[|]", " "); print $0}');
+        COMPREPLY=($(compgen -W "${temp}" -- ${cur}));
        return 0;;

      *)
@@ -83,26 +83,24 @@ _hadoop() {
        # Pull the list of options, grep for the one the user is trying to use,
        # and then select the description of the relevant argument
        temp=$((${COMP_CWORD} - 1));
-        temp=`$script ${COMP_WORDS[1]} 2>&1 | grep -- "${COMP_WORDS[2]} " | awk '{gsub("[[ \\]]", ""); print $0}' | cut -d '<' -f ${temp}`;
+        temp=$($script ${COMP_WORDS[1]} 2>&1 | grep -- "${COMP_WORDS[2]} " | awk '{gsub("[[ \\]]", ""); print $0}' | cut -d '<' -f ${temp} | cut -d '>' -f 1);

        if [ ${#temp} -lt 1 ]; then
          # No match
          return 1;
        fi;

-        temp=${temp:0:$((${#temp} - 1))};
-
        # Now do completion based on the argument
        case $temp in
        path | src | dst)
          # DFS path completion
-          temp=`$script ${COMP_WORDS[1]} -ls "${cur}*" 2>&1 | grep -vE '^Found ' | cut -f 1 | awk '{gsub("^.* ", ""); print $0;}'`
-          COMPREPLY=(`compgen -W "${temp}" -- ${cur}`);
+          temp=$($script ${COMP_WORDS[1]} -ls -d "${cur}*" 2>/dev/null | grep -vE '^Found ' | cut -f 1 | awk '{gsub("^.* ", ""); print $0;}');
+          COMPREPLY=($(compgen -W "${temp}" -- ${cur}));
          return 0;;

        localsrc | localdst)
          # Local path completion
-          COMPREPLY=(`compgen -A file -- ${cur}`);
+          COMPREPLY=($(compgen -A file -- ${cur}));
          return 0;;

        *)

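The completion function above only takes effect once the script is sourced into an interactive bash session; a sketch, with an illustrative install path:

    # load the _hadoop completion function for the current shell
    . /usr/local/hadoop/src/contrib/bash-tab-completion/hadoop.sh
    # afterwards e.g. 'hadoop fs -<TAB>' offers the fs subcommand options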
+ 135 - 94
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -15,126 +15,167 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-# This script runs the hadoop core commands. 
-
-bin=`which $0`
-bin=`dirname ${bin}`
-bin=`cd "$bin" > /dev/null; pwd`
- 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
-
-function print_usage(){
+function hadoop_usage()
+{
  echo "Usage: hadoop [--config confdir] COMMAND"
  echo "       where COMMAND is one of:"
-  echo "  fs                   run a generic filesystem user client"
-  echo "  version              print the version"
-  echo "  jar <jar>            run a jar file"
-  echo "  checknative [-a|-h]  check native hadoop and compression libraries availability"
-  echo "  distcp <srcurl> <desturl> copy file or directories recursively"
-  echo "  archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
+  echo "  archive -archiveName NAME -p <parent path> <src>* <dest>"
+  echo "                       create a Hadoop archive"
+  echo "  checknative [-a|-h]  check native Hadoop and compression "
+  echo "                         libraries availability"
  echo "  classpath            prints the class path needed to get the"
-  echo "                       Hadoop jar and the required libraries"
+  echo "                         Hadoop jar and the required libraries"
+  echo "  credential           interact with credential providers"
  echo "  daemonlog            get/set the log level for each daemon"
+  echo "  distch path:owner:group:permisson"
+  echo "                       distributed metadata changer"
+  echo "  distcp <srcurl> <desturl> "
+  echo "                       copy file or directories recursively"
+  echo "  fs                   run a generic filesystem user client"
+  echo "  jar <jar>            run a jar file"
+  echo "  jnipath              prints the java.library.path"
+  echo "  key                  manage keys via the KeyProvider"
+  echo "  version              print the version"
  echo " or"
  echo "  CLASSNAME            run the class named CLASSNAME"
  echo ""
  echo "Most commands print help when invoked w/o parameters."
}

+
+# This script runs the hadoop core commands.
+
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  this="${BASH_SOURCE-$0}"
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
+  exit 1
+fi
+
if [ $# = 0 ]; then
-  print_usage
-  exit
+  hadoop_exit_with_usage 1
fi

COMMAND=$1
-case $COMMAND in
-  # usage flags
-  --help|-help|-h)
-    print_usage
-    exit
-    ;;
+shift

-  #hdfs commands
-  namenode|secondarynamenode|datanode|dfs|dfsadmin|fsck|balancer|fetchdt|oiv|dfsgroups|portmap|nfs3)
-    echo "DEPRECATED: Use of this script to execute hdfs command is deprecated." 1>&2
-    echo "Instead use the hdfs command for it." 1>&2
-    echo "" 1>&2
-    #try to locate hdfs and if present, delegate to it.  
-    shift
-    if [ -f "${HADOOP_HDFS_HOME}"/bin/hdfs ]; then
-      exec "${HADOOP_HDFS_HOME}"/bin/hdfs ${COMMAND/dfsgroups/groups}  "$@"
-    elif [ -f "${HADOOP_PREFIX}"/bin/hdfs ]; then
-      exec "${HADOOP_PREFIX}"/bin/hdfs ${COMMAND/dfsgroups/groups} "$@"
+case ${COMMAND} in
+  balancer|datanode|dfs|dfsadmin|dfsgroups|  \
+  namenode|secondarynamenode|fsck|fetchdt|oiv| \
+  portmap|nfs3)
+    hadoop_error "WARNING: Use of this script to execute ${COMMAND} is deprecated."
+    COMMAND=${COMMAND/dfsgroups/groups}
+    hadoop_error "WARNING: Attempting to execute replacement \"hdfs ${COMMAND}\" instead."
+    hadoop_error ""
+    #try to locate hdfs and if present, delegate to it.
+    if [[ -f "${HADOOP_HDFS_HOME}/bin/hdfs" ]]; then
+      # shellcheck disable=SC2086
+      exec "${HADOOP_HDFS_HOME}/bin/hdfs" \
+      --config "${HADOOP_CONF_DIR}" "${COMMAND}"  "$@"
+    elif [[ -f "${HADOOP_PREFIX}/bin/hdfs" ]]; then
+      # shellcheck disable=SC2086
+      exec "${HADOOP_PREFIX}/bin/hdfs" \
+      --config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
    else
-      echo "HADOOP_HDFS_HOME not found!"
+      hadoop_error "HADOOP_HDFS_HOME not found!"
      exit 1
    fi
-    ;;
-
+  ;;
+  
  #mapred commands for backwards compatibility
  pipes|job|queue|mrgroups|mradmin|jobtracker|tasktracker)
-    echo "DEPRECATED: Use of this script to execute mapred command is deprecated." 1>&2
-    echo "Instead use the mapred command for it." 1>&2
-    echo "" 1>&2
+    hadoop_error "WARNING: Use of this script to execute ${COMMAND} is deprecated."
+    COMMAND=${COMMAND/mrgroups/groups}
+    hadoop_error "WARNING: Attempting to execute replacement \"mapred ${COMMAND}\" instead."
+    hadoop_error ""
    #try to locate mapred and if present, delegate to it.
-    shift
-    if [ -f "${HADOOP_MAPRED_HOME}"/bin/mapred ]; then
-      exec "${HADOOP_MAPRED_HOME}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
-    elif [ -f "${HADOOP_PREFIX}"/bin/mapred ]; then
-      exec "${HADOOP_PREFIX}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
+    if [[ -f "${HADOOP_MAPRED_HOME}/bin/mapred" ]]; then
+      exec "${HADOOP_MAPRED_HOME}/bin/mapred" \
+      --config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
+    elif [[ -f "${HADOOP_PREFIX}/bin/mapred" ]]; then
+      exec "${HADOOP_PREFIX}/bin/mapred" \
+      --config "${HADOOP_CONF_DIR}" "${COMMAND}" "$@"
    else
-      echo "HADOOP_MAPRED_HOME not found!"
+      hadoop_error "HADOOP_MAPRED_HOME not found!"
      exit 1
    fi
-    ;;
-
+  ;;
+  archive)
+    CLASS=org.apache.hadoop.tools.HadoopArchives
+    hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
+    hadoop_add_classpath "${TOOL_PATH}"
+  ;;
+  checknative)
+    CLASS=org.apache.hadoop.util.NativeLibraryChecker
+  ;;
  classpath)
-    echo $CLASSPATH
-    exit
-    ;;
-
-  #core commands  
-  *)
-    # the core commands
-    if [ "$COMMAND" = "fs" ] ; then
-      CLASS=org.apache.hadoop.fs.FsShell
-    elif [ "$COMMAND" = "version" ] ; then
-      CLASS=org.apache.hadoop.util.VersionInfo
-    elif [ "$COMMAND" = "jar" ] ; then
-      CLASS=org.apache.hadoop.util.RunJar
-    elif [ "$COMMAND" = "key" ] ; then
-      CLASS=org.apache.hadoop.crypto.key.KeyShell
-    elif [ "$COMMAND" = "checknative" ] ; then
-      CLASS=org.apache.hadoop.util.NativeLibraryChecker
-    elif [ "$COMMAND" = "distcp" ] ; then
-      CLASS=org.apache.hadoop.tools.DistCp
-      CLASSPATH=${CLASSPATH}:${TOOL_PATH}
-    elif [ "$COMMAND" = "daemonlog" ] ; then
-      CLASS=org.apache.hadoop.log.LogLevel
-    elif [ "$COMMAND" = "archive" ] ; then
-      CLASS=org.apache.hadoop.tools.HadoopArchives
-      CLASSPATH=${CLASSPATH}:${TOOL_PATH}
-    elif [ "$COMMAND" = "credential" ] ; then
-      CLASS=org.apache.hadoop.security.alias.CredentialShell
-    elif [[ "$COMMAND" = -*  ]] ; then
-        # class and package names cannot begin with a -
-        echo "Error: No command named \`$COMMAND' was found. Perhaps you meant \`hadoop ${COMMAND#-}'"
-        exit 1
-    else
-      CLASS=$COMMAND
+    if [[ "$#" -eq 1 ]]; then
+      CLASS=org.apache.hadoop.util.Classpath
+    else   
+      hadoop_finalize
+      echo "${CLASSPATH}"
+      exit 0
    fi
-    shift
-    
-    # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
-    HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+  ;;
+  credential)
+    CLASS=org.apache.hadoop.security.alias.CredentialShell
+  ;;
+  daemonlog)
+    CLASS=org.apache.hadoop.log.LogLevel
+  ;;
+  distch)
+    CLASS=org.apache.hadoop.tools.DistCh
+    hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
+    hadoop_add_classpath "${TOOL_PATH}"
+  ;;
+  distcp)
+    CLASS=org.apache.hadoop.tools.DistCp
+    hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
+    hadoop_add_classpath "${TOOL_PATH}"
+  ;;
+  fs)
+    CLASS=org.apache.hadoop.fs.FsShell
+  ;;
+  jar)
+    CLASS=org.apache.hadoop.util.RunJar
+  ;;
+  jnipath)
+    hadoop_finalize
+    echo "${JAVA_LIBRARY_PATH}"
+    exit 0
+  ;;
+  key)
+    CLASS=org.apache.hadoop.crypto.key.KeyShell
+  ;;
+  version)
+    CLASS=org.apache.hadoop.util.VersionInfo
+  ;;
+  -*|hdfs)
+    hadoop_exit_with_usage 1
+  ;;
+  *)
+    CLASS="${COMMAND}"
+  ;;
+esac

-    #make sure security appender is turned off
-    HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
+# Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"

-    export CLASSPATH=$CLASSPATH
-    exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
-    ;;
+hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
+
+hadoop_finalize
+hadoop_java_exec "${COMMAND}" "${CLASS}" "$@"

-esac

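With the rewrite above, a subcommand either resolves to a single CLASS that is launched through hadoop_finalize and hadoop_java_exec, or (for classpath and jnipath with no extra arguments) prints a value and exits early. A few invocations of entry points listed in the new usage text, for illustration only, output omitted:

    hadoop classpath    # prints the effective CLASSPATH
    hadoop jnipath      # prints the java.library.path entries
    hadoop credential   # dispatches to org.apache.hadoop.security.alias.CredentialShell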
+ 6 - 4
hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd

@@ -282,10 +282,12 @@ if not "%HADOOP_MAPRED_HOME%\%MAPRED_DIR%" == "%HADOOP_YARN_HOME%\%YARN_DIR%" (
@rem

if defined HADOOP_CLASSPATH (
-  if defined HADOOP_USER_CLASSPATH_FIRST (
-    set CLASSPATH=%HADOOP_CLASSPATH%;%CLASSPATH%;
-  ) else (
-    set CLASSPATH=%CLASSPATH%;%HADOOP_CLASSPATH%;
+  if not defined HADOOP_USE_CLIENT_CLASSLOADER (
+    if defined HADOOP_USER_CLASSPATH_FIRST (
+      set CLASSPATH=%HADOOP_CLASSPATH%;%CLASSPATH%;
+    ) else (
+      set CLASSPATH=%CLASSPATH%;%HADOOP_CLASSPATH%;
+    )
  )
)


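The extra guard above ties into HADOOP-10893: when the isolated client classloader is enabled, HADOOP_CLASSPATH is deliberately left out of the launcher CLASSPATH. A sketch of toggling it, shown in bash for consistency (the cmd.exe side above reacts to the same variable; the jar name and class are illustrative):

    export HADOOP_USE_CLIENT_CLASSLOADER=true
    hadoop jar myapp.jar com.example.Driver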
+ 158 - 251
hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

@@ -1,3 +1,5 @@
+#
+#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
@@ -13,282 +15,187 @@
# See the License for the specific language governing permissions and
# limitations under the License.

+####
+# IMPORTANT
+####
+
+## The hadoop-config.sh tends to get executed by non-Hadoop scripts.
+## Those parts expect this script to parse/manipulate $@. In order
+## to maintain backward compatibility, this means a surprising
+## lack of functions for bits that would be much better off in
+## a function.
+##
+## In other words, yes, there is some bad things happen here and
+## unless we break the rest of the ecosystem, we can't change it. :(
+
+
# included in all the hadoop scripts with source command
# should not be executable directly
# also should not be passed any arguments, since we need original $*
-
-# Resolve links ($0 may be a softlink) and convert a relative path
-# to an absolute path.  NB: The -P option requires bash built-ins
-# or POSIX:2001 compliant cd and pwd.
-
-#   HADOOP_CLASSPATH Extra Java CLASSPATH entries.
 #
 #
-#   HADOOP_USER_CLASSPATH_FIRST      When defined, the HADOOP_CLASSPATH is 
-#                                    added in the beginning of the global
-#                                    classpath. Can be defined, for example,
-#                                    by doing 
-#                                    export HADOOP_USER_CLASSPATH_FIRST=true
-#
-
-this="${BASH_SOURCE-$0}"
-common_bin=$(cd -P -- "$(dirname -- "$this")" && pwd -P)
-script="$(basename -- "$this")"
-this="$common_bin/$script"
-
-[ -f "$common_bin/hadoop-layout.sh" ] && . "$common_bin/hadoop-layout.sh"
-
-HADOOP_COMMON_DIR=${HADOOP_COMMON_DIR:-"share/hadoop/common"}
-HADOOP_COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR:-"share/hadoop/common/lib"}
-HADOOP_COMMON_LIB_NATIVE_DIR=${HADOOP_COMMON_LIB_NATIVE_DIR:-"lib/native"}
-HDFS_DIR=${HDFS_DIR:-"share/hadoop/hdfs"}
-HDFS_LIB_JARS_DIR=${HDFS_LIB_JARS_DIR:-"share/hadoop/hdfs/lib"}
-YARN_DIR=${YARN_DIR:-"share/hadoop/yarn"}
-YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
-MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
-MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
-
-# the root of the Hadoop installation
-# See HADOOP-6255 for directory structure layout
-HADOOP_DEFAULT_PREFIX=$(cd -P -- "$common_bin"/.. && pwd -P)
-HADOOP_PREFIX=${HADOOP_PREFIX:-$HADOOP_DEFAULT_PREFIX}
-export HADOOP_PREFIX
-
-#check to see if the conf dir is given as an optional argument
-if [ $# -gt 1 ]
-then
-    if [ "--config" = "$1" ]
-	  then
-	      shift
-	      confdir=$1
-	      if [ ! -d "$confdir" ]; then
-                echo "Error: Cannot find configuration directory: $confdir"
-                exit 1
-             fi
-	      shift
-	      HADOOP_CONF_DIR=$confdir
-    fi
-fi
- 
-# Allow alternate conf dir location.
-if [ -e "${HADOOP_PREFIX}/conf/hadoop-env.sh" ]; then
-  DEFAULT_CONF_DIR="conf"
-else
-  DEFAULT_CONF_DIR="etc/hadoop"
-fi
-
-export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_PREFIX/$DEFAULT_CONF_DIR}"
+# after doing more config, caller should also exec finalize
+# function to finish last minute/default configs for
+# settings that might be different between daemons & interactive
 
 
-if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
-  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
-fi
-
-# User can specify hostnames or a file where the hostnames are (not both)
-if [[ ( "$HADOOP_SLAVES" != '' ) && ( "$HADOOP_SLAVE_NAMES" != '' ) ]] ; then
-  echo \
-    "Error: Please specify one variable HADOOP_SLAVES or " \
-    "HADOOP_SLAVE_NAME and not both."
+# you must be this high to ride the ride
+if [[ -z "${BASH_VERSINFO}" ]] || [[ "${BASH_VERSINFO}" -lt 3 ]]; then
+  echo "Hadoop requires bash v3 or better. Sorry."
   exit 1
   exit 1
 fi
 fi
 
 
-# Process command line options that specify hosts or file with host
-# list
-if [ $# -gt 1 ]
-then
-    if [ "--hosts" = "$1" ]
-    then
-        shift
-        export HADOOP_SLAVES="${HADOOP_CONF_DIR}/$1"
-        shift
-    elif [ "--hostnames" = "$1" ]
-    then
-        shift
-        export HADOOP_SLAVE_NAMES=$1
-        shift
-    fi
-fi
+# In order to get partially bootstrapped, we need to figure out where
+# we are located. Chances are good that our caller has already done
+# this work for us, but just in case...
 
 
-# User can specify hostnames or a file where the hostnames are (not both)
-# (same check as above but now we know it's command line options that cause
-# the problem)
-if [[ ( "$HADOOP_SLAVES" != '' ) && ( "$HADOOP_SLAVE_NAMES" != '' ) ]] ; then
-  echo \
-    "Error: Please specify one of --hosts or --hostnames options and not both."
-  exit 1
+if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
+  _hadoop_common_this="${BASH_SOURCE-$0}"
+  HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hadoop_common_this}")" >/dev/null && pwd -P)
 fi
 fi
 
 
-# check if net.ipv6.bindv6only is set to 1
-bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null)
-if [ -n "$bindv6only" ] && [ "$bindv6only" -eq "1" ] && [ "$HADOOP_ALLOW_IPV6" != "yes" ]
-then
-  echo "Error: \"net.ipv6.bindv6only\" is set to 1 - Java networking could be broken"
-  echo "For more info: http://wiki.apache.org/hadoop/HadoopIPv6"
+# get our functions defined for usage later
+if [[ -n "${HADOOP_COMMON_HOME}" ]] && 
+   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh" ]]; then
+  . "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh"
+elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh"
+else
+  echo "ERROR: Unable to exec ${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh." 1>&2
  exit 1
fi

-# Newer versions of glibc use an arena memory allocator that causes virtual
-# memory usage to explode. This interacts badly with the many threads that
-# we use in Hadoop. Tune the variable down to prevent vmem explosion.
-export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
-
-# Attempt to set JAVA_HOME if it is not set
-if [[ -z $JAVA_HOME ]]; then
-  # On OSX use java_home (or /Library for older versions)
-  if [ "Darwin" == "$(uname -s)" ]; then
-    if [ -x /usr/libexec/java_home ]; then
-      export JAVA_HOME=($(/usr/libexec/java_home))
-    else
-      export JAVA_HOME=(/Library/Java/Home)
-    fi
-  fi
-
-  # Bail if we did not detect it
-  if [[ -z $JAVA_HOME ]]; then
-    echo "Error: JAVA_HOME is not set and could not be found." 1>&2
-    exit 1
-  fi
-fi
-
-JAVA=$JAVA_HOME/bin/java
-# some Java parameters
-JAVA_HEAP_MAX=-Xmx1000m 
-
-# check envvars which might override default args
-if [ "$HADOOP_HEAPSIZE" != "" ]; then
-  #echo "run with heapsize $HADOOP_HEAPSIZE"
-  JAVA_HEAP_MAX="-Xmx""$HADOOP_HEAPSIZE""m"
-  #echo $JAVA_HEAP_MAX
-fi
-
-# CLASSPATH initially contains $HADOOP_CONF_DIR
-CLASSPATH="${HADOOP_CONF_DIR}"
-
-# so that filenames w/ spaces are handled correctly in loops below
-IFS=
-
-if [ "$HADOOP_COMMON_HOME" = "" ]; then
-  if [ -d "${HADOOP_PREFIX}/$HADOOP_COMMON_DIR" ]; then
-    export HADOOP_COMMON_HOME=$HADOOP_PREFIX
-  fi
-fi
-
-# for releases, add core hadoop jar & webapps to CLASSPATH
-if [ -d "$HADOOP_COMMON_HOME/$HADOOP_COMMON_DIR/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/$HADOOP_COMMON_DIR
+# allow overrides of the above and pre-defines of the below
+if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
+   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh" ]]; then
+  . "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh"
+elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh"
 fi
 fi
 
 
-if [ -d "$HADOOP_COMMON_HOME/$HADOOP_COMMON_LIB_JARS_DIR" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/$HADOOP_COMMON_LIB_JARS_DIR'/*'
-fi
-
-CLASSPATH=${CLASSPATH}:$HADOOP_COMMON_HOME/$HADOOP_COMMON_DIR'/*'
-
-# default log directory & file
-if [ "$HADOOP_LOG_DIR" = "" ]; then
-  HADOOP_LOG_DIR="$HADOOP_PREFIX/logs"
-fi
-if [ "$HADOOP_LOGFILE" = "" ]; then
-  HADOOP_LOGFILE='hadoop.log'
-fi
-
-# default policy file for service-level authorization
-if [ "$HADOOP_POLICYFILE" = "" ]; then
-  HADOOP_POLICYFILE="hadoop-policy.xml"
-fi
-
-# restore ordinary behaviour
-unset IFS
-
-# setup 'java.library.path' for native-hadoop code if necessary
-
-if [ -d "${HADOOP_PREFIX}/build/native" -o -d "${HADOOP_PREFIX}/$HADOOP_COMMON_LIB_NATIVE_DIR" ]; then
-    
-  if [ -d "${HADOOP_PREFIX}/$HADOOP_COMMON_LIB_NATIVE_DIR" ]; then
-    if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-      JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:${HADOOP_PREFIX}/$HADOOP_COMMON_LIB_NATIVE_DIR
-    else
-      JAVA_LIBRARY_PATH=${HADOOP_PREFIX}/$HADOOP_COMMON_LIB_NATIVE_DIR
-    fi
-  fi
-fi
-
-# setup a default TOOL_PATH
-TOOL_PATH="${TOOL_PATH:-$HADOOP_PREFIX/share/hadoop/tools/lib/*}"
-
-HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR"
-HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE"
-HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.home.dir=$HADOOP_PREFIX"
-HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.id.str=$HADOOP_IDENT_STRING"
-HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}"
-if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-  HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-  export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$JAVA_LIBRARY_PATH
-fi  
-HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.policy.file=$HADOOP_POLICYFILE"
-
-# Disable ipv6 as it can cause issues
-HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
-
-# put hdfs in classpath if present
-if [ "$HADOOP_HDFS_HOME" = "" ]; then
-  if [ -d "${HADOOP_PREFIX}/$HDFS_DIR" ]; then
-    export HADOOP_HDFS_HOME=$HADOOP_PREFIX
-  fi
-fi
-
-if [ -d "$HADOOP_HDFS_HOME/$HDFS_DIR/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/$HDFS_DIR
-fi
+#
+# IMPORTANT! We are not executing user provided code yet!
+#
 
 
-if [ -d "$HADOOP_HDFS_HOME/$HDFS_LIB_JARS_DIR" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/$HDFS_LIB_JARS_DIR'/*'
-fi
+# Let's go!  Base definitions so we can move forward
+hadoop_bootstrap_init
 
 
-CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/$HDFS_DIR'/*'
+# let's find our conf.
+#
+# first, check and process params passed to us
+# we process this in-line so that we can directly modify $@
+# if something downstream is processing that directly,
+# we need to make sure our params have been ripped out
+# note that we handle many of them here for the various utilities.
+# this keeps the options consistent and gives users a more
+# predictable experience
+
+
+# save these off in case our caller needs them
+# shellcheck disable=SC2034
+HADOOP_USER_PARAMS="$@"
+
+HADOOP_DAEMON_MODE="default"
+
+while [[ -z "${_hadoop_common_done}" ]]; do
+  case $1 in
+    --buildpaths)
+      # shellcheck disable=SC2034
+      HADOOP_ENABLE_BUILD_PATHS=true
+      shift
+    ;;
+    --config)
+      shift
+      confdir=$1
+      shift
+      if [[ -d "${confdir}" ]]; then
+        # shellcheck disable=SC2034
+        YARN_CONF_DIR="${confdir}"
+        # shellcheck disable=SC2034
+        HADOOP_CONF_DIR="${confdir}"
+      elif [[ -z "${confdir}" ]]; then
+        hadoop_error "ERROR: No parameter provided for --config "
+        hadoop_exit_with_usage 1
+      else
+        hadoop_error "ERROR: Cannot find configuration directory \"${confdir}\""
+        hadoop_exit_with_usage 1
+      fi
+    ;;
+    --daemon)
+      shift
+      HADOOP_DAEMON_MODE=$1
+      shift
+      if [[ -z "${HADOOP_DAEMON_MODE}" || \
+        ! "${HADOOP_DAEMON_MODE}" =~ ^st(art|op|atus)$ ]]; then
+        hadoop_error "ERROR: --daemon must be followed by either \"start\", \"stop\", or \"status\"."
+        hadoop_exit_with_usage 1
+      fi
+    ;;
+    --debug)
+      shift
+      # shellcheck disable=SC2034
+      HADOOP_SHELL_SCRIPT_DEBUG=true
+    ;; 
+    --help|-help|-h|help|--h|--\?|-\?|\?)
+      hadoop_exit_with_usage 0
+    ;;
+    --hostnames)
+      shift
+      # shellcheck disable=SC2034
+      HADOOP_SLAVE_NAMES="$1"
+      shift
+    ;;
+    --hosts)
+      shift
+      hadoop_populate_slaves_file "$1"
+      shift
+    ;;
+    *)
+      _hadoop_common_done=true
+    ;;
+  esac
+done
+
+hadoop_find_confdir
+hadoop_exec_hadoopenv
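With the option processing above now centralized, every entry script understands the same generic flags. A purely illustrative invocation (the conf directory is made up) might look like:

    # --config and --daemon are consumed by this loop; the remainder is passed through in "$@"
    hdfs --config /etc/hadoop/conf --daemon start namenode
    hadoop --debug --config /etc/hadoop/conf fs -ls /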
 
 
-# put yarn in classpath if present
-if [ "$HADOOP_YARN_HOME" = "" ]; then
-  if [ -d "${HADOOP_PREFIX}/$YARN_DIR" ]; then
-    export HADOOP_YARN_HOME=$HADOOP_PREFIX
-  fi
-fi
+#
+# IMPORTANT! User provided code is now available!
+#
 
 
-if [ -d "$HADOOP_YARN_HOME/$YARN_DIR/webapps" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_DIR
-fi
+# do all the OS-specific startup bits here
+# this allows us to get a decent JAVA_HOME,
+# call crle for LD_LIBRARY_PATH, etc.
+hadoop_os_tricks
 
 
-if [ -d "$HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR" ]; then
-  CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR'/*'
-fi
+hadoop_java_setup
 
 
-CLASSPATH=${CLASSPATH}:$HADOOP_YARN_HOME/$YARN_DIR'/*'
+hadoop_basic_init
 
 
-# put mapred in classpath if present AND different from YARN
-if [ "$HADOOP_MAPRED_HOME" = "" ]; then
-  if [ -d "${HADOOP_PREFIX}/$MAPRED_DIR" ]; then
-    export HADOOP_MAPRED_HOME=$HADOOP_PREFIX
-  fi
+# inject any sub-project overrides, defaults, etc.
+if declare -F hadoop_subproject_init >/dev/null ; then
+  hadoop_subproject_init
 fi

-if [ "$HADOOP_MAPRED_HOME/$MAPRED_DIR" != "$HADOOP_YARN_HOME/$YARN_DIR" ] ; then
-  if [ -d "$HADOOP_MAPRED_HOME/$MAPRED_DIR/webapps" ]; then
-    CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/$MAPRED_DIR
-  fi
+# get the native libs in there pretty quick
+hadoop_add_javalibpath "${HADOOP_PREFIX}/build/native"
+hadoop_add_javalibpath "${HADOOP_PREFIX}/${HADOOP_COMMON_LIB_NATIVE_DIR}"
 
 
-  if [ -d "$HADOOP_MAPRED_HOME/$MAPRED_LIB_JARS_DIR" ]; then
-    CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/$MAPRED_LIB_JARS_DIR'/*'
-  fi
-
-  CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME/$MAPRED_DIR'/*'
-fi
+# get the basic java class path for these subprojects
+# in as quickly as possible since other stuff
+# will definitely depend upon it.
+#
+# at some point, this will get replaced with something pluggable
+# so that these functions can sit in their projects rather than
+# common
+#
+for i in common hdfs yarn mapred
+do
+  hadoop_add_to_classpath_$i
+done
 
 
-# Add the user-specified CLASSPATH via HADOOP_CLASSPATH
-# Add it first or last depending on if user has
-# set env-var HADOOP_USER_CLASSPATH_FIRST
-if [ "$HADOOP_CLASSPATH" != "" ]; then
-  # Prefix it if its to be preceded
-  if [ "$HADOOP_USER_CLASSPATH_FIRST" != "" ]; then
-    CLASSPATH=${HADOOP_CLASSPATH}:${CLASSPATH}
-  else
-    CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH}
-  fi
+#
+# backwards compatibility. new stuff should
+# call this when they are ready
+#
+if [[ -z "${HADOOP_NEW_CONFIG}" ]]; then
+  hadoop_finalize
 fi

+ 28 - 186
hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh

@@ -15,200 +15,42 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-
-# Runs a Hadoop command as a daemon.
-#
-# Environment Variables
-#
-#   HADOOP_CONF_DIR  Alternate conf dir. Default is ${HADOOP_PREFIX}/conf.
-#   HADOOP_LOG_DIR   Where log files are stored.  PWD by default.
-#   HADOOP_MASTER    host:path where hadoop code should be rsync'd from
-#   HADOOP_PID_DIR   The pid files are stored. /tmp by default.
-#   HADOOP_IDENT_STRING   A string representing this instance of hadoop. $USER by default
-#   HADOOP_NICENESS The scheduling priority for daemons. Defaults to 0.
-##
-
-usage="Usage: hadoop-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] [--script script] (start|stop) <hadoop-command> <args...>"
-
-# if no args specified, show usage
-if [ $# -le 1 ]; then
-  echo $usage
-  exit 1
-fi
-
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
-
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
-
-# get arguments
-
-#default value
-hadoopScript="$HADOOP_PREFIX"/bin/hadoop
-if [ "--script" = "$1" ]
-  then
-    shift
-    hadoopScript=$1
-    shift
-fi
-startStop=$1
-shift
-command=$1
-shift
-
-hadoop_rotate_log ()
+function hadoop_usage
 {
-    log=$1;
-    num=5;
-    if [ -n "$2" ]; then
-	num=$2
-    fi
-    if [ -f "$log" ]; then # rotate logs
-	while [ $num -gt 1 ]; do
-	    prev=`expr $num - 1`
-	    [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
-	    num=$prev
-	done
-	mv "$log" "$log.$num";
-    fi
+  echo "Usage: hadoop-daemon.sh [--config confdir] (start|stop|status) <hadoop-command> <args...>"
 }

-if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
-  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
-fi
-
-# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
-if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  export HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
-  export HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
-  export HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
-  starting_secure_dn="true"
-fi
-
-#Determine if we're starting a privileged NFS, if so, redefine the appropriate variables
-if [ "$command" == "nfs3" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_PRIVILEGED_NFS_USER" ]; then
-    export HADOOP_PID_DIR=$HADOOP_PRIVILEGED_NFS_PID_DIR
-    export HADOOP_LOG_DIR=$HADOOP_PRIVILEGED_NFS_LOG_DIR
-    export HADOOP_IDENT_STRING=$HADOOP_PRIVILEGED_NFS_USER
-    starting_privileged_nfs="true"
-fi
-
-if [ "$HADOOP_IDENT_STRING" = "" ]; then
-  export HADOOP_IDENT_STRING="$USER"
-fi
-
-
-# get log directory
-if [ "$HADOOP_LOG_DIR" = "" ]; then
-  export HADOOP_LOG_DIR="$HADOOP_PREFIX/logs"
-fi
-
-if [ ! -w "$HADOOP_LOG_DIR" ] ; then
-  mkdir -p "$HADOOP_LOG_DIR"
-  chown $HADOOP_IDENT_STRING $HADOOP_LOG_DIR
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  this="${BASH_SOURCE-$0}"
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
+
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 1>&2
+  exit 1
 fi

-if [ "$HADOOP_PID_DIR" = "" ]; then
-  HADOOP_PID_DIR=/tmp
+if [[ $# = 0 ]]; then
+  hadoop_exit_with_usage 1
 fi

-# some variables
-export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
-export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"}
-export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"}
-export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-"INFO,NullAppender"}
-log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
-pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
-HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}
+daemonmode=$1
+shift
 
 
-# Set default scheduling priority
-if [ "$HADOOP_NICENESS" = "" ]; then
-    export HADOOP_NICENESS=0
+if [[ -z "${HADOOP_HDFS_HOME}" ]]; then
+  hdfsscript="${HADOOP_PREFIX}/bin/hdfs"
+else
+  hdfsscript="${HADOOP_HDFS_HOME}/bin/hdfs"
 fi

-case $startStop in
-
-  (start)
-
-    [ -w "$HADOOP_PID_DIR" ] ||  mkdir -p "$HADOOP_PID_DIR"
-
-    if [ -f $pid ]; then
-      if kill -0 `cat $pid` > /dev/null 2>&1; then
-        echo $command running as process `cat $pid`.  Stop it first.
-        exit 1
-      fi
-    fi
-
-    if [ "$HADOOP_MASTER" != "" ]; then
-      echo rsync from $HADOOP_MASTER
-      rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HADOOP_MASTER/ "$HADOOP_PREFIX"
-    fi
-
-    hadoop_rotate_log $log
-    echo starting $command, logging to $log
-    cd "$HADOOP_PREFIX"
-    case $command in
-      namenode|secondarynamenode|datanode|journalnode|dfs|dfsadmin|fsck|balancer|zkfc)
-        if [ -z "$HADOOP_HDFS_HOME" ]; then
-          hdfsScript="$HADOOP_PREFIX"/bin/hdfs
-        else
-          hdfsScript="$HADOOP_HDFS_HOME"/bin/hdfs
-        fi
-        nohup nice -n $HADOOP_NICENESS $hdfsScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
-      ;;
-      (*)
-        nohup nice -n $HADOOP_NICENESS $hadoopScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
-      ;;
-    esac
-    echo $! > $pid
-    sleep 1
-    head "$log"
-    # capture the ulimit output
-    if [ "true" = "$starting_secure_dn" ]; then
-      echo "ulimit -a for secure datanode user $HADOOP_SECURE_DN_USER" >> $log
-      # capture the ulimit info for the appropriate user
-      su --shell=/bin/bash $HADOOP_SECURE_DN_USER -c 'ulimit -a' >> $log 2>&1
-    elif [ "true" = "$starting_privileged_nfs" ]; then
-        echo "ulimit -a for privileged nfs user $HADOOP_PRIVILEGED_NFS_USER" >> $log
-        su --shell=/bin/bash $HADOOP_PRIVILEGED_NFS_USER -c 'ulimit -a' >> $log 2>&1
-    else
-      echo "ulimit -a for user $USER" >> $log
-      ulimit -a >> $log 2>&1
-    fi
-    sleep 3;
-    if ! ps -p $! > /dev/null ; then
-      exit 1
-    fi
-    ;;
-          
-  (stop)
-
-    if [ -f $pid ]; then
-      TARGET_PID=`cat $pid`
-      if kill -0 $TARGET_PID > /dev/null 2>&1; then
-        echo stopping $command
-        kill $TARGET_PID
-        sleep $HADOOP_STOP_TIMEOUT
-        if kill -0 $TARGET_PID > /dev/null 2>&1; then
-          echo "$command did not stop gracefully after $HADOOP_STOP_TIMEOUT seconds: killing with kill -9"
-          kill -9 $TARGET_PID
-        fi
-      else
-        echo no $command to stop
-      fi
-      rm -f $pid
-    else
-      echo no $command to stop
-    fi
-    ;;
-
-  (*)
-    echo $usage
-    exit 1
-    ;;
-
-esac
-
+exec "$hdfsscript" --config "${HADOOP_CONF_DIR}" --daemon "${daemonmode}" "$@"
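The net effect is that hadoop-daemon.sh becomes a thin compatibility shim around the exec above; the two commands below are intended to be equivalent (the conf directory shown is illustrative only):

    sbin/hadoop-daemon.sh --config /etc/hadoop/conf start namenode
    bin/hdfs --config /etc/hadoop/conf --daemon start namenode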
 
 

+ 26 - 11
hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh

@@ -18,19 +18,34 @@
 
 
 # Run a Hadoop command on all slave hosts.

-usage="Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] [start|stop] command args..."
+function hadoop_usage
+{
+  echo "Usage: hadoop-daemons.sh [--config confdir] [--hosts hostlistfile] (start|stop|status) <hadoop-command> <args...>"
+}
+
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
 
 
-# if no args specified, show usage
-if [ $# -le 1 ]; then
-  echo $usage
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 1>&2
   exit 1
 fi

-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
-
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+if [[ $# = 0 ]]; then
+  hadoop_exit_with_usage 1
+fi
 
 
-exec "$bin/slaves.sh" --config $HADOOP_CONF_DIR cd "$HADOOP_PREFIX" \; "$bin/hadoop-daemon.sh" --config $HADOOP_CONF_DIR "$@"
+hadoop_connect_to_hosts "${bin}/hadoop-daemon.sh" \
+--config "${HADOOP_CONF_DIR}" "$@"

+ 1066 - 0
hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh

@@ -0,0 +1,1066 @@
+#!/bin/bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function hadoop_error
+{
+  # NOTE: This function is not user replaceable.
+
+  echo "$*" 1>&2
+}
+
+function hadoop_debug
+{
+  if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then
+    echo "DEBUG: $*" 1>&2
+  fi
+}
+
+function hadoop_bootstrap_init
+{
+  # NOTE: This function is not user replaceable.
+
+  # the root of the Hadoop installation
+  # See HADOOP-6255 for the expected directory structure layout
+  
+  # By now, HADOOP_LIBEXEC_DIR should have been defined upstream
+  # We can piggyback off of that to figure out where the default
+  # HADOOP_PREFIX should be.  This allows us to run without
+  # HADOOP_PREFIX ever being defined by a human! As a consequence
+  # HADOOP_LIBEXEC_DIR now becomes perhaps the single most powerful
+  # env var within Hadoop.
+  if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
+    hadoop_error "HADOOP_LIBEXEC_DIR is not defined.  Exiting."
+    exit 1
+  fi
+  HADOOP_DEFAULT_PREFIX=$(cd -P -- "${HADOOP_LIBEXEC_DIR}/.." >/dev/null && pwd -P)
+  HADOOP_PREFIX=${HADOOP_PREFIX:-$HADOOP_DEFAULT_PREFIX}
+  export HADOOP_PREFIX
+  
+  #
+  # short-cuts. vendors may redefine these as well, preferably
+  # in hadoop-layout.sh
+  #
+  HADOOP_COMMON_DIR=${HADOOP_COMMON_DIR:-"share/hadoop/common"}
+  HADOOP_COMMON_LIB_JARS_DIR=${HADOOP_COMMON_LIB_JARS_DIR:-"share/hadoop/common/lib"}
+  HADOOP_COMMON_LIB_NATIVE_DIR=${HADOOP_COMMON_LIB_NATIVE_DIR:-"lib/native"}
+  HDFS_DIR=${HDFS_DIR:-"share/hadoop/hdfs"}
+  HDFS_LIB_JARS_DIR=${HDFS_LIB_JARS_DIR:-"share/hadoop/hdfs/lib"}
+  YARN_DIR=${YARN_DIR:-"share/hadoop/yarn"}
+  YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
+  MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
+  MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
+  # setup a default TOOL_PATH
+  TOOL_PATH=${TOOL_PATH:-${HADOOP_PREFIX}/share/hadoop/tools/lib/*}
+
+  export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
+ 
+  # defaults
+  export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
+  hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}"
+}
+
+function hadoop_find_confdir
+{
+  # NOTE: This function is not user replaceable.
+
+  local conf_dir
+  # Look for the basic hadoop configuration area.
+  #
+  #
+  # An attempt at compatibility with some Hadoop 1.x
+  # installs.
+  if [[ -e "${HADOOP_PREFIX}/conf/hadoop-env.sh" ]]; then
+    conf_dir="conf"
+  else
+    conf_dir="etc/hadoop"
+  fi
+  export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_PREFIX}/${conf_dir}}"
+
+  hadoop_debug "HADOOP_CONF_DIR=${HADOOP_CONF_DIR}"
+}
+
+function hadoop_exec_hadoopenv
+{
+  # NOTE: This function is not user replaceable.
+
+  if [[ -z "${HADOOP_ENV_PROCESSED}" ]]; then
+    if [[ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]]; then
+      export HADOOP_ENV_PROCESSED=true
+      . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+    fi
+  fi
+}
+
+function hadoop_basic_init
+{
+  # Some of these are also set in hadoop-env.sh.
+  # we still set them here just in case hadoop-env.sh is
+  # broken in some way, set up defaults, etc.
+  #
+  # but it is important to note that if you update these
+  # you also need to update hadoop-env.sh as well!!!
+  
+  # CLASSPATH initially contains $HADOOP_CONF_DIR
+  CLASSPATH="${HADOOP_CONF_DIR}"
+  hadoop_debug "Initial CLASSPATH=${HADOOP_CONF_DIR}"
+  
+  if [[ -z "${HADOOP_COMMON_HOME}" ]] &&
+  [[ -d "${HADOOP_PREFIX}/${HADOOP_COMMON_DIR}" ]]; then
+    export HADOOP_COMMON_HOME="${HADOOP_PREFIX}"
+  fi
+  
+  # default policy file for service-level authorization
+  HADOOP_POLICYFILE=${HADOOP_POLICYFILE:-"hadoop-policy.xml"}
+  
+  # define HADOOP_HDFS_HOME
+  if [[ -z "${HADOOP_HDFS_HOME}" ]] &&
+     [[ -d "${HADOOP_PREFIX}/${HDFS_DIR}" ]]; then
+    export HADOOP_HDFS_HOME="${HADOOP_PREFIX}"
+  fi
+  
+  # define HADOOP_YARN_HOME
+  if [[ -z "${HADOOP_YARN_HOME}" ]] &&
+     [[ -d "${HADOOP_PREFIX}/${YARN_DIR}" ]]; then
+    export HADOOP_YARN_HOME="${HADOOP_PREFIX}"
+  fi
+  
+  # define HADOOP_MAPRED_HOME
+  if [[ -z "${HADOOP_MAPRED_HOME}" ]] &&
+     [[ -d "${HADOOP_PREFIX}/${MAPRED_DIR}" ]]; then
+    export HADOOP_MAPRED_HOME="${HADOOP_PREFIX}"
+  fi
+  
+  HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
+  HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_PREFIX}/logs"}
+  HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log}
+  HADOOP_NICENESS=${HADOOP_NICENESS:-0}
+  HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}
+  HADOOP_PID_DIR=${HADOOP_PID_DIR:-/tmp}
+  HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-INFO,console}
+  HADOOP_DAEMON_ROOT_LOGGER=${HADOOP_DAEMON_ROOT_LOGGER:-INFO,RFA}
+  HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}
+  HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
+  HADOOP_SSH_OPTS=${HADOOP_SSH_OPTS:-"-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"}
+  HADOOP_SECURE_LOG_DIR=${HADOOP_SECURE_LOG_DIR:-${HADOOP_LOG_DIR}}
+  HADOOP_SECURE_PID_DIR=${HADOOP_SECURE_PID_DIR:-${HADOOP_PID_DIR}}
+  HADOOP_SSH_PARALLEL=${HADOOP_SSH_PARALLEL:-10}
+}
+
+function hadoop_populate_slaves_file()
+{
+  # NOTE: This function is not user replaceable.
+
+  local slavesfile=$1
+  shift
+  if [[ -f "${slavesfile}" ]]; then
+    # shellcheck disable=2034
+    HADOOP_SLAVES="${slavesfile}"
+  elif [[ -f "${HADOOP_CONF_DIR}/${slavesfile}" ]]; then
+    # shellcheck disable=2034
+    HADOOP_SLAVES="${HADOOP_CONF_DIR}/${slavesfile}"
+    # shellcheck disable=2034
+    YARN_SLAVES="${HADOOP_CONF_DIR}/${slavesfile}"
+  else
+    hadoop_error "ERROR: Cannot find hosts file \"${slavesfile}\""
+    hadoop_exit_with_usage 1
+  fi
+}
+
+function hadoop_rotate_log
+{
+  #
+  # log rotation (mainly used for .out files)
+  # Users are likely to replace this one for something
+  # that gzips or uses dates or who knows what.
+  #
+  # be aware that &1 and &2 might go through here
+  # so don't do anything too crazy...
+  #
+  local log=$1;
+  local num=${2:-5};
+  
+  if [[ -f "${log}" ]]; then # rotate logs
+    while [[ ${num} -gt 1 ]]; do
+      #shellcheck disable=SC2086
+      let prev=${num}-1
+      if [[ -f "${log}.${prev}" ]]; then
+        mv "${log}.${prev}" "${log}.${num}"
+      fi
+      num=${prev}
+    done
+    mv "${log}" "${log}.${num}"
+  fi
+}
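As a sketch of the default rotation (the file name is illustrative), a call such as

    hadoop_rotate_log "${HADOOP_LOG_DIR}/hadoop-hdfs-namenode.out" 3

renames .out.2 to .out.3, .out.1 to .out.2, and the live .out to .out.1, leaving room for a fresh .out file.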
+
+function hadoop_actual_ssh
+{
+  # we are passing this function to xargs
+  # should get hostname followed by rest of command line
+  local slave=$1
+  shift
+  
+  # shellcheck disable=SC2086
+  ssh ${HADOOP_SSH_OPTS} ${slave} $"${@// /\\ }" 2>&1 | sed "s/^/$slave: /"
+}
+
+function hadoop_connect_to_hosts
+{
+  # shellcheck disable=SC2124
+  local params="$@"
+  
+  #
+  # ssh (or whatever) to a host
+  #
+  # User can specify hostnames or a file where the hostnames are (not both)
+  if [[ -n "${HADOOP_SLAVES}" && -n "${HADOOP_SLAVE_NAMES}" ]] ; then
+    hadoop_error "ERROR: Both HADOOP_SLAVES and HADOOP_SLAVE_NAMES were defined. Aborting."
+    exit 1
+  fi
+  
+  if [[ -n "${HADOOP_SLAVE_NAMES}" ]] ; then
+    SLAVE_NAMES=${HADOOP_SLAVE_NAMES}
+  else
+    SLAVE_FILE=${HADOOP_SLAVES:-${HADOOP_CONF_DIR}/slaves}
+  fi
+  
+  # if pdsh is available, let's use it.  otherwise default
+  # to a loop around ssh.  (ugh)
+  if [[ -e '/usr/bin/pdsh' ]]; then
+    if [[ -z "${HADOOP_SLAVE_NAMES}" ]] ; then
+      # if we were given a file, just let pdsh deal with it.
+      # shellcheck disable=SC2086
+      PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
+      -f "${HADOOP_SSH_PARALLEL}" -w ^"${SLAVE_FILE}" $"${@// /\\ }" 2>&1
+    else
+      # no spaces allowed in the pdsh arg host list
+      # shellcheck disable=SC2086
+      SLAVE_NAMES=$(echo ${SLAVE_NAMES} | tr -s ' ' ,)
+      PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
+      -f "${HADOOP_SSH_PARALLEL}" -w "${SLAVE_NAMES}" $"${@// /\\ }" 2>&1
+    fi
+  else
+    if [[ -z "${SLAVE_NAMES}" ]]; then
+      SLAVE_NAMES=$(sed 's/#.*$//;/^$/d' "${SLAVE_FILE}")
+    fi
+    
+    # quoting here gets tricky. it's easier to push it into a function
+    # so that we don't have to deal with it. However...
+    # xargs can't use a function so instead we'll export it out
+    # and force it into a subshell
+    # moral of the story: just use pdsh.
+    export -f hadoop_actual_ssh
+    export HADOOP_SSH_OPTS
+    echo "${SLAVE_NAMES}" | \
+    xargs -n 1 -P"${HADOOP_SSH_PARALLEL}" \
+    -I {} bash -c --  "hadoop_actual_ssh {} ${params}"
+    wait
+  fi
+}
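A minimal usage sketch (the host names are invented): the function runs the given command line on every slave, via pdsh when it is installed and the ssh/xargs fallback above otherwise.

    HADOOP_SLAVE_NAMES="worker1 worker2" \
      hadoop_connect_to_hosts "${HADOOP_PREFIX}/sbin/hadoop-daemon.sh" --config "${HADOOP_CONF_DIR}" start datanode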
+
+function hadoop_add_param
+{
+  #
+  # general param dedupe..
+  # $1 is what we are adding to
+  # $2 is the name of what we want to add (key)
+  # $3 is the key+value of what we're adding
+  #
+  # doing it this way allows us to support all sorts of
+  # different syntaxes, just so long as they are space
+  # delimited
+  #
+  if [[ ! ${!1} =~ $2 ]] ; then
+    # shellcheck disable=SC2086
+    eval $1="'${!1} $3'"
+    hadoop_debug "$1 accepted $3"
+  else
+    hadoop_debug "$1 declined $3"
+  fi
+}
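For illustration (values arbitrary), a second call with the same key is a no-op, which is what keeps HADOOP_OPTS free of duplicate -D flags:

    hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=/var/log/hadoop"   # accepted
    hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=/var/log/hadoop"   # declined: key already present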
+
+function hadoop_add_classpath
+{
+  # two params:
+  # $1 = directory, file, wildcard, whatever to add
+  # $2 = before or after, which determines where in the
+  #      classpath this object should go. default is after
+  # return 0 = success (added or duplicate)
+  # return 1 = failure (doesn't exist, whatever)
+  
+  # However, with classpath (& JLP), we can do dedupe
+  # along with some sanity checking (e.g., missing directories)
+  # since we have a better idea of what is legal
+  #
+  # for wildcard at end, we can
+  # at least check the dir exists
+  if [[ $1 =~ ^.*\*$ ]]; then
+    local mp=$(dirname "$1")
+    if [[ ! -d "${mp}" ]]; then
+      hadoop_debug "Rejected CLASSPATH: $1 (not a dir)"
+      return 1
+    fi
+    
+    # no wildcard in the middle, so check existence
+    # (doesn't matter *what* it is)
+  elif [[ ! $1 =~ ^.*\*.*$ ]] && [[ ! -e "$1" ]]; then
+    hadoop_debug "Rejected CLASSPATH: $1 (does not exist)"
+    return 1
+  fi
+  if [[ -z "${CLASSPATH}" ]]; then
+    CLASSPATH=$1
+    hadoop_debug "Initial CLASSPATH=$1"
+  elif [[ ":${CLASSPATH}:" != *":$1:"* ]]; then
+    if [[ "$2" = "before" ]]; then
+      CLASSPATH="$1:${CLASSPATH}"
+      hadoop_debug "Prepend CLASSPATH: $1"
+    else
+      CLASSPATH+=:$1
+      hadoop_debug "Append CLASSPATH: $1"
+    fi
+  else
+    hadoop_debug "Dupe CLASSPATH: $1"
+  fi
+  return 0
+}
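A short sketch of the resulting behaviour (paths illustrative):

    hadoop_add_classpath "${HADOOP_CONF_DIR}" before                        # prepended
    hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"'/*'   # appended, if the directory exists
    hadoop_add_classpath "${HADOOP_CONF_DIR}"                               # duplicate, silently ignored (still returns 0)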
+
+function hadoop_add_colonpath
+{
+  # two params:
+  # $1 = directory, file, wildcard, whatever to add
+  # $2 = before or after, which determines where in the
+  #      classpath this object should go
+  # return 0 = success
+  # return 1 = failure (duplicate)
+  
+  # this is CLASSPATH, JLP, etc but with dedupe but no
+  # other checking
+  if [[ -d "${2}" ]] && [[ ":${!1}:" != *":$2:"* ]]; then
+    if [[ -z "${!1}" ]]; then
+      # shellcheck disable=SC2086
+      eval $1="'$2'"
+      hadoop_debug "Initial colonpath($1): $2"
+    elif [[ "$3" = "before" ]]; then
+      # shellcheck disable=SC2086
+      eval $1="'$2:${!1}'"
+      hadoop_debug "Prepend colonpath($1): $2"
+    else
+      # shellcheck disable=SC2086
+      eval $1+="'$2'"
+      hadoop_debug "Append colonpath($1): $2"
+    fi
+    return 0
+  fi
+  hadoop_debug "Rejected colonpath($1): $2"
+  return 1
+}
+
+function hadoop_add_javalibpath
+{
+  # specialized function for a common use case
+  hadoop_add_colonpath JAVA_LIBRARY_PATH "$1" "$2"
+}
+
+function hadoop_add_ldlibpath
+{
+  # specialized function for a common use case
+  hadoop_add_colonpath LD_LIBRARY_PATH "$1" "$2"
+  
+  # note that we export this
+  export LD_LIBRARY_PATH
+}
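Both helpers are thin wrappers around hadoop_add_colonpath; for example (path illustrative):

    hadoop_add_javalibpath "${HADOOP_PREFIX}/lib/native"   # extends JAVA_LIBRARY_PATH only
    hadoop_add_ldlibpath   "${HADOOP_PREFIX}/lib/native"   # also exports LD_LIBRARY_PATH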
+
+function hadoop_add_to_classpath_common
+{
+  
+  #
+  # get all of the common jars+config in the path
+  #
+  
+  # developers
+  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
+    hadoop_add_classpath "${HADOOP_COMMON_HOME}/hadoop-common/target/classes"
+  fi
+  
+  if [[ -d "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}/webapps" ]]; then
+    hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"
+  fi
+  
+  hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"'/*'
+  hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"'/*'
+}
+
+function hadoop_add_to_classpath_hdfs
+{
+  #
+  # get all of the hdfs jars+config in the path
+  #
+  # developers
+  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
+    hadoop_add_classpath "${HADOOP_HDFS_HOME}/hadoop-hdfs/target/classes"
+  fi
+  
+  # put hdfs in classpath if present
+  if [[ -d "${HADOOP_HDFS_HOME}/${HDFS_DIR}/webapps" ]]; then
+    hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"
+  fi
+  
+  hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR}"'/*'
+  hadoop_add_classpath "${HADOOP_HDFS_HOME}/${HDFS_DIR}"'/*'
+}
+
+function hadoop_add_to_classpath_yarn
+{
+  local i
+  #
+  # get all of the yarn jars+config in the path
+  #
+  # developers
+  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
+    for i in yarn-api yarn-common yarn-mapreduce yarn-master-worker \
+    yarn-server/yarn-server-nodemanager \
+    yarn-server/yarn-server-common \
+    yarn-server/yarn-server-resourcemanager; do
+      hadoop_add_classpath "${HADOOP_YARN_HOME}/$i/target/classes"
+    done
+    
+    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/test/classes"
+    hadoop_add_classpath "${HADOOP_YARN_HOME}/build/tools"
+  fi
+  
+  if [[ -d "${HADOOP_YARN_HOME}/${YARN_DIR}/webapps" ]]; then
+    hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}"
+  fi
+  
+  hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR}"'/*'
+  hadoop_add_classpath  "${HADOOP_YARN_HOME}/${YARN_DIR}"'/*'
+}
+
+function hadoop_add_to_classpath_mapred
+{
+  #
+  # get all of the mapreduce jars+config in the path
+  #
+  # developers
+  if [[ -n "${HADOOP_ENABLE_BUILD_PATHS}" ]]; then
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-shuffle/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-common/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-hs-plugins/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-app/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-jobclient/target/classes"
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/hadoop-mapreduce-client-core/target/classes"
+  fi
+  
+  if [[ -d "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}/webapps" ]]; then
+    hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"
+  fi
+  
+  hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_LIB_JARS_DIR}"'/*'
+  hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"'/*'
+}
+
+function hadoop_add_to_classpath_userpath
+{
+  # Add the user-specified HADOOP_CLASSPATH to the
+  # official CLASSPATH env var if HADOOP_USE_CLIENT_CLASSLOADER
+  # is not set.
+  # Add it first or last depending on if user has
+  # set env-var HADOOP_USER_CLASSPATH_FIRST
+  # we'll also dedupe it, because we're cool like that.
+  #
+  local c
+  local array
+  local i
+  local j
+  let c=0
+
+  if [[ -n "${HADOOP_CLASSPATH}" ]]; then
+    # I wonder if Java runs on VMS.
+    for i in $(echo "${HADOOP_CLASSPATH}" | tr : '\n'); do
+      array[$c]=$i
+      let c+=1
+    done
+    let j=c-1
+    
+    if [[ -z "${HADOOP_USE_CLIENT_CLASSLOADER}" ]]; then
+      if [[ -z "${HADOOP_USER_CLASSPATH_FIRST}" ]]; then
+        for ((i=j; i>=0; i--)); do
+          hadoop_add_classpath "${array[$i]}" before
+        done
+      else
+        for ((i=0; i<=j; i++)); do
+          hadoop_add_classpath "${array[$i]}" after
+        done
+      fi
+    fi
+  fi
+}
+
+function hadoop_os_tricks
+{
+  local bindv6only
+
+  # some OSes have special needs. here are some out-of-the-box
+  # examples for OS X and Linux. Vendors, replace this with your special sauce.
+  case ${HADOOP_OS_TYPE} in
+    Darwin)
+      if [[ -z "${JAVA_HOME}" ]]; then
+        if [[ -x /usr/libexec/java_home ]]; then
+          export JAVA_HOME="$(/usr/libexec/java_home)"
+        else
+          export JAVA_HOME=/Library/Java/Home
+        fi
+      fi
+    ;;
+    Linux)
+      bindv6only=$(/sbin/sysctl -n net.ipv6.bindv6only 2> /dev/null)
+
+      # NOTE! HADOOP_ALLOW_IPV6 is a developer hook.  We leave it
+      # undocumented in hadoop-env.sh because we don't want users to
+      # shoot themselves in the foot while devs make IPv6 work.
+      if [[ -n "${bindv6only}" ]] && 
+         [[ "${bindv6only}" -eq "1" ]] && 
+         [[ "${HADOOP_ALLOW_IPV6}" != "yes" ]]; then
+        hadoop_error "ERROR: \"net.ipv6.bindv6only\" is set to 1 "
+        hadoop_error "ERROR: Hadoop networking could be broken. Aborting."
+        hadoop_error "ERROR: For more info: http://wiki.apache.org/hadoop/HadoopIPv6"
+        exit 1
+      fi
+      # Newer versions of glibc use an arena memory allocator that
+      # causes virtual memory usage to explode. This interacts badly
+      # with the many threads that we use in Hadoop. Tune the variable
+      # down to prevent vmem explosion.
+      export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+    ;;
+  esac
+}
+
+function hadoop_java_setup
+{
+  # Bail if we did not detect it
+  if [[ -z "${JAVA_HOME}" ]]; then
+    hadoop_error "ERROR: JAVA_HOME is not set and could not be found."
+    exit 1
+  fi
+  
+  if [[ ! -d "${JAVA_HOME}" ]]; then
+    hadoop_error "ERROR: JAVA_HOME ${JAVA_HOME} does not exist."
+    exit 1
+  fi
+  
+  JAVA="${JAVA_HOME}/bin/java"
+  
+  if [[ ! -x "$JAVA" ]]; then
+    hadoop_error "ERROR: $JAVA is not executable."
+    exit 1
+  fi
+  # shellcheck disable=SC2034
+  JAVA_HEAP_MAX=-Xmx1g
+  HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
+  
+  # check envvars which might override default args
+  if [[ -n "$HADOOP_HEAPSIZE" ]]; then
+    # shellcheck disable=SC2034
+    JAVA_HEAP_MAX="-Xmx${HADOOP_HEAPSIZE}m"
+  fi
+}
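As a worked example with an assumed value:

    # in hadoop-env.sh (illustrative)
    export HADOOP_HEAPSIZE=2048
    # hadoop_java_setup then derives:
    #   JAVA_HEAP_MAX=-Xmx2048m
    # with HADOOP_HEAPSIZE unset, the 1024 default yields -Xmx1024m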
+
+function hadoop_finalize_libpaths
+{
+  if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
+    hadoop_add_param HADOOP_OPTS java.library.path \
+    "-Djava.library.path=${JAVA_LIBRARY_PATH}"
+    export LD_LIBRARY_PATH
+  fi
+}
+
+#
+# fill in any last minute options that might not have been defined yet
+#
+function hadoop_finalize_hadoop_opts
+{
+  hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=${HADOOP_LOG_DIR}"
+  hadoop_add_param HADOOP_OPTS hadoop.log.file "-Dhadoop.log.file=${HADOOP_LOGFILE}"
+  hadoop_add_param HADOOP_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_PREFIX}"
+  hadoop_add_param HADOOP_OPTS hadoop.id.str "-Dhadoop.id.str=${HADOOP_IDENT_STRING}"
+  hadoop_add_param HADOOP_OPTS hadoop.root.logger "-Dhadoop.root.logger=${HADOOP_ROOT_LOGGER}"
+  hadoop_add_param HADOOP_OPTS hadoop.policy.file "-Dhadoop.policy.file=${HADOOP_POLICYFILE}"
+  hadoop_add_param HADOOP_OPTS hadoop.security.logger "-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER}"
+}
+
+function hadoop_finalize_classpath
+{
+  hadoop_add_classpath "${HADOOP_CONF_DIR}" before
+  
+  # user classpath gets added at the last minute. this allows
+  # override of CONF dirs and more
+  hadoop_add_to_classpath_userpath
+}
+
+function hadoop_finalize
+{
+  # user classpath gets added at the last minute. this allows
+  # override of CONF dirs and more
+  hadoop_finalize_classpath
+  hadoop_finalize_libpaths
+  hadoop_finalize_hadoop_opts
+}
+
+function hadoop_exit_with_usage
+{
+  # NOTE: This function is not user replaceable.
+
+  local exitcode=$1
+  if [[ -z $exitcode ]]; then
+    exitcode=1
+  fi
+  if declare -F hadoop_usage >/dev/null ; then
+    hadoop_usage
+  elif [[ -x /usr/bin/cowsay ]]; then
+    /usr/bin/cowsay -f elephant "Sorry, no help available."
+  else
+    hadoop_error "Sorry, no help available."
+  fi
+  exit $exitcode
+}
+
+function hadoop_verify_secure_prereq
+{
+  # if you are on an OS like Illumos that has functional roles
+  # and you are using pfexec, you'll probably want to change
+  # this.
+  
+  # ${EUID} comes from the shell itself!
+  if [[ "${EUID}" -ne 0 ]] || [[ -n "${HADOOP_SECURE_COMMAND}" ]]; then
+    hadoop_error "ERROR: You must be privileged in order to run a secure service."
+    return 1
+  else
+    return 0
+  fi
+}
+
+function hadoop_setup_secure_service
+{
+  # need a more complicated setup? replace me!
+  
+  HADOOP_PID_DIR=${HADOOP_SECURE_PID_DIR}
+  HADOOP_LOG_DIR=${HADOOP_SECURE_LOG_DIR}
+}
+
+function hadoop_verify_piddir
+{
+  if [[ -z "${HADOOP_PID_DIR}" ]]; then
+    hadoop_error "No pid directory defined."
+    exit 1
+  fi
+  if [[ ! -w "${HADOOP_PID_DIR}" ]] && [[ ! -d "${HADOOP_PID_DIR}" ]]; then
+    hadoop_error "WARNING: ${HADOOP_PID_DIR} does not exist. Creating."
+    mkdir -p "${HADOOP_PID_DIR}" > /dev/null 2>&1
+    if [[ $? -gt 0 ]]; then
+      hadoop_error "ERROR: Unable to create ${HADOOP_PID_DIR}. Aborting."
+      exit 1
+    fi
+  fi
+  touch "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR: Unable to write in ${HADOOP_PID_DIR}. Aborting."
+    exit 1
+  fi
+  rm "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1
+}
+
+function hadoop_verify_logdir
+{
+  if [[ -z "${HADOOP_LOG_DIR}" ]]; then
+    hadoop_error "No log directory defined."
+    exit 1
+  fi
+  if [[ ! -w "${HADOOP_LOG_DIR}" ]] && [[ ! -d "${HADOOP_LOG_DIR}" ]]; then
+    hadoop_error "WARNING: ${HADOOP_LOG_DIR} does not exist. Creating."
+    mkdir -p "${HADOOP_LOG_DIR}" > /dev/null 2>&1
+    if [[ $? -gt 0 ]]; then
+      hadoop_error "ERROR: Unable to create ${HADOOP_LOG_DIR}. Aborting."
+      exit 1
+    fi
+  fi
+  touch "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR: Unable to write in ${HADOOP_LOG_DIR}. Aborting."
+    exit 1
+  fi
+  rm "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
+}
+
+function hadoop_status_daemon() {
+  #
+  # LSB 4.1.0 compatible status command (1)
+  #
+  # 0 = program is running
+  # 1 = dead, but still a pid (2)
+  # 2 = (not used by us)
+  # 3 = not running
+  #
+  # 1 - this is not an endorsement of the LSB
+  #
+  # 2 - technically, the specification says /var/run/pid, so
+  #     we should never return this value, but we're giving
+#     them the benefit of the doubt and returning 1 even if
+#     our pid is not in /var/run.
+  #
+  
+  local pidfile=$1
+  shift
+  
+  local pid
+  
+  if [[ -f "${pidfile}" ]]; then
+    pid=$(cat "${pidfile}")
+    if ps -p "${pid}" > /dev/null 2>&1; then
+      return 0
+    fi
+    return 1
+  fi
+  return 3
+}
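Because the return codes follow the LSB status convention, callers can branch on them directly; a minimal sketch (the pid file path is invented):

    hadoop_status_daemon "${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-namenode.pid"
    case $? in
      0) echo "running" ;;
      1) echo "dead, but a pid file remains" ;;
      3) echo "not running" ;;
    esac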
+
+function hadoop_java_exec
+{
+  # run a java command.  this is used for
+  # non-daemons
+
+  local command=$1
+  local class=$2
+  shift 2
+  
+  hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
+  hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
+
+  export CLASSPATH
+  #shellcheck disable=SC2086
+  exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
+}
+
+function hadoop_start_daemon
+{
+  # this is our non-privileged daemon starter
+  # that fires up a daemon in the *foreground*
+  # so complex! so wow! much java!
+  local command=$1
+  local class=$2
+  shift 2
+
+  hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
+  hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
+
+  export CLASSPATH
+  #shellcheck disable=SC2086
+  exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
+}
+
+function hadoop_start_daemon_wrapper
+{
+  # this is our non-privileged daemon start
+  # that fires up a daemon in the *background*
+  local daemonname=$1
+  local class=$2
+  local pidfile=$3
+  local outfile=$4
+  shift 4
+  
+  hadoop_rotate_log "${outfile}"
+  
+  hadoop_start_daemon "${daemonname}" \
+  "$class" "$@" >> "${outfile}" 2>&1 < /dev/null &
+  #shellcheck disable=SC2086
+  echo $! > "${pidfile}" 2>/dev/null
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR:  Cannot write pid ${pidfile}."
+  fi
+  
+  # shellcheck disable=SC2086
+  renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR: Cannot set priority of process $!"
+  fi
+  
+  # shellcheck disable=SC2086
+  disown $! 2>&1
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR: Cannot disconnect process $!"
+  fi
+  sleep 1
+  
+  # capture the ulimit output
+  ulimit -a >> "${outfile}" 2>&1
+  
+  # shellcheck disable=SC2086
+  if ! ps -p $! >/dev/null 2>&1; then
+    return 1
+  fi
+  return 0
+}
+
+function hadoop_start_secure_daemon
+{
+  # this is used to launch a secure daemon in the *foreground*
+  #
+  local daemonname=$1
+  local class=$2
+  
+  # pid file to create for our deamon
+  local daemonpidfile=$3
+  
+  # where to send stdout. jsvc has bad habits so this *may* be &1
+  # which means you send it to stdout!
+  local daemonoutfile=$4
+  
+  # where to send stderr.  same thing, except &2 = stderr
+  local daemonerrfile=$5
+  shift 5
+ 
+  hadoop_rotate_log "${daemonoutfile}"
+  hadoop_rotate_log "${daemonerrfile}"
+  
+  jsvc="${JSVC_HOME}/jsvc"
+  if [[ ! -f "${jsvc}" ]]; then
+    hadoop_error "JSVC_HOME is not set or set incorrectly. jsvc is required to run secure"
+    hadoop_error "or privileged daemons. Please download and install jsvc from "
+    hadoop_error "http://archive.apache.org/dist/commons/daemon/binaries/ "
+    hadoop_error "and set JSVC_HOME to the directory containing the jsvc binary."
+    exit 1
+  fi
+  
+  # note that shellcheck will throw a
+  # bogus for-our-use-case 2086 here.
+  # it doesn't properly support multi-line situations
+
+  hadoop_debug "Final CLASSPATH: ${CLASSPATH}"
+  hadoop_debug "Final HADOOP_OPTS: ${HADOOP_OPTS}"
+  
+  exec "${jsvc}" \
+  "-Dproc_${daemonname}" \
+  -outfile "${daemonoutfile}" \
+  -errfile "${daemonerrfile}" \
+  -pidfile "${daemonpidfile}" \
+  -nodetach \
+  -user "${HADOOP_SECURE_USER}" \
+  -cp "${CLASSPATH}" \
+  ${HADOOP_OPTS} \
+  "${class}" "$@"
+}
+
+function hadoop_start_secure_daemon_wrapper
+{
+  # this wraps hadoop_start_secure_daemon to take care
+  # of the dirty work to launch a daemon in the background!
+  local daemonname=$1
+  local class=$2
+  
+  # same rules as hadoop_start_secure_daemon except we
+  # have some additional parameters
+  
+  local daemonpidfile=$3
+  
+  local daemonoutfile=$4
+  
+  # the pid file of the subprocess that spawned our
+  # secure launcher
+  local jsvcpidfile=$5
+  
+  # the output of the subprocess that spawned our secure
+  # launcher
+  local jsvcoutfile=$6
+  
+  local daemonerrfile=$7
+  shift 7
+  
+  hadoop_rotate_log "${jsvcoutfile}"
+  
+  hadoop_start_secure_daemon \
+  "${daemonname}" \
+  "${class}" \
+  "${daemonpidfile}" \
+  "${daemonoutfile}" \
+  "${daemonerrfile}" "$@" >> "${jsvcoutfile}" 2>&1 < /dev/null &
+  
+  # This wrapper should only have one child.  Unlike Shawty Lo.
+  #shellcheck disable=SC2086
+  echo $! > "${jsvcpidfile}" 2>/dev/null
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR:  Cannot write pid ${jsvcpidfile}."
+  fi
+  sleep 1
+  #shellcheck disable=SC2086
+  renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR: Cannot set priority of process $!"
+  fi
+  if [[ -f "${daemonpidfile}" ]]; then
+    #shellcheck disable=SC2046
+    renice "${HADOOP_NICENESS}" $(cat "${daemonpidfile}") >/dev/null 2>&1
+    if [[ $? -gt 0 ]]; then
+      hadoop_error "ERROR: Cannot set priority of process $(cat "${daemonpidfile}")"
+    fi
+  fi
+  #shellcheck disable=SC2086
+  disown $! 2>&1
+  if [[ $? -gt 0 ]]; then
+    hadoop_error "ERROR: Cannot disconnect process $!"
+  fi
+  # capture the ulimit output
+  su "${HADOOP_SECURE_USER}" -c 'bash -c "ulimit -a"' >> "${jsvcoutfile}" 2>&1
+  #shellcheck disable=SC2086
+  if ! ps -p $! >/dev/null 2>&1; then
+    return 1
+  fi
+  return 0
+}
+
+function hadoop_stop_daemon
+{
+  local cmd=$1
+  local pidfile=$2
+  shift 2
+  
+  local pid
+  
+  if [[ -f "${pidfile}" ]]; then
+    pid=$(cat "$pidfile")
+    
+    kill "${pid}" >/dev/null 2>&1
+    sleep "${HADOOP_STOP_TIMEOUT}"
+    if kill -0 "${pid}" > /dev/null 2>&1; then
+      hadoop_error "WARNING: ${cmd} did not stop gracefully after ${HADOOP_STOP_TIMEOUT} seconds: Trying to kill with kill -9"
+      kill -9 "${pid}" >/dev/null 2>&1
+    fi
+    if ps -p "${pid}" > /dev/null 2>&1; then
+      hadoop_error "ERROR: Unable to kill ${pid}"
+    else
+      rm -f "${pidfile}" >/dev/null 2>&1
+    fi
+  fi
+}
+
+function hadoop_stop_secure_daemon
+{
+  local command=$1
+  local daemonpidfile=$2
+  local privpidfile=$3
+  shift 3
+  local ret
+  
+  hadoop_stop_daemon "${command}" "${daemonpidfile}"
+  ret=$?
+  rm -f "${daemonpidfile}" "${privpidfile}" 2>/dev/null
+  return ${ret}
+}
+
+function hadoop_daemon_handler
+{
+  local daemonmode=$1
+  local daemonname=$2
+  local class=$3
+  local daemon_pidfile=$4
+  local daemon_outfile=$5
+  shift 5
+  
+  case ${daemonmode} in
+    status)
+      hadoop_status_daemon "${daemon_pidfile}"
+      exit $?
+    ;;
+    
+    stop)
+      hadoop_stop_daemon "${daemonname}" "${daemon_pidfile}"
+      exit $?
+    ;;
+    
+    ##COMPAT  -- older hadoops would also start daemons by default
+    start|default)
+      hadoop_verify_piddir
+      hadoop_verify_logdir
+      hadoop_status_daemon "${daemon_pidfile}"
+      if [[ $? == 0  ]]; then
+        hadoop_error "${daemonname} running as process $(cat "${daemon_pidfile}").  Stop it first."
+        exit 1
+      else
+        # stale pid file, so just remove it and continue on
+        rm -f "${daemon_pidfile}" >/dev/null 2>&1
+      fi
+      ##COMPAT  - differentiate between --daemon start and nothing
+      # "nothing" shouldn't detach
+      if [[ "$daemonmode" = "default" ]]; then
+        hadoop_start_daemon "${daemonname}" "${class}" "$@"
+      else
+        hadoop_start_daemon_wrapper "${daemonname}" \
+        "${class}" "${daemon_pidfile}" "${daemon_outfile}" "$@"
+      fi
+    ;;
+  esac
+}
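A sketch of how a daemon entry point might call this handler (the variable values and class name are illustrative, not the exact wiring used by the hdfs/yarn scripts):

    daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-namenode.pid"
    daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-namenode.out"
    hadoop_daemon_handler "${HADOOP_DAEMON_MODE}" namenode \
      org.apache.hadoop.hdfs.server.namenode.NameNode \
      "${daemon_pidfile}" "${daemon_outfile}" "$@"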
+
+function hadoop_secure_daemon_handler
+{
+  local daemonmode=$1
+  local daemonname=$2
+  local classname=$3
+  local daemon_pidfile=$4
+  local daemon_outfile=$5
+  local priv_pidfile=$6
+  local priv_outfile=$7
+  local priv_errfile=$8
+  shift 8
+  
+  case ${daemonmode} in
+    status)
+      hadoop_status_daemon "${daemon_pidfile}"
+      exit $?
+    ;;
+    
+    stop)
+      hadoop_stop_secure_daemon "${daemonname}" \
+      "${daemon_pidfile}" "${priv_pidfile}"
+      exit $?
+    ;;
+    
+    ##COMPAT  -- older hadoops would also start daemons by default
+    start|default)
+      hadoop_verify_piddir
+      hadoop_verify_logdir
+      hadoop_status_daemon "${daemon_pidfile}"
+      if [[ $? == 0  ]]; then
+        hadoop_error "${daemonname} running as process $(cat "${daemon_pidfile}").  Stop it first."
+        exit 1
+      else
+        # stale pid file, so just remove it and continue on
+        rm -f "${daemon_pidfile}" >/dev/null 2>&1
+      fi
+      
+      ##COMPAT  - differentiate between --daemon start and nothing
+      # "nothing" shouldn't detach
+      if [[ "${daemonmode}" = "default" ]]; then
+        hadoop_start_secure_daemon "${daemonname}" "${classname}" \
+        "${daemon_pidfile}" "${daemon_outfile}" \
+        "${priv_errfile}"  "$@"
+      else
+        hadoop_start_secure_daemon_wrapper "${daemonname}" "${classname}" \
+        "${daemon_pidfile}" "${daemon_outfile}" \
+        "${priv_pidfile}" "${priv_outfile}" "${priv_errfile}"  "$@"
+      fi
+    ;;
+  esac
+}
+

+ 93 - 0
hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example

@@ -0,0 +1,93 @@
+# Copyright 2014 The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+##
+## VENDORS!
+##
+## This is where you can redefine the layout of Hadoop directories
+## and expect to be reasonably compatible.  Needless to say, this
+## is expert level stuff and one needs to tread carefully.
+##
+## If you move HADOOP_LIBEXEC_DIR from some location that
+## isn't bin/../libexec, you MUST define either HADOOP_LIBEXEC_DIR
+## or have HADOOP_PREFIX/libexec/hadoop-config.sh and
+## HADOOP_PREFIX/libexec/hadoop-layout.sh (this file) exist.
+
+## NOTE:
+##
+## hadoop-functions.sh gets executed BEFORE this file.  So you can
+##   redefine all of those functions here.
+##
+## *-env.sh get executed AFTER this file but generally too late to
+## override the settings (but not the functions!) here.  However, this
+## also means you cannot use things like HADOOP_CONF_DIR for these
+## definitions.
+
+####
+# Common disk layout
+####
+
+# Default location for the common/core Hadoop project
+# export HADOOP_COMMON_HOME=$HADOOP_PREFIX
+
+# Relative locations where components under HADOOP_COMMON_HOME are located
+# export HADOOP_COMMON_DIR="share/hadoop/common"
+# export HADOOP_COMMON_LIB_JARS_DIR="share/hadoop/common/lib"
+# export HADOOP_COMMON_LIB_NATIVE_DIR="lib/native"
+
+####
+# HDFS disk layout
+####
+
+# Default location for the HDFS subproject
+# export HADOOP_HDFS_HOME=$HADOOP_PREFIX
+
+# Relative locations where components under HADOOP_HDFS_HOME are located
+# export HDFS_DIR="share/hadoop/hdfs"
+# export HDFS_LIB_JARS_DIR="share/hadoop/hdfs/lib"
+
+####
+# YARN disk layout
+####
+
+# Default location for the YARN subproject
+# export HADOOP_YARN_HOME=$HADOOP_PREFIX
+
+# Relative locations where components under HADOOP_YARN_HOME are located
+# export YARN_DIR="share/hadoop/yarn"
+# export YARN_LIB_JARS_DIR="share/hadoop/yarn/lib"
+
+# Default location for the MapReduce subproject
+# export HADOOP_MAPRED_HOME=$HADOOP_PREFIX
+
+####
+# MapReduce disk layout
+####
+
+# Relative locations where components under HADOOP_MAPRED_HOME are located
+# export MAPRED_DIR="share/hadoop/mapreduce"
+# export MAPRED_LIB_JARS_DIR="share/hadoop/mapreduce/lib"
+
+####
+# Misc paths
+####
+
+# setup a default TOOL_PATH, where things like distcp lives
+# note that this path only gets added for certain commands and not
+# part of the general classpath
+# export TOOL_PATH="$HADOOP_PREFIX/share/hadoop/tools/lib/*"

+ 30 - 3
hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd

@@ -28,6 +28,26 @@
 @rem                                    classpath. Can be defined, for example,
 @rem                                    by doing
 @rem                                    export HADOOP_USER_CLASSPATH_FIRST=true
+@rem
+@rem   HADOOP_USE_CLIENT_CLASSLOADER    When defined, HADOOP_CLASSPATH and the
+@rem                                    jar as the hadoop jar argument are
+@rem                                    handled by a separate isolated client
+@rem                                    classloader. If it is set,
+@rem                                    HADOOP_USER_CLASSPATH_FIRST is
+@rem                                    ignored. Can be defined by doing
+@rem                                    export HADOOP_USE_CLIENT_CLASSLOADER=true
+@rem
+@rem   HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES
+@rem                                    When defined, it overrides the default
+@rem                                    definition of system classes for the
+@rem                                    client classloader when
+@rem                                    HADOOP_USE_CLIENT_CLASSLOADER is
+@rem                                    enabled. Names ending in '.' (period)
+@rem                                    are treated as package names, and names
+@rem                                    starting with a '-' are treated as
+@rem                                    negative matches. For example,
+@rem                                    export HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop."
+
 @rem
 @rem   HADOOP_HEAPSIZE  The maximum amount of heap to use, in MB.
 @rem                    Default is 1000.
@@ -115,11 +135,14 @@ call :updatepath %HADOOP_BIN_PATH%
   )

   if %hadoop-command% == classpath (
-    @echo %CLASSPATH%
-    goto :eof
+    if not defined hadoop-command-arguments (
+      @rem No need to bother starting up a JVM for this simple case.
+      @echo %CLASSPATH%
+      exit /b
+    )
   )

-  set corecommands=fs version jar checknative distcp daemonlog archive
+  set corecommands=fs version jar checknative distcp daemonlog archive classpath
   for %%i in ( %corecommands% ) do (
     if %hadoop-command% == %%i set corecommand=true
   )
@@ -175,6 +198,10 @@ call :updatepath %HADOOP_BIN_PATH%
   set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
   goto :eof

+:classpath
+  set CLASS=org.apache.hadoop.util.Classpath
+  goto :eof
+
 :updatepath
   set path_to_add=%*
   set current_path_comparable=%path%

+ 17 - 35
hadoop-common-project/hadoop-common/src/main/bin/rcc

@@ -15,47 +15,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-
-# The Hadoop record compiler
-#
-# Environment Variables
-#
-#   JAVA_HOME        The java implementation to use.  Overrides JAVA_HOME.
-#
-#   HADOOP_OPTS      Extra Java runtime options.
-#
-#   HADOOP_CONF_DIR  Alternate conf dir. Default is ${HADOOP_PREFIX}/conf.
-#
-
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+# This script runs the hadoop core commands.
+this="${BASH_SOURCE-$0}"
+bin=$(cd -P -- "$(dirname -- "$this")" >/dev/null && pwd -P)
+script="$(basename -- "$this")"
+this="$bin/$script"
 
 
 DEFAULT_LIBEXEC_DIR="$bin"/../libexec
 DEFAULT_LIBEXEC_DIR="$bin"/../libexec
 HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-
-if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
-  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
-fi
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+. "$HADOOP_LIBEXEC_DIR/hadoop-config.sh"
 
 
-# some Java parameters
-if [ "$JAVA_HOME" != "" ]; then
-  #echo "run java in $JAVA_HOME"
-  JAVA_HOME=$JAVA_HOME
-fi
-  
-if [ "$JAVA_HOME" = "" ]; then
-  echo "Error: JAVA_HOME is not set."
-  exit 1
+if [ $# = 0 ]; then
+  hadoop_exit_with_usage 1
 fi

-JAVA=$JAVA_HOME/bin/java
-JAVA_HEAP_MAX=-Xmx1000m 
+CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
 
 
-# restore ordinary behaviour
-unset IFS
+# Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 
 
-CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
+hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
 
 
-# run it
-exec "$JAVA" $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@"
+hadoop_finalize
+hadoop_java_exec rcc "${CLASS}" "$@"

+ 23 - 28
hadoop-common-project/hadoop-common/src/main/bin/slaves.sh

@@ -27,38 +27,33 @@
 #   HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
 ##

-usage="Usage: slaves.sh [--config confdir] command..."
+function hadoop_usage {
+  echo "Usage: slaves.sh [--config confdir] command..."
+}
 
 
-# if no args specified, show usage
-if [ $# -le 0 ]; then
-  echo $usage
-  exit 1
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  this="${BASH_SOURCE-$0}"
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
 fi

-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
-
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
-
-
-# Where to start the script, see hadoop-config.sh
-# (it set up the variables based on command line options)
-if [ "$HADOOP_SLAVE_NAMES" != '' ] ; then
-  SLAVE_NAMES=$HADOOP_SLAVE_NAMES
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
 else
-  SLAVE_FILE=${HADOOP_SLAVES:-${HADOOP_CONF_DIR}/slaves}
-  SLAVE_NAMES=$(cat "$SLAVE_FILE" | sed  's/#.*$//;/^$/d')
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 1>&2
+  exit 1
+fi
+
+# if no args specified, show usage
+if [[ $# -le 0 ]]; then
+  hadoop_exit_with_usage 1
 fi
 
 
-# start the daemons
-for slave in $SLAVE_NAMES ; do
- ssh $HADOOP_SSH_OPTS $slave $"${@// /\\ }" \
-   2>&1 | sed "s/^/$slave: /" &
- if [ "$HADOOP_SLAVE_SLEEP" != "" ]; then
-   sleep $HADOOP_SLAVE_SLEEP
- fi
-done
+hadoop_connect_to_hosts "$@"
 
 
-wait

+ 26 - 12
hadoop-common-project/hadoop-common/src/main/bin/start-all.sh

@@ -15,24 +15,38 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
 
+echo "This script is deprecated. Use start-dfs.sh and start-yarn.sh instead."
+exit 1
 
 
-# Start all hadoop daemons.  Run this on master node.
 
 
-echo "This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh"
 
 
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
-
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  this="${BASH_SOURCE-$0}"
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
 
 
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 1>&2
+  exit 1
+fi
 # start hdfs daemons if hdfs is present
-if [ -f "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh ]; then
-  "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh --config $HADOOP_CONF_DIR
+if [[ -f "${HADOOP_HDFS_HOME}/sbin/start-dfs.sh" ]]; then
+  "${HADOOP_HDFS_HOME}/sbin/start-dfs.sh" --config "${HADOOP_CONF_DIR}"
 fi
 
 
 # start yarn daemons if yarn is present
-if [ -f "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh ]; then
-  "${HADOOP_YARN_HOME}"/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
+if [[ -f "${HADOOP_YARN_HOME}/sbin/start-yarn.sh" ]]; then
+  "${HADOOP_YARN_HOME}/sbin/start-yarn.sh" --config "${HADOOP_CONF_DIR}"
 fi
+
+
+

+ 25 - 11
hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh

@@ -18,21 +18,35 @@
 
 
 # Stop all hadoop daemons.  Run this on master node.
 
 
-echo "This script is Deprecated. Instead use stop-dfs.sh and stop-yarn.sh"
-
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
+echo "This script is deprecated. Use stop-dfs.sh and stop-yarn.sh instead."
+exit 1
+
+# let's locate libexec...
+if [[ -n "${HADOOP_PREFIX}" ]]; then
+  DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
+else
+  this="${BASH_SOURCE-$0}"
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
+  DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
+fi
 
 
-DEFAULT_LIBEXEC_DIR="$bin"/../libexec
-HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
-. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
+# shellcheck disable=SC2034
+HADOOP_NEW_CONFIG=true
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+else
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 1>&2
+  exit 1
+fi
 
 
 # stop hdfs daemons if hdfs is present
-if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh ]; then
-  "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh --config $HADOOP_CONF_DIR
+if [[ -f "${HADOOP_HDFS_HOME}/sbin/stop-dfs.sh" ]]; then
+  "${HADOOP_HDFS_HOME}/sbin/stop-dfs.sh" --config "${HADOOP_CONF_DIR}"
 fi
 
 
 # stop yarn daemons if yarn is present
-if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-yarn.sh ]; then
-  "${HADOOP_HDFS_HOME}"/sbin/stop-yarn.sh --config $HADOOP_CONF_DIR
+if [[ -f "${HADOOP_HDFS_HOME}/sbin/stop-yarn.sh" ]]; then
+  "${HADOOP_HDFS_HOME}/sbin/stop-yarn.sh" --config "${HADOOP_CONF_DIR}"
 fi
+

+ 383 - 49
hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh

@@ -1,3 +1,4 @@
+#
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -16,71 +17,404 @@
 
 
 # Set Hadoop-specific environment variables here.
 
 
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
+##
+## THIS FILE ACTS AS THE MASTER FILE FOR ALL HADOOP PROJECTS.
+## SETTINGS HERE WILL BE READ BY ALL HADOOP COMMANDS.  THEREFORE,
+## ONE CAN USE THIS FILE TO SET YARN, HDFS, AND MAPREDUCE
+## CONFIGURATION OPTIONS INSTEAD OF xxx-env.sh.
+##
+## Precedence rules:
+##
+## {yarn-env.sh|hdfs-env.sh} > hadoop-env.sh > hard-coded defaults
+##
+## {YARN_xyz|HDFS_xyz} > HADOOP_xyz > hard-coded defaults
+##
+
+# Many of the options here are built from the perspective that users
+# may want to provide OVERWRITING values on the command line.
+# For example:
+#
+#  JAVA_HOME=/usr/java/testing hdfs dfs -ls
+#
+# Therefore, the vast majority (BUT NOT ALL!) of these defaults
+# are configured for substitution and not append.  If you would
+# like append, you'll # need to modify this file accordingly.
+
+###
+# Generic settings for HADOOP
+###
+
+# Technically, the only required environment variable is JAVA_HOME.
+# All others are optional.  However, our defaults are probably not
+# your defaults.  Many sites configure these options outside of Hadoop,
+# such as in /etc/profile.d
 
 
 # The java implementation to use.
-export JAVA_HOME=${JAVA_HOME}
+export JAVA_HOME=${JAVA_HOME:-"hadoop-env.sh is not configured"}
 
 
-# The jsvc implementation to use. Jsvc is required to run secure datanodes.
-#export JSVC_HOME=${JSVC_HOME}
+# Location of Hadoop's configuration information.  i.e., where this
+# file is probably living.  You will almost certainly want to set
+# this in /etc/profile.d or equivalent.
+# export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop
 
 
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+# The maximum amount of heap to use, in MB. Default is 1024.
+# export HADOOP_HEAPSIZE=1024
 
 
-# Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
-for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
-  if [ "$HADOOP_CLASSPATH" ]; then
-    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
-  else
-    export HADOOP_CLASSPATH=$f
-  fi
-done
+# Extra Java runtime options for all Hadoop commands. We don't support
+# IPv6 yet/still, so by default we set preference to IPv4.
+# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
 
 
-# The maximum amount of heap to use, in MB. Default is 1000.
-#export HADOOP_HEAPSIZE=
-#export HADOOP_NAMENODE_INIT_HEAPSIZE=""
+# Some parts of the shell code may do special things dependent upon
+# the operating system.  We have to set this here. See the next
+# section as to why....
+export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
 
 
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
 
 
-MAC_OSX=false
-case "`uname`" in
-Darwin*) MAC_OSX=true;;
+# Under certain conditions, Java on OS X will throw SCDynamicStore errors
+# in the system logs.
+# See HADOOP-8719 for more information.  If you need Kerberos
+# support on OS X, you'll want to change/remove this extra bit.
+case ${HADOOP_OS_TYPE} in
+  Darwin*)
+    export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.realm= "
+    export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.kdc= "
+    export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.conf= "
+  ;;
 esac
-if $MAC_OSX; then
-    export HADOOP_OPTS="$HADOOP_OPTS -Djava.security.krb5.realm= -Djava.security.krb5.kdc="
-fi
 
 
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
-export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
+# Extra Java runtime options for Hadoop clients (i.e., hdfs dfs -blah)
+# These get added to HADOOP_OPTS for such commands.  In most cases,
+# this should be left empty and let users supply it on the
+# command line.
+# export HADOOP_CLIENT_OPTS=""
 
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
+#
+# A note about classpaths.
+#
+# The classpath is configured such that entries are stripped prior
+# to handing to Java based either upon duplication or non-existence.
+# Wildcards and/or directories are *NOT* expanded as the
+# de-duplication is fairly simple.  So if two directories are in
+# the classpath that both contain awesome-methods-1.0.jar,
+# awesome-methods-1.0.jar will still be seen by java.  But if
+# the classpath specifically has awesome-methods-1.0.jar from the
+# same directory listed twice, the last one will be removed.
+#
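To make the de-duplication rule above concrete, here is a minimal, illustrative Java sketch; the real logic lives in the Hadoop shell functions, not in this file, and the jar paths are invented:

    import java.util.LinkedHashSet;
    import java.util.Set;

    public class ClasspathDedupSketch {
      // Entries are compared as literal strings: the same jar reachable through two
      // different directories survives twice, while a literally repeated entry is
      // collapsed, keeping the first occurrence.
      public static String dedup(String classpath) {
        Set<String> seen = new LinkedHashSet<>();
        for (String entry : classpath.split(":")) {
          if (!entry.isEmpty()) {
            seen.add(entry);
          }
        }
        return String.join(":", seen);
      }

      public static void main(String[] args) {
        System.out.println(dedup(
            "/a/awesome-methods-1.0.jar:/b/awesome-methods-1.0.jar:/a/awesome-methods-1.0.jar"));
        // -> /a/awesome-methods-1.0.jar:/b/awesome-methods-1.0.jar
      }
    }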
 
 
-export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
-export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+# An additional, custom CLASSPATH.  This is really meant for
+# end users, but as an administrator, one might want to push
+# something extra in here too, such as the jar to the topology
+# method.  Just be sure to append to the existing HADOOP_USER_CLASSPATH
+# so end users have a way to add stuff.
+# export HADOOP_USER_CLASSPATH="/some/cool/path/on/your/machine"
 
 
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
-#HADOOP_JAVA_PLATFORM_OPTS="-XX:-UsePerfData $HADOOP_JAVA_PLATFORM_OPTS"
+# Should HADOOP_USER_CLASSPATH be first in the official CLASSPATH?
+# export HADOOP_USER_CLASSPATH_FIRST="yes"
 
 
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+# If HADOOP_USE_CLIENT_CLASSLOADER is set, HADOOP_CLASSPATH along with the main
+# jar are handled by a separate isolated client classloader. If it is set,
+# HADOOP_USER_CLASSPATH_FIRST is ignored. Can be defined by doing
+# export HADOOP_USE_CLIENT_CLASSLOADER=true
 
 
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+# HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES overrides the default definition of
+# system classes for the client classloader when HADOOP_USE_CLIENT_CLASSLOADER
+# is enabled. Names ending in '.' (period) are treated as package names, and
+# names starting with a '-' are treated as negative matches. For example,
+# export HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop."
 
 
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+###
+# Options for remote shell connectivity
+###
 
 
-# The directory where pid files are stored. /tmp by default.
-# NOTE: this should be set to a directory that can only be written to by 
-#       the user that will run the hadoop daemons.  Otherwise there is the
-#       potential for a symlink attack.
-export HADOOP_PID_DIR=${HADOOP_PID_DIR}
-export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+# There are some optional components of hadoop that allow for
+# command and control of remote hosts.  For example,
+# start-dfs.sh will attempt to bring up all NNs, DNS, etc.
+
+# Options to pass to SSH when one of the "log into a host and
+# start/stop daemons" scripts is executed
+# export HADOOP_SSH_OPTS="-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"
+
+# The built-in ssh handler will limit itself to 10 simultaneous connections.
+# For pdsh users, this sets the fanout size ( -f )
+# Change this to increase/decrease as necessary.
+# export HADOOP_SSH_PARALLEL=10
+
+# Filename which contains all of the hosts for any remote execution
+# helper scripts such as slaves.sh, start-dfs.sh, etc.
+# export HADOOP_SLAVES="${HADOOP_CONF_DIR}/slaves"
+
+###
+# Options for all daemons
+###
+#
+
+#
+# You can define variables right here and then re-use them later on.
+# For example, it is common to use the same garbage collection settings
+# for all the daemons.  So we could define:
+#
+# export HADOOP_GC_SETTINGS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps"
+#
+# .. and then use it as per option (b) under the namenode.
+
+# Where (primarily) daemon log files are stored.
+# $HADOOP_PREFIX/logs by default.
+# export HADOOP_LOG_DIR=${HADOOP_PREFIX}/logs
 
 
 # A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
+# This is used in writing log and pid files, so keep that in mind!
+# export HADOOP_IDENT_STRING=$USER
+
+# How many seconds to pause after stopping a daemon
+# export HADOOP_STOP_TIMEOUT=5
+
+# Where pid files are stored.  /tmp by default.
+# export HADOOP_PID_DIR=/tmp
+
+# Default log level and output location
+# This sets the hadoop.root.logger property
+# export HADOOP_ROOT_LOGGER=INFO,console
+
+# Default log level for daemons spawned explicitly by hadoop-daemon.sh
+# This sets the hadoop.root.logger property
+# export HADOOP_DAEMON_ROOT_LOGGER=INFO,RFA
+
+# Default log level and output location for security-related messages.
+# It sets -Dhadoop.security.logger on the command line.
+# You will almost certainly want to change this on a per-daemon basis!
+# export HADOOP_SECURITY_LOGGER=INFO,NullAppender
+
+# Default log level for file system audit messages.
+# It sets -Dhdfs.audit.logger on the command line.
+# You will almost certainly want to change this on a per-daemon basis!
+# export HADOOP_AUDIT_LOGGER=INFO,NullAppender
+
+# Default process priority level
+# Note that sub-processes will also run at this level!
+# export HADOOP_NICENESS=0
+
+# Default name for the service level authorization file
+# export HADOOP_POLICYFILE="hadoop-policy.xml"
+
+###
+# Secure/privileged execution
+###
+
+#
+# Out of the box, Hadoop uses jsvc from Apache Commons to launch daemons
+# on privileged ports.  This functionality can be replaced by providing
+# custom functions.  See hadoop-functions.sh for more information.
+#
+
+# The jsvc implementation to use. Jsvc is required to run secure datanodes.
+# export JSVC_HOME=/usr/bin
+
+#
+# This directory contains pids for secure and privileged processes.
+#export HADOOP_SECURE_PID_DIR=${HADOOP_PID_DIR}
+
+#
+# This directory contains the logs for secure and privileged processes.
+# export HADOOP_SECURE_LOG=${HADOOP_LOG_DIR}
+
+#
+# When running a secure daemon, the default value of HADOOP_IDENT_STRING
+# ends up being a bit bogus.  Therefore, by default, the code will
+# replace HADOOP_IDENT_STRING with HADOOP_SECURE_xx_USER.  If you want
+# to keep HADOOP_IDENT_STRING untouched, then uncomment this line.
+# export HADOOP_SECURE_IDENT_PRESERVE="true"
+
+###
+# NameNode specific parameters
+###
+# Specify the JVM options to be used when starting the NameNode.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# a) Set JMX options
+# export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026"
+#
+# b) Set garbage collection logs
+# export HADOOP_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
+#
+# c) ... or set them directly
+# export HADOOP_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
+
+# this is the default:
+# export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS -Dhdfs.audit.logger=INFO,NullAppender"
+
+###
+# SecondaryNameNode specific parameters
+###
+# Specify the JVM options to be used when starting the SecondaryNameNode.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# This is the default:
+# export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS -Dhdfs.audit.logger=INFO,NullAppender"
+
+###
+# DataNode specific parameters
+###
+# Specify the JVM options to be used when starting the DataNode.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# This is the default:
+# export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges
+# This **MUST** be uncommented to enable secure HDFS!
+# export HADOOP_SECURE_DN_USER=hdfs
+
+# Supplemental options for secure datanodes
+# By default, we use jsvc which needs to know to launch a
+# server jvm.
+# export HADOOP_DN_SECURE_EXTRA_OPTS="-jvm server"
+
+# Where datanode log files are stored in the secure data environment.
+# export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_SECURE_LOG_DIR}
+
+# Where datanode pid files are stored in the secure data environment.
+# export HADOOP_SECURE_DN_PID_DIR=${HADOOP_SECURE_PID_DIR}
+
+###
+# NFS3 Gateway specific parameters
+###
+# Specify the JVM options to be used when starting the NFS3 Gateway.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_NFS3_OPTS=""
+
+# Specify the JVM options to be used when starting the Hadoop portmapper.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_PORTMAP_OPTS="-Xmx512m"
+
+# Supplemental options for privileged gateways
+# By default, we use jsvc which needs to know to launch a
+# server jvm.
+# export HADOOP_NFS3_SECURE_EXTRA_OPTS="-jvm server"
+
+# On privileged gateways, user to run the gateway as after dropping privileges
+# export HADOOP_PRIVILEGED_NFS_USER=nfsserver
+
+###
+# ZKFailoverController specific parameters
+###
+# Specify the JVM options to be used when starting the ZKFailoverController.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_ZKFC_OPTS=""
+
+###
+# QuorumJournalNode specific parameters
+###
+# Specify the JVM options to be used when starting the QuorumJournalNode.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_JOURNALNODE_OPTS=""
+
+###
+# HDFS Balancer specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Balancer.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HADOOP_BALANCER_OPTS=""
+
+###
+# Advanced Users Only!
+###
+
+#
+# When building Hadoop, you can add the class paths to your commands
+# via this special env var:
+# HADOOP_ENABLE_BUILD_PATHS="true"
+
+# You can do things like replace parts of the shell underbelly.
+# Most of this code is in hadoop-functions.sh.
+#
+#
+# For example, if you want to add compression to the rotation
+# method for the .out files that daemons generate, you can do
+# that by redefining the hadoop_rotate_log function by
+# uncommenting this code block:
+
+#function hadoop_rotate_log
+#{
+#  #
+#  # log rotation (mainly used for .out files)
+#  # Users are likely to replace this one for something
+#  # that gzips or uses dates or who knows what.
+#  #
+#  # be aware that &1 and &2 might go through here
+#  # so don't do anything too crazy...
+#  #
+#  local log=$1;
+#  local num=${2:-5};
+#
+#  if [[ -f "${log}" ]]; then # rotate logs
+#    while [[ ${num} -gt 1 ]]; do
+#      #shellcheck disable=SC2086
+#      let prev=${num}-1
+#      if [[ -f "${log}.${prev}" ]]; then
+#        mv "${log}.${prev}" "${log}.${num}"
+#      fi
+#      num=${prev}
+#    done
+#    mv "${log}" "${log}.${num}"
+#    gzip -9 "${log}.${num}"
+#  fi
+#}
+#
+#
+# Another example:  finding java
+#
+# By default, Hadoop assumes that $JAVA_HOME is always defined
+# outside of its configuration. Eons ago, Apple standardized
+# on a helper program called java_home to find it for you.
+#
+#function hadoop_java_setup
+#{
+#
+#  if [[ -z "${JAVA_HOME}" ]]; then
+#     case $HADOOP_OS_TYPE in
+#       Darwin*)
+#          JAVA_HOME=$(/usr/libexec/java_home)
+#          ;;
+#     esac
+#  fi
+#
+#  # Bail if we did not detect it
+#  if [[ -z "${JAVA_HOME}" ]]; then
+#    echo "ERROR: JAVA_HOME is not set and could not be found." 1>&2
+#    exit 1
+#  fi
+#
+#  if [[ ! -d "${JAVA_HOME}" ]]; then
+#     echo "ERROR: JAVA_HOME (${JAVA_HOME}) does not exist." 1>&2
+#     exit 1
+#  fi
+#
+#  JAVA="${JAVA_HOME}/bin/java"
+#
+#  if [[ ! -x ${JAVA} ]]; then
+#    echo "ERROR: ${JAVA} is not executable." 1>&2
+#    exit 1
+#  fi
+#  JAVA_HEAP_MAX=-Xmx1g
+#  HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-128}
+#
+#  # check envvars which might override default args
+#  if [[ -n "$HADOOP_HEAPSIZE" ]]; then
+#    JAVA_HEAP_MAX="-Xmx${HADOOP_HEAPSIZE}m"
+#  fi
+#}
+
+

+ 73 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -110,8 +110,9 @@ import com.google.common.base.Preconditions;
 *
 * <p>Unless explicitly turned off, Hadoop by default specifies two 
 * resources, loaded in-order from the classpath: <ol>
- * <li><tt><a href="{@docRoot}/../core-default.html">core-default.xml</a>
- * </tt>: Read-only defaults for hadoop.</li>
+ * <li><tt>
+ * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+ * core-default.xml</a></tt>: Read-only defaults for hadoop.</li>
 * <li><tt>core-site.xml</tt>: Site-specific configuration for a given hadoop
 * installation.</li>
 * </ol>
@@ -1781,7 +1782,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
  public char[] getPassword(String name) throws IOException {
    char[] pass = null;
 
 
-    pass = getPasswordFromCredenitalProviders(name);
+    pass = getPasswordFromCredentialProviders(name);
 
 
    if (pass == null) {
      pass = getPasswordFromConfig(name);
@@ -1797,7 +1798,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   * @return password or null if not found
   * @throws IOException
   */
-  protected char[] getPasswordFromCredenitalProviders(String name)
+  protected char[] getPasswordFromCredentialProviders(String name)
      throws IOException {
    char[] pass = null;
    try {
@@ -1843,6 +1844,38 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    return pass;
  }
 
 
+  /**
+   * Get the socket address for <code>hostProperty</code> as a
+   * <code>InetSocketAddress</code>. If <code>hostProperty</code> is
+   * <code>null</code>, <code>addressProperty</code> will be used. This
+   * is useful for cases where we want to differentiate between host
+   * bind address and address clients should use to establish connection.
+   *
+   * @param hostProperty bind host property name.
+   * @param addressProperty address property name.
+   * @param defaultAddressValue the default value
+   * @param defaultPort the default port
+   * @return InetSocketAddress
+   */
+  public InetSocketAddress getSocketAddr(
+      String hostProperty,
+      String addressProperty,
+      String defaultAddressValue,
+      int defaultPort) {
+
+    InetSocketAddress bindAddr = getSocketAddr(
+      addressProperty, defaultAddressValue, defaultPort);
+
+    final String host = get(hostProperty);
+
+    if (host == null || host.isEmpty()) {
+      return bindAddr;
+    }
+
+    return NetUtils.createSocketAddr(
+        host, bindAddr.getPort(), hostProperty);
+  }
+
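A small usage sketch of the new two-property overload follows; the property names and host are illustrative assumptions, not taken from the patch:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;

    public class GetSocketAddrSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("dfs.namenode.rpc-bind-host", "0.0.0.0");            // hypothetical bind-host key
        conf.set("dfs.namenode.rpc-address", "nn1.example.com:8020"); // hypothetical address key

        // Host is taken from the bind-host property, port from the address property,
        // so the server binds to 0.0.0.0:8020 while clients keep nn1.example.com.
        InetSocketAddress bindAddr = conf.getSocketAddr(
            "dfs.namenode.rpc-bind-host", "dfs.namenode.rpc-address",
            "0.0.0.0:8020", 8020);
        System.out.println(bindAddr); // 0.0.0.0:8020
      }
    }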
  /**
   * Get the socket address for <code>name</code> property as a
   * <code>InetSocketAddress</code>.
@@ -1864,6 +1897,40 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
  public void setSocketAddr(String name, InetSocketAddress addr) {
    set(name, NetUtils.getHostPortString(addr));
  }
+
+  /**
+   * Set the socket address a client can use to connect for the
+   * <code>name</code> property as a <code>host:port</code>.  The wildcard
+   * address is replaced with the local host's address. If the host and address
+   * properties are configured the host component of the address will be combined
+   * with the port component of the addr to generate the address.  This is to allow
+   * optional control over which host name is used in multi-home bind-host
+   * cases where a host can have multiple names
+   * @param hostProperty the bind-host configuration name
+   * @param addressProperty the service address configuration name
+   * @param defaultAddressValue the service default address configuration value
+   * @param addr InetSocketAddress of the service listener
+   * @return InetSocketAddress for clients to connect
+   */
+  public InetSocketAddress updateConnectAddr(
+      String hostProperty,
+      String addressProperty,
+      String defaultAddressValue,
+      InetSocketAddress addr) {
+
+    final String host = get(hostProperty);
+    final String connectHostPort = getTrimmed(addressProperty, defaultAddressValue);
+
+    if (host == null || host.isEmpty() || connectHostPort == null || connectHostPort.isEmpty()) {
+      //not our case, fall back to original logic
+      return updateConnectAddr(addressProperty, addr);
+    }
+
+    final String connectHost = connectHostPort.split(":")[0];
+    // Create connect address using client address hostname and server port.
+    return updateConnectAddr(addressProperty, NetUtils.createSocketAddrForHost(
+        connectHost, addr.getPort()));
+  }
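A companion sketch for updateConnectAddr(), again with invented property names, shows how a wildcard listener is mapped back to a client-facing address:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;

    public class UpdateConnectAddrSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("dfs.namenode.rpc-bind-host", "0.0.0.0");            // hypothetical keys
        conf.set("dfs.namenode.rpc-address", "nn1.example.com:8020");

        // The listener actually bound to the wildcard address:
        InetSocketAddress listener = new InetSocketAddress("0.0.0.0", 8020);

        // The host component comes from the address property and the port from the
        // listener, so clients are handed nn1.example.com:8020 instead of 0.0.0.0.
        InetSocketAddress connectAddr = conf.updateConnectAddr(
            "dfs.namenode.rpc-bind-host", "dfs.namenode.rpc-address",
            "0.0.0.0:8020", listener);
        System.out.println(connectAddr);
      }
    }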
   
   
  /**
   * Set the socket address a client can use to connect for the
@@ -2689,7 +2756,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
          item.getValue() instanceof String) {
        m = p.matcher((String)item.getKey());
        if(m.find()) { // match
-          result.put((String) item.getKey(), (String) item.getValue());
+          result.put((String) item.getKey(),
+              substituteVars(getProps().getProperty((String) item.getKey())));
        }
      }
    }

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java

@@ -200,6 +200,7 @@ public class ReconfigurationServlet extends HttpServlet {
  protected void doGet(HttpServletRequest req, HttpServletResponse resp)
    throws ServletException, IOException {
    LOG.info("GET");
+    resp.setContentType("text/html");
    PrintWriter out = resp.getWriter();

    Reconfigurable reconf = getReconfigurable(req);
@@ -214,6 +215,7 @@ public class ReconfigurationServlet extends HttpServlet {
  protected void doPost(HttpServletRequest req, HttpServletResponse resp)
    throws ServletException, IOException {
    LOG.info("POST");
+    resp.setContentType("text/html");
    PrintWriter out = resp.getWriter();

    Reconfigurable reconf = getReconfigurable(req);

+ 67 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java

@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import com.google.common.base.Preconditions;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public abstract class AesCtrCryptoCodec extends CryptoCodec {
+
+  protected static final CipherSuite SUITE = CipherSuite.AES_CTR_NOPADDING;
+
+  /**
+   * For AES, the algorithm block is fixed size of 128 bits.
+   * @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard
+   */
+  private static final int AES_BLOCK_SIZE = SUITE.getAlgorithmBlockSize();
+  private static final int CTR_OFFSET = 8;
+
+  @Override
+  public CipherSuite getCipherSuite() {
+    return SUITE;
+  }
+  
+  /**
+   * The IV is produced by adding the initial IV to the counter. IV length 
+   * should be the same as {@link #AES_BLOCK_SIZE}
+   */
+  @Override
+  public void calculateIV(byte[] initIV, long counter, byte[] IV) {
+    Preconditions.checkArgument(initIV.length == AES_BLOCK_SIZE);
+    Preconditions.checkArgument(IV.length == AES_BLOCK_SIZE);
+    
+    System.arraycopy(initIV, 0, IV, 0, CTR_OFFSET);
+    long l = 0;
+    for (int i = 0; i < 8; i++) {
+      l = ((l << 8) | (initIV[CTR_OFFSET + i] & 0xff));
+    }
+    l += counter;
+    IV[CTR_OFFSET + 0] = (byte) (l >>> 56);
+    IV[CTR_OFFSET + 1] = (byte) (l >>> 48);
+    IV[CTR_OFFSET + 2] = (byte) (l >>> 40);
+    IV[CTR_OFFSET + 3] = (byte) (l >>> 32);
+    IV[CTR_OFFSET + 4] = (byte) (l >>> 24);
+    IV[CTR_OFFSET + 5] = (byte) (l >>> 16);
+    IV[CTR_OFFSET + 6] = (byte) (l >>> 8);
+    IV[CTR_OFFSET + 7] = (byte) (l);
+  }
+}
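As a sanity check on the counter arithmetic above, this illustrative sketch computes the same IV using a ByteBuffer; it is not part of the patch:

    import java.nio.ByteBuffer;

    public class CtrIvSketch {
      // Equivalent to calculateIV(): copy the first 8 bytes of the initial IV and
      // store (low 64 bits of the initial IV + counter) big-endian in the last 8.
      static byte[] ctrIV(byte[] initIV, long counter) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.put(initIV, 0, 8);
        long low = ByteBuffer.wrap(initIV, 8, 8).getLong();
        buf.putLong(low + counter);
        return buf.array();
      }

      public static void main(String[] args) {
        byte[] iv = ctrIV(new byte[16], 3L);
        System.out.println(iv[15]); // 3: with an all-zero initial IV, only the last byte changes
      }
    }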

+ 115 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherSuite.java

@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.crypto;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Defines properties of a CipherSuite. Modeled after the ciphers in
+ * {@link javax.crypto.Cipher}.
+ */
+@InterfaceAudience.Private
+public enum CipherSuite {
+  UNKNOWN("Unknown", 0),
+  AES_CTR_NOPADDING("AES/CTR/NoPadding", 16);
+
+  private final String name;
+  private final int algoBlockSize;
+
+  private Integer unknownValue = null;
+
+  CipherSuite(String name, int algoBlockSize) {
+    this.name = name;
+    this.algoBlockSize = algoBlockSize;
+  }
+
+  public void setUnknownValue(int unknown) {
+    this.unknownValue = unknown;
+  }
+
+  public int getUnknownValue() {
+    return unknownValue;
+  }
+
+  /**
+   * @return name of cipher suite, as in {@link javax.crypto.Cipher}
+   */
+  public String getName() {
+    return name;
+  }
+
+  /**
+   * @return size of an algorithm block in bytes
+   */
+  public int getAlgorithmBlockSize() {
+    return algoBlockSize;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder("{");
+    builder.append("name: " + name);
+    builder.append(", algorithmBlockSize: " + algoBlockSize);
+    if (unknownValue != null) {
+      builder.append(", unknownValue: " + unknownValue);
+    }
+    builder.append("}");
+    return builder.toString();
+  }
+  
+  public static void checkName(String name) {
+    CipherSuite[] suites = CipherSuite.values();
+    for (CipherSuite suite : suites) {
+      if (suite.getName().equals(name)) {
+        return;
+      }
+    }
+    throw new IllegalArgumentException("Invalid cipher suite name: " + name);
+  }
+  
+  /**
+   * Convert to CipherSuite from name; {@link #algoBlockSize} is fixed for
+   * a given cipher suite, so only the name needs to be compared.
+   * @param name cipher suite name
+   * @return CipherSuite cipher suite
+   */
+  public static CipherSuite convert(String name) {
+    CipherSuite[] suites = CipherSuite.values();
+    for (CipherSuite suite : suites) {
+      if (suite.getName().equals(name)) {
+        return suite;
+      }
+    }
+    throw new IllegalArgumentException("Invalid cipher suite name: " + name);
+  }
+  
+  /**
+   * Returns suffix of cipher suite configuration.
+   * @return String configuration suffix
+   */
+  public String getConfigSuffix() {
+    String[] parts = name.split("/");
+    StringBuilder suffix = new StringBuilder();
+    for (String part : parts) {
+      suffix.append(".").append(part.toLowerCase());
+    }
+    
+    return suffix.toString();
+  }
+}
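A short usage sketch of the enum above; the printed values follow directly from the code:

    import org.apache.hadoop.crypto.CipherSuite;

    public class CipherSuiteSketch {
      public static void main(String[] args) {
        CipherSuite suite = CipherSuite.convert("AES/CTR/NoPadding");
        System.out.println(suite.getAlgorithmBlockSize()); // 16
        System.out.println(suite.getConfigSuffix());       // .aes.ctr.nopadding
        System.out.println(suite);                         // {name: AES/CTR/NoPadding, algorithmBlockSize: 16}
      }
    }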

+ 179 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoCodec.java

@@ -0,0 +1,179 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.security.GeneralSecurityException;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.PerformanceAdvisory;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Splitter;
+import com.google.common.collect.Lists;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT;
+
+/**
+ * Crypto codec class, encapsulates encryptor/decryptor pair.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public abstract class CryptoCodec implements Configurable {
+  public static Logger LOG = LoggerFactory.getLogger(CryptoCodec.class);
+  
+  /**
+   * Get crypto codec for specified algorithm/mode/padding.
+   * 
+   * @param conf
+   *          the configuration
+   * @param cipherSuite
+   *          algorithm/mode/padding
+   * @return CryptoCodec the codec object. Null value will be returned if no
+   *         crypto codec classes are configured for the cipher suite.
+   */
+  public static CryptoCodec getInstance(Configuration conf, 
+      CipherSuite cipherSuite) {
+    List<Class<? extends CryptoCodec>> klasses = getCodecClasses(
+        conf, cipherSuite);
+    if (klasses == null) {
+      return null;
+    }
+    CryptoCodec codec = null;
+    for (Class<? extends CryptoCodec> klass : klasses) {
+      try {
+        CryptoCodec c = ReflectionUtils.newInstance(klass, conf);
+        if (c.getCipherSuite().getName().equals(cipherSuite.getName())) {
+          if (codec == null) {
+            PerformanceAdvisory.LOG.debug("Using crypto codec {}.",
+                klass.getName());
+            codec = c;
+          }
+        } else {
+          PerformanceAdvisory.LOG.debug(
+              "Crypto codec {} doesn't meet the cipher suite {}.",
+              klass.getName(), cipherSuite.getName());
+        }
+      } catch (Exception e) {
+        PerformanceAdvisory.LOG.debug("Crypto codec {} is not available.",
+            klass.getName());
+      }
+    }
+    
+    if (codec != null) {
+      return codec;
+    }
+    
+    throw new RuntimeException("No available crypto codec which meets " + 
+        "the cipher suite " + cipherSuite.getName() + ".");
+  }
+  
+  /**
+   * Get crypto codec for algorithm/mode/padding in config value
+   * hadoop.security.crypto.cipher.suite
+   * 
+   * @param conf
+   *          the configuration
+   * @return CryptoCodec the codec object. Null value will be returned if no
+   *         crypto codec classes are configured for the cipher suite.
+   */
+  public static CryptoCodec getInstance(Configuration conf) {
+    String name = conf.get(HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY, 
+        HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT);
+    return getInstance(conf, CipherSuite.convert(name));
+  }
+  
+  private static List<Class<? extends CryptoCodec>> getCodecClasses(
+      Configuration conf, CipherSuite cipherSuite) {
+    List<Class<? extends CryptoCodec>> result = Lists.newArrayList();
+    String configName = HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX + 
+        cipherSuite.getConfigSuffix();
+    String codecString = conf.get(configName);
+    if (codecString == null) {
+      PerformanceAdvisory.LOG.debug(
+          "No crypto codec classes with cipher suite configured.");
+      return null;
+    }
+    for (String c : Splitter.on(',').trimResults().omitEmptyStrings().
+        split(codecString)) {
+      try {
+        Class<?> cls = conf.getClassByName(c);
+        result.add(cls.asSubclass(CryptoCodec.class));
+      } catch (ClassCastException e) {
+        PerformanceAdvisory.LOG.debug("Class {} is not a CryptoCodec.", c);
+      } catch (ClassNotFoundException e) {
+        PerformanceAdvisory.LOG.debug("Crypto codec {} not found.", c);
+      }
+    }
+    
+    return result;
+  }
+
+  /**
+   * @return the CipherSuite for this codec.
+   */
+  public abstract CipherSuite getCipherSuite();
+
+  /**
+   * Create a {@link org.apache.hadoop.crypto.Encryptor}. 
+   * @return Encryptor the encryptor
+   */
+  public abstract Encryptor createEncryptor() throws GeneralSecurityException;
+  
+  /**
+   * Create a {@link org.apache.hadoop.crypto.Decryptor}.
+   * @return Decryptor the decryptor
+   */
+  public abstract Decryptor createDecryptor() throws GeneralSecurityException;
+  
+  /**
+   * This interface is only for Counter (CTR) mode. Generally the Encryptor
+   * or Decryptor calculates the IV and maintain encryption context internally. 
+   * For example a {@link javax.crypto.Cipher} will maintain its encryption 
+   * context internally when we do encryption/decryption using the 
+   * Cipher#update interface. 
+   * <p/>
+   * Encryption/Decryption is not always on the entire file. For example,
+   * in Hadoop, a node may only decrypt a portion of a file (i.e. a split).
+   * In these situations, the counter is derived from the file position.
+   * <p/>
+   * The IV can be calculated by combining the initial IV and the counter with 
+   * a lossless operation (concatenation, addition, or XOR).
+   * @see http://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Counter_.28CTR.29
+   * 
+   * @param initIV initial IV
+   * @param counter counter for input stream position 
+   * @param IV the IV for input stream position
+   */
+  public abstract void calculateIV(byte[] initIV, long counter, byte[] IV);
+  
+  /**
+   * Generate a number of secure, random bytes suitable for cryptographic use.
+   * This method needs to be thread-safe.
+   *
+   * @param bytes byte array to populate with random data
+   */
+  public abstract void generateSecureRandom(byte[] bytes);
+}
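A usage sketch for codec lookup; it assumes HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX resolves to "hadoop.security.crypto.codec.classes", and the concrete codec class name is an assumption made only for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.CipherSuite;
    import org.apache.hadoop.crypto.CryptoCodec;
    import org.apache.hadoop.crypto.Encryptor;

    public class CryptoCodecSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Key = prefix + CipherSuite#getConfigSuffix(); the class name is hypothetical here.
        conf.set("hadoop.security.crypto.codec.classes.aes.ctr.nopadding",
            "org.apache.hadoop.crypto.JceAesCtrCryptoCodec");

        CryptoCodec codec = CryptoCodec.getInstance(conf, CipherSuite.AES_CTR_NOPADDING);
        byte[] iv = new byte[16];
        codec.generateSecureRandom(iv);        // fill the IV with secure random bytes
        Encryptor encryptor = codec.createEncryptor();
        System.out.println(codec.getCipherSuite());
      }
    }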

+ 680 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java

@@ -0,0 +1,680 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.FileDescriptor;
+import java.io.FileInputStream;
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import java.security.GeneralSecurityException;
+import java.util.EnumSet;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentLinkedQueue;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.ByteBufferReadable;
+import org.apache.hadoop.fs.CanSetDropBehind;
+import org.apache.hadoop.fs.CanSetReadahead;
+import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
+import org.apache.hadoop.fs.HasFileDescriptor;
+import org.apache.hadoop.fs.PositionedReadable;
+import org.apache.hadoop.fs.ReadOption;
+import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.io.ByteBufferPool;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * CryptoInputStream decrypts data. It is not thread-safe. AES CTR mode is
+ * required in order to ensure that the plain text and cipher text have a 1:1
+ * mapping. The decryption is buffer based. The key points of the decryption
+ * are (1) calculating the counter and (2) padding through stream position:
+ * <p/>
+ * counter = base + pos/(algorithm blocksize); 
+ * padding = pos%(algorithm blocksize); 
+ * <p/>
+ * The underlying stream offset is maintained as state.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class CryptoInputStream extends FilterInputStream implements 
+    Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, 
+    CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess {
+  private static final byte[] oneByteBuf = new byte[1];
+  private final CryptoCodec codec;
+  private final Decryptor decryptor;
+  private final int bufferSize;
+  
+  /**
+   * Input data buffer. The data starts at inBuffer.position() and ends at 
+   * to inBuffer.limit().
+   */
+  private ByteBuffer inBuffer;
+  
+  /**
+   * The decrypted data buffer. The data starts at outBuffer.position() and 
+   * ends at outBuffer.limit();
+   */
+  private ByteBuffer outBuffer;
+  private long streamOffset = 0; // Underlying stream offset.
+  
+  /**
+   * Whether the underlying stream supports 
+   * {@link org.apache.hadoop.fs.ByteBufferReadable}
+   */
+  private Boolean usingByteBufferRead = null;
+  
+  /**
+   * Padding = pos%(algorithm blocksize); Padding is put into {@link #inBuffer} 
+   * before any other data goes in. The purpose of padding is to put the input 
+   * data at the proper position.
+   */
+  private byte padding;
+  private boolean closed;
+  private final byte[] key;
+  private final byte[] initIV;
+  private byte[] iv;
+  
+  /** DirectBuffer pool */
+  private final Queue<ByteBuffer> bufferPool = 
+      new ConcurrentLinkedQueue<ByteBuffer>();
+  /** Decryptor pool */
+  private final Queue<Decryptor> decryptorPool = 
+      new ConcurrentLinkedQueue<Decryptor>();
+  
+  public CryptoInputStream(InputStream in, CryptoCodec codec, 
+      int bufferSize, byte[] key, byte[] iv) throws IOException {
+    this(in, codec, bufferSize, key, iv, 
+        CryptoStreamUtils.getInputStreamOffset(in));
+  }
+  
+  public CryptoInputStream(InputStream in, CryptoCodec codec,
+      int bufferSize, byte[] key, byte[] iv, long streamOffset) throws IOException {
+    super(in);
+    this.bufferSize = CryptoStreamUtils.checkBufferSize(codec, bufferSize);
+    this.codec = codec;
+    this.key = key.clone();
+    this.initIV = iv.clone();
+    this.iv = iv.clone();
+    this.streamOffset = streamOffset;
+    inBuffer = ByteBuffer.allocateDirect(this.bufferSize);
+    outBuffer = ByteBuffer.allocateDirect(this.bufferSize);
+    decryptor = getDecryptor();
+    resetStreamOffset(streamOffset);
+  }
+  
+  public CryptoInputStream(InputStream in, CryptoCodec codec,
+      byte[] key, byte[] iv) throws IOException {
+    this(in, codec, CryptoStreamUtils.getBufferSize(codec.getConf()), key, iv);
+  }
+  
+  public InputStream getWrappedStream() {
+    return in;
+  }
+  
+  /**
+   * Decryption is buffer based.
+   * If there is data in {@link #outBuffer}, then read it out of this buffer.
+   * If there is no data in {@link #outBuffer}, then read more from the 
+   * underlying stream and do the decryption.
+   * @param b the buffer into which the decrypted data is read.
+   * @param off the buffer offset.
+   * @param len the maximum number of decrypted data bytes to read.
+   * @return int the total number of decrypted data bytes read into the buffer.
+   * @throws IOException
+   */
+  @Override
+  public int read(byte[] b, int off, int len) throws IOException {
+    checkStream();
+    if (b == null) {
+      throw new NullPointerException();
+    } else if (off < 0 || len < 0 || len > b.length - off) {
+      throw new IndexOutOfBoundsException();
+    } else if (len == 0) {
+      return 0;
+    }
+    
+    final int remaining = outBuffer.remaining();
+    if (remaining > 0) {
+      int n = Math.min(len, remaining);
+      outBuffer.get(b, off, n);
+      return n;
+    } else {
+      int n = 0;
+      
+      /*
+       * Check whether the underlying stream is {@link ByteBufferReadable},
+       * it can avoid bytes copy.
+       */
+      if (usingByteBufferRead == null) {
+        if (in instanceof ByteBufferReadable) {
+          try {
+            n = ((ByteBufferReadable) in).read(inBuffer);
+            usingByteBufferRead = Boolean.TRUE;
+          } catch (UnsupportedOperationException e) {
+            usingByteBufferRead = Boolean.FALSE;
+          }
+        } else {
+          usingByteBufferRead = Boolean.FALSE;
+        }
+        if (!usingByteBufferRead) {
+          n = readFromUnderlyingStream(inBuffer);
+        }
+      } else {
+        if (usingByteBufferRead) {
+          n = ((ByteBufferReadable) in).read(inBuffer);
+        } else {
+          n = readFromUnderlyingStream(inBuffer);
+        }
+      }
+      if (n <= 0) {
+        return n;
+      }
+      
+      streamOffset += n; // Read n bytes
+      decrypt(decryptor, inBuffer, outBuffer, padding);
+      padding = afterDecryption(decryptor, inBuffer, streamOffset, iv);
+      n = Math.min(len, outBuffer.remaining());
+      outBuffer.get(b, off, n);
+      return n;
+    }
+  }
+  
+  /** Read data from underlying stream. */
+  private int readFromUnderlyingStream(ByteBuffer inBuffer) throws IOException {
+    final int toRead = inBuffer.remaining();
+    final byte[] tmp = getTmpBuf();
+    final int n = in.read(tmp, 0, toRead);
+    if (n > 0) {
+      inBuffer.put(tmp, 0, n);
+    }
+    return n;
+  }
+  
+  private byte[] tmpBuf;
+  private byte[] getTmpBuf() {
+    if (tmpBuf == null) {
+      tmpBuf = new byte[bufferSize];
+    }
+    return tmpBuf;
+  }
+  
+  /**
+   * Do the decryption using inBuffer as input and outBuffer as output.
+   * Upon return, inBuffer is cleared; the decrypted data starts at 
+   * outBuffer.position() and ends at outBuffer.limit();
+   */
+  private void decrypt(Decryptor decryptor, ByteBuffer inBuffer, 
+      ByteBuffer outBuffer, byte padding) throws IOException {
+    Preconditions.checkState(inBuffer.position() >= padding);
+    if(inBuffer.position() == padding) {
+      // There is no real data in inBuffer.
+      return;
+    }
+    inBuffer.flip();
+    outBuffer.clear();
+    decryptor.decrypt(inBuffer, outBuffer);
+    inBuffer.clear();
+    outBuffer.flip();
+    if (padding > 0) {
+      /*
+       * The plain text and cipher text have a 1:1 mapping, they start at the 
+       * same position.
+       */
+      outBuffer.position(padding);
+    }
+  }
+  
+  /**
+   * This method is executed immediately after decryption. Check whether 
+   * decryptor should be updated and recalculate padding if needed. 
+   */
+  private byte afterDecryption(Decryptor decryptor, ByteBuffer inBuffer, 
+      long position, byte[] iv) throws IOException {
+    byte padding = 0;
+    if (decryptor.isContextReset()) {
+      /*
+       * This code is generally not executed since the decryptor usually 
+       * maintains decryption context (e.g. the counter) internally. However, 
+       * some implementations can't maintain context so a re-init is necessary 
+       * after each decryption call.
+       */
+      updateDecryptor(decryptor, position, iv);
+      padding = getPadding(position);
+      inBuffer.position(padding);
+    }
+    return padding;
+  }
+  
+  private long getCounter(long position) {
+    return position / codec.getCipherSuite().getAlgorithmBlockSize();
+  }
+  
+  private byte getPadding(long position) {
+    return (byte)(position % codec.getCipherSuite().getAlgorithmBlockSize());
+  }
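A worked example of the two helpers above, assuming the 16-byte AES block size of AES/CTR/NoPadding:

    public class CtrPositionSketch {
      public static void main(String[] args) {
        final int blockSize = 16;                 // AES/CTR/NoPadding block size
        long pos = 100;                           // stream position
        long counter = pos / blockSize;           // 6  -> fed into updateDecryptor()/calculateIV()
        byte padding = (byte) (pos % blockSize);  // 4  -> bytes discarded at the block start
        System.out.println(counter + " " + padding);
      }
    }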
+  
+  /** Calculate the counter and iv, update the decryptor. */
+  private void updateDecryptor(Decryptor decryptor, long position, byte[] iv) 
+      throws IOException {
+    final long counter = getCounter(position);
+    codec.calculateIV(initIV, counter, iv);
+    decryptor.init(key, iv);
+  }
+  
+  /**
+   * Reset the underlying stream offset; clear {@link #inBuffer} and 
+   * {@link #outBuffer}. This typically happens during {@link #seek(long)} 
+   * or {@link #skip(long)}.
+   */
+  private void resetStreamOffset(long offset) throws IOException {
+    streamOffset = offset;
+    inBuffer.clear();
+    outBuffer.clear();
+    outBuffer.limit(0);
+    updateDecryptor(decryptor, offset, iv);
+    padding = getPadding(offset);
+    inBuffer.position(padding); // Set proper position for input data.
+  }
+  
+  @Override
+  public void close() throws IOException {
+    if (closed) {
+      return;
+    }
+    
+    super.close();
+    freeBuffers();
+    closed = true;
+  }
+  
+  /** Positioned read. It is thread-safe */
+  @Override
+  public int read(long position, byte[] buffer, int offset, int length)
+      throws IOException {
+    checkStream();
+    try {
+      final int n = ((PositionedReadable) in).read(position, buffer, offset, 
+          length);
+      if (n > 0) {
+        // This operation does not change the current offset of the file
+        decrypt(position, buffer, offset, n);
+      }
+      
+      return n;
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not support " +
+          "positioned read.");
+    }
+  }
+  
+  /**
+   * Decrypt length bytes in buffer starting at offset. Output is also put 
+   * into buffer starting at offset. It is thread-safe.
+   */
+  private void decrypt(long position, byte[] buffer, int offset, int length) 
+      throws IOException {
+    ByteBuffer inBuffer = getBuffer();
+    ByteBuffer outBuffer = getBuffer();
+    Decryptor decryptor = null;
+    try {
+      decryptor = getDecryptor();
+      byte[] iv = initIV.clone();
+      updateDecryptor(decryptor, position, iv);
+      byte padding = getPadding(position);
+      inBuffer.position(padding); // Set proper position for input data.
+      
+      int n = 0;
+      while (n < length) {
+        int toDecrypt = Math.min(length - n, inBuffer.remaining());
+        inBuffer.put(buffer, offset + n, toDecrypt);
+        // Do decryption
+        decrypt(decryptor, inBuffer, outBuffer, padding);
+        
+        outBuffer.get(buffer, offset + n, toDecrypt);
+        n += toDecrypt;
+        padding = afterDecryption(decryptor, inBuffer, position + n, iv);
+      }
+    } finally {
+      returnBuffer(inBuffer);
+      returnBuffer(outBuffer);
+      returnDecryptor(decryptor);
+    }
+  }
+  
+  /** Positioned read fully. It is thread-safe */
+  @Override
+  public void readFully(long position, byte[] buffer, int offset, int length)
+      throws IOException {
+    checkStream();
+    try {
+      ((PositionedReadable) in).readFully(position, buffer, offset, length);
+      if (length > 0) {
+        // This operation does not change the current offset of the file
+        decrypt(position, buffer, offset, length);
+      }
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not support " +
+          "positioned readFully.");
+    }
+  }
+
+  @Override
+  public void readFully(long position, byte[] buffer) throws IOException {
+    readFully(position, buffer, 0, buffer.length);
+  }
+
+  /** Seek to a position. */
+  @Override
+  public void seek(long pos) throws IOException {
+    Preconditions.checkArgument(pos >= 0, "Cannot seek to negative offset.");
+    checkStream();
+    try {
+      /*
+       * If data of target pos in the underlying stream has already been read 
+       * and decrypted in outBuffer, we just need to re-position outBuffer.
+       */
+      if (pos <= streamOffset && pos >= (streamOffset - outBuffer.remaining())) {
+        int forward = (int) (pos - (streamOffset - outBuffer.remaining()));
+        if (forward > 0) {
+          outBuffer.position(outBuffer.position() + forward);
+        }
+      } else {
+        ((Seekable) in).seek(pos);
+        resetStreamOffset(pos);
+      }
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not support " +
+          "seek.");
+    }
+  }
+  
+  /** Skip n bytes */
+  @Override
+  public long skip(long n) throws IOException {
+    Preconditions.checkArgument(n >= 0, "Negative skip length.");
+    checkStream();
+    
+    if (n == 0) {
+      return 0;
+    } else if (n <= outBuffer.remaining()) {
+      int pos = outBuffer.position() + (int) n;
+      outBuffer.position(pos);
+      return n;
+    } else {
+      /*
+       * Subtract outBuffer.remaining() to see how many bytes we need to 
+       * skip in the underlying stream. Add outBuffer.remaining() to the 
+       * actual number of skipped bytes in the underlying stream to get the 
+       * number of skipped bytes from the user's point of view.
+       */
+      n -= outBuffer.remaining();
+      long skipped = in.skip(n);
+      if (skipped < 0) {
+        skipped = 0;
+      }
+      long pos = streamOffset + skipped;
+      skipped += outBuffer.remaining();
+      resetStreamOffset(pos);
+      return skipped;
+    }
+  }
+
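Note: seek() and skip() above first spend whatever is still sitting decrypted in outBuffer and only then move the wrapped stream. The bookkeeping is easiest to see on plain numbers; every value below is made up for illustration:

public class SkipAccounting {
  public static void main(String[] args) {
    long streamOffset = 8192;   // bytes already read and decrypted from the wrapped stream
    int outRemaining = 512;     // decrypted bytes still unread in outBuffer
    long userPos = streamOffset - outRemaining;        // what getPos() reports: 7680
    long n = 2000;                                     // bytes the caller asks to skip
    long skipUnderneath = n - outRemaining;            // 1488 must come from the wrapped stream
    long reportedSkip = skipUnderneath + outRemaining; // 2000, assuming the wrapped stream skips all 1488
    System.out.println(userPos + ", " + skipUnderneath + ", " + reportedSkip);
  }
}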
+  /** Get underlying stream position. */
+  @Override
+  public long getPos() throws IOException {
+    checkStream();
+    // Equals: ((Seekable) in).getPos() - outBuffer.remaining()
+    return streamOffset - outBuffer.remaining();
+  }
+  
+  /** ByteBuffer read. */
+  @Override
+  public int read(ByteBuffer buf) throws IOException {
+    checkStream();
+    if (in instanceof ByteBufferReadable) {
+      final int unread = outBuffer.remaining();
+      if (unread > 0) { // Have unread decrypted data in buffer.
+        int toRead = buf.remaining();
+        if (toRead <= unread) {
+          final int limit = outBuffer.limit();
+          outBuffer.limit(outBuffer.position() + toRead);
+          buf.put(outBuffer);
+          outBuffer.limit(limit);
+          return toRead;
+        } else {
+          buf.put(outBuffer);
+        }
+      }
+      
+      final int pos = buf.position();
+      final int n = ((ByteBufferReadable) in).read(buf);
+      if (n > 0) {
+        streamOffset += n; // Read n bytes
+        decrypt(buf, n, pos);
+      }
+      return n;
+    }
+
+    throw new UnsupportedOperationException("ByteBuffer read unsupported " +
+        "by input stream.");
+  }
+  
+  /**
+   * Decrypt all data in buf: a total of n bytes starting at the given position.
+   * The output is written back into buf at the same start position.
+   * buf.position() and buf.limit() should be unchanged after decryption.
+   */
+  private void decrypt(ByteBuffer buf, int n, int start) 
+      throws IOException {
+    final int pos = buf.position();
+    final int limit = buf.limit();
+    int len = 0;
+    while (len < n) {
+      buf.position(start + len);
+      buf.limit(start + len + Math.min(n - len, inBuffer.remaining()));
+      inBuffer.put(buf);
+      // Do decryption
+      try {
+        decrypt(decryptor, inBuffer, outBuffer, padding);
+        buf.position(start + len);
+        buf.limit(limit);
+        len += outBuffer.remaining();
+        buf.put(outBuffer);
+      } finally {
+        padding = afterDecryption(decryptor, inBuffer, streamOffset - (n - len), iv);
+      }
+    }
+    buf.position(pos);
+  }
+  
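Note: the ByteBuffer decrypt above works in place by repeatedly narrowing buf with position()/limit(), copying one chunk, and then restoring the original window. A self-contained sketch of that save/narrow/restore idiom on a heap buffer (chunk size and contents are arbitrary):

import java.nio.ByteBuffer;

public class BufferWindowing {
  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.allocate(32);
    for (int i = 0; i < 32; i++) {
      buf.put((byte) i);
    }
    buf.flip();                      // 32 readable bytes

    final int pos = buf.position();  // remember the original window
    final int limit = buf.limit();
    final int chunk = 8;
    for (int off = 0; off < 32; off += chunk) {
      buf.position(off);
      buf.limit(off + chunk);        // expose exactly one chunk
      byte[] piece = new byte[buf.remaining()];
      buf.get(piece);                // consume it
      buf.limit(limit);              // widen the window again
    }
    buf.position(pos);               // leave position/limit as we found them
    System.out.println(buf.position() + "/" + buf.limit()); // prints 0/32
  }
}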
+  @Override
+  public int available() throws IOException {
+    checkStream();
+    
+    return in.available() + outBuffer.remaining();
+  }
+
+  @Override
+  public boolean markSupported() {
+    return false;
+  }
+  
+  @Override
+  public void mark(int readLimit) {
+  }
+  
+  @Override
+  public void reset() throws IOException {
+    throw new IOException("Mark/reset not supported");
+  }
+
+  @Override
+  public boolean seekToNewSource(long targetPos) throws IOException {
+    Preconditions.checkArgument(targetPos >= 0, 
+        "Cannot seek to negative offset.");
+    checkStream();
+    try {
+      boolean result = ((Seekable) in).seekToNewSource(targetPos);
+      resetStreamOffset(targetPos);
+      return result;
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not support " +
+          "seekToNewSource.");
+    }
+  }
+
+  @Override
+  public ByteBuffer read(ByteBufferPool bufferPool, int maxLength,
+      EnumSet<ReadOption> opts) throws IOException,
+      UnsupportedOperationException {
+    checkStream();
+    try {
+      if (outBuffer.remaining() > 0) {
+        // Have some decrypted data unread, need to reset.
+        ((Seekable) in).seek(getPos());
+        resetStreamOffset(getPos());
+      }
+      final ByteBuffer buffer = ((HasEnhancedByteBufferAccess) in).
+          read(bufferPool, maxLength, opts);
+      if (buffer != null) {
+        final int n = buffer.remaining();
+        if (n > 0) {
+          streamOffset += buffer.remaining(); // Read n bytes
+          final int pos = buffer.position();
+          decrypt(buffer, n, pos);
+        }
+      }
+      return buffer;
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not support " + 
+          "enhanced byte buffer access.");
+    }
+  }
+
+  @Override
+  public void releaseBuffer(ByteBuffer buffer) {
+    try {
+      ((HasEnhancedByteBufferAccess) in).releaseBuffer(buffer);
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not support " + 
+          "release buffer.");
+    }
+  }
+
+  @Override
+  public void setReadahead(Long readahead) throws IOException,
+      UnsupportedOperationException {
+    try {
+      ((CanSetReadahead) in).setReadahead(readahead);
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not support " +
+          "setting the readahead caching strategy.");
+    }
+  }
+
+  @Override
+  public void setDropBehind(Boolean dropCache) throws IOException,
+      UnsupportedOperationException {
+    try {
+      ((CanSetDropBehind) in).setDropBehind(dropCache);
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not " +
+          "support setting the drop-behind caching setting.");
+    }
+  }
+
+  @Override
+  public FileDescriptor getFileDescriptor() throws IOException {
+    if (in instanceof HasFileDescriptor) {
+      return ((HasFileDescriptor) in).getFileDescriptor();
+    } else if (in instanceof FileInputStream) {
+      return ((FileInputStream) in).getFD();
+    } else {
+      return null;
+    }
+  }
+  
+  @Override
+  public int read() throws IOException {
+    return (read(oneByteBuf, 0, 1) == -1) ? -1 : (oneByteBuf[0] & 0xff);
+  }
+  
+  private void checkStream() throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }
+  }
+  
+  /** Get direct buffer from pool */
+  private ByteBuffer getBuffer() {
+    ByteBuffer buffer = bufferPool.poll();
+    if (buffer == null) {
+      buffer = ByteBuffer.allocateDirect(bufferSize);
+    }
+    
+    return buffer;
+  }
+  
+  /** Return direct buffer to pool */
+  private void returnBuffer(ByteBuffer buf) {
+    if (buf != null) {
+      buf.clear();
+      bufferPool.add(buf);
+    }
+  }
+  
+  /** Forcibly free the direct buffers. */
+  private void freeBuffers() {
+    CryptoStreamUtils.freeDB(inBuffer);
+    CryptoStreamUtils.freeDB(outBuffer);
+    cleanBufferPool();
+  }
+  
+  /** Clean direct buffer pool */
+  private void cleanBufferPool() {
+    ByteBuffer buf;
+    while ((buf = bufferPool.poll()) != null) {
+      CryptoStreamUtils.freeDB(buf);
+    }
+  }
+  
+  /** Get decryptor from pool */
+  private Decryptor getDecryptor() throws IOException {
+    Decryptor decryptor = decryptorPool.poll();
+    if (decryptor == null) {
+      try {
+        decryptor = codec.createDecryptor();
+      } catch (GeneralSecurityException e) {
+        throw new IOException(e);
+      }
+    }
+    
+    return decryptor;
+  }
+  
+  /** Return decryptor to pool */
+  private void returnDecryptor(Decryptor decryptor) {
+    if (decryptor != null) {
+      decryptorPool.add(decryptor);
+    }
+  }
+}
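Note: getBuffer/returnBuffer/cleanBufferPool above give each positioned read its own scratch buffers, so concurrent preads never contend on the stream's inBuffer/outBuffer. A generic sketch of that pool shape (the class name and the ConcurrentLinkedQueue are assumptions for illustration, not the field declared earlier in the file):

import java.nio.ByteBuffer;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

public class DirectBufferPoolSketch {
  private final Queue<ByteBuffer> pool = new ConcurrentLinkedQueue<ByteBuffer>();
  private final int bufferSize;

  public DirectBufferPoolSketch(int bufferSize) {
    this.bufferSize = bufferSize;
  }

  /** Take a buffer from the pool, allocating a new direct buffer when the pool is empty. */
  public ByteBuffer get() {
    ByteBuffer buf = pool.poll();
    return (buf != null) ? buf : ByteBuffer.allocateDirect(bufferSize);
  }

  /** Clear the buffer and hand it back so a later caller can reuse it. */
  public void release(ByteBuffer buf) {
    if (buf != null) {
      buf.clear();
      pool.add(buf);
    }
  }
}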

+ 280 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java

@@ -0,0 +1,280 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.FilterOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+import java.security.GeneralSecurityException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CanSetDropBehind;
+import org.apache.hadoop.fs.Syncable;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * CryptoOutputStream encrypts data. It is not thread-safe. AES CTR mode is
+ * required in order to ensure that the plain text and cipher text have a 1:1
+ * mapping. The encryption is buffer based. The key points of the encryption are
+ * (1) calculating the counter and (2) the padding from the stream position:
+ * <p/>
+ * counter = base + pos/(algorithm blocksize); 
+ * padding = pos%(algorithm blocksize); 
+ * <p/>
+ * The underlying stream offset is maintained as state.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class CryptoOutputStream extends FilterOutputStream implements 
+    Syncable, CanSetDropBehind {
+  private static final byte[] oneByteBuf = new byte[1];
+  private final CryptoCodec codec;
+  private final Encryptor encryptor;
+  private final int bufferSize;
+  
+  /**
+   * Input data buffer. The data starts at inBuffer.position() and ends at 
+   * inBuffer.limit().
+   */
+  private ByteBuffer inBuffer;
+  
+  /**
+   * Encrypted data buffer. The data starts at outBuffer.position() and ends at 
+   * outBuffer.limit();
+   */
+  private ByteBuffer outBuffer;
+  private long streamOffset = 0; // Underlying stream offset.
+  
+  /**
+   * Padding = pos%(algorithm blocksize); Padding is put into {@link #inBuffer} 
+   * before any other data goes in. The purpose of padding is to put the input
+   * data at the proper position.
+   */
+  private byte padding;
+  private boolean closed;
+  private final byte[] key;
+  private final byte[] initIV;
+  private byte[] iv;
+  
+  public CryptoOutputStream(OutputStream out, CryptoCodec codec, 
+      int bufferSize, byte[] key, byte[] iv) throws IOException {
+    this(out, codec, bufferSize, key, iv, 0);
+  }
+  
+  public CryptoOutputStream(OutputStream out, CryptoCodec codec, 
+      int bufferSize, byte[] key, byte[] iv, long streamOffset) 
+      throws IOException {
+    super(out);
+    this.bufferSize = CryptoStreamUtils.checkBufferSize(codec, bufferSize);
+    this.codec = codec;
+    this.key = key.clone();
+    this.initIV = iv.clone();
+    this.iv = iv.clone();
+    inBuffer = ByteBuffer.allocateDirect(this.bufferSize);
+    outBuffer = ByteBuffer.allocateDirect(this.bufferSize);
+    this.streamOffset = streamOffset;
+    try {
+      encryptor = codec.createEncryptor();
+    } catch (GeneralSecurityException e) {
+      throw new IOException(e);
+    }
+    updateEncryptor();
+  }
+  
+  public CryptoOutputStream(OutputStream out, CryptoCodec codec, 
+      byte[] key, byte[] iv) throws IOException {
+    this(out, codec, key, iv, 0);
+  }
+  
+  public CryptoOutputStream(OutputStream out, CryptoCodec codec, 
+      byte[] key, byte[] iv, long streamOffset) throws IOException {
+    this(out, codec, CryptoStreamUtils.getBufferSize(codec.getConf()), 
+        key, iv, streamOffset);
+  }
+  
+  public OutputStream getWrappedStream() {
+    return out;
+  }
+  
+  /**
+   * Encryption is buffer based.
+   * If there is enough room in {@link #inBuffer}, then write to this buffer.
+   * If {@link #inBuffer} is full, then do encryption and write data to the
+   * underlying stream.
+   * @param b the data.
+   * @param off the start offset in the data.
+   * @param len the number of bytes to write.
+   * @throws IOException
+   */
+  @Override
+  public void write(byte[] b, int off, int len) throws IOException {
+    checkStream();
+    if (b == null) {
+      throw new NullPointerException();
+    } else if (off < 0 || len < 0 || off > b.length || 
+        len > b.length - off) {
+      throw new IndexOutOfBoundsException();
+    }
+    while (len > 0) {
+      final int remaining = inBuffer.remaining();
+      if (len < remaining) {
+        inBuffer.put(b, off, len);
+        len = 0;
+      } else {
+        inBuffer.put(b, off, remaining);
+        off += remaining;
+        len -= remaining;
+        encrypt();
+      }
+    }
+  }
+  
+  /**
+   * Do the encryption, input is {@link #inBuffer} and output is 
+   * {@link #outBuffer}.
+   */
+  private void encrypt() throws IOException {
+    Preconditions.checkState(inBuffer.position() >= padding);
+    if (inBuffer.position() == padding) {
+      // There is no real data in the inBuffer.
+      return;
+    }
+    inBuffer.flip();
+    outBuffer.clear();
+    encryptor.encrypt(inBuffer, outBuffer);
+    inBuffer.clear();
+    outBuffer.flip();
+    if (padding > 0) {
+      /*
+       * The plain text and cipher text have a 1:1 mapping, they start at the 
+       * same position.
+       */
+      outBuffer.position(padding);
+      padding = 0;
+    }
+    final int len = outBuffer.remaining();
+    
+    /*
+     * If the underlying stream supports {@link ByteBuffer} writes in the
+     * future, this copy through a temporary byte[] should be revisited.
+     */
+    final byte[] tmp = getTmpBuf();
+    outBuffer.get(tmp, 0, len);
+    out.write(tmp, 0, len);
+    
+    streamOffset += len;
+    if (encryptor.isContextReset()) {
+      /*
+       * This code is generally not executed since the encryptor usually
+       * maintains encryption context (e.g. the counter) internally. However,
+       * some implementations can't maintain context so a re-init is necessary
+       * after each encryption call.
+       */
+      updateEncryptor();
+    }
+  }
+  
+  /** Update the {@link #encryptor}: calculate counter and {@link #padding}. */
+  private void updateEncryptor() throws IOException {
+    final long counter =
+        streamOffset / codec.getCipherSuite().getAlgorithmBlockSize();
+    padding =
+        (byte)(streamOffset % codec.getCipherSuite().getAlgorithmBlockSize());
+    inBuffer.position(padding); // Set proper position for input data.
+    codec.calculateIV(initIV, counter, iv);
+    encryptor.init(key, iv);
+  }
+  
+  private byte[] tmpBuf;
+  private byte[] getTmpBuf() {
+    if (tmpBuf == null) {
+      tmpBuf = new byte[bufferSize];
+    }
+    return tmpBuf;
+  }
+  
+  @Override
+  public void close() throws IOException {
+    if (closed) {
+      return;
+    }
+    
+    super.close();
+    freeBuffers();
+    closed = true;
+  }
+  
+  /**
+   * To flush, we need to encrypt the data in the buffer and write to the 
+   * underlying stream, then do the flush.
+   */
+  @Override
+  public void flush() throws IOException {
+    checkStream();
+    encrypt();
+    super.flush();
+  }
+  
+  @Override
+  public void write(int b) throws IOException {
+    oneByteBuf[0] = (byte)(b & 0xff);
+    write(oneByteBuf, 0, oneByteBuf.length);
+  }
+  
+  private void checkStream() throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }
+  }
+  
+  @Override
+  public void setDropBehind(Boolean dropCache) throws IOException,
+      UnsupportedOperationException {
+    try {
+      ((CanSetDropBehind) out).setDropBehind(dropCache);
+    } catch (ClassCastException e) {
+      throw new UnsupportedOperationException("This stream does not " +
+          "support setting the drop-behind caching.");
+    }
+  }
+
+  @Override
+  public void hflush() throws IOException {
+    flush();
+    if (out instanceof Syncable) {
+      ((Syncable)out).hflush();
+    }
+  }
+
+  @Override
+  public void hsync() throws IOException {
+    flush();
+    if (out instanceof Syncable) {
+      ((Syncable)out).hsync();
+    }
+  }
+  
+  /** Forcibly free the direct buffers. */
+  private void freeBuffers() {
+    CryptoStreamUtils.freeDB(inBuffer);
+    CryptoStreamUtils.freeDB(outBuffer);
+  }
+}
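Note: the class javadoc above reduces everything to counter = pos/blocksize and padding = pos%blocksize. A self-contained JCE sketch of why that is enough for AES-CTR: re-initializing at the block containing an arbitrary offset and discarding `padding` keystream bytes reproduces exactly what a straight-through encryption would have written from that offset. The IV-increment helper below is an illustration only and is not the codec's calculateIV.

import java.security.SecureRandom;
import java.util.Arrays;
import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;

public class CtrMidBlockDemo {
  /** Add a non-negative counter to a 16-byte big-endian IV. */
  static byte[] addCounter(byte[] iv, long counter) {
    byte[] out = iv.clone();
    for (int i = out.length - 1; i >= 0 && counter != 0; i--) {
      long sum = (out[i] & 0xFFL) + (counter & 0xFF);
      out[i] = (byte) sum;
      counter = (counter >>> 8) + (sum >>> 8);   // carry into the next byte
    }
    return out;
  }

  public static void main(String[] args) throws Exception {
    SecureRandom rnd = new SecureRandom();
    byte[] key = new byte[16], iv = new byte[16], plain = new byte[48];
    rnd.nextBytes(key); rnd.nextBytes(iv); rnd.nextBytes(plain);

    Cipher full = Cipher.getInstance("AES/CTR/NoPadding");
    full.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(key, "AES"), new IvParameterSpec(iv));
    byte[] whole = full.doFinal(plain);          // one-shot ciphertext of all 48 bytes

    int pos = 20;                                // pretend we restart writing at offset 20
    long counter = pos / 16;                     // block containing the offset
    int padding = pos % 16;                      // bytes of that block before our data

    Cipher mid = Cipher.getInstance("AES/CTR/NoPadding");
    mid.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(key, "AES"),
        new IvParameterSpec(addCounter(iv, counter)));
    mid.update(new byte[padding]);               // burn the first 'padding' keystream bytes
    byte[] tail = mid.doFinal(Arrays.copyOfRange(plain, pos, plain.length));

    // prints true: bytes 20..47 match the straight-through encryption
    System.out.println(Arrays.equals(tail, Arrays.copyOfRange(whole, pos, whole.length)));
  }
}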

+ 70 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java

@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Seekable;
+
+import com.google.common.base.Preconditions;
+
+@InterfaceAudience.Private
+public class CryptoStreamUtils {
+  private static final int MIN_BUFFER_SIZE = 512;
+  
+  /** Forcibly free the direct buffer. */
+  public static void freeDB(ByteBuffer buffer) {
+    if (buffer instanceof sun.nio.ch.DirectBuffer) {
+      final sun.misc.Cleaner bufferCleaner =
+          ((sun.nio.ch.DirectBuffer) buffer).cleaner();
+      bufferCleaner.clean();
+    }
+  }
+  
+  /** Read crypto buffer size */
+  public static int getBufferSize(Configuration conf) {
+    return conf.getInt(HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY, 
+        HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT);
+  }
+  
+  /** Check and floor buffer size */
+  public static int checkBufferSize(CryptoCodec codec, int bufferSize) {
+    Preconditions.checkArgument(bufferSize >= MIN_BUFFER_SIZE, 
+        "Minimum value of buffer size is " + MIN_BUFFER_SIZE + ".");
+    return bufferSize - bufferSize % codec.getCipherSuite()
+        .getAlgorithmBlockSize();
+  }
+  
+  /**
+   * If the input stream is {@link org.apache.hadoop.fs.Seekable}, return its
+   * current position; otherwise return 0.
+   */
+  public static long getInputStreamOffset(InputStream in) throws IOException {
+    if (in instanceof Seekable) {
+      return ((Seekable) in).getPos();
+    }
+    return 0;
+  }
+}
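Note: checkBufferSize above floors the configured size down to a multiple of the cipher block size, so a full buffer always ends exactly on a block boundary. The rounding itself is plain integer arithmetic; a 16-byte block size and the sample sizes are assumptions:

public class BufferSizeFloor {
  public static void main(String[] args) {
    final int blockSize = 16;              // assumed AES block size
    int[] requested = {512, 4096, 8190};
    for (int size : requested) {
      int floored = size - size % blockSize;       // same expression as checkBufferSize
      System.out.println(size + " -> " + floored); // 512 -> 512, 4096 -> 4096, 8190 -> 8176
    }
  }
}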

+ 72 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Decryptor.java

@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface Decryptor {
+  
+  /**
+   * Initialize the decryptor and the internal decryption context.
+   * @param key decryption key.
+   * @param iv decryption initialization vector
+   * @throws IOException if initialization fails
+   */
+  public void init(byte[] key, byte[] iv) throws IOException;
+  
+  /**
+   * Indicate whether the decryption context is reset.
+   * <p/>
+   * Certain modes, like CTR, require a different IV depending on the 
+   * position in the stream. Generally, the decryptor maintains any necessary
+   * context for calculating the IV and counter so that no reinit is necessary 
+   * during the decryption. Reinit before each operation is inefficient.
+   * @return boolean whether context is reset.
+   */
+  public boolean isContextReset();
+  
+  /**
+   * This presents a direct interface decrypting with direct ByteBuffers.
+   * <p/>
+   * This function does not always decrypt the entire buffer and may potentially
+   * need to be called multiple times to process an entire buffer. The object 
+   * may hold the decryption context internally.
+   * <p/>
+   * Some implementations may require sufficient space in the destination 
+   * buffer to decrypt the entire input buffer.
+   * <p/>
+   * Upon return, inBuffer.position() will be advanced by the number of bytes
+   * read and outBuffer.position() by bytes written. Implementations should 
+   * not modify inBuffer.limit() and outBuffer.limit().
+   * <p/>
+   * @param inBuffer a direct {@link ByteBuffer} to read from. inBuffer may 
+   * not be null and inBuffer.remaining() must be > 0
+   * @param outBuffer a direct {@link ByteBuffer} to write to. outBuffer may 
+   * not be null and outBuffer.remaining() must be > 0
+   * @throws IOException if decryption fails
+   */
+  public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) 
+      throws IOException;
+}

+ 71 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/Encryptor.java

@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface Encryptor {
+  
+  /**
+   * Initialize the encryptor and the internal encryption context.
+   * @param key encryption key.
+   * @param iv encryption initialization vector
+   * @throws IOException if initialization fails
+   */
+  public void init(byte[] key, byte[] iv) throws IOException;
+  
+  /**
+   * Indicate whether the encryption context is reset.
+   * <p/>
+   * Certain modes, like CTR, require a different IV depending on the
+   * position in the stream. Generally, the encryptor maintains any necessary
+   * context for calculating the IV and counter so that no reinit is necessary
+   * during the encryption. Reinit before each operation is inefficient. 
+   * @return boolean whether context is reset.
+   */
+  public boolean isContextReset();
+  
+  /**
+   * This presents a direct interface encrypting with direct ByteBuffers.
+   * <p/>
+   * This function does not always encrypt the entire buffer and may potentially
+   * need to be called multiple times to process an entire buffer. The object 
+   * may hold the encryption context internally.
+   * <p/>
+   * Some implementations may require sufficient space in the destination 
+   * buffer to encrypt the entire input buffer.
+   * <p/>
+   * Upon return, inBuffer.position() will be advanced by the number of bytes
+   * read and outBuffer.position() by bytes written. Implementations should
+   * not modify inBuffer.limit() and outBuffer.limit().
+   * <p/>
+   * @param inBuffer a direct {@link ByteBuffer} to read from. inBuffer may 
+   * not be null and inBuffer.remaining() must be > 0
+   * @param outBuffer a direct {@link ByteBuffer} to write to. outBuffer may 
+   * not be null and outBuffer.remaining() must be > 0
+   * @throws IOException if encryption fails
+   */
+  public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) 
+      throws IOException;
+}
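Note: Decryptor and Encryptor above pin down a buffer contract more than an algorithm: consume inBuffer, advance both positions, leave both limits alone, and report whether the call lost its context. A deliberately toy byte-wise XOR implementation makes that contract concrete; it is only an illustration and not a real cipher.

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.hadoop.crypto.Decryptor;
import org.apache.hadoop.crypto.Encryptor;

public class XorCodecSketch implements Encryptor, Decryptor {
  private byte keyByte;

  @Override
  public void init(byte[] key, byte[] iv) throws IOException {
    this.keyByte = key[0];            // toy: only the first key byte is used
  }

  @Override
  public boolean isContextReset() {
    return false;                     // a byte-wise XOR has no context to lose
  }

  @Override
  public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) throws IOException {
    process(inBuffer, outBuffer);
  }

  @Override
  public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer) throws IOException {
    process(inBuffer, outBuffer);     // XOR is its own inverse
  }

  private void process(ByteBuffer in, ByteBuffer out) {
    while (in.hasRemaining() && out.hasRemaining()) {
      out.put((byte) (in.get() ^ keyByte));   // positions advance, limits stay untouched
    }
  }
}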

+ 165 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java

@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.security.GeneralSecurityException;
+import java.security.SecureRandom;
+
+import javax.crypto.Cipher;
+import javax.crypto.spec.IvParameterSpec;
+import javax.crypto.spec.SecretKeySpec;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+
+import com.google.common.base.Preconditions;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT;
+
+/**
+ * Implement the AES-CTR crypto codec using JCE provider.
+ */
+@InterfaceAudience.Private
+public class JceAesCtrCryptoCodec extends AesCtrCryptoCodec {
+  private static final Log LOG =
+      LogFactory.getLog(JceAesCtrCryptoCodec.class.getName());
+  
+  private Configuration conf;
+  private String provider;
+  private SecureRandom random;
+
+  public JceAesCtrCryptoCodec() {
+  }
+  
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+  
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+    provider = conf.get(HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY);
+    final String secureRandomAlg = conf.get(
+        HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY, 
+        HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT);
+    try {
+      random = (provider != null) ? 
+          SecureRandom.getInstance(secureRandomAlg, provider) : 
+            SecureRandom.getInstance(secureRandomAlg);
+    } catch (GeneralSecurityException e) {
+      LOG.warn(e.getMessage());
+      random = new SecureRandom();
+    }
+  }
+
+  @Override
+  public Encryptor createEncryptor() throws GeneralSecurityException {
+    return new JceAesCtrCipher(Cipher.ENCRYPT_MODE, provider);
+  }
+
+  @Override
+  public Decryptor createDecryptor() throws GeneralSecurityException {
+    return new JceAesCtrCipher(Cipher.DECRYPT_MODE, provider);
+  }
+  
+  @Override
+  public void generateSecureRandom(byte[] bytes) {
+    random.nextBytes(bytes);
+  }  
+  
+  private static class JceAesCtrCipher implements Encryptor, Decryptor {
+    private final Cipher cipher;
+    private final int mode;
+    private boolean contextReset = false;
+    
+    public JceAesCtrCipher(int mode, String provider) 
+        throws GeneralSecurityException {
+      this.mode = mode;
+      if (provider == null || provider.isEmpty()) {
+        cipher = Cipher.getInstance(SUITE.getName());
+      } else {
+        cipher = Cipher.getInstance(SUITE.getName(), provider);
+      }
+    }
+
+    @Override
+    public void init(byte[] key, byte[] iv) throws IOException {
+      Preconditions.checkNotNull(key);
+      Preconditions.checkNotNull(iv);
+      contextReset = false;
+      try {
+        cipher.init(mode, new SecretKeySpec(key, "AES"), 
+            new IvParameterSpec(iv));
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
+    }
+
+    /**
+     * AES-CTR will consume all of the input data. It requires enough space in 
+     * the destination buffer to encrypt entire input buffer.
+     */
+    @Override
+    public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)
+        throws IOException {
+      process(inBuffer, outBuffer);
+    }
+    
+    /**
+     * AES-CTR will consume all of the input data. It requires enough space in
+     * the destination buffer to decrypt entire input buffer.
+     */
+    @Override
+    public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)
+        throws IOException {
+      process(inBuffer, outBuffer);
+    }
+    
+    private void process(ByteBuffer inBuffer, ByteBuffer outBuffer)
+        throws IOException {
+      try {
+        int inputSize = inBuffer.remaining();
+        // Cipher#update will maintain crypto context.
+        int n = cipher.update(inBuffer, outBuffer);
+        if (n < inputSize) {
+          /**
+           * Typically code will not get here. Cipher#update will consume all 
+           * input data and put result in outBuffer. 
+           * Cipher#doFinal will reset the crypto context.
+           */
+          contextReset = true;
+          cipher.doFinal(inBuffer, outBuffer);
+        }
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
+    }
+    
+    @Override
+    public boolean isContextReset() {
+      return contextReset;
+    }
+  }
+}
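Note: JceAesCtrCipher above leans on Cipher#update keeping the CTR context across calls and only flags contextReset in the rare doFinal path. A small sketch showing that chunked update/doFinal produces the same ciphertext as a single one-shot call (key, IV and data are throwaway random test values; the 16-byte split is chosen to stay block-aligned):

import java.security.SecureRandom;
import java.util.Arrays;
import javax.crypto.Cipher;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;

public class CtrUpdateKeepsContext {
  public static void main(String[] args) throws Exception {
    SecureRandom rnd = new SecureRandom();
    byte[] key = new byte[16], iv = new byte[16], data = new byte[48];
    rnd.nextBytes(key); rnd.nextBytes(iv); rnd.nextBytes(data);

    Cipher oneShot = Cipher.getInstance("AES/CTR/NoPadding");
    oneShot.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(key, "AES"), new IvParameterSpec(iv));
    byte[] expected = oneShot.doFinal(data);

    Cipher chunked = Cipher.getInstance("AES/CTR/NoPadding");
    chunked.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(key, "AES"), new IvParameterSpec(iv));
    byte[] first = chunked.update(Arrays.copyOfRange(data, 0, 16));    // counter state is kept
    byte[] second = chunked.doFinal(Arrays.copyOfRange(data, 16, 48)); // continues where update stopped

    byte[] joined = new byte[first.length + second.length];
    System.arraycopy(first, 0, joined, 0, first.length);
    System.arraycopy(second, 0, joined, first.length, second.length);
    System.out.println(Arrays.equals(expected, joined)); // prints true
  }
}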

+ 164 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java

@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.security.GeneralSecurityException;
+import java.security.SecureRandom;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.crypto.random.OsSecureRandom;
+import org.apache.hadoop.util.ReflectionUtils;
+
+/**
+ * Implement the AES-CTR crypto codec using JNI into OpenSSL.
+ */
+@InterfaceAudience.Private
+public class OpensslAesCtrCryptoCodec extends AesCtrCryptoCodec {
+  private static final Log LOG =
+      LogFactory.getLog(OpensslAesCtrCryptoCodec.class.getName());
+
+  private Configuration conf;
+  private Random random;
+  
+  public OpensslAesCtrCryptoCodec() {
+    String loadingFailureReason = OpensslCipher.getLoadingFailureReason();
+    if (loadingFailureReason != null) {
+      throw new RuntimeException(loadingFailureReason);
+    }
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+    final Class<? extends Random> klass = conf.getClass(
+        HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY, OsSecureRandom.class, 
+        Random.class);
+    try {
+      random = ReflectionUtils.newInstance(klass, conf);
+    } catch (Exception e) {
+      LOG.info("Unable to use " + klass.getName() + ".  Falling back to " +
+          "Java SecureRandom.", e);
+      this.random = new SecureRandom();
+    }
+  }
+
+  @Override
+  protected void finalize() throws Throwable {
+    try {
+      Closeable r = (Closeable) this.random;
+      r.close();
+    } catch (ClassCastException e) {
+    }
+    super.finalize();
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  public Encryptor createEncryptor() throws GeneralSecurityException {
+    return new OpensslAesCtrCipher(OpensslCipher.ENCRYPT_MODE);
+  }
+
+  @Override
+  public Decryptor createDecryptor() throws GeneralSecurityException {
+    return new OpensslAesCtrCipher(OpensslCipher.DECRYPT_MODE);
+  }
+  
+  @Override
+  public void generateSecureRandom(byte[] bytes) {
+    random.nextBytes(bytes);
+  }
+  
+  private static class OpensslAesCtrCipher implements Encryptor, Decryptor {
+    private final OpensslCipher cipher;
+    private final int mode;
+    private boolean contextReset = false;
+    
+    public OpensslAesCtrCipher(int mode) throws GeneralSecurityException {
+      this.mode = mode;
+      cipher = OpensslCipher.getInstance(SUITE.getName());
+    }
+
+    @Override
+    public void init(byte[] key, byte[] iv) throws IOException {
+      Preconditions.checkNotNull(key);
+      Preconditions.checkNotNull(iv);
+      contextReset = false;
+      cipher.init(mode, key, iv);
+    }
+    
+    /**
+     * AES-CTR will consume all of the input data. It requires enough space in 
+     * the destination buffer to encrypt entire input buffer.
+     */
+    @Override
+    public void encrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)
+        throws IOException {
+      process(inBuffer, outBuffer);
+    }
+    
+    /**
+     * AES-CTR will consume all of the input data. It requires enough space in
+     * the destination buffer to decrypt entire input buffer.
+     */
+    @Override
+    public void decrypt(ByteBuffer inBuffer, ByteBuffer outBuffer)
+        throws IOException {
+      process(inBuffer, outBuffer);
+    }
+    
+    private void process(ByteBuffer inBuffer, ByteBuffer outBuffer)
+        throws IOException {
+      try {
+        int inputSize = inBuffer.remaining();
+        // OpensslCipher#update will maintain crypto context.
+        int n = cipher.update(inBuffer, outBuffer);
+        if (n < inputSize) {
+          /**
+           * Typically code will not get here. OpensslCipher#update will 
+           * consume all input data and put result in outBuffer. 
+           * OpensslCipher#doFinal will reset the crypto context.
+           */
+          contextReset = true;
+          cipher.doFinal(outBuffer);
+        }
+      } catch (Exception e) {
+        throw new IOException(e);
+      }
+    }
+    
+    @Override
+    public boolean isContextReset() {
+      return contextReset;
+    }
+  }
+}
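Note: setConf above instantiates whatever Random implementation is configured and quietly degrades to SecureRandom when that fails. A generic sketch of the same load-or-fall-back pattern using plain JDK reflection (the class names below are examples; this is not Hadoop's ReflectionUtils):

import java.security.SecureRandom;
import java.util.Random;

public class RandomWithFallback {
  /** Try to instantiate the named Random subclass; fall back to SecureRandom on any failure. */
  static Random newRandom(String className) {
    try {
      Class<? extends Random> klass = Class.forName(className).asSubclass(Random.class);
      return klass.getDeclaredConstructor().newInstance();
    } catch (Exception e) {
      // Mirrors the codec: degrade gracefully rather than fail hard.
      return new SecureRandom();
    }
  }

  public static void main(String[] args) {
    System.out.println(newRandom("java.util.Random").getClass().getName());
    System.out.println(newRandom("does.not.Exist").getClass().getName()); // falls back
  }
}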

+ 289 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java

@@ -0,0 +1,289 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import java.nio.ByteBuffer;
+import java.security.NoSuchAlgorithmException;
+import java.util.StringTokenizer;
+
+import javax.crypto.BadPaddingException;
+import javax.crypto.IllegalBlockSizeException;
+import javax.crypto.NoSuchPaddingException;
+import javax.crypto.ShortBufferException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.util.NativeCodeLoader;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.PerformanceAdvisory;
+
+/**
+ * OpenSSL cipher using JNI.
+ * Currently only AES-CTR is supported, but the design allows other
+ * crypto algorithms/modes to be added.
+ */
+@InterfaceAudience.Private
+public final class OpensslCipher {
+  private static final Log LOG =
+      LogFactory.getLog(OpensslCipher.class.getName());
+  public static final int ENCRYPT_MODE = 1;
+  public static final int DECRYPT_MODE = 0;
+  
+  /** Currently only AES/CTR/NoPadding is supported. */
+  private static enum AlgMode {
+    AES_CTR;
+    
+    static int get(String algorithm, String mode) 
+        throws NoSuchAlgorithmException {
+      try {
+        return AlgMode.valueOf(algorithm + "_" + mode).ordinal();
+      } catch (Exception e) {
+        throw new NoSuchAlgorithmException("Doesn't support algorithm: " + 
+            algorithm + " and mode: " + mode);
+      }
+    }
+  }
+  
+  private static enum Padding {
+    NoPadding;
+    
+    static int get(String padding) throws NoSuchPaddingException {
+      try {
+        return Padding.valueOf(padding).ordinal();
+      } catch (Exception e) {
+        throw new NoSuchPaddingException("Doesn't support padding: " + padding);
+      }
+    }
+  }
+  
+  private long context = 0;
+  private final int alg;
+  private final int padding;
+  
+  private static final String loadingFailureReason;
+
+  static {
+    String loadingFailure = null;
+    try {
+      if (!NativeCodeLoader.buildSupportsOpenssl()) {
+        PerformanceAdvisory.LOG.debug("Build does not support openssl");
+        loadingFailure = "build does not support openssl.";
+      } else {
+        initIDs();
+      }
+    } catch (Throwable t) {
+      loadingFailure = t.getMessage();
+      LOG.debug("Failed to load OpenSSL Cipher.", t);
+    } finally {
+      loadingFailureReason = loadingFailure;
+    }
+  }
+  
+  public static String getLoadingFailureReason() {
+    return loadingFailureReason;
+  }
+  
+  private OpensslCipher(long context, int alg, int padding) {
+    this.context = context;
+    this.alg = alg;
+    this.padding = padding;
+  }
+  
+  /**
+   * Return an <code>OpensslCipher</code> object that implements the specified
+   * transformation.
+   * 
+   * @param transformation the name of the transformation, e.g., 
+   * AES/CTR/NoPadding.
+   * @return OpensslCipher an <code>OpensslCipher</code> object
+   * @throws NoSuchAlgorithmException if <code>transformation</code> is null, 
+   * empty, in an invalid format, or if Openssl doesn't implement the 
+   * specified algorithm.
+   * @throws NoSuchPaddingException if <code>transformation</code> contains 
+   * a padding scheme that is not available.
+   */
+  public static final OpensslCipher getInstance(String transformation) 
+      throws NoSuchAlgorithmException, NoSuchPaddingException {
+    Transform transform = tokenizeTransformation(transformation);
+    int algMode = AlgMode.get(transform.alg, transform.mode);
+    int padding = Padding.get(transform.padding);
+    long context = initContext(algMode, padding);
+    return new OpensslCipher(context, algMode, padding);
+  }
+  
+  /** Nested class for algorithm, mode and padding. */
+  private static class Transform {
+    final String alg;
+    final String mode;
+    final String padding;
+    
+    public Transform(String alg, String mode, String padding) {
+      this.alg = alg;
+      this.mode = mode;
+      this.padding = padding;
+    }
+  }
+  
+  private static Transform tokenizeTransformation(String transformation) 
+      throws NoSuchAlgorithmException {
+    if (transformation == null) {
+      throw new NoSuchAlgorithmException("No transformation given.");
+    }
+    
+    /*
+     * Array containing the components of a Cipher transformation:
+     * 
+     * index 0: algorithm (e.g., AES)
+     * index 1: mode (e.g., CTR)
+     * index 2: padding (e.g., NoPadding)
+     */
+    String[] parts = new String[3];
+    int count = 0;
+    StringTokenizer parser = new StringTokenizer(transformation, "/");
+    while (parser.hasMoreTokens() && count < 3) {
+      parts[count++] = parser.nextToken().trim();
+    }
+    if (count != 3 || parser.hasMoreTokens()) {
+      throw new NoSuchAlgorithmException("Invalid transformation format: " + 
+          transformation);
+    }
+    return new Transform(parts[0], parts[1], parts[2]);
+  }
+  
+  /**
+   * Initialize this cipher with a key and IV.
+   * 
+   * @param mode {@link #ENCRYPT_MODE} or {@link #DECRYPT_MODE}
+   * @param key crypto key
+   * @param iv crypto iv
+   */
+  public void init(int mode, byte[] key, byte[] iv) {
+    context = init(context, mode, alg, padding, key, iv);
+  }
+  
+  /**
+   * Continues a multiple-part encryption or decryption operation. The data
+   * is encrypted or decrypted, depending on how this cipher was initialized.
+   * <p/>
+   * 
+   * All <code>input.remaining()</code> bytes starting at 
+   * <code>input.position()</code> are processed. The result is stored in
+   * the output buffer.
+   * <p/>
+   * 
+   * Upon return, the input buffer's position will be equal to its limit;
+   * its limit will not have changed. The output buffer's position will have
+   * advanced by n, where n is the value returned by this method; the output
+   * buffer's limit will not have changed.
+   * <p/>
+   * 
+   * If <code>output.remaining()</code> bytes are insufficient to hold the
+   * result, a <code>ShortBufferException</code> is thrown.
+   * 
+   * @param input the input ByteBuffer
+   * @param output the output ByteBuffer
+   * @return int number of bytes stored in <code>output</code>
+   * @throws ShortBufferException if there is insufficient space in the
+   * output buffer
+   */
+  public int update(ByteBuffer input, ByteBuffer output) 
+      throws ShortBufferException {
+    checkState();
+    Preconditions.checkArgument(input.isDirect() && output.isDirect(), 
+        "Direct buffers are required.");
+    int len = update(context, input, input.position(), input.remaining(),
+        output, output.position(), output.remaining());
+    input.position(input.limit());
+    output.position(output.position() + len);
+    return len;
+  }
+  
+  /**
+   * Finishes a multiple-part operation. The data is encrypted or decrypted,
+   * depending on how this cipher was initialized.
+   * <p/>
+   * 
+   * The result is stored in the output buffer. Upon return, the output buffer's
+   * position will have advanced by n, where n is the value returned by this
+   * method; the output buffer's limit will not have changed.
+   * <p/>
+   * 
+   * If <code>output.remaining()</code> bytes are insufficient to hold the result,
+   * a <code>ShortBufferException</code> is thrown.
+   * <p/>
+   * 
+   * Upon finishing, this method resets this cipher object to the state it was
+   * in when previously initialized. That is, the object is available to encrypt
+   * or decrypt more data.
+   * <p/>
+   * 
+   * If any exception is thrown, this cipher object needs to be reset before it 
+   * can be used again.
+   * 
+   * @param output the output ByteBuffer
+   * @return int number of bytes stored in <code>output</code>
+   * @throws ShortBufferException
+   * @throws IllegalBlockSizeException
+   * @throws BadPaddingException
+   */
+  public int doFinal(ByteBuffer output) throws ShortBufferException, 
+      IllegalBlockSizeException, BadPaddingException {
+    checkState();
+    Preconditions.checkArgument(output.isDirect(), "Direct buffer is required.");
+    int len = doFinal(context, output, output.position(), output.remaining());
+    output.position(output.position() + len);
+    return len;
+  }
+  
+  /** Forcibly clean the context. */
+  public void clean() {
+    if (context != 0) {
+      clean(context);
+      context = 0;
+    }
+  }
+
+  /** Check whether context is initialized. */
+  private void checkState() {
+    Preconditions.checkState(context != 0);
+  }
+  
+  @Override
+  protected void finalize() throws Throwable {
+    clean();
+  }
+
+  private native static void initIDs();
+  
+  private native static long initContext(int alg, int padding);
+  
+  private native long init(long context, int mode, int alg, int padding, 
+      byte[] key, byte[] iv);
+  
+  private native int update(long context, ByteBuffer input, int inputOffset, 
+      int inputLength, ByteBuffer output, int outputOffset, int maxOutputLength);
+  
+  private native int doFinal(long context, ByteBuffer output, int offset, 
+      int maxOutputLength);
+  
+  private native void clean(long context);
+  
+  public native static String getLibraryName();
+}
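Note: the static block above converts any problem loading the native library into a saved failure-reason string, so OpensslAesCtrCryptoCodec can fail lazily with a readable message instead of a hard class-initialization error. A generic sketch of that pattern, with a deliberately missing library standing in for the real native probe and initIDs():

public class OptionalNativeFeature {
  private static final String LOADING_FAILURE_REASON;

  static {
    String failure = null;
    try {
      // Stand-in for the real native checks; a missing .so becomes a reason
      // string here instead of an UnsatisfiedLinkError at class-load time.
      System.loadLibrary("no_such_native_lib");
    } catch (Throwable t) {
      failure = t.getMessage();
    } finally {
      LOADING_FAILURE_REASON = failure;
    }
  }

  public static String getLoadingFailureReason() {
    return LOADING_FAILURE_REASON;
  }

  public static void main(String[] args) {
    System.out.println(getLoadingFailureReason()); // e.g. "no no_such_native_lib in java.library.path"
  }
}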

+ 229 - 19
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java

@@ -27,8 +27,11 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.ProviderUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import javax.crypto.spec.SecretKeySpec;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.ObjectInputStream;
@@ -80,6 +83,9 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 @InterfaceAudience.Private
 public class JavaKeyStoreProvider extends KeyProvider {
   private static final String KEY_METADATA = "KeyMetadata";
+  private static Logger LOG =
+      LoggerFactory.getLogger(JavaKeyStoreProvider.class);
+
   public static final String SCHEME_NAME = "jceks";
 
   public static final String KEYSTORE_PASSWORD_FILE_KEY =
@@ -102,6 +108,7 @@ public class JavaKeyStoreProvider extends KeyProvider {
   private final Map<String, Metadata> cache = new HashMap<String, Metadata>();
 
   private JavaKeyStoreProvider(URI uri, Configuration conf) throws IOException {
+    super(conf);
     this.uri = uri;
     path = ProviderUtils.unnestUri(uri);
     fs = path.getFileSystem(conf);
@@ -115,6 +122,10 @@ public class JavaKeyStoreProvider extends KeyProvider {
       if (pwFile != null) {
         ClassLoader cl = Thread.currentThread().getContextClassLoader();
         URL pwdFile = cl.getResource(pwFile);
+        if (pwdFile == null) {
+          // The provided password file does not exist
+          throw new IOException("Password file does not exist");
+        }
         if (pwdFile != null) {
           InputStream is = pwdFile.openStream();
           try {
@@ -129,19 +140,25 @@ public class JavaKeyStoreProvider extends KeyProvider {
       password = KEYSTORE_PASSWORD_DEFAULT;
     }
     try {
+      Path oldPath = constructOldPath(path);
+      Path newPath = constructNewPath(path);
       keyStore = KeyStore.getInstance(SCHEME_NAME);
+      FsPermission perm = null;
       if (fs.exists(path)) {
-        // save off permissions in case we need to
-        // rewrite the keystore in flush()
-        FileStatus s = fs.getFileStatus(path);
-        permissions = s.getPermission();
-
-        keyStore.load(fs.open(path), password);
+        // flush did not proceed to completion
+        // _NEW should not exist
+        if (fs.exists(newPath)) {
+          throw new IOException(
+              String.format("Keystore not loaded due to some inconsistency "
+              + "('%s' and '%s' should not exist together)!!", path, newPath));
+        }
+        perm = tryLoadFromPath(path, oldPath);
       } else {
-        permissions = new FsPermission("700");
-        // required to create an empty keystore. *sigh*
-        keyStore.load(null, password);
+        perm = tryLoadIncompleteFlush(oldPath, newPath);
       }
+      // Need to save off permissions in case we need to
+      // rewrite the keystore in flush()
+      permissions = perm;
     } catch (KeyStoreException e) {
       throw new IOException("Can't create keystore", e);
     } catch (NoSuchAlgorithmException e) {
@@ -154,6 +171,136 @@ public class JavaKeyStoreProvider extends KeyProvider {
     writeLock = lock.writeLock();
   }
 
+  /**
+   * Try loading from the user-specified path; if that fails for a reason
+   * other than a bad/wrong password, fall back to the backup path (_OLD).
+   * @param path Actual path to load from
+   * @param backupPath Backup path (_OLD)
+   * @return The permissions of the loaded file
+   * @throws NoSuchAlgorithmException
+   * @throws CertificateException
+   * @throws IOException
+   */
+  private FsPermission tryLoadFromPath(Path path, Path backupPath)
+      throws NoSuchAlgorithmException, CertificateException,
+      IOException {
+    FsPermission perm = null;
+    try {
+      perm = loadFromPath(path, password);
+      // Remove _OLD if exists
+      if (fs.exists(backupPath)) {
+        fs.delete(backupPath, true);
+      }
+      LOG.debug("KeyStore loaded successfully !!");
+    } catch (IOException ioe) {
+      // If the file is corrupted for some reason other than a
+      // wrong password, try the _OLD file if it exists
+      if (!isBadorWrongPassword(ioe)) {
+        perm = loadFromPath(backupPath, password);
+        // Rename CURRENT to CORRUPTED
+        renameOrFail(path, new Path(path.toString() + "_CORRUPTED_"
+            + System.currentTimeMillis()));
+        renameOrFail(backupPath, path);
+        LOG.debug(String.format(
+            "KeyStore loaded successfully from '%s' since '%s'"
+                + "was corrupted !!", backupPath, path));
+      } else {
+        throw ioe;
+      }
+    }
+    return perm;
+  }
+
+  /**
+   * A flush might have been interrupted part-way through, in which case either
+   * the _NEW or _OLD file might still exist. This method tries to load the KeyStore
+   * from one of these intermediate files.
+   * @param oldPath the _OLD file created during flush
+   * @param newPath the _NEW file created during flush
+   * @return The permissions of the loaded file
+   * @throws IOException
+   * @throws NoSuchAlgorithmException
+   * @throws CertificateException
+   */
+  private FsPermission tryLoadIncompleteFlush(Path oldPath, Path newPath)
+      throws IOException, NoSuchAlgorithmException, CertificateException {
+    FsPermission perm = null;
+    // Check if _NEW exists (in case flush had finished writing but not
+    // completed the re-naming)
+    if (fs.exists(newPath)) {
+      perm = loadAndReturnPerm(newPath, oldPath);
+    }
+    // Try loading from _OLD (an earlier flush might not have finished
+    // writing it completely)
+    if ((perm == null) && fs.exists(oldPath)) {
+      perm = loadAndReturnPerm(oldPath, newPath);
+    }
+    // If not loaded yet,
+    // required to create an empty keystore. *sigh*
+    if (perm == null) {
+      keyStore.load(null, password);
+      LOG.debug("KeyStore initialized anew successfully !!");
+      perm = new FsPermission("700");
+    }
+    return perm;
+  }
+
+  private FsPermission loadAndReturnPerm(Path pathToLoad, Path pathToDelete)
+      throws NoSuchAlgorithmException, CertificateException,
+      IOException {
+    FsPermission perm = null;
+    try {
+      perm = loadFromPath(pathToLoad, password);
+      renameOrFail(pathToLoad, path);
+      LOG.debug(String.format("KeyStore loaded successfully from '%s'!!",
+          pathToLoad));
+      if (fs.exists(pathToDelete)) {
+        fs.delete(pathToDelete, true);
+      }
+    } catch (IOException e) {
+      // Check for password issue : don't want to trash file due
+      // to wrong password
+      if (isBadorWrongPassword(e)) {
+        throw e;
+      }
+    }
+    return perm;
+  }
+
+  private boolean isBadorWrongPassword(IOException ioe) {
+    // As per the documentation, this is supposed to be the way to figure
+    // out whether the password was correct
+    if (ioe.getCause() instanceof UnrecoverableKeyException) {
+      return true;
+    }
+    // Unfortunately that doesn't seem to work..
+    // Workaround :
+    if ((ioe.getCause() == null)
+        && (ioe.getMessage() != null)
+        && ((ioe.getMessage().contains("Keystore was tampered")) || (ioe
+            .getMessage().contains("password was incorrect")))) {
+      return true;
+    }
+    return false;
+  }
+
+  private FsPermission loadFromPath(Path p, char[] password)
+      throws IOException, NoSuchAlgorithmException, CertificateException {
+    FileStatus s = fs.getFileStatus(p);
+    keyStore.load(fs.open(p), password);
+    return s.getPermission();
+  }
+
+  private Path constructNewPath(Path path) {
+    Path newPath = new Path(path.toString() + "_NEW");
+    return newPath;
+  }
+
+  private Path constructOldPath(Path path) {
+    Path oldPath = new Path(path.toString() + "_OLD");
+    return oldPath;
+  }
+
   @Override
   public KeyVersion getKeyVersion(String versionName) throws IOException {
     readLock.lock();
@@ -352,11 +499,22 @@ public class JavaKeyStoreProvider extends KeyProvider {
 
 
   @Override
   public void flush() throws IOException {
+    Path newPath = constructNewPath(path);
+    Path oldPath = constructOldPath(path);
     writeLock.lock();
     try {
       if (!changed) {
         return;
       }
+      // Might exist if a backup has been restored etc.
+      if (fs.exists(newPath)) {
+        renameOrFail(newPath, new Path(newPath.toString()
+            + "_ORPHANED_" + System.currentTimeMillis()));
+      }
+      if (fs.exists(oldPath)) {
+        renameOrFail(oldPath, new Path(oldPath.toString()
+            + "_ORPHANED_" + System.currentTimeMillis()));
+      }
       // put all of the updates into the keystore
       for(Map.Entry<String, Metadata> entry: cache.entrySet()) {
         try {
@@ -366,25 +524,77 @@ public class JavaKeyStoreProvider extends KeyProvider {
           throw new IOException("Can't set metadata key " + entry.getKey(),e );
         }
       }
+
+      // Save old File first
+      boolean fileExisted = backupToOld(oldPath);
       // write out the keystore
-      FSDataOutputStream out = FileSystem.create(fs, path, permissions);
+      // Write to _NEW path first :
       try {
-        keyStore.store(out, password);
-      } catch (KeyStoreException e) {
-        throw new IOException("Can't store keystore " + this, e);
-      } catch (NoSuchAlgorithmException e) {
-        throw new IOException("No such algorithm storing keystore " + this, e);
-      } catch (CertificateException e) {
-        throw new IOException("Certificate exception storing keystore " + this,
-            e);
+        writeToNew(newPath);
+      } catch (IOException ioe) {
+        // rename _OLD back to current and rethrow the exception
+        revertFromOld(oldPath, fileExisted);
+        throw ioe;
       }
-      out.close();
+      // Rename _NEW to CURRENT and delete _OLD
+      cleanupNewAndOld(newPath, oldPath);
       changed = false;
     } finally {
       writeLock.unlock();
     }
   }
 
 
+  private void cleanupNewAndOld(Path newPath, Path oldPath) throws IOException {
+    // Rename _NEW to CURRENT
+    renameOrFail(newPath, path);
+    // Delete _OLD
+    if (fs.exists(oldPath)) {
+      fs.delete(oldPath, true);
+    }
+  }
+
+  private void writeToNew(Path newPath) throws IOException {
+    FSDataOutputStream out =
+        FileSystem.create(fs, newPath, permissions);
+    try {
+      keyStore.store(out, password);
+    } catch (KeyStoreException e) {
+      throw new IOException("Can't store keystore " + this, e);
+    } catch (NoSuchAlgorithmException e) {
+      throw new IOException(
+          "No such algorithm storing keystore " + this, e);
+    } catch (CertificateException e) {
+      throw new IOException(
+          "Certificate exception storing keystore " + this, e);
+    }
+    out.close();
+  }
+
+  private void revertFromOld(Path oldPath, boolean fileExisted)
+      throws IOException {
+    if (fileExisted) {
+      renameOrFail(oldPath, path);
+    }
+  }
+
+  private boolean backupToOld(Path oldPath)
+      throws IOException {
+    boolean fileExisted = false;
+    if (fs.exists(path)) {
+      renameOrFail(path, oldPath);
+      fileExisted = true;
+    }
+    return fileExisted;
+  }
+
+  private void renameOrFail(Path src, Path dest)
+      throws IOException {
+    if (!fs.rename(src, dest)) {
+      throw new IOException("Rename unsuccessful : "
+          + String.format("'%s' to '%s'", src, dest));
+    }
+  }
+
   @Override
   public String toString() {
     return uri.toString();

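For reference, a minimal standalone sketch of the backup/write/rename pattern the reworked flush() follows: back the current file up to _OLD, write the new contents to _NEW, promote _NEW to CURRENT, then drop the backup. It uses java.nio.file rather than the Hadoop FileSystem API, and the class and method names are illustrative only.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.StandardCopyOption;

    // Hypothetical sketch; not part of the patch.
    public class AtomicReplaceSketch {
      public static void replace(Path current, byte[] newContents) throws IOException {
        Path newPath = Paths.get(current.toString() + "_NEW");
        Path oldPath = Paths.get(current.toString() + "_OLD");
        boolean hadCurrent = Files.exists(current);
        if (hadCurrent) {
          // Save the current file first, mirroring backupToOld().
          Files.move(current, oldPath, StandardCopyOption.REPLACE_EXISTING);
        }
        try {
          // Write the complete new contents to _NEW, mirroring writeToNew().
          Files.write(newPath, newContents);
        } catch (IOException ioe) {
          // Writing failed: restore _OLD to CURRENT, mirroring revertFromOld().
          if (hadCurrent) {
            Files.move(oldPath, current, StandardCopyOption.REPLACE_EXISTING);
          }
          throw ioe;
        }
        // Promote _NEW to CURRENT and delete the backup, mirroring cleanupNewAndOld().
        Files.move(newPath, current, StandardCopyOption.REPLACE_EXISTING);
        Files.deleteIfExists(oldPath);
      }
    }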
+ 31 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java

@@ -54,7 +54,9 @@ public abstract class KeyProvider {
   public static final String DEFAULT_CIPHER = "AES/CTR/NoPadding";
   public static final String DEFAULT_BITLENGTH_NAME =
       "hadoop.security.key.default.bitlength";
-  public static final int DEFAULT_BITLENGTH = 256;
+  public static final int DEFAULT_BITLENGTH = 128;
+
+  private final Configuration conf;
 
 
   /**
    * The combination of both the key version name and the key material.
@@ -341,8 +343,36 @@ public abstract class KeyProvider {
     public Map<String, String> getAttributes() {
       return (attributes == null) ? Collections.EMPTY_MAP : attributes;
     }
+
+    @Override
+    public String toString() {
+      return "Options{" +
+          "cipher='" + cipher + '\'' +
+          ", bitLength=" + bitLength +
+          ", description='" + description + '\'' +
+          ", attributes=" + attributes +
+          '}';
+    }
   }
 
 
+  /**
+   * Constructor.
+   * 
+   * @param conf configuration for the provider
+   */
+  public KeyProvider(Configuration conf) {
+    this.conf = new Configuration(conf);
+  }
+
+  /**
+   * Return the provider configuration.
+   * 
+   * @return the provider configuration
+   */
+  public Configuration getConf() {
+    return conf;
+  }
+  
   /**
    * A helper function to create an options object.
    * @param conf the configuration to use

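Since KeyProvider now stores a copy of the Configuration passed to its constructor, custom providers thread their settings through super(conf) and read them back via getConf(). A minimal sketch, with an illustrative class name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;

    // Illustrative subclass; the concrete key operations are omitted.
    public abstract class ConfiguredKeyProvider extends KeyProvider {

      protected ConfiguredKeyProvider(Configuration conf) {
        super(conf);  // KeyProvider keeps a copy and exposes it via getConf()
      }

      // Example of reading a provider setting through the inherited getConf().
      protected int configuredBitLength() {
        return getConf().getInt(DEFAULT_BITLENGTH_NAME, DEFAULT_BITLENGTH);
      }
    }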
+ 74 - 17
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java

@@ -19,14 +19,20 @@
 package org.apache.hadoop.crypto.key;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.security.GeneralSecurityException;
 import java.security.SecureRandom;
+
 import javax.crypto.Cipher;
 import javax.crypto.spec.IvParameterSpec;
 import javax.crypto.spec.SecretKeySpec;
 
 import com.google.common.base.Preconditions;
+
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.Decryptor;
+import org.apache.hadoop.crypto.Encryptor;
 
 /**
  * A KeyProvider with Cryptographic Extensions specifically for generating
@@ -79,6 +85,30 @@ public class KeyProviderCryptoExtension extends
       this.encryptedKeyVersion = encryptedKeyVersion;
     }
 
 
+    /**
+     * Factory method to create a new EncryptedKeyVersion that can then be
+     * passed into {@link #decryptEncryptedKey}. Note that the fields of the
+     * returned EncryptedKeyVersion will only partially be populated; it is not
+     * necessarily suitable for operations besides decryption.
+     *
+     * @param encryptionKeyVersionName Version name of the encryption key used
+     *                                 to encrypt the encrypted key.
+     * @param encryptedKeyIv           Initialization vector of the encrypted
+     *                                 key. The IV of the encryption key used to
+     *                                 encrypt the encrypted key is derived from
+     *                                 this IV.
+     * @param encryptedKeyMaterial     Key material of the encrypted key.
+     * @return EncryptedKeyVersion suitable for decryption.
+     */
+    public static EncryptedKeyVersion createForDecryption(String
+        encryptionKeyVersionName, byte[] encryptedKeyIv,
+        byte[] encryptedKeyMaterial) {
+      KeyVersion encryptedKeyVersion = new KeyVersion(null, EEK,
+          encryptedKeyMaterial);
+      return new EncryptedKeyVersion(null, encryptionKeyVersionName,
+          encryptedKeyIv, encryptedKeyVersion);
+    }
+
     /**
      * @return Name of the encryption key used to encrypt the encrypted key.
      */
@@ -193,6 +223,13 @@ public class KeyProviderCryptoExtension extends
   private static class DefaultCryptoExtension implements CryptoExtension {
 
     private final KeyProvider keyProvider;
+    private static final ThreadLocal<SecureRandom> RANDOM = 
+        new ThreadLocal<SecureRandom>() {
+      @Override
+      protected SecureRandom initialValue() {
+        return new SecureRandom();
+      }
+    };
 
 
     private DefaultCryptoExtension(KeyProvider keyProvider) {
       this.keyProvider = keyProvider;
@@ -206,18 +243,25 @@ public class KeyProviderCryptoExtension extends
       Preconditions.checkNotNull(encryptionKey,
           "No KeyVersion exists for key '%s' ", encryptionKeyName);
       // Generate random bytes for new key and IV
-      Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
-      SecureRandom random = SecureRandom.getInstance("SHA1PRNG");
+
+      CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf());
       final byte[] newKey = new byte[encryptionKey.getMaterial().length];
-      random.nextBytes(newKey);
-      final byte[] iv = random.generateSeed(cipher.getBlockSize());
+      cc.generateSecureRandom(newKey);
+      final byte[] iv = new byte[cc.getCipherSuite().getAlgorithmBlockSize()];
+      cc.generateSecureRandom(iv);
       // Encryption key IV is derived from new key's IV
       final byte[] encryptionIV = EncryptedKeyVersion.deriveIV(iv);
-      // Encrypt the new key
-      cipher.init(Cipher.ENCRYPT_MODE,
-          new SecretKeySpec(encryptionKey.getMaterial(), "AES"),
-          new IvParameterSpec(encryptionIV));
-      final byte[] encryptedKey = cipher.doFinal(newKey);
+      Encryptor encryptor = cc.createEncryptor();
+      encryptor.init(encryptionKey.getMaterial(), encryptionIV);
+      int keyLen = newKey.length;
+      ByteBuffer bbIn = ByteBuffer.allocateDirect(keyLen);
+      ByteBuffer bbOut = ByteBuffer.allocateDirect(keyLen);
+      bbIn.put(newKey);
+      bbIn.flip();
+      encryptor.encrypt(bbIn, bbOut);
+      bbOut.flip();
+      byte[] encryptedKey = new byte[keyLen];
+      bbOut.get(encryptedKey);    
       return new EncryptedKeyVersion(encryptionKeyName,
           encryptionKey.getVersionName(), iv,
           new KeyVersion(encryptionKey.getName(), EEK, encryptedKey));
@@ -234,19 +278,32 @@ public class KeyProviderCryptoExtension extends
           keyProvider.getKeyVersion(encryptionKeyVersionName);
       Preconditions.checkNotNull(encryptionKey,
           "KeyVersion name '%s' does not exist", encryptionKeyVersionName);
-      final byte[] encryptionKeyMaterial = encryptionKey.getMaterial();
+      Preconditions.checkArgument(
+              encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
+                    .equals(KeyProviderCryptoExtension.EEK),
+                "encryptedKey version name must be '%s', is '%s'",
+                KeyProviderCryptoExtension.EEK,
+                encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
+            );
+
       // Encryption key IV is determined from encrypted key's IV
       final byte[] encryptionIV =
           EncryptedKeyVersion.deriveIV(encryptedKeyVersion.getEncryptedKeyIv());
-      // Init the cipher with encryption key parameters
-      Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
-      cipher.init(Cipher.DECRYPT_MODE,
-          new SecretKeySpec(encryptionKeyMaterial, "AES"),
-          new IvParameterSpec(encryptionIV));
-      // Decrypt the encrypted key
+
+      CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf());
+      Decryptor decryptor = cc.createDecryptor();
+      decryptor.init(encryptionKey.getMaterial(), encryptionIV);
       final KeyVersion encryptedKV =
           encryptedKeyVersion.getEncryptedKeyVersion();
-      final byte[] decryptedKey = cipher.doFinal(encryptedKV.getMaterial());
+      int keyLen = encryptedKV.getMaterial().length;
+      ByteBuffer bbIn = ByteBuffer.allocateDirect(keyLen);
+      ByteBuffer bbOut = ByteBuffer.allocateDirect(keyLen);
+      bbIn.put(encryptedKV.getMaterial());
+      bbIn.flip();
+      decryptor.decrypt(bbIn, bbOut);
+      bbOut.flip();
+      byte[] decryptedKey = new byte[keyLen];
+      bbOut.get(decryptedKey);
       return new KeyVersion(encryptionKey.getName(), EK, decryptedKey);
     }
 
 

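A rough sketch of the generate/decrypt round trip this extension supports. It assumes KeyProviderFactory.KEY_PROVIDER_PATH points at a real provider, that an encryption key named "ezKey" already exists, and that the createKeyProviderCryptoExtension factory and the getters used below are available; treat it as illustrative rather than authoritative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class EekRoundTripSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes KeyProviderFactory.KEY_PROVIDER_PATH is set to a provider URI.
        KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
        KeyProviderCryptoExtension kpce =
            KeyProviderCryptoExtension.createKeyProviderCryptoExtension(provider);

        // Generate an encrypted key (EEK) under the existing encryption key "ezKey".
        EncryptedKeyVersion eek = kpce.generateEncryptedKey("ezKey");

        // Later, rebuild only the fields needed for decryption and decrypt.
        EncryptedKeyVersion forDecryption = EncryptedKeyVersion.createForDecryption(
            eek.getEncryptionKeyVersionName(),
            eek.getEncryptedKeyIv(),
            eek.getEncryptedKeyVersion().getMaterial());
        KeyProvider.KeyVersion decrypted = kpce.decryptEncryptedKey(forDecryption);
        System.out.println("decrypted key length = " + decrypted.getMaterial().length);
      }
    }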
+ 6 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderDelegationTokenExtension.java

@@ -20,6 +20,8 @@ package org.apache.hadoop.crypto.key;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
 
+import java.io.IOException;
+
 /**
  * A KeyProvider extension with the ability to add a renewer's Delegation 
  * Tokens to the provided Credentials.
@@ -45,9 +47,10 @@ public class KeyProviderDelegationTokenExtension extends
      * @param renewer the user allowed to renew the delegation tokens
      * @param credentials cache in which to add new delegation tokens
      * @return list of new delegation tokens
+     * @throws IOException thrown if an IO error occurs.
      */
     public Token<?>[] addDelegationTokens(final String renewer, 
-        Credentials credentials);
+        Credentials credentials) throws IOException;
   }
   
   
   /**
@@ -76,9 +79,10 @@ public class KeyProviderDelegationTokenExtension extends
    * @param renewer the user allowed to renew the delegation tokens
    * @param credentials cache in which to add new delegation tokens
    * @return list of new delegation tokens
+   * @throws IOException thrown if an IO error occurs.
    */
   public Token<?>[] addDelegationTokens(final String renewer, 
-      Credentials credentials) {
+      Credentials credentials) throws IOException {
     return getExtension().addDelegationTokens(renewer, credentials);
   }
   
   

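A short sketch of how a caller might use this extension now that addDelegationTokens declares IOException; the createKeyProviderDelegationTokenExtension factory is assumed from the class's existing API and the helper names are illustrative.

    import java.io.IOException;

    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.token.Token;

    public class DelegationTokenFetchSketch {
      // Fetches delegation tokens for the given renewer into the credential cache.
      static Token<?>[] fetch(KeyProvider provider, String renewer, Credentials creds)
          throws IOException {
        KeyProviderDelegationTokenExtension ext = KeyProviderDelegationTokenExtension
            .createKeyProviderDelegationTokenExtension(provider);
        // The call can now fail with an IOException (e.g. a transport error),
        // so callers have to handle or propagate it.
        return ext.addDelegationTokens(renewer, creds);
      }
    }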
+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderExtension.java

@@ -40,6 +40,7 @@ public abstract class KeyProviderExtension
   private E extension;
 
   public KeyProviderExtension(KeyProvider keyProvider, E extensions) {
+    super(keyProvider.getConf());
     this.keyProvider = keyProvider;
     this.extension = extensions;
   }

+ 45 - 44
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java

@@ -38,9 +38,9 @@ import org.apache.hadoop.util.ToolRunner;
  */
 public class KeyShell extends Configured implements Tool {
   final static private String USAGE_PREFIX = "Usage: hadoop key " +
-  		"[generic options]\n";
+      "[generic options]\n";
   final static private String COMMANDS =
-      "   [--help]\n" +
+      "   [-help]\n" +
       "   [" + CreateCommand.USAGE + "]\n" +
       "   [" + RollCommand.USAGE + "]\n" +
       "   [" + DeleteCommand.USAGE + "]\n" +
@@ -90,11 +90,11 @@ public class KeyShell extends Configured implements Tool {
   /**
    * Parse the command line arguments and initialize the data
    * <pre>
-   * % hadoop key create keyName [--size size] [--cipher algorithm]
-   *    [--provider providerPath]
-   * % hadoop key roll keyName [--provider providerPath]
+   * % hadoop key create keyName [-size size] [-cipher algorithm]
+   *    [-provider providerPath]
+   * % hadoop key roll keyName [-provider providerPath]
    * % hadoop key list [-provider providerPath]
-   * % hadoop key delete keyName [--provider providerPath] [-i]
+   * % hadoop key delete keyName [-provider providerPath] [-i]
    * </pre>
    * @param args Command line arguments.
    * @return 0 on success, 1 on failure.
@@ -107,47 +107,47 @@ public class KeyShell extends Configured implements Tool {
     for (int i = 0; i < args.length; i++) { // parse command line
       boolean moreTokens = (i < args.length - 1);
       if (args[i].equals("create")) {
-        String keyName = "--help";
+        String keyName = "-help";
         if (moreTokens) {
           keyName = args[++i];
         }
 
         command = new CreateCommand(keyName, options);
-        if ("--help".equals(keyName)) {
+        if ("-help".equals(keyName)) {
           printKeyShellUsage();
           return 1;
         }
       } else if (args[i].equals("delete")) {
-        String keyName = "--help";
+        String keyName = "-help";
         if (moreTokens) {
           keyName = args[++i];
         }
 
         command = new DeleteCommand(keyName);
-        if ("--help".equals(keyName)) {
+        if ("-help".equals(keyName)) {
           printKeyShellUsage();
           return 1;
         }
       } else if (args[i].equals("roll")) {
-        String keyName = "--help";
+        String keyName = "-help";
         if (moreTokens) {
           keyName = args[++i];
         }
 
         command = new RollCommand(keyName);
-        if ("--help".equals(keyName)) {
+        if ("-help".equals(keyName)) {
           printKeyShellUsage();
           return 1;
         }
       } else if ("list".equals(args[i])) {
         command = new ListCommand();
-      } else if ("--size".equals(args[i]) && moreTokens) {
+      } else if ("-size".equals(args[i]) && moreTokens) {
         options.setBitLength(Integer.parseInt(args[++i]));
-      } else if ("--cipher".equals(args[i]) && moreTokens) {
+      } else if ("-cipher".equals(args[i]) && moreTokens) {
         options.setCipher(args[++i]);
-      } else if ("--description".equals(args[i]) && moreTokens) {
+      } else if ("-description".equals(args[i]) && moreTokens) {
         options.setDescription(args[++i]);
-      } else if ("--attr".equals(args[i]) && moreTokens) {
+      } else if ("-attr".equals(args[i]) && moreTokens) {
         final String attrval[] = args[++i].split("=", 2);
         final String attr = attrval[0].trim();
         final String val = attrval[1].trim();
@@ -164,14 +164,14 @@ public class KeyShell extends Configured implements Tool {
           return 1;
         }
         attributes.put(attr, val);
-      } else if ("--provider".equals(args[i]) && moreTokens) {
+      } else if ("-provider".equals(args[i]) && moreTokens) {
         userSuppliedProvider = true;
         getConf().set(KeyProviderFactory.KEY_PROVIDER_PATH, args[++i]);
-      } else if ("--metadata".equals(args[i])) {
+      } else if ("-metadata".equals(args[i])) {
         getConf().setBoolean(LIST_METADATA, true);
-      } else if ("-i".equals(args[i]) || ("--interactive".equals(args[i]))) {
+      } else if ("-i".equals(args[i]) || ("-interactive".equals(args[i]))) {
         interactive = true;
-      } else if ("--help".equals(args[i])) {
+      } else if ("-help".equals(args[i])) {
         printKeyShellUsage();
         return 1;
       } else {
@@ -258,11 +258,11 @@ public class KeyShell extends Configured implements Tool {
 
 
   private class ListCommand extends Command {
     public static final String USAGE =
-        "list [--provider <provider>] [--metadata] [--help]";
+        "list [-provider <provider>] [-metadata] [-help]";
     public static final String DESC =
         "The list subcommand displays the keynames contained within\n" +
         "a particular provider as configured in core-site.xml or\n" +
-        "specified with the --provider argument. --metadata displays\n" +
+        "specified with the -provider argument. -metadata displays\n" +
         "the metadata.";
 
     private boolean metadata = false;
@@ -272,9 +272,9 @@ public class KeyShell extends Configured implements Tool {
       provider = getKeyProvider();
       if (provider == null) {
         out.println("There are no non-transient KeyProviders configured.\n"
-          + "Use the --provider option to specify a provider. If you\n"
+          + "Use the -provider option to specify a provider. If you\n"
           + "want to list a transient provider then you must use the\n"
-          + "--provider argument.");
+          + "-provider argument.");
         rc = false;
       }
       metadata = getConf().getBoolean(LIST_METADATA, false);
@@ -310,10 +310,10 @@ public class KeyShell extends Configured implements Tool {
   }
 
   private class RollCommand extends Command {
-    public static final String USAGE = "roll <keyname> [--provider <provider>] [--help]";
+    public static final String USAGE = "roll <keyname> [-provider <provider>] [-help]";
     public static final String DESC =
       "The roll subcommand creates a new version for the specified key\n" +
-      "within the provider indicated using the --provider argument\n";
+      "within the provider indicated using the -provider argument\n";
 
     String keyName = null;
 
 
@@ -326,13 +326,13 @@ public class KeyShell extends Configured implements Tool {
       provider = getKeyProvider();
       if (provider == null) {
         out.println("There are no valid KeyProviders configured. The key\n" +
-          "has not been rolled. Use the --provider option to specify\n" +
+          "has not been rolled. Use the -provider option to specify\n" +
           "a provider.");
         rc = false;
       }
       if (keyName == null) {
         out.println("Please provide a <keyname>.\n" +
-          "See the usage description by using --help.");
+          "See the usage description by using -help.");
         rc = false;
       }
       return rc;
@@ -367,11 +367,11 @@ public class KeyShell extends Configured implements Tool {
   }
 
   private class DeleteCommand extends Command {
-    public static final String USAGE = "delete <keyname> [--provider <provider>] [--help]";
+    public static final String USAGE = "delete <keyname> [-provider <provider>] [-help]";
     public static final String DESC =
         "The delete subcommand deletes all versions of the key\n" +
         "specified by the <keyname> argument from within the\n" +
-        "provider specified --provider.";
+        "provider specified -provider.";
 
     String keyName = null;
     boolean cont = true;
@@ -385,12 +385,12 @@ public class KeyShell extends Configured implements Tool {
       provider = getKeyProvider();
       if (provider == null) {
         out.println("There are no valid KeyProviders configured. Nothing\n"
-          + "was deleted. Use the --provider option to specify a provider.");
+          + "was deleted. Use the -provider option to specify a provider.");
         return false;
       }
       if (keyName == null) {
         out.println("There is no keyName specified. Please specify a " +
-            "<keyname>. See the usage description with --help.");
+            "<keyname>. See the usage description with -help.");
         return false;
       }
       if (interactive) {
@@ -436,19 +436,19 @@ public class KeyShell extends Configured implements Tool {
 
 
   private class CreateCommand extends Command {
     public static final String USAGE =
-      "create <keyname> [--cipher <cipher>] [--size <size>]\n" +
-      "                     [--description <description>]\n" +
-      "                     [--attr <attribute=value>]\n" +
-      "                     [--provider <provider>] [--help]";
+      "create <keyname> [-cipher <cipher>] [-size <size>]\n" +
+      "                     [-description <description>]\n" +
+      "                     [-attr <attribute=value>]\n" +
+      "                     [-provider <provider>] [-help]";
     public static final String DESC =
       "The create subcommand creates a new key for the name specified\n" +
       "by the <keyname> argument within the provider specified by the\n" +
-      "--provider argument. You may specify a cipher with the --cipher\n" +
+      "-provider argument. You may specify a cipher with the -cipher\n" +
       "argument. The default cipher is currently \"AES/CTR/NoPadding\".\n" +
-      "The default keysize is 256. You may specify the requested key\n" +
-      "length using the --size argument. Arbitrary attribute=value\n" +
-      "style attributes may be specified using the --attr argument.\n" +
-      "--attr may be specified multiple times, once per attribute.\n";
+      "The default keysize is 128. You may specify the requested key\n" +
+      "length using the -size argument. Arbitrary attribute=value\n" +
+      "style attributes may be specified using the -attr argument.\n" +
+      "-attr may be specified multiple times, once per attribute.\n";
 
 
     final String keyName;
     final Options options;
@@ -463,13 +463,13 @@ public class KeyShell extends Configured implements Tool {
       provider = getKeyProvider();
       if (provider == null) {
         out.println("There are no valid KeyProviders configured. No key\n" +
-          " was created. You can use the --provider option to specify\n" +
+          " was created. You can use the -provider option to specify\n" +
           " a provider to use.");
         rc = false;
       }
       if (keyName == null) {
         out.println("Please provide a <keyname>. See the usage description" +
-          " with --help.");
+          " with -help.");
         rc = false;
       }
       return rc;
@@ -479,7 +479,8 @@ public class KeyShell extends Configured implements Tool {
       warnIfTransientProvider();
       try {
         provider.createKey(keyName, options);
-        out.println(keyName + " has been successfully created.");
+        out.println(keyName + " has been successfully created with options "
+            + options.toString() + ".");
         provider.flush();
         printProviderWritten();
       } catch (InvalidParameterException e) {

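With the switch to single-dash options, an invocation of the shell now looks roughly like the sketch below; the key name and provider URI are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyShell;
    import org.apache.hadoop.util.ToolRunner;

    public class KeyShellSketch {
      public static void main(String[] args) throws Exception {
        // Equivalent to: hadoop key create mykey -size 128 \
        //                  -provider jceks://file/tmp/keystore.jceks
        int rc = ToolRunner.run(new Configuration(), new KeyShell(), new String[] {
            "create", "mykey", "-size", "128",
            "-provider", "jceks://file/tmp/keystore.jceks"});
        System.exit(rc);
      }
    }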
+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/UserProvider.java

@@ -44,7 +44,8 @@ public class UserProvider extends KeyProvider {
   private final Credentials credentials;
   private final Map<String, Metadata> cache = new HashMap<String, Metadata>();
 
-  private UserProvider() throws IOException {
+  private UserProvider(Configuration conf) throws IOException {
+    super(conf);
     user = UserGroupInformation.getCurrentUser();
     credentials = user.getCredentials();
   }
@@ -145,7 +146,7 @@ public class UserProvider extends KeyProvider {
     public KeyProvider createProvider(URI providerName,
                                       Configuration conf) throws IOException {
       if (SCHEME_NAME.equals(providerName.getScheme())) {
-        return new UserProvider();
+        return new UserProvider(conf);
       }
       return null;
     }

+ 87 - 30
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java

@@ -22,15 +22,18 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
+import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.ProviderUtils;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
-import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
 import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.http.client.utils.URIBuilder;
 import org.codehaus.jackson.map.ObjectMapper;
 
 
@@ -50,6 +53,7 @@ import java.net.URL;
 import java.net.URLEncoder;
 import java.security.GeneralSecurityException;
 import java.security.NoSuchAlgorithmException;
+import java.security.PrivilegedExceptionAction;
 import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Date;
@@ -69,7 +73,10 @@ import com.google.common.base.Preconditions;
  * KMS client <code>KeyProvider</code> implementation.
  */
 @InterfaceAudience.Private
-public class KMSClientProvider extends KeyProvider implements CryptoExtension {
+public class KMSClientProvider extends KeyProvider implements CryptoExtension,
+    KeyProviderDelegationTokenExtension.DelegationTokenExtension {
+
+  public static final String TOKEN_KIND = "kms-dt";
 
   public static final String SCHEME_NAME = "kms";
 
 
@@ -229,6 +236,8 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
   private String kmsUrl;
   private SSLFactory sslFactory;
   private ConnectionConfigurator configurator;
+  private DelegationTokenAuthenticatedURL.Token authToken;
+  private UserGroupInformation loginUgi;
 
   @Override
   public String toString() {
@@ -274,6 +283,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
   }
 
 
   public KMSClientProvider(URI uri, Configuration conf) throws IOException {
+    super(conf);
     Path path = ProviderUtils.unnestUri(uri);
     URL url = path.toUri().toURL();
     kmsUrl = createServiceURL(url);
@@ -309,6 +319,8 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
                 CommonConfigurationKeysPublic.
                     KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
             new EncryptedQueueRefiller());
+    authToken = new DelegationTokenAuthenticatedURL.Token();
+    loginUgi = UserGroupInformation.getCurrentUser();
   }
 
   private String createServiceURL(URL url) throws IOException {
@@ -325,12 +337,14 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
     try {
       StringBuilder sb = new StringBuilder();
       sb.append(kmsUrl);
-      sb.append(collection);
-      if (resource != null) {
-        sb.append("/").append(URLEncoder.encode(resource, UTF8));
-      }
-      if (subResource != null) {
-        sb.append("/").append(subResource);
+      if (collection != null) {
+        sb.append(collection);
+        if (resource != null) {
+          sb.append("/").append(URLEncoder.encode(resource, UTF8));
+          if (subResource != null) {
+            sb.append("/").append(subResource);
+          }
+        }
       }
       URIBuilder uriBuilder = new URIBuilder(sb.toString());
       if (parameters != null) {
@@ -365,14 +379,29 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
     return conn;
   }
 
 
-  private HttpURLConnection createConnection(URL url, String method)
+  private HttpURLConnection createConnection(final URL url, String method)
       throws IOException {
     HttpURLConnection conn;
     try {
-      AuthenticatedURL authUrl = new AuthenticatedURL(new PseudoAuthenticator(),
-          configurator);
-      conn = authUrl.openConnection(url, new AuthenticatedURL.Token());
-    } catch (AuthenticationException ex) {
+      // if current UGI is different from UGI at constructor time, behave as
+      // proxyuser
+      UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
+      final String doAsUser =
+          (loginUgi.getShortUserName().equals(currentUgi.getShortUserName()))
+          ? null : currentUgi.getShortUserName();
+
+      // creating the HTTP connection using the current UGI at constructor time
+      conn = loginUgi.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
+        @Override
+        public HttpURLConnection run() throws Exception {
+          DelegationTokenAuthenticatedURL authUrl =
+              new DelegationTokenAuthenticatedURL(configurator);
+          return authUrl.openConnection(url, authToken, doAsUser);
+        }
+      });
+    } catch (IOException ex) {
+      throw ex;
+    } catch (Exception ex) {
       throw new IOException(ex);
     }
     conn.setUseCaches(false);
@@ -403,20 +432,27 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
     if (status != expected) {
       InputStream es = null;
       try {
-        es = conn.getErrorStream();
-        ObjectMapper mapper = new ObjectMapper();
-        Map json = mapper.readValue(es, Map.class);
-        String exClass = (String) json.get(
-            KMSRESTConstants.ERROR_EXCEPTION_JSON);
-        String exMsg = (String)
-            json.get(KMSRESTConstants.ERROR_MESSAGE_JSON);
         Exception toThrow;
-        try {
-          ClassLoader cl = KMSClientProvider.class.getClassLoader();
-          Class klass = cl.loadClass(exClass);
-          Constructor constr = klass.getConstructor(String.class);
-          toThrow = (Exception) constr.newInstance(exMsg);
-        } catch (Exception ex) {
+        String contentType = conn.getHeaderField(CONTENT_TYPE);
+        if (contentType != null &&
+            contentType.toLowerCase().startsWith(APPLICATION_JSON_MIME)) {
+          es = conn.getErrorStream();
+          ObjectMapper mapper = new ObjectMapper();
+          Map json = mapper.readValue(es, Map.class);
+          String exClass = (String) json.get(
+              KMSRESTConstants.ERROR_EXCEPTION_JSON);
+          String exMsg = (String)
+              json.get(KMSRESTConstants.ERROR_MESSAGE_JSON);
+          try {
+            ClassLoader cl = KMSClientProvider.class.getClassLoader();
+            Class klass = cl.loadClass(exClass);
+            Constructor constr = klass.getConstructor(String.class);
+            toThrow = (Exception) constr.newInstance(exMsg);
+          } catch (Exception ex) {
+            toThrow = new IOException(MessageFormat.format(
+                "HTTP status [{0}], {1}", status, conn.getResponseMessage()));
+          }
+        } else {
           toThrow = new IOException(MessageFormat.format(
           toThrow = new IOException(MessageFormat.format(
               "HTTP status [{0}], {1}", status, conn.getResponseMessage()));
         }
@@ -512,7 +548,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
     List<String> batch = new ArrayList<String>();
     int batchLen = 0;
     for (String name : keyNames) {
-      int additionalLen = KMSRESTConstants.KEY_OP.length() + 1 + name.length();
+      int additionalLen = KMSRESTConstants.KEY.length() + 1 + name.length();
       batchLen += additionalLen;
       // topping at 1500 to account for initial URL and encoded names
       if (batchLen > 1500) {
@@ -536,7 +572,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
     for (String[] keySet : keySets) {
       if (keyNames.length > 0) {
         Map<String, Object> queryStr = new HashMap<String, Object>();
-        queryStr.put(KMSRESTConstants.KEY_OP, keySet);
+        queryStr.put(KMSRESTConstants.KEY, keySet);
         URL url = createURL(KMSRESTConstants.KEYS_METADATA_RESOURCE, null,
             null, queryStr);
         HttpURLConnection conn = createConnection(url, HTTP_GET);
@@ -653,7 +689,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
         encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
             .equals(KeyProviderCryptoExtension.EEK),
         "encryptedKey version name must be '%s', is '%s'",
-        KeyProviderCryptoExtension.EK,
+        KeyProviderCryptoExtension.EEK,
         encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
     );
     checkNotNull(encryptedKeyVersion.getEncryptedKeyVersion(), "encryptedKey");
@@ -729,4 +765,25 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
     }
   }
 
 
+  @Override
+  public Token<?>[] addDelegationTokens(String renewer,
+      Credentials credentials) throws IOException {
+    Token<?>[] tokens;
+    URL url = createURL(null, null, null, null);
+    DelegationTokenAuthenticatedURL authUrl =
+        new DelegationTokenAuthenticatedURL(configurator);
+    try {
+      Token<?> token = authUrl.getDelegationToken(url, authToken, renewer);
+      if (token != null) {
+        credentials.addToken(token.getService(), token);
+        tokens = new Token<?>[] { token };
+      } else {
+        throw new IOException("Got NULL as delegation token");
+      }
+    } catch (AuthenticationException ex) {
+      throw new IOException(ex);
+    }
+    return tokens;
+  }
+
 }

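The proxy-user handling in the new createConnection() boils down to the doAs pattern below: run the connection setup as the UGI captured at construction time and pass the current user as doAs only when the two differ. This sketch restates that shape outside the class; the parameter names are illustrative.

    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;

    public class DoAsConnectionSketch {
      static HttpURLConnection open(final URL url,
          final DelegationTokenAuthenticatedURL.Token authToken,
          UserGroupInformation loginUgi) throws Exception {
        UserGroupInformation current = UserGroupInformation.getCurrentUser();
        // Only ask for impersonation when the caller differs from the login user.
        final String doAsUser = loginUgi.getShortUserName()
            .equals(current.getShortUserName()) ? null : current.getShortUserName();
        return loginUgi.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
          @Override
          public HttpURLConnection run() throws Exception {
            DelegationTokenAuthenticatedURL authUrl =
                new DelegationTokenAuthenticatedURL();
            return authUrl.openConnection(url, authToken, doAsUser);
          }
        });
      }
    }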
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSRESTConstants.java

@@ -37,7 +37,7 @@ public class KMSRESTConstants {
   public static final String EEK_SUB_RESOURCE = "_eek";
   public static final String CURRENT_VERSION_SUB_RESOURCE = "_currentversion";
 
-  public static final String KEY_OP = "key";
+  public static final String KEY = "key";
   public static final String EEK_OP = "eek_op";
   public static final String EEK_GENERATE = "generate";
   public static final String EEK_DECRYPT = "decrypt";

+ 122 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java

@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.random;
+
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.util.NativeCodeLoader;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.PerformanceAdvisory;
+
+/**
+ * OpenSSL secure random using JNI.
+ * This implementation is thread-safe.
+ * <p/>
+ * 
+ * If using an Intel chipset with RDRAND, the high-performance hardware 
+ * random number generator will be used and it's much faster than
+ * {@link java.security.SecureRandom}. If RDRAND is unavailable, default
+ * OpenSSL secure random generator will be used. It's still faster
+ * and can generate strong random bytes.
+ * <p/>
+ * @see https://wiki.openssl.org/index.php/Random_Numbers
+ * @see http://en.wikipedia.org/wiki/RdRand
+ */
+@InterfaceAudience.Private
+public class OpensslSecureRandom extends Random {
+  private static final long serialVersionUID = -7828193502768789584L;
+  private static final Log LOG =
+      LogFactory.getLog(OpensslSecureRandom.class.getName());
+  
+  /** If native SecureRandom unavailable, use java SecureRandom */
+  private java.security.SecureRandom fallback = null;
+  private static boolean nativeEnabled = false;
+  static {
+    if (NativeCodeLoader.isNativeCodeLoaded() &&
+        NativeCodeLoader.buildSupportsOpenssl()) {
+      try {
+        initSR();
+        nativeEnabled = true;
+      } catch (Throwable t) {
+        LOG.error("Failed to load Openssl SecureRandom", t);
+      }
+    }
+  }
+  
+  public static boolean isNativeCodeLoaded() {
+    return nativeEnabled;
+  }
+  
+  public OpensslSecureRandom() {
+    if (!nativeEnabled) {
+      PerformanceAdvisory.LOG.debug("Build does not support openssl, " +
+          "falling back to Java SecureRandom.");
+      fallback = new java.security.SecureRandom();
+    }
+  }
+  
+  /**
+   * Generates a user-specified number of random bytes.
+   * It's thread-safe.
+   * 
+   * @param bytes the array to be filled in with random bytes.
+   */
+  @Override
+  public void nextBytes(byte[] bytes) {
+    if (!nativeEnabled || !nextRandBytes(bytes)) {
+      fallback.nextBytes(bytes);
+    }
+  }
+  
+  @Override
+  public void setSeed(long seed) {
+    // Self-seeding.
+  }
+  
+  /**
+   * Generates an integer containing the user-specified number of
+   * random bits (right justified, with leading zeros).
+   *
+   * @param numBits number of random bits to be generated, where
+   * 0 <= <code>numBits</code> <= 32.
+   *
+   * @return int an <code>int</code> containing the user-specified number
+   * of random bits (right justified, with leading zeros).
+   */
+  @Override
+  final protected int next(int numBits) {
+    Preconditions.checkArgument(numBits >= 0 && numBits <= 32);
+    int numBytes = (numBits + 7) / 8;
+    byte b[] = new byte[numBytes];
+    int next = 0;
+    
+    nextBytes(b);
+    for (int i = 0; i < numBytes; i++) {
+      next = (next << 8) + (b[i] & 0xFF);
+    }
+    
+    return next >>> (numBytes * 8 - numBits);
+  }
+  
+  private native static void initSR();
+  private native boolean nextRandBytes(byte[] bytes); 
+}

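Because OpensslSecureRandom extends java.util.Random and silently falls back to java.security.SecureRandom, a caller can use it without checking for the native library first; a minimal sketch:

    import java.util.Random;

    import org.apache.hadoop.crypto.random.OpensslSecureRandom;

    public class OpensslRandomSketch {
      public static void main(String[] args) {
        // Uses the OpenSSL (and, where present, RDRAND-backed) generator when the
        // native code is loaded, otherwise java.security.SecureRandom.
        Random rng = new OpensslSecureRandom();
        byte[] iv = new byte[16];
        rng.nextBytes(iv);
        System.out.println("native backed: " + OpensslSecureRandom.isNativeCodeLoaded());
      }
    }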
+ 115 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OsSecureRandom.java

@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.random;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.util.Random;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT;
+
+/**
+ * A Random implementation that uses random bytes sourced from the
+ * operating system.
+ */
+@InterfaceAudience.Private
+public class OsSecureRandom extends Random implements Closeable, Configurable {
+  private static final long serialVersionUID = 6391500337172057900L;
+
+  private transient Configuration conf;
+
+  private final int RESERVOIR_LENGTH = 8192;
+
+  private String randomDevPath;
+
+  private transient FileInputStream stream;
+
+  private final byte[] reservoir = new byte[RESERVOIR_LENGTH];
+
+  private int pos = reservoir.length;
+
+  private void fillReservoir(int min) {
+    if (pos >= reservoir.length - min) {
+      try {
+        IOUtils.readFully(stream, reservoir, 0, reservoir.length);
+      } catch (IOException e) {
+        throw new RuntimeException("failed to fill reservoir", e);
+      }
+      pos = 0;
+    }
+  }
+
+  public OsSecureRandom() {
+  }
+  
+  @Override
+  synchronized public void setConf(Configuration conf) {
+    this.conf = conf;
+    this.randomDevPath = conf.get(
+        HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY,
+        HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT);
+    File randomDevFile = new File(randomDevPath);
+    try {
+      this.stream = new FileInputStream(randomDevFile);
+      fillReservoir(0);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  synchronized public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  synchronized public void nextBytes(byte[] bytes) {
+    int off = 0;
+    int n = 0;
+    while (off < bytes.length) {
+      fillReservoir(0);
+      n = Math.min(bytes.length - off, reservoir.length - pos);
+      System.arraycopy(reservoir, pos, bytes, off, n);
+      off += n;
+      pos += n;
+    }
+  }
+
+  @Override
+  synchronized protected int next(int nbits) {
+    fillReservoir(4);
+    int n = 0;
+    for (int i = 0; i < 4; i++) {
+      n = ((n << 8) | (reservoir[pos++] & 0xff));
+    }
+    return n & (0xffffffff >> (32 - nbits));
+  }
+
+  @Override
+  synchronized public void close() throws IOException {
+    stream.close();
+  }
+}
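
For context, a minimal usage sketch of the class above (not part of the patch): configure the device file, pull some bytes, and close. It assumes Hadoop on the classpath and a readable /dev/urandom.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.random.OsSecureRandom;

    public class OsSecureRandomSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Optional override; the default is already /dev/urandom.
        conf.set("hadoop.security.random.device.file.path", "/dev/urandom");

        OsSecureRandom random = new OsSecureRandom();
        random.setConf(conf);      // opens the device and pre-fills the 8 KB reservoir
        try {
          byte[] key = new byte[16];
          random.nextBytes(key);   // served from the reservoir, refilled as needed
          System.out.println("got " + key.length + " random bytes");
        } finally {
          random.close();          // closes the underlying FileInputStream
        }
      }
    }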

+ 28 - 75
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java

@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
@@ -803,6 +804,18 @@ public abstract class AbstractFileSystem {
       throws AccessControlException, FileNotFoundException,
       throws AccessControlException, FileNotFoundException,
       UnresolvedLinkException, IOException;
       UnresolvedLinkException, IOException;
 
 
+  /**
+   * The specification of this method matches that of
+   * {@link FileContext#access(Path, FsAction)}
+   * except that an UnresolvedLinkException may be thrown if a symlink is
+   * encountered in the path.
+   */
+  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    FileSystem.checkAccessPermissions(this.getFileStatus(path), mode);
+  }
+
   /**
   /**
    * The specification of this method matches that of
    * The specification of this method matches that of
    * {@link FileContext#getFileLinkStatus(Path)}
    * {@link FileContext#getFileLinkStatus(Path)}
@@ -1040,21 +1053,10 @@ public abstract class AbstractFileSystem {
 
 
   /**
   /**
    * Set an xattr of a file or directory.
    * Set an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only set an xattr for the "user" namespace.
-   * The super user can set an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
    * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    *
    * @param path Path to modify
    * @param path Path to modify
    * @param name xattr name.
    * @param name xattr name.
@@ -1069,21 +1071,10 @@ public abstract class AbstractFileSystem {
 
 
   /**
   /**
    * Set an xattr of a file or directory.
    * Set an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
    * <p/>
-   * A regular user can only set an xattr for the "user" namespace.
-   * The super user can set an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    *
    * @param path Path to modify
    * @param path Path to modify
    * @param name xattr name.
    * @param name xattr name.
@@ -1099,18 +1090,10 @@ public abstract class AbstractFileSystem {
 
 
   /**
   /**
    * Get an xattr for a file or directory.
    * Get an xattr for a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only get an xattr for the "user" namespace.
-   * The super user can get an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
    * <p/>
-   * An xattr will only be returned when the logged-in user has the correct permissions.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    *
    * @param path Path to get extended attribute
    * @param path Path to get extended attribute
    * @param name xattr name.
    * @param name xattr name.
@@ -1127,13 +1110,7 @@ public abstract class AbstractFileSystem {
    * Only those xattrs for which the logged-in user has permissions to view
    * Only those xattrs for which the logged-in user has permissions to view
    * are returned.
    * are returned.
    * <p/>
    * <p/>
-   * A regular user can only get xattrs for the "user" namespace.
-   * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattr of "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    *
    * @param path Path to get extended attributes
    * @param path Path to get extended attributes
    * @return Map<String, byte[]> describing the XAttrs of the file or directory
    * @return Map<String, byte[]> describing the XAttrs of the file or directory
@@ -1149,13 +1126,7 @@ public abstract class AbstractFileSystem {
    * Only those xattrs for which the logged-in user has permissions to view
    * Only those xattrs for which the logged-in user has permissions to view
    * are returned.
    * are returned.
    * <p/>
    * <p/>
-   * A regular user can only get xattrs for the "user" namespace.
-   * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattr of "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    *
    * @param path Path to get extended attributes
    * @param path Path to get extended attributes
    * @param names XAttr names.
    * @param names XAttr names.
@@ -1173,14 +1144,7 @@ public abstract class AbstractFileSystem {
    * Only the xattr names for which the logged-in user has permissions to view
    * Only the xattr names for which the logged-in user has permissions to view
    * are returned.
    * are returned.
    * <p/>
    * <p/>
-   * A regular user can only get xattr names for the "user" namespace.
-   * The super user can only get xattr names for the "user" and "trusted"
-   * namespaces.
-   * The xattr names in the "security" and "system" namespaces are only
-   * used/exposed internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    *
    * @param path Path to get extended attributes
    * @param path Path to get extended attributes
    * @return Map<String, byte[]> describing the XAttrs of the file or directory
    * @return Map<String, byte[]> describing the XAttrs of the file or directory
@@ -1194,21 +1158,10 @@ public abstract class AbstractFileSystem {
 
 
   /**
   /**
    * Remove an xattr of a file or directory.
    * Remove an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only remove an xattr for the "user" namespace.
-   * The super user can remove an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
    * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    *
    * @param path Path to remove extended attribute
    * @param path Path to remove extended attribute
    * @param name xattr name
    * @param name xattr name
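
Since the shortened javadoc above only states the "namespace.name" rule, here is an illustrative sketch (not part of the patch) of that convention via the FileSystem xattr API; the path and attribute name are made up, and the namespace semantics live in the HDFS extended attributes user documentation.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class XAttrNamingSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/tmp/xattr-demo.txt");

        // Name = namespace ("user") + "." + attribute ("checksum").
        fs.setXAttr(file, "user.checksum", "sha256:abc123".getBytes("UTF-8"));
        byte[] value = fs.getXAttr(file, "user.checksum");
        System.out.println(new String(value, "UTF-8"));
        fs.removeXAttr(file, "user.checksum");
      }
    }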

+ 5 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java

@@ -381,7 +381,8 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
                           long blockSize,
                           long blockSize,
                           Progressable progress)
                           Progressable progress)
       throws IOException {
       throws IOException {
-      super(DataChecksum.newCrc32(), fs.getBytesPerSum(), 4);
+      super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
+          fs.getBytesPerSum()));
       int bytesPerSum = fs.getBytesPerSum();
       int bytesPerSum = fs.getBytesPerSum();
       this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize, 
       this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize, 
                                          replication, blockSize, progress);
                                          replication, blockSize, progress);
@@ -405,10 +406,11 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
     }
     }
     
     
     @Override
     @Override
-    protected void writeChunk(byte[] b, int offset, int len, byte[] checksum)
+    protected void writeChunk(byte[] b, int offset, int len, byte[] checksum,
+        int ckoff, int cklen)
     throws IOException {
     throws IOException {
       datas.write(b, offset, len);
       datas.write(b, offset, len);
-      sums.write(checksum);
+      sums.write(checksum, ckoff, cklen);
     }
     }
 
 
     @Override
     @Override
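
The signature change above means writeChunk now receives a slice (ckoff, cklen) of a packed checksum buffer instead of a standalone per-chunk array. A small sketch (not part of the patch) of that packing, with ByteArrayOutputStreams standing in for the data and sums streams:

    import java.io.ByteArrayOutputStream;

    import org.apache.hadoop.util.DataChecksum;

    public class ChunkedChecksumSketch {
      public static void main(String[] args) {
        int bytesPerSum = 512;
        DataChecksum sum =
            DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, bytesPerSum);

        byte[] data = new byte[3 * bytesPerSum];               // three full chunks
        byte[] checksums = new byte[3 * sum.getChecksumSize()];
        sum.calculateChunkedSums(data, 0, data.length, checksums, 0);

        ByteArrayOutputStream datas = new ByteArrayOutputStream();
        ByteArrayOutputStream sums = new ByteArrayOutputStream();
        for (int i = 0; i < data.length; i += bytesPerSum) {
          int ckoff = (i / bytesPerSum) * sum.getChecksumSize();
          datas.write(data, i, bytesPerSum);                    // the data chunk
          sums.write(checksums, ckoff, sum.getChecksumSize());  // just its CRC slice
        }
        System.out.println(sums.size() + " checksum bytes written");  // 12
      }
    }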

+ 5 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java

@@ -337,7 +337,8 @@ public abstract class ChecksumFs extends FilterFs {
       final short replication, final long blockSize, 
       final short replication, final long blockSize, 
       final Progressable progress, final ChecksumOpt checksumOpt,
       final Progressable progress, final ChecksumOpt checksumOpt,
       final boolean createParent) throws IOException {
       final boolean createParent) throws IOException {
-      super(DataChecksum.newCrc32(), fs.getBytesPerSum(), 4);
+      super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
+          fs.getBytesPerSum()));
 
 
       // checksumOpt is passed down to the raw fs. Unless it implements
       // checksumOpt is passed down to the raw fs. Unless it implements
       // checksum impelemts internally, checksumOpt will be ignored.
       // checksum impelemts internally, checksumOpt will be ignored.
@@ -370,10 +371,11 @@ public abstract class ChecksumFs extends FilterFs {
     }
     }
     
     
     @Override
     @Override
-    protected void writeChunk(byte[] b, int offset, int len, byte[] checksum)
+    protected void writeChunk(byte[] b, int offset, int len, byte[] checksum,
+        int ckoff, int cklen)
       throws IOException {
       throws IOException {
       datas.write(b, offset, len);
       datas.write(b, offset, len);
-      sums.write(checksum);
+      sums.write(checksum, ckoff, cklen);
     }
     }
 
 
     @Override
     @Override

+ 3 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -134,6 +134,9 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL = 
   HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL = 
       "security.service.authorization.default.acl";
       "security.service.authorization.default.acl";
   public static final String 
   public static final String 
+  HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_BLOCKED_ACL =
+      "security.service.authorization.default.acl.blocked";
+  public static final String
   HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_POLICY = 
   HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_POLICY = 
       "security.refresh.policy.protocol.acl";
       "security.refresh.policy.protocol.acl";
   public static final String 
   public static final String 
@@ -280,5 +283,4 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
   public static final String NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
   public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY = "nfs.exports.allowed.hosts";
   public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY = "nfs.exports.allowed.hosts";
   public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
   public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
-
 }
 }
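
The new key above supplies a cluster-wide default for blocked ACLs in service-level authorization. A hedged configuration sketch (the user and group names are invented; how the key is consumed is up to the authorization code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class BlockedAclConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Standard Hadoop ACL syntax: "user1,user2 group1,group2".
        conf.set(CommonConfigurationKeys
                .HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_BLOCKED_ACL,
            "blockeduser blockedgroup");
        System.out.println(
            conf.get("security.service.authorization.default.acl.blocked"));
      }
    }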

+ 32 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -207,7 +207,7 @@ public class CommonConfigurationKeysPublic {
   public static final String  IPC_CLIENT_TCPNODELAY_KEY =
   public static final String  IPC_CLIENT_TCPNODELAY_KEY =
     "ipc.client.tcpnodelay";
     "ipc.client.tcpnodelay";
   /** Defalt value for IPC_CLIENT_TCPNODELAY_KEY */
   /** Defalt value for IPC_CLIENT_TCPNODELAY_KEY */
-  public static final boolean IPC_CLIENT_TCPNODELAY_DEFAULT = false;
+  public static final boolean IPC_CLIENT_TCPNODELAY_DEFAULT = true;
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  IPC_SERVER_LISTEN_QUEUE_SIZE_KEY =
   public static final String  IPC_SERVER_LISTEN_QUEUE_SIZE_KEY =
     "ipc.server.listen.queue.size";
     "ipc.server.listen.queue.size";
@@ -226,7 +226,7 @@ public class CommonConfigurationKeysPublic {
   public static final String  IPC_SERVER_TCPNODELAY_KEY =
   public static final String  IPC_SERVER_TCPNODELAY_KEY =
     "ipc.server.tcpnodelay";
     "ipc.server.tcpnodelay";
   /** Default value for IPC_SERVER_TCPNODELAY_KEY */
   /** Default value for IPC_SERVER_TCPNODELAY_KEY */
-  public static final boolean IPC_SERVER_TCPNODELAY_DEFAULT = false;
+  public static final boolean IPC_SERVER_TCPNODELAY_DEFAULT = true;
 
 
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String  HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY =
   public static final String  HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY =
@@ -288,6 +288,21 @@ public class CommonConfigurationKeysPublic {
   /** Class to override Sasl Properties for a connection */
   /** Class to override Sasl Properties for a connection */
   public static final String  HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS =
   public static final String  HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS =
     "hadoop.security.saslproperties.resolver.class";
     "hadoop.security.saslproperties.resolver.class";
+  public static final String HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX = 
+    "hadoop.security.crypto.codec.classes";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_KEY =
+    "hadoop.security.crypto.cipher.suite";
+  public static final String HADOOP_SECURITY_CRYPTO_CIPHER_SUITE_DEFAULT = 
+    "AES/CTR/NoPadding";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_CRYPTO_JCE_PROVIDER_KEY =
+    "hadoop.security.crypto.jce.provider";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY = 
+    "hadoop.security.crypto.buffer.size";
+  /** Default value for HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY */
+  public static final int HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT = 8192;
   /** Class to override Impersonation provider */
   /** Class to override Impersonation provider */
   public static final String  HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS =
   public static final String  HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS =
     "hadoop.security.impersonation.provider.class";
     "hadoop.security.impersonation.provider.class";
@@ -318,5 +333,20 @@ public class CommonConfigurationKeysPublic {
       "hadoop.security.kms.client.encrypted.key.cache.expiry";
       "hadoop.security.kms.client.encrypted.key.cache.expiry";
   /** Default value for KMS_CLIENT_ENC_KEY_CACHE_EXPIRY (12 hrs)*/
   /** Default value for KMS_CLIENT_ENC_KEY_CACHE_EXPIRY (12 hrs)*/
   public static final int KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT = 43200000;
   public static final int KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT = 43200000;
+
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY = 
+    "hadoop.security.java.secure.random.algorithm";
+  /** Default value for HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY */
+  public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_DEFAULT = 
+    "SHA1PRNG";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_SECURE_RANDOM_IMPL_KEY = 
+    "hadoop.security.secure.random.impl";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY = 
+    "hadoop.security.random.device.file.path";
+  public static final String HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT = 
+    "/dev/urandom";
 }
 }
 
 

+ 37 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java

@@ -24,6 +24,7 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.util.StringUtils;
 
 
 /** Store the summary of a content (a directory or a file). */
 /** Store the summary of a content (a directory or a file). */
 @InterfaceAudience.Public
 @InterfaceAudience.Public
@@ -102,7 +103,7 @@ public class ContentSummary implements Writable{
    * <----12----> <----12----> <-------18------->
    * <----12----> <----12----> <-------18------->
    *    DIR_COUNT   FILE_COUNT       CONTENT_SIZE FILE_NAME    
    *    DIR_COUNT   FILE_COUNT       CONTENT_SIZE FILE_NAME    
    */
    */
-  private static final String STRING_FORMAT = "%12d %12d %18d ";
+  private static final String STRING_FORMAT = "%12s %12s %18s ";
   /** 
   /** 
    * Output format:
    * Output format:
    * <----12----> <----15----> <----15----> <----15----> <----12----> <----12----> <-------18------->
    * <----12----> <----15----> <----15----> <----15----> <----12----> <----12----> <-------18------->
@@ -117,7 +118,7 @@ public class ContentSummary implements Writable{
 
 
   private static final String QUOTA_HEADER = String.format(
   private static final String QUOTA_HEADER = String.format(
       QUOTA_STRING_FORMAT + SPACE_QUOTA_STRING_FORMAT, 
       QUOTA_STRING_FORMAT + SPACE_QUOTA_STRING_FORMAT, 
-      "quota", "remaining quota", "space quota", "reamaining quota") +
+      "name quota", "rem name quota", "space quota", "rem space quota") +
       HEADER;
       HEADER;
   
   
   /** Return the header of the output.
   /** Return the header of the output.
@@ -139,11 +140,25 @@ public class ContentSummary implements Writable{
   /** Return the string representation of the object in the output format.
   /** Return the string representation of the object in the output format.
    * if qOption is false, output directory count, file count, and content size;
    * if qOption is false, output directory count, file count, and content size;
    * if qOption is true, output quota and remaining quota as well.
    * if qOption is true, output quota and remaining quota as well.
+   *
+   * @param qOption a flag indicating if quota needs to be printed or not
+   * @return the string representation of the object
+  */
+  public String toString(boolean qOption) {
+    return toString(qOption, false);
+  }
+
+  /** Return the string representation of the object in the output format.
+   * if qOption is false, output directory count, file count, and content size;
+   * if qOption is true, output quota and remaining quota as well.
+   * if hOption is false file sizes are returned in bytes
+   * if hOption is true file sizes are returned in human readable format
    * 
    * 
    * @param qOption a flag indicating if quota needs to be printed or not
    * @param qOption a flag indicating if quota needs to be printed or not
+   * @param hOption a flag indicating if human readable output is to be used
    * @return the string representation of the object
    * @return the string representation of the object
    */
    */
-  public String toString(boolean qOption) {
+  public String toString(boolean qOption, boolean hOption) {
     String prefix = "";
     String prefix = "";
     if (qOption) {
     if (qOption) {
       String quotaStr = "none";
       String quotaStr = "none";
@@ -152,19 +167,32 @@ public class ContentSummary implements Writable{
       String spaceQuotaRem = "inf";
       String spaceQuotaRem = "inf";
       
       
       if (quota>0) {
       if (quota>0) {
-        quotaStr = Long.toString(quota);
-        quotaRem = Long.toString(quota-(directoryCount+fileCount));
+        quotaStr = formatSize(quota, hOption);
+        quotaRem = formatSize(quota-(directoryCount+fileCount), hOption);
       }
       }
       if (spaceQuota>0) {
       if (spaceQuota>0) {
-        spaceQuotaStr = Long.toString(spaceQuota);
-        spaceQuotaRem = Long.toString(spaceQuota - spaceConsumed);        
+        spaceQuotaStr = formatSize(spaceQuota, hOption);
+        spaceQuotaRem = formatSize(spaceQuota - spaceConsumed, hOption);
       }
       }
       
       
       prefix = String.format(QUOTA_STRING_FORMAT + SPACE_QUOTA_STRING_FORMAT, 
       prefix = String.format(QUOTA_STRING_FORMAT + SPACE_QUOTA_STRING_FORMAT, 
                              quotaStr, quotaRem, spaceQuotaStr, spaceQuotaRem);
                              quotaStr, quotaRem, spaceQuotaStr, spaceQuotaRem);
     }
     }
     
     
-    return prefix + String.format(STRING_FORMAT, directoryCount, 
-                                  fileCount, length);
+    return prefix + String.format(STRING_FORMAT,
+     formatSize(directoryCount, hOption),
+     formatSize(fileCount, hOption),
+     formatSize(length, hOption));
+  }
+  /**
+   * Formats a size to be human readable or in bytes
+   * @param size value to be formatted
+   * @param humanReadable flag indicating human readable or not
+   * @return String representation of the size
+  */
+  private String formatSize(long size, boolean humanReadable) {
+    return humanReadable
+      ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
+      : String.valueOf(size);
   }
   }
 }
 }

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStream.java

@@ -102,7 +102,7 @@ public class FSDataOutputStream extends DataOutputStream
   }
   }
 
 
   /**
   /**
-   * Get a reference to the wrapped output stream. Used by unit tests.
+   * Get a reference to the wrapped output stream.
    *
    *
    * @return the underlying output stream
    * @return the underlying output stream
    */
    */

+ 70 - 37
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java

@@ -18,13 +18,14 @@
 
 
 package org.apache.hadoop.fs;
 package org.apache.hadoop.fs;
 
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.DataChecksum;
+
 import java.io.IOException;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.OutputStream;
 import java.util.zip.Checksum;
 import java.util.zip.Checksum;
 
 
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
 /**
 /**
  * This is a generic output stream for generating checksums for
  * This is a generic output stream for generating checksums for
  * data before it is written to the underlying stream
  * data before it is written to the underlying stream
@@ -33,7 +34,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceStability.Unstable
 @InterfaceStability.Unstable
 abstract public class FSOutputSummer extends OutputStream {
 abstract public class FSOutputSummer extends OutputStream {
   // data checksum
   // data checksum
-  private Checksum sum;
+  private final DataChecksum sum;
   // internal buffer for storing data before it is checksumed
   // internal buffer for storing data before it is checksumed
   private byte buf[];
   private byte buf[];
   // internal buffer for storing checksum
   // internal buffer for storing checksum
@@ -41,18 +42,24 @@ abstract public class FSOutputSummer extends OutputStream {
   // The number of valid bytes in the buffer.
   // The number of valid bytes in the buffer.
   private int count;
   private int count;
   
   
-  protected FSOutputSummer(Checksum sum, int maxChunkSize, int checksumSize) {
+  // We want this value to be a multiple of 3 because the native code checksums
+  // 3 chunks simultaneously. The chosen value of 9 strikes a balance between
+  // limiting the number of JNI calls and flushing to the underlying stream
+  // relatively frequently.
+  private static final int BUFFER_NUM_CHUNKS = 9;
+  
+  protected FSOutputSummer(DataChecksum sum) {
     this.sum = sum;
     this.sum = sum;
-    this.buf = new byte[maxChunkSize];
-    this.checksum = new byte[checksumSize];
+    this.buf = new byte[sum.getBytesPerChecksum() * BUFFER_NUM_CHUNKS];
+    this.checksum = new byte[sum.getChecksumSize() * BUFFER_NUM_CHUNKS];
     this.count = 0;
     this.count = 0;
   }
   }
   
   
   /* write the data chunk in <code>b</code> staring at <code>offset</code> with
   /* write the data chunk in <code>b</code> staring at <code>offset</code> with
-   * a length of <code>len</code>, and its checksum
+   * a length of <code>len > 0</code>, and its checksum
    */
    */
-  protected abstract void writeChunk(byte[] b, int offset, int len, byte[] checksum)
-  throws IOException;
+  protected abstract void writeChunk(byte[] b, int bOffset, int bLen,
+      byte[] checksum, int checksumOffset, int checksumLen) throws IOException;
   
   
   /**
   /**
    * Check if the implementing OutputStream is closed and should no longer
    * Check if the implementing OutputStream is closed and should no longer
@@ -66,7 +73,6 @@ abstract public class FSOutputSummer extends OutputStream {
   /** Write one byte */
   /** Write one byte */
   @Override
   @Override
   public synchronized void write(int b) throws IOException {
   public synchronized void write(int b) throws IOException {
-    sum.update(b);
     buf[count++] = (byte)b;
     buf[count++] = (byte)b;
     if(count == buf.length) {
     if(count == buf.length) {
       flushBuffer();
       flushBuffer();
@@ -111,18 +117,17 @@ abstract public class FSOutputSummer extends OutputStream {
    */
    */
   private int write1(byte b[], int off, int len) throws IOException {
   private int write1(byte b[], int off, int len) throws IOException {
     if(count==0 && len>=buf.length) {
     if(count==0 && len>=buf.length) {
-      // local buffer is empty and user data has one chunk
-      // checksum and output data
+      // local buffer is empty and user buffer size >= local buffer size, so
+      // simply checksum the user buffer and send it directly to the underlying
+      // stream
       final int length = buf.length;
       final int length = buf.length;
-      sum.update(b, off, length);
-      writeChecksumChunk(b, off, length, false);
+      writeChecksumChunks(b, off, length);
       return length;
       return length;
     }
     }
     
     
     // copy user data to local buffer
     // copy user data to local buffer
     int bytesToCopy = buf.length-count;
     int bytesToCopy = buf.length-count;
     bytesToCopy = (len<bytesToCopy) ? len : bytesToCopy;
     bytesToCopy = (len<bytesToCopy) ? len : bytesToCopy;
-    sum.update(b, off, bytesToCopy);
     System.arraycopy(b, off, buf, count, bytesToCopy);
     System.arraycopy(b, off, buf, count, bytesToCopy);
     count += bytesToCopy;
     count += bytesToCopy;
     if (count == buf.length) {
     if (count == buf.length) {
@@ -136,22 +141,45 @@ abstract public class FSOutputSummer extends OutputStream {
    * the underlying output stream. 
    * the underlying output stream. 
    */
    */
   protected synchronized void flushBuffer() throws IOException {
   protected synchronized void flushBuffer() throws IOException {
-    flushBuffer(false);
+    flushBuffer(false, true);
   }
   }
 
 
-  /* Forces any buffered output bytes to be checksumed and written out to
-   * the underlying output stream.  If keep is true, then the state of 
-   * this object remains intact.
+  /* Forces buffered output bytes to be checksummed and written out to
+   * the underlying output stream. If there is a trailing partial chunk in the
+   * buffer,
+   * 1) flushPartial tells us whether to flush that chunk
+   * 2) if flushPartial is true, keep tells us whether to keep that chunk in the
+   * buffer (if flushPartial is false, it is always kept in the buffer)
+   *
+   * Returns the number of bytes that were flushed but are still left in the
+   * buffer (can only be non-zero if keep is true).
    */
    */
-  protected synchronized void flushBuffer(boolean keep) throws IOException {
-    if (count != 0) {
-      int chunkLen = count;
+  protected synchronized int flushBuffer(boolean keep,
+      boolean flushPartial) throws IOException {
+    int bufLen = count;
+    int partialLen = bufLen % sum.getBytesPerChecksum();
+    int lenToFlush = flushPartial ? bufLen : bufLen - partialLen;
+    if (lenToFlush != 0) {
+      writeChecksumChunks(buf, 0, lenToFlush);
+      if (!flushPartial || keep) {
+        count = partialLen;
+        System.arraycopy(buf, bufLen - count, buf, 0, count);
+      } else {
       count = 0;
       count = 0;
-      writeChecksumChunk(buf, 0, chunkLen, keep);
-      if (keep) {
-        count = chunkLen;
       }
       }
     }
     }
+
+    // total bytes left minus unflushed bytes left
+    return count - (bufLen - lenToFlush);
+  }
+
+  /**
+   * Checksums all complete data chunks and flushes them to the underlying
+   * stream. If there is a trailing partial chunk, it is not flushed and is
+   * maintained in the buffer.
+   */
+  public void flush() throws IOException {
+    flushBuffer(false, false);
   }
   }
 
 
   /**
   /**
@@ -161,18 +189,18 @@ abstract public class FSOutputSummer extends OutputStream {
     return count;
     return count;
   }
   }
   
   
-  /** Generate checksum for the data chunk and output data chunk & checksum
-   * to the underlying output stream. If keep is true then keep the
-   * current checksum intact, do not reset it.
+  /** Generate checksums for the given data chunks and output chunks & checksums
+   * to the underlying output stream.
    */
    */
-  private void writeChecksumChunk(byte b[], int off, int len, boolean keep)
+  private void writeChecksumChunks(byte b[], int off, int len)
   throws IOException {
   throws IOException {
-    int tempChecksum = (int)sum.getValue();
-    if (!keep) {
-      sum.reset();
+    sum.calculateChunkedSums(b, off, len, checksum, 0);
+    for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
+      int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
+      int ckOffset = i / sum.getBytesPerChecksum() * sum.getChecksumSize();
+      writeChunk(b, off + i, chunkLen, checksum, ckOffset,
+          sum.getChecksumSize());
     }
     }
-    int2byte(tempChecksum, checksum);
-    writeChunk(b, off, len, checksum);
   }
   }
 
 
   /**
   /**
@@ -196,9 +224,14 @@ abstract public class FSOutputSummer extends OutputStream {
   /**
   /**
    * Resets existing buffer with a new one of the specified size.
    * Resets existing buffer with a new one of the specified size.
    */
    */
-  protected synchronized void resetChecksumChunk(int size) {
-    sum.reset();
+  protected synchronized void setChecksumBufSize(int size) {
     this.buf = new byte[size];
     this.buf = new byte[size];
+    this.checksum = new byte[((size - 1) / sum.getBytesPerChecksum() + 1) *
+        sum.getChecksumSize()];
     this.count = 0;
     this.count = 0;
   }
   }
+
+  protected synchronized void resetChecksumBufSize() {
+    setChecksumBufSize(sum.getBytesPerChecksum() * BUFFER_NUM_CHUNKS);
+  }
 }
 }
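
To make the sizing and offset arithmetic above concrete, a worked sketch (not part of the patch) assuming CRC32 (4-byte checksums) and 512 bytes per checksum, the common defaults:

    public class SummerBufferMathSketch {
      public static void main(String[] args) {
        int bytesPerChecksum = 512;   // sum.getBytesPerChecksum()
        int checksumSize = 4;         // sum.getChecksumSize() for CRC32
        int bufferNumChunks = 9;      // BUFFER_NUM_CHUNKS

        // Buffer sizes chosen in the constructor: a 4608-byte data buffer and
        // a 36-byte packed checksum buffer.
        System.out.println(bytesPerChecksum * bufferNumChunks + " / "
            + checksumSize * bufferNumChunks);

        // writeChecksumChunks maps each chunk's data offset to its checksum offset.
        for (int i = 0; i < 3 * bytesPerChecksum; i += bytesPerChecksum) {
          int ckOffset = i / bytesPerChecksum * checksumSize;
          System.out.println("data offset " + i + " -> checksum offset " + ckOffset);
        }

        // flush() with 1000 bytes buffered: one full 512-byte chunk goes out,
        // the 488-byte partial chunk stays behind (flushPartial == false).
        int count = 1000;
        int partialLen = count % bytesPerChecksum;   // 488
        int lenToFlush = count - partialLen;         // 512
        System.out.println(lenToFlush + " flushed, " + partialLen + " retained");
      }
    }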

+ 65 - 76
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
@@ -1108,6 +1109,55 @@ public final class FileContext {
     }.resolve(this, absF);
     }.resolve(this, absF);
   }
   }
 
 
+  /**
+   * Checks if the user can access a path.  The mode specifies which access
+   * checks to perform.  If the requested permissions are granted, then the
+   * method returns normally.  If access is denied, then the method throws an
+   * {@link AccessControlException}.
+   * <p/>
+   * The default implementation of this method calls {@link #getFileStatus(Path)}
+   * and checks the returned permissions against the requested permissions.
+   * Note that the getFileStatus call will be subject to authorization checks.
+   * Typically, this requires search (execute) permissions on each directory in
+   * the path's prefix, but this is implementation-defined.  Any file system
+   * that provides a richer authorization model (such as ACLs) may override the
+   * default implementation so that it checks against that model instead.
+   * <p>
+   * In general, applications should avoid using this method, due to the risk of
+   * time-of-check/time-of-use race conditions.  The permissions on a file may
+   * change immediately after the access call returns.  Most applications should
+   * prefer running specific file system actions as the desired user represented
+   * by a {@link UserGroupInformation}.
+   *
+   * @param path Path to check
+   * @param mode type of access to check
+   * @throws AccessControlException if access is denied
+   * @throws FileNotFoundException if the path does not exist
+   * @throws UnsupportedFileSystemException if file system for <code>path</code>
+   *   is not supported
+   * @throws IOException see specific implementation
+   * 
+   * Exceptions applicable to file systems accessed over RPC:
+   * @throws RpcClientException If an exception occurred in the RPC client
+   * @throws RpcServerException If an exception occurred in the RPC server
+   * @throws UnexpectedServerException If server implementation throws 
+   *           undeclared exception to RPC server
+   */
+  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+  public void access(final Path path, final FsAction mode)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    final Path absPath = fixRelativePart(path);
+    new FSLinkResolver<Void>() {
+      @Override
+      public Void next(AbstractFileSystem fs, Path p) throws IOException,
+          UnresolvedLinkException {
+        fs.access(p, mode);
+        return null;
+      }
+    }.resolve(this, absPath);
+  }
+
   /**
   /**
    * Return a file status object that represents the path. If the path 
    * Return a file status object that represents the path. If the path 
    * refers to a symlink then the FileStatus of the symlink is returned.
    * refers to a symlink then the FileStatus of the symlink is returned.
@@ -2297,21 +2347,10 @@ public final class FileContext {
 
 
   /**
   /**
    * Set an xattr of a file or directory.
    * Set an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only set an xattr for the "user" namespace.
-   * The super user can set an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
    * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    *
    * @param path Path to modify
    * @param path Path to modify
    * @param name xattr name.
    * @param name xattr name.
@@ -2326,21 +2365,10 @@ public final class FileContext {
 
 
   /**
   /**
    * Set an xattr of a file or directory.
    * Set an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only set an xattr for the "user" namespace.
-   * The super user can set an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
    * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    *
    * @param path Path to modify
    * @param path Path to modify
    * @param name xattr name.
    * @param name xattr name.
@@ -2363,19 +2391,10 @@ public final class FileContext {
 
 
   /**
   /**
    * Get an xattr for a file or directory.
    * Get an xattr for a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
    * <p/>
-   * 
-   * A regular user can only get an xattr for the "user" namespace.
-   * The super user can get an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * An xattr will only be returned when the logged-in user has the correct permissions.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    *
    * @param path Path to get extended attribute
    * @param path Path to get extended attribute
    * @param name xattr name.
    * @param name xattr name.
@@ -2398,13 +2417,7 @@ public final class FileContext {
    * Only those xattrs for which the logged-in user has permissions to view
    * Only those xattrs for which the logged-in user has permissions to view
    * are returned.
    * are returned.
    * <p/>
    * <p/>
-   * A regular user can only get xattrs for the "user" namespace.
-   * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattr of "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    *
    * @param path Path to get extended attributes
    * @param path Path to get extended attributes
    * @return Map<String, byte[]> describing the XAttrs of the file or directory
    * @return Map<String, byte[]> describing the XAttrs of the file or directory
@@ -2426,13 +2439,7 @@ public final class FileContext {
    * Only those xattrs for which the logged-in user has permissions to view
    * Only those xattrs for which the logged-in user has permissions to view
    * are returned.
    * are returned.
    * <p/>
    * <p/>
-   * A regular user can only get xattrs for the "user" namespace.
-   * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattr of "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    *
    * @param path Path to get extended attributes
    * @param path Path to get extended attributes
    * @param names XAttr names.
    * @param names XAttr names.
@@ -2453,21 +2460,10 @@ public final class FileContext {
 
 
   /**
   /**
    * Remove an xattr of a file or directory.
    * Remove an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
    * <p/>
-   * A regular user can only remove an xattr for the "user" namespace.
-   * The super user can remove an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    *
    * @param path Path to remove extended attribute
    * @param path Path to remove extended attribute
    * @param name xattr name
    * @param name xattr name
@@ -2490,14 +2486,7 @@ public final class FileContext {
    * Only those xattr names which the logged-in user has permissions to view
    * Only those xattr names which the logged-in user has permissions to view
    * are returned.
    * are returned.
    * <p/>
    * <p/>
-   * A regular user can only get xattr names for the "user" namespace.
-   * The super user can only get xattr names for "user" and "trusted"
-   * namespaces.
-   * The xattrs of the "security" and "system" namespaces are only
-   * used/exposed internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
    *
    *
    * @param path Path to get extended attributes
    * @param path Path to get extended attributes
    * @return List<String> of the XAttr names of the file or directory
    * @return List<String> of the XAttr names of the file or directory
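
A usage sketch (not part of the patch) for the new FileContext#access(Path, FsAction) added above; the path is invented, and the javadoc's caveat applies: this is a point-in-time check subject to time-of-check/time-of-use races.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    public class AccessCheckSketch {
      public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext(new Configuration());
        Path p = new Path("/user/alice/data.csv");
        try {
          fc.access(p, FsAction.READ_WRITE);   // throws if either bit is denied
          System.out.println("caller may read and write " + p);
        } catch (AccessControlException ace) {
          System.out.println("access denied: " + ace.getMessage());
        }
      }
    }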

+ 102 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java

@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import org.apache.commons.codec.binary.Hex;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.crypto.CipherSuite;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * FileEncryptionInfo encapsulates all the encryption-related information for
+ * an encrypted file.
+ */
+@InterfaceAudience.Private
+public class FileEncryptionInfo {
+
+  private final CipherSuite cipherSuite;
+  private final byte[] edek;
+  private final byte[] iv;
+  private final String ezKeyVersionName;
+
+  /**
+   * Create a FileEncryptionInfo.
+   *
+   * @param suite CipherSuite used to encrypt the file
+   * @param edek encrypted data encryption key (EDEK) of the file
+   * @param iv initialization vector (IV) used to encrypt the file
+   * @param ezKeyVersionName name of the KeyVersion used to encrypt the
+   *                         encrypted data encryption key.
+   */
+  public FileEncryptionInfo(final CipherSuite suite, final byte[] edek,
+      final byte[] iv, final String ezKeyVersionName) {
+    checkNotNull(suite);
+    checkNotNull(edek);
+    checkNotNull(iv);
+    checkNotNull(ezKeyVersionName);
+    checkArgument(edek.length == suite.getAlgorithmBlockSize(),
+        "Unexpected key length");
+    checkArgument(iv.length == suite.getAlgorithmBlockSize(),
+        "Unexpected IV length");
+    this.cipherSuite = suite;
+    this.edek = edek;
+    this.iv = iv;
+    this.ezKeyVersionName = ezKeyVersionName;
+  }
+
+  /**
+   * @return {@link org.apache.hadoop.crypto.CipherSuite} used to encrypt
+   * the file.
+   */
+  public CipherSuite getCipherSuite() {
+    return cipherSuite;
+  }
+
+  /**
+   * @return encrypted data encryption key (EDEK) for the file
+   */
+  public byte[] getEncryptedDataEncryptionKey() {
+    return edek;
+  }
+
+  /**
+   * @return initialization vector (IV) for the cipher used to encrypt the file
+   */
+  public byte[] getIV() {
+    return iv;
+  }
+
+  /**
+   * @return name of the encryption zone KeyVersion used to encrypt the
+   * encrypted data encryption key (EDEK).
+   */
+  public String getEzKeyVersionName() { return ezKeyVersionName; }
+
+  @Override
+  public String toString() {
+    StringBuilder builder = new StringBuilder("{");
+    builder.append("cipherSuite: " + cipherSuite);
+    builder.append(", edek: " + Hex.encodeHexString(edek));
+    builder.append(", iv: " + Hex.encodeHexString(iv));
+    builder.append(", ezKeyVersionName: " + ezKeyVersionName);
+    builder.append("}");
+    return builder.toString();
+  }
+}
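
A construction sketch (not part of the patch) for the class above. The EDEK and key-version name are fabricated, and CipherSuite.AES_CTR_NOPADDING is assumed to be the suite constant; the constructor's checkArgument calls force both the EDEK and the IV to match the suite's block size, which is why both arrays are sized from getAlgorithmBlockSize().

    import java.security.SecureRandom;

    import org.apache.hadoop.crypto.CipherSuite;
    import org.apache.hadoop.fs.FileEncryptionInfo;

    public class FileEncryptionInfoSketch {
      public static void main(String[] args) {
        CipherSuite suite = CipherSuite.AES_CTR_NOPADDING;
        byte[] edek = new byte[suite.getAlgorithmBlockSize()];  // 16 bytes for AES/CTR
        byte[] iv = new byte[suite.getAlgorithmBlockSize()];
        new SecureRandom().nextBytes(iv);

        FileEncryptionInfo info =
            new FileEncryptionInfo(suite, edek, iv, "myEncryptionZoneKey@0");
        System.out.println(info);   // toString() hex-encodes the edek and iv
      }
    }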

+ 82 - 77
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -25,6 +25,7 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.HashSet;
@@ -50,6 +51,7 @@ import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
@@ -2072,6 +2074,71 @@ public abstract class FileSystem extends Configured implements Closeable {
    */
    */
   public abstract FileStatus getFileStatus(Path f) throws IOException;
   public abstract FileStatus getFileStatus(Path f) throws IOException;
 
 
+  /**
+   * Checks if the user can access a path.  The mode specifies which access
+   * checks to perform.  If the requested permissions are granted, then the
+   * method returns normally.  If access is denied, then the method throws an
+   * {@link AccessControlException}.
+   * <p/>
+   * The default implementation of this method calls {@link #getFileStatus(Path)}
+   * and checks the returned permissions against the requested permissions.
+   * Note that the getFileStatus call will be subject to authorization checks.
+   * Typically, this requires search (execute) permissions on each directory in
+   * the path's prefix, but this is implementation-defined.  Any file system
+   * that provides a richer authorization model (such as ACLs) may override the
+   * default implementation so that it checks against that model instead.
+   * <p>
+   * In general, applications should avoid using this method, due to the risk of
+   * time-of-check/time-of-use race conditions.  The permissions on a file may
+   * change immediately after the access call returns.  Most applications should
+   * prefer running specific file system actions as the desired user represented
+   * by a {@link UserGroupInformation}.
+   *
+   * @param path Path to check
+   * @param mode type of access to check
+   * @throws AccessControlException if access is denied
+   * @throws FileNotFoundException if the path does not exist
+   * @throws IOException see specific implementation
+   */
+  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, IOException {
+    checkAccessPermissions(this.getFileStatus(path), mode);
+  }
+
+  /**
+   * This method provides the default implementation of
+   * {@link #access(Path, FsAction)}.
+   *
+   * @param stat FileStatus to check
+   * @param mode type of access to check
+   * @throws IOException for any error
+   */
+  @InterfaceAudience.Private
+  static void checkAccessPermissions(FileStatus stat, FsAction mode)
+      throws IOException {
+    FsPermission perm = stat.getPermission();
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    String user = ugi.getShortUserName();
+    List<String> groups = Arrays.asList(ugi.getGroupNames());
+    if (user.equals(stat.getOwner())) {
+      if (perm.getUserAction().implies(mode)) {
+        return;
+      }
+    } else if (groups.contains(stat.getGroup())) {
+      if (perm.getGroupAction().implies(mode)) {
+        return;
+      }
+    } else {
+      if (perm.getOtherAction().implies(mode)) {
+        return;
+      }
+    }
+    throw new AccessControlException(String.format(
+      "Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(),
+      stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
+  }
+
   /**
   /**
    * See {@link FileContext#fixRelativePart}
    * See {@link FileContext#fixRelativePart}
    */
    */
@@ -2364,21 +2431,10 @@ public abstract class FileSystem extends Configured implements Closeable {
 
 
   /**
   /**
    * Set an xattr of a file or directory.
    * Set an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only set an xattr for the "user" namespace.
-   * The super user can set an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
    * <p/>
    * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to modify
   * @param name xattr name.
@@ -2393,21 +2449,10 @@ public abstract class FileSystem extends Configured implements Closeable {
 
 
  /**
   * Set an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only set an xattr for the "user" namespace.
-   * The super user can set an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
   * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set if the logged-in user has the correct permissions.
-   * If the xattr exists, it is replaced.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to modify
   * @param name xattr name.
@@ -2423,20 +2468,10 @@ public abstract class FileSystem extends Configured implements Closeable {
 
 
  /**
   * Get an xattr name and value for a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
   * <p/>
-   * 
-   * A regular user can only get an xattr for the "user" namespace.
-   * The super user can get an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * An xattr will only be returned if the logged-in user has the
-   * correct permissions.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attribute
   * @param name xattr name.
@@ -2453,13 +2488,7 @@ public abstract class FileSystem extends Configured implements Closeable {
   * Only those xattrs which the logged-in user has permissions to view
   * are returned.
   * <p/>
-   * A regular user can only get xattrs for the "user" namespace.
-   * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed
-   * internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attributes
   * @return Map<String, byte[]> describing the XAttrs of the file or directory
@@ -2475,13 +2504,7 @@ public abstract class FileSystem extends Configured implements Closeable {
   * Only those xattrs which the logged-in user has permissions to view
   * are returned.
   * <p/>
-   * A regular user can only get xattrs for the "user" namespace.
-   * The super user can only get xattrs for "user" and "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed
-   * internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attributes
   * @param names XAttr names.
@@ -2499,14 +2522,7 @@ public abstract class FileSystem extends Configured implements Closeable {
   * Only those xattr names which the logged-in user has permissions to view
   * are returned.
   * <p/>
-   * A regular user can only get xattr names for the "user" namespace.
-   * The super user can only get xattr names for "user" and "trusted"
-   * namespaces.
-   * The xattrs of the "security" and "system" namespaces are only
-   * used/exposed internally by/to the FS impl.
-   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to get extended attributes
   * @return List<String> of the XAttr names of the file or directory
@@ -2519,21 +2535,10 @@ public abstract class FileSystem extends Configured implements Closeable {
 
 
  /**
   * Remove an xattr of a file or directory.
-   * The name must be prefixed with user/trusted/security/system and
-   * followed by ".". For example, "user.attr".
-   * <p/>
-   * A regular user can only remove an xattr for the "user" namespace.
-   * The super user can remove an xattr of either the "user" or "trusted" namespaces.
-   * The xattrs of the "security" and "system" namespaces are only used/exposed 
-   * internally by/to the FS impl.
-   * <p/>
-   * The access permissions of an xattr in the "user" namespace are
-   * defined by the file and directory permission bits.
-   * An xattr can only be set when the logged-in user has the correct permissions.
-   * If the xattr exists, it will be replaced.
+   * The name must be prefixed with the namespace followed by ".". For example,
+   * "user.attr".
   * <p/>
-   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
-   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   * Refer to the HDFS extended attributes user documentation for details.
   *
   * @param path Path to remove extended attribute
   * @param name xattr name
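A minimal caller-side sketch of the xattr API whose javadoc is being trimmed above; the attribute name and value are invented for illustration, and (as the removed text noted) regular users may only use the "user." namespace on HDFS.

import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class XAttrExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/user/alice/data.txt");                      // hypothetical path
    fs.setXAttr(p, "user.origin", "ingest-job".getBytes("UTF-8"));  // name and value are made up
    byte[] value = fs.getXAttr(p, "user.origin");                   // fetch a single attribute
    Map<String, byte[]> all = fs.getXAttrs(p);                      // all attributes visible to the caller
    List<String> names = fs.listXAttrs(p);                          // just the attribute names
    fs.removeXAttr(p, "user.origin");
  }
}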

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.security.AccessControlException;
@@ -397,6 +398,12 @@ public class FilterFileSystem extends FileSystem {
     return fs.getFileStatus(f);
   }
 
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, IOException {
+    fs.access(path, mode);
+  }
+
   public void createSymlink(final Path target, final Path link,
       final boolean createParent) throws AccessControlException,
       FileAlreadyExistsException, FileNotFoundException,

+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.security.AccessControlException;
@@ -119,6 +120,13 @@ public abstract class FilterFs extends AbstractFileSystem {
     return myFs.getFileStatus(f);
   }
 
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    checkPath(path);
+    myFs.access(path, mode);
+  }
+
   @Override
   public FileStatus getFileLinkStatus(final Path f) 
     throws IOException, UnresolvedLinkException {

+ 6 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java

@@ -232,6 +232,10 @@ class Globber {
               }
             }
             for (FileStatus child : children) {
+              if (componentIdx < components.size() - 1) {
+                // Don't try to recurse into non-directories.  See HADOOP-10957.
+                if (!child.isDirectory()) continue; 
+              }
               // Set the child path based on the parent path.
               child.setPath(new Path(candidate.getPath(),
                       child.getPath().getName()));
@@ -249,8 +253,8 @@ class Globber {
                 new Path(candidate.getPath(), component));
             if (childStatus != null) {
               newCandidates.add(childStatus);
-             }
-           }
+            }
+          }
         }
         candidates = newCandidates;
       }

+ 37 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataInputStream.java

@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.crypto;
+
+import java.io.IOException;
+
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.CryptoInputStream;
+import org.apache.hadoop.fs.FSDataInputStream;
+
+public class CryptoFSDataInputStream extends FSDataInputStream {
+  
+  public CryptoFSDataInputStream(FSDataInputStream in, CryptoCodec codec, 
+      int bufferSize, byte[] key, byte[] iv) throws IOException {
+    super(new CryptoInputStream(in, codec, bufferSize, key, iv)); 
+  }
+  
+  public CryptoFSDataInputStream(FSDataInputStream in, CryptoCodec codec, 
+      byte[] key, byte[] iv) throws IOException {
+    super(new CryptoInputStream(in, codec, key, iv)); 
+  }
+}

+ 47 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/crypto/CryptoFSDataOutputStream.java

@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.crypto;
+
+import java.io.IOException;
+
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.CryptoOutputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+
+public class CryptoFSDataOutputStream extends FSDataOutputStream {
+  private final FSDataOutputStream fsOut;
+  
+  public CryptoFSDataOutputStream(FSDataOutputStream out, CryptoCodec codec,
+      int bufferSize, byte[] key, byte[] iv) throws IOException {
+    super(new CryptoOutputStream(out, codec, bufferSize, key, iv, 
+        out.getPos()), null, out.getPos()); 
+    this.fsOut = out;
+  }
+  
+  public CryptoFSDataOutputStream(FSDataOutputStream out, CryptoCodec codec,
+      byte[] key, byte[] iv) throws IOException {
+    super(new CryptoOutputStream(out, codec, key, iv, out.getPos()), 
+        null, out.getPos()); 
+    this.fsOut = out;
+  }
+  
+  @Override
+  public long getPos() {
+    return fsOut.getPos();
+  }
+}
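A hedged sketch of how these two wrappers might be combined for a round trip; the CryptoCodec instance, key and IV are assumed inputs obtained elsewhere (for example from the cluster's crypto configuration and key material) and are not shown here.

import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.crypto.CryptoFSDataInputStream;
import org.apache.hadoop.fs.crypto.CryptoFSDataOutputStream;

public class CryptoStreamExample {
  // codec, key and iv are assumed inputs; obtaining them is out of scope here.
  static void roundTrip(FileSystem fs, Path p, CryptoCodec codec,
      byte[] key, byte[] iv) throws Exception {
    FSDataOutputStream raw = fs.create(p);
    CryptoFSDataOutputStream out =
        new CryptoFSDataOutputStream(raw, codec, key, iv);   // encrypts on write
    out.write("hello".getBytes("UTF-8"));
    out.close();

    FSDataInputStream rawIn = fs.open(p);
    CryptoFSDataInputStream in =
        new CryptoFSDataInputStream(rawIn, codec, key, iv);  // decrypts on read
    byte[] buf = new byte[5];
    in.readFully(buf);
    in.close();
  }
}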

+ 68 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java

@@ -57,6 +57,17 @@ abstract class CommandWithDestination extends FsCommand {
   private boolean verifyChecksum = true;
   private boolean writeChecksum = true;
   
+  /**
+   * The name of the raw xattr namespace. It would be nice to use
+   * XAttr.RAW.name() but we can't reference the hadoop-hdfs project.
+   */
+  private static final String RAW = "raw.";
+
+  /**
+   * The name of the reserved raw directory.
+   */
+  private static final String RESERVED_RAW = "/.reserved/raw";
+
   /**
    * 
    * This method is used to enable the force(-f)  option while copying the files.
@@ -231,7 +242,7 @@ abstract class CommandWithDestination extends FsCommand {
   /**
    * Called with a source and target destination pair
    * @param src for the operation
-   * @param target for the operation
+   * @param dst for the operation
    * @throws IOException if anything goes wrong
    */
   protected void processPath(PathData src, PathData dst) throws IOException {
@@ -253,6 +264,8 @@ abstract class CommandWithDestination extends FsCommand {
       // modify dst as we descend to append the basename of the
       // current directory being processed
       dst = getTargetPath(src);
+      final boolean preserveRawXattrs =
+          checkPathsForReservedRaw(src.path, dst.path);
       if (dst.exists) {
         if (!dst.stat.isDirectory()) {
           throw new PathIsNotDirectoryException(dst.toString());
@@ -268,7 +281,7 @@ abstract class CommandWithDestination extends FsCommand {
       }      
       super.recursePath(src);
       if (dst.stat.isDirectory()) {
-        preserveAttributes(src, dst);
+        preserveAttributes(src, dst, preserveRawXattrs);
       }
     } finally {
       dst = savedDst;
@@ -295,18 +308,60 @@ abstract class CommandWithDestination extends FsCommand {
    * @param target where to copy the item
    * @throws IOException if copy fails
    */ 
-  protected void copyFileToTarget(PathData src, PathData target) throws IOException {
+  protected void copyFileToTarget(PathData src, PathData target)
+      throws IOException {
+    final boolean preserveRawXattrs =
+        checkPathsForReservedRaw(src.path, target.path);
     src.fs.setVerifyChecksum(verifyChecksum);
     InputStream in = null;
     try {
       in = src.fs.open(src.path);
       copyStreamToTarget(in, target);
-      preserveAttributes(src, target);
+      preserveAttributes(src, target, preserveRawXattrs);
     } finally {
       IOUtils.closeStream(in);
     }
   }
   
+  /**
+   * Check the source and target paths to ensure that they are either both in
+   * /.reserved/raw or neither in /.reserved/raw. If neither src nor target are
+   * in /.reserved/raw, then return false, indicating not to preserve raw.*
+   * xattrs. If both src/target are in /.reserved/raw, then return true,
+   * indicating raw.* xattrs should be preserved. If only one of src/target is
+   * in /.reserved/raw then throw an exception.
+   *
+   * @param src The source path to check. This should be a fully-qualified
+   *            path, not relative.
+   * @param target The target path to check. This should be a fully-qualified
+   *               path, not relative.
+   * @return true if raw.* xattrs should be preserved.
+   * @throws PathOperationException if only one of src/target is in
+   * /.reserved/raw.
+   */
+  private boolean checkPathsForReservedRaw(Path src, Path target)
+      throws PathOperationException {
+    final boolean srcIsRR = Path.getPathWithoutSchemeAndAuthority(src).
+        toString().startsWith(RESERVED_RAW);
+    final boolean dstIsRR = Path.getPathWithoutSchemeAndAuthority(target).
+        toString().startsWith(RESERVED_RAW);
+    boolean preserveRawXattrs = false;
+    if (srcIsRR && !dstIsRR) {
+      final String s = "' copy from '" + RESERVED_RAW + "' to non '" +
+          RESERVED_RAW + "'. Either both source and target must be in '" +
+          RESERVED_RAW + "' or neither.";
+      throw new PathOperationException("'" + src.toString() + s);
+    } else if (!srcIsRR && dstIsRR) {
+      final String s = "' copy from non '" + RESERVED_RAW +"' to '" +
+          RESERVED_RAW + "'. Either both source and target must be in '" +
+          RESERVED_RAW + "' or neither.";
+      throw new PathOperationException("'" + dst.toString() + s);
+    } else if (srcIsRR && dstIsRR) {
+      preserveRawXattrs = true;
+    }
+    return preserveRawXattrs;
+  }
+
   /**
    * Copies the stream contents to a temporary file.  If the copy is
    * successful, the temporary file will be renamed to the real path,
@@ -337,9 +392,11 @@ abstract class CommandWithDestination extends FsCommand {
    * attribute to preserve.
    * @param src source to preserve
    * @param target where to preserve attributes
+   * @param preserveRawXAttrs true if raw.* xattrs should be preserved
    * @throws IOException if fails to preserve attributes
    */
-  protected void preserveAttributes(PathData src, PathData target)
+  protected void preserveAttributes(PathData src, PathData target,
+      boolean preserveRawXAttrs)
       throws IOException {
     if (shouldPreserve(FileAttribute.TIMESTAMPS)) {
       target.fs.setTimes(
@@ -369,13 +426,17 @@ abstract class CommandWithDestination extends FsCommand {
         target.fs.setAcl(target.path, srcFullEntries);
       }
     }
-    if (shouldPreserve(FileAttribute.XATTR)) {
+    final boolean preserveXAttrs = shouldPreserve(FileAttribute.XATTR);
+    if (preserveXAttrs || preserveRawXAttrs) {
       Map<String, byte[]> srcXAttrs = src.fs.getXAttrs(src.path);
       if (srcXAttrs != null) {
         Iterator<Entry<String, byte[]>> iter = srcXAttrs.entrySet().iterator();
         while (iter.hasNext()) {
           Entry<String, byte[]> entry = iter.next();
-          target.fs.setXAttr(target.path, entry.getKey(), entry.getValue());
+          final String xattrName = entry.getKey();
+          if (xattrName.startsWith(RAW) || preserveXAttrs) {
+            target.fs.setXAttr(target.path, entry.getKey(), entry.getValue());
+          }
          }
        }
      }

+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java

@@ -143,7 +143,11 @@ class CopyCommands {
       "timestamps, ownership, permission. If -pa is specified, " +
       "then preserves permission also because ACL is a super-set of " +
       "permission. Passing -f overwrites the destination if it " +
-      "already exists.\n";
+      "already exists. raw namespace extended attributes are preserved " +
+      "if (1) they are supported (HDFS only) and, (2) all of the source and " +
+      "target pathnames are in the /.reserved/raw hierarchy. raw namespace " +
+      "xattr preservation is determined solely by the presence (or absence) " +
+      "of the /.reserved/raw prefix and not by the -p option.\n";
 
 
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
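Concretely (an illustration of the description above, with hypothetical paths): a copy such as hadoop fs -cp /.reserved/raw/src /.reserved/raw/dst preserves raw.* extended attributes regardless of -p, whereas a copy in which only one of the two paths is under /.reserved/raw fails with a PathOperationException, as enforced by checkPathsForReservedRaw() earlier in this patch.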

+ 31 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Count.java

@@ -42,16 +42,22 @@ public class Count extends FsCommand {
     factory.addClass(Count.class, "-count");
   }
 
+  private static final String OPTION_QUOTA = "q";
+  private static final String OPTION_HUMAN = "h";
+
   public static final String NAME = "count";
-  public static final String USAGE = "[-q] <path> ...";
+  public static final String USAGE =
+      "[-" + OPTION_QUOTA + "] [-" + OPTION_HUMAN + "] <path> ...";
   public static final String DESCRIPTION = 
       "Count the number of directories, files and bytes under the paths\n" +
       "that match the specified file pattern.  The output columns are:\n" +
       "DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME or\n" +
       "QUOTA REMAINING_QUOTA SPACE_QUOTA REMAINING_SPACE_QUOTA \n" +
-      "      DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME";
+      "      DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME\n" +
+      "The -h option shows file sizes in human readable format.";
   
   
   private boolean showQuotas;
+  private boolean humanReadable;
 
 
   /** Constructor */
   public Count() {}
@@ -70,17 +76,37 @@ public class Count extends FsCommand {
 
 
   @Override
   protected void processOptions(LinkedList<String> args) {
-    CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "q");
+    CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE,
+      OPTION_QUOTA, OPTION_HUMAN);
     cf.parse(args);
     if (args.isEmpty()) { // default path is the current working directory
       args.add(".");
     }
-    showQuotas = cf.getOpt("q");
+    showQuotas = cf.getOpt(OPTION_QUOTA);
+    humanReadable = cf.getOpt(OPTION_HUMAN);
   }
 
   @Override
   protected void processPath(PathData src) throws IOException {
     ContentSummary summary = src.fs.getContentSummary(src.path);
-    out.println(summary.toString(showQuotas) + src);
+    out.println(summary.toString(showQuotas, isHumanReadable()) + src);
+  }
+  
+  /**
+   * Should quotas get shown as part of the report?
+   * @return if quotas should be shown then true otherwise false
+   */
+  @InterfaceAudience.Private
+  boolean isShowQuotas() {
+    return showQuotas;
+  }
+  
+  /**
+   * Should sizes be shown in human readable format rather than bytes?
+   * @return true if human readable format
+   */
+  @InterfaceAudience.Private
+  boolean isHumanReadable() {
+    return humanReadable;
   }
 }
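With this change, hadoop fs -count -q -h <path> prints the same columns as before but renders the size figures in human readable form (scaled units rather than raw byte counts); omitting -h keeps the previous byte-exact output.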

+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java

@@ -118,7 +118,11 @@ class Delete {
         } catch(FileNotFoundException fnfe) {
           throw fnfe;
         } catch (IOException ioe) {
-            throw new IOException(ioe.getMessage() + ". Consider using -skipTrash option", ioe);
+          String msg = ioe.getMessage();
+          if (ioe.getCause() != null) {
+            msg += ": " + ioe.getCause().getMessage();
+          }
+          throw new IOException(msg + ". Consider using -skipTrash option", ioe);
         }
       }
       return success;

+ 11 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.fs.shell;
 
 import java.io.ByteArrayOutputStream;
+import java.io.EOFException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.LinkedList;
@@ -126,8 +127,17 @@ class Display extends FsCommand {
     protected InputStream getInputStream(PathData item) throws IOException {
       FSDataInputStream i = (FSDataInputStream)super.getInputStream(item);
 
+      // Handle 0 and 1-byte files
+      short leadBytes;
+      try {
+        leadBytes = i.readShort();
+      } catch (EOFException e) {
+        i.seek(0);
+        return i;
+      }
+
       // Check type of stream first
-      switch(i.readShort()) {
+      switch(leadBytes) {
         case 0x1f8b: { // RFC 1952
           // Must be gzip
           i.seek(0);

+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java

@@ -41,7 +41,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
 
 /**
@@ -222,6 +224,12 @@ class ChRootedFileSystem extends FilterFileSystem {
     return super.getFileStatus(fullPath(f));
   }
 
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, IOException {
+    super.access(fullPath(path), mode);
+  }
+
   @Override
   public FsStatus getStatus(Path p) throws IOException {
     return super.getStatus(fullPath(p));

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java

@@ -41,7 +41,9 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
 
@@ -200,6 +202,11 @@ class ChRootedFs extends AbstractFileSystem {
     return myFs.getFileStatus(fullPath(f));
   }
 
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    myFs.access(fullPath(path), mode);
+  }
+
   @Override
   public FileStatus getFileLinkStatus(final Path f) 
     throws IOException, UnresolvedLinkException {

+ 9 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -51,6 +51,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.AclUtil;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.viewfs.InodeTree.INode;
 import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -359,7 +360,14 @@ public class ViewFileSystem extends FileSystem {
     return new ViewFsFileStatus(status, this.makeQualified(f));
   }
   
-  
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, IOException {
+    InodeTree.ResolveResult<FileSystem> res =
+      fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.access(res.remainingPath, mode);
+  }
+
   @Override
   public FileStatus[] listStatus(final Path f) throws AccessControlException,
       FileNotFoundException, IOException {

+ 9 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.local.LocalConfigKeys;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.viewfs.InodeTree.INode;
 import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -352,6 +353,14 @@ public class ViewFs extends AbstractFileSystem {
     return new ViewFsFileStatus(status, this.makeQualified(f));
   }
 
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+      fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.access(res.remainingPath, mode);
+  }
+
   @Override
   public FileStatus getFileLinkStatus(final Path f)
      throws AccessControlException, FileNotFoundException,

+ 14 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java

@@ -44,6 +44,7 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletRequestWrapper;
 import javax.servlet.http.HttpServletResponse;
 
+import com.google.common.collect.ImmutableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -415,6 +416,17 @@ public final class HttpServer2 implements FilterContainer {
   private static WebAppContext createWebAppContext(String name,
       Configuration conf, AccessControlList adminsAcl, final String appDir) {
     WebAppContext ctx = new WebAppContext();
+    ctx.setDefaultsDescriptor(null);
+    ServletHolder holder = new ServletHolder(new DefaultServlet());
+    Map<String, String> params = ImmutableMap. <String, String> builder()
+            .put("acceptRanges", "true")
+            .put("dirAllowed", "false")
+            .put("gzip", "true")
+            .put("useFileMappedBuffer", "true")
+            .build();
+    holder.setInitParameters(params);
+    ctx.setWelcomeFiles(new String[] {"index.html"});
+    ctx.addServlet(holder, "/");
     ctx.setDisplayName(name);
     ctx.setContextPath("/");
     ctx.setWar(appDir + "/" + name);
@@ -1005,7 +1017,7 @@ public final class HttpServer2 implements FilterContainer {
 
 
     String remoteUser = request.getRemoteUser();
     if (remoteUser == null) {
-      response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
+      response.sendError(HttpServletResponse.SC_FORBIDDEN,
                          "Unauthenticated users are not " +
                          "authorized to access this page.");
       return false;
@@ -1013,7 +1025,7 @@ public final class HttpServer2 implements FilterContainer {
 
 
     if (servletContext.getAttribute(ADMINS_ACL) != null &&
         !userHasAdministratorAccess(servletContext, remoteUser)) {
-      response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User "
+      response.sendError(HttpServletResponse.SC_FORBIDDEN, "User "
           + remoteUser + " is unauthorized to access this page.");
       return false;
     }

+ 5 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java

@@ -53,8 +53,9 @@ import org.apache.hadoop.util.Time;
  * <code>SequenceFile</code>s are flat files consisting of binary key/value 
  * pairs.
  * 
- * <p><code>SequenceFile</code> provides {@link Writer}, {@link Reader} and
- * {@link Sorter} classes for writing, reading and sorting respectively.</p>
+ * <p><code>SequenceFile</code> provides {@link SequenceFile.Writer},
+ * {@link SequenceFile.Reader} and {@link Sorter} classes for writing,
+ * reading and sorting respectively.</p>
  * 
  * There are three <code>SequenceFile</code> <code>Writer</code>s based on the 
  * {@link CompressionType} used to compress key/value pairs:
@@ -79,8 +80,8 @@ import org.apache.hadoop.util.Time;
  * <p>The recommended way is to use the static <tt>createWriter</tt> methods
  * provided by the <code>SequenceFile</code> to chose the preferred format.</p>
  *
- * <p>The {@link Reader} acts as the bridge and can read any of the above 
- * <code>SequenceFile</code> formats.</p>
+ * <p>The {@link SequenceFile.Reader} acts as the bridge and can read any of the
+ * above <code>SequenceFile</code> formats.</p>
  *
  * <h4 id="Formats">SequenceFile Formats</h4>
  * 

+ 16 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java

@@ -33,9 +33,11 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.PerformanceAdvisory;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
@@ -195,7 +197,7 @@ public class NativeIO {
           // This can happen if the user has an older version of libhadoop.so
           // installed - in this case we can continue without native IO
           // after warning
-          LOG.error("Unable to initialize NativeIO libraries", t);
+          PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
         }
       }
     }
@@ -573,7 +575,7 @@ public class NativeIO {
           // This can happen if the user has an older version of libhadoop.so
           // installed - in this case we can continue without native IO
           // after warning
-          LOG.error("Unable to initialize NativeIO libraries", t);
+          PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
         }
       }
     }
@@ -592,7 +594,7 @@ public class NativeIO {
         // This can happen if the user has an older version of libhadoop.so
         // installed - in this case we can continue without native IO
         // after warning
-        LOG.error("Unable to initialize NativeIO libraries", t);
+        PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
       }
     }
   }
@@ -823,6 +825,14 @@ public class NativeIO {
     }
   }
 
+  public static void link(File src, File dst) throws IOException {
+    if (!nativeLoaded) {
+      HardLink.createHardLink(src, dst);
+    } else {
+      link0(src.getAbsolutePath(), dst.getAbsolutePath());
+    }
+  }
+
   /**
    * A version of renameTo that throws a descriptive exception when it fails.
    *
@@ -833,4 +843,7 @@ public class NativeIO {
    */
   private static native void renameTo0(String src, String dst)
       throws NativeIOException;
+
+  private static native void link0(String src, String dst)
+      throws NativeIOException;
 }
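A minimal sketch of calling the new NativeIO.link() helper (file names are hypothetical): as the code above shows, it uses the native link0() call when libhadoop is loaded and otherwise falls back to HardLink.createHardLink().

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.io.nativeio.NativeIO;

public class HardLinkExample {
  public static void main(String[] args) throws IOException {
    File src = new File("/tmp/block_1001");       // hypothetical existing file
    File dst = new File("/tmp/block_1001.lnk");   // hard link to create
    NativeIO.link(src, dst);                      // native link(2), or HardLink fallback
  }
}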

+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -88,6 +88,7 @@ import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.htrace.Trace;
 
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -694,6 +695,9 @@ public class Client {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Connecting to "+server);
         }
+        if (Trace.isTracing()) {
+          Trace.addTimelineAnnotation("IPC client connecting to " + server);
+        }
         short numRetries = 0;
         Random rand = null;
         while (true) {
@@ -758,6 +762,10 @@ public class Client {
           // update last activity time
           touch();
 
+          if (Trace.isTracing()) {
+            Trace.addTimelineAnnotation("IPC client connected to " + server);
+          }
+
           // start the receiver thread after the socket connection has been set
           // up
           start();

+ 522 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java

@@ -0,0 +1,522 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import java.lang.ref.WeakReference;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.metrics2.util.MBeans;
+
+import org.codehaus.jackson.map.ObjectMapper;
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * The decay RPC scheduler counts incoming requests in a map, then
+ * decays the counts at a fixed time interval. The scheduler is optimized
+ * for large periods (on the order of seconds), as it offloads work to the
+ * decay sweep.
+ */
+public class DecayRpcScheduler implements RpcScheduler, DecayRpcSchedulerMXBean {
+  /**
+   * Period controls how many milliseconds between each decay sweep.
+   */
+  public static final String IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY =
+    "faircallqueue.decay-scheduler.period-ms";
+  public static final long   IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_DEFAULT =
+    5000L;
+
+  /**
+   * Decay factor controls how much each count is suppressed by on each sweep.
+   * Valid numbers are > 0 and < 1. Decay factor works in tandem with period
+   * to control how long the scheduler remembers an identity.
+   */
+  public static final String IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY =
+    "faircallqueue.decay-scheduler.decay-factor";
+  public static final double IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_DEFAULT =
+    0.5;
+
+  /**
+   * Thresholds are specified as integer percentages, and specify which usage
+   * range each queue will be allocated to. For instance, specifying the list
+   *  10, 40, 80
+   * implies 4 queues, with
+   * - q3 from 80% up
+   * - q2 from 40 up to 80
+   * - q1 from 10 up to 40
+   * - q0 otherwise.
+   */
+  public static final String IPC_CALLQUEUE_DECAYSCHEDULER_THRESHOLDS_KEY =
+    "faircallqueue.decay-scheduler.thresholds";
+
+  // Specifies the identity to use when the IdentityProvider cannot handle
+  // a schedulable.
+  public static final String DECAYSCHEDULER_UNKNOWN_IDENTITY =
+    "IdentityProvider.Unknown";
+
+  public static final Log LOG = LogFactory.getLog(DecayRpcScheduler.class);
+
+  // Track the number of calls for each schedulable identity
+  private final ConcurrentHashMap<Object, AtomicLong> callCounts =
+    new ConcurrentHashMap<Object, AtomicLong>();
+
+  // Should be the sum of all AtomicLongs in callCounts
+  private final AtomicLong totalCalls = new AtomicLong();
+
+  // Pre-computed scheduling decisions during the decay sweep are
+  // atomically swapped in as a read-only map
+  private final AtomicReference<Map<Object, Integer>> scheduleCacheRef =
+    new AtomicReference<Map<Object, Integer>>();
+
+  // Tune the behavior of the scheduler
+  private final long decayPeriodMillis; // How long between each tick
+  private final double decayFactor; // nextCount = currentCount / decayFactor
+  private final int numQueues; // affects scheduling decisions, from 0 to numQueues - 1
+  private final double[] thresholds;
+  private final IdentityProvider identityProvider;
+
+  /**
+   * This TimerTask will call decayCurrentCounts until
+   * the scheduler has been garbage collected.
+   */
+  public static class DecayTask extends TimerTask {
+    private WeakReference<DecayRpcScheduler> schedulerRef;
+    private Timer timer;
+
+    public DecayTask(DecayRpcScheduler scheduler, Timer timer) {
+      this.schedulerRef = new WeakReference<DecayRpcScheduler>(scheduler);
+      this.timer = timer;
+    }
+
+    @Override
+    public void run() {
+      DecayRpcScheduler sched = schedulerRef.get();
+      if (sched != null) {
+        sched.decayCurrentCounts();
+      } else {
+        // Our scheduler was garbage collected since it is no longer in use,
+        // so we should terminate the timer as well
+        timer.cancel();
+        timer.purge();
+      }
+    }
+  }
+
+  /**
+   * Create a decay scheduler.
+   * @param numQueues number of queues to schedule for
+   * @param ns config prefix, so that we can configure multiple schedulers
+   *           in a single instance.
+   * @param conf configuration to use.
+   */
+  public DecayRpcScheduler(int numQueues, String ns, Configuration conf) {
+    if (numQueues < 1) {
+      throw new IllegalArgumentException("number of queues must be > 0");
+    }
+
+    this.numQueues = numQueues;
+    this.decayFactor = parseDecayFactor(ns, conf);
+    this.decayPeriodMillis = parseDecayPeriodMillis(ns, conf);
+    this.identityProvider = this.parseIdentityProvider(ns, conf);
+    this.thresholds = parseThresholds(ns, conf, numQueues);
+
+    // Setup delay timer
+    Timer timer = new Timer();
+    DecayTask task = new DecayTask(this, timer);
+    timer.scheduleAtFixedRate(task, 0, this.decayPeriodMillis);
+
+    MetricsProxy prox = MetricsProxy.getInstance(ns);
+    prox.setDelegate(this);
+  }
+
+  // Load configs
+  private IdentityProvider parseIdentityProvider(String ns, Configuration conf) {
+    List<IdentityProvider> providers = conf.getInstances(
+      ns + "." + CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
+      IdentityProvider.class);
+
+    if (providers.size() < 1) {
+      LOG.info("IdentityProvider not specified, " +
+        "defaulting to UserIdentityProvider");
+      return new UserIdentityProvider();
+    }
+
+    return providers.get(0); // use the first
+  }
+
+  private static double parseDecayFactor(String ns, Configuration conf) {
+    double factor = conf.getDouble(ns + "." +
+        IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY,
+      IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_DEFAULT
+    );
+
+    if (factor <= 0 || factor >= 1) {
+      throw new IllegalArgumentException("Decay Factor " +
+        "must be between 0 and 1");
+    }
+
+    return factor;
+  }
+
+  private static long parseDecayPeriodMillis(String ns, Configuration conf) {
+    long period = conf.getLong(ns + "." +
+        IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY,
+      IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_DEFAULT
+    );
+
+    if (period <= 0) {
+      throw new IllegalArgumentException("Period millis must be >= 0");
+    }
+
+    return period;
+  }
+
+  private static double[] parseThresholds(String ns, Configuration conf,
+      int numQueues) {
+    int[] percentages = conf.getInts(ns + "." +
+      IPC_CALLQUEUE_DECAYSCHEDULER_THRESHOLDS_KEY);
+
+    if (percentages.length == 0) {
+      return getDefaultThresholds(numQueues);
+    } else if (percentages.length != numQueues-1) {
+      throw new IllegalArgumentException("Number of thresholds should be " +
+        (numQueues-1) + ". Was: " + percentages.length);
+    }
+
+    // Convert integer percentages to decimals
+    double[] decimals = new double[percentages.length];
+    for (int i = 0; i < percentages.length; i++) {
+      decimals[i] = percentages[i] / 100.0;
+    }
+
+    return decimals;
+  }
+
+  /**
+   * Generate default thresholds if user did not specify. Strategy is
+   * to halve each time, since queue usage tends to be exponential.
+   * So if numQueues is 4, we would generate: double[]{0.125, 0.25, 0.5}
+   * which specifies the boundaries between each queue's usage.
+   * @param numQueues number of queues to compute for
+   * @return array of boundaries of length numQueues - 1
+   */
+  private static double[] getDefaultThresholds(int numQueues) {
+    double[] ret = new double[numQueues - 1];
+    double div = Math.pow(2, numQueues - 1);
+
+    for (int i = 0; i < ret.length; i++) {
+      ret[i] = Math.pow(2, i)/div;
+    }
+    return ret;
+  }
+
+  /**
+   * Decay the stored counts for each user and clean as necessary.
+   * This method should be called periodically in order to keep
+   * counts current.
+   */
+  private void decayCurrentCounts() {
+    long total = 0;
+    Iterator<Map.Entry<Object, AtomicLong>> it =
+      callCounts.entrySet().iterator();
+
+    while (it.hasNext()) {
+      Map.Entry<Object, AtomicLong> entry = it.next();
+      AtomicLong count = entry.getValue();
+
+      // Compute the next value by reducing it by the decayFactor
+      long currentValue = count.get();
+      long nextValue = (long)(currentValue * decayFactor);
+      total += nextValue;
+      count.set(nextValue);
+
+      if (nextValue == 0) {
+        // We will clean up unused keys here. An interesting optimization might
+        // be to have an upper bound on keyspace in callCounts and only
+        // clean once we pass it.
+        it.remove();
+      }
+    }
+
+    // Update the total so that we remain in sync
+    totalCalls.set(total);
+
+    // Now refresh the cache of scheduling decisions
+    recomputeScheduleCache();
+  }
+
+  /**
+   * Update the scheduleCache to match current conditions in callCounts.
+   */
+  private void recomputeScheduleCache() {
+    Map<Object, Integer> nextCache = new HashMap<Object, Integer>();
+
+    for (Map.Entry<Object, AtomicLong> entry : callCounts.entrySet()) {
+      Object id = entry.getKey();
+      AtomicLong value = entry.getValue();
+
+      long snapshot = value.get();
+      int computedLevel = computePriorityLevel(snapshot);
+
+      nextCache.put(id, computedLevel);
+    }
+
+    // Swap in to activate
+    scheduleCacheRef.set(Collections.unmodifiableMap(nextCache));
+  }
+
+  /**
+   * Get the number of occurrences and increment atomically.
+   * @param identity the identity of the user to increment
+   * @return the value before incrementation
+   */
+  private long getAndIncrement(Object identity) throws InterruptedException {
+    // We will increment the count, or create it if no such count exists
+    AtomicLong count = this.callCounts.get(identity);
+    if (count == null) {
+      // Create the count since no such count exists.
+      count = new AtomicLong(0);
+
+      // Put it in, or get the AtomicLong that was put in by another thread
+      AtomicLong otherCount = callCounts.putIfAbsent(identity, count);
+      if (otherCount != null) {
+        count = otherCount;
+      }
+    }
+
+    // Update the total
+    totalCalls.getAndIncrement();
+
+    // At this point count is guaranteed to be non-null. It may, however, have
+    // been removed from callCounts by a concurrent decay sweep. Nonetheless,
+    // we return what we have.
+    return count.getAndIncrement();
+  }
+
+  /**
+   * Given the number of occurrences, compute a scheduling decision.
+   * @param occurrences how many occurrences
+   * @return scheduling decision from 0 to numQueues - 1
+   */
+  private int computePriorityLevel(long occurrences) {
+    long totalCallSnapshot = totalCalls.get();
+
+    double proportion = 0;
+    if (totalCallSnapshot > 0) {
+      proportion = (double) occurrences / totalCallSnapshot;
+    }
+
+    // Start with low priority queues, since they will be most common
+    for(int i = (numQueues - 1); i > 0; i--) {
+      if (proportion >= this.thresholds[i - 1]) {
+        return i; // We've found our queue number
+      }
+    }
+
+    // If we get this far, we're at queue 0
+    return 0;
+  }
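+
+  // Example: with the default thresholds {0.125, 0.25, 0.5} for 4 queues, a
+  // caller responsible for 30% of all calls satisfies 0.30 >= 0.25 but not
+  // 0.30 >= 0.5, so it is scheduled into queue 2.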
+
+  /**
+   * Returns the priority level for a given identity by first trying the cache,
+   * then computing it.
+   * @param identity an object responding to toString and hashCode
+   * @return integer scheduling decision from 0 to numQueues - 1
+   */
+  private int cachedOrComputedPriorityLevel(Object identity) {
+    try {
+      long occurrences = this.getAndIncrement(identity);
+
+      // Try the cache
+      Map<Object, Integer> scheduleCache = scheduleCacheRef.get();
+      if (scheduleCache != null) {
+        Integer priority = scheduleCache.get(identity);
+        if (priority != null) {
+          return priority;
+        }
+      }
+
+      // Cache was no good, compute it
+      return computePriorityLevel(occurrences);
+    } catch (InterruptedException ie) {
+      LOG.warn("Caught InterruptedException, returning low priority queue");
+      return numQueues - 1;
+    }
+  }
+
+  /**
+   * Compute the appropriate priority for a schedulable based on past requests.
+   * @param obj the schedulable obj to query and remember
+   * @return the queue index which we recommend scheduling in
+   */
+  @Override
+  public int getPriorityLevel(Schedulable obj) {
+    // First get the identity
+    String identity = this.identityProvider.makeIdentity(obj);
+    if (identity == null) {
+      // Identity provider did not handle this
+      identity = DECAYSCHEDULER_UNKNOWN_IDENTITY;
+    }
+
+    return cachedOrComputedPriorityLevel(identity);
+  }
+
+  // For testing
+  @VisibleForTesting
+  public double getDecayFactor() { return decayFactor; }
+
+  @VisibleForTesting
+  public long getDecayPeriodMillis() { return decayPeriodMillis; }
+
+  @VisibleForTesting
+  public double[] getThresholds() { return thresholds; }
+
+  @VisibleForTesting
+  public void forceDecay() { decayCurrentCounts(); }
+
+  @VisibleForTesting
+  public Map<Object, Long> getCallCountSnapshot() {
+    HashMap<Object, Long> snapshot = new HashMap<Object, Long>();
+
+    for (Map.Entry<Object, AtomicLong> entry : callCounts.entrySet()) {
+      snapshot.put(entry.getKey(), entry.getValue().get());
+    }
+
+    return Collections.unmodifiableMap(snapshot);
+  }
+
+  @VisibleForTesting
+  public long getTotalCallSnapshot() {
+    return totalCalls.get();
+  }
+
+  /**
+   * MetricsProxy is a singleton because we may init multiple schedulers and we
+   * want to clean up resources when a new scheduler replaces the old one.
+   */
+  private static final class MetricsProxy implements DecayRpcSchedulerMXBean {
+    // One singleton per namespace
+    private static final HashMap<String, MetricsProxy> INSTANCES =
+      new HashMap<String, MetricsProxy>();
+
+    // Weakref for delegate, so we don't retain it forever if it can be GC'd
+    private WeakReference<DecayRpcScheduler> delegate;
+
+    private MetricsProxy(String namespace) {
+      MBeans.register(namespace, "DecayRpcScheduler", this);
+    }
+
+    public static synchronized MetricsProxy getInstance(String namespace) {
+      MetricsProxy mp = INSTANCES.get(namespace);
+      if (mp == null) {
+        // We must create one
+        mp = new MetricsProxy(namespace);
+        INSTANCES.put(namespace, mp);
+      }
+      return mp;
+    }
+
+    public void setDelegate(DecayRpcScheduler obj) {
+      this.delegate = new WeakReference<DecayRpcScheduler>(obj);
+    }
+
+    @Override
+    public String getSchedulingDecisionSummary() {
+      DecayRpcScheduler scheduler = delegate.get();
+      if (scheduler == null) {
+        return "No Active Scheduler";
+      } else {
+        return scheduler.getSchedulingDecisionSummary();
+      }
+    }
+
+    @Override
+    public String getCallVolumeSummary() {
+      DecayRpcScheduler scheduler = delegate.get();
+      if (scheduler == null) {
+        return "No Active Scheduler";
+      } else {
+        return scheduler.getCallVolumeSummary();
+      }
+    }
+
+    @Override
+    public int getUniqueIdentityCount() {
+      DecayRpcScheduler scheduler = delegate.get();
+      if (scheduler == null) {
+        return -1;
+      } else {
+        return scheduler.getUniqueIdentityCount();
+      }
+    }
+
+    @Override
+    public long getTotalCallVolume() {
+      DecayRpcScheduler scheduler = delegate.get();
+      if (scheduler == null) {
+        return -1;
+      } else {
+        return scheduler.getTotalCallVolume();
+      }
+    }
+  }
+
+  public int getUniqueIdentityCount() {
+    return callCounts.size();
+  }
+
+  public long getTotalCallVolume() {
+    return totalCalls.get();
+  }
+
+  public String getSchedulingDecisionSummary() {
+    Map<Object, Integer> decisions = scheduleCacheRef.get();
+    if (decisions == null) {
+      return "{}";
+    } else {
+      try {
+        ObjectMapper om = new ObjectMapper();
+        return om.writeValueAsString(decisions);
+      } catch (Exception e) {
+        return "Error: " + e.getMessage();
+      }
+    }
+  }
+
+  public String getCallVolumeSummary() {
+    try {
+      ObjectMapper om = new ObjectMapper();
+      return om.writeValueAsString(callCounts);
+    } catch (Exception e) {
+      return "Error: " + e.getMessage();
+    }
+  }
+}
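
The following standalone sketch is an editorial illustration, not part of the patch. It shows how the scheduler above might be tuned through a Configuration and constructed directly; the constructor signature is taken from its use in FairCallQueue further down, and it assumes the IPC_CALLQUEUE_DECAYSCHEDULER_* key constants are publicly accessible on DecayRpcScheduler.

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.DecayRpcScheduler;

public class DecaySchedulerSketch {
  public static void main(String[] args) {
    String ns = "ipc.8020"; // hypothetical namespace prefix
    Configuration conf = new Configuration();

    // Halve the per-user counts on every sweep, sweeping every 5 seconds.
    conf.setDouble(
        ns + "." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY, 0.5);
    conf.setLong(
        ns + "." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY, 5000);

    // Custom boundaries for 4 queues: 10%, 40% and 75% of total call volume.
    conf.set(
        ns + "." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_THRESHOLDS_KEY, "10,40,75");

    DecayRpcScheduler scheduler = new DecayRpcScheduler(4, ns, conf);
    System.out.println(Arrays.toString(scheduler.getThresholds())); // [0.1, 0.4, 0.75]
  }
}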

+ 30 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcSchedulerMXBean.java

@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+/**
+ * Provides metrics for Decay scheduler.
+ */
+public interface DecayRpcSchedulerMXBean {
+  // Get an overview of the requests in history.
+  String getSchedulingDecisionSummary();
+  String getCallVolumeSummary();
+  int getUniqueIdentityCount();
+  long getTotalCallVolume();
+}
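
Because the scheduler's MetricsProxy registers itself via MBeans.register(namespace, "DecayRpcScheduler", ...), this bean can be read over JMX. The sketch below is an editorial illustration only: the ObjectName pattern "Hadoop:service=<namespace>,name=DecayRpcScheduler" is an assumption about how MBeans.register names beans and should be verified against an actual deployment.

import java.lang.management.ManagementFactory;

import javax.management.JMX;
import javax.management.MBeanServer;
import javax.management.ObjectName;

import org.apache.hadoop.ipc.DecayRpcSchedulerMXBean;

public class DecaySchedulerJmxSketch {
  public static void main(String[] args) throws Exception {
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();

    // Assumed name layout; adjust "ipc.8020" to the namespace used at registration.
    ObjectName name = new ObjectName("Hadoop:service=ipc.8020,name=DecayRpcScheduler");
    DecayRpcSchedulerMXBean bean =
        JMX.newMXBeanProxy(server, name, DecayRpcSchedulerMXBean.class);

    System.out.println("unique identities: " + bean.getUniqueIdentityCount());
    System.out.println("total call volume: " + bean.getTotalCallVolume());
    System.out.println("scheduling decisions: " + bean.getSchedulingDecisionSummary());
  }
}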

+ 449 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java

@@ -0,0 +1,449 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import java.lang.ref.WeakReference;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.AbstractQueue;
+import java.util.HashMap;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang.NotImplementedException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.util.MBeans;
+
+/**
+ * A queue with multiple levels for each priority.
+ */
+public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
+  implements BlockingQueue<E> {
+  // Configuration Keys
+  public static final int    IPC_CALLQUEUE_PRIORITY_LEVELS_DEFAULT = 4;
+  public static final String IPC_CALLQUEUE_PRIORITY_LEVELS_KEY =
+    "faircallqueue.priority-levels";
+
+  public static final Log LOG = LogFactory.getLog(FairCallQueue.class);
+
+  /* The queues */
+  private final ArrayList<BlockingQueue<E>> queues;
+
+  /* Read locks */
+  private final ReentrantLock takeLock = new ReentrantLock();
+  private final Condition notEmpty = takeLock.newCondition();
+  private void signalNotEmpty() {
+    takeLock.lock();
+    try {
+      notEmpty.signal();
+    } finally {
+      takeLock.unlock();
+    }
+  }
+
+  /* Scheduler picks which queue to place in */
+  private RpcScheduler scheduler;
+
+  /* Multiplexer picks which queue to draw from */
+  private RpcMultiplexer multiplexer;
+
+  /* Statistic tracking */
+  private final ArrayList<AtomicLong> overflowedCalls;
+
+  /**
+   * Create a FairCallQueue.
+   * @param capacity the maximum size of each sub-queue
+   * @param ns the prefix to use for configuration
+   * @param conf the configuration to read from
+   * Notes: the FairCallQueue has no fixed capacity. Rather, it has a minimum
+   * capacity of `capacity` and a maximum capacity of `capacity * number_queues`
+   */
+  public FairCallQueue(int capacity, String ns, Configuration conf) {
+    int numQueues = parseNumQueues(ns, conf);
+    LOG.info("FairCallQueue is in use with " + numQueues + " queues.");
+
+    this.queues = new ArrayList<BlockingQueue<E>>(numQueues);
+    this.overflowedCalls = new ArrayList<AtomicLong>(numQueues);
+
+    for(int i=0; i < numQueues; i++) {
+      this.queues.add(new LinkedBlockingQueue<E>(capacity));
+      this.overflowedCalls.add(new AtomicLong(0));
+    }
+
+    this.scheduler = new DecayRpcScheduler(numQueues, ns, conf);
+    this.multiplexer = new WeightedRoundRobinMultiplexer(numQueues, ns, conf);
+
+    // Make this the active source of metrics
+    MetricsProxy mp = MetricsProxy.getInstance(ns);
+    mp.setDelegate(this);
+  }
+
+  /**
+   * Read the number of queues from the configuration.
+   * This will affect the FairCallQueue's overall capacity.
+   * @throws IllegalArgumentException on invalid queue count
+   */
+  private static int parseNumQueues(String ns, Configuration conf) {
+    int retval = conf.getInt(ns + "." + IPC_CALLQUEUE_PRIORITY_LEVELS_KEY,
+      IPC_CALLQUEUE_PRIORITY_LEVELS_DEFAULT);
+    if(retval < 1) {
+      throw new IllegalArgumentException("numQueues must be at least 1");
+    }
+    return retval;
+  }
+
+  /**
+   * Returns the first non-empty queue with equal or lesser priority
+   * than <i>startIdx</i>. Wraps around, searching a maximum of N
+   * queues, where N is this.queues.size().
+   *
+   * @param startIdx the queue number to start searching at
+   * @return the first non-empty queue with less priority, or null if
+   * everything was empty
+   */
+  private BlockingQueue<E> getFirstNonEmptyQueue(int startIdx) {
+    final int numQueues = this.queues.size();
+    for(int i=0; i < numQueues; i++) {
+      int idx = (i + startIdx) % numQueues; // offset and wrap around
+      BlockingQueue<E> queue = this.queues.get(idx);
+      if (queue.size() != 0) {
+        return queue;
+      }
+    }
+
+    // All queues were empty
+    return null;
+  }
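+
+  // Example: with 4 queues and startIdx = 2, the search visits queues
+  // 2, 3, 0, 1 in that order and returns the first non-empty one.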
+
+  /* AbstractQueue and BlockingQueue methods */
+
+  /**
+   * Put and offer follow the same pattern:
+   * 1. Get a priorityLevel from the scheduler
+   * 2. Get the nth sub-queue matching this priorityLevel
+   * 3. delegate the call to this sub-queue.
+   *
+   * But differ in how they handle overflow:
+   * - Put will move on to the next queue until it lands on the last queue
+   * - Offer does not attempt other queues on overflow
+   */
+  @Override
+  public void put(E e) throws InterruptedException {
+    int priorityLevel = scheduler.getPriorityLevel(e);
+
+    final int numLevels = this.queues.size();
+    while (true) {
+      BlockingQueue<E> q = this.queues.get(priorityLevel);
+      boolean res = q.offer(e);
+      if (!res) {
+        // Update stats
+        this.overflowedCalls.get(priorityLevel).getAndIncrement();
+
+        // If we failed to insert, try again on the next level
+        priorityLevel++;
+
+        if (priorityLevel == numLevels) {
+          // That was the last one, we will block on put in the last queue
+          // Delete this line to drop the call
+          this.queues.get(priorityLevel-1).put(e);
+          break;
+        }
+      } else {
+        break;
+      }
+    }
+
+    signalNotEmpty();
+  }
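+
+  // Example: if a call is scheduled into queue 1 of 4 and queues 1, 2 and 3
+  // are all full, put() increments overflowedCalls for levels 1, 2 and 3 and
+  // then blocks on queues.get(3).put(e).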
+
+  @Override
+  public boolean offer(E e, long timeout, TimeUnit unit)
+      throws InterruptedException {
+    int priorityLevel = scheduler.getPriorityLevel(e);
+    BlockingQueue<E> q = this.queues.get(priorityLevel);
+    boolean ret = q.offer(e, timeout, unit);
+
+    signalNotEmpty();
+
+    return ret;
+  }
+
+  @Override
+  public boolean offer(E e) {
+    int priorityLevel = scheduler.getPriorityLevel(e);
+    BlockingQueue<E> q = this.queues.get(priorityLevel);
+    boolean ret = q.offer(e);
+
+    signalNotEmpty();
+
+    return ret;
+  }
+
+  @Override
+  public E take() throws InterruptedException {
+    int startIdx = this.multiplexer.getAndAdvanceCurrentIndex();
+
+    takeLock.lockInterruptibly();
+    try {
+      // Wait while queue is empty
+      for (;;) {
+        BlockingQueue<E> q = this.getFirstNonEmptyQueue(startIdx);
+        if (q != null) {
+          // Got queue, so return if we can poll out an object
+          E e = q.poll();
+          if (e != null) {
+            return e;
+          }
+        }
+
+        notEmpty.await();
+      }
+    } finally {
+      takeLock.unlock();
+    }
+  }
+
+  @Override
+  public E poll(long timeout, TimeUnit unit)
+      throws InterruptedException {
+
+    int startIdx = this.multiplexer.getAndAdvanceCurrentIndex();
+
+    long nanos = unit.toNanos(timeout);
+    takeLock.lockInterruptibly();
+    try {
+      for (;;) {
+        BlockingQueue<E> q = this.getFirstNonEmptyQueue(startIdx);
+        if (q != null) {
+          E e = q.poll();
+          if (e != null) {
+            // Escape condition: there might be something available
+            return e;
+          }
+        }
+
+        if (nanos <= 0) {
+          // Wait has elapsed
+          return null;
+        }
+
+        try {
+          // Now wait on the condition for a bit. If we get
+          // spuriously awoken we'll re-loop
+          nanos = notEmpty.awaitNanos(nanos);
+        } catch (InterruptedException ie) {
+          notEmpty.signal(); // propagate to a non-interrupted thread
+          throw ie;
+        }
+      }
+    } finally {
+      takeLock.unlock();
+    }
+  }
+
+  /**
+   * poll() provides no strict consistency: it is possible for poll to return
+   * null even though an element is in the queue.
+   */
+  @Override
+  public E poll() {
+    int startIdx = this.multiplexer.getAndAdvanceCurrentIndex();
+
+    BlockingQueue<E> q = this.getFirstNonEmptyQueue(startIdx);
+    if (q == null) {
+      return null; // everything is empty
+    }
+
+    // Delegate to the sub-queue's poll, which could still return null
+    return q.poll();
+  }
+
+  /**
+   * Peek, like poll, provides no strict consistency.
+   */
+  @Override
+  public E peek() {
+    BlockingQueue<E> q = this.getFirstNonEmptyQueue(0);
+    if (q == null) {
+      return null;
+    } else {
+      return q.peek();
+    }
+  }
+
+  /**
+   * Size returns the sum of all sub-queue sizes, so it may be greater than
+   * capacity.
+   * Note: size provides no strict consistency, and should not be used to
+   * control queue IO.
+   */
+  @Override
+  public int size() {
+    int size = 0;
+    for (BlockingQueue q : this.queues) {
+      size += q.size();
+    }
+    return size;
+  }
+
+  /**
+   * Iterator is not implemented, as it is not needed.
+   */
+  @Override
+  public Iterator<E> iterator() {
+    throw new NotImplementedException();
+  }
+
+  /**
+   * drainTo defers to each sub-queue. Note that draining from a FairCallQueue
+   * to another FairCallQueue will likely fail, since the incoming calls
+   * may be scheduled differently in the new FairCallQueue. Nonetheless this
+   * method is provided for completeness.
+   */
+  @Override
+  public int drainTo(Collection<? super E> c, int maxElements) {
+    int sum = 0;
+    for (BlockingQueue<E> q : this.queues) {
+      sum += q.drainTo(c, maxElements);
+    }
+    return sum;
+  }
+
+  @Override
+  public int drainTo(Collection<? super E> c) {
+    int sum = 0;
+    for (BlockingQueue<E> q : this.queues) {
+      sum += q.drainTo(c);
+    }
+    return sum;
+  }
+
+  /**
+   * Returns maximum remaining capacity. This does not reflect how much you can
+   * ideally fit in this FairCallQueue, as that would depend on the scheduler's
+   * decisions.
+   */
+  @Override
+  public int remainingCapacity() {
+    int sum = 0;
+    for (BlockingQueue q : this.queues) {
+      sum += q.remainingCapacity();
+    }
+    return sum;
+  }
+
+  /**
+   * MetricsProxy is a singleton because we may init multiple
+   * FairCallQueues, but the metrics system cannot unregister beans cleanly.
+   */
+  private static final class MetricsProxy implements FairCallQueueMXBean {
+    // One singleton per namespace
+    private static final HashMap<String, MetricsProxy> INSTANCES =
+      new HashMap<String, MetricsProxy>();
+
+    // Weakref for delegate, so we don't retain it forever if it can be GC'd
+    private WeakReference<FairCallQueue> delegate;
+
+    // Keep track of how many objects we registered
+    private int revisionNumber = 0;
+
+    private MetricsProxy(String namespace) {
+      MBeans.register(namespace, "FairCallQueue", this);
+    }
+
+    public static synchronized MetricsProxy getInstance(String namespace) {
+      MetricsProxy mp = INSTANCES.get(namespace);
+      if (mp == null) {
+        // We must create one
+        mp = new MetricsProxy(namespace);
+        INSTANCES.put(namespace, mp);
+      }
+      return mp;
+    }
+
+    public void setDelegate(FairCallQueue obj) {
+      this.delegate = new WeakReference<FairCallQueue>(obj);
+      this.revisionNumber++;
+    }
+
+    @Override
+    public int[] getQueueSizes() {
+      FairCallQueue obj = this.delegate.get();
+      if (obj == null) {
+        return new int[]{};
+      }
+
+      return obj.getQueueSizes();
+    }
+
+    @Override
+    public long[] getOverflowedCalls() {
+      FairCallQueue obj = this.delegate.get();
+      if (obj == null) {
+        return new long[]{};
+      }
+
+      return obj.getOverflowedCalls();
+    }
+
+    @Override public int getRevision() {
+      return revisionNumber;
+    }
+  }
+
+  // FairCallQueueMXBean
+  public int[] getQueueSizes() {
+    int numQueues = queues.size();
+    int[] sizes = new int[numQueues];
+    for (int i=0; i < numQueues; i++) {
+      sizes[i] = queues.get(i).size();
+    }
+    return sizes;
+  }
+
+  public long[] getOverflowedCalls() {
+    int numQueues = queues.size();
+    long[] calls = new long[numQueues];
+    for (int i=0; i < numQueues; i++) {
+      calls[i] = overflowedCalls.get(i).get();
+    }
+    return calls;
+  }
+
+  // For testing
+  @VisibleForTesting
+  public void setScheduler(RpcScheduler newScheduler) {
+    this.scheduler = newScheduler;
+  }
+
+  @VisibleForTesting
+  public void setMultiplexer(RpcMultiplexer newMux) {
+    this.multiplexer = newMux;
+  }
+}
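
A minimal construction sketch, again an editorial illustration rather than part of the patch. It exercises only configuration and construction; feeding the queue requires a Schedulable implementation, which is out of scope here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.FairCallQueue;
import org.apache.hadoop.ipc.Schedulable;

public class FairCallQueueSketch {
  public static void main(String[] args) {
    String ns = "ipc.8020"; // hypothetical namespace prefix
    Configuration conf = new Configuration();

    // Two priority levels instead of the default four.
    conf.setInt(ns + "." + FairCallQueue.IPC_CALLQUEUE_PRIORITY_LEVELS_KEY, 2);

    // Each sub-queue holds up to 100 calls, so overall capacity is 2 * 100.
    FairCallQueue<Schedulable> queue = new FairCallQueue<Schedulable>(100, ns, conf);

    System.out.println("remaining capacity: " + queue.remainingCapacity()); // 200
    System.out.println("size: " + queue.size());                            // 0
  }
}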

+ 27 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueueMXBean.java

@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+public interface FairCallQueueMXBean {
+  // Get the size of each sub-queue; the index corresponds to the priority
+  // level.
+  int[] getQueueSizes();
+  long[] getOverflowedCalls();
+  int getRevision();
+}
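
For completeness, a brief editorial sketch of reading the same numbers directly from a FairCallQueue instance, which exposes the bean's accessors as public methods; the reporting format is illustrative only.

import java.util.Arrays;

import org.apache.hadoop.ipc.FairCallQueue;
import org.apache.hadoop.ipc.Schedulable;

public class FairCallQueueReportSketch {
  // Index in both arrays corresponds to the priority level.
  static void report(FairCallQueue<? extends Schedulable> queue) {
    System.out.println("queue sizes:      " + Arrays.toString(queue.getQueueSizes()));
    System.out.println("overflowed calls: " + Arrays.toString(queue.getOverflowedCalls()));
  }
}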

Some files were not shown because too many files changed in this diff