소스 검색

Merge branch 'apache-trunk' into HDFS-6994

cnauroth 10년 전
부모
커밋
a607429b55
100개의 변경된 파일에서 7222개의 추가 작업과 674개의 삭제 작업이 있습니다
  1. 7 0
      .gitignore
  2. 9 16
      BUILDING.txt
  3. 3 4
      LICENSE.txt
  4. 6 0
      dev-support/create-release.sh
  5. 47 0
      dev-support/smart-apply-patch.sh
  6. 0 73
      dev-support/test-patch.sh
  7. 2 2
      hadoop-assemblies/pom.xml
  8. 1 1
      hadoop-client/pom.xml
  9. 6 8
      hadoop-common-project/hadoop-annotations/pom.xml
  10. 4 1
      hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java
  11. 6 7
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java
  12. 1 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/Authenticator.java
  13. 10 6
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
  14. 5 5
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java
  15. 1 4
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AltKerberosAuthenticationHandler.java
  16. 50 29
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
  17. 12 12
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java
  18. 2 2
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
  19. 6 7
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
  20. 15 11
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java
  21. 2 2
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
  22. 3 5
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
  23. 3 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java
  24. 2 3
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java
  25. 2 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
  26. 7 4
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java
  27. 3 2
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java
  28. 5 1
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
  29. 3 6
      hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java
  30. 490 28
      hadoop-common-project/hadoop-common/CHANGES.txt
  31. 17 1
      hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  32. 14 3
      hadoop-common-project/hadoop-common/pom.xml
  33. 57 19
      hadoop-common-project/hadoop-common/src/CMakeLists.txt
  34. 8 3
      hadoop-common-project/hadoop-common/src/main/bin/hadoop
  35. 15 1
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd
  36. 9 0
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
  37. 14 5
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh
  38. 135 23
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
  39. 27 2
      hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd
  40. 0 2
      hadoop-common-project/hadoop-common/src/main/bin/rcc
  41. 1 1
      hadoop-common-project/hadoop-common/src/main/bin/slaves.sh
  42. 1 1
      hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh
  43. 102 44
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
  44. 0 7
      hadoop-common-project/hadoop-common/src/main/conf/hadoop-user-functions.sh.example
  45. 3569 0
      hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
  46. 75 10
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  47. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java
  48. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java
  49. 12 15
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java
  50. 66 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherOption.java
  51. 37 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java
  52. 1 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java
  53. 7 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java
  54. 60 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/UnsupportedCodecException.java
  55. 5 7
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
  56. 12 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java
  57. 8 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
  58. 17 15
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
  59. 66 23
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
  60. 21 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java
  61. 7 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
  62. 5 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferReadable.java
  63. 10 11
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
  64. 10 7
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
  65. 13 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
  66. 30 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  67. 24 8
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
  68. 7 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
  69. 4 19
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
  70. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
  71. 4 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
  72. 12 8
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java
  73. 0 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
  74. 91 34
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
  75. 8 13
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
  76. 0 66
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AccessControlException.java
  77. 3 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java
  78. 77 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java
  79. 18 14
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
  80. 27 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
  81. 1 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFactory.java
  82. 7 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
  83. 2 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java
  84. 7 10
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
  85. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
  86. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
  87. 2 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java
  88. 84 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/And.java
  89. 302 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/BaseExpression.java
  90. 107 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Expression.java
  91. 156 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/ExpressionFactory.java
  92. 144 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/FilterExpression.java
  93. 444 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Find.java
  94. 271 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/FindOptions.java
  95. 100 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Name.java
  96. 76 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Print.java
  97. 88 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Result.java
  98. 3 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
  99. 0 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
  100. 2 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java

+ 7 - 0
.gitignore

@@ -3,6 +3,10 @@
 *.iws
 *.orig
 *.rej
+**/.keep
+*.sdf
+*.suo
+*.vcxproj.user
 .idea
 .svn
 .classpath
@@ -15,3 +19,6 @@ hadoop-hdfs-project/hadoop-hdfs/downloads
 hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads
 hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
 hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox
+yarnregistry.pdf
+hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml

+ 9 - 16
BUILDING.txt

@@ -4,7 +4,7 @@ Build instructions for Hadoop
 Requirements:
 
 * Unix System
-* JDK 1.6+
+* JDK 1.7+
 * Maven 3.0 or later
 * Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.5.0
@@ -198,34 +198,25 @@ export MAVEN_OPTS="-Xms256m -Xmx512m"
 
 ----------------------------------------------------------------------------------
 
-Building on OS/X
-
-----------------------------------------------------------------------------------
-
-A one-time manual step is required to enable building Hadoop OS X with Java 7
-every time the JDK is updated.
-see: https://issues.apache.org/jira/browse/HADOOP-9350
-
-$ sudo mkdir `/usr/libexec/java_home`/Classes
-$ sudo ln -s `/usr/libexec/java_home`/lib/tools.jar `/usr/libexec/java_home`/Classes/classes.jar
-
-----------------------------------------------------------------------------------
-
 Building on Windows
 
 ----------------------------------------------------------------------------------
 Requirements:
 
 * Windows System
-* JDK 1.6+
+* JDK 1.7+
 * Maven 3.0 or later
 * Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.5.0
 * CMake 2.6 or newer
 * Windows SDK or Visual Studio 2010 Professional
-* Unix command-line tools from GnuWin32 or Cygwin: sh, mkdir, rm, cp, tar, gzip
 * zlib headers (if building native code bindings for zlib)
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)
+* Unix command-line tools from GnuWin32: sh, mkdir, rm, cp, tar, gzip. These
+  tools must be present on your PATH.
+
+Unix command-line tools are also included with the Windows Git package which
+can be downloaded from http://git-scm.com/download/win.
 
 If using Visual Studio, it must be Visual Studio 2010 Professional (not 2012).
 Do not use Visual Studio Express.  It does not support compiling for 64-bit,
@@ -234,6 +225,8 @@ download here:
 
 http://www.microsoft.com/en-us/download/details.aspx?id=8279
 
+Cygwin is neither required nor supported.
+
 ----------------------------------------------------------------------------------
 Building:
 

+ 3 - 4
LICENSE.txt

@@ -252,13 +252,12 @@ in src/main/native/src/org/apache/hadoop/util:
  *   BSD-style license that can be found in the LICENSE file.
  */
 
-For src/main/native/src/org/apache/hadoop/io/compress/lz4/{lz4.h,lz4.c,
-lz4_encoder.h,lz4hc.h,lz4hc.c,lz4hc_encoder.h},
+For src/main/native/src/org/apache/hadoop/io/compress/lz4/{lz4.h,lz4.c,lz4hc.h,lz4hc.c},
 
 /*
    LZ4 - Fast LZ compression algorithm
    Header File
-   Copyright (C) 2011-2013, Yann Collet.
+   Copyright (C) 2011-2014, Yann Collet.
    BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
 
    Redistribution and use in source and binary forms, with or without
@@ -285,8 +284,8 @@ lz4_encoder.h,lz4hc.h,lz4hc.c,lz4hc_encoder.h},
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
    You can contact the author at :
-   - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
    - LZ4 source repository : http://code.google.com/p/lz4/
+   - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
 */
 
 

+ 6 - 0
dev-support/create-release.sh

@@ -47,6 +47,9 @@ RC_LABEL=$1
 # Extract Hadoop version from POM
 HADOOP_VERSION=`cat pom.xml | grep "<version>" | head -1 | sed 's|^ *<version>||' | sed 's|</version>.*$||'`
 
+# Setup git
+GIT=${GIT:-git}
+
 echo
 echo "*****************************************************************"
 echo
@@ -70,6 +73,9 @@ fi
 
 ARTIFACTS_DIR="target/artifacts"
 
+# git clean to clear any remnants from previous build
+run ${GIT} clean -xdf
+
 # mvn clean for sanity
 run ${MVN} clean
 

+ 47 - 0
dev-support/smart-apply-patch.sh

@@ -13,6 +13,40 @@
 
 set -e
 
+#
+# Determine if the patch file is a git diff file with prefixes.
+# These files are generated via "git diff" *without* the --no-prefix option.
+#
+# We can apply these patches more easily because we know that the a/ and b/
+# prefixes in the "diff" lines stands for the project root directory.
+# So we don't have to hunt for the project root.
+# And of course, we know that the patch file was generated using git, so we
+# know git apply can handle it properly.
+#
+# Arguments: file name.
+# Return: 0 if it is a git diff; 1 otherwise.
+#
+is_git_diff_with_prefix() {
+  DIFF_TYPE="unknown"
+  while read -r line; do
+    if [[ "$line" =~ ^diff\  ]]; then
+      if [[ "$line" =~ ^diff\ \-\-git ]]; then
+        DIFF_TYPE="git"
+      else
+        return 1 # All diff lines must be diff --git lines.
+      fi
+    fi
+    if [[ "$line" =~ ^\+\+\+\  ]] ||
+       [[ "$line" =~ ^\-\-\-\  ]]; then
+      if ! [[ "$line" =~ ^....[ab]/ ]]; then
+        return 1 # All +++ and --- lines must start with a/ or b/.
+      fi
+    fi
+  done < $1
+  [ x$DIFF_TYPE == x"git" ] || return 1
+  return 0 # return true (= 0 in bash)
+}
+
 PATCH_FILE=$1
 DRY_RUN=$2
 if [ -z "$PATCH_FILE" ]; then
@@ -37,6 +71,19 @@ if [ "$PATCH_FILE" == "-" ]; then
   TOCLEAN="$TOCLEAN $PATCH_FILE"
 fi
 
+# Special case for git-diff patches without --no-prefix
+if is_git_diff_with_prefix "$PATCH_FILE"; then
+  GIT_FLAGS="--binary -p1 -v"
+  if [[ -z $DRY_RUN ]]; then
+      GIT_FLAGS="$GIT_FLAGS --stat --apply "
+      echo Going to apply git patch with: git apply "${GIT_FLAGS}"
+  else
+      GIT_FLAGS="$GIT_FLAGS --check "
+  fi
+  git apply ${GIT_FLAGS} "${PATCH_FILE}"
+  exit $?
+fi
+
 # Come up with a list of changed files into $TMP
 TMP=/tmp/tmp.paths.$$
 TOCLEAN="$TOCLEAN $TMP"

+ 0 - 73
dev-support/test-patch.sh

@@ -13,7 +13,6 @@
 
 
 #set -x
-ulimit -n 1024
 
 ### Setup some variables.  
 ### BUILD_URL is set by Hudson if it is run by patch process
@@ -858,74 +857,6 @@ findModules () {
   rm $TMP_MODULES
   echo $CHANGED_MODULES
 }
-###############################################################################
-### Run the test-contrib target
-runContribTests () {
-  echo ""
-  echo ""
-  echo "======================================================================"
-  echo "======================================================================"
-  echo "    Running contrib tests."
-  echo "======================================================================"
-  echo "======================================================================"
-  echo ""
-  echo ""
-
-  if [[ `$GREP -c 'test-contrib' build.xml` == 0 ]] ; then
-    echo "No contrib tests in this project."
-    return 0
-  fi
-
-  ### Kill any rogue build processes from the last attempt
-  $PS auxwww | $GREP ${PROJECT_NAME}PatchProcess | $AWK '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
-
-  #echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" $ECLIPSE_PROPERTY -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no test-contrib"
-  #$ANT_HOME/bin/ant -Dversion="${VERSION}" $ECLIPSE_PROPERTY -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no test-contrib
-  echo "NOP"
-  if [[ $? != 0 ]] ; then
-    JIRA_COMMENT="$JIRA_COMMENT
-
-    {color:red}-1 contrib tests{color}.  The patch failed contrib unit tests."
-    return 1
-  fi
-  JIRA_COMMENT="$JIRA_COMMENT
-
-    {color:green}+1 contrib tests{color}.  The patch passed contrib unit tests."
-  return 0
-}
-
-###############################################################################
-### Run the inject-system-faults target
-checkInjectSystemFaults () {
-  echo ""
-  echo ""
-  echo "======================================================================"
-  echo "======================================================================"
-  echo "    Checking the integrity of system test framework code."
-  echo "======================================================================"
-  echo "======================================================================"
-  echo ""
-  echo ""
-  
-  ### Kill any rogue build processes from the last attempt
-  $PS auxwww | $GREP ${PROJECT_NAME}PatchProcess | $AWK '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
-
-  #echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME inject-system-faults"
-  #$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME inject-system-faults
-  echo "NOP"
-  return 0
-  if [[ $? != 0 ]] ; then
-    JIRA_COMMENT="$JIRA_COMMENT
-
-    {color:red}-1 system test framework{color}.  The patch failed system test framework compile."
-    return 1
-  fi
-  JIRA_COMMENT="$JIRA_COMMENT
-
-    {color:green}+1 system test framework{color}.  The patch passed system test framework compile."
-  return 0
-}
-
 ###############################################################################
 ### Submit a comment to the defect's Jira
 submitJiraComment () {
@@ -1060,11 +991,7 @@ checkReleaseAuditWarnings
 if [[ $JENKINS == "true" || $RUN_TESTS == "true" ]] ; then
   runTests
   (( RESULT = RESULT + $? ))
-  runContribTests
-  (( RESULT = RESULT + $? ))
 fi
-checkInjectSystemFaults
-(( RESULT = RESULT + $? ))
 JIRA_COMMENT_FOOTER="Test results: $BUILD_URL/testReport/
 $JIRA_COMMENT_FOOTER"
 

+ 2 - 2
hadoop-assemblies/pom.xml

@@ -45,10 +45,10 @@
         <configuration>
           <rules>
             <requireMavenVersion>
-              <version>[3.0.0,)</version>
+              <version>${enforced.maven.version}</version>
             </requireMavenVersion>
             <requireJavaVersion>
-              <version>1.6</version>
+              <version>${enforced.java.version}</version>
             </requireJavaVersion>
           </rules>
         </configuration>

+ 1 - 1
hadoop-client/pom.xml

@@ -13,7 +13,7 @@
  limitations under the License. See accompanying LICENSE file.
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
 <parent>
    <groupId>org.apache.hadoop</groupId>

+ 6 - 8
hadoop-common-project/hadoop-annotations/pom.xml

@@ -40,32 +40,30 @@
 
   <profiles>
     <profile>
-      <id>os.linux</id>
+      <id>jdk1.7</id>
       <activation>
-        <os>
-          <family>!Mac</family>
-        </os>
+        <jdk>1.7</jdk>
       </activation>
       <dependencies>
         <dependency>
           <groupId>jdk.tools</groupId>
           <artifactId>jdk.tools</artifactId>
-          <version>1.6</version>
+          <version>1.7</version>
           <scope>system</scope>
           <systemPath>${java.home}/../lib/tools.jar</systemPath>
         </dependency>
       </dependencies>
     </profile>
     <profile>
-      <id>jdk1.7</id>
+      <id>jdk1.8</id>
       <activation>
-        <jdk>1.7</jdk>
+        <jdk>1.8</jdk>
       </activation>
       <dependencies>
         <dependency>
           <groupId>jdk.tools</groupId>
           <artifactId>jdk.tools</artifactId>
-          <version>1.7</version>
+          <version>1.8</version>
           <scope>system</scope>
           <systemPath>${java.home}/../lib/tools.jar</systemPath>
         </dependency>

+ 4 - 1
hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java

@@ -19,6 +19,7 @@ import java.io.BufferedReader;
 import java.io.InputStreamReader;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.nio.charset.Charset;
 
 /**
  * Example that uses <code>AuthenticatedURL</code>.
@@ -39,7 +40,9 @@ public class WhoClient {
       System.out.println("Status code: " + conn.getResponseCode() + " " + conn.getResponseMessage());
       System.out.println();
       if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) {
-        BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
+        BufferedReader reader = new BufferedReader(
+            new InputStreamReader(
+                conn.getInputStream(), Charset.forName("UTF-8")));
         String line = reader.readLine();
         while (line != null) {
           System.out.println(line);

+ 6 - 7
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticatedURL.java

@@ -24,19 +24,18 @@ import java.util.Map;
 /**
  * The {@link AuthenticatedURL} class enables the use of the JDK {@link URL} class
  * against HTTP endpoints protected with the {@link AuthenticationFilter}.
- * <p/>
+ * <p>
  * The authentication mechanisms supported by default are Hadoop Simple  authentication
  * (also known as pseudo authentication) and Kerberos SPNEGO authentication.
- * <p/>
+ * <p>
  * Additional authentication mechanisms can be supported via {@link Authenticator} implementations.
- * <p/>
+ * <p>
  * The default {@link Authenticator} is the {@link KerberosAuthenticator} class which supports
  * automatic fallback from Kerberos SPNEGO to Hadoop Simple authentication.
- * <p/>
+ * <p>
  * <code>AuthenticatedURL</code> instances are not thread-safe.
- * <p/>
+ * <p>
  * The usage pattern of the {@link AuthenticatedURL} is:
- * <p/>
  * <pre>
  *
  * // establishing an initial connection
@@ -240,7 +239,7 @@ public class AuthenticatedURL {
 
   /**
    * Helper method that extracts an authentication token received from a connection.
-   * <p/>
+   * <p>
    * This method is used by {@link Authenticator} implementations.
    *
    * @param conn connection to extract the authentication token from.

+ 1 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/Authenticator.java

@@ -19,7 +19,7 @@ import java.net.URL;
 
 /**
  * Interface for client authentication mechanisms.
- * <p/>
+ * <p>
  * Implementations are use-once instances, they don't need to be thread safe.
  */
 public interface Authenticator {

+ 10 - 6
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java

@@ -23,6 +23,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.security.auth.Subject;
+import javax.security.auth.kerberos.KerberosKey;
+import javax.security.auth.kerberos.KerberosTicket;
 import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.Configuration;
 import javax.security.auth.login.LoginContext;
@@ -41,9 +43,9 @@ import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
 /**
  * The {@link KerberosAuthenticator} implements the Kerberos SPNEGO authentication sequence.
- * <p/>
+ * <p>
  * It uses the default principal for the Kerberos cache (normally set via kinit).
- * <p/>
+ * <p>
  * It falls back to the {@link PseudoAuthenticator} if the HTTP endpoint does not trigger an SPNEGO authentication
  * sequence.
  */
@@ -160,9 +162,9 @@ public class KerberosAuthenticator implements Authenticator {
 
   /**
    * Performs SPNEGO authentication against the specified URL.
-   * <p/>
+   * <p>
    * If a token is given it does a NOP and returns the given token.
-   * <p/>
+   * <p>
    * If no token is given, it will perform the SPNEGO authentication sequence using an
    * HTTP <code>OPTIONS</code> request.
    *
@@ -209,7 +211,7 @@ public class KerberosAuthenticator implements Authenticator {
 
   /**
    * If the specified URL does not support SPNEGO authentication, a fallback {@link Authenticator} will be used.
-   * <p/>
+   * <p>
    * This implementation returns a {@link PseudoAuthenticator}.
    *
    * @return the fallback {@link Authenticator}.
@@ -247,7 +249,9 @@ public class KerberosAuthenticator implements Authenticator {
     try {
       AccessControlContext context = AccessController.getContext();
       Subject subject = Subject.getSubject(context);
-      if (subject == null) {
+      if (subject == null
+          || (subject.getPrivateCredentials(KerberosKey.class).isEmpty()
+              && subject.getPrivateCredentials(KerberosTicket.class).isEmpty())) {
         LOG.debug("No subject in context, logging in");
         subject = new Subject();
         LoginContext login = new LoginContext("", subject,

+ 5 - 5
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java

@@ -20,7 +20,7 @@ import java.net.URL;
 /**
  * The {@link PseudoAuthenticator} implementation provides an authentication equivalent to Hadoop's
  * Simple authentication, it trusts the value of the 'user.name' Java System property.
- * <p/>
+ * <p>
  * The 'user.name' value is propagated using an additional query string parameter {@link #USER_NAME} ('user.name').
  */
 public class PseudoAuthenticator implements Authenticator {
@@ -47,13 +47,13 @@ public class PseudoAuthenticator implements Authenticator {
 
   /**
    * Performs simple authentication against the specified URL.
-   * <p/>
+   * <p>
    * If a token is given it does a NOP and returns the given token.
-   * <p/>
+   * <p>
    * If no token is given, it will perform an HTTP <code>OPTIONS</code> request injecting an additional
    * parameter {@link #USER_NAME} in the query string with the value returned by the {@link #getUserName()}
    * method.
-   * <p/>
+   * <p>
    * If the response is successful it will update the authentication token.
    *
    * @param url the URl to authenticate against.
@@ -79,7 +79,7 @@ public class PseudoAuthenticator implements Authenticator {
 
   /**
    * Returns the current user name.
-   * <p/>
+   * <p>
    * This implementation returns the value of the Java system property 'user.name'
    *
    * @return the current user name.

+ 1 - 4
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AltKerberosAuthenticationHandler.java

@@ -28,7 +28,6 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException;
  * to allow a developer to implement their own custom authentication for browser
  * access.  The alternateAuthenticate method will be called whenever a request
  * comes from a browser.
- * <p/>
  */
 public abstract class AltKerberosAuthenticationHandler
                         extends KerberosAuthenticationHandler {
@@ -52,7 +51,6 @@ public abstract class AltKerberosAuthenticationHandler
   /**
    * Returns the authentication type of the authentication handler,
    * 'alt-kerberos'.
-   * <p/>
    *
    * @return the authentication type of the authentication handler,
    * 'alt-kerberos'.
@@ -80,7 +78,6 @@ public abstract class AltKerberosAuthenticationHandler
    * completed successfully (in the case of Java access) and only after the
    * custom authentication implemented by the subclass in alternateAuthenticate
    * has completed successfully (in the case of browser access).
-   * <p/>
    *
    * @param request the HTTP client request.
    * @param response the HTTP client response.
@@ -109,7 +106,7 @@ public abstract class AltKerberosAuthenticationHandler
    * refers to a browser.  If its not a browser, then Kerberos authentication
    * will be used; if it is a browser, alternateAuthenticate from the subclass
    * will be used.
-   * <p/>
+   * <p>
    * A User-Agent String is considered to be a browser if it does not contain
    * any of the values from alt-kerberos.non-browser.user-agents; the default
    * behavior is to consider everything a browser unless it contains one of:

+ 50 - 29
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java

@@ -17,6 +17,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import org.apache.hadoop.security.authentication.util.Signer;
 import org.apache.hadoop.security.authentication.util.SignerException;
 import org.apache.hadoop.security.authentication.util.RandomSignerSecretProvider;
@@ -36,24 +37,27 @@ import javax.servlet.http.Cookie;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletRequestWrapper;
 import javax.servlet.http.HttpServletResponse;
+
 import java.io.IOException;
 import java.security.Principal;
 import java.text.SimpleDateFormat;
 import java.util.*;
 
 /**
- * The {@link AuthenticationFilter} enables protecting web application resources with different (pluggable)
+ * <p>The {@link AuthenticationFilter} enables protecting web application
+ * resources with different (pluggable)
  * authentication mechanisms and signer secret providers.
- * <p/>
+ * </p>
+ * <p>
  * Out of the box it provides 2 authentication mechanisms: Pseudo and Kerberos SPNEGO.
- * <p/>
+ * </p>
  * Additional authentication mechanisms are supported via the {@link AuthenticationHandler} interface.
- * <p/>
+ * <p>
  * This filter delegates to the configured authentication handler for authentication and once it obtains an
  * {@link AuthenticationToken} from it, sets a signed HTTP cookie with the token. For client requests
  * that provide the signed HTTP cookie, it verifies the validity of the cookie, extracts the user information
  * and lets the request proceed to the target resource.
- * <p/>
+ * </p>
  * The supported configuration properties are:
  * <ul>
  * <li>config.prefix: indicates the prefix to be used by all other configuration properties, the default value
@@ -71,18 +75,19 @@ import java.util.*;
  * <li>[#PREFIX#.]cookie.domain: domain to use for the HTTP cookie that stores the authentication token.</li>
  * <li>[#PREFIX#.]cookie.path: path to use for the HTTP cookie that stores the authentication token.</li>
  * </ul>
- * <p/>
+ * <p>
  * The rest of the configuration properties are specific to the {@link AuthenticationHandler} implementation and the
  * {@link AuthenticationFilter} will take all the properties that start with the prefix #PREFIX#, it will remove
  * the prefix from it and it will pass them to the the authentication handler for initialization. Properties that do
  * not start with the prefix will not be passed to the authentication handler initialization.
- * <p/>
+ * </p>
+ * <p>
  * Out of the box it provides 3 signer secret provider implementations:
  * "string", "random", and "zookeeper"
- * <p/>
+ * </p>
  * Additional signer secret providers are supported via the
  * {@link SignerSecretProvider} class.
- * <p/>
+ * <p>
  * For the HTTP cookies mentioned above, the SignerSecretProvider is used to
  * determine the secret to use for signing the cookies. Different
  * implementations can have different behaviors.  The "string" implementation
@@ -92,7 +97,7 @@ import java.util.*;
  * [#PREFIX#.]token.validity mentioned above.  The "zookeeper" implementation
  * is like the "random" one, except that it synchronizes the random secret
  * and rollovers between multiple servers; it's meant for HA services.
- * <p/>
+ * </p>
  * The relevant configuration properties are:
  * <ul>
  * <li>signer.secret.provider: indicates the name of the SignerSecretProvider
@@ -106,10 +111,10 @@ import java.util.*;
  * implementations are specified, this value is used as the rollover
  * interval.</li>
  * </ul>
- * <p/>
+ * <p>
  * The "zookeeper" implementation has additional configuration properties that
  * must be specified; see {@link ZKSignerSecretProvider} for details.
- * <p/>
+ * </p>
  * For subclasses of AuthenticationFilter that want additional control over the
  * SignerSecretProvider, they can use the following attribute set in the
  * ServletContext:
@@ -188,10 +193,9 @@ public class AuthenticationFilter implements Filter {
   private String cookiePath;
 
   /**
-   * Initializes the authentication filter and signer secret provider.
-   * <p/>
-   * It instantiates and initializes the specified {@link AuthenticationHandler}.
-   * <p/>
+   * <p>Initializes the authentication filter and signer secret provider.</p>
+   * It instantiates and initializes the specified {@link
+   * AuthenticationHandler}.
    *
    * @param filterConfig filter configuration.
    *
@@ -219,6 +223,19 @@ public class AuthenticationFilter implements Filter {
       authHandlerClassName = authHandlerName;
     }
 
+    validity = Long.parseLong(config.getProperty(AUTH_TOKEN_VALIDITY, "36000"))
+        * 1000; //10 hours
+    initializeSecretProvider(filterConfig);
+
+    initializeAuthHandler(authHandlerClassName, filterConfig);
+
+
+    cookieDomain = config.getProperty(COOKIE_DOMAIN, null);
+    cookiePath = config.getProperty(COOKIE_PATH, null);
+  }
+
+  protected void initializeAuthHandler(String authHandlerClassName, FilterConfig filterConfig)
+      throws ServletException {
     try {
       Class<?> klass = Thread.currentThread().getContextClassLoader().loadClass(authHandlerClassName);
       authHandler = (AuthenticationHandler) klass.newInstance();
@@ -230,9 +247,10 @@ public class AuthenticationFilter implements Filter {
     } catch (IllegalAccessException ex) {
       throw new ServletException(ex);
     }
+  }
 
-    validity = Long.parseLong(config.getProperty(AUTH_TOKEN_VALIDITY, "36000"))
-        * 1000; //10 hours
+  protected void initializeSecretProvider(FilterConfig filterConfig)
+      throws ServletException {
     secretProvider = (SignerSecretProvider) filterConfig.getServletContext().
         getAttribute(SIGNER_SECRET_PROVIDER_ATTRIBUTE);
     if (secretProvider == null) {
@@ -254,9 +272,6 @@ public class AuthenticationFilter implements Filter {
       customSecretProvider = true;
     }
     signer = new Signer(secretProvider);
-
-    cookieDomain = config.getProperty(COOKIE_DOMAIN, null);
-    cookiePath = config.getProperty(COOKIE_PATH, null);
   }
 
   @SuppressWarnings("unchecked")
@@ -362,7 +377,7 @@ public class AuthenticationFilter implements Filter {
 
   /**
    * Destroys the filter.
-   * <p/>
+   * <p>
    * It invokes the {@link AuthenticationHandler#destroy()} method to release any resources it may hold.
    */
   @Override
@@ -380,7 +395,7 @@ public class AuthenticationFilter implements Filter {
    * Returns the filtered configuration (only properties starting with the specified prefix). The property keys
    * are also trimmed from the prefix. The returned {@link Properties} object is used to initialized the
    * {@link AuthenticationHandler}.
-   * <p/>
+   * <p>
    * This method can be overriden by subclasses to obtain the configuration from other configuration source than
    * the web.xml file.
    *
@@ -406,7 +421,7 @@ public class AuthenticationFilter implements Filter {
 
   /**
    * Returns the full URL of the request including the query string.
-   * <p/>
+   * <p>
    * Used as a convenience method for logging purposes.
    *
    * @param request the request object.
@@ -423,11 +438,11 @@ public class AuthenticationFilter implements Filter {
 
   /**
    * Returns the {@link AuthenticationToken} for the request.
-   * <p/>
+   * <p>
    * It looks at the received HTTP cookies and extracts the value of the {@link AuthenticatedURL#AUTH_COOKIE}
    * if present. It verifies the signature and if correct it creates the {@link AuthenticationToken} and returns
    * it.
-   * <p/>
+   * <p>
    * If this method returns <code>null</code> the filter will invoke the configured {@link AuthenticationHandler}
    * to perform user authentication.
    *
@@ -554,6 +569,13 @@ public class AuthenticationFilter implements Filter {
       if (!httpResponse.isCommitted()) {
         createAuthCookie(httpResponse, "", getCookieDomain(),
                 getCookiePath(), 0, isHttps);
+        // If response code is 401. Then WWW-Authenticate Header should be
+        // present.. reset to 403 if not found..
+        if ((errCode == HttpServletResponse.SC_UNAUTHORIZED)
+            && (!httpResponse.containsHeader(
+                KerberosAuthenticator.WWW_AUTHENTICATE))) {
+          errCode = HttpServletResponse.SC_FORBIDDEN;
+        }
         if (authenticationEx == null) {
           httpResponse.sendError(errCode, "Authentication required");
         } else {
@@ -577,7 +599,7 @@ public class AuthenticationFilter implements Filter {
    *
    * @param token authentication token for the cookie.
    * @param expires UNIX timestamp that indicates the expire date of the
-   *                cookie. It has no effect if its value < 0.
+   *                cookie. It has no effect if its value &lt; 0.
    *
    * XXX the following code duplicate some logic in Jetty / Servlet API,
    * because of the fact that Hadoop is stuck at servlet 2.5 and jetty 6
@@ -589,9 +611,8 @@ public class AuthenticationFilter implements Filter {
     StringBuilder sb = new StringBuilder(AuthenticatedURL.AUTH_COOKIE)
                            .append("=");
     if (token != null && token.length() > 0) {
-      sb.append(token);
+      sb.append("\"").append(token).append("\"");
     }
-    sb.append("; Version=1");
 
     if (path != null) {
       sb.append("; Path=").append(path);

+ 12 - 12
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandler.java

@@ -18,21 +18,21 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+
 import java.io.IOException;
 import java.util.Properties;
 
 /**
  * Interface for server authentication mechanisms.
- * <p/>
  * The {@link AuthenticationFilter} manages the lifecycle of the authentication handler.
- * <p/>
  * Implementations must be thread-safe as one instance is initialized and used for all requests.
  */
 public interface AuthenticationHandler {
 
+  public static final String WWW_AUTHENTICATE = "WWW-Authenticate";
+
   /**
    * Returns the authentication type of the authentication handler.
-   * <p/>
    * This should be a name that uniquely identifies the authentication type.
    * For example 'simple' or 'kerberos'.
    *
@@ -42,7 +42,7 @@ public interface AuthenticationHandler {
 
   /**
    * Initializes the authentication handler instance.
-   * <p/>
+   * <p>
    * This method is invoked by the {@link AuthenticationFilter#init} method.
    *
    * @param config configuration properties to initialize the handler.
@@ -53,21 +53,21 @@ public interface AuthenticationHandler {
 
   /**
    * Destroys the authentication handler instance.
-   * <p/>
+   * <p>
    * This method is invoked by the {@link AuthenticationFilter#destroy} method.
    */
   public void destroy();
 
   /**
    * Performs an authentication management operation.
-   * <p/>
+   * <p>
    * This is useful for handling operations like get/renew/cancel
    * delegation tokens which are being handled as operations of the
    * service end-point.
-   * <p/>
+   * <p>
    * If the method returns <code>TRUE</code> the request will continue normal
    * processing, this means the method has not produced any HTTP response.
-   * <p/>
+   * <p>
    * If the method returns <code>FALSE</code> the request will end, this means 
    * the method has produced the corresponding HTTP response.
    *
@@ -88,17 +88,17 @@ public interface AuthenticationHandler {
 
   /**
    * Performs an authentication step for the given HTTP client request.
-   * <p/>
+   * <p>
    * This method is invoked by the {@link AuthenticationFilter} only if the HTTP client request is
    * not yet authenticated.
-   * <p/>
+   * <p>
    * Depending upon the authentication mechanism being implemented, a particular HTTP client may
    * end up making a sequence of invocations before authentication is successfully established (this is
    * the case of Kerberos SPNEGO).
-   * <p/>
+   * <p>
    * This method must return an {@link AuthenticationToken} only if the the HTTP client request has
    * been successfully and fully authenticated.
-   * <p/>
+   * <p>
    * If the HTTP client request has not been completely authenticated, this method must take over
    * the corresponding HTTP response and it must return <code>null</code>.
    *

+ 2 - 2
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java

@@ -29,7 +29,7 @@ import javax.servlet.http.HttpServletRequest;
  * The {@link AuthenticationToken} contains information about an authenticated
  * HTTP client and doubles as the {@link Principal} to be returned by
  * authenticated {@link HttpServletRequest}s
- * <p/>
+ * <p>
  * The token can be serialized/deserialized to and from a string as it is sent
  * and received in HTTP client responses and requests as a HTTP cookie (this is
  * done by the {@link AuthenticationFilter}).
@@ -170,7 +170,7 @@ public class AuthenticationToken implements Principal {
 
   /**
    * Returns the string representation of the token.
-   * <p/>
+   * <p>
    * This string representation is parseable by the {@link #parse} method.
    *
    * @return the string representation of the token.

+ 6 - 7
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java

@@ -51,7 +51,7 @@ import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
 /**
  * The {@link KerberosAuthenticationHandler} implements the Kerberos SPNEGO authentication mechanism for HTTP.
- * <p/>
+ * <p>
  * The supported configuration properties are:
  * <ul>
  * <li>kerberos.principal: the Kerberos principal to used by the server. As stated by the Kerberos SPNEGO
@@ -168,9 +168,9 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
 
   /**
    * Initializes the authentication handler instance.
-   * <p/>
+   * <p>
    * It creates a Kerberos context using the principal and keytab specified in the configuration.
-   * <p/>
+   * <p>
    * This method is invoked by the {@link AuthenticationFilter#init} method.
    *
    * @param config configuration properties to initialize the handler.
@@ -243,7 +243,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
 
   /**
    * Releases any resources initialized by the authentication handler.
-   * <p/>
+   * <p>
    * It destroys the Kerberos context.
    */
   @Override
@@ -262,7 +262,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
 
   /**
    * Returns the authentication type of the authentication handler, 'kerberos'.
-   * <p/>
+   * <p>
    *
    * @return the authentication type of the authentication handler, 'kerberos'.
    */
@@ -313,7 +313,6 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
   /**
    * It enforces the the Kerberos SPNEGO authentication sequence returning an {@link AuthenticationToken} only
    * after the Kerberos SPNEGO sequence has completed successfully.
-   * <p/>
    *
    * @param request the HTTP client request.
    * @param response the HTTP client response.
@@ -331,7 +330,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
     String authorization = request.getHeader(KerberosAuthenticator.AUTHORIZATION);
 
     if (authorization == null || !authorization.startsWith(KerberosAuthenticator.NEGOTIATE)) {
-      response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE);
+      response.setHeader(WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE);
       response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
       if (authorization == null) {
         LOG.trace("SPNEGO starting");

+ 15 - 11
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java

@@ -15,13 +15,13 @@ package org.apache.hadoop.security.authentication.server;
 
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
-
 import org.apache.http.client.utils.URLEncodedUtils;
 import org.apache.http.NameValuePair;
 
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+
 import java.io.IOException;
 import java.nio.charset.Charset;
 import java.util.List;
@@ -30,12 +30,12 @@ import java.util.Properties;
 /**
  * The <code>PseudoAuthenticationHandler</code> provides a pseudo authentication mechanism that accepts
  * the user name specified as a query string parameter.
- * <p/>
+ * <p>
  * This mimics the model of Hadoop Simple authentication which trust the 'user.name' property provided in
  * the configuration object.
- * <p/>
+ * <p>
  * This handler can be configured to support anonymous users.
- * <p/>
+ * <p>
  * The only supported configuration property is:
  * <ul>
  * <li>simple.anonymous.allowed: <code>true|false</code>, default value is <code>false</code></li>
@@ -54,6 +54,9 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
   public static final String ANONYMOUS_ALLOWED = TYPE + ".anonymous.allowed";
 
   private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
+
+  private static final String PSEUDO_AUTH = "PseudoAuth";
+
   private boolean acceptAnonymous;
   private String type;
 
@@ -77,7 +80,7 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
 
   /**
    * Initializes the authentication handler instance.
-   * <p/>
+   * <p>
    * This method is invoked by the {@link AuthenticationFilter#init} method.
    *
    * @param config configuration properties to initialize the handler.
@@ -100,7 +103,7 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
 
   /**
    * Releases any resources initialized by the authentication handler.
-   * <p/>
+   * <p>
    * This implementation does a NOP.
    */
   @Override
@@ -109,7 +112,6 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
 
   /**
    * Returns the authentication type of the authentication handler, 'simple'.
-   * <p/>
    *
    * @return the authentication type of the authentication handler, 'simple'.
    */
@@ -153,14 +155,14 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
 
   /**
    * Authenticates an HTTP client request.
-   * <p/>
+   * <p>
    * It extracts the {@link PseudoAuthenticator#USER_NAME} parameter from the query string and creates
    * an {@link AuthenticationToken} with it.
-   * <p/>
+   * <p>
    * If the HTTP client request does not contain the {@link PseudoAuthenticator#USER_NAME} parameter and
    * the handler is configured to allow anonymous users it returns the {@link AuthenticationToken#ANONYMOUS}
    * token.
-   * <p/>
+   * <p>
    * If the HTTP client request does not contain the {@link PseudoAuthenticator#USER_NAME} parameter and
    * the handler is configured to disallow anonymous users it throws an {@link AuthenticationException}.
    *
@@ -181,7 +183,9 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
       if (getAcceptAnonymous()) {
         token = AuthenticationToken.ANONYMOUS;
       } else {
-        throw new AuthenticationException("Anonymous requests are disallowed");
+        response.setStatus(HttpServletResponse.SC_FORBIDDEN);
+        response.setHeader(WWW_AUTHENTICATE, PSEUDO_AUTH);
+        token = null;
       }
     } else {
       token = new AuthenticationToken(userName, userName, getType());

+ 2 - 2
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java

@@ -92,7 +92,7 @@ public class KerberosName {
 
   /**
    * Create a name from the full Kerberos principal name.
-   * @param name
+   * @param name full Kerberos principal name.
    */
   public KerberosName(String name) {
     Matcher match = nameParser.matcher(name);
@@ -367,7 +367,7 @@ public class KerberosName {
    * Get the translation of the principal name into an operating system
    * user name.
    * @return the short name
-   * @throws IOException
+   * @throws IOException throws if something is wrong with the rules
    */
   public String getShortName() throws IOException {
     String[] params;

+ 3 - 5
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java

@@ -135,12 +135,10 @@ public class KerberosUtil {
   /**
    * Get all the unique principals from keytabfile which matches a pattern.
    * 
-   * @param keytab 
-   *          Name of the keytab file to be read.
-   * @param pattern 
-   *         pattern to be matched.
+   * @param keytab Name of the keytab file to be read.
+   * @param pattern pattern to be matched.
    * @return list of unique principals which matches the pattern.
-   * @throws IOException 
+   * @throws IOException if cannot get the principal name
    */
   public static final String[] getPrincipalNames(String keytab,
       Pattern pattern) throws IOException {

+ 3 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/RandomSignerSecretProvider.java

@@ -14,6 +14,8 @@
 package org.apache.hadoop.security.authentication.util;
 
 import com.google.common.annotations.VisibleForTesting;
+
+import java.nio.charset.Charset;
 import java.util.Random;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -46,6 +48,6 @@ public class RandomSignerSecretProvider extends RolloverSignerSecretProvider {
 
   @Override
   protected byte[] generateNewSecret() {
-    return Long.toString(rand.nextLong()).getBytes();
+    return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8"));
   }
 }

+ 2 - 3
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/Signer.java

@@ -15,6 +15,7 @@ package org.apache.hadoop.security.authentication.util;
 
 import org.apache.commons.codec.binary.Base64;
 
+import java.nio.charset.Charset;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 
@@ -41,8 +42,6 @@ public class Signer {
 
   /**
    * Returns a signed string.
-   * <p/>
-   * The signature '&s=SIGNATURE' is appended at the end of the string.
    *
    * @param str string to sign.
    *
@@ -88,7 +87,7 @@ public class Signer {
   protected String computeSignature(byte[] secret, String str) {
     try {
       MessageDigest md = MessageDigest.getInstance("SHA");
-      md.update(str.getBytes());
+      md.update(str.getBytes(Charset.forName("UTF-8")));
       md.update(secret);
       byte[] digest = md.digest();
       return new Base64(0).encodeToString(digest);

+ 2 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java

@@ -13,6 +13,7 @@
  */
 package org.apache.hadoop.security.authentication.util;
 
+import java.nio.charset.Charset;
 import java.util.Properties;
 import javax.servlet.ServletContext;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -36,7 +37,7 @@ public class StringSignerSecretProvider extends SignerSecretProvider {
           long tokenValidity) throws Exception {
     String signatureSecret = config.getProperty(
             AuthenticationFilter.SIGNATURE_SECRET, null);
-    secret = signatureSecret.getBytes();
+    secret = signatureSecret.getBytes(Charset.forName("UTF-8"));
     secrets = new byte[][]{secret};
   }
 

+ 7 - 4
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/ZKSignerSecretProvider.java

@@ -15,6 +15,7 @@ package org.apache.hadoop.security.authentication.util;
 
 import com.google.common.annotations.VisibleForTesting;
 import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -44,7 +45,7 @@ import org.slf4j.LoggerFactory;
 /**
  * A SignerSecretProvider that synchronizes a rolling random secret between
  * multiple servers using ZooKeeper.
- * <p/>
+ * <p>
  * It works by storing the secrets and next rollover time in a ZooKeeper znode.
  * All ZKSignerSecretProviders looking at that znode will use those
  * secrets and next rollover time to ensure they are synchronized.  There is no
@@ -55,7 +56,7 @@ import org.slf4j.LoggerFactory;
  * your own Curator client, you can pass it to ZKSignerSecretProvider; see
  * {@link org.apache.hadoop.security.authentication.server.AuthenticationFilter}
  * for more details.
- * <p/>
+ * <p>
  * The supported configuration properties are:
  * <ul>
  * <li>signer.secret.provider.zookeeper.connection.string: indicates the
@@ -77,11 +78,13 @@ import org.slf4j.LoggerFactory;
  * </ul>
  *
  * The following attribute in the ServletContext can also be set if desired:
+ * <ul>
  * <li>signer.secret.provider.zookeeper.curator.client: A CuratorFramework
  * client object can be passed here. If given, the "zookeeper" implementation
  * will use this Curator client instead of creating its own, which is useful if
  * you already have a Curator client or want more control over its
  * configuration.</li>
+ * </ul>
  */
 @InterfaceStability.Unstable
 @InterfaceAudience.Private
@@ -367,14 +370,14 @@ public class ZKSignerSecretProvider extends RolloverSignerSecretProvider {
   }
 
   private byte[] generateRandomSecret() {
-    return Long.toString(rand.nextLong()).getBytes();
+    return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8"));
   }
 
   /**
    * This method creates the Curator client and connects to ZooKeeper.
    * @param config configuration properties
    * @return A Curator client
-   * @throws java.lang.Exception
+   * @throws Exception
    */
   protected CuratorFramework createCuratorClient(Properties config)
           throws Exception {

+ 3 - 2
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java

@@ -63,8 +63,9 @@ public class TestPseudoAuthenticator {
       URL url = new URL(auth.getBaseURL());
       HttpURLConnection conn = (HttpURLConnection) url.openConnection();
       conn.connect();
-      Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN, conn.getResponseCode());
-      Assert.assertEquals("Anonymous requests are disallowed", conn.getResponseMessage());
+      Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
+      Assert.assertTrue(conn.getHeaderFields().containsKey("WWW-Authenticate"));
+      Assert.assertEquals("Authentication required", conn.getResponseMessage());
     } finally {
       auth.stop();
     }

+ 5 - 1
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java

@@ -283,6 +283,8 @@ public class TestAuthenticationFilter {
     filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
+      ServletContext sc = Mockito.mock(ServletContext.class);
+      Mockito.when(config.getServletContext()).thenReturn(sc);
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("kerberos");
       Mockito.when(config.getInitParameterNames()).thenReturn(
         new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
@@ -535,11 +537,11 @@ public class TestAuthenticationFilter {
         }
       ).when(chain).doFilter(Mockito.<ServletRequest>anyObject(), Mockito.<ServletResponse>anyObject());
 
+      Mockito.when(response.containsHeader("WWW-Authenticate")).thenReturn(true);
       filter.doFilter(request, response, chain);
 
       Mockito.verify(response).sendError(
           HttpServletResponse.SC_UNAUTHORIZED, "Authentication required");
-      Mockito.verify(response).setHeader("WWW-Authenticate", "dummyauth");
     } finally {
       filter.destroy();
     }
@@ -850,6 +852,7 @@ public class TestAuthenticationFilter {
       Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
 
       HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+      Mockito.when(response.containsHeader("WWW-Authenticate")).thenReturn(true);
       FilterChain chain = Mockito.mock(FilterChain.class);
 
       verifyUnauthorized(filter, request, response, chain);
@@ -928,6 +931,7 @@ public class TestAuthenticationFilter {
       Mockito.when(request.getCookies()).thenReturn(new Cookie[]{cookie});
 
       HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+      Mockito.when(response.containsHeader("WWW-Authenticate")).thenReturn(true);
       FilterChain chain = Mockito.mock(FilterChain.class);
 
       verifyUnauthorized(filter, request, response, chain);

+ 3 - 6
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java

@@ -21,6 +21,7 @@ import org.mockito.Mockito;
 
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+
 import java.util.Properties;
 
 public class TestPseudoAuthenticationHandler {
@@ -74,12 +75,8 @@ public class TestPseudoAuthenticationHandler {
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
       HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
 
-      handler.authenticate(request, response);
-      Assert.fail();
-    } catch (AuthenticationException ex) {
-      // Expected
-    } catch (Exception ex) {
-      Assert.fail();
+      AuthenticationToken token = handler.authenticate(request, response);
+      Assert.assertNull(token);
     } finally {
       handler.destroy();
     }

+ 490 - 28
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -11,17 +11,13 @@ Trunk (Unreleased)
 
     HADOOP-9902. Shell script rewrite (aw)
 
+    HADOOP-10950. rework heap management vars (John Smith via aw)
+
   NEW FEATURES
 
-    HADOOP-9629. Support Windows Azure Storage - Blob as a file system in Hadoop.
-    (Dexter Bradshaw, Mostafa Elhemali, Xi Fang, Johannes Klein, David Lao,
-    Mike Liddell, Chuan Liu, Lengning Liu, Ivan Mitic, Michael Rys,
-    Alexander Stojanovic, Brian Swan, and Min Wei via cnauroth)
+    HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via aw)
 
-    HADOOP-10728. Metrics system for Windows Azure Storage Filesystem.
-    (Dexter Bradshaw, Mostafa Elhemali, Xi Fang, Johannes Klein, David Lao,
-    Mike Liddell, Chuan Liu, Lengning Liu, Ivan Mitic, Michael Rys,
-    Alexander Stojanovich, Brian Swan, and Min Wei via cnauroth)
+    HADOOP-11353. Add support for .hadooprc (aw)
     
   IMPROVEMENTS
 
@@ -118,8 +114,6 @@ Trunk (Unreleased)
     HADOOP-10342. Add a new method to UGI to use a Kerberos login subject to
     build a new UGI. (Larry McCay via omalley)
 
-    HADOOP-10563. Remove the dependency of jsp in trunk. (wheat9)
-
     HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)
 
     HADOOP-11013. CLASSPATH handling should be consolidated, debuggable (aw)
@@ -129,8 +123,34 @@ Trunk (Unreleased)
     HADOOP-11092. hadoop shell commands should print usage if not given a
     a class (aw)
 
+    HADOOP-11231. Remove dead code in ServletUtil. (Li Lu via wheat9)
+
+    HADOOP-11025. hadoop-daemons.sh should just call hdfs directly (Masatake
+    Iwasaki via aw)
+
+    HADOOP-11150. hadoop command should show the reason on failure by 
+    invalid COMMAND or CLASSNAME (Masatake Iwasaki via aw)
+
+    HADOOP-11208. Replace "daemon" with better name in script subcommands (aw)
+
+    HADOOP-10926. Improve smart-apply-patch.sh to apply binary diffs (cmccabe)
+
+    HADOOP-11081. Document hadoop properties expected to be set by the shell 
+    code in *-env.sh (aw)
+
+    HADOOP-11352 Clean up test-patch.sh to disable "+1 contrib tests"
+    (Akira AJISAKA via stevel)
+
+    HADOOP-10788. Rewrite kms to use new shell framework (John Smith via aw)
+
+    HADOOP-11058. Missing HADOOP_CONF_DIR generates strange results
+    (Masatake Iwasaki via aw)
+
   BUG FIXES
 
+    HADOOP-11473. test-patch says "-1 overall" even when all checks are +1
+    (Jason Lowe via raviprak)
+
     HADOOP-9451. Fault single-layer config if node group topology is enabled.
     (Junping Du via llu)
 
@@ -300,29 +320,14 @@ Trunk (Unreleased)
     HADOOP-10625. Trim configuration names when putting/getting them
     to properties. (Wangda Tan via xgong)
 
-    HADOOP-10717. HttpServer2 should load jsp DTD from local jars instead of
-    going remote. (Dapeng Sun via wheat9)
-
-    HADOOP-10689. InputStream is not closed in
-    AzureNativeFileSystemStore#retrieve(). (Chen He via cnauroth)
-
-    HADOOP-10690. Lack of synchronization on access to InputStream in
-    NativeAzureFileSystem#NativeAzureFsInputStream#close().
-    (Chen He via cnauroth)
-
     HADOOP-10831. UserProvider is not thread safe. (Benoy Antony via umamahesh)
 
     HADOOP-10834. Typo in CredentialShell usage. (Benoy Antony via umamahesh)
 
-    HADOOP-10840. Fix OutOfMemoryError caused by metrics system in Azure File
-    System. (Shanyu Zhao via cnauroth)
-
     HADOOP-11002. shell escapes are incompatible with previous releases (aw)
 
     HADOOP-10996. Stop violence in the *_HOME (aw)
 
-    HADOOP-10748. HttpServer2 should not load JspServlet. (wheat9)
-
     HADOOP-11033. shell scripts ignore JAVA_HOME on OS X. (aw)
 
     HADOOP-11052. hadoop_verify_secure_prereq's results aren't checked 
@@ -333,6 +338,20 @@ Trunk (Unreleased)
     HADOOP-11022. User replaced functions get lost 2-3 levels deep (e.g., 
     sbin) (aw)
 
+    HADOOP-11284. Fix variable name mismatches in hadoop-functions.sh (Masatake
+    Iwasaki via aw)
+
+    HADOOP-11298. slaves.sh and stop-all.sh are missing slashes (aw)
+
+    HADOOP-11296. hadoop-daemons.sh throws 'host1: bash: host3: 
+    command not found...' (vinayakumarb)
+
+    HADOOP-11380. Restore Rack Awareness documentation (aw)
+
+    HADOOP-11397. Can't override HADOOP_IDENT_STRING (Kengo Seki via aw)
+
+    HADOOP-10908. Common needs updates for shell rewrite (aw)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -343,21 +362,346 @@ Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
+    HADOOP-10530 Make hadoop build on Java7+ only (stevel)
+
   NEW FEATURES
 
+    HADOOP-10987. Provide an iterator-based listing API for FileSystem (kihwal)
+
+    HADOOP-7984. Add hadoop --loglevel option to change log level.
+    (Akira AJISAKA via cnauroth)
+
+    HADOOP-9629. Support Windows Azure Storage - Blob as a file system in Hadoop.
+    (Dexter Bradshaw, Mostafa Elhemali, Xi Fang, Johannes Klein, David Lao,
+    Mike Liddell, Chuan Liu, Lengning Liu, Ivan Mitic, Michael Rys,
+    Alexander Stojanovic, Brian Swan, and Min Wei via cnauroth)
+
+    HADOOP-10728. Metrics system for Windows Azure Storage Filesystem.
+    (Dexter Bradshaw, Mostafa Elhemali, Xi Fang, Johannes Klein, David Lao,
+    Mike Liddell, Chuan Liu, Lengning Liu, Ivan Mitic, Michael Rys,
+    Alexander Stojanovich, Brian Swan, and Min Wei via cnauroth)
+
+    HADOOP-8989. hadoop fs -find feature (Jonathan Allen via aw)
+
   IMPROVEMENTS
 
     HADOOP-11156. DelegateToFileSystem should implement
     getFsStatus(final Path f). (Zhihai Xu via wang)
 
+    HADOOP-11172. Improve error message in Shell#runCommand on OutOfMemoryError.
+    (Yongjun Zhang via wang)
+
+    HADOOP-10748. HttpServer2 should not load JspServlet. (wheat9)
+
+    HADOOP-6857. FsShell should report raw disk usage including replication
+    factor. (Byron Wong via shv)
+
+    HADOOP-10847. Remove the usage of sun.security.x509.* in testing code.
+    (Pascal Oliva via wheat9)
+
+    HADOOP-10563. Remove the dependency of jsp in trunk. (wheat9)
+
+    HADOOP-10786. Fix UGI#reloginFromKeytab on Java 8. (Stephen Chu via wheat9)
+
+    HADOOP-11291. Log the cause of SASL connection failures.
+    (Stephen Chu via cnauroth)
+
+    HADOOP-11173. Improve error messages for some KeyShell commands. (wang)
+
+    HADOOP-11257: Update "hadoop jar" documentation to warn against using it
+    for launching yarn jars (iwasakims via cmccabe)
+
+    HADOOP-11341. KMS support for whitelist key ACLs. (Arun Suresh via wang)
+
+    HADOOP-11301. [optionally] update jmx cache to drop old metrics
+    (Maysam Yabandeh via stack)
+
+    HADOOP-11356. Removed deprecated o.a.h.fs.permission.AccessControlException.
+    (Li Lu via wheat9)
+
+    HADOOP-11313. Adding a document about NativeLibraryChecker.
+    (Tsuyoshi OZAWA via cnauroth)
+
+    HADOOP-11287. Simplify UGI#reloginFromKeytab for Java 7+.
+    (Li Lu via wheat9)
+
+    HADOOP-10476. Bumping the findbugs version to 3.0.0. (wheat9)
+
+    HADOOP-11410. Make the rpath of libhadoop.so configurable (cmccabe)
+
+    HADOOP-11416. Move ChunkedArrayList into hadoop-common (cmccabe)
+
+    HADOOP-10840. Fix OutOfMemoryError caused by metrics system in Azure File
+    System. (Shanyu Zhao via cnauroth)
+
+    HADOOP-11248. Add hadoop configuration to disable Azure Filesystem metrics
+    collection. (Shanyu Zhao via cnauroth)
+
+    HADOOP-11421. Add IOUtils#listDirectory (cmccabe)
+
+    HADOOP-11427. ChunkedArrayList: fix removal via iterator and implement get
+    (cmccabe)
+
+    HADOOP-11430. Add GenericTestUtils#disableLog, GenericTestUtils#setLogLevel
+    (cmccabe)
+
+    HADOOP-11422. Check CryptoCodec is AES-CTR for Crypto input/output stream
+    (Yi Liu via Colin P. McCabe)
+
+    HADOOP-11213. Typos in html pages: SecureMode and EncryptedShuffle. 
+    (Wei Yan via kasha)
+
+    HADOOP-11395. Add site documentation for Azure Storage FileSystem
+    integration. (Chris Nauroth via Arpit Agarwal)
+
+    HDFS-7555. Remove the support of unmanaged connectors in HttpServer2.
+    (wheat9)
+
+    HADOOP-11399. Java Configuration file and .xml files should be
+    automatically cross-compared (rchiang via rkanter)
+
+    HADOOP-11455. KMS and Credential CLI should request confirmation for
+    deletion by default. (Charles Lamb via yliu)
+
+    HADOOP-11390 Metrics 2 ganglia provider to include hostname in
+    unresolved address problems. (Varun Saxena via stevel)
+
+    HADOOP-11032. Replace use of Guava's Stopwatch with Hadoop's StopWatch
+    (ozawa)
+
+    HADOOP-11464. Reinstate support for launching Hadoop processes on Windows
+    using Cygwin. (cnauroth)
+
+    HADOOP-9992. Modify the NN loadGenerator to optionally run as a MapReduce job
+    (Akshay Radia via brandonli)
+
+    HADOOP-11465. Fix findbugs warnings in hadoop-gridmix. (Varun Saxena via
+    Arpit Agarwal)
+
   OPTIMIZATIONS
 
+    HADOOP-11323. WritableComparator#compare keeps reference to byte array.
+    (Wilfred Spiegelenburg via wang)
+
+    HADOOP-11238. Update the NameNode's Group Cache in the background when
+    possible (Chris Li via Colin P. McCabe)
+
+    HADOOP-10809. hadoop-azure: page blob support. (Dexter Bradshaw,
+    Mostafa Elhemali, Eric Hanson, and Mike Liddell via cnauroth)
+
+    HADOOP-11188. hadoop-azure: automatically expand page blobs when they become
+    full. (Eric Hanson via cnauroth)
+
   BUG FIXES
 
-    HADOOP-10404. Some accesses to DomainSocketWatcher#closed are not protected
-    by the lock (cmccabe)
+    HADOOP-11400. GraphiteSink does not reconnect to Graphite after 'broken pipe' 
+    (Kamil Gorlo via raviprak)
+
+    HADOOP-11236. NFS: Fix javadoc warning in RpcProgram.java (Abhiraj Butala via harsh)
+
+    HADOOP-11166. Remove ulimit from test-patch.sh. (wang)
+
+    HDFS-7227. Fix findbugs warning about NP_DEREFERENCE_OF_READLINE_VALUE in
+    SpanReceiverHost (cmccabe)
+
+    HADOOP-11186. documentation should talk about
+    hadoop.htrace.spanreceiver.classes, not hadoop.trace.spanreceiver.classes (cmccabe)
+
+    HADOOP-11165. TestUTF8 fails when run against java 8.
+    (Stephen Chu via cnauroth)
+
+    HADOOP-10717. HttpServer2 should load jsp DTD from local jars instead of
+    going remote. (Dapeng Sun via wheat9)
+
+    HADOOP-11268. Update BUILDING.txt to remove the workaround for tools.jar.
+    (Li Lu via wheat9)
+
+    HADOOP-11230. Add missing dependency of bouncycastle for kms, httpfs, hdfs, MR
+    and YARN. (Robert Kanter via wheat9)
+
+    HADOOP-11269. Add java 8 profile for hadoop-annotations. (Li Lu via wheat9)
+
+    HADOOP-11271. Use Time.monotonicNow() in Shell.java instead of Time.now()
+    (vinayakumarb)
+
+    HADOOP-11266. Remove no longer supported activation properties for packaging
+    from pom. (Masatake Iwasaki via wheat9)
+
+    HADOOP-11267. TestSecurityUtil fails when run with JDK8 because of empty
+    principal names. (Stephen Chu via wheat9)
+
+    HADOOP-10714. AmazonS3Client.deleteObjects() need to be limited to 1000
+    entries per call. (Juan Yu via atm)
 
-Release 2.6.0 - UNRELEASED
+    HADOOP-11272. Allow ZKSignerSecretProvider and
+    ZKDelegationTokenSecretManager to use the same curator client. (Arun Suresh via atm)
+
+    HADOOP-11187 NameNode - KMS communication fails after a long period of
+    inactivity. (Arun Suresh via atm)
+
+    HADOOP-11289. Fix typo in RpcUtil log message. (Charles Lamb via wheat9)
+
+    HADOOP-11294. Nfs3FileAttributes should not change the values of rdev,
+    nlink and size in the constructor. (Brandon Li via wheat9)
+
+    HADOOP-11157. ZKDelegationTokenSecretManager never shuts down
+    listenerThreadPool. (Arun Suresh via atm)
+
+    HADOOP-11311. Restrict uppercase key names from being created with JCEKS.
+    (wang)
+
+    HADOOP-11309. System class pattern package.Foo should match
+    package.Foo$Bar, too (Gera Shegalov via jlowe)
+
+    HADOOP-11312. Fix unit tests to not use uppercase key names. (wang)
+
+    HADOOP-11201. Hadoop Archives should support globs resolving to files.
+    (Gera Shegalov via cnauroth)
+
+    HADOOP-11322. key based ACL check in KMS always check KeyOpType.MANAGEMENT
+    even actual KeyOpType is not MANAGEMENT. (Dian Fu via yliu)
+
+    HADOOP-11300. KMS startup scripts must not display the keystore /
+    truststore passwords. (Arun Suresh via wang)
+
+    HADOOP-11333. Fix deadlock in DomainSocketWatcher when the notification
+    pipe is full (zhaoyunjiong via cmccabe)
+
+    HADOOP-11337. KeyAuthorizationKeyProvider access checks need to be done
+    atomically. (Dian Fu via wang)
+
+    HADOOP-11344. KMS kms-config.sh sets a default value for the keystore
+    password even in non-ssl setup. (Arun Suresh via wang)
+
+    HADOOP-11342. KMS key ACL should ignore ALL operation for default key ACL
+    and whitelist key ACL. (Dian Fu via wang)
+
+    HADOOP-11332. KerberosAuthenticator#doSpnegoSequence should check if
+    kerberos TGT is available in the subject. (Dian Fu via atm)
+
+    HADOOP-11348. Remove unused variable from CMake error message for finding
+    openssl (Dian Fu via Colin P. McCabe)
+
+    HADOOP-11355. When accessing data in HDFS and the key has been deleted,
+    a Null Pointer Exception is shown. (Arun Suresh via wang)
+
+    HADOOP-11343. Overflow is not properly handled in calculating final iv for
+    AES CTR. (Jerry Chen via wang)
+
+    HADOOP-11354. ThrottledInputStream doesn't perform effective throttling.
+    (Ted Yu via jing9)
+
+    HADOOP-11329. Add JAVA_LIBRARY_PATH to KMS startup options. (Arun Suresh via wang)
+
+    HADOOP-11363 Hadoop maven surefire-plugin uses must set heap size. (stevel)
+
+    HADOOP-10134 [JDK8] Fix Javadoc errors caused by incorrect or illegal tags in doc
+    comments. (apurtell via stevel)
+
+    HADOOP-11367. Fix warnings from findbugs 3.0 in hadoop-streaming. (Li Lu via wheat9)
+
+    HADOOP-11369. Fix new findbugs warnings in hadoop-mapreduce-client,
+    non-core directories. (Li Lu via wheat9)
+
+    HADOOP-11368. Fix SSLFactory truststore reloader thread leak in
+    KMSClientProvider. (Arun Suresh via wang)
+
+    HADOOP-11372. Fix new findbugs warnings in mapreduce-examples.
+    (Li Lu via wheat9)
+
+    HADOOP-11273. TestMiniKdc failure: login options not compatible with IBM
+    JDK. (Gao Zhong Liang via wheat9)
+
+    HADOOP-11379. Fix new findbugs warnings in hadoop-auth*. (Li Lu via wheat9)
+
+    HADOOP-11378. Fix new findbugs warnings in hadoop-kms. (Li Lu via wheat9)
+
+    HADOOP-11349. RawLocalFileSystem leaks file descriptor while creating a
+    file if creat succeeds but chmod fails. (Varun Saxena via Colin P. McCabe)
+
+    HADOOP-11381. Fix findbugs warnings in hadoop-distcp, hadoop-aws,
+    hadoop-azure, and hadoop-openstack. (Li Lu via wheat9)
+
+    HADOOP-10482. Fix various findbugs warnings in hadoop-common. (wheat9)
+
+    HADOOP-11388. Remove deprecated o.a.h.metrics.file.FileContext.
+    (Li Lu via wheat9)
+
+    HADOOP-11386. Replace \n by %n in format hadoop-common format strings.
+    (Li Lu via wheat9)
+
+    HADOOP-11211. mapreduce.job.classloader.system.classes semantics should
+    be order-independent. (Yitong Zhou via gera)
+
+    HADOOP-11389. Clean up byte to string encoding issues in hadoop-common.
+    (wheat9)
+
+    HADOOP-11394. hadoop-aws documentation missing. (cnauroth)
+
+    HADOOP-11396. Provide navigation in the site documentation linking to the
+    Hadoop Compatible File Systems. (cnauroth)
+
+    HADOOP-11412 POMs mention "The Apache Software License" rather than
+    "Apache License". (Herve Boutemy via stevel)
+
+    HADOOP-11321. copyToLocal cannot save a file to an SMB share unless the user
+    has Full Control permissions. (cnauroth)
+
+    HADOOP-11420. Use latest maven-site-plugin and replace link to svn with
+    link to git. (Herve Boutemy via wheat9)
+
+    HADOOP-10689. InputStream is not closed in
+    AzureNativeFileSystemStore#retrieve(). (Chen He via cnauroth)
+
+    HADOOP-10690. Lack of synchronization on access to InputStream in
+    NativeAzureFileSystem#NativeAzureFsInputStream#close().
+    (Chen He via cnauroth)
+
+    HADOOP-11358. Tests for encryption/decryption with IV calculation
+    overflow. (yliu)
+
+    HADOOP-11125. Remove redundant tests in TestOsSecureRandom.
+    (Masanori Oyama via wheat9)
+
+    HADOOP-11385. Prevent cross site scripting attack on JMXJSONServlet.
+    (wheat9)
+
+    HADOOP-11409. FileContext.getFileContext can stack overflow if default fs
+    misconfigured (Gera Shegalov via jlowe)
+
+    HADOOP-11428. Remove obsolete reference to Cygwin in BUILDING.txt.
+    (Arpit Agarwal via wheat9)
+
+    HADOOP-11431. clean up redundant maven-site-plugin configuration.
+    (Herve Boutemy via wheat9)
+
+    HADOOP-11429. Findbugs warnings in hadoop extras.
+    (Varun Saxena via wheat9)
+
+    HADOOP-11414. FileBasedIPList#readLines() can leak file descriptors.
+    (ozawa)
+
+    HADOOP-11283. SequenceFile.Writer can leak file descriptors in
+    DistCpV1#setup(). (Varun Saxena via ozawa)
+
+    HADOOP-11448. Fix findbugs warnings in FileBasedIPList. (ozawa)
+
+    HADOOP-11039. ByteBufferReadable API doc is inconsistent with the
+    implementations. (Yi Liu via Colin P. McCabe)
+
+    HADOOP-11446. S3AOutputStream should use shared thread pool to
+    avoid OutOfMemoryError. (Ted Yu via stevel)	
+
+    HADOOP-11459. Fix recent findbugs in ActiveStandbyElector, NetUtils
+    and ShellBasedIdMapping (vinayakumarb)
+
+    HADOOP-11445. Bzip2Codec: Data block is skipped when position of newly
+    created stream is equal to start of split (Ankit Kamboj via jlowe)
+
+    HADOOP-11462. TestSocketIOWithTimeout needs change for PowerPC platform.
+    (Ayappan via cnauroth)
+
+Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES
 
@@ -577,6 +921,31 @@ Release 2.6.0 - UNRELEASED
 
     HADOOP-11153. Make number of KMS threads configurable. (wang)
 
+    HADOOP-11007. Reinstate building of ant tasks support. (jlowe via kihwal)
+
+    HADOOP-11178. Fix findbugs exclude file. (Arun Suresh via wang)
+
+    HADOOP-11174. Delegation token for KMS should only be got once if it
+    already exists. (Yi Liu via wang)
+
+    HADOOP-11184. Update Hadoop's lz4 to version r123. (cmccabe)
+
+    HADOOP-11181. Generalized o.a.h.s.t.d.DelegationTokenManager to handle all
+    sub-classes of AbstractDelegationTokenIdentifier. (zjshen)
+
+    HADOOP-11207. Enhanced common DelegationTokenAuthenticationHandler to support
+    proxy-users on Delegation-token management operations. (Zhijie Shen via
+    vinodkv)
+
+    HADOOP-11216. Improve Openssl library finding. (cmccabe via yliu)
+
+    HADOOP-11254. Changed visibility of AccessControlList to be public for
+    consumption by ecosystem. (Zhijie Shen via vinodkv)
+
+    HADOOP-11286. Copied LimitInputStream from guava-0.14 to hadoop to avoid
+    issues with newer versions of guava in applications. (Christopher Tubbs
+    via acmurthy)
+
   OPTIMIZATIONS
 
     HADOOP-10838. Byte array native checksumming. (James Thomas via todd)
@@ -638,8 +1007,17 @@ Release 2.6.0 - UNRELEASED
     HADOOP-10681. Remove unnecessary synchronization from Snappy & Zlib
     codecs. (Gopal Vijayaraghavan via acmurthy)
 
+    HADOOP-11194. Ignore .keep files. (kasha)
+
+    HADOOP-11195. Move Id-Name mapping in NFS to the hadoop-common area for
+    better maintenance (Yongjun Zhang via brandonli)
+
+    HADOOP-11247. Fix a couple javac warnings in NFS. (Brandon Li via wheat9)
+
   BUG FIXES
 
+    HADOOP-11182. GraphiteSink emits wrong timestamps (Sascha Coenen via raviprak)
+
     HADOOP-10781. Unportable getgrouplist() usage breaks FreeBSD (Dmitry
     Sivachenko via Colin Patrick McCabe)
 
@@ -798,6 +1176,30 @@ Release 2.6.0 - UNRELEASED
     HADOOP-11168. Remove duplicated entry "dfs.webhdfs.enabled" in the user
     doc. (Yi Liu via wheat9)
 
+    HADOOP-11169. Fix DelegationTokenAuthenticatedURL to pass the connection
+    Configurator to the authenticator. (Arun Suresh via wang)
+
+    HADOOP-10404. Some accesses to DomainSocketWatcher#closed are not protected
+    by the lock (cmccabe)
+
+    HADOOP-11161. Expose close method in KeyProvider to give clients of
+    Provider implementations a hook to release resources. (Arun Suresh via atm)
+
+    HADOOP-11133. Should trim the content of keystore password file for JavaKeyStoreProvider
+    (Yi Liu via umamahesh)
+
+    HADOOP-11193. Fix uninitialized variables in NativeIO.c
+    (Xiaoyu Yao via wheat9)
+
+    HADOOP-11176. KMSClientProvider authentication fails when both currentUgi
+    and loginUgi are a proxied user. (Arun Suresh via atm)
+
+    HADOOP-11198. Fix typo in javadoc for FileSystem#listStatus().
+    (Li Lu via wheat9)
+
+    HADOOP-11253. Hadoop streaming test TestStreamXmlMultipleRecords fails on
+    Windows. (Varun Vasudev via wheat9)
+
     BREAKDOWN OF HDFS-6134 AND HADOOP-10150 SUBTASKS AND RELATED JIRAS
   
       HADOOP-10734. Implement high-performance secure random number sources.
@@ -939,6 +1341,66 @@ Release 2.6.0 - UNRELEASED
     HADOOP-11163. MetricsSystemImpl may miss a registered source.
     (Chuan Liu via cnauroth)
 
+    HADOOP-11179. Java untar should handle the case that the file entry comes
+    without its parent directory entry. (Craig Welch via zjshen)
+
+    HADOOP-11175. Fix several issues of hadoop security configuration in user
+    doc. (Yi Liu via cnauroth)
+
+    HADOOP-11122. Fix findbugs in ZK DelegationTokenSecretManagers.
+    (Arun Suresh via kasha)
+
+    HADOOP-11228. Winutils task: unsecure path should not call
+    AddNodeManagerAndUserACEsToObject. (Remus Rusanu via jianhe)
+
+    HADOOP-11170. ZKDelegationTokenSecretManager fails to renewToken created by 
+    a peer. (Arun Suresh and Gregory Chanan via kasha)
+
+    HADOOP-11217. Disable SSLv3 in KMS. (Robert Kanter via kasha)
+
+    HADOOP-11068. Match hadoop.auth cookie format to jetty output.
+    (Gregory Chanan via cnauroth)
+
+    HADOOP-11250. fix endmacro of set_find_shared_library_without_version in
+    CMakeLists (Yi Liu via Colin P. McCabe)
+
+    HADOOP-11221. IdentityHashStore assumes System.identityHashCode() is
+    non-negative. (Jinghui Wang via szetszwo)
+
+    HADOOP-11241. Fixed intermittent TestNMSimulator failure due to timing issue.
+    (Varun Vasudev via zjshen)
+
+    HADOOP-11265. Credential and Key Shell Commands not available on Windows.
+    (Larry McCay via cnauroth)
+
+    HADOOP-11280. TestWinUtils#testChmod fails after removal of
+    NO_PROPAGATE_INHERIT_ACE. (cnauroth)
+
+    HADOOP-11282. Skip NFS TestShellBasedIdMapping tests that are irrelevant on
+    Windows. (cnauroth)
+
+    HADOOP-9576. Changed NetUtils#wrapException to throw EOFException instead
+    of wrapping it as IOException. (Steve Loughran via jianhe)
+
+
+Release 2.5.2 - 2014-11-19
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+  
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HADOOP-11243. SSLFactory shouldn't allow SSLv3. (Wei Yan via kasha)
+
+    HADOOP-11260. Patch up Jetty to disable SSLv3. (Mike Yoder via kasha)
+
+    HADOOP-11307. create-release script should run git clean first (kasha)
+
 Release 2.5.1 - 2014-09-05
 
   INCOMPATIBLE CHANGES

+ 17 - 1
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -241,6 +241,16 @@
        <Method name="writeVLong" />
        <Bug pattern="SF_SWITCH_FALLTHROUGH" />
      </Match>
+     <Match>
+       <Class name="org.apache.hadoop.io.Text" />
+       <Method name="bytesToCodePoint" />
+       <Bug pattern="SF_SWITCH_NO_DEFAULT" />
+     </Match>
+     <Match>
+       <Class name="org.apache.hadoop.util.PureJavaCrc32C" />
+       <Method name="update" />
+       <Bug pattern="SF_SWITCH_NO_DEFAULT" />
+     </Match>
     <!--
 	  The switch condition fall through is intentional and for performance
 	  purposes.
@@ -298,7 +308,7 @@
     </Match>
     <Match>
       <!-- protobuf generated code -->
-      <Class name="~org\.apache\.hadoop\.tracing\.TraceAdminPB.*">
+      <Class name="~org\.apache\.hadoop\.tracing\.TraceAdminPB.*"/>
     </Match>
 
     <!--
@@ -376,4 +386,10 @@
     <Bug pattern="REC_CATCH_EXCEPTION"/>
   </Match>
 
+  <Match>
+    <Class name="org.apache.hadoop.conf.Configuration"/>
+    <Method name="loadProperty"/>
+    <Bug pattern="NP_NULL_PARAM_DEREF"/>
+  </Match>
+
 </FindBugsFilter>

+ 14 - 3
hadoop-common-project/hadoop-common/pom.xml

@@ -34,6 +34,8 @@
     <kdc.resource.dir>src/test/resources/kdc</kdc.resource.dir>
     <hadoop.component>common</hadoop.component>
     <is.hadoop.component>true</is.hadoop.component>
+    <wsce.config.dir>../etc/hadoop</wsce.config.dir>
+    <wsce.config.file>wsce-site.xml</wsce.config.file>
   </properties>
 
 
@@ -278,6 +280,11 @@
       <groupId>org.apache.commons</groupId>
       <artifactId>commons-compress</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.bouncycastle</groupId>
+      <artifactId>bcprov-jdk16</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>
@@ -490,7 +497,6 @@
             <exclude>src/test/resources/kdc/ldif/users.ldif</exclude>
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.h</exclude>
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c</exclude>
-            <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4_encoder.h</exclude>
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h</exclude>
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c</exclude>
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h</exclude>
@@ -523,6 +529,8 @@
         <openssl.include></openssl.include>
         <require.openssl>false</require.openssl>
         <runningWithNative>true</runningWithNative>
+        <bundle.openssl.in.bin>false</bundle.openssl.in.bin>
+        <extra.libhadoop.rpath></extra.libhadoop.rpath>
       </properties>
       <build>
         <plugins>
@@ -594,7 +602,7 @@
                 <configuration>
                   <target>
                     <exec executable="cmake" dir="${project.build.directory}/native" failonerror="true">
-                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_BZIP2=${require.bzip2} -DREQUIRE_SNAPPY=${require.snappy} -DCUSTOM_SNAPPY_PREFIX=${snappy.prefix} -DCUSTOM_SNAPPY_LIB=${snappy.lib} -DCUSTOM_SNAPPY_INCLUDE=${snappy.include} -DREQUIRE_OPENSSL=${require.openssl} -DCUSTOM_OPENSSL_PREFIX=${openssl.prefix} -DCUSTOM_OPENSSL_LIB=${openssl.lib} -DCUSTOM_OPENSSL_INCLUDE=${openssl.include}"/>
+                      <arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_BZIP2=${require.bzip2} -DREQUIRE_SNAPPY=${require.snappy} -DCUSTOM_SNAPPY_PREFIX=${snappy.prefix} -DCUSTOM_SNAPPY_LIB=${snappy.lib} -DCUSTOM_SNAPPY_INCLUDE=${snappy.include} -DREQUIRE_OPENSSL=${require.openssl} -DCUSTOM_OPENSSL_PREFIX=${openssl.prefix} -DCUSTOM_OPENSSL_LIB=${openssl.lib} -DCUSTOM_OPENSSL_INCLUDE=${openssl.include} -DEXTRA_LIBHADOOP_RPATH=${extra.libhadoop.rpath}"/>
                     </exec>
                     <exec executable="make" dir="${project.build.directory}/native" failonerror="true">
                       <arg line="VERBOSE=1"/>
@@ -643,7 +651,7 @@
         <openssl.include></openssl.include>
         <require.openssl>false</require.openssl>
         <runningWithNative>true</runningWithNative>
-        <bundle.openssl.in.bin>true</bundle.openssl.in.bin>
+        <bundle.openssl.in.bin>false</bundle.openssl.in.bin>
       </properties>
       <build>
         <plugins>
@@ -715,6 +723,9 @@
                     <argument>/nologo</argument>
                     <argument>/p:Configuration=Release</argument>
                     <argument>/p:OutDir=${project.build.directory}/bin/</argument>
+                    <argument>/p:IntermediateOutputPath=${project.build.directory}/winutils/</argument>
+                    <argument>/p:WsceConfigDir=${wsce.config.dir}</argument>
+                    <argument>/p:WsceConfigFile=${wsce.config.file}</argument>
                   </arguments>
                 </configuration>
               </execution>

+ 57 - 19
hadoop-common-project/hadoop-common/src/CMakeLists.txt

@@ -78,16 +78,34 @@ macro(set_find_shared_library_version LVERS)
     ENDIF()
 endmacro(set_find_shared_library_version LVERS)
 
+#
+# Alter the behavior of find_package and find_library so that we find only
+# shared libraries without any version suffix.  You should save
+# CMAKE_FIND_LIBRARY_SUFFIXES before calling this function and restore it
+# afterwards.
+#
+macro(set_find_shared_library_without_version)
+    IF(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+        # Mac OS uses .dylib
+        SET(CMAKE_FIND_LIBRARY_SUFFIXES ".dylib")
+    ELSEIF(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
+        # No effect
+    ELSE()
+        # Most UNIX variants use .so
+        SET(CMAKE_FIND_LIBRARY_SUFFIXES ".so")
+    ENDIF()
+endmacro(set_find_shared_library_without_version)
+
 if (NOT GENERATED_JAVAH)
     # Must identify where the generated headers have been placed
     MESSAGE(FATAL_ERROR "You must set the cmake variable GENERATED_JAVAH")
 endif (NOT GENERATED_JAVAH)
 find_package(JNI REQUIRED)
 
-SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES CMAKE_FIND_LIBRARY_SUFFIXES)
+SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
 set_find_shared_library_version("1")
 find_package(ZLIB REQUIRED)
-SET(CMAKE_FIND_LIBRARY_SUFFIXES STORED_CMAKE_FIND_LIBRARY_SUFFIXES)
+SET(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})
 
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
 set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
@@ -97,7 +115,7 @@ set(T main/native/src/test/org/apache/hadoop)
 
 GET_FILENAME_COMPONENT(HADOOP_ZLIB_LIBRARY ${ZLIB_LIBRARIES} NAME)
 
-SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES CMAKE_FIND_LIBRARY_SUFFIXES)
+SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
 set_find_shared_library_version("1")
 find_package(BZip2 QUIET)
 if (BZIP2_INCLUDE_DIR AND BZIP2_LIBRARIES)
@@ -112,7 +130,7 @@ else (BZIP2_INCLUDE_DIR AND BZIP2_LIBRARIES)
         MESSAGE(FATAL_ERROR "Required bzip2 library and/or header files could not be found.")
     ENDIF(REQUIRE_BZIP2)
 endif (BZIP2_INCLUDE_DIR AND BZIP2_LIBRARIES)
-SET(CMAKE_FIND_LIBRARY_SUFFIXES STORED_CMAKE_FIND_LIBRARY_SUFFIXES)
+SET(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})
 
 INCLUDE(CheckFunctionExists)
 INCLUDE(CheckCSourceCompiles)
@@ -121,13 +139,13 @@ CHECK_FUNCTION_EXISTS(sync_file_range HAVE_SYNC_FILE_RANGE)
 CHECK_FUNCTION_EXISTS(posix_fadvise HAVE_POSIX_FADVISE)
 CHECK_LIBRARY_EXISTS(dl dlopen "" NEED_LINK_DL)
 
-SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES CMAKE_FIND_LIBRARY_SUFFIXES)
+SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
 set_find_shared_library_version("1")
 find_library(SNAPPY_LIBRARY 
     NAMES snappy
     PATHS ${CUSTOM_SNAPPY_PREFIX} ${CUSTOM_SNAPPY_PREFIX}/lib
           ${CUSTOM_SNAPPY_PREFIX}/lib64 ${CUSTOM_SNAPPY_LIB})
-SET(CMAKE_FIND_LIBRARY_SUFFIXES STORED_CMAKE_FIND_LIBRARY_SUFFIXES)
+SET(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})
 find_path(SNAPPY_INCLUDE_DIR 
     NAMES snappy.h
     PATHS ${CUSTOM_SNAPPY_PREFIX} ${CUSTOM_SNAPPY_PREFIX}/include
@@ -145,37 +163,53 @@ else (SNAPPY_LIBRARY AND SNAPPY_INCLUDE_DIR)
     ENDIF(REQUIRE_SNAPPY)
 endif (SNAPPY_LIBRARY AND SNAPPY_INCLUDE_DIR)
 
-SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES CMAKE_FIND_LIBRARY_SUFFIXES)
-set_find_shared_library_version("1.0.0")
+# Find the no-suffix version of libcrypto.
+# See HADOOP-11216 for details.
+SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
+set_find_shared_library_without_version()
 SET(OPENSSL_NAME "crypto")
 IF(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
     SET(OPENSSL_NAME "eay32")
 ENDIF()
+MESSAGE("CUSTOM_OPENSSL_PREFIX = ${CUSTOM_OPENSSL_PREFIX}")
 find_library(OPENSSL_LIBRARY
     NAMES ${OPENSSL_NAME}
     PATHS ${CUSTOM_OPENSSL_PREFIX} ${CUSTOM_OPENSSL_PREFIX}/lib
           ${CUSTOM_OPENSSL_PREFIX}/lib64 ${CUSTOM_OPENSSL_LIB} NO_DEFAULT_PATH)
-find_library(OPENSSL_LIBRARY
-    NAMES ${OPENSSL_NAME})
-SET(CMAKE_FIND_LIBRARY_SUFFIXES STORED_CMAKE_FIND_LIBRARY_SUFFIXES)
+find_library(OPENSSL_LIBRARY NAMES ${OPENSSL_NAME})
 find_path(OPENSSL_INCLUDE_DIR 
     NAMES openssl/evp.h
     PATHS ${CUSTOM_OPENSSL_PREFIX} ${CUSTOM_OPENSSL_PREFIX}/include
           ${CUSTOM_OPENSSL_INCLUDE} NO_DEFAULT_PATH)
-find_path(OPENSSL_INCLUDE_DIR 
-    NAMES openssl/evp.h)
+find_path(OPENSSL_INCLUDE_DIR NAMES openssl/evp.h)
+SET(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})
+SET(USABLE_OPENSSL 0)
 if (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR)
     GET_FILENAME_COMPONENT(HADOOP_OPENSSL_LIBRARY ${OPENSSL_LIBRARY} NAME)
+    INCLUDE(CheckCSourceCompiles)
+    SET(OLD_CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES})
+    SET(CMAKE_REQUIRED_INCLUDES ${OPENSSL_INCLUDE_DIR})
+    CHECK_C_SOURCE_COMPILES("#include \"${OPENSSL_INCLUDE_DIR}/openssl/evp.h\"\nint main(int argc, char **argv) { return !EVP_aes_256_ctr; }" HAS_NEW_ENOUGH_OPENSSL)
+    SET(CMAKE_REQUIRED_INCLUDES ${OLD_CMAKE_REQUIRED_INCLUDES})
+    if(NOT HAS_NEW_ENOUGH_OPENSSL)
+        MESSAGE("The OpenSSL library installed at ${OPENSSL_LIBRARY} is too old.  You need a version at least new enough to have EVP_aes_256_ctr.")
+    else(NOT HAS_NEW_ENOUGH_OPENSSL)
+        SET(USABLE_OPENSSL 1)
+    endif(NOT HAS_NEW_ENOUGH_OPENSSL)
+endif (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR)
+if (USABLE_OPENSSL)
     SET(OPENSSL_SOURCE_FILES
         "${D}/crypto/OpensslCipher.c"
         "${D}/crypto/random/OpensslSecureRandom.c")
-else (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR)
-    SET(OPENSSL_INCLUDE_DIR "")
-    SET(OPENSSL_SOURCE_FILES "")
+else (USABLE_OPENSSL)
+    MESSAGE("Cannot find a usable OpenSSL library.  OPENSSL_LIBRARY=${OPENSSL_LIBRARY}, OPENSSL_INCLUDE_DIR=${OPENSSL_INCLUDE_DIR}, CUSTOM_OPENSSL_LIB=${CUSTOM_OPENSSL_LIB}, CUSTOM_OPENSSL_PREFIX=${CUSTOM_OPENSSL_PREFIX}, CUSTOM_OPENSSL_INCLUDE=${CUSTOM_OPENSSL_INCLUDE}")
     IF(REQUIRE_OPENSSL)
-        MESSAGE(FATAL_ERROR "Required openssl library could not be found.  OPENSSL_LIBRARY=${OPENSSL_LIBRARY}, OPENSSL_INCLUDE_DIR=${OPENSSL_INCLUDE_DIR}, CUSTOM_OPENSSL_INCLUDE_DIR=${CUSTOM_OPENSSL_INCLUDE_DIR}, CUSTOM_OPENSSL_PREFIX=${CUSTOM_OPENSSL_PREFIX}, CUSTOM_OPENSSL_INCLUDE=${CUSTOM_OPENSSL_INCLUDE}")
+        MESSAGE(FATAL_ERROR "Terminating build because require.openssl was specified.")
     ENDIF(REQUIRE_OPENSSL)
-endif (OPENSSL_LIBRARY AND OPENSSL_INCLUDE_DIR)
+    SET(OPENSSL_LIBRARY "")
+    SET(OPENSSL_INCLUDE_DIR "")
+    SET(OPENSSL_SOURCE_FILES "")
+endif (USABLE_OPENSSL)
 
 include_directories(
     ${GENERATED_JAVAH}
@@ -233,8 +267,12 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
     # dlopen will look in the directory containing libhadoop.so.
     # However, $ORIGIN is not supported by all operating systems.
     #
+    set(RPATH "\$ORIGIN/")
+    if (EXTRA_LIBHADOOP_RPATH)
+        set(RPATH "${RPATH}:${EXTRA_LIBHADOOP_RPATH}/")
+    endif(EXTRA_LIBHADOOP_RPATH)
     SET_TARGET_PROPERTIES(hadoop 
-        PROPERTIES INSTALL_RPATH "\$ORIGIN/")
+        PROPERTIES INSTALL_RPATH "${RPATH}")
 ENDIF()
 
 target_link_dual_libraries(hadoop

+ 8 - 3
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -17,7 +17,7 @@
 
 function hadoop_usage()
 {
-  echo "Usage: hadoop [--config confdir] COMMAND"
+  echo "Usage: hadoop [--config confdir] [--loglevel loglevel] COMMAND"
   echo "       where COMMAND is one of:"
   echo "  archive -archiveName NAME -p <parent path> <src>* <dest>"
   echo "                       create a Hadoop archive"
@@ -33,6 +33,8 @@ function hadoop_usage()
   echo "                       copy file or directories recursively"
   echo "  fs                   run a generic filesystem user client"
   echo "  jar <jar>            run a jar file"
+  echo "                       note: please use \"yarn jar\" to launch"
+  echo "                             YARN applications, not this command."
   echo "  jnipath              prints the java.library.path"
   echo "  key                  manage keys via the KeyProvider"
   echo "  trace                view and modify Hadoop tracing settings"
@@ -150,6 +152,9 @@ case ${COMMAND} in
     CLASS=org.apache.hadoop.fs.FsShell
   ;;
   jar)
+    if [[ -n "${YARN_OPTS}" ]] || [[ -n "${YARN_CLIENT_OPTS}" ]]; then
+      hadoop_error "WARNING: Use \"yarn jar\" to launch YARN applications."
+    fi
     CLASS=org.apache.hadoop.util.RunJar
   ;;
   jnipath)
@@ -174,12 +179,12 @@ case ${COMMAND} in
   ;;
 esac
 
+hadoop_verify_user "${COMMAND}"
+
 # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
 hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
 HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 
-hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
-
 hadoop_finalize
 hadoop_java_exec "${COMMAND}" "${CLASS}" "$@"
 

+ 15 - 1
hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.cmd

@@ -88,6 +88,16 @@ if "%1" == "--hosts" (
   shift
 )
 
+@rem
+@rem Set log level. Default to INFO.
+@rem
+
+if "%1" == "--loglevel" (
+  set HADOOP_LOGLEVEL=%2
+  shift
+  shift
+)
+
 if exist %HADOOP_CONF_DIR%\hadoop-env.cmd (
   call %HADOOP_CONF_DIR%\hadoop-env.cmd
 )
@@ -157,8 +167,12 @@ if not defined HADOOP_LOGFILE (
   set HADOOP_LOGFILE=hadoop.log
 )
 
+if not defined HADOOP_LOGLEVEL (
+  set HADOOP_LOGLEVEL=INFO
+)
+
 if not defined HADOOP_ROOT_LOGGER (
-  set HADOOP_ROOT_LOGGER=INFO,console
+  set HADOOP_ROOT_LOGGER=%HADOOP_LOGLEVEL%,console
 )
 
 @rem

+ 9 - 0
hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

@@ -148,6 +148,12 @@ while [[ -z "${_hadoop_common_done}" ]]; do
       hadoop_populate_slaves_file "$1"
       shift
     ;;
+    --loglevel)
+      shift
+      # shellcheck disable=SC2034
+      HADOOP_LOGLEVEL="$1"
+      shift
+    ;;
     *)
       _hadoop_common_done=true
     ;;
@@ -162,6 +168,9 @@ hadoop_exec_userfuncs
 # IMPORTANT! User provided code is now available!
 #
 
+hadoop_exec_hadooprc
+hadoop_verify_confdir
+
 # do all the OS-specific startup bits here
 # this allows us to get a decent JAVA_HOME,
 # call crle for LD_LIBRARY_PATH, etc.

+ 14 - 5
hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemons.sh

@@ -36,10 +36,10 @@ fi
 HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}"
 # shellcheck disable=SC2034
 HADOOP_NEW_CONFIG=true
-if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
-  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+if [[ -f "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hdfs-config.sh"
 else
-  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
+  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hdfs-config.sh." 2>&1
   exit 1
 fi
 
@@ -47,5 +47,14 @@ if [[ $# = 0 ]]; then
   hadoop_exit_with_usage 1
 fi
 
-hadoop_connect_to_hosts "${bin}/hadoop-daemon.sh" \
---config "${HADOOP_CONF_DIR}" "$@"
+daemonmode=$1
+shift
+
+if [[ -z "${HADOOP_HDFS_HOME}" ]]; then
+  hdfsscript="${HADOOP_PREFIX}/bin/hdfs"
+else
+  hdfsscript="${HADOOP_HDFS_HOME}/bin/hdfs"
+fi
+
+hadoop_connect_to_hosts "$hdfsscript" \
+    --config "${HADOOP_CONF_DIR}" --daemon "${daemonmode}" "$@"

+ 135 - 23
hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh

@@ -92,6 +92,15 @@ function hadoop_find_confdir
   hadoop_debug "HADOOP_CONF_DIR=${HADOOP_CONF_DIR}"
 }
 
+function hadoop_verify_confdir
+{
+  # Check only log4j.properties by default.
+  # --loglevel does not work without logger settings in log4j.log4j.properties.
+  if [[ ! -f "${HADOOP_CONF_DIR}/log4j.properties" ]]; then
+    hadoop_error "WARNING: log4j.properties is not found. HADOOP_CONF_DIR may be incomplete."
+  fi
+}
+
 function hadoop_exec_hadoopenv
 {
   # NOTE: This function is not user replaceable.
@@ -113,6 +122,17 @@ function hadoop_exec_userfuncs
   fi
 }
 
+function hadoop_exec_hadooprc
+{
+  # Read the user's settings.  This provides for users to override 
+  # and/or append hadoop-env.sh. It is not meant as a complete system override.
+
+  if [[ -f "${HOME}/.hadooprc" ]]; then
+    hadoop_debug "Applying the user's .hadooprc"
+    . "${HOME}/.hadooprc"
+  fi
+}
+
 function hadoop_basic_init
 {
   # Some of these are also set in hadoop-env.sh.
@@ -152,16 +172,16 @@ function hadoop_basic_init
     export HADOOP_MAPRED_HOME="${HADOOP_PREFIX}"
   fi
   
-  HADOOP_IDENT_STRING=${HADOP_IDENT_STRING:-$USER}
+  HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
   HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_PREFIX}/logs"}
   HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log}
+  HADOOP_LOGLEVEL=${HADOOP_LOGLEVEL:-INFO}
   HADOOP_NICENESS=${HADOOP_NICENESS:-0}
   HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}
   HADOOP_PID_DIR=${HADOOP_PID_DIR:-/tmp}
-  HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-INFO,console}
-  HADOOP_DAEMON_ROOT_LOGGER=${HADOOP_DAEMON_ROOT_LOGGER:-INFO,RFA}
+  HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-${HADOOP_LOGLEVEL},console}
+  HADOOP_DAEMON_ROOT_LOGGER=${HADOOP_DAEMON_ROOT_LOGGER:-${HADOOP_LOGLEVEL},RFA}
   HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}
-  HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
   HADOOP_SSH_OPTS=${HADOOP_SSH_OPTS:-"-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"}
   HADOOP_SECURE_LOG_DIR=${HADOOP_SECURE_LOG_DIR:-${HADOOP_LOG_DIR}}
   HADOOP_SECURE_PID_DIR=${HADOOP_SECURE_PID_DIR:-${HADOOP_PID_DIR}}
@@ -272,6 +292,12 @@ function hadoop_connect_to_hosts
     # moral of the story: just use pdsh.
     export -f hadoop_actual_ssh
     export HADOOP_SSH_OPTS
+
+    # xargs is used with option -I to replace the placeholder in arguments
+    # list with each hostname read from stdin/pipe. But it consider one 
+    # line as one argument while reading from stdin/pipe. So place each 
+    # hostname in different lines while passing via pipe.
+    SLAVE_NAMES=$(echo "$SLAVE_NAMES" | tr ' ' '\n' )
     echo "${SLAVE_NAMES}" | \
     xargs -n 1 -P"${HADOOP_SSH_PARALLEL}" \
     -I {} bash -c --  "hadoop_actual_ssh {} ${params}"
@@ -285,6 +311,9 @@ function hadoop_validate_classname
   shift 1
 
   if [[ ! ${class} =~ \. ]]; then
+    # assuming the arg is typo of command if it does not conatain ".".
+    # class belonging to no package is not allowed as a result.
+    hadoop_error "ERROR: ${class} is not COMMAND nor fully qualified CLASSNAME."
     return 1
   fi
   return 0
@@ -533,8 +562,10 @@ function hadoop_os_tricks
 {
   local bindv6only
 
-  # some OSes have special needs. here's some out of the box
-  # examples for OS X and Linux. Vendors, replace this with your special sauce.
+  # Some OSes have special needs.  Here's some out of the box examples for OS X,
+  # Linux and Windows on Cygwin.
+  # Vendors, replace this with your special sauce.
+  HADOOP_IS_CYGWIN=false
   case ${HADOOP_OS_TYPE} in
     Darwin)
       if [[ -z "${JAVA_HOME}" ]]; then
@@ -565,6 +596,10 @@ function hadoop_os_tricks
       # down to prevent vmem explosion.
       export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
     ;;
+    CYGWIN*)
+      # Flag that we're running on Cygwin to trigger path translation later.
+      HADOOP_IS_CYGWIN=true
+    ;;
   esac
 }
 
@@ -587,34 +622,71 @@ function hadoop_java_setup
     hadoop_error "ERROR: $JAVA is not executable."
     exit 1
   fi
-  # shellcheck disable=SC2034
-  JAVA_HEAP_MAX=-Xmx1g
-  HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-1024}
-  
-  # check envvars which might override default args
-  if [[ -n "$HADOOP_HEAPSIZE" ]]; then
-    # shellcheck disable=SC2034
-    JAVA_HEAP_MAX="-Xmx${HADOOP_HEAPSIZE}m"
-  fi
 }
 
 function hadoop_finalize_libpaths
 {
   if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
+    hadoop_translate_cygwin_path JAVA_LIBRARY_PATH
     hadoop_add_param HADOOP_OPTS java.library.path \
     "-Djava.library.path=${JAVA_LIBRARY_PATH}"
     export LD_LIBRARY_PATH
   fi
 }
 
+function hadoop_finalize_hadoop_heap
+{
+  if [[ -n "${HADOOP_HEAPSIZE_MAX}" ]]; then
+    if [[ "${HADOOP_HEAPSIZE_MAX}" =~ ^[0-9]+$ ]]; then
+      HADOOP_HEAPSIZE_MAX="${HADOOP_HEAPSIZE_MAX}m"
+    fi
+    hadoop_add_param HADOOP_OPTS Xmx "-Xmx${HADOOP_HEAPSIZE_MAX}"
+  fi
+
+  # backwards compatibility
+  if [[ -n "${HADOOP_HEAPSIZE}" ]]; then
+    if [[ "${HADOOP_HEAPSIZE}" =~ ^[0-9]+$ ]]; then
+      HADOOP_HEAPSIZE="${HADOOP_HEAPSIZE}m"
+    fi
+    hadoop_add_param HADOOP_OPTS Xmx "-Xmx${HADOOP_HEAPSIZE}"
+  fi
+
+  if [[ -n "${HADOOP_HEAPSIZE_MIN}" ]]; then
+    if [[ "${HADOOP_HEAPSIZE_MIN}" =~ ^[0-9]+$ ]]; then
+      HADOOP_HEAPSIZE_MIN="${HADOOP_HEAPSIZE_MIN}m"
+    fi
+    hadoop_add_param HADOOP_OPTS Xms "-Xms${HADOOP_HEAPSIZE_MIN}"
+  fi
+}
+
+# Accepts a variable name.  If running on Cygwin, sets the variable value to the
+# equivalent translated Windows path by running the cygpath utility.  If the
+# second argument is true, then the variable is treated as a path list.
+function hadoop_translate_cygwin_path
+{
+  if [[ "${HADOOP_IS_CYGWIN}" = "true" ]]; then
+    if [[ "$2" = "true" ]]; then
+      #shellcheck disable=SC2016
+      eval "$1"='$(cygpath -p -w "${!1}" 2>/dev/null)'
+    else
+      #shellcheck disable=SC2016
+      eval "$1"='$(cygpath -w "${!1}" 2>/dev/null)'
+    fi
+  fi
+}
+
 #
 # fill in any last minute options that might not have been defined yet
 #
 function hadoop_finalize_hadoop_opts
 {
+  hadoop_translate_cygwin_path HADOOP_LOG_DIR
   hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=${HADOOP_LOG_DIR}"
   hadoop_add_param HADOOP_OPTS hadoop.log.file "-Dhadoop.log.file=${HADOOP_LOGFILE}"
-  hadoop_add_param HADOOP_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_PREFIX}"
+  HADOOP_HOME=${HADOOP_PREFIX}
+  hadoop_translate_cygwin_path HADOOP_HOME
+  export HADOOP_HOME
+  hadoop_add_param HADOOP_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_HOME}"
   hadoop_add_param HADOOP_OPTS hadoop.id.str "-Dhadoop.id.str=${HADOOP_IDENT_STRING}"
   hadoop_add_param HADOOP_OPTS hadoop.root.logger "-Dhadoop.root.logger=${HADOOP_ROOT_LOGGER}"
   hadoop_add_param HADOOP_OPTS hadoop.policy.file "-Dhadoop.policy.file=${HADOOP_POLICYFILE}"
@@ -628,6 +700,26 @@ function hadoop_finalize_classpath
   # user classpath gets added at the last minute. this allows
   # override of CONF dirs and more
   hadoop_add_to_classpath_userpath
+  hadoop_translate_cygwin_path CLASSPATH true
+}
+
+function hadoop_finalize_catalina_opts
+{
+
+  local prefix=${HADOOP_CATALINA_PREFIX}
+
+  hadoop_add_param CATALINA_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_PREFIX}"
+  if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
+    hadoop_add_param CATALINA_OPTS java.library.path "-Djava.library.path=${JAVA_LIBRARY_PATH}"
+  fi
+  hadoop_add_param CATALINA_OPTS "${prefix}.home.dir" "-D${prefix}.home.dir=${HADOOP_PREFIX}"
+  hadoop_add_param CATALINA_OPTS "${prefix}.config.dir" "-D${prefix}.config.dir=${HADOOP_CATALINA_CONFIG}"
+  hadoop_add_param CATALINA_OPTS "${prefix}.log.dir" "-D${prefix}.log.dir=${HADOOP_CATALINA_LOG}"
+  hadoop_add_param CATALINA_OPTS "${prefix}.temp.dir" "-D${prefix}.temp.dir=${HADOOP_CATALINA_TEMP}"
+  hadoop_add_param CATALINA_OPTS "${prefix}.admin.port" "-D${prefix}.admin.port=${HADOOP_CATALINA_ADMIN_PORT}"
+  hadoop_add_param CATALINA_OPTS "${prefix}.http.port" "-D${prefix}.http.port=${HADOOP_CATALINA_HTTP_PORT}"
+  hadoop_add_param CATALINA_OPTS "${prefix}.max.threads" "-D${prefix}.max.threads=${HADOOP_CATALINA_MAX_THREADS}"
+  hadoop_add_param CATALINA_OPTS "${prefix}.ssl.keystore.file" "-D${prefix}.ssl.keystore.file=${HADOOP_CATALINA_SSL_KEYSTORE_FILE}"
 }
 
 function hadoop_finalize
@@ -636,7 +728,15 @@ function hadoop_finalize
   # override of CONF dirs and more
   hadoop_finalize_classpath
   hadoop_finalize_libpaths
+  hadoop_finalize_hadoop_heap
   hadoop_finalize_hadoop_opts
+
+  hadoop_translate_cygwin_path HADOOP_PREFIX
+  hadoop_translate_cygwin_path HADOOP_CONF_DIR
+  hadoop_translate_cygwin_path HADOOP_COMMON_HOME
+  hadoop_translate_cygwin_path HADOOP_HDFS_HOME
+  hadoop_translate_cygwin_path HADOOP_YARN_HOME
+  hadoop_translate_cygwin_path HADOOP_MAPRED_HOME
 }
 
 function hadoop_exit_with_usage
@@ -837,13 +937,13 @@ function hadoop_start_daemon_wrapper
   # shellcheck disable=SC2086
   renice "${HADOOP_NICENESS}" $! >/dev/null 2>&1
   if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR: Cannot set priority of ${daemoname} process $!"
+    hadoop_error "ERROR: Cannot set priority of ${daemonname} process $!"
   fi
   
   # shellcheck disable=SC2086
   disown %+ >/dev/null 2>&1
   if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR: Cannot disconnect ${daemoname} process $!"
+    hadoop_error "ERROR: Cannot disconnect ${daemonname} process $!"
   fi
   sleep 1
   
@@ -898,7 +998,7 @@ function hadoop_start_secure_daemon
   #shellcheck disable=SC2086
   echo $$ > "${privpidfile}" 2>/dev/null
   if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR:  Cannot write ${daemoname} pid ${privpidfile}."
+    hadoop_error "ERROR:  Cannot write ${daemonname} pid ${privpidfile}."
   fi
   
   exec "${jsvc}" \
@@ -954,7 +1054,7 @@ function hadoop_start_secure_daemon_wrapper
   # so let's wait for the fork to finish 
   # before overriding with the daemonized pid
   (( counter=0 ))
-  while [[ ! -f ${pidfile} && ${counter} -le 5 ]]; do
+  while [[ ! -f ${daemonpidfile} && ${counter} -le 5 ]]; do
     sleep 1
     (( counter++ ))
   done
@@ -963,7 +1063,7 @@ function hadoop_start_secure_daemon_wrapper
   #shellcheck disable=SC2086
   echo $! > "${jsvcpidfile}" 2>/dev/null
   if [[ $? -gt 0 ]]; then
-    hadoop_error "ERROR:  Cannot write ${daemonname} pid ${pidfile}."
+    hadoop_error "ERROR:  Cannot write ${daemonname} pid ${daemonpidfile}."
   fi
   
   sleep 1
@@ -1037,8 +1137,8 @@ function hadoop_daemon_handler
   local daemonmode=$1
   local daemonname=$2
   local class=$3
-  local pidfile=$4
-  local outfile=$5
+  local daemon_pidfile=$4
+  local daemon_outfile=$5
   shift 5
   
   case ${daemonmode} in
@@ -1128,3 +1228,15 @@ function hadoop_secure_daemon_handler
   esac
 }
 
+function hadoop_verify_user
+{
+  local command=$1
+  local uservar="HADOOP_${command}_USER"
+
+  if [[ -n ${!uservar} ]]; then
+    if [[ ${!uservar} !=  ${USER} ]]; then
+      hadoop_error "ERROR: ${command} can only be executed by ${!uservar}."
+      exit 1
+    fi
+  fi
+}

+ 27 - 2
hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd

@@ -88,6 +88,10 @@ call :updatepath %HADOOP_BIN_PATH%
     shift
     shift
   )
+  if "%1" == "--loglevel" (
+    shift
+    shift
+  )
 
   set hadoop-command=%1
   if not defined hadoop-command (
@@ -142,7 +146,7 @@ call :updatepath %HADOOP_BIN_PATH%
     )
   )
   
-  set corecommands=fs version jar checknative distcp daemonlog archive classpath
+  set corecommands=fs version jar checknative distcp daemonlog archive classpath credential key
   for %%i in ( %corecommands% ) do (
     if %hadoop-command% == %%i set corecommand=true  
   )
@@ -177,6 +181,11 @@ call :updatepath %HADOOP_BIN_PATH%
   goto :eof
 
 :jar
+  if defined YARN_OPTS (
+    @echo WARNING: Use "yarn jar" to launch YARN applications.
+  ) else if defined YARN_CLIENT_OPTS (
+    @echo WARNING: Use "yarn jar" to launch YARN applications.
+  )
   set CLASS=org.apache.hadoop.util.RunJar
   goto :eof
 
@@ -202,6 +211,14 @@ call :updatepath %HADOOP_BIN_PATH%
   set CLASS=org.apache.hadoop.util.Classpath
   goto :eof
 
+:credential
+  set CLASS=org.apache.hadoop.security.alias.CredentialShell
+  goto :eof
+
+:key
+  set CLASS=org.apache.hadoop.crypto.key.KeyShell
+  goto :eof
+
 :updatepath
   set path_to_add=%*
   set current_path_comparable=%path%
@@ -230,6 +247,10 @@ call :updatepath %HADOOP_BIN_PATH%
     shift
     shift
   )
+  if "%1" == "--loglevel" (
+    shift
+    shift
+  )
   if [%2] == [] goto :eof
   shift
   set _arguments=
@@ -248,16 +269,20 @@ call :updatepath %HADOOP_BIN_PATH%
   goto :eof
 
 :print_usage
-  @echo Usage: hadoop [--config confdir] COMMAND
+  @echo Usage: hadoop [--config confdir] [--loglevel loglevel] COMMAND
   @echo where COMMAND is one of:
   @echo   fs                   run a generic filesystem user client
   @echo   version              print the version
   @echo   jar ^<jar^>            run a jar file
+  @echo                        note: please use "yarn jar" to launch
+  @echo                              YARN applications, not this command.
   @echo   checknative [-a^|-h]  check native hadoop and compression libraries availability
   @echo   distcp ^<srcurl^> ^<desturl^> copy file or directories recursively
   @echo   archive -archiveName NAME -p ^<parent path^> ^<src^>* ^<dest^> create a hadoop archive
   @echo   classpath            prints the class path needed to get the
   @echo                        Hadoop jar and the required libraries
+  @echo   credential           interact with credential providers
+  @echo   key                  manage keys via the KeyProvider
   @echo   daemonlog            get/set the log level for each daemon
   @echo  or
   @echo   CLASSNAME            run the class named CLASSNAME

+ 0 - 2
hadoop-common-project/hadoop-common/src/main/bin/rcc

@@ -37,7 +37,5 @@ CLASS='org.apache.hadoop.record.compiler.generated.Rcc'
 hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
 HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 
-hadoop_add_param HADOOP_OPTS Xmx "${JAVA_HEAP_MAX}"
-
 hadoop_finalize
 hadoop_java_exec rcc "${CLASS}" "$@"

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/bin/slaves.sh

@@ -36,7 +36,7 @@ if [[ -n "${HADOOP_PREFIX}" ]]; then
   DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
 else
   this="${BASH_SOURCE-$0}"
-  bin=$(cd -P -- "$(dirname -- "${this}")" >dev/null && pwd -P)
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
   DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
 fi
 

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh

@@ -26,7 +26,7 @@ if [[ -n "${HADOOP_PREFIX}" ]]; then
   DEFAULT_LIBEXEC_DIR="${HADOOP_PREFIX}/libexec"
 else
   this="${BASH_SOURCE-$0}"
-  bin=$(cd -P -- "$(dirname -- "${this}")" >dev/null && pwd -P)
+  bin=$(cd -P -- "$(dirname -- "${this}")" >/dev/null && pwd -P)
   DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
 fi
 

+ 102 - 44
hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh

@@ -37,31 +37,49 @@
 #  JAVA_HOME=/usr/java/testing hdfs dfs -ls
 #
 # Therefore, the vast majority (BUT NOT ALL!) of these defaults
-# are configured for substitution and not append.  If you would
-# like append, you'll # need to modify this file accordingly.
+# are configured for substitution and not append.  If append
+# is preferable, modify this file accordingly.
 
 ###
 # Generic settings for HADOOP
 ###
 
 # Technically, the only required environment variable is JAVA_HOME.
-# All others are optional.  However, our defaults are probably not
-# your defaults.  Many sites configure these options outside of Hadoop,
+# All others are optional.  However, the defaults are probably not
+# preferred.  Many sites configure these options outside of Hadoop,
 # such as in /etc/profile.d
 
-# The java implementation to use.
-export JAVA_HOME=${JAVA_HOME:-"hadoop-env.sh is not configured"}
+# The java implementation to use. By default, this environment 
+# variable is REQUIRED on ALL platforms except OS X!
+# export JAVA_HOME=
+
+# Location of Hadoop.  By default, Hadoop will attempt to determine
+# this location based upon its execution path.
+# export HADOOP_PREFIX=
 
 # Location of Hadoop's configuration information.  i.e., where this
-# file is probably living.  You will almost certainly want to set
-# this in /etc/profile.d or equivalent.
+# file is probably living. Many sites will also set this in the
+# same location where JAVA_HOME is defined.  If this is not defined
+# Hadoop will attempt to locate it based upon its execution
+# path.
 # export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop
 
-# The maximum amount of heap to use, in MB. Default is 1024.
-# export HADOOP_HEAPSIZE=1024
+# The maximum amount of heap to use (Java -Xmx).  If no unit 
+# is provided, it will be converted to MB.  Daemons will 
+# prefer any Xmx setting in their respective _OPT variable.
+# There is no default; the JVM will autoscale based upon machine
+# memory size.
+# export HADOOP_HEAPSIZE_MAX=
+
+# The minimum amount of heap to use (Java -Xms).  If no unit 
+# is provided, it will be converted to MB.  Daemons will 
+# prefer any Xms setting in their respective _OPT variable.
+# There is no default; the JVM will autoscale based upon machine
+# memory size.
+# export HADOOP_HEAPSIZE_MIN=
 
 # Extra Java runtime options for all Hadoop commands. We don't support
-# IPv6 yet/still, so by default we set preference to IPv4.
+# IPv6 yet/still, so by default the preference is set to IPv4.
 # export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
 
 # Some parts of the shell code may do special things dependent upon
@@ -72,8 +90,8 @@ export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
 
 # Under certain conditions, Java on OS X will throw SCDynamicStore errors
 # in the system logs.
-# See HADOOP-8719 for more information.  If you need Kerberos
-# support on OS X, you'll want to change/remove this extra bit.
+# See HADOOP-8719 for more information.  If one needs Kerberos
+# support on OS X, one will want to change/remove this extra bit.
 case ${HADOOP_OS_TYPE} in
   Darwin*)
     export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.realm= "
@@ -82,11 +100,11 @@ case ${HADOOP_OS_TYPE} in
   ;;
 esac
 
-# Extra Java runtime options for Hadoop clients (i.e., hdfs dfs -blah)
-# These get added to HADOOP_OPTS for such commands.  In most cases,
-# this should be left empty and let users supply it on the
-# command line.
-# extra HADOOP_CLIENT_OPTS=""
+# Extra Java runtime options for some Hadoop commands
+# and clients (i.e., hdfs dfs -blah).  These get appended to HADOOP_OPTS for 
+# such commands.  In most cases, # this should be left empty and 
+# let users supply it on the command line.
+# export HADOOP_CLIENT_OPTS=""
 
 #
 # A note about classpaths.
@@ -149,20 +167,22 @@ esac
 #
 
 #
-# You can define variables right here and then re-use them later on.
-# For example, it is common to use the same garbage collection settings
-# for all the daemons.  So we could define:
-#
-# export HADOOP_GC_SETTINGS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps"
+# Many options may also be specified as Java properties.  It is
+# very common, and in many cases, desirable, to hard-set these
+# in daemon _OPTS variables.  Where applicable, the appropriate
+# Java property is also identified.  Note that many are re-used
+# or set differently in certain contexts (e.g., secure vs
+# non-secure)
 #
-# .. and then use it as per the b option under the namenode.
 
-# Where (primarily) daemon log files are stored.
-# $HADOOP_PREFIX/logs by default.
+# Where (primarily) daemon log files are stored.  # $HADOOP_PREFIX/logs 
+# by default.
+# Java property: hadoop.log.dir
 # export HADOOP_LOG_DIR=${HADOOP_PREFIX}/logs
 
 # A string representing this instance of hadoop. $USER by default.
 # This is used in writing log and pid files, so keep that in mind!
+# Java property: hadoop.id.str
 # export HADOOP_IDENT_STRING=$USER
 
 # How many seconds to pause after stopping a daemon
@@ -171,22 +191,26 @@ esac
 # Where pid files are stored.  /tmp by default.
 # export HADOOP_PID_DIR=/tmp
 
-# Default log level and output location
-# This sets the hadoop.root.logger property
+# Default log4j setting for interactive commands
+# Java property: hadoop.root.logger
 # export HADOOP_ROOT_LOGGER=INFO,console
 
-# Default log level for daemons spawned explicitly by hadoop-daemon.sh
-# This sets the hadoop.root.logger property
+# Default log4j setting for daemons spawned explicitly by 
+# --daemon option of hadoop, hdfs, mapred and yarn command.
+# Java property: hadoop.root.logger
 # export HADOOP_DAEMON_ROOT_LOGGER=INFO,RFA
 
 # Default log level and output location for security-related messages.
-# It sets -Dhadoop.security.logger on the command line.
-# You will almost certainly want to change this on a per-daemon basis!
+# You will almost certainly want to change this on a per-daemon basis via
+# the Java property (i.e., -Dhadoop.security.logger=foo). (Note that the
+# defaults for the NN and 2NN override this by default.)
+# Java property: hadoop.security.logger
 # export HADOOP_SECURITY_LOGGER=INFO,NullAppender
 
 # Default log level for file system audit messages.
-# It sets -Dhdfs.audit.logger on the command line.
-# You will almost certainly want to change this on a per-daemon basis!
+# Generally, this is specifically set in the namenode-specific
+# options line.
+# Java property: hdfs.audit.logger
 # export HADOOP_AUDIT_LOGGER=INFO,NullAppender
 
 # Default process priority level
@@ -194,8 +218,19 @@ esac
 # export HADOOP_NICENESS=0
 
 # Default name for the service level authorization file
+# Java property: hadoop.policy.file
 # export HADOOP_POLICYFILE="hadoop-policy.xml"
 
+#
+# NOTE: this is not used by default!  <-----
+# You can define variables right here and then re-use them later on.
+# For example, it is common to use the same garbage collection settings
+# for all the daemons.  So one could define:
+#
+# export HADOOP_GC_SETTINGS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps"
+#
+# .. and then use it as per the b option under the namenode.
+
 ###
 # Secure/privileged execution
 ###
@@ -206,7 +241,10 @@ esac
 # custom functions.  See hadoop-functions.sh for more information.
 #
 
-# The jsvc implementation to use. Jsvc is required to run secure datanodes.
+# The jsvc implementation to use. Jsvc is required to run secure datanodes
+# that bind to privileged ports to provide authentication of data transfer
+# protocol.  Jsvc is not required if SASL is configured for authentication of
+# data transfer protocol using non-privileged ports.
 # export JSVC_HOME=/usr/bin
 
 #
@@ -215,18 +253,26 @@ esac
 
 #
 # This directory contains the logs for secure and privileged processes.
+# Java property: hadoop.log.dir
 # export HADOOP_SECURE_LOG=${HADOOP_LOG_DIR}
 
 #
 # When running a secure daemon, the default value of HADOOP_IDENT_STRING
 # ends up being a bit bogus.  Therefore, by default, the code will
-# replace HADOOP_IDENT_STRING with HADOOP_SECURE_xx_USER.  If you want
+# replace HADOOP_IDENT_STRING with HADOOP_SECURE_xx_USER.  If one wants
 # to keep HADOOP_IDENT_STRING untouched, then uncomment this line.
 # export HADOOP_SECURE_IDENT_PRESERVE="true"
 
 ###
 # NameNode specific parameters
 ###
+
+# Default log level and output location for file system related change
+# messages. For non-namenode daemons, the Java property must be set in
+# the appropriate _OPTS if one wants something other than INFO,NullAppender
+# Java property: hdfs.audit.logger
+# export HDFS_AUDIT_LOGGER=INFO,NullAppender
+
 # Specify the JVM options to be used when starting the NameNode.
 # These options will be appended to the options specified as HADOOP_OPTS
 # and therefore may override any similar flags set in HADOOP_OPTS
@@ -241,7 +287,7 @@ esac
 # export HADOOP_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
 
 # this is the default:
-# export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS -Dhdfs.audit.logger=INFO,NullAppender"
+# export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
 
 ###
 # SecondaryNameNode specific parameters
@@ -251,7 +297,7 @@ esac
 # and therefore may override any similar flags set in HADOOP_OPTS
 #
 # This is the default:
-# export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS -Dhdfs.audit.logger=INFO,NullAppender"
+# export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
 
 ###
 # DataNode specific parameters
@@ -263,16 +309,21 @@ esac
 # This is the default:
 # export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
 
-# On secure datanodes, user to run the datanode as after dropping privileges
-# This **MUST** be uncommented to enable secure HDFS!
+# On secure datanodes, user to run the datanode as after dropping privileges.
+# This **MUST** be uncommented to enable secure HDFS if using privileged ports
+# to provide authentication of data transfer protocol.  This **MUST NOT** be
+# defined if SASL is configured for authentication of data transfer protocol
+# using non-privileged ports.
+# This will replace the hadoop.id.str Java property in secure mode.
 # export HADOOP_SECURE_DN_USER=hdfs
 
 # Supplemental options for secure datanodes
-# By default, we use jsvc which needs to know to launch a
+# By default, Hadoop uses jsvc which needs to know to launch a
 # server jvm.
 # export HADOOP_DN_SECURE_EXTRA_OPTS="-jvm server"
 
 # Where datanode log files are stored in the secure data environment.
+# This will replace the hadoop.log.dir Java property in secure mode.
 # export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_SECURE_LOG_DIR}
 
 # Where datanode pid files are stored in the secure data environment.
@@ -294,11 +345,12 @@ esac
 # export HADOOP_PORTMAP_OPTS="-Xmx512m"
 
 # Supplemental options for priviliged gateways
-# By default, we use jsvc which needs to know to launch a
+# By default, Hadoop uses jsvc which needs to know to launch a
 # server jvm.
 # export HADOOP_NFS3_SECURE_EXTRA_OPTS="-jvm server"
 
 # On privileged gateways, user to run the gateway as after dropping privileges
+# This will replace the hadoop.id.str Java property in secure mode.
 # export HADOOP_PRIVILEGED_NFS_USER=nfsserver
 
 ###
@@ -342,7 +394,13 @@ esac
 ###
 
 #
-# When building Hadoop, you can add the class paths to your commands
+# When building Hadoop, one can add the class paths to the commands
 # via this special env var:
-# HADOOP_ENABLE_BUILD_PATHS="true"
+# export HADOOP_ENABLE_BUILD_PATHS="true"
 
+#
+# To prevent accidents, shell commands be (superficially) locked
+# to only allow certain users to execute certain subcommands.
+#
+# For example, to limit who can execute the namenode command,
+# export HADOOP_namenode_USER=hdfs

+ 0 - 7
hadoop-common-project/hadoop-common/src/main/conf/hadoop-user-functions.sh.example

@@ -84,11 +84,4 @@
 #    echo "ERROR: ${JAVA} is not executable." 1>&2
 #    exit 1
 #  fi
-#  JAVA_HEAP_MAX=-Xmx1g
-#  HADOOP_HEAPSIZE=${HADOOP_HEAPSIZE:-128}
-#
-#  # check envvars which might override default args
-#  if [[ -n "$HADOOP_HEAPSIZE" ]]; then
-#    JAVA_HEAP_MAX="-Xmx${HADOOP_HEAPSIZE}m"
-#  fi
 #}

+ 3569 - 0
hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html

@@ -1,4 +1,3573 @@
 <META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<title>Hadoop  2.6.0 Release Notes</title>
+<STYLE type="text/css">
+	H1 {font-family: sans-serif}
+	H2 {font-family: sans-serif; margin-left: 7mm}
+	TABLE {margin-left: 7mm}
+</STYLE>
+</head>
+<body>
+<h1>Hadoop  2.6.0 Release Notes</h1>
+These release notes include new developer and user-facing incompatibilities, features, and major improvements. 
+<a name="changes"/>
+<h2>Changes since Hadoop 2.5.1</h2>
+<ul>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2853">YARN-2853</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>Killing app may hang while AM is unregistering</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2846">YARN-2846</a>.
+     Blocker bug reported by Junping Du and fixed by Junping Du (nodemanager)<br>
+     <b>Incorrect persist exit code for running containers in reacquireContainer() that interrupted by NodeManager restart.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2843">YARN-2843</a>.
+     Major sub-task reported by Sushmitha Sreenivasan and fixed by Wangda Tan (resourcemanager)<br>
+     <b>NodeLabels manager should trim all inputs for hosts and labels</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2841">YARN-2841</a>.
+     Critical sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+     <b>RMProxy should retry EOFException </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2834">YARN-2834</a>.
+     Blocker bug reported by Yesha Vora and fixed by Jian He <br>
+     <b>Resource manager crashed with Null Pointer Exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2830">YARN-2830</a>.
+     Blocker bug reported by Jonathan Eagles and fixed by Jonathan Eagles <br>
+     <b>Add backwords compatible ContainerId.newInstance constructor for use within Tez Local Mode</b><br>
+     <blockquote>I just committed this. Thanks [~jeagles] for the patch and [~ozawa] for the reviews!</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2827">YARN-2827</a>.
+     Critical bug reported by Wangda Tan and fixed by Wangda Tan (client)<br>
+     <b>Fix bugs of yarn queue CLI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2826">YARN-2826</a>.
+     Critical bug reported by Sidharta Seethana and fixed by Wangda Tan <br>
+     <b>User-Group mappings not updated by RM when a user is removed from a group.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2825">YARN-2825</a>.
+     Critical bug reported by Jian He and fixed by Jian He <br>
+     <b>Container leak on NM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2824">YARN-2824</a>.
+     Critical sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+     <b>Capacity of labels should be zero by default</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2823">YARN-2823</a>.
+     Critical bug reported by Gour Saha and fixed by Jian He (resourcemanager)<br>
+     <b>NullPointerException in RM HA enabled 3-node cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2819">YARN-2819</a>.
+     Critical bug reported by Gopal V and fixed by Zhijie Shen (timelineserver)<br>
+     <b>NPE in ATS Timeline Domains when upgrading from 2.4 to 2.6</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2818">YARN-2818</a>.
+     Critical bug reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+     <b>Remove the logic to inject entity owner as the primary filter</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2813">YARN-2813</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+     <b>NPE from MemoryTimelineStore.getDomains</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2812">YARN-2812</a>.
+     Major test reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+     <b>TestApplicationHistoryServer is likely to fail on less powerful machine</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2810">YARN-2810</a>.
+     Major test reported by Varun Vasudev and fixed by Varun Vasudev (resourcemanager)<br>
+     <b>TestRMProxyUsersConf fails on Windows VMs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2805">YARN-2805</a>.
+     Blocker bug reported by Arpit Gupta and fixed by Wangda Tan (resourcemanager)<br>
+     <b>RM2 in HA setup tries to login using the RM1's kerberos principal</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2804">YARN-2804</a>.
+     Critical bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Timeline server .out log have JAXB binding exceptions and warnings.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2803">YARN-2803</a>.
+     Critical bug reported by Chris Nauroth and fixed by Craig Welch (nodemanager)<br>
+     <b>MR distributed cache not working correctly on Windows after NodeManager privileged account changes.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2798">YARN-2798</a>.
+     Blocker bug reported by Arpit Gupta and fixed by Zhijie Shen (timelineserver)<br>
+     <b>YarnClient doesn't need to translate Kerberos name of timeline DT renewer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2795">YARN-2795</a>.
+     Major sub-task reported by Phil D'Amore and fixed by Wangda Tan (resourcemanager)<br>
+     <b>Resource Manager fails startup with HDFS label storage and secure cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2794">YARN-2794</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>Fix log msgs about distributing system-credentials </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2790">YARN-2790</a>.
+     Critical bug reported by Tassapol Athiapinya and fixed by Jian He (nodemanager)<br>
+     <b>NM can't aggregate logs past HDFS delegation token expiry.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2789">YARN-2789</a>.
+     Critical task reported by Siddharth Seth and fixed by Wangda Tan <br>
+     <b>Re-instate the NodeReport.newInstance API modified in YARN-2698</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2788">YARN-2788</a>.
+     Blocker bug reported by Gopal V and fixed by Xuan Gong (log-aggregation)<br>
+     <b>yarn logs -applicationId on 2.6.0 should support logs written by 2.4.0</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2785">YARN-2785</a>.
+     Major test reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>TestContainerResourceUsage fails intermittently</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2779">YARN-2779</a>.
+     Critical bug reported by Zhijie Shen and fixed by Zhijie Shen (resourcemanager , timelineserver)<br>
+     <b>SystemMetricsPublisher can use Kerberos directly instead of timeline DT</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2778">YARN-2778</a>.
+     Major sub-task reported by Wangda Tan and fixed by Wangda Tan (client)<br>
+     <b>YARN node CLI should display labels on returned node reports</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2770">YARN-2770</a>.
+     Critical sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+     <b>Timeline delegation tokens need to be automatically renewed by the RM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2769">YARN-2769</a>.
+     Major bug reported by Varun Vasudev and fixed by Varun Vasudev (applications/distributed-shell)<br>
+     <b>Timeline server domain not set correctly when using shell_command on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2767">YARN-2767</a>.
+     Major test reported by Varun Vasudev and fixed by Varun Vasudev (resourcemanager)<br>
+     <b>RM web services - add test case to ensure the http static user cannot kill or submit apps in secure mode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2760">YARN-2760</a>.
+     Trivial bug reported by Harsh J and fixed by Harsh J (documentation)<br>
+     <b>Completely remove word 'experimental' from FairScheduler docs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2758">YARN-2758</a>.
+     Major test reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+     <b>Update TestApplicationHistoryClientService to use the new generic history store</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2755">YARN-2755</a>.
+     Critical bug reported by Siqi Li and fixed by Siqi Li <br>
+     <b>NM fails to clean up usercache_DEL_&lt;timestamp&gt; dirs after YARN-661</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2753">YARN-2753</a>.
+     Major sub-task reported by zhihai xu and fixed by zhihai xu <br>
+     <b>Fix potential issues and code clean up for *NodeLabelsManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2752">YARN-2752</a>.
+     Critical bug reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>ContainerExecutor always append "nice -n" in command on branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2747">YARN-2747</a>.
+     Major test reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>TestAggregatedLogFormat fails in trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2744">YARN-2744</a>.
+     Critical sub-task reported by Sumit Mohanty and fixed by Wangda Tan (capacityscheduler)<br>
+     <b>Under some scenario, it is possible to end up with capacity scheduler configuration that uses labels that no longer exist</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2743">YARN-2743</a>.
+     Blocker bug reported by Arpit Gupta and fixed by Jian He (resourcemanager)<br>
+     <b>Yarn jobs via oozie fail with failed to renew token (secure) or digest mismatch (unsecure) errors when RM is being killed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2741">YARN-2741</a>.
+     Major bug reported by Craig Welch and fixed by Craig Welch (nodemanager)<br>
+     <b>Windows: Node manager cannot serve up log files via the web user interface when yarn.nodemanager.log-dirs to any drive letter other than C: (or, the drive that nodemanager is running on)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2734">YARN-2734</a>.
+     Major bug reported by Sumit Mohanty and fixed by Xuan Gong (log-aggregation)<br>
+     <b>If a sub-folder is encountered by log aggregator it results in invalid aggregated file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2732">YARN-2732</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>Fix syntax error in SecureContainer.apt.vm</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2730">YARN-2730</a>.
+     Critical bug reported by Siqi Li and fixed by Siqi Li <br>
+     <b>DefaultContainerExecutor runs only one localizer at a time</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2726">YARN-2726</a>.
+     Minor sub-task reported by Phil D'Amore and fixed by Wangda Tan (capacityscheduler)<br>
+     <b>CapacityScheduler should explicitly log when an accessible label has no capacity</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2724">YARN-2724</a>.
+     Major bug reported by Sumit Mohanty and fixed by Xuan Gong (log-aggregation)<br>
+     <b>If an unreadable file is encountered during log aggregation then aggregated file in HDFS badly formed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2723">YARN-2723</a>.
+     Major sub-task reported by Phil D'Amore and fixed by Naganarasimha G R (client)<br>
+     <b>rmadmin -replaceLabelsOnNode does not correctly parse port</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2721">YARN-2721</a>.
+     Blocker bug reported by Jian He and fixed by Jian He <br>
+     <b>Race condition: ZKRMStateStore retry logic may throw NodeExist exception </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2720">YARN-2720</a>.
+     Major bug reported by Craig Welch and fixed by Craig Welch (nodemanager)<br>
+     <b>Windows: Wildcard classpath variables not expanded against resources contained in archives</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2717">YARN-2717</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong (log-aggregation)<br>
+     <b>containerLogNotFound log shows multiple time for the same container</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2715">YARN-2715</a>.
+     Blocker bug reported by Zhijie Shen and fixed by Zhijie Shen (resourcemanager)<br>
+     <b>Proxy user is problem for RPC interface if yarn.resourcemanager.webapp.proxyuser is not set.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2711">YARN-2711</a>.
+     Major test reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>TestDefaultContainerExecutor#testContainerLaunchError fails on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2709">YARN-2709</a>.
+     Major sub-task reported by Li Lu and fixed by Li Lu <br>
+     <b>Add retry for timeline client getDelegationToken method</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2707">YARN-2707</a>.
+     Minor bug reported by Ted Yu and fixed by Gera Shegalov <br>
+     <b>Potential null dereference in FSDownload</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2705">YARN-2705</a>.
+     Critical sub-task reported by Wangda Tan and fixed by Wangda Tan (api , client , resourcemanager)<br>
+     <b>Changes of RM node label manager default configuration</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2704">YARN-2704</a>.
+     Critical sub-task reported by Jian He and fixed by Jian He <br>
+     <b> Localization and log-aggregation will fail if hdfs delegation token expired after token-max-life-time</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2703">YARN-2703</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong (nodemanager , resourcemanager)<br>
+     <b>Add logUploadedTime into LogValue for better display</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2701">YARN-2701</a>.
+     Blocker bug reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>Potential race condition in startLocalizer when using LinuxContainerExecutor  </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2700">YARN-2700</a>.
+     Major sub-task reported by Steve Loughran and fixed by Steve Loughran (api , resourcemanager)<br>
+     <b>TestSecureRMRegistryOperations failing on windows: auth problems</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2699">YARN-2699</a>.
+     Blocker sub-task reported by Wangda Tan and fixed by Wangda Tan (client)<br>
+     <b>Fix test timeout in TestResourceTrackerOnHA#testResourceTrackerOnHA</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2698">YARN-2698</a>.
+     Critical sub-task reported by Wangda Tan and fixed by Wangda Tan (api , client , resourcemanager)<br>
+     <b>Move getClusterNodeLabels and getNodeToLabels to YarnClient instead of AdminService</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2692">YARN-2692</a>.
+     Major sub-task reported by Steve Loughran and fixed by Steve Loughran <br>
+     <b>ktutil test hanging on some machines/ktutil versions</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2689">YARN-2689</a>.
+     Major sub-task reported by Steve Loughran and fixed by Steve Loughran (api , resourcemanager)<br>
+     <b>TestSecureRMRegistryOperations failing on windows: secure ZK won't start</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2685">YARN-2685</a>.
+     Major sub-task reported by Wangda Tan and fixed by Wangda Tan (api , client , resourcemanager)<br>
+     <b>Resource on each label not correct when multiple NMs in a same host and some has label some not</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2682">YARN-2682</a>.
+     Minor bug reported by zhihai xu and fixed by zhihai xu (nodemanager)<br>
+     <b>WindowsSecureContainerExecutor should not depend on DefaultContainerExecutor#getFirstApplicationDir. </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2678">YARN-2678</a>.
+     Major sub-task reported by Gour Saha and fixed by Steve Loughran (api , resourcemanager)<br>
+     <b>Improved Yarn Registry service record structure</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2677">YARN-2677</a>.
+     Major sub-task reported by Steve Loughran and fixed by Steve Loughran (api , resourcemanager)<br>
+     <b>registry punycoding of usernames doesn't fix all usernames to be DNS-valid</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2676">YARN-2676</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+     <b>Timeline authentication filter should add support for proxy user</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2673">YARN-2673</a>.
+     Major sub-task reported by Li Lu and fixed by Li Lu <br>
+     <b>Add retry for timeline client put APIs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2671">YARN-2671</a>.
+     Blocker bug reported by Zhijie Shen and fixed by Wangda Tan (resourcemanager)<br>
+     <b>ApplicationSubmissionContext change breaks the existing app submission</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2668">YARN-2668</a>.
+     Major sub-task reported by Steve Loughran and fixed by Steve Loughran (client)<br>
+     <b>yarn-registry JAR won't link against ZK 3.4.5</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2667">YARN-2667</a>.
+     Minor bug reported by Yi Liu and fixed by Yi Liu <br>
+     <b>Fix the release audit warning caused by hadoop-yarn-registry</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2662">YARN-2662</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>TestCgroupsLCEResourcesHandler leaks file descriptors.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2656">YARN-2656</a>.
+     Major bug reported by Varun Vasudev and fixed by Zhijie Shen (resourcemanager)<br>
+     <b>RM web services authentication filter should add support for proxy user</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2652">YARN-2652</a>.
+     Major sub-task reported by Steve Loughran and fixed by Steve Loughran (api)<br>
+     <b>add hadoop-yarn-registry package under hadoop-yarn</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2651">YARN-2651</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong (nodemanager , resourcemanager)<br>
+     <b>Spin off the LogRollingInterval from LogAggregationContext</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2649">YARN-2649</a>.
+     Major bug reported by Ming Ma and fixed by Ming Ma <br>
+     <b>Flaky test TestAMRMRPCNodeUpdates</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2647">YARN-2647</a>.
+     Major sub-task reported by Wangda Tan and fixed by Sunil G (client)<br>
+     <b>Add yarn queue CLI to get queue infos</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2644">YARN-2644</a>.
+     Major sub-task reported by Craig Welch and fixed by Craig Welch <br>
+     <b>Recalculate headroom more frequently to keep it accurate</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2635">YARN-2635</a>.
+     Major bug reported by Wei Yan and fixed by Wei Yan <br>
+     <b>TestRM, TestRMRestart, TestClientToAMTokens should run with both CS and FS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2632">YARN-2632</a>.
+     Blocker sub-task reported by Junping Du and fixed by Junping Du (nodemanager)<br>
+     <b>Document NM Restart feature</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2630">YARN-2630</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>TestDistributedShell#testDSRestartWithPreviousRunningContainers fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2629">YARN-2629</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+     <b>Make distributed shell use the domain-based timeline ACLs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2628">YARN-2628</a>.
+     Major bug reported by Varun Vasudev and fixed by Varun Vasudev (capacityscheduler)<br>
+     <b>Capacity scheduler with DominantResourceCalculator carries out reservation even though slots are free</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2627">YARN-2627</a>.
+     Major improvement reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>Add logs when attemptFailuresValidityInterval is enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2624">YARN-2624</a>.
+     Blocker bug reported by Anubhav Dhoot and fixed by Anubhav Dhoot (nodemanager)<br>
+     <b>Resource Localization fails on a cluster due to existing cache directories</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2621">YARN-2621</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+     <b>Simplify the output when the user doesn't have the access for getDomain(s) </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2617">YARN-2617</a>.
+     Major bug reported by Jun Gong and fixed by Jun Gong (nodemanager)<br>
+     <b>NM does not need to send finished container whose APP is not running to RM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2615">YARN-2615</a>.
+     Blocker sub-task reported by Junping Du and fixed by Junping Du <br>
+     <b>ClientToAMTokenIdentifier and DelegationTokenIdentifier should allow extended fields</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2611">YARN-2611</a>.
+     Major sub-task reported by Subru Krishnan and fixed by Subru Krishnan (capacityscheduler , resourcemanager , scheduler)<br>
+     <b>Fix jenkins findbugs warning and test case failures for trunk merge patch</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2610">YARN-2610</a>.
+     Major bug reported by Ray Chiang and fixed by Ray Chiang <br>
+     <b>Hamlet should close table tags</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2608">YARN-2608</a>.
+     Major bug reported by Wei Yan and fixed by Wei Yan <br>
+     <b>FairScheduler: Potential deadlocks in loading alloc files and clock access</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2607">YARN-2607</a>.
+     Major test reported by Ted Yu and fixed by Wangda Tan <br>
+     <b>TestDistributedShell fails in trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2606">YARN-2606</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai (timelineserver)<br>
+     <b>Application History Server tries to access hdfs before doing secure login</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2602">YARN-2602</a>.
+     Major bug reported by Karam Singh and fixed by Zhijie Shen (timelineserver)<br>
+     <b>Generic History Service of TimelineServer sometimes not able to handle NPE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2596">YARN-2596</a>.
+     Major test reported by Junping Du and fixed by Karthik Kambatla <br>
+     <b>TestWorkPreservingRMRestart fails with FairScheduler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2594">YARN-2594</a>.
+     Blocker bug reported by Karam Singh and fixed by Wangda Tan (resourcemanager)<br>
+     <b>Potential deadlock in RM when querying ApplicationResourceUsageReport</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2591">YARN-2591</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+     <b>AHSWebServices should return FORBIDDEN(403) if the request user doesn't have access to the history data</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2588">YARN-2588</a>.
+     Major bug reported by Rohith and fixed by Rohith (resourcemanager)<br>
+     <b>Standby RM does not transitionToActive if previous transitionToActive is failed with ZK exception.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2584">YARN-2584</a>.
+     Major test reported by Zhijie Shen and fixed by Jian He <br>
+     <b>TestContainerManagerSecurity fails on trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2583">YARN-2583</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong (nodemanager , resourcemanager)<br>
+     <b>Modify the LogDeletionService to support Log aggregation for LRS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2582">YARN-2582</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong (nodemanager , resourcemanager)<br>
+     <b>Log related CLI and Web UI changes for Aggregated Logs in LRS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2581">YARN-2581</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong (nodemanager , resourcemanager)<br>
+     <b>NMs need to find a way to get LogAggregationContext</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2579">YARN-2579</a>.
+     Blocker bug reported by Rohith and fixed by Rohith (resourcemanager)<br>
+     <b>Deadlock when EmbeddedElectorService and FatalEventDispatcher try to transition RM to StandBy at the same time</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2577">YARN-2577</a>.
+     Trivial improvement reported by Miklos Christine and fixed by Miklos Christine (documentation , fairscheduler)<br>
+     <b>Clarify ACL delimiter and how to configure ACL groups only</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2576">YARN-2576</a>.
+     Major sub-task reported by Subru Krishnan and fixed by Subru Krishnan (capacityscheduler , resourcemanager , scheduler)<br>
+     <b>Prepare yarn-1051 branch for merging with trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2569">YARN-2569</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong (nodemanager , resourcemanager)<br>
+     <b>API changes for handling logs of long-running services</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2568">YARN-2568</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>TestAMRMClientOnRMRestart test fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2566">YARN-2566</a>.
+     Critical sub-task reported by zhihai xu and fixed by zhihai xu (nodemanager)<br>
+     <b>DefaultContainerExecutor should pick a working directory randomly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2565">YARN-2565</a>.
+     Major bug reported by Karam Singh and fixed by Zhijie Shen (resourcemanager , timelineserver)<br>
+     <b>RM shouldn't use the old RMApplicationHistoryWriter unless explicitly setting FileSystemApplicationHistoryStore</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2563">YARN-2563</a>.
+     Blocker bug reported by Arpit Gupta and fixed by Zhijie Shen (timelineserver)<br>
+     <b>On secure clusters call to timeline server fails with authentication errors when running a job via oozie</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2562">YARN-2562</a>.
+     Critical bug reported by Vinod Kumar Vavilapalli and fixed by Tsuyoshi OZAWA <br>
+     <b>ContainerId@toString() is unreadable for epoch &gt;0 after YARN-2182</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2561">YARN-2561</a>.
+     Blocker sub-task reported by Tassapol Athiapinya and fixed by Junping Du <br>
+     <b>MR job client cannot reconnect to AM after NM restart.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2559">YARN-2559</a>.
+     Major bug reported by Karam Singh and fixed by Zhijie Shen (resourcemanager , timelineserver)<br>
+     <b>ResourceManager sometime become un-responsive due to NPE in SystemMetricsPublisher</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2558">YARN-2558</a>.
+     Blocker sub-task reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA <br>
+     <b>Updating ContainerTokenIdentifier#read/write to use ContainerId#getContainerId</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2557">YARN-2557</a>.
+     Major bug reported by Xuan Gong and fixed by Xuan Gong (applications/distributed-shell)<br>
+     <b>Add a parameter "attempt_Failures_Validity_Interval" in DistributedShell </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2549">YARN-2549</a>.
+     Minor test reported by Chris Nauroth and fixed by Chris Nauroth (nodemanager , test)<br>
+     <b>TestContainerLaunch fails due to classpath problem with hamcrest classes.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2547">YARN-2547</a>.
+     Major sub-task reported by Jonathan Eagles and fixed by Mit Desai (timelineserver)<br>
+     <b>Cross Origin Filter throws UnsupportedOperationException upon destroy</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2546">YARN-2546</a>.
+     Major bug reported by Doug Haigh and fixed by Varun Vasudev (api)<br>
+     <b>REST API for application creation/submission is using strings for numeric &amp; boolean values</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2544">YARN-2544</a>.
+     Major sub-task reported by Wangda Tan and fixed by Wangda Tan (api , client , resourcemanager)<br>
+     <b>Common server side PB changes (not include user API PB changes)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2542">YARN-2542</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>"yarn application -status &lt;appId&gt;" throws NPE when retrieving the app from the timelineserver</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2541">YARN-2541</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>Fix ResourceManagerRest.apt.vm syntax error</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2540">YARN-2540</a>.
+     Major bug reported by Ashwin Shankar and fixed by Ashwin Shankar (scheduler)<br>
+     <b>FairScheduler: Queue filters not working on scheduler page in RM UI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2539">YARN-2539</a>.
+     Minor improvement reported by Wei Yan and fixed by Wei Yan <br>
+     <b>FairScheduler: Set the default value for maxAMShare to 0.5</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2538">YARN-2538</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong (resourcemanager)<br>
+     <b>Add logs when RM send new AMRMToken to ApplicationMaster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2534">YARN-2534</a>.
+     Major bug reported by zhihai xu and fixed by zhihai xu (scheduler)<br>
+     <b>FairScheduler: Potential integer overflow calculating totalMaxShare</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2531">YARN-2531</a>.
+     Major improvement reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>CGroups - Admins should be allowed to enforce strict cpu limits</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2529">YARN-2529</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+     <b>Generic history service RPC interface doesn't work when service authorization is enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2528">YARN-2528</a>.
+     Major sub-task reported by Jonathan Eagles and fixed by Jonathan Eagles (timelineserver)<br>
+     <b>Cross Origin Filter Http response split vulnerability protection rejects valid origins</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2527">YARN-2527</a>.
+     Major bug reported by Benoy Antony and fixed by Benoy Antony (resourcemanager)<br>
+     <b>NPE in ApplicationACLsManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2526">YARN-2526</a>.
+     Critical bug reported by Wei Yan and fixed by Wei Yan (scheduler-load-simulator)<br>
+     <b>SLS can deadlock when all the threads are taken by AMSimulators</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2523">YARN-2523</a>.
+     Major bug reported by Nishan Shetty and fixed by Rohith (resourcemanager , webapp)<br>
+     <b>ResourceManager UI showing negative value for "Decommissioned Nodes" field</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2519">YARN-2519</a>.
+     Major test reported by Xiaoyu Yao and fixed by Xiaoyu Yao (webapp)<br>
+     <b>Credential Provider related unit tests failed on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2515">YARN-2515</a>.
+     Major sub-task reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+     <b>Update ConverterUtils#toContainerId to parse epoch</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2512">YARN-2512</a>.
+     Major sub-task reported by Jonathan Eagles and fixed by Jonathan Eagles (timelineserver)<br>
+     <b>Allow for origin pattern matching in cross origin filter</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2511">YARN-2511</a>.
+     Major sub-task reported by Jonathan Eagles and fixed by Jonathan Eagles (timelineserver)<br>
+     <b>Allow All Origins by default when Cross Origin Filter is enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2509">YARN-2509</a>.
+     Major sub-task reported by Jonathan Eagles and fixed by Mit Desai (timelineserver)<br>
+     <b>Enable Cross Origin Filter for timeline server only and not all Yarn servers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2508">YARN-2508</a>.
+     Major sub-task reported by Jonathan Eagles and fixed by Mit Desai (timelineserver)<br>
+     <b>Cross Origin configuration parameters prefix are not honored</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2507">YARN-2507</a>.
+     Major sub-task reported by Jonathan Eagles and fixed by Jonathan Eagles (documentation , timelineserver)<br>
+     <b>Document Cross Origin Filter Configuration for ATS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2505">YARN-2505</a>.
+     Major sub-task reported by Wangda Tan and fixed by Craig Welch (resourcemanager)<br>
+     <b>Support get/add/remove/change labels in RM REST API</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2504">YARN-2504</a>.
+     Critical sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+     <b>Support get/add/remove/change labels in RM admin CLI </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2503">YARN-2503</a>.
+     Major sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+     <b>Changes in RM Web UI to better show labels to end users</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2502">YARN-2502</a>.
+     Major sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+     <b>Changes in distributed shell to support specify labels</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2501">YARN-2501</a>.
+     Major sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+     <b>Changes in AMRMClient to support labels</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2500">YARN-2500</a>.
+     Major sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+     <b>Miscellaneous changes in ResourceManager to support labels</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2496">YARN-2496</a>.
+     Major sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+     <b>Changes for capacity scheduler to support allocate resource respect labels</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2494">YARN-2494</a>.
+     Major sub-task reported by Wangda Tan and fixed by Wangda Tan (resourcemanager)<br>
+     <b>Node label manager API and storage implementations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2493">YARN-2493</a>.
+     Major sub-task reported by Wangda Tan and fixed by Wangda Tan (api)<br>
+     <b>API changes for users</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2484">YARN-2484</a>.
+     Trivial bug reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA <br>
+     <b>FileSystemRMStateStore#readFile/writeFile should close FSData(In|Out)putStream in final block</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2475">YARN-2475</a>.
+     Major sub-task reported by Carlo Curino and fixed by Carlo Curino (resourcemanager)<br>
+     <b>ReservationSystem: replan upon capacity reduction</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2468">YARN-2468</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong (log-aggregation , nodemanager , resourcemanager)<br>
+     <b>Log handling for LRS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2462">YARN-2462</a>.
+     Major bug reported by Jason Lowe and fixed by Eric Payne <br>
+     <b>TestNodeManagerResync#testBlockNewContainerRequestsOnStartAndResync should have a test timeout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2460">YARN-2460</a>.
+     Minor bug reported by Ray Chiang and fixed by Ray Chiang <br>
+     <b>Remove obsolete entries from yarn-default.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2459">YARN-2459</a>.
+     Major bug reported by Mayank Bansal and fixed by Mayank Bansal (resourcemanager)<br>
+     <b>RM crashes if App gets rejected for any reason and HA is enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2456">YARN-2456</a>.
+     Major sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+     <b>Possible livelock in CapacityScheduler when RM is recovering apps</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2453">YARN-2453</a>.
+     Major bug reported by zhihai xu and fixed by zhihai xu <br>
+     <b>TestProportionalCapacityPreemptionPolicy fails with FairScheduler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2452">YARN-2452</a>.
+     Major bug reported by zhihai xu and fixed by zhihai xu <br>
+     <b>TestRMApplicationHistoryWriter fails with FairScheduler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2450">YARN-2450</a>.
+     Trivial bug reported by Ray Chiang and fixed by Ray Chiang <br>
+     <b>Fix typos in log messages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2449">YARN-2449</a>.
+     Critical bug reported by Karam Singh and fixed by Varun Vasudev (timelineserver)<br>
+     <b>Timelineserver returns invalid Delegation token in secure kerberos enabled cluster when hadoop.http.filter.initializers are not set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2448">YARN-2448</a>.
+     Major improvement reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>RM should expose the resource types considered during scheduling when AMs register</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2447">YARN-2447</a>.
+     Major bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>RM web services app submission doesn't pass secrets correctly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2446">YARN-2446</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+     <b>Using TimelineNamespace to shield the entities of a user</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2440">YARN-2440</a>.
+     Major bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>Cgroups should allow YARN containers to be limited to allocated cores</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2434">YARN-2434</a>.
+     Major sub-task reported by Jian He and fixed by Jian He <br>
+     <b>RM should not recover containers from previously failed attempt when AM restart is not enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2431">YARN-2431</a>.
+     Major sub-task reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
+     <b>NM restart: cgroup is not removed for reacquired containers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2424">YARN-2424</a>.
+     Blocker bug reported by Allen Wittenauer and fixed by Allen Wittenauer (nodemanager)<br>
+     <b>LCE should support non-cgroups, non-secure mode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2411">YARN-2411</a>.
+     Major improvement reported by Ram Venkatesh and fixed by Ram Venkatesh (capacityscheduler)<br>
+     <b>[Capacity Scheduler] support simple user and group mappings to queues</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2409">YARN-2409</a>.
+     Critical bug reported by Nishan Shetty and fixed by Rohith (resourcemanager)<br>
+     <b>Active to StandBy transition does not stop rmDispatcher that causes 1 AsyncDispatcher thread leak. </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2406">YARN-2406</a>.
+     Major sub-task reported by Jian He and fixed by Tsuyoshi OZAWA <br>
+     <b>Move RM recovery related proto to yarn_server_resourcemanager_recovery.proto</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2405">YARN-2405</a>.
+     Major bug reported by Maysam Yabandeh and fixed by Tsuyoshi OZAWA <br>
+     <b>NPE in FairSchedulerAppsBlock</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2400">YARN-2400</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>TestAMRestart fails intermittently</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2399">YARN-2399</a>.
+     Major improvement reported by Karthik Kambatla and fixed by Karthik Kambatla (fairscheduler)<br>
+     <b>FairScheduler: Merge AppSchedulable and FSSchedulerApp into FSAppAttempt</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2397">YARN-2397</a>.
+     Critical bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>RM and TS web interfaces sometimes return request is a replay error in secure mode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2395">YARN-2395</a>.
+     Major new feature reported by Ashwin Shankar and fixed by Wei Yan (fairscheduler)<br>
+     <b>FairScheduler: Preemption timeout should be configurable per queue</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2394">YARN-2394</a>.
+     Major new feature reported by Ashwin Shankar and fixed by Wei Yan (fairscheduler)<br>
+     <b>FairScheduler: Configure fairSharePreemptionThreshold per queue</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2393">YARN-2393</a>.
+     Major new feature reported by Ashwin Shankar and fixed by Wei Yan (fairscheduler)<br>
+     <b>FairScheduler: Add the notion of steady fair share</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2389">YARN-2389</a>.
+     Major sub-task reported by Subru Krishnan and fixed by Subru Krishnan (capacityscheduler , fairscheduler)<br>
+     <b>Adding support for draining a queue, i.e. killing all apps in the queue</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2388">YARN-2388</a>.
+     Major test reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>TestTimelineWebServices fails on trunk after HADOOP-10791</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2387">YARN-2387</a>.
+     Blocker bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>Resource Manager crashes with NPE due to lack of synchronization</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2378">YARN-2378</a>.
+     Major sub-task reported by Subru Krishnan and fixed by Subru Krishnan (capacityscheduler)<br>
+     <b>Adding support for moving apps between queues in Capacity Scheduler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2377">YARN-2377</a>.
+     Major improvement reported by Gera Shegalov and fixed by Gera Shegalov (nodemanager)<br>
+     <b>Localization exception stack traces are not passed as diagnostic info</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2374">YARN-2374</a>.
+     Major bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>YARN trunk build failing TestDistributedShell.testDSShell</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2373">YARN-2373</a>.
+     Major bug reported by Larry McCay and fixed by Larry McCay <br>
+     <b>WebAppUtils Should Use configuration.getPassword for Accessing SSL Passwords</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2372">YARN-2372</a>.
+     Minor improvement reported by Fengdong Yu and fixed by Fengdong Yu (documentation)<br>
+     <b>There are Chinese Characters in the FairScheduler's document</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2370">YARN-2370</a>.
+     Trivial bug reported by Wenwu Peng and fixed by Wenwu Peng (resourcemanager)<br>
+     <b>Fix comment in o.a.h.y.server.resourcemanager.schedulerAppSchedulingInfo</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2363">YARN-2363</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (resourcemanager)<br>
+     <b>Submitted applications occasionally lack a tracking URL</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2361">YARN-2361</a>.
+     Trivial improvement reported by zhihai xu and fixed by zhihai xu (resourcemanager)<br>
+     <b>RMAppAttempt state machine entries for KILLED state has duplicate event entries</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2360">YARN-2360</a>.
+     Major new feature reported by Ashwin Shankar and fixed by Ashwin Shankar (fairscheduler)<br>
+     <b>Fair Scheduler: Display dynamic fair share for queues on the scheduler page</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2359">YARN-2359</a>.
+     Critical bug reported by zhihai xu and fixed by zhihai xu (resourcemanager)<br>
+     <b>Application hangs when it fails to launch AM container </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2354">YARN-2354</a>.
+     Major sub-task reported by Jian He and fixed by Li Lu <br>
+     <b>DistributedShell may allocate more containers than client specified after it restarts</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2352">YARN-2352</a>.
+     Major improvement reported by Karthik Kambatla and fixed by Karthik Kambatla (scheduler)<br>
+     <b>FairScheduler: Collect metrics on duration of critical methods that affect performance</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2347">YARN-2347</a>.
+     Major sub-task reported by Junping Du and fixed by Junping Du <br>
+     <b>Consolidate RMStateVersion and NMDBSchemaVersion into StateVersion in yarn-server-common</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2343">YARN-2343</a>.
+     Trivial improvement reported by Li Lu and fixed by Li Lu <br>
+     <b>Improve error message on token expire exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2337">YARN-2337</a>.
+     Trivial improvement reported by zhihai xu and fixed by zhihai xu (resourcemanager)<br>
+     <b>ResourceManager sets ClientRMService in RMContext multiple times</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2323">YARN-2323</a>.
+     Minor improvement reported by Hong Zhiguo and fixed by Hong Zhiguo (fairscheduler)<br>
+     <b>FairShareComparator creates too many Resource objects</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2321">YARN-2321</a>.
+     Major bug reported by Leitao Guo and fixed by Leitao Guo (nodemanager)<br>
+     <b>NodeManager web UI can incorrectly report Pmem enforcement</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2317">YARN-2317</a>.
+     Major sub-task reported by Li Lu and fixed by Li Lu (documentation)<br>
+     <b>Update documentation about how to write YARN applications</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2314">YARN-2314</a>.
+     Critical bug reported by Jason Lowe and fixed by Jason Lowe (client)<br>
+     <b>ContainerManagementProtocolProxy can create thousands of threads for a large cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2313">YARN-2313</a>.
+     Major bug reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA (fairscheduler)<br>
+     <b>Livelock can occur in FairScheduler when there are lots of running apps</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2312">YARN-2312</a>.
+     Major sub-task reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+     <b>Marking ContainerId#getId as deprecated</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2308">YARN-2308</a>.
+     Critical bug reported by Wangda Tan and fixed by chang li (resourcemanager , scheduler)<br>
+     <b>NPE happened when RM restart after CapacityScheduler queue configuration changed </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2302">YARN-2302</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+     <b>Refactor TimelineWebServices</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2298">YARN-2298</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (client)<br>
+     <b>Move TimelineClient to yarn-common project</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2295">YARN-2295</a>.
+     Major sub-task reported by Li Lu and fixed by Li Lu <br>
+     <b>Refactor YARN distributed shell with existing public stable API</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2288">YARN-2288</a>.
+     Major sub-task reported by Junping Du and fixed by Junping Du (timelineserver)<br>
+     <b>Data persistent in timelinestore should be versioned</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2279">YARN-2279</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen (timelineserver)<br>
+     <b>Add UTs to cover timeline server authentication</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2277">YARN-2277</a>.
+     Major sub-task reported by Jonathan Eagles and fixed by Jonathan Eagles (timelineserver)<br>
+     <b>Add Cross-Origin support to the ATS REST API</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2274">YARN-2274</a>.
+     Trivial improvement reported by Karthik Kambatla and fixed by Karthik Kambatla (fairscheduler)<br>
+     <b>FairScheduler: Add debug information about cluster capacity, availability and reservations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2273">YARN-2273</a>.
+     Major bug reported by Andy Skelton and fixed by Wei Yan (fairscheduler , resourcemanager)<br>
+     <b>NPE in ContinuousScheduling thread when we lose a node</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2269">YARN-2269</a>.
+     Major bug reported by Yesha Vora and fixed by Craig Welch <br>
+     <b>External links need to be removed from YARN UI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2264">YARN-2264</a>.
+     Major bug reported by Siddharth Seth and fixed by Li Lu <br>
+     <b>Race in DrainDispatcher can cause random test failures</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2260">YARN-2260</a>.
+     Major sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+     <b>Add containers to launchedContainers list in RMNode on container recovery</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2252">YARN-2252</a>.
+     Major bug reported by Ratandeep Ratti and fixed by  (scheduler)<br>
+     <b>Intermittent failure of TestFairScheduler.testContinuousScheduling</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2251">YARN-2251</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Avoid negative elapsed time in JHS/MRAM web UI and services</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2249">YARN-2249</a>.
+     Major sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+     <b>AM release request may be lost on RM restart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2248">YARN-2248</a>.
+     Major sub-task reported by Janos Matyas and fixed by Janos Matyas (capacityscheduler)<br>
+     <b>Capacity Scheduler changes for moving apps between queues</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2244">YARN-2244</a>.
+     Critical bug reported by Anubhav Dhoot and fixed by Anubhav Dhoot (fairscheduler)<br>
+     <b>FairScheduler missing handling of containers for unknown application attempts </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2242">YARN-2242</a>.
+     Major sub-task reported by Li Lu and fixed by Li Lu <br>
+     <b>Improve exception information on AM launch crashes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2237">YARN-2237</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong (resourcemanager)<br>
+     <b>MRAppMaster changes for AMRMToken roll-up</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2229">YARN-2229</a>.
+     Major sub-task reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+     <b>ContainerId can overflow with RM restart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2228">YARN-2228</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>TimelineServer should load pseudo authentication filter when authentication = simple</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2219">YARN-2219</a>.
+     Major bug reported by Ashwin Shankar and fixed by Jian He (resourcemanager)<br>
+     <b>AMs and NMs can get exceptions after recovery but before scheduler knows apps and app-attempts</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2214">YARN-2214</a>.
+     Major improvement reported by Ashwin Shankar and fixed by Ashwin Shankar (scheduler)<br>
+     <b>FairScheduler: preemptContainerPreCheck() in FSParentQueue delays convergence towards fairness</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2212">YARN-2212</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong (resourcemanager)<br>
+     <b>ApplicationMaster needs to find a way to update the AMRMToken periodically</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2211">YARN-2211</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong (resourcemanager)<br>
+     <b>RMStateStore needs to save AMRMToken master key for recovery when RM restart/failover happens </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2209">YARN-2209</a>.
+     Major improvement reported by Jian He and fixed by Jian He <br>
+     <b>Replace AM resync/shutdown command with corresponding exceptions</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2208">YARN-2208</a>.
+     Major sub-task reported by Xuan Gong and fixed by Xuan Gong (resourcemanager)<br>
+     <b>AMRMTokenManager need to have a way to roll over AMRMToken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2207">YARN-2207</a>.
+     Major task reported by Xuan Gong and fixed by Xuan Gong (resourcemanager)<br>
+     <b>Add ability to roll over AMRMToken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2198">YARN-2198</a>.
+     Major improvement reported by Remus Rusanu and fixed by Remus Rusanu <br>
+     <b>Remove the need to run NodeManager as privileged account for Windows Secure Container Executor</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2197">YARN-2197</a>.
+     Minor improvement reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Add a link to YARN CHANGES.txt in the left side of doc</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2182">YARN-2182</a>.
+     Major sub-task reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA <br>
+     <b>Update ContainerId#toString() to avoid conflicts before and after RM restart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2181">YARN-2181</a>.
+     Major bug reported by Wangda Tan and fixed by Wangda Tan (resourcemanager , webapp)<br>
+     <b>Add preemption info to RM Web UI and add logs when preemption occurs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2174">YARN-2174</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Enabling HTTPs for the writer REST API of TimelineServer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2161">YARN-2161</a>.
+     Major bug reported by Binglin Chang and fixed by Binglin Chang <br>
+     <b>Fix build on macosx: YARN parts</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2158">YARN-2158</a>.
+     Minor test reported by Ted Yu and fixed by Varun Vasudev <br>
+     <b>TestRMWebServicesAppsModification sometimes fails in trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2153">YARN-2153</a>.
+     Major sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+     <b>Ensure distributed shell work with RM work-preserving recovery</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2147">YARN-2147</a>.
+     Minor bug reported by Jason Lowe and fixed by Chen He (resourcemanager)<br>
+     <b>client lacks delegation token exception details when application submit fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2138">YARN-2138</a>.
+     Major bug reported by Jian He and fixed by Varun Saxena <br>
+     <b>Cleanup notifyDone* methods in RMStateStore</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2131">YARN-2131</a>.
+     Major new feature reported by Karthik Kambatla and fixed by Robert Kanter (resourcemanager)<br>
+     <b>Add a way to format the RMStateStore</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2102">YARN-2102</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>More generalized timeline ACLs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2088">YARN-2088</a>.
+     Major bug reported by Binglin Chang and fixed by Binglin Chang <br>
+     <b>Fix code bug in GetApplicationsRequestPBImpl#mergeLocalToBuilder</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2080">YARN-2080</a>.
+     Major sub-task reported by Subru Krishnan and fixed by Subru Krishnan (resourcemanager)<br>
+     <b>Admission Control: Integrate Reservation subsystem with ResourceManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2070">YARN-2070</a>.
+     Minor sub-task reported by Zhijie Shen and fixed by Robert Kanter <br>
+     <b>DistributedShell publishes unfriendly user information to the timeline server</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2045">YARN-2045</a>.
+     Major sub-task reported by Junping Du and fixed by Junping Du (nodemanager)<br>
+     <b>Data persisted in NM should be versioned</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2035">YARN-2035</a>.
+     Major sub-task reported by Jonathan Eagles and fixed by Jonathan Eagles <br>
+     <b>FileSystemApplicationHistoryStore blocks RM and AHS while NN is in safemode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2034">YARN-2034</a>.
+     Minor bug reported by Jason Lowe and fixed by Chen He (nodemanager)<br>
+     <b>Description for yarn.nodemanager.localizer.cache.target-size-mb is incorrect</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2033">YARN-2033</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Zhijie Shen <br>
+     <b>Merging generic-history into the Timeline Store</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2026">YARN-2026</a>.
+     Major bug reported by Ashwin Shankar and fixed by Ashwin Shankar (scheduler)<br>
+     <b>Fair scheduler: Consider only active queues for computing fairshare</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2013">YARN-2013</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Tsuyoshi OZAWA (nodemanager)<br>
+     <b>The diagnostics is always the ExitCodeException stack when the container crashes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2010">YARN-2010</a>.
+     Blocker bug reported by bc Wong and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>Handle app-recovery failures gracefully</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2008">YARN-2008</a>.
+     Major sub-task reported by Chen He and fixed by Craig Welch <br>
+     <b>CapacityScheduler may report incorrect queueMaxCap if there is hierarchy queue structure </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2001">YARN-2001</a>.
+     Major sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+     <b>Threshold for RM to accept requests from AM after failover</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1994">YARN-1994</a>.
+     Major improvement reported by Arpit Agarwal and fixed by Craig Welch (nodemanager , resourcemanager , webapp)<br>
+     <b>Expose YARN/MR endpoints on multiple interfaces</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1972">YARN-1972</a>.
+     Major sub-task reported by Remus Rusanu and fixed by Remus Rusanu (nodemanager)<br>
+     <b>Implement secure Windows Container Executor</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1964">YARN-1964</a>.
+     Major new feature reported by Arun C Murthy and fixed by Abin Shahab <br>
+     <b>Create Docker analog of the LinuxContainerExecutor in YARN</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1959">YARN-1959</a>.
+     Major bug reported by Sandy Ryza and fixed by Anubhav Dhoot <br>
+     <b>Fix headroom calculation in FairScheduler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1954">YARN-1954</a>.
+     Major improvement reported by Zhijie Shen and fixed by Tsuyoshi OZAWA (client)<br>
+     <b>Add waitFor to AMRMClient(Async)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1922">YARN-1922</a>.
+     Major bug reported by Billie Rinaldi and fixed by Billie Rinaldi (nodemanager)<br>
+     <b>Process group remains alive after container process is killed externally</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1919">YARN-1919</a>.
+     Minor bug reported by Devaraj K and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+     <b>Potential NPE in EmbeddedElectorService#stop</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1918">YARN-1918</a>.
+     Trivial improvement reported by Devaraj K and fixed by Anandha L Ranganathan <br>
+     <b>Typo in description and error message for 'yarn.resourcemanager.cluster-id'</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1915">YARN-1915</a>.
+     Blocker sub-task reported by Hitesh Shah and fixed by Jason Lowe <br>
+     <b>ClientToAMTokenMasterKey should be provided to AM at launch time</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1879">YARN-1879</a>.
+     Critical sub-task reported by Jian He and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+     <b>Mark Idempotent/AtMostOnce annotations to ApplicationMasterProtocol for RM fail over</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1857">YARN-1857</a>.
+     Critical sub-task reported by Thomas Graves and fixed by Chen He (capacityscheduler)<br>
+     <b>CapacityScheduler headroom doesn't account for other AM's running</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1796">YARN-1796</a>.
+     Minor bug reported by Aaron T. Myers and fixed by Aaron T. Myers (nodemanager)<br>
+     <b>container-executor shouldn't require o-r permissions</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1779">YARN-1779</a>.
+     Blocker sub-task reported by Karthik Kambatla and fixed by Jian He (resourcemanager)<br>
+     <b>Handle AMRMTokens across RM failover</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1769">YARN-1769</a>.
+     Major improvement reported by Thomas Graves and fixed by Thomas Graves (capacityscheduler)<br>
+     <b>CapacityScheduler:  Improve reservations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1712">YARN-1712</a>.
+     Major sub-task reported by Carlo Curino and fixed by Carlo Curino (capacityscheduler , resourcemanager)<br>
+     <b>Admission Control: plan follower</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1711">YARN-1711</a>.
+     Major sub-task reported by Carlo Curino and fixed by Carlo Curino <br>
+     <b>CapacityOverTimePolicy: a policy to enforce quotas over time for YARN-1709</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1710">YARN-1710</a>.
+     Major sub-task reported by Carlo Curino and fixed by Carlo Curino (resourcemanager)<br>
+     <b>Admission Control: agents to allocate reservation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1709">YARN-1709</a>.
+     Major sub-task reported by Carlo Curino and fixed by Subru Krishnan (resourcemanager)<br>
+     <b>Admission Control: Reservation subsystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1708">YARN-1708</a>.
+     Major sub-task reported by Carlo Curino and fixed by Subru Krishnan (resourcemanager)<br>
+     <b>Add a public API to reserve resources (part of YARN-1051)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1707">YARN-1707</a>.
+     Major sub-task reported by Carlo Curino and fixed by Carlo Curino (capacityscheduler)<br>
+     <b>Making the CapacityScheduler more dynamic</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1372">YARN-1372</a>.
+     Major sub-task reported by Bikas Saha and fixed by Anubhav Dhoot (resourcemanager)<br>
+     <b>Ensure all completed containers are reported to the AMs across RM restart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1370">YARN-1370</a>.
+     Major sub-task reported by Bikas Saha and fixed by Anubhav Dhoot (resourcemanager)<br>
+     <b>Fair scheduler to re-populate container allocation state</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1367">YARN-1367</a>.
+     Major sub-task reported by Bikas Saha and fixed by Anubhav Dhoot (resourcemanager)<br>
+     <b>After restart NM should resync with the RM without killing containers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1354">YARN-1354</a>.
+     Major sub-task reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
+     <b>Recover applications upon nodemanager restart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1342">YARN-1342</a>.
+     Major sub-task reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
+     <b>Recover container tokens upon nodemanager restart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1341">YARN-1341</a>.
+     Major sub-task reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
+     <b>Recover NMTokens upon nodemanager restart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1337">YARN-1337</a>.
+     Major sub-task reported by Jason Lowe and fixed by Jason Lowe (nodemanager)<br>
+     <b>Recover containers upon nodemanager restart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1326">YARN-1326</a>.
+     Major sub-task reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA <br>
+     <b>RM should log using RMStore at startup time</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1250">YARN-1250</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Zhijie Shen <br>
+     <b>Generic history service should support application-acls</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1063">YARN-1063</a>.
+     Major sub-task reported by Kyle Leckie and fixed by Remus Rusanu (nodemanager)<br>
+     <b>Winutils needs ability to create task as domain user</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1051">YARN-1051</a>.
+     Major improvement reported by Carlo Curino and fixed by Carlo Curino (capacityscheduler , resourcemanager , scheduler)<br>
+     <b>YARN Admission Control/Planner: enhancing the resource allocation model with time.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-668">YARN-668</a>.
+     Blocker sub-task reported by Siddharth Seth and fixed by Junping Du <br>
+     <b>TokenIdentifier serialization should consider Unknown fields</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-611">YARN-611</a>.
+     Major sub-task reported by Chris Riccomini and fixed by Xuan Gong (resourcemanager)<br>
+     <b>Add an AM retry count reset window to YARN RM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-415">YARN-415</a>.
+     Major new feature reported by Kendall Thrapp and fixed by Eric Payne (resourcemanager)<br>
+     <b>Capture aggregate memory allocation at the app-level for chargeback</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-90">YARN-90</a>.
+     Major sub-task reported by Ravi Gummadi and fixed by Varun Vasudev (nodemanager)<br>
+     <b>NodeManager should identify failed disks becoming good again</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6156">MAPREDUCE-6156</a>.
+     Blocker bug reported by Sidharta Seethana and fixed by Junping Du <br>
+     <b>Fetcher - connect() doesn't handle connection refused correctly </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6142">MAPREDUCE-6142</a>.
+     Critical sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Test failure in TestJobHistoryEventHandler and TestMRTimelineEventHandling</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6126">MAPREDUCE-6126</a>.
+     Major bug reported by Junping Du and fixed by Junping Du <br>
+     <b>(Rumen) Rumen tool returns error "java.lang.IllegalArgumentException: JobBuilder.process(HistoryEvent): unknown event type"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6125">MAPREDUCE-6125</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai (test)<br>
+     <b>TestContainerLauncherImpl sometimes fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6123">MAPREDUCE-6123</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>TestCombineFileInputFormat incorrectly starts 2 MiniDFSCluster instances.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6122">MAPREDUCE-6122</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>TestLineRecordReader may fail due to test data files checked out of git with incorrect line endings.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6115">MAPREDUCE-6115</a>.
+     Minor test reported by Ted Yu and fixed by Binglin Chang <br>
+     <b>TestPipeApplication#testSubmitter fails in trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6109">MAPREDUCE-6109</a>.
+     Trivial bug reported by Charles Lamb and fixed by Charles Lamb (distcp)<br>
+     <b>Fix minor typo in distcp -p usage text</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6104">MAPREDUCE-6104</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>TestJobHistoryParsing.testPartialJob fails in branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6095">MAPREDUCE-6095</a>.
+     Major bug reported by Gera Shegalov and fixed by Gera Shegalov (applicationmaster , distributed-cache)<br>
+     <b>Enable DistributedCache for uber-mode Jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6094">MAPREDUCE-6094</a>.
+     Minor bug reported by Sangjin Lee and fixed by Akira AJISAKA (test)<br>
+     <b>TestMRCJCFileInputFormat.testAddInputPath() fails on trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6093">MAPREDUCE-6093</a>.
+     Trivial bug reported by Charles Lamb and fixed by Charles Lamb (distcp , documentation)<br>
+     <b>minor distcp doc edits</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6091">MAPREDUCE-6091</a>.
+     Major bug reported by Sangjin Lee and fixed by Sangjin Lee (client)<br>
+     <b>YARNRunner.getJobStatus() fails with ApplicationNotFoundException if the job rolled off the RM view</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6090">MAPREDUCE-6090</a>.
+     Major bug reported by Robert Kanter and fixed by Robert Kanter (client)<br>
+     <b>mapred hsadmin getGroups fails to connect in some cases</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6086">MAPREDUCE-6086</a>.
+     Major improvement reported by zhihai xu and fixed by zhihai xu (security)<br>
+     <b>mapreduce.job.credentials.binary should allow all URIs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6075">MAPREDUCE-6075</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (jobhistoryserver)<br>
+     <b>HistoryServerFileSystemStateStore can create zero-length files</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6073">MAPREDUCE-6073</a>.
+     Trivial bug reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA (documentation)<br>
+     <b>Description of mapreduce.job.speculative.slowtaskthreshold in mapred-default should be moved into description tags</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6072">MAPREDUCE-6072</a>.
+     Minor improvement reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Remove INSTALL document</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6071">MAPREDUCE-6071</a>.
+     Trivial improvement reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA (client)<br>
+     <b>JobImpl#makeUberDecision doesn't log that Uber mode is disabled because of too much CPUs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6070">MAPREDUCE-6070</a>.
+     Trivial improvement reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA (documentation)<br>
+     <b>yarn.app.am.resource.mb/cpu-vcores affects uber mode but is not documented</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6063">MAPREDUCE-6063</a>.
+     Major bug reported by zhihai xu and fixed by zhihai xu (mrv1 , mrv2)<br>
+     <b>In sortAndSpill of MapTask.java, size is calculated wrongly when bufend &lt; bufstart.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6052">MAPREDUCE-6052</a>.
+     Major bug reported by Junping Du and fixed by Junping Du <br>
+     <b>Support overriding log4j.properties per job</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6051">MAPREDUCE-6051</a>.
+     Trivial bug reported by Ray Chiang and fixed by Ray Chiang <br>
+     <b>Fix typos in log messages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6048">MAPREDUCE-6048</a>.
+     Minor test reported by Ted Yu and fixed by Varun Vasudev <br>
+     <b>TestJavaSerialization fails in trunk build</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6044">MAPREDUCE-6044</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen (jobhistoryserver)<br>
+     <b>Fully qualified intermediate done directory will break per-user dir creation on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6036">MAPREDUCE-6036</a>.
+     Major bug reported by Mit Desai and fixed by chang li <br>
+     <b>TestJobEndNotifier fails intermittently in branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6032">MAPREDUCE-6032</a>.
+     Major bug reported by Benjamin Zhitomirsky and fixed by Benjamin Zhitomirsky (jobhistoryserver)<br>
+     <b>Unable to check mapreduce job status if submitted using a non-default namenode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6029">MAPREDUCE-6029</a>.
+     Major bug reported by Ted Yu and fixed by Mit Desai <br>
+     <b>TestCommitterEventHandler fails in trunk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6024">MAPREDUCE-6024</a>.
+     Critical improvement reported by zhaoyunjiong and fixed by zhaoyunjiong (mr-am , task)<br>
+     <b>java.net.SocketTimeoutException in Fetcher caused jobs stuck for more than 1 hour</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6022">MAPREDUCE-6022</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe <br>
+     <b>map_input_file is missing from streaming job environment</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6021">MAPREDUCE-6021</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (mr-am)<br>
+     <b>MR AM should have working directory in LD_LIBRARY_PATH</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6019">MAPREDUCE-6019</a>.
+     Major bug reported by Xuan Gong and fixed by Craig Welch <br>
+     <b>MapReduce changes for exposing YARN/MR endpoints on multiple interfaces.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6018">MAPREDUCE-6018</a>.
+     Major sub-task reported by Jonathan Eagles and fixed by Robert Kanter <br>
+     <b>Create a framework specific config to enable timeline server</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6014">MAPREDUCE-6014</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>New task status field in task attempts table can lead to an empty web page </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6012">MAPREDUCE-6012</a>.
+     Major bug reported by Julien Serdaru and fixed by Wei Yan <br>
+     <b>DBInputSplit creates invalid ranges on Oracle</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-6010">MAPREDUCE-6010</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (jobhistoryserver)<br>
+     <b>HistoryServerFileSystemStateStore fails to update tokens</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5999">MAPREDUCE-5999</a>.
+     Minor bug reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Fix dead link in InputFormat javadoc</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5998">MAPREDUCE-5998</a>.
+     Minor bug reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>CompositeInputFormat javadoc is broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5974">MAPREDUCE-5974</a>.
+     Major sub-task reported by Todd Lipcon and fixed by Todd Lipcon (task)<br>
+     <b>Allow specifying multiple MapOutputCollectors with fallback</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5971">MAPREDUCE-5971</a>.
+     Trivial improvement reported by Charles Lamb and fixed by Charles Lamb (distcp)<br>
+     <b>Move the default options for distcp -p to DistCpOptionSwitch</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5970">MAPREDUCE-5970</a>.
+     Minor improvement reported by Gera Shegalov and fixed by Gera Shegalov (applicationmaster , client)<br>
+     <b>Provide a boolean switch to enable MR-AM profiling</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5963">MAPREDUCE-5963</a>.
+     Major sub-task reported by Junping Du and fixed by Junping Du <br>
+     <b>ShuffleHandler DB schema should be versioned with compatible/incompatible changes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5960">MAPREDUCE-5960</a>.
+     Major bug reported by Gera Shegalov and fixed by Gera Shegalov (client)<br>
+     <b>JobSubmitter's check whether job.jar is local is incorrect with no authority in job jar path.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5958">MAPREDUCE-5958</a>.
+     Minor bug reported by Emilio Coppa and fixed by Emilio Coppa <br>
+     <b>Wrong reduce task progress if map output is compressed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5957">MAPREDUCE-5957</a>.
+     Major bug reported by Sangjin Lee and fixed by Sangjin Lee <br>
+     <b>AM throws ClassNotFoundException with job classloader enabled if custom output format/committer is used</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5956">MAPREDUCE-5956</a>.
+     Blocker sub-task reported by Vinod Kumar Vavilapalli and fixed by Wangda Tan (applicationmaster , mrv2)<br>
+     <b>MapReduce AM should not use maxAttempts to determine if this is the last retry</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5950">MAPREDUCE-5950</a>.
+     Major bug reported by Yongjun Zhang and fixed by Akira AJISAKA (documentation)<br>
+     <b>incorrect description in distcp2 document</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5945">MAPREDUCE-5945</a>.
+     Minor sub-task reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Update the description of GenericOptionsParser -jt option</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5943">MAPREDUCE-5943</a>.
+     Minor improvement reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Separate mapred commands from CommandsManual.apt.vm</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5942">MAPREDUCE-5942</a>.
+     Minor sub-task reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Remove MRv1 commands from CommandsManual.apt.vm</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5933">MAPREDUCE-5933</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Robert Kanter (mr-am)<br>
+     <b>Enable MR AM to post history events to the timeline server</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5931">MAPREDUCE-5931</a>.
+     Minor bug reported by Gera Shegalov and fixed by Gera Shegalov (test)<br>
+     <b>Validate SleepJob command line parameters</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5910">MAPREDUCE-5910</a>.
+     Major task reported by Rohith and fixed by Rohith (applicationmaster)<br>
+     <b>MRAppMaster should handle Resync from RM instead of shutting down.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5906">MAPREDUCE-5906</a>.
+     Minor improvement reported by Akira AJISAKA and fixed by Akira AJISAKA <br>
+     <b>Inconsistent configuration in property "mapreduce.reduce.shuffle.input.buffer.percent"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5891">MAPREDUCE-5891</a>.
+     Major sub-task reported by Jason Lowe and fixed by Junping Du <br>
+     <b>Improved shuffle error handling across NM restarts</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5885">MAPREDUCE-5885</a>.
+     Major bug reported by Jason Lowe and fixed by Chen He (test)<br>
+     <b>build/test/test.mapred.spill causes release audit warnings</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5878">MAPREDUCE-5878</a>.
+     Major bug reported by Sangjin Lee and fixed by Sangjin Lee (mrv2)<br>
+     <b>some standard JDK APIs are not part of system classes defaults</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5873">MAPREDUCE-5873</a>.
+     Major bug reported by Siqi Li and fixed by Siqi Li <br>
+     <b>Shuffle bandwidth computation includes time spent waiting for maps</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5866">MAPREDUCE-5866</a>.
+     Major test reported by Varun Vasudev and fixed by Varun Vasudev (client , test)<br>
+     <b>TestFixedLengthInputFormat fails in windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5831">MAPREDUCE-5831</a>.
+     Blocker bug reported by Zhijie Shen and fixed by Junping Du (client , mr-am)<br>
+     <b>Old MR client is not compatible with new MR application</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5796">MAPREDUCE-5796</a>.
+     Minor bug reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Use current version of the archive name in DistributedCacheDeploy document</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5756">MAPREDUCE-5756</a>.
+     Major bug reported by Jason Dere and fixed by Jason Dere <br>
+     <b>CombineFileInputFormat.getSplits() including directories in its results</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5597">MAPREDUCE-5597</a>.
+     Minor bug reported by Christopher Tubbs and fixed by Akira AJISAKA (client , documentation , job submission)<br>
+     <b>Missing alternatives in javadocs for deprecated constructors in mapreduce.Job</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5595">MAPREDUCE-5595</a>.
+     Trivial bug reported by Efe Gencer and fixed by Akira AJISAKA <br>
+     <b>Typo in MergeManagerImpl.java</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5542">MAPREDUCE-5542</a>.
+     Major bug reported by Jason Lowe and fixed by Rohith (client , mrv2)<br>
+     <b>Killing a job just as it finishes can generate an NPE in client</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5363">MAPREDUCE-5363</a>.
+     Minor bug reported by Sandy Ryza and fixed by Akira AJISAKA (mrv1 , mrv2)<br>
+     <b>Fix doc and spelling for TaskCompletionEvent#getTaskStatus and getStatus</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5279">MAPREDUCE-5279</a>.
+     Critical bug reported by Peng Zhang and fixed by Peng Zhang (mrv2 , scheduler)<br>
+     <b>Jobs can deadlock if headroom is limited by cpu instead of memory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5130">MAPREDUCE-5130</a>.
+     Major improvement reported by Sandy Ryza and fixed by Ray Chiang (documentation)<br>
+     <b>Add missing job config options to mapred-default.xml</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-4791">MAPREDUCE-4791</a>.
+     Minor improvement reported by Matt Lavin and fixed by Akira AJISAKA (documentation)<br>
+     <b>Javadoc for KeyValueTextInputFormat should include default separator and how to change it</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-883">MAPREDUCE-883</a>.
+     Minor improvement reported by Koji Noguchi and fixed by Akira AJISAKA (documentation , harchive)<br>
+     <b>harchive: Document how to unarchive</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7391">HDFS-7391</a>.
+     Blocker bug reported by Robert Kanter and fixed by Robert Kanter (webhdfs)<br>
+     <b>Re-enable SSLv2Hello in HttpFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7387">HDFS-7387</a>.
+     Critical bug reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>NFS may only do partial commit due to a race between COMMIT and write</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7385">HDFS-7385</a>.
+     Blocker bug reported by jiangyu and fixed by jiangyu (namenode)<br>
+     <b>ThreadLocal used in FSEditLog class causes FSImage permission mess up</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7383">HDFS-7383</a>.
+     Major bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (datanode)<br>
+     <b>DataNode.requestShortCircuitFdsForRead may throw NullPointerException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7382">HDFS-7382</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (datanode , security)<br>
+     <b>DataNode in secure mode may throw NullPointerException if client connects before DataNode registers itself with NameNode.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7379">HDFS-7379</a>.
+     Minor bug reported by Xiaoyu Yao and fixed by Xiaoyu Yao (test)<br>
+     <b>TestBalancer#testBalancerWithRamDisk creates test files incorrectly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7367">HDFS-7367</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (hdfs-client)<br>
+     <b>HDFS short-circuit read cannot negotiate shared memory slot and file descriptors when SASL is enabled on DataTransferProtocol.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7364">HDFS-7364</a>.
+     Minor bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (balancer &amp; mover)<br>
+     <b>Balancer always shows zero Bytes Already Moved</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7359">HDFS-7359</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (journal-node , namenode)<br>
+     <b>NameNode in secured HA cluster fails to start if dfs.namenode.secondary.http-address cannot be interpreted as a network address.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7355">HDFS-7355</a>.
+     Trivial test reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>TestDataNodeVolumeFailure#testUnderReplicationAfterVolFailure fails on Windows, because we cannot deny access to the file owner.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7340">HDFS-7340</a>.
+     Major bug reported by Arpit Gupta and fixed by Jing Zhao (ha)<br>
+     <b>make rollingUpgrade start/finalize idempotent</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7334">HDFS-7334</a>.
+     Minor bug reported by Charles Lamb and fixed by Charles Lamb (test)<br>
+     <b>Fix periodic failures of TestCheckpoint#testTooManyEditReplayFailures</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7328">HDFS-7328</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>TestTraceAdmin assumes Unix line endings.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7313">HDFS-7313</a>.
+     Major improvement reported by Chris Nauroth and fixed by Chris Nauroth (datanode , hdfs-client , security)<br>
+     <b>Support optional configuration of AES cipher suite on DataTransferProtocol.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7309">HDFS-7309</a>.
+     Minor bug reported by Ravi Prakash and fixed by Colin Patrick McCabe <br>
+     <b>XMLUtils.mangleXmlString doesn't seem to handle less than sign</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7305">HDFS-7305</a>.
+     Minor bug reported by Arpit Gupta and fixed by Jing Zhao (webhdfs)<br>
+     <b>NPE seen in webhdfs FS while running SLive</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7300">HDFS-7300</a>.
+     Critical bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>The getMaxNodesPerRack() method in BlockPlacementPolicyDefault is flawed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7296">HDFS-7296</a>.
+     Minor bug reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>HdfsConstants#MEMORY_STORAGE_POLICY_ID and HdfsConstants#MEMORY_STORAGE_POLICY_NAME are missing in branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7291">HDFS-7291</a>.
+     Major sub-task reported by Xiaoyu Yao and fixed by Xiaoyu Yao (datanode)<br>
+     <b>Persist in-memory replicas with appropriate unbuffered copy API on POSIX and Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7287">HDFS-7287</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash <br>
+     <b>The OfflineImageViewer (OIV) can output invalid XML depending on the filename</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7276">HDFS-7276</a>.
+     Major improvement reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (hdfs-client)<br>
+     <b>Limit the number of byte arrays used by DFSOutputStream</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7274">HDFS-7274</a>.
+     Blocker bug reported by Robert Kanter and fixed by Robert Kanter (webhdfs)<br>
+     <b>Disable SSLv3 in HttpFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7260">HDFS-7260</a>.
+     Minor improvement reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (hdfs-client)<br>
+     <b>Make DFSOutputStream.MAX_PACKETS configurable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7259">HDFS-7259</a>.
+     Major bug reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Unresponsive NFS mount point due to deferred COMMIT response</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7243">HDFS-7243</a>.
+     Major bug reported by Yi Liu and fixed by Charles Lamb (encryption , namenode)<br>
+     <b>HDFS concat operation should not be allowed in Encryption Zone</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7237">HDFS-7237</a>.
+     Minor bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (namenode)<br>
+     <b>namenode -rollingUpgrade throws ArrayIndexOutOfBoundsException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7236">HDFS-7236</a>.
+     Major bug reported by Yongjun Zhang and fixed by Yongjun Zhang <br>
+     <b>Fix TestOpenFilesWithSnapshot#testOpenFilesWithMultipleSnapshots</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7233">HDFS-7233</a>.
+     Major improvement reported by Rushabh S Shah and fixed by Rushabh S Shah (namenode)<br>
+     <b>NN logs unnecessary org.apache.hadoop.hdfs.protocol.UnresolvedPathException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7230">HDFS-7230</a>.
+     Major improvement reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (documentation)<br>
+     <b>Add rolling downgrade documentation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7228">HDFS-7228</a>.
+     Major improvement reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>Add an SSD policy into the default BlockStoragePolicySuite</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7226">HDFS-7226</a>.
+     Major bug reported by Yongjun Zhang and fixed by Yongjun Zhang (ha)<br>
+     <b>TestDNFencing.testQueueingWithAppend failed often in latest test</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7221">HDFS-7221</a>.
+     Minor bug reported by Charles Lamb and fixed by Charles Lamb (test)<br>
+     <b>TestDNFencingWithReplication fails consistently</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7218">HDFS-7218</a>.
+     Minor bug reported by Charles Lamb and fixed by Charles Lamb (namenode)<br>
+     <b>FSNamesystem ACL operations should write to audit log on failure</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7217">HDFS-7217</a>.
+     Major improvement reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Better batching of IBRs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7215">HDFS-7215</a>.
+     Minor improvement reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Add JvmPauseMonitor to NFS gateway</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7208">HDFS-7208</a>.
+     Major bug reported by Ming Ma and fixed by Ming Ma (namenode)<br>
+     <b>NN doesn't schedule replication when a DN storage fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7199">HDFS-7199</a>.
+     Critical bug reported by Jason Lowe and fixed by Rushabh S Shah (hdfs-client)<br>
+     <b>DFSOutputStream should not silently drop data if DataStreamer crashes with an unchecked exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7195">HDFS-7195</a>.
+     Major improvement reported by Yi Liu and fixed by Chris Nauroth (documentation , security)<br>
+     <b>Update user doc of secure mode about Datanodes don't require root or jsvc</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7185">HDFS-7185</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Jing Zhao (namenode)<br>
+     <b>The active NameNode will not accept an fsimage sent from the standby during rolling upgrade</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7181">HDFS-7181</a>.
+     Critical sub-task reported by Andrew Wang and fixed by Andrew Wang (encryption)<br>
+     <b>Remove incorrect precondition check on key length in FileEncryptionInfo</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7180">HDFS-7180</a>.
+     Critical bug reported by Eric Zhiqiang Ma and fixed by Brandon Li (nfs)<br>
+     <b>NFSv3 gateway frequently gets stuck due to GC</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7179">HDFS-7179</a>.
+     Critical sub-task reported by Andrew Wang and fixed by Andrew Wang (encryption)<br>
+     <b>DFSClient should instantiate a KeyProvider, not a KeyProviderCryptoExtension</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7178">HDFS-7178</a>.
+     Major bug reported by Arpit Agarwal and fixed by Arpit Agarwal (test)<br>
+     <b>Additional unit test for replica write with full disk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7176">HDFS-7176</a>.
+     Minor bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (namenode)<br>
+     <b>The namenode usage message doesn't include "-rollingupgrade started"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7172">HDFS-7172</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>Test data files may be checked out of git with incorrect line endings, causing test failures in TestHDFSCLI.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7171">HDFS-7171</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode)<br>
+     <b>Fix Jenkins failures in HDFS-6581 branch</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7169">HDFS-7169</a>.
+     Minor bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (build)<br>
+     <b>Fix a findbugs warning in ReplaceDatanodeOnFailure</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7167">HDFS-7167</a>.
+     Major bug reported by Prabushankar Chinnasamy and fixed by Jing Zhao (balancer &amp; mover)<br>
+     <b>NPE while running Mover if the given path is for a file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7162">HDFS-7162</a>.
+     Major bug reported by Chengbing Liu and fixed by Chengbing Liu (fuse-dfs)<br>
+     <b>Wrong path when deleting through fuse-dfs a file which already exists in trash</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7159">HDFS-7159</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (namenode)<br>
+     <b>Use block storage policy to set lazy persist preference</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7158">HDFS-7158</a>.
+     Major improvement reported by Haohui Mai and fixed by Haohui Mai <br>
+     <b>Reduce the memory usage of WebImageViewer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7157">HDFS-7157</a>.
+     Major sub-task reported by Lei (Eddy) Xu and fixed by Lei (Eddy) Xu (datanode)<br>
+     <b>Using Time.now() for recording start/end time of reconfiguration tasks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7156">HDFS-7156</a>.
+     Major bug reported by Konstantin Shvachko and fixed by Masahiro Yamaguchi (documentation)<br>
+     <b>Fsck documentation is outdated.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7155">HDFS-7155</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode)<br>
+     <b>Bugfix in createLocatedFileStatus caused by bad merge</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7154">HDFS-7154</a>.
+     Major sub-task reported by Lei (Eddy) Xu and fixed by Lei (Eddy) Xu (datanode)<br>
+     <b>Fix returning value of starting reconfiguration task</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7153">HDFS-7153</a>.
+     Major improvement reported by Arpit Agarwal and fixed by Arpit Agarwal (namenode)<br>
+     <b>Add storagePolicy to NN edit log during file creation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7148">HDFS-7148</a>.
+     Major bug reported by Andrew Wang and fixed by Andrew Wang (encryption)<br>
+     <b>TestEncryptionZones#testIsEncryptedMethod fails on branch-2 after archival storage merge</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7147">HDFS-7147</a>.
+     Blocker bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (documentation)<br>
+     <b>Update archival storage user documentation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7144">HDFS-7144</a>.
+     Minor sub-task reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (datanode)<br>
+     <b>Fix findbugs warnings in RamDiskReplicaTracker</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7143">HDFS-7143</a>.
+     Major sub-task reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (datanode)<br>
+     <b>Fix findbugs warnings in HDFS-6581 branch</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7140">HDFS-7140</a>.
+     Minor sub-task reported by Jing Zhao and fixed by Jing Zhao (hdfs-client , namenode)<br>
+     <b>Add a tool to list all the existing block storage policies</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7139">HDFS-7139</a>.
+     Minor sub-task reported by Zhe Zhang and fixed by Zhe Zhang <br>
+     <b>Unit test for creating encryption zone on root path</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7138">HDFS-7138</a>.
+     Major sub-task reported by Charles Lamb and fixed by Charles Lamb (namenode)<br>
+     <b>Fix hftp to work with encryption</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7132">HDFS-7132</a>.
+     Minor bug reported by Charles Lamb and fixed by Charles Lamb (namenode)<br>
+     <b>hdfs namenode -metadataVersion command does not honor configured name dirs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7131">HDFS-7131</a>.
+     Major bug reported by Jing Zhao and fixed by Jing Zhao (qjm)<br>
+     <b>During HA upgrade, JournalNode should create a new committedTxnId file in the current directory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7130">HDFS-7130</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>TestDataTransferKeepalive fails intermittently on Windows.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7129">HDFS-7129</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Xiaoyu Yao (datanode)<br>
+     <b>Metrics to track usage of memory for writes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7128">HDFS-7128</a>.
+     Major improvement reported by Ming Ma and fixed by Ming Ma (namenode)<br>
+     <b>Decommission slows way down when it gets towards the end</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7127">HDFS-7127</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>TestLeaseRecovery leaks MiniDFSCluster instances.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7126">HDFS-7126</a>.
+     Minor test reported by Xiaoyu Yao and fixed by Xiaoyu Yao (security , test)<br>
+     <b>TestEncryptionZonesWithHA assumes Unix path separator for KMS key store path</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7124">HDFS-7124</a>.
+     Minor sub-task reported by Charles Lamb and fixed by Charles Lamb (namenode)<br>
+     <b>Remove EncryptionZoneManager.NULL_EZ</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7122">HDFS-7122</a>.
+     Blocker bug reported by Jeff Buell and fixed by Andrew Wang (namenode)<br>
+     <b>Use of ThreadLocal&lt;Random&gt; results in poor block placement</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7119">HDFS-7119</a>.
+     Minor sub-task reported by Chris Nauroth and fixed by Chris Nauroth (journal-node)<br>
+     <b>Split error checks in AtomicFileOutputStream#close into separate conditions to improve diagnostics.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7118">HDFS-7118</a>.
+     Major sub-task reported by Chris Nauroth and fixed by Chris Nauroth (journal-node , namenode)<br>
+     <b>Improve diagnostics on storage directory rename operations by using NativeIO#renameTo in Storage#rename.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7115">HDFS-7115</a>.
+     Major test reported by Xiaoyu Yao and fixed by Xiaoyu Yao (encryption)<br>
+     <b>TestEncryptionZones assumes Unix path separator for KMS key store path</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7112">HDFS-7112</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Xiaoyu Yao (datanode)<br>
+     <b>LazyWriter should use either async IO or one thread per physical disk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7111">HDFS-7111</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>TestSafeMode assumes Unix line endings in safe mode tip.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7110">HDFS-7110</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>Skip tests related to short-circuit read on platforms that do not currently implement short-circuit read.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7109">HDFS-7109</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>TestDataStorage does not release file locks between tests.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7108">HDFS-7108</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (test)<br>
+     <b>Fix unit test failures in SimulatedFsDataset</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7107">HDFS-7107</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (ha)<br>
+     <b>Avoid Findbugs warning for synchronization on AbstractNNFailoverProxyProvider#fallbackToSimpleAuth.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7106">HDFS-7106</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (datanode)<br>
+     <b>Reconfiguring DataNode volumes does not release the lock files in removed volumes.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7105">HDFS-7105</a>.
+     Minor bug reported by Ray Chiang and fixed by Ray Chiang (test)<br>
+     <b>Fix TestJournalNode#testFailToStartWithBadConfig to match log output change</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7104">HDFS-7104</a>.
+     Minor bug reported by Zhe Zhang and fixed by Zhe Zhang <br>
+     <b>Fix and clarify INodeInPath getter functions</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7100">HDFS-7100</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode)<br>
+     <b>Make eviction scheme pluggable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7096">HDFS-7096</a>.
+     Minor bug reported by Charles Lamb and fixed by Charles Lamb (test)<br>
+     <b>Fix TestRpcProgramNfs3 to use DFS_ENCRYPTION_KEY_PROVIDER_URI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7095">HDFS-7095</a>.
+     Minor sub-task reported by Tsz Wo Nicholas Sze and fixed by Jing Zhao (test)<br>
+     <b>TestStorageMover often fails in Jenkins</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7093">HDFS-7093</a>.
+     Major improvement reported by Arpit Agarwal and fixed by Arpit Agarwal (namenode)<br>
+     <b>Add config key to restrict setStoragePolicy</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7091">HDFS-7091</a>.
+     Minor sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (namenode , test)<br>
+     <b>Add forwarding constructor for INodeFile for existing callers</b><br>
+     <blockquote>Thanks Nicholas! Revised title and committed to the feature branch.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7090">HDFS-7090</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Xiaoyu Yao (datanode)<br>
+     <b>Use unbuffered writes when persisting in-memory replicas</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7084">HDFS-7084</a>.
+     Minor sub-task reported by Xiaoyu Yao and fixed by Xiaoyu Yao (datanode)<br>
+     <b>FsDatasetImpl#copyBlockFiles debug log can be improved</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7081">HDFS-7081</a>.
+     Major sub-task reported by Jing Zhao and fixed by Jing Zhao (balancer &amp; mover , namenode)<br>
+     <b>Add new DistributedFileSystem API for getting all the existing storage policies</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7080">HDFS-7080</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (test)<br>
+     <b>Fix finalize and upgrade unit test failures</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7079">HDFS-7079</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (test)<br>
+     <b>Few more unit test fixes for HDFS-6581</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7078">HDFS-7078</a>.
+     Major sub-task reported by Andrew Wang and fixed by Andrew Wang (encryption)<br>
+     <b>Fix listEZs to work correctly with snapshots</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7077">HDFS-7077</a>.
+     Major sub-task reported by Andrew Wang and fixed by Andrew Wang (encryption)<br>
+     <b>Separate CipherSuite from crypto protocol version</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7075">HDFS-7075</a>.
+     Major bug reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe <br>
+     <b>hadoop-fuse-dfs fails because it cannot find JavaKeyStoreProvider$Factory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7073">HDFS-7073</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (datanode , hdfs-client , security)<br>
+     <b>Allow falling back to a non-SASL connection on DataTransferProtocol in several edge cases.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7066">HDFS-7066</a>.
+     Minor sub-task reported by Xiaoyu Yao and fixed by Xiaoyu Yao (datanode)<br>
+     <b>LazyWriter#evictBlocks misses a null check for replicaState</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7065">HDFS-7065</a>.
+     Critical bug reported by Kihwal Lee and fixed by Kihwal Lee (datanode)<br>
+     <b>Pipeline close recovery race can cause block corruption</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7064">HDFS-7064</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Xiaoyu Yao (test)<br>
+     <b>Fix unit test failures in HDFS-6581 branch</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7061">HDFS-7061</a>.
+     Minor sub-task reported by Stephen Chu and fixed by Stephen Chu (encryption , test)<br>
+     <b>Add test to verify encryption zone creation after NameNode restart without saving namespace</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7059">HDFS-7059</a>.
+     Minor improvement reported by Rushabh S Shah and fixed by Rushabh S Shah <br>
+     <b>HAadmin transitionToActive with forceActive option can show confusing message.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7051">HDFS-7051</a>.
+     Minor test reported by Chris Nauroth and fixed by Chris Nauroth (datanode , test)<br>
+     <b>TestDataNodeRollingUpgrade#isBlockFileInPrevious assumes Unix file path separator.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7049">HDFS-7049</a>.
+     Minor bug reported by Juan Yu and fixed by Juan Yu (test)<br>
+     <b>TestByteRangeInputStream.testPropagatedClose fails and throw NPE on branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7047">HDFS-7047</a>.
+     Major sub-task reported by Andrew Wang and fixed by Colin Patrick McCabe (encryption)<br>
+     <b>Expose FileStatus#isEncrypted in libhdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7046">HDFS-7046</a>.
+     Critical bug reported by Daryn Sharp and fixed by Kihwal Lee (namenode)<br>
+     <b>HA NN can NPE upon transition to active</b><br>
+     <blockquote>Thanks for the reviews, gentlemen. It's been committed to trunk and branch-2.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7045">HDFS-7045</a>.
+     Critical bug reported by Yi Liu and fixed by Yi Liu (namenode)<br>
+     <b>Fix NameNode deadlock when opening file under /.reserved path</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7042">HDFS-7042</a>.
+     Blocker bug reported by Chris Nauroth and fixed by Chris Nauroth (journal-node)<br>
+     <b>Upgrade fails for Windows HA cluster due to file locks held during rename in JournalNode.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7032">HDFS-7032</a>.
+     Major sub-task reported by Stephen Chu and fixed by Charles Lamb (encryption , webhdfs)<br>
+     <b>Add WebHDFS support for reading and writing to encryption zones</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7025">HDFS-7025</a>.
+     Major test reported by Xiaoyu Yao and fixed by Xiaoyu Yao (encryption)<br>
+     <b>HDFS Credential Provider related Unit Test Failure</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7006">HDFS-7006</a>.
+     Major test reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security , test)<br>
+     <b>Test encryption zones with KMS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7005">HDFS-7005</a>.
+     Critical bug reported by Daryn Sharp and fixed by Daryn Sharp (hdfs-client)<br>
+     <b>DFS input streams do not timeout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7004">HDFS-7004</a>.
+     Major sub-task reported by Andrew Wang and fixed by Andrew Wang (encryption)<br>
+     <b>Update KeyProvider instantiation to create by URI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7003">HDFS-7003</a>.
+     Major sub-task reported by Stephen Chu and fixed by Charles Lamb (encryption , nfs)<br>
+     <b>Add NFS Gateway support for reading and writing to encryption zones</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-7001">HDFS-7001</a>.
+     Minor bug reported by Masatake Iwasaki and fixed by Masatake Iwasaki <br>
+     <b>Tests in TestTracing should not depend on the order of execution</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6996">HDFS-6996</a>.
+     Major bug reported by Jing Zhao and fixed by Jing Zhao (snapshots)<br>
+     <b>SnapshotDiff report can hit IndexOutOfBoundsException when there are nested renamed directory/file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6995">HDFS-6995</a>.
+     Major bug reported by Vinayakumar B and fixed by Vinayakumar B (namenode)<br>
+     <b>Block should be placed in the client's 'rack-local' node if 'client-local' node is not available</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6991">HDFS-6991</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode , namenode)<br>
+     <b>Notify NN of evicted block before deleting it from RAM disk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6990">HDFS-6990</a>.
+     Major sub-task reported by Xiaoyu Yao and fixed by Xiaoyu Yao (datanode)<br>
+     <b>Add unit test for evict/delete RAM_DISK block with open handle</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6988">HDFS-6988</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Xiaoyu Yao (datanode)<br>
+     <b>Improve HDFS-6581 eviction configuration</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6987">HDFS-6987</a>.
+     Major sub-task reported by Andrew Wang and fixed by Zhe Zhang (encryption)<br>
+     <b>Move CipherSuite xattr information up to the encryption zone root</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6986">HDFS-6986</a>.
+     Major sub-task reported by Alejandro Abdelnur and fixed by Zhe Zhang (security)<br>
+     <b>DistributedFileSystem must get delegation tokens from configured KeyProvider</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6981">HDFS-6981</a>.
+     Major bug reported by James Thomas and fixed by Arpit Agarwal (datanode)<br>
+     <b>Fix DN upgrade with layout version change</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6979">HDFS-6979</a>.
+     Minor bug reported by Remus Rusanu and fixed by Chris Nauroth (hdfs-client)<br>
+     <b>hdfs.dll does not produce .pdb files</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6978">HDFS-6978</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode)<br>
+     <b>Directory scanner should correctly reconcile blocks on RAM disk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6977">HDFS-6977</a>.
+     Major sub-task reported by Nathan Yao and fixed by Arpit Agarwal (datanode)<br>
+     <b>Delete all copies when a block is deleted from the block space</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6972">HDFS-6972</a>.
+     Major bug reported by Yongjun Zhang and fixed by Yongjun Zhang <br>
+     <b>TestRefreshUserMappings.testRefreshSuperUserGroupsConfiguration doesn't decode url correctly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6970">HDFS-6970</a>.
+     Major sub-task reported by Andrew Wang and fixed by Andrew Wang (encryption)<br>
+     <b>Move startFile EDEK retries to the DFSClient</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6966">HDFS-6966</a>.
+     Major sub-task reported by Stephen Chu and fixed by Stephen Chu (encryption)<br>
+     <b>Add additional unit tests for encryption zones</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6965">HDFS-6965</a>.
+     Major bug reported by Daryn Sharp and fixed by Rushabh S Shah (namenode)<br>
+     <b>NN continues to issue block locations for DNs with full disks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6960">HDFS-6960</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode , test)<br>
+     <b>Bugfix in LazyWriter, fix test case and some refactoring</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6959">HDFS-6959</a>.
+     Minor new feature reported by Kevin Odell and fixed by Yongjun Zhang <br>
+     <b>Make the HDFS home directory location customizable.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6956">HDFS-6956</a>.
+     Major sub-task reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (datanode , namenode)<br>
+     <b>Allow dynamically changing the tracing level in Hadoop servers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6954">HDFS-6954</a>.
+     Major bug reported by Allen Wittenauer and fixed by Charles Lamb (encryption)<br>
+     <b>With crypto, no native lib systems are too verbose</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6951">HDFS-6951</a>.
+     Major sub-task reported by Stephen Chu and fixed by Charles Lamb (encryption)<br>
+     <b>Correctly persist raw namespace xattrs to edit log and fsimage</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6950">HDFS-6950</a>.
+     Major sub-task reported by Xiaoyu Yao and fixed by Xiaoyu Yao <br>
+     <b>Add Additional unit tests for HDFS-6581</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6948">HDFS-6948</a>.
+     Major bug reported by Daryn Sharp and fixed by Eric Payne <br>
+     <b>DN rejects blocks if it has older UC block</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6943">HDFS-6943</a>.
+     Minor improvement reported by Ming Ma and fixed by Ming Ma (namenode)<br>
+     <b>Improve NN allocateBlock log to include replicas' datanode IPs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6942">HDFS-6942</a>.
+     Trivial bug reported by Ray Chiang and fixed by Ray Chiang <br>
+     <b>Fix typos in log messages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6934">HDFS-6934</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Chris Nauroth (datanode , hdfs-client)<br>
+     <b>Move checksum computation off the hot path when writing to RAM disk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6932">HDFS-6932</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Xiaoyu Yao (datanode)<br>
+     <b>Balancer and Mover tools should ignore replicas on RAM_DISK</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6931">HDFS-6931</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode)<br>
+     <b>Move lazily persisted replicas to finalized directory on DN startup</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6930">HDFS-6930</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode)<br>
+     <b>Improve replica eviction from RAM disk</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6929">HDFS-6929</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode)<br>
+     <b>NN periodically unlinks lazy persist files with missing replicas from namespace</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6928">HDFS-6928</a>.
+     Major sub-task reported by Tassapol Athiapinya and fixed by Arpit Agarwal (datanode)<br>
+     <b>'hdfs put' command should accept lazyPersist flag for testing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6927">HDFS-6927</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode)<br>
+     <b>Add unit tests</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6926">HDFS-6926</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode)<br>
+     <b>DN support for saving replicas to persistent storage and evicting in-memory replicas</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6925">HDFS-6925</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode)<br>
+     <b>DataNode should attempt to place replicas on transient storage first if lazyPersist flag is received</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6924">HDFS-6924</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode)<br>
+     <b>Add new RAM_DISK storage type</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6923">HDFS-6923</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode)<br>
+     <b>Propagate LazyPersist flag to DNs via DataTransferProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6921">HDFS-6921</a>.
+     Major sub-task reported by Arpit Agarwal and fixed by Arpit Agarwal <br>
+     <b>Add LazyPersist flag to FileStatus</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6912">HDFS-6912</a>.
+     Minor bug reported by Gopal V and fixed by Colin Patrick McCabe (caching)<br>
+     <b>SharedFileDescriptorFactory should not allocate sparse files</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6908">HDFS-6908</a>.
+     Critical bug reported by Juan Yu and fixed by Juan Yu (snapshots)<br>
+     <b>incorrect snapshot directory diff generated by snapshot deletion</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6905">HDFS-6905</a>.
+     Blocker bug reported by Allen Wittenauer and fixed by Charles Lamb <br>
+     <b>fs-encryption merge triggered release audit failures</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6904">HDFS-6904</a>.
+     Critical bug reported by Varun Vasudev and fixed by Jitendra Nath Pandey (webhdfs)<br>
+     <b>YARN unable to renew delegation token fetched via webhdfs due to incorrect service port</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6902">HDFS-6902</a>.
+     Minor bug reported by Ted Yu and fixed by Tsuyoshi OZAWA <br>
+     <b>FileWriter should be closed in finally block in BlockReceiver#receiveBlock()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6899">HDFS-6899</a>.
+     Major improvement reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode , test)<br>
+     <b>Allow changing MiniDFSCluster volumes per DN and capacity per volume</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6894">HDFS-6894</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Add XDR parser method for each NFS response</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6892">HDFS-6892</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Add XDR packaging method for each NFS request</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6890">HDFS-6890</a>.
+     Major bug reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>NFS readdirplus doesn't return dotdot attributes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6886">HDFS-6886</a>.
+     Critical improvement reported by Yi Liu and fixed by Yi Liu (namenode)<br>
+     <b>Use single editlog record for creating file + overwrite.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6880">HDFS-6880</a>.
+     Major sub-task reported by Masatake Iwasaki and fixed by Masatake Iwasaki <br>
+     <b>Adding tracing to DataNode data transfer protocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6879">HDFS-6879</a>.
+     Major sub-task reported by Masatake Iwasaki and fixed by Masatake Iwasaki <br>
+     <b>Adding tracing to Hadoop RPC</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6878">HDFS-6878</a>.
+     Minor test reported by Tsz Wo Nicholas Sze and fixed by Arpit Agarwal (test)<br>
+     <b>Change MiniDFSCluster to support StorageType configuration for individual directories</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6870">HDFS-6870</a>.
+     Major bug reported by Yi Liu and fixed by Yi Liu (namenode)<br>
+     <b>Blocks and INodes could leak for Rename with overwrite flag</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6868">HDFS-6868</a>.
+     Major bug reported by Allen Wittenauer and fixed by Brandon Li (documentation , nfs)<br>
+     <b>portmap and nfs3 are documented as hadoop commands instead of hdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6865">HDFS-6865</a>.
+     Major sub-task reported by James Thomas and fixed by James Thomas (hdfs-client , performance)<br>
+     <b>Byte array native checksumming on client side (HDFS changes)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6862">HDFS-6862</a>.
+     Major bug reported by Arpit Agarwal and fixed by Xiaoyu Yao (test)<br>
+     <b>Add missing timeout annotations to tests</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6858">HDFS-6858</a>.
+     Minor improvement reported by Benoy Antony and fixed by Benoy Antony (security)<br>
+     <b>Allow dfs.data.transfer.saslproperties.resolver.class default to hadoop.security.saslproperties.resolver.class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6851">HDFS-6851</a>.
+     Major sub-task reported by Charles Lamb and fixed by Charles Lamb (namenode , security)<br>
+     <b>Refactor EncryptionZoneWithId and EncryptionZone</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6850">HDFS-6850</a>.
+     Minor improvement reported by Zhe Zhang and fixed by Zhe Zhang (nfs)<br>
+     <b>Move NFS out of order write unit tests into TestWrites class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6849">HDFS-6849</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>Replace HttpFS custom proxyuser handling with common implementation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6848">HDFS-6848</a>.
+     Minor bug reported by Ted Yu and fixed by Xiaoyu Yao <br>
+     <b>Lack of synchronization on access to datanodeUuid in DataStorage#format() </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6843">HDFS-6843</a>.
+     Major sub-task reported by Charles Lamb and fixed by Charles Lamb (namenode , security)<br>
+     <b>Create FileStatus isEncrypted() method</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6840">HDFS-6840</a>.
+     Critical bug reported by Jason Lowe and fixed by Andrew Wang <br>
+     <b>Clients are always sent to the same datanode when read is off rack</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6838">HDFS-6838</a>.
+     Minor bug reported by Jing Zhao and fixed by Jing Zhao (namenode)<br>
+     <b>Code cleanup for unnecessary INode replacement</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6837">HDFS-6837</a>.
+     Minor improvement reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (balancer &amp; mover)<br>
+     <b>Code cleanup for Balancer and Dispatcher</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6836">HDFS-6836</a>.
+     Major improvement reported by Gopal V and fixed by Nathan Yao (datanode)<br>
+     <b>HDFS INFO logging is verbose &amp; uses file appenders</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6831">HDFS-6831</a>.
+     Minor bug reported by Akira AJISAKA and fixed by Xiaoyu Yao <br>
+     <b>Inconsistency between 'hdfs dfsadmin' and 'hdfs dfsadmin -help'</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6830">HDFS-6830</a>.
+     Major bug reported by Arpit Agarwal and fixed by Arpit Agarwal (namenode)<br>
+     <b>BlockInfo.addStorage fails when DN changes the storage for a block replica</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6829">HDFS-6829</a>.
+     Minor bug reported by zhaoyunjiong and fixed by zhaoyunjiong (tools)<br>
+     <b>DFSAdmin refreshSuperUserGroupsConfiguration failed in security cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6828">HDFS-6828</a>.
+     Major improvement reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (balancer &amp; mover)<br>
+     <b>Separate block replica dispatching from Balancer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6825">HDFS-6825</a>.
+     Major bug reported by Yongjun Zhang and fixed by Yongjun Zhang (namenode)<br>
+     <b>Edit log corruption due to delayed block removal</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6823">HDFS-6823</a>.
+     Minor bug reported by Allen Wittenauer and fixed by Allen Wittenauer (namenode)<br>
+     <b>dfs.web.authentication.kerberos.principal shows up in logs for insecure HDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6812">HDFS-6812</a>.
+     Minor improvement reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (namenode)<br>
+     <b>Remove addBlock and replaceBlock from DatanodeDescriptor</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6810">HDFS-6810</a>.
+     Minor bug reported by Ted Yu and fixed by Tsz Wo Nicholas Sze (namenode)<br>
+     <b>StorageReport array is initialized with wrong size in DatanodeDescriptor#getStorageReports</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6809">HDFS-6809</a>.
+     Minor improvement reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (balancer &amp; mover)<br>
+     <b>Move some Balancer's inner classes to standalone classes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6808">HDFS-6808</a>.
+     Major sub-task reported by Lei (Eddy) Xu and fixed by Lei (Eddy) Xu (datanode)<br>
+     <b>Add command line option to ask DataNode reload configuration.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6802">HDFS-6802</a>.
+     Major bug reported by Akira AJISAKA and fixed by Akira AJISAKA (test)<br>
+     <b>Some tests in TestDFSClientFailover are missing @Test annotation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6800">HDFS-6800</a>.
+     Major improvement reported by Colin Patrick McCabe and fixed by James Thomas (datanode)<br>
+     <b>Support Datanode layout changes with rolling upgrade</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6799">HDFS-6799</a>.
+     Minor bug reported by Megasthenis Asteris and fixed by Megasthenis Asteris (datanode , test)<br>
+     <b>The invalidate method in SimulatedFSDataset.java failed to remove (invalidate) blocks from the file system.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6798">HDFS-6798</a>.
+     Major improvement reported by Benoy Antony and fixed by Benoy Antony (balancer &amp; mover)<br>
+     <b>Add test case for incorrect data node condition during balancing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6797">HDFS-6797</a>.
+     Major bug reported by Benoy Antony and fixed by Benoy Antony (datanode)<br>
+     <b>DataNode logs wrong layoutversion during upgrade</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6796">HDFS-6796</a>.
+     Minor improvement reported by Benoy Antony and fixed by Benoy Antony (balancer &amp; mover)<br>
+     <b>Improving the argument check during balancer command line parsing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6794">HDFS-6794</a>.
+     Minor improvement reported by Arpit Agarwal and fixed by Arpit Agarwal (namenode)<br>
+     <b>Update BlockManager methods to use DatanodeStorageInfo where possible</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6791">HDFS-6791</a>.
+     Major bug reported by Ming Ma and fixed by Ming Ma <br>
+     <b>A block could remain under replicated if all of its replicas are on decommissioned nodes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6790">HDFS-6790</a>.
+     Major bug reported by Larry McCay and fixed by Larry McCay <br>
+     <b>DFSUtil Should Use configuration.getPassword for SSL passwords</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6789">HDFS-6789</a>.
+     Major bug reported by Rushabh S Shah and fixed by Akira AJISAKA (test)<br>
+     <b>TestDFSClientFailover.testFileContextDoesntDnsResolveLogicalURI and TestDFSClientFailover.testDoesntDnsResolveLogicalURI failing on jdk7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6788">HDFS-6788</a>.
+     Major bug reported by Yongjun Zhang and fixed by Yongjun Zhang (datanode)<br>
+     <b>Improve synchronization in BPOfferService with read write lock</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6787">HDFS-6787</a>.
+     Major bug reported by Yi Liu and fixed by Yi Liu (namenode)<br>
+     <b>Remove duplicate code in FSDirectory#unprotectedConcat</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6783">HDFS-6783</a>.
+     Major bug reported by Yi Liu and fixed by Yi Liu (caching)<br>
+     <b>Fix HDFS CacheReplicationMonitor rescan logic</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6781">HDFS-6781</a>.
+     Major improvement reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Separate HDFS commands from CommandsManual.apt.vm</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6779">HDFS-6779</a>.
+     Minor improvement reported by Allen Wittenauer and fixed by Sasaki Toru (scripts)<br>
+     <b>Add missing version subcommand for hdfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6778">HDFS-6778</a>.
+     Major bug reported by Charles Lamb and fixed by Charles Lamb <br>
+     <b>The extended attributes javadoc should simply refer to the user docs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6777">HDFS-6777</a>.
+     Major sub-task reported by James Thomas and fixed by James Thomas (qjm)<br>
+     <b>Supporting consistent edit log reads when in-progress edit log segments are included</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6776">HDFS-6776</a>.
+     Major bug reported by Yongjun Zhang and fixed by Yongjun Zhang <br>
+     <b>Using distcp to copy data between insecure and secure cluster via webdhfs doesn't work</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6774">HDFS-6774</a>.
+     Major sub-task reported by Lei (Eddy) Xu and fixed by Lei (Eddy) Xu (datanode)<br>
+     <b>Make FsDataset and DataStore support removing volumes.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6773">HDFS-6773</a>.
+     Major improvement reported by Daryn Sharp and fixed by Stephen Chu (namenode)<br>
+     <b>MiniDFSCluster should skip edit log fsync by default</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6772">HDFS-6772</a>.
+     Major improvement reported by Ming Ma and fixed by Ming Ma <br>
+     <b>Get DN storages out of blockContentsStale state faster after NN restarts</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6768">HDFS-6768</a>.
+     Major bug reported by Arpit Agarwal and fixed by Arpit Agarwal (test)<br>
+     <b>Fix a few unit tests that use hard-coded port numbers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6758">HDFS-6758</a>.
+     Major improvement reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode , hdfs-client)<br>
+     <b>block writer should pass the expected block size to DataXceiverServer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6755">HDFS-6755</a>.
+     Major improvement reported by Mit Desai and fixed by Mit Desai <br>
+     <b>There is an unnecessary sleep in the code path where DFSOutputStream#close gives up its attempt to contact the namenode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6754">HDFS-6754</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>TestNamenodeCapacityReport.testXceiverCount may sometimes fail due to lack of retry</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6750">HDFS-6750</a>.
+     Major sub-task reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (datanode , hdfs-client)<br>
+     <b>The DataNode should use its shared memory segment to mark short-circuit replicas that have been unlinked as stale</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6749">HDFS-6749</a>.
+     Major bug reported by Charles Lamb and fixed by Charles Lamb (namenode)<br>
+     <b>FSNamesystem methods should call resolvePath</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6740">HDFS-6740</a>.
+     Major sub-task reported by Lei (Eddy) Xu and fixed by Lei (Eddy) Xu (datanode)<br>
+     <b>Make FSDataset support adding data volumes dynamically</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6739">HDFS-6739</a>.
+     Major improvement reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (hdfs-client , namenode)<br>
+     <b>Add getDatanodeStorageReport to ClientProtocol</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6731">HDFS-6731</a>.
+     Major bug reported by WenJin Ma and fixed by Masatake Iwasaki (auto-failover , ha)<br>
+     <b>Run "hdfs zkfc-formatZK" on a server in a non-namenode  will cause a null pointer exception.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6728">HDFS-6728</a>.
+     Major sub-task reported by Lei (Eddy) Xu and fixed by Lei (Eddy) Xu (datanode)<br>
+     <b>Dynamically add new volumes to DataStorage, formatted if necessary.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6727">HDFS-6727</a>.
+     Major sub-task reported by Lei (Eddy) Xu and fixed by Lei (Eddy) Xu (datanode)<br>
+     <b>Refresh data volumes on DataNode based on configuration changes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6722">HDFS-6722</a>.
+     Major sub-task reported by Ming Ma and fixed by Ming Ma <br>
+     <b>Display readable last contact time for dead nodes on NN webUI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6717">HDFS-6717</a>.
+     Minor sub-task reported by Jeff Hansen and fixed by Brandon Li (nfs)<br>
+     <b>Jira HDFS-5804 breaks default nfs-gateway behavior for unsecured config</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6715">HDFS-6715</a>.
+     Major bug reported by Arpit Gupta and fixed by Jing Zhao (ha , webhdfs)<br>
+     <b>webhdfs wont fail over when it gets java.io.IOException: Namenode is in startup mode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6714">HDFS-6714</a>.
+     Minor bug reported by Vinayakumar B and fixed by Vinayakumar B (test)<br>
+     <b>TestBlocksScheduledCounter#testBlocksScheduledCounter should shutdown cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6705">HDFS-6705</a>.
+     Major sub-task reported by Charles Lamb and fixed by Charles Lamb (namenode , security)<br>
+     <b>Create an XAttr that disallows the HDFS admin from accessing a file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6704">HDFS-6704</a>.
+     Minor bug reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Fix the command to launch JournalNode in HDFS-HA document</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6702">HDFS-6702</a>.
+     Major bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (datanode , hdfs-client)<br>
+     <b>DFSClient should create blocks using StorageType </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6701">HDFS-6701</a>.
+     Major improvement reported by Ashwin Shankar and fixed by Ashwin Shankar (namenode)<br>
+     <b>Make seed optional in NetworkTopology#sortByDistance</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6700">HDFS-6700</a>.
+     Minor improvement reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (namenode)<br>
+     <b>BlockPlacementPolicy shoud choose storage but not datanode for deletion</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6693">HDFS-6693</a>.
+     Major bug reported by Vinayakumar B and fixed by Vinayakumar B (test , tools)<br>
+     <b>TestDFSAdminWithHA fails on windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6690">HDFS-6690</a>.
+     Major improvement reported by Andrew Wang and fixed by Andrew Wang (namenode)<br>
+     <b>Deduplicate xattr names in memory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6689">HDFS-6689</a>.
+     Major bug reported by Yesha Vora and fixed by Brandon Li (nfs)<br>
+     <b>NFS doesn't return correct lookup access for directories</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6685">HDFS-6685</a>.
+     Major improvement reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (balancer &amp; mover)<br>
+     <b>Balancer should preserve storage type of replicas</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6678">HDFS-6678</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>MiniDFSCluster may still be partially running after initialization fails.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6667">HDFS-6667</a>.
+     Major bug reported by Jian He and fixed by Jing Zhao (security)<br>
+     <b>In HDFS HA mode, Distcp/SLive with webhdfs on secure cluster fails with Client cannot authenticate via:[TOKEN, KERBEROS] error</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6665">HDFS-6665</a>.
+     Major test reported by Stephen Chu and fixed by Stephen Chu (hdfs-client)<br>
+     <b>Add tests for XAttrs in combination with viewfs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6664">HDFS-6664</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Ray Chiang (documentation)<br>
+     <b>HDFS permissions guide documentation states incorrect default group mapping class.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6655">HDFS-6655</a>.
+     Major improvement reported by Vinayakumar B and fixed by Vinayakumar B <br>
+     <b>Add 'header banner' to 'explorer.html' also in Namenode UI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6646">HDFS-6646</a>.
+     Major bug reported by Brahma Reddy Battula and fixed by Brahma Reddy Battula (tools)<br>
+     <b>[ HDFS Rolling Upgrade - Shell  ] shutdownDatanode and getDatanodeInfo usage is missed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6645">HDFS-6645</a>.
+     Minor test reported by Stephen Chu and fixed by Stephen Chu (snapshots , test)<br>
+     <b>Add test for successive Snapshots between XAttr modifications</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6643">HDFS-6643</a>.
+     Minor improvement reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (namenode)<br>
+     <b>Refactor INodeFile.HeaderFormat and INodeWithAdditionalFields.PermissionStatusFormat</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6640">HDFS-6640</a>.
+     Major bug reported by Brahma Reddy Battula and fixed by Stephen Chu (documentation , webhdfs)<br>
+     <b>[ Web HDFS ] Syntax for MKDIRS, CREATESYMLINK, and SETXATTR are given wrongly(missed webhdfs/v1).).</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6638">HDFS-6638</a>.
+     Major test reported by Liang Xie and fixed by Liang Xie (test)<br>
+     <b>shorten test run time with a smaller retry timeout setting</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6634">HDFS-6634</a>.
+     Major new feature reported by James Thomas and fixed by James Thomas (hdfs-client , namenode , qjm)<br>
+     <b>inotify in HDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6630">HDFS-6630</a>.
+     Major bug reported by J.Andreina and fixed by Haohui Mai (namenode)<br>
+     <b>Unable to fetch the block information  by Browsing the file system on Namenode UI through IE9</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6627">HDFS-6627</a>.
+     Major improvement reported by Liang Xie and fixed by Liang Xie (datanode)<br>
+     <b>Rename DataNode#checkWriteAccess to checkReadAccess.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6621">HDFS-6621</a>.
+     Major bug reported by Benjamin Bowman and fixed by Rafal Wojdyla (balancer &amp; mover)<br>
+     <b>Hadoop Balancer prematurely exits iterations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6617">HDFS-6617</a>.
+     Minor test reported by Liang Xie and fixed by Liang Xie (auto-failover , test)<br>
+     <b>Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin due to a long edit log sync op</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6616">HDFS-6616</a>.
+     Minor bug reported by zhaoyunjiong and fixed by zhaoyunjiong (webhdfs)<br>
+     <b>bestNode shouldn't always return the first DataNode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6613">HDFS-6613</a>.
+     Minor improvement reported by Andrew Wang and fixed by Andrew Wang (caching)<br>
+     <b>Improve logging in caching classes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6609">HDFS-6609</a>.
+     Major sub-task reported by Jing Zhao and fixed by Jing Zhao (namenode)<br>
+     <b>Use DirectorySnapshottableFeature to represent a snapshottable directory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6606">HDFS-6606</a>.
+     Major improvement reported by Yi Liu and fixed by Yi Liu (datanode , hdfs-client , security)<br>
+     <b>Optimize HDFS Encrypted Transport performance</b><br>
+     <blockquote>HDFS now supports the option to configure AES encryption for block data transfer.  AES offers improved cryptographic strength and performance over the prior options of 3DES and RC4.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6597">HDFS-6597</a>.
+     Major improvement reported by Danilo Vunjak and fixed by Danilo Vunjak (namenode)<br>
+     <b>Add a new option to NN upgrade to terminate the process after upgrade on NN is completed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6584">HDFS-6584</a>.
+     Major new feature reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (balancer &amp; mover , namenode)<br>
+     <b>Support Archival Storage</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6582">HDFS-6582</a>.
+     Minor bug reported by Ted Yu and fixed by Abhiraj Butala (nfs)<br>
+     <b>Missing null check in RpcProgramNfs3#read(XDR, SecurityHandler)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6581">HDFS-6581</a>.
+     Major new feature reported by Arpit Agarwal and fixed by Arpit Agarwal (datanode , hdfs-client , namenode)<br>
+     <b>Write to single replica in memory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6570">HDFS-6570</a>.
+     Major improvement reported by Thejas M Nair and fixed by Jitendra Nath Pandey (hdfs-client , namenode , webhdfs)<br>
+     <b>add api that enables checking if a user has certain permissions on a file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6569">HDFS-6569</a>.
+     Major bug reported by Brandon Li and fixed by Brandon Li (datanode)<br>
+     <b>OOB message can't be sent to the client when DataNode shuts down for upgrade</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6567">HDFS-6567</a>.
+     Major bug reported by Haohui Mai and fixed by Tassapol Athiapinya <br>
+     <b>Normalize the order of public final in HdfsFileStatus</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6544">HDFS-6544</a>.
+     Minor bug reported by Suraj Nayak M and fixed by Suraj Nayak M <br>
+     <b>Broken Link for GFS in package.html</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6534">HDFS-6534</a>.
+     Minor bug reported by Binglin Chang and fixed by Binglin Chang <br>
+     <b>Fix build on macosx: HDFS parts</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6519">HDFS-6519</a>.
+     Major improvement reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Document oiv_legacy command</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6517">HDFS-6517</a>.
+     Major bug reported by Akira AJISAKA and fixed by Akira AJISAKA <br>
+     <b>Remove hadoop-metrics2.properties from hdfs project</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6511">HDFS-6511</a>.
+     Minor improvement reported by Juan Yu and fixed by Juan Yu <br>
+     <b>BlockManager#computeInvalidateWork() could do nothing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6506">HDFS-6506</a>.
+     Major bug reported by Binglin Chang and fixed by Binglin Chang (balancer &amp; mover , test)<br>
+     <b>Newly moved block replica been invalidated and deleted in TestBalancer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6482">HDFS-6482</a>.
+     Major improvement reported by James Thomas and fixed by James Thomas (datanode)<br>
+     <b>Use block ID-based block layout on datanodes</b><br>
+     <blockquote>The directory structure for finalized replicas on DNs has been changed. Now, the directory that a finalized replica goes in is determined uniquely by its ID. Specifically, we use a two-level directory structure, with the 24th through 17th bits identifying the correct directory at the first level and the 16th through 8th bits identifying the correct directory at the second level.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6478">HDFS-6478</a>.
+     Major bug reported by Ming Ma and fixed by Ming Ma <br>
+     <b>RemoteException can't be retried properly for non-HA scenario</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6456">HDFS-6456</a>.
+     Major bug reported by Yesha Vora and fixed by Abhiraj Butala (nfs)<br>
+     <b>NFS should throw error for invalid entry in dfs.nfs.exports.allowed.hosts</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6455">HDFS-6455</a>.
+     Major bug reported by Yesha Vora and fixed by Abhiraj Butala (nfs)<br>
+     <b>NFS: Exception should be added in NFS log for invalid separator in nfs.exports.allowed.hosts</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6451">HDFS-6451</a>.
+     Major bug reported by Brandon Li and fixed by Abhiraj Butala (nfs)<br>
+     <b>NFS should not return NFS3ERR_IO for AccessControlException </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6441">HDFS-6441</a>.
+     Major improvement reported by Benoy Antony and fixed by Benoy Antony (balancer &amp; mover)<br>
+     <b>Add ability to exclude/include specific datanodes while balancing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6385">HDFS-6385</a>.
+     Major sub-task reported by Jing Zhao and fixed by Chris Nauroth <br>
+     <b>Show when block deletion will start after NameNode startup in WebUI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6376">HDFS-6376</a>.
+     Major bug reported by Dave Marion and fixed by Dave Marion (datanode , federation , hdfs-client)<br>
+     <b>Distcp data between two HA clusters requires another configuration</b><br>
+     <blockquote>Allow distcp to copy data between HA clusters. Users can use a new configuration property "dfs.internal.nameservices" to explicitly specify the name services belonging to the local cluster, while continue using the configuration property "dfs.nameservices" to specify all the name services in the local and remote clusters.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6247">HDFS-6247</a>.
+     Major bug reported by Vinayakumar B and fixed by Vinayakumar B (balancer &amp; mover , datanode)<br>
+     <b>Avoid timeouts for replaceBlock() call by sending intermediate responses to Balancer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6188">HDFS-6188</a>.
+     Major improvement reported by Benoy Antony and fixed by Benoy Antony (security)<br>
+     <b>An ip whitelist based implementation of TrustedChannelResolver</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6134">HDFS-6134</a>.
+     Major new feature reported by Alejandro Abdelnur and fixed by Charles Lamb (security)<br>
+     <b>Transparent data at rest encryption</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6114">HDFS-6114</a>.
+     Critical bug reported by Vinayakumar B and fixed by Vinayakumar B (datanode)<br>
+     <b>Block Scan log rolling will never happen if blocks written continuously leading to huge size of dncp_block_verification.log.curr</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6036">HDFS-6036</a>.
+     Major sub-task reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (caching , datanode)<br>
+     <b>Forcibly timeout misbehaving DFSClients that try to do no-checksum reads that extend too long</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5919">HDFS-5919</a>.
+     Major bug reported by Vinayakumar B and fixed by Vinayakumar B (namenode)<br>
+     <b>FileJournalManager doesn't purge empty and corrupt inprogress edits files</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5809">HDFS-5809</a>.
+     Critical bug reported by ikweesung and fixed by Colin Patrick McCabe (datanode)<br>
+     <b>BlockPoolSliceScanner and high speed hdfs appending make datanode to drop into infinite loop</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5723">HDFS-5723</a>.
+     Major bug reported by Vinayakumar B and fixed by Vinayakumar B (namenode)<br>
+     <b>Append failed FINALIZED replica should not be accepted as valid when that block is underconstruction</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5624">HDFS-5624</a>.
+     Major test reported by Chris Nauroth and fixed by Stephen Chu (hdfs-client , test)<br>
+     <b>Add HDFS tests for ACLs in combination with viewfs.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5202">HDFS-5202</a>.
+     Major improvement reported by Colin Patrick McCabe and fixed by Chris Nauroth (datanode)<br>
+     <b>Support Centralized Cache Management on Windows.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5185">HDFS-5185</a>.
+     Critical bug reported by Vinayakumar B and fixed by Vinayakumar B (datanode)<br>
+     <b>DN fails to startup if one of the data dir is full</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5182">HDFS-5182</a>.
+     Major improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (hdfs-client)<br>
+     <b>BlockReaderLocal must allow zero-copy  reads only when the DN believes it's valid</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5089">HDFS-5089</a>.
+     Major bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (namenode)<br>
+     <b>When a LayoutVersion support SNAPSHOT, it must support FSIMAGE_NAME_OPTIMIZATION.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4852">HDFS-4852</a>.
+     Minor bug reported by Andrew Wang and fixed by Chris Nauroth <br>
+     <b>libhdfs documentation is out of date</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4629">HDFS-4629</a>.
+     Major bug reported by Amir Sanjar and fixed by  (tools)<br>
+     <b>Using com.sun.org.apache.xml.internal.serialize.* in XmlEditsVisitor.java is JVM vendor specific. Breaks IBM JAVA</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4486">HDFS-4486</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Zhe Zhang <br>
+     <b>Add log category for long-running DFSClient notices</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4257">HDFS-4257</a>.
+     Minor new feature reported by Harsh J and fixed by Tsz Wo Nicholas Sze (hdfs-client)<br>
+     <b>The ReplaceDatanodeOnFailure policies could have a forgiving option</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4227">HDFS-4227</a>.
+     Major bug reported by Eli Collins and fixed by Daisuke Kobayashi (documentation)<br>
+     <b>Document dfs.namenode.resource.*  </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4165">HDFS-4165</a>.
+     Trivial bug reported by Binglin Chang and fixed by Binglin Chang (namenode)<br>
+     <b>Faulty sanity check in FsDirectory.unprotectedSetQuota</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4120">HDFS-4120</a>.
+     Minor improvement reported by Liang Xie and fixed by Rakesh R (ha , namenode)<br>
+     <b>Add a new "-skipSharedEditsCheck" option for BootstrapStandby</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3851">HDFS-3851</a>.
+     Trivial improvement reported by Jing Zhao and fixed by Jing Zhao (hdfs-client)<br>
+     <b>Make DFSOuputSteram$Packet default constructor reuse the other constructor</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3528">HDFS-3528</a>.
+     Major improvement reported by Todd Lipcon and fixed by James Thomas (datanode , hdfs-client , performance)<br>
+     <b>Use native CRC32 in DFS write path</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3482">HDFS-3482</a>.
+     Minor bug reported by Stephen Chu and fixed by madhukara phatak (balancer &amp; mover)<br>
+     <b>hdfs balancer throws ArrayIndexOutOfBoundsException if option is specified without arguments</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2976">HDFS-2976</a>.
+     Trivial bug reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (hdfs-client)<br>
+     <b>Remove unnecessary method (tokenRefetchNeeded) in DFSClient</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2975">HDFS-2975</a>.
+     Major bug reported by Uma Maheswara Rao G and fixed by Yi Liu (namenode)<br>
+     <b>Rename with overwrite flag true can make NameNode to stuck in safemode on NN (crash + restart).</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2856">HDFS-2856</a>.
+     Major improvement reported by Owen O'Malley and fixed by Chris Nauroth (datanode , security)<br>
+     <b>Fix block protocol so that Datanodes don't require root or jsvc</b><br>
+     <blockquote>SASL now can be used to secure the DataTransferProtocol, which transfers file block content between HDFS clients and DataNodes.  In this configuration, it is no longer required for secured clusters to start the DataNode as root and bind to privileged ports.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-573">HDFS-573</a>.
+     Major improvement reported by Ziliang Guo and fixed by Chris Nauroth (libhdfs)<br>
+     <b>Porting libhdfs to Windows</b><br>
+     <blockquote>The libhdfs C API is now supported on Windows.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11286">HADOOP-11286</a>.
+     Blocker bug reported by Christopher Tubbs and fixed by  <br>
+     <b>Map/Reduce dangerously adds Guava @Beta class to CryptoUtils</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11282">HADOOP-11282</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>Skip NFS TestShellBasedIdMapping tests that are irrelevant on Windows.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11280">HADOOP-11280</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (native , test)<br>
+     <b>TestWinUtils#testChmod fails after removal of NO_PROPAGATE_INHERIT_ACE.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11265">HADOOP-11265</a>.
+     Major bug reported by Larry McCay and fixed by Larry McCay (scripts)<br>
+     <b>Credential and Key Shell Commands not available on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11260">HADOOP-11260</a>.
+     Blocker bug reported by Karthik Kambatla and fixed by Mike Yoder (security)<br>
+     <b>Patch up Jetty to disable SSLv3</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11254">HADOOP-11254</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Promoting AccessControlList to be public</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11253">HADOOP-11253</a>.
+     Major test reported by Varun Vasudev and fixed by Varun Vasudev (tools)<br>
+     <b>Hadoop streaming test TestStreamXmlMultipleRecords fails on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11250">HADOOP-11250</a>.
+     Minor bug reported by Yi Liu and fixed by Yi Liu (build)<br>
+     <b>fix endmacro of set_find_shared_library_without_version in CMakeLists</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11247">HADOOP-11247</a>.
+     Major bug reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Fix a couple javac warnings in NFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11243">HADOOP-11243</a>.
+     Blocker bug reported by Wei Yan and fixed by Wei Yan <br>
+     <b>SSLFactory shouldn't allow SSLv3</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11241">HADOOP-11241</a>.
+     Major test reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>TestNMSimulator fails sometimes due to timing issue</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11233">HADOOP-11233</a>.
+     Minor bug reported by Steve Loughran and fixed by Stephen Chu (conf)<br>
+     <b>hadoop.security.kms.client.encrypted.key.cache.expiry property spelled wrong in core-default</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11228">HADOOP-11228</a>.
+     Major bug reported by Remus Rusanu and fixed by Remus Rusanu <br>
+     <b>winutils task: unsecure path should not call AddNodeManagerAndUserACEsToObject</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11221">HADOOP-11221</a>.
+     Major bug reported by Jinghui Wang and fixed by Jinghui Wang (util)<br>
+     <b>JAVA specification for hashcode does not enforce it to be non-negative, but IdentityHashStore assumes System.identityHashCode() is non-negative</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11217">HADOOP-11217</a>.
+     Blocker bug reported by Robert Kanter and fixed by Robert Kanter (kms)<br>
+     <b>Disable SSLv3 in KMS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11216">HADOOP-11216</a>.
+     Major improvement reported by Yi Liu and fixed by Colin Patrick McCabe (security)<br>
+     <b>Improve Openssl library finding</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11207">HADOOP-11207</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen (security)<br>
+     <b>DelegationTokenAuthenticationHandler needs to support DT operations for proxy user</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11198">HADOOP-11198</a>.
+     Minor bug reported by Ted Yu and fixed by Li Lu <br>
+     <b>Fix typo in javadoc for FileSystem#listStatus()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11195">HADOOP-11195</a>.
+     Major improvement reported by Yongjun Zhang and fixed by Yongjun Zhang (nfs , security)<br>
+     <b>Move Id-Name mapping in NFS to the hadoop-common area for better maintenance</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11194">HADOOP-11194</a>.
+     Major bug reported by Karthik Kambatla and fixed by Karthik Kambatla <br>
+     <b>Ignore .keep files</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11193">HADOOP-11193</a>.
+     Major bug reported by Xiaoyu Yao and fixed by Xiaoyu Yao (native)<br>
+     <b>Fix uninitialized variables in NativeIO.c</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11184">HADOOP-11184</a>.
+     Major improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (native)<br>
+     <b>Update Hadoop's lz4 to r123</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11182">HADOOP-11182</a>.
+     Major bug reported by Sascha Coenen and fixed by Ravi Prakash <br>
+     <b>GraphiteSink emits wrong timestamps</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11181">HADOOP-11181</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen (security)<br>
+     <b>o.a.h.security.token.delegation.DelegationTokenManager should be more generalized to handle other DelegationTokenIdentifier</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11179">HADOOP-11179</a>.
+     Major bug reported by Hitesh Shah and fixed by Craig Welch <br>
+     <b>Tarball as local resource type archive fails to localize on Windows </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11178">HADOOP-11178</a>.
+     Minor bug reported by Arun Suresh and fixed by Arun Suresh (build)<br>
+     <b>Fix findbugs exclude file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11176">HADOOP-11176</a>.
+     Major bug reported by Arun Suresh and fixed by Arun Suresh <br>
+     <b>KMSClientProvider authentication fails when both currentUgi and loginUgi are a proxied user</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11175">HADOOP-11175</a>.
+     Trivial bug reported by Yi Liu and fixed by Yi Liu (documentation , security)<br>
+     <b>Fix several issues of hadoop security configuration in user doc.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11174">HADOOP-11174</a>.
+     Major bug reported by Yi Liu and fixed by Yi Liu (kms , security)<br>
+     <b>Delegation token for KMS should only be got once if it already exists</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11170">HADOOP-11170</a>.
+     Major bug reported by Arun Suresh and fixed by Arun Suresh <br>
+     <b>ZKDelegationTokenSecretManager fails to renewToken created by a peer</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11169">HADOOP-11169</a>.
+     Major bug reported by Arun Suresh and fixed by Arun Suresh <br>
+     <b>Fix DelegationTokenAuthenticatedURL to pass the connection Configurator to the authenticator </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11168">HADOOP-11168</a>.
+     Trivial bug reported by Yi Liu and fixed by Yi Liu (documentation)<br>
+     <b>Remove duplicated entry "dfs.webhdfs.enabled" in the user doc</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11163">HADOOP-11163</a>.
+     Minor bug reported by Chuan Liu and fixed by Chuan Liu (metrics)<br>
+     <b>MetricsSystemImpl may miss a registered source</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11161">HADOOP-11161</a>.
+     Major bug reported by Arun Suresh and fixed by Arun Suresh <br>
+     <b>Expose close method in KeyProvider to give clients of Provider implementations a hook to release resources</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11160">HADOOP-11160</a>.
+     Trivial bug reported by Charles Lamb and fixed by Charles Lamb (nfs)<br>
+     <b>Fix typo in nfs3 server duplicate entry reporting</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11154">HADOOP-11154</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (documentation , native)<br>
+     <b>Update BUILDING.txt to state that CMake 3.0 or newer is required on Mac.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11153">HADOOP-11153</a>.
+     Major improvement reported by Andrew Wang and fixed by Andrew Wang (kms)<br>
+     <b>Make number of KMS threads configurable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11151">HADOOP-11151</a>.
+     Major bug reported by zhubin and fixed by Arun Suresh (security)<br>
+     <b>Automatically refresh auth token and retry on auth failure</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11145">HADOOP-11145</a>.
+     Major bug reported by Akira AJISAKA and fixed by Akira AJISAKA (test)<br>
+     <b>TestFairCallQueue fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11143">HADOOP-11143</a>.
+     Minor bug reported by Steve Loughran and fixed by Steve Loughran (net)<br>
+     <b>NetUtils.wrapException loses inner stack trace on BindException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11140">HADOOP-11140</a>.
+     Major bug reported by Juan Yu and fixed by Juan Yu <br>
+     <b>hadoop-aws only need test-scoped dependency on hadoop-common's tests jar</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11133">HADOOP-11133</a>.
+     Minor bug reported by zhubin and fixed by Yi Liu (security)<br>
+     <b>Should trim the content of keystore password file for JavaKeyStoreProvider</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11130">HADOOP-11130</a>.
+     Major bug reported by Allen Wittenauer and fixed by Brandon Li (nfs)<br>
+     <b>NFS updateMaps OS check is reversed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11122">HADOOP-11122</a>.
+     Blocker bug reported by Karthik Kambatla and fixed by Arun Suresh <br>
+     <b>Fix findbugs in ZK DelegationTokenSecretManagers </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11113">HADOOP-11113</a>.
+     Major bug reported by Arun Suresh and fixed by Arun Suresh (security)<br>
+     <b>Namenode not able to reconnect to KMS after KMS restart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11112">HADOOP-11112</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>TestKMSWithZK does not use KEY_PROVIDER_URI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11111">HADOOP-11111</a>.
+     Minor improvement reported by Steve Loughran and fixed by Steve Loughran (tools)<br>
+     <b>MiniKDC to use locale EN_US for case conversions</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11110">HADOOP-11110</a>.
+     Major bug reported by Andrew Wang and fixed by Arun Suresh <br>
+     <b>JavaKeystoreProvider should not report a key as created if it was not flushed to the backing file</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11109">HADOOP-11109</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>Site build is broken </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11106">HADOOP-11106</a>.
+     Minor improvement reported by Andrew Wang and fixed by Charles Lamb (documentation)<br>
+     <b>Document considerations of HAR and Encryption</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11105">HADOOP-11105</a>.
+     Major bug reported by Chuan Liu and fixed by Chuan Liu (metrics)<br>
+     <b>MetricsSystemImpl could leak memory in registered callbacks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11101">HADOOP-11101</a>.
+     Minor improvement reported by skrho and fixed by skrho <br>
+     <b>How about inputstream close statement from catch block to finally block in FileContext#copy() ?</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11099">HADOOP-11099</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>KMS return HTTP UNAUTHORIZED 401 on ACL failure</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11097">HADOOP-11097</a>.
+     Trivial bug reported by Charles Lamb and fixed by Charles Lamb (documentation)<br>
+     <b>kms docs say proxyusers, not proxyuser for config params</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11096">HADOOP-11096</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>KMS: KeyAuthorizationKeyProvider should verify the keyversion belongs to the keyname on decrypt</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11091">HADOOP-11091</a>.
+     Minor bug reported by David S. Wang and fixed by David S. Wang (fs/s3)<br>
+     <b>Eliminate old configuration parameter names from s3a</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11088">HADOOP-11088</a>.
+     Major test reported by Xiaoyu Yao and fixed by Xiaoyu Yao (security)<br>
+     <b>Unittest TestKeyShell, TestCredShell and TestKMS assume UNIX path separator for JECKS key store path</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11085">HADOOP-11085</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>Excessive logging by org.apache.hadoop.util.Progress when value is NaN</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11083">HADOOP-11083</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>After refactoring of HTTP proxyuser to common, doAs param is case sensitive</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11077">HADOOP-11077</a>.
+     Major bug reported by Gregory Chanan and fixed by Gregory Chanan (security)<br>
+     <b>NPE if hosts not specified in ProxyUsers</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11074">HADOOP-11074</a>.
+     Major sub-task reported by David S. Wang and fixed by David S. Wang (fs/s3)<br>
+     <b>Move s3-related FS connector code to hadoop-aws</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11073">HADOOP-11073</a>.
+     Major test reported by Xiaoyu Yao and fixed by Xiaoyu Yao (security)<br>
+     <b>Credential Provider related Unit Tests Failure on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11071">HADOOP-11071</a>.
+     Minor test reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>KMSClientProvider should drain the local generated EEK cache on key rollover</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11070">HADOOP-11070</a>.
+     Major test reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security , test)<br>
+     <b>Create MiniKMS for testing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11069">HADOOP-11069</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>KMSClientProvider should use getAuthenticationMethod() to determine if in proxyuser mode or not</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11068">HADOOP-11068</a>.
+     Major improvement reported by Gregory Chanan and fixed by Gregory Chanan (security)<br>
+     <b>Match hadoop.auth cookie format to jetty output</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11067">HADOOP-11067</a>.
+     Major bug reported by Yesha Vora and fixed by Xiaoyu Yao <br>
+     <b>warning message 'ssl.client.truststore.location has not been set' gets printed for hftp command</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11064">HADOOP-11064</a>.
+     Blocker bug reported by Steve Loughran and fixed by Chris Nauroth (native)<br>
+     <b>UnsatisfiedLinkError with hadoop 2.4 JARs on hadoop-2.6 due to NativeCRC32 method changes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11063">HADOOP-11063</a>.
+     Blocker bug reported by Chris Nauroth and fixed by Chris Nauroth <br>
+     <b>KMS cannot deploy on Windows, because class names are too long.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11062">HADOOP-11062</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Arun Suresh (security , test)<br>
+     <b>CryptoCodec testcases requiring OpenSSL should be run only if -Pnative is used</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11060">HADOOP-11060</a>.
+     Major test reported by Alejandro Abdelnur and fixed by Yi Liu (security)<br>
+     <b>Create a CryptoCodec test that verifies interoperability between the JCE and OpenSSL implementations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11057">HADOOP-11057</a>.
+     Minor improvement reported by Steve Loughran and fixed by Xiaoyu Yao (native)<br>
+     <b>checknative command to probe for winutils.exe on windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11056">HADOOP-11056</a>.
+     Major bug reported by Yongjun Zhang and fixed by Yongjun Zhang (security)<br>
+     <b>OsSecureRandom.setConf() might leak file descriptors.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11054">HADOOP-11054</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>Add a KeyProvider instantiation based on a URI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11049">HADOOP-11049</a>.
+     Minor bug reported by Sangjin Lee and fixed by Sangjin Lee (util)<br>
+     <b>javax package system class default is too broad</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11048">HADOOP-11048</a>.
+     Minor bug reported by Sangjin Lee and fixed by Sangjin Lee (util)<br>
+     <b>user/custom LogManager fails to load if the client classloader is enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11040">HADOOP-11040</a>.
+     Major bug reported by Yi Liu and fixed by Yi Liu (security)<br>
+     <b>Return value of read(ByteBuffer buf) in CryptoInputStream is incorrect in some cases</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11036">HADOOP-11036</a>.
+     Minor bug reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA <br>
+     <b>Add build directory to .gitignore</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11030">HADOOP-11030</a>.
+     Minor improvement reported by Juan Yu and fixed by Juan Yu <br>
+     <b>Define a variable jackson.version instead of using constant at multiple places</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11021">HADOOP-11021</a>.
+     Minor improvement reported by Zhe Zhang and fixed by Zhe Zhang <br>
+     <b>Configurable replication factor in the hadoop archive command</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11017">HADOOP-11017</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Arun Suresh (security)<br>
+     <b>KMS delegation token secret manager should be able to use zookeeper as store</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11016">HADOOP-11016</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>KMS should support signing cookies with zookeeper secret manager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11015">HADOOP-11015</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
+     <b>Http server/client utils to propagate and recreate Exceptions from server to client</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11012">HADOOP-11012</a>.
+     Major bug reported by Eric Payne and fixed by Eric Payne (fs)<br>
+     <b>hadoop fs -text of zero-length file causes EOFException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11009">HADOOP-11009</a>.
+     Major improvement reported by Gary Steelman and fixed by Gary Steelman (tools/distcp)<br>
+     <b>Add Timestamp Preservation to DistCp</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11007">HADOOP-11007</a>.
+     Major improvement reported by Jason Lowe and fixed by Jason Lowe (build , fs)<br>
+     <b>Reinstate building of ant tasks support</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-11005">HADOOP-11005</a>.
+     Minor bug reported by Lei (Eddy) Xu and fixed by Lei (Eddy) Xu (conf)<br>
+     <b>Fix HTTP content type for ReconfigurationServlet</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10998">HADOOP-10998</a>.
+     Trivial improvement reported by Jim Hester and fixed by Jim Hester (scripts)<br>
+     <b>Fix bash tab completion code to work</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10994">HADOOP-10994</a>.
+     Major task reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>KeyProviderCryptoExtension should use CryptoCodec for generation/decryption of keys</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10992">HADOOP-10992</a>.
+     Major task reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>Merge KMS to branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10990">HADOOP-10990</a>.
+     Major improvement reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Add missed NFSv3 request and response classes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10989">HADOOP-10989</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (native)<br>
+     <b>Work around buggy getgrouplist() implementations on Linux that return 0 on failure</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10982">HADOOP-10982</a>.
+     Major improvement reported by Andrew Wang and fixed by Alejandro Abdelnur <br>
+     <b>KMS: Support for multiple Kerberos principals</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10975">HADOOP-10975</a>.
+     Major improvement reported by James Thomas and fixed by James Thomas (performance)<br>
+     <b>org.apache.hadoop.util.DataChecksum should support native checksum calculation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10973">HADOOP-10973</a>.
+     Minor bug reported by Peter Klavins and fixed by Peter Klavins (documentation)<br>
+     <b>Native Libraries Guide contains format error</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10972">HADOOP-10972</a>.
+     Major bug reported by Peter Klavins and fixed by Peter Klavins (documentation)<br>
+     <b>Native Libraries Guide contains mis-spelt build line</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10970">HADOOP-10970</a>.
+     Major improvement reported by Andrew Wang and fixed by Andrew Wang <br>
+     <b>Cleanup KMS configuration keys</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10968">HADOOP-10968</a>.
+     Major bug reported by Dinar Valeev and fixed by  (build)<br>
+     <b>hadoop native build fails to detect java_libarch on ppc64le</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10967">HADOOP-10967</a>.
+     Major improvement reported by Yi Liu and fixed by Yi Liu (security)<br>
+     <b>Improve DefaultCryptoExtension#generateEncryptedKey performance </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10966">HADOOP-10966</a>.
+     Blocker bug reported by Vinayakumar B and fixed by David Villegas (native)<br>
+     <b>Hadoop Common native compilation broken in windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10964">HADOOP-10964</a>.
+     Minor bug reported by Yi Liu and fixed by Yi Liu <br>
+     <b>Small fix for NetworkTopologyWithNodeGroup#sortByDistance</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10962">HADOOP-10962</a>.
+     Major bug reported by David Villegas and fixed by David Villegas (native)<br>
+     <b>Flags for posix_fadvise are not valid in some architectures</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10954">HADOOP-10954</a>.
+     Minor improvement reported by Masatake Iwasaki and fixed by Masatake Iwasaki (documentation)<br>
+     <b>Adding site documents of hadoop-tools</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10946">HADOOP-10946</a>.
+     Trivial bug reported by Ray Chiang and fixed by Ray Chiang <br>
+     <b>Fix a bunch of typos in log messages</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10939">HADOOP-10939</a>.
+     Major bug reported by Arun Suresh and fixed by Arun Suresh <br>
+     <b>Fix TestKeyProviderFactory testcases to use default 128 bit length keys</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10937">HADOOP-10937</a>.
+     Major bug reported by Arun Suresh and fixed by Arun Suresh (security)<br>
+     <b>Need to set version name correctly before decrypting EEK</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10936">HADOOP-10936</a>.
+     Major improvement reported by Andrew Wang and fixed by Andrew Wang <br>
+     <b>Change default KeyProvider bitlength to 128</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10933">HADOOP-10933</a>.
+     Major sub-task reported by Larry McCay and fixed by Larry McCay (security)<br>
+     <b>FileBasedKeyStoresFactory Should use Configuration.getPassword for SSL Passwords</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10931">HADOOP-10931</a>.
+     Minor bug reported by xukun and fixed by  (build , fs/swift)<br>
+     <b>compile error on project "Apache Hadoop OpenStack support"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10929">HADOOP-10929</a>.
+     Trivial bug reported by Larry McCay and fixed by Larry McCay (security)<br>
+     <b>Typo in Configuration.getPasswordFromCredentialProviders</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10928">HADOOP-10928</a>.
+     Trivial bug reported by Josh Elser and fixed by Josh Elser (security)<br>
+     <b>Incorrect usage on 'hadoop credential list'</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10927">HADOOP-10927</a>.
+     Minor bug reported by Josh Elser and fixed by Josh Elser (security)<br>
+     <b>Fix CredentialShell help behavior and error codes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10925">HADOOP-10925</a>.
+     Blocker bug reported by Chris Nauroth and fixed by Chris Nauroth (native)<br>
+     <b>Compilation fails in native link0 function on Windows.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10922">HADOOP-10922</a>.
+     Major improvement reported by Andrew Wang and fixed by Larry McCay <br>
+     <b>User documentation for CredentialShell</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10920">HADOOP-10920</a>.
+     Minor bug reported by Ted Yu and fixed by Akira AJISAKA (documentation)<br>
+     <b>site plugin couldn't parse hadoop-kms index.apt.vm</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10918">HADOOP-10918</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur <br>
+     <b>JMXJsonServlet fails when used within Tomcat</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10911">HADOOP-10911</a>.
+     Major bug reported by Gregory Chanan and fixed by  (security)<br>
+     <b>hadoop.auth cookie after HADOOP-10710 still not proper according to RFC2109</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10905">HADOOP-10905</a>.
+     Major sub-task reported by Larry McCay and fixed by Larry McCay (security)<br>
+     <b>LdapGroupsMapping Should use configuration.getPassword for SSL and LDAP Passwords</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10903">HADOOP-10903</a>.
+     Major improvement reported by Chris Nauroth and fixed by Chris Nauroth (scripts , util)<br>
+     <b>Enhance hadoop classpath command to expand wildcards or write classpath into jar manifest.</b><br>
+     <blockquote>The "hadoop classpath" command has been enhanced to support options for automatic expansion of wildcards in classpath elements and writing the classpath to a jar file manifest.  These options make it easier to construct a correct classpath for libhdfs applications.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10902">HADOOP-10902</a>.
+     Minor improvement reported by Stephen Chu and fixed by Stephen Chu <br>
+     <b>Deletion of directories with snapshots will not output reason for trash move failure</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10900">HADOOP-10900</a>.
+     Minor bug reported by Andrew Wang and fixed by Andrew Wang <br>
+     <b>CredentialShell args should use single-dash style</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10893">HADOOP-10893</a>.
+     Major new feature reported by Sangjin Lee and fixed by Sangjin Lee (util)<br>
+     <b>isolated classloader on the client side</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10891">HADOOP-10891</a>.
+     Major improvement reported by Andrew Wang and fixed by Andrew Wang <br>
+     <b>Add EncryptedKeyVersion factory method to KeyProviderCryptoExtension</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10887">HADOOP-10887</a>.
+     Major bug reported by Stephen Chu and fixed by Stephen Chu (fs , test)<br>
+     <b>Add XAttrs to ViewFs and make XAttrs + ViewFileSystem internal dir behavior consistent</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10886">HADOOP-10886</a>.
+     Major sub-task reported by Uma Maheswara Rao G and fixed by Uma Maheswara Rao G (fs)<br>
+     <b>CryptoCodec#getCodecclasses throws NPE when configurations not loaded.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10884">HADOOP-10884</a>.
+     Minor sub-task reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Fix dead link in Configuration javadoc</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10882">HADOOP-10882</a>.
+     Minor task reported by Todd Lipcon and fixed by Todd Lipcon (util)<br>
+     <b>Move DirectBufferPool into common util</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10881">HADOOP-10881</a>.
+     Major improvement reported by Andrew Wang and fixed by Andrew Wang <br>
+     <b>Clarify usage of encryption and encrypted encryption key in KeyProviderCryptoExtension</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10880">HADOOP-10880</a>.
+     Blocker bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>Move HTTP delegation tokens out of URL querystring to a header</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10868">HADOOP-10868</a>.
+     Major sub-task reported by Robert Kanter and fixed by Robert Kanter (security)<br>
+     <b>Create a ZooKeeper-backed secret provider</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10866">HADOOP-10866</a>.
+     Major bug reported by Yongjun Zhang and fixed by Yongjun Zhang <br>
+     <b>RawLocalFileSystem fails to read symlink targets via the stat command when the format of stat command uses non-curly quotes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10863">HADOOP-10863</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Arun Suresh (security)<br>
+     <b>KMS should have a blacklist for decrypting EEKs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10862">HADOOP-10862</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Arun Suresh (security)<br>
+     <b>Miscellaneous trivial corrections to KMS classes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10857">HADOOP-10857</a>.
+     Major bug reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA (documentation)<br>
+     <b>Native Libraries Guide doesn't mention a dependency on openssl-development package</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10855">HADOOP-10855</a>.
+     Minor improvement reported by Todd Lipcon and fixed by Todd Lipcon (io)<br>
+     <b>Allow Text to be read with a known length</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10851">HADOOP-10851</a>.
+     Major bug reported by Benoy Antony and fixed by Benoy Antony (security)<br>
+     <b>NetgroupCache does not remove group memberships</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10845">HADOOP-10845</a>.
+     Major improvement reported by Chris Nauroth and fixed by Stephen Chu (fs , test)<br>
+     <b>Add common tests for ACLs in combination with viewfs.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10843">HADOOP-10843</a>.
+     Major bug reported by Jinghui Wang and fixed by Jinghui Wang (test , tools)<br>
+     <b>TestGridmixRecord unit tests failure on PowerPC</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10842">HADOOP-10842</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Arun Suresh (security)<br>
+     <b>CryptoExtension generateEncryptedKey method should receive the key name</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10841">HADOOP-10841</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Arun Suresh (security)<br>
+     <b>EncryptedKeyVersion should have a key name property</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10839">HADOOP-10839</a>.
+     Major improvement reported by shanyu zhao and fixed by shanyu zhao (metrics)<br>
+     <b>Add unregisterSource() to MetricsSystem API</b><br>
+     <blockquote>The MetricsSystem abstract class has added a new abstract method, unregisterSource, for unregistering a previously registered metrics source.  Custom subclasses of MetricsSystem must be updated to provide an implementation of this method.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10838">HADOOP-10838</a>.
+     Major improvement reported by James Thomas and fixed by James Thomas (performance)<br>
+     <b>Byte array native checksumming</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10835">HADOOP-10835</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>Implement HTTP proxyuser support in HTTP authentication client/server libraries</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10833">HADOOP-10833</a>.
+     Major improvement reported by Benoy Antony and fixed by Benoy Antony (security)<br>
+     <b>Remove unused cache in UserProvider</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10830">HADOOP-10830</a>.
+     Major bug reported by Benoy Antony and fixed by Benoy Antony (security)<br>
+     <b>Missing lock in JavaKeyStoreProvider.createCredentialEntry</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10826">HADOOP-10826</a>.
+     Major improvement reported by Benoy Antony and fixed by Benoy Antony (security)<br>
+     <b>Iteration on KeyProviderFactory.serviceLoader  is thread-unsafe</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10824">HADOOP-10824</a>.
+     Major improvement reported by Benoy Antony and fixed by Benoy Antony (security)<br>
+     <b>Refactor KMSACLs to avoid locking</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10820">HADOOP-10820</a>.
+     Minor bug reported by Alex Holmes and fixed by zhihai xu <br>
+     <b>Throw an exception in GenericOptionsParser when passed an empty Path</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10817">HADOOP-10817</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>ProxyUsers configuration should support configurable prefixes </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10816">HADOOP-10816</a>.
+     Major bug reported by Mike Yoder and fixed by Mike Yoder (security)<br>
+     <b>KeyShell returns -1 on error to the shell, should be 1</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10815">HADOOP-10815</a>.
+     Major improvement reported by Chris Nauroth and fixed by Chris Nauroth (native)<br>
+     <b>Implement Windows equivalent of mlock.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10814">HADOOP-10814</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Robert Kanter <br>
+     <b>Update Tomcat version used by HttpFS and KMS to latest 6.x version</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10812">HADOOP-10812</a>.
+     Trivial improvement reported by Andrew Wang and fixed by Andrew Wang <br>
+     <b>Delegate KeyProviderExtension#toString to underlying KeyProvider</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10810">HADOOP-10810</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (native)<br>
+     <b>Clean up native code compilation warnings.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10808">HADOOP-10808</a>.
+     Minor improvement reported by Chris Nauroth and fixed by Chris Nauroth (native)<br>
+     <b>Remove unused native code for munlock.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10793">HADOOP-10793</a>.
+     Major improvement reported by Mike Yoder and fixed by Andrew Wang (security)<br>
+     <b>KeyShell args should use single-dash style</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10791">HADOOP-10791</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Robert Kanter (security)<br>
+     <b>AuthenticationFilter should support externalizing the secret for signing and provide rotation support</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10781">HADOOP-10781</a>.
+     Major bug reported by Dmitry Sivachenko and fixed by Dmitry Sivachenko <br>
+     <b>Unportable getgrouplist() usage breaks FreeBSD</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10780">HADOOP-10780</a>.
+     Major bug reported by Dmitry Sivachenko and fixed by Dmitry Sivachenko <br>
+     <b>hadoop_user_info_alloc fails on FreeBSD due to incorrect sysconf use</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10771">HADOOP-10771</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>Refactor HTTP delegation support out of httpfs to common</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10770">HADOOP-10770</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>KMS add delegation token support</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10769">HADOOP-10769</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Arun Suresh (security)<br>
+     <b>Create KeyProvider extension to handle delegation tokens</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10758">HADOOP-10758</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Arun Suresh (security)<br>
+     <b>KMS: add ACLs on per key basis.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10757">HADOOP-10757</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Arun Suresh (security)<br>
+     <b>KeyProvider KeyVersion should provide the key name</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10756">HADOOP-10756</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Arun Suresh (security)<br>
+     <b>KMS audit log should consolidate successful similar requests</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10755">HADOOP-10755</a>.
+     Major improvement reported by Andrew Wang and fixed by Lei (Eddy) Xu (security)<br>
+     <b>Support negative caching of user-group mapping</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10750">HADOOP-10750</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Arun Suresh (security)<br>
+     <b>KMSKeyProviderCache should be in hadoop-common</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10736">HADOOP-10736</a>.
+     Major improvement reported by Mike Yoder and fixed by Mike Yoder (security)<br>
+     <b>Add key attributes to the key shell</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10733">HADOOP-10733</a>.
+     Minor bug reported by Ted Yu and fixed by Ted Yu <br>
+     <b>Potential null dereference in CredentialShell#promptForCredential()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10732">HADOOP-10732</a>.
+     Minor bug reported by Ted Yu and fixed by Ted Yu <br>
+     <b>Update without holding write lock in JavaKeyStoreProvider#innerSetCredential()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10731">HADOOP-10731</a>.
+     Trivial improvement reported by Henry Saputra and fixed by Henry Saputra (documentation)<br>
+     <b>Remove @date JavaDoc comment in ProgramDriver class</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10720">HADOOP-10720</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Arun Suresh (security)<br>
+     <b>KMS: Implement generateEncryptedKey and decryptEncryptedKey in the REST API</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10719">HADOOP-10719</a>.
+     Major new feature reported by Alejandro Abdelnur and fixed by Arun Suresh (security)<br>
+     <b>Add generateEncryptedKey and decryptEncryptedKey methods to KeyProvider</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10698">HADOOP-10698</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>KMS, add proxyuser support</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10696">HADOOP-10696</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>Add optional attributes to KeyProvider Options and Metadata</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10695">HADOOP-10695</a>.
+     Major improvement reported by Andrew Wang and fixed by Mike Yoder <br>
+     <b>KMSClientProvider should respect a configurable timeout.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10681">HADOOP-10681</a>.
+     Major bug reported by Gopal V and fixed by Gopal V (performance)<br>
+     <b>Remove synchronized blocks from SnappyCodec and ZlibCodec buffering inner loop</b><br>
+     <blockquote>Remove unnecessary synchronized blocks from Snappy/Zlib codecs.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10677">HADOOP-10677</a>.
+     Major bug reported by David S. Wang and fixed by David S. Wang (fs/s3)<br>
+     <b>ExportSnapshot fails on kerberized cluster using s3a</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10676">HADOOP-10676</a>.
+     Major bug reported by David S. Wang and fixed by David S. Wang (fs/s3)<br>
+     <b>S3AOutputStream not reading new config knobs for multipart configs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10675">HADOOP-10675</a>.
+     Major improvement reported by David S. Wang and fixed by David S. Wang (fs/s3)<br>
+     <b>Add server-side encryption functionality to s3a</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10673">HADOOP-10673</a>.
+     Major bug reported by Ming Ma and fixed by Ming Ma <br>
+     <b>Update rpc metrics when the call throws an exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10650">HADOOP-10650</a>.
+     Major sub-task reported by Benoy Antony and fixed by Benoy Antony (security)<br>
+     <b>Add ability to specify a reverse ACL (black list) of users and groups</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10645">HADOOP-10645</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>TestKMS fails because race condition writing acl files</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10635">HADOOP-10635</a>.
+     Major sub-task reported by Alejandro Abdelnur and fixed by Yi Liu (security)<br>
+     <b>Add a method to CryptoCodec to generate SRNs for IV</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10632">HADOOP-10632</a>.
+     Major sub-task reported by Alejandro Abdelnur and fixed by Yi Liu (security)<br>
+     <b>Minor improvements to Crypto input and output streams</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10611">HADOOP-10611</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>KMS, keyVersion name should not be assumed to be keyName@versionNumber</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10610">HADOOP-10610</a>.
+     Minor improvement reported by Ted Malaska and fixed by Ted Malaska (fs/s3)<br>
+     <b>Upgrade S3n s3.fs.buffer.dir to support multi directories</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10607">HADOOP-10607</a>.
+     Major new feature reported by Larry McCay and fixed by Larry McCay (security)<br>
+     <b>Create an API to Separate Credentials/Password Storage from Applications</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10591">HADOOP-10591</a>.
+     Major bug reported by Hari Shreedharan and fixed by Colin Patrick McCabe <br>
+     <b>Compression codecs must use pooled direct buffers or deallocate direct buffers when stream is closed</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10586">HADOOP-10586</a>.
+     Minor bug reported by Charles Lamb and fixed by Charles Lamb (bin)<br>
+     <b>KeyShell doesn't allow setting Options via CLI</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10583">HADOOP-10583</a>.
+     Minor bug reported by Charles Lamb and fixed by Charles Lamb (bin)<br>
+     <b>bin/hadoop key throws NPE with no args and assorted other fixups</b><br>
+     <blockquote>bin/hadoop key
+with no args would throw an NPE.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10552">HADOOP-10552</a>.
+     Trivial bug reported by Kenji Kikushima and fixed by Kenji Kikushima (documentation)<br>
+     <b>Fix usage and example at FileSystemShell.apt.vm</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10534">HADOOP-10534</a>.
+     Major bug reported by Owen O'Malley and fixed by Owen O'Malley <br>
+     <b>KeyProvider API should use windowing for retrieving metadata</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10507">HADOOP-10507</a>.
+     Minor bug reported by Stephen Chu and fixed by sathish (fs)<br>
+     <b>FsShell setfacl can throw ArrayIndexOutOfBoundsException when no perm is specified</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10488">HADOOP-10488</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (test)<br>
+     <b>TestKeyProviderFactory fails randomly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10433">HADOOP-10433</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>Key Management Server based on KeyProvider API</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10432">HADOOP-10432</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>Refactor SSLFactory to expose static method to determine HostnameVerifier</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10431">HADOOP-10431</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>Change visibility of KeyStore.Options getter methods to public</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10430">HADOOP-10430</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>KeyProvider Metadata should have an optional description, there should be a method to retrieve the metadata from all keys</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10429">HADOOP-10429</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>KeyStores should have methods to generate the materials themselves, KeyShell should use them</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10428">HADOOP-10428</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>JavaKeyStoreProvider should accept keystore password via configuration falling back to ENV VAR</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10427">HADOOP-10427</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (security)<br>
+     <b>KeyProvider implementations should be thread safe</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10404">HADOOP-10404</a>.
+     Minor bug reported by Ted Yu and fixed by Colin Patrick McCabe <br>
+     <b>Some accesses to DomainSocketWatcher#closed are not protected by lock</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10402">HADOOP-10402</a>.
+     Major bug reported by Robert Kanter and fixed by Robert Kanter <br>
+     <b>Configuration.getValByRegex does not substitute for variables</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10400">HADOOP-10400</a>.
+     Major new feature reported by Jordan Mendelson and fixed by Jordan Mendelson (fs , fs/s3)<br>
+     <b>Incorporate new S3A FileSystem implementation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10373">HADOOP-10373</a>.
+     Major improvement reported by Steve Loughran and fixed by Steve Loughran (fs/s3)<br>
+     <b>create tools/hadoop-amazon for aws/EMR support</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10335">HADOOP-10335</a>.
+     Major improvement reported by Benoy Antony and fixed by Benoy Antony <br>
+     <b>An ip whitelist based implementation to resolve Sasl properties per connection</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10281">HADOOP-10281</a>.
+     Major sub-task reported by Chris Li and fixed by Chris Li <br>
+     <b>Create a scheduler, which assigns schedulables a priority level</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10244">HADOOP-10244</a>.
+     Major bug reported by Larry McCay and fixed by Larry McCay (security)<br>
+     <b>TestKeyShell improperly tests the results of a Delete</b><br>
+     <blockquote>Fix of inappropriate test of delete functionality.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10237">HADOOP-10237</a>.
+     Major bug reported by Larry McCay and fixed by Larry McCay (security)<br>
+     <b>JavaKeyStoreProvider needs to set keystore permissions properly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10231">HADOOP-10231</a>.
+     Minor improvement reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Add some components in Native Libraries document</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10224">HADOOP-10224</a>.
+     Major bug reported by Larry McCay and fixed by Arun Suresh (security)<br>
+     <b>JavaKeyStoreProvider has to protect against corrupting underlying store</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10201">HADOOP-10201</a>.
+     Major sub-task reported by Larry McCay and fixed by Larry McCay (security)<br>
+     <b>Add Listing Support to Key Management APIs</b><br>
+     <blockquote>I just committed this. Thanks, Larry!</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10177">HADOOP-10177</a>.
+     Major bug reported by Owen O'Malley and fixed by Larry McCay (security)<br>
+     <b>Create CLI tools for managing keys via the KeyProvider API</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10150">HADOOP-10150</a>.
+     Major new feature reported by Yi Liu and fixed by Yi Liu (security)<br>
+     <b>Hadoop cryptographic file system</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10141">HADOOP-10141</a>.
+     Major bug reported by Owen O'Malley and fixed by Owen O'Malley (security)<br>
+     <b>Create an API to separate encryption key storage from applications</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10131">HADOOP-10131</a>.
+     Major bug reported by Vinayakumar B and fixed by Vinayakumar B <br>
+     <b>NetWorkTopology#countNumOfAvailableNodes() is returning wrong value if excluded nodes passed are not part of the cluster tree</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10121">HADOOP-10121</a>.
+     Trivial bug reported by Akira AJISAKA and fixed by Akira AJISAKA (documentation)<br>
+     <b>Fix javadoc spelling for HadoopArchives#writeTopLevelDirs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10059">HADOOP-10059</a>.
+     Minor bug reported by Jason Lowe and fixed by Tsuyoshi OZAWA (metrics)<br>
+     <b>RPC authentication and authorization metrics overflow to negative values on busy clusters</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9989">HADOOP-9989</a>.
+     Major bug reported by Jinghui Wang and fixed by zhihai xu (security , util)<br>
+     <b>Bug introduced in HADOOP-9374, which parses the -tokenCacheFile as binary file but set it to the configuration as JSON file.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9921">HADOOP-9921</a>.
+     Major bug reported by Vinayakumar B and fixed by Vinayakumar B <br>
+     <b>daemon scripts should remove pid file on stop call after stop or process is found not running</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9740">HADOOP-9740</a>.
+     Major bug reported by Allan Yan and fixed by Allan Yan (fs)<br>
+     <b>FsShell's Text command does not read avro data files stored on HDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9576">HADOOP-9576</a>.
+     Major bug reported by Jian He and fixed by Steve Loughran <br>
+     <b>Make NetUtils.wrapException throw EOFException instead of wrapping it as IOException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9540">HADOOP-9540</a>.
+     Minor improvement reported by Hari and fixed by  (fs/s3 , test)<br>
+     <b>Expose the InMemoryS3 and S3N FilesystemStores implementations for Unit testing.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8944">HADOOP-8944</a>.
+     Trivial improvement reported by Jonathan Allen and fixed by Jonathan Allen <br>
+     <b>Shell command fs -count should include human readable option</b><br>
+     <blockquote>Implements -h option for fs -count to show file sizes in human readable format. Additionally, ContentSummary.getHeader() now returns a different string that is incompatible with previous releases.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8896">HADOOP-8896</a>.
+     Trivial improvement reported by Timothy Mann and fixed by Ray Chiang (documentation , io)<br>
+     <b>Javadoc points to Wrong Reader and Writer classes in SequenceFile</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8815">HADOOP-8815</a>.
+     Minor improvement reported by Brandon Li and fixed by Brandon Li (test)<br>
+     <b>RandomDatum overrides equals(Object) but no hashCode()</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8808">HADOOP-8808</a>.
+     Major bug reported by Hemanth Yamijala and fixed by Akira AJISAKA (documentation , fs)<br>
+     <b>Update FsShell documentation to mention deprecation of some of the commands, and mention alternatives</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8158">HADOOP-8158</a>.
+     Major bug reported by Todd Lipcon and fixed by Daryn Sharp <br>
+     <b>Interrupting hadoop fs -put from the command line causes a LeaseExpiredException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8069">HADOOP-8069</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (ipc)<br>
+     <b>Enable TCP_NODELAY by default for IPC</b><br>
+     <blockquote>This change enables the TCP_NODELAY flag for all Hadoop IPC connections, hence bypassing TCP Nagling. Nagling interacts poorly with TCP delayed ACKs especially for request-response protocols.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-7664">HADOOP-7664</a>.
+     Minor improvement reported by Ravi Prakash and fixed by Ravi Prakash (conf)<br>
+     <b>o.a.h.conf.Configuration complains of overriding final parameter even if the value with which its attempting to override is the same. </b><br>
+     <blockquote></blockquote></li>
+</ul>
+</body></html>
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
 <title>Hadoop  2.4.1 Release Notes</title>
 <STYLE type="text/css">
 	H1 {font-family: sans-serif}

+ 75 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.conf;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import java.io.BufferedInputStream;
 import java.io.DataInput;
 import java.io.DataOutput;
@@ -67,6 +69,7 @@ import javax.xml.transform.TransformerFactory;
 import javax.xml.transform.dom.DOMSource;
 import javax.xml.transform.stream.StreamResult;
 
+import com.google.common.base.Charsets;
 import org.apache.commons.collections.map.UnmodifiableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -177,6 +180,11 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     LogFactory.getLog("org.apache.hadoop.conf.Configuration.deprecation");
 
   private boolean quietmode = true;
+
+  private static final String DEFAULT_STRING_CHECK =
+    "testingforemptydefaultvalue";
+
+  private boolean allowNullValueProperties = false;
   
   private static class Resource {
     private final Object resource;
@@ -558,6 +566,32 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     return deprecationContext.get().getDeprecatedKeyMap().containsKey(key);
   }
 
+  /**
+   * Sets all deprecated properties that are not currently set but have a
+   * corresponding new property that is set. Useful for iterating the
+   * properties when all deprecated properties for currently set properties
+   * need to be present.
+   */
+  public void setDeprecatedProperties() {
+    DeprecationContext deprecations = deprecationContext.get();
+    Properties props = getProps();
+    Properties overlay = getOverlay();
+    for (Map.Entry<String, DeprecatedKeyInfo> entry :
+        deprecations.getDeprecatedKeyMap().entrySet()) {
+      String depKey = entry.getKey();
+      if (!overlay.contains(depKey)) {
+        for (String newKey : entry.getValue().newKeys) {
+          String val = overlay.getProperty(newKey);
+          if (val != null) {
+            props.setProperty(depKey, val);
+            overlay.setProperty(depKey, val);
+            break;
+          }
+        }
+      }
+    }
+  }
+
   /**
    * Checks for the presence of the property <code>name</code> in the
    * deprecation map. Returns the first of the list of new keys if present
@@ -869,7 +903,38 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     }
     return result;
   }
-  
+
+  /**
+   * Set Configuration to allow keys without values during setup.  Intended
+   * for use during testing.
+   *
+   * @param val If true, will allow Configuration to store keys without values
+   */
+  @VisibleForTesting
+  public void setAllowNullValueProperties( boolean val ) {
+    this.allowNullValueProperties = val;
+  }
+
+  /**
+   * Return existence of the <code>name</code> property, but only for
+   * names which have no valid value, usually non-existent or commented
+   * out in XML.
+   *
+   * @param name the property name
+   * @return true if the property <code>name</code> exists without value
+   */
+  @VisibleForTesting
+  public boolean onlyKeyExists(String name) {
+    String[] names = handleDeprecation(deprecationContext.get(), name);
+    for(String n : names) {
+      if ( getProps().getProperty(n,DEFAULT_STRING_CHECK)
+               .equals(DEFAULT_STRING_CHECK) ) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   /**
    * Get the value of the <code>name</code> property as a trimmed <code>String</code>, 
    * <code>null</code> if no such property exists. 
@@ -1461,11 +1526,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    * @param pattern new value
    */
   public void setPattern(String name, Pattern pattern) {
-    if (null == pattern) {
-      set(name, null);
-    } else {
-      set(name, pattern.pattern());
-    }
+    assert pattern != null : "Pattern cannot be null";
+    set(name, pattern.pattern());
   }
 
   /**
@@ -2240,7 +2302,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
         LOG.info("found resource " + name + " at " + url);
       }
 
-      return new InputStreamReader(url.openStream());
+      return new InputStreamReader(url.openStream(), Charsets.UTF_8);
     } catch (Exception e) {
       return null;
     }
@@ -2303,9 +2365,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     // code.
     Map<String,String> result = new HashMap<String,String>();
     for(Map.Entry<Object,Object> item: getProps().entrySet()) {
-      if (item.getKey() instanceof String && 
+      if (item.getKey() instanceof String &&
           item.getValue() instanceof String) {
-        result.put((String) item.getKey(), (String) item.getValue());
+          result.put((String) item.getKey(), (String) item.getValue());
       }
     }
     return result.entrySet().iterator();
@@ -2511,8 +2573,11 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   
   private void loadProperty(Properties properties, String name, String attr,
       String value, boolean finalParameter, String[] source) {
-    if (value != null) {
+    if (value != null || allowNullValueProperties) {
       if (!finalParameters.contains(attr)) {
+        if (value==null && allowNullValueProperties) {
+	  value = DEFAULT_STRING_CHECK;
+	}
         properties.setProperty(attr, value);
         updatingResource.put(attr, source);
       } else if (!value.equals(properties.getProperty(attr))) {

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java

@@ -128,7 +128,7 @@ public abstract class ReconfigurableBase
         try {
           this.parent.reconfigurePropertyImpl(change.prop, change.newVal);
         } catch (ReconfigurationException e) {
-          errorMessage = e.toString();
+          errorMessage = e.getCause().getMessage();
         }
         results.put(change, Optional.fromNullable(errorMessage));
       }

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurationServlet.java

@@ -71,10 +71,10 @@ public class ReconfigurationServlet extends HttpServlet {
 
   private void printHeader(PrintWriter out, String nodeName) {
     out.print("<html><head>");
-    out.printf("<title>%s Reconfiguration Utility</title>\n", 
+    out.printf("<title>%s Reconfiguration Utility</title>%n",
                StringEscapeUtils.escapeHtml(nodeName));
     out.print("</head><body>\n");
-    out.printf("<h1>%s Reconfiguration Utility</h1>\n",
+    out.printf("<h1>%s Reconfiguration Utility</h1>%n",
                StringEscapeUtils.escapeHtml(nodeName));
   }
 

+ 12 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java

@@ -33,7 +33,6 @@ public abstract class AesCtrCryptoCodec extends CryptoCodec {
    * @see http://en.wikipedia.org/wiki/Advanced_Encryption_Standard
    */
   private static final int AES_BLOCK_SIZE = SUITE.getAlgorithmBlockSize();
-  private static final int CTR_OFFSET = 8;
 
   @Override
   public CipherSuite getCipherSuite() {
@@ -48,20 +47,18 @@ public abstract class AesCtrCryptoCodec extends CryptoCodec {
   public void calculateIV(byte[] initIV, long counter, byte[] IV) {
     Preconditions.checkArgument(initIV.length == AES_BLOCK_SIZE);
     Preconditions.checkArgument(IV.length == AES_BLOCK_SIZE);
-    
-    System.arraycopy(initIV, 0, IV, 0, CTR_OFFSET);
-    long l = 0;
-    for (int i = 0; i < 8; i++) {
-      l = ((l << 8) | (initIV[CTR_OFFSET + i] & 0xff));
+
+    int i = IV.length; // IV length
+    int j = 0; // counter bytes index
+    int sum = 0;
+    while (i-- > 0) {
+      // (sum >>> Byte.SIZE) is the carry for addition
+      sum = (initIV[i] & 0xff) + (sum >>> Byte.SIZE);
+      if (j++ < 8) { // Big-endian, and long is 8 bytes length
+        sum += (byte) counter & 0xff;
+        counter >>>= 8;
+      }
+      IV[i] = (byte) sum;
     }
-    l += counter;
-    IV[CTR_OFFSET + 0] = (byte) (l >>> 56);
-    IV[CTR_OFFSET + 1] = (byte) (l >>> 48);
-    IV[CTR_OFFSET + 2] = (byte) (l >>> 40);
-    IV[CTR_OFFSET + 3] = (byte) (l >>> 32);
-    IV[CTR_OFFSET + 4] = (byte) (l >>> 24);
-    IV[CTR_OFFSET + 5] = (byte) (l >>> 16);
-    IV[CTR_OFFSET + 6] = (byte) (l >>> 8);
-    IV[CTR_OFFSET + 7] = (byte) (l);
   }
 }

+ 66 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CipherOption.java

@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Used between client and server to negotiate the 
+ * cipher suite, key and iv.
+ */
+@InterfaceAudience.Private
+public class CipherOption {
+  private final CipherSuite suite;
+  private final byte[] inKey;
+  private final byte[] inIv;
+  private final byte[] outKey;
+  private final byte[] outIv;
+  
+  public CipherOption(CipherSuite suite) {
+    this(suite, null, null, null, null);
+  }
+  
+  public CipherOption(CipherSuite suite, byte[] inKey, byte[] inIv, 
+      byte[] outKey, byte[] outIv) {
+    this.suite = suite;
+    this.inKey = inKey;
+    this.inIv = inIv;
+    this.outKey = outKey;
+    this.outIv = outIv;
+  }
+  
+  public CipherSuite getCipherSuite() {
+    return suite;
+  }
+  
+  public byte[] getInKey() {
+    return inKey;
+  }
+  
+  public byte[] getInIv() {
+    return inIv;
+  }
+  
+  public byte[] getOutKey() {
+    return outKey;
+  }
+  
+  public byte[] getOutIv() {
+    return outIv;
+  }
+}

+ 37 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java

@@ -23,6 +23,7 @@ import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.ByteBuffer;
+import java.nio.channels.ReadableByteChannel;
 import java.security.GeneralSecurityException;
 import java.util.EnumSet;
 import java.util.Queue;
@@ -57,7 +58,8 @@ import com.google.common.base.Preconditions;
 @InterfaceStability.Evolving
 public class CryptoInputStream extends FilterInputStream implements 
     Seekable, PositionedReadable, ByteBufferReadable, HasFileDescriptor, 
-    CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess {
+    CanSetDropBehind, CanSetReadahead, HasEnhancedByteBufferAccess, 
+    ReadableByteChannel {
   private static final byte[] oneByteBuf = new byte[1];
   private final CryptoCodec codec;
   private final Decryptor decryptor;
@@ -92,6 +94,8 @@ public class CryptoInputStream extends FilterInputStream implements
   private final byte[] key;
   private final byte[] initIV;
   private byte[] iv;
+  private final boolean isByteBufferReadable;
+  private final boolean isReadableByteChannel;
   
   /** DirectBuffer pool */
   private final Queue<ByteBuffer> bufferPool = 
@@ -109,12 +113,15 @@ public class CryptoInputStream extends FilterInputStream implements
   public CryptoInputStream(InputStream in, CryptoCodec codec,
       int bufferSize, byte[] key, byte[] iv, long streamOffset) throws IOException {
     super(in);
+    CryptoStreamUtils.checkCodec(codec);
     this.bufferSize = CryptoStreamUtils.checkBufferSize(codec, bufferSize);
     this.codec = codec;
     this.key = key.clone();
     this.initIV = iv.clone();
     this.iv = iv.clone();
     this.streamOffset = streamOffset;
+    isByteBufferReadable = in instanceof ByteBufferReadable;
+    isReadableByteChannel = in instanceof ReadableByteChannel;
     inBuffer = ByteBuffer.allocateDirect(this.bufferSize);
     outBuffer = ByteBuffer.allocateDirect(this.bufferSize);
     decryptor = getDecryptor();
@@ -165,9 +172,11 @@ public class CryptoInputStream extends FilterInputStream implements
        * it can avoid bytes copy.
        */
       if (usingByteBufferRead == null) {
-        if (in instanceof ByteBufferReadable) {
+        if (isByteBufferReadable || isReadableByteChannel) {
           try {
-            n = ((ByteBufferReadable) in).read(inBuffer);
+            n = isByteBufferReadable ? 
+                ((ByteBufferReadable) in).read(inBuffer) : 
+                  ((ReadableByteChannel) in).read(inBuffer);
             usingByteBufferRead = Boolean.TRUE;
           } catch (UnsupportedOperationException e) {
             usingByteBufferRead = Boolean.FALSE;
@@ -180,7 +189,8 @@ public class CryptoInputStream extends FilterInputStream implements
         }
       } else {
         if (usingByteBufferRead) {
-          n = ((ByteBufferReadable) in).read(inBuffer);
+          n = isByteBufferReadable ? ((ByteBufferReadable) in).read(inBuffer) : 
+                ((ReadableByteChannel) in).read(inBuffer);
         } else {
           n = readFromUnderlyingStream(inBuffer);
         }
@@ -450,7 +460,7 @@ public class CryptoInputStream extends FilterInputStream implements
   @Override
   public int read(ByteBuffer buf) throws IOException {
     checkStream();
-    if (in instanceof ByteBufferReadable) {
+    if (isByteBufferReadable || isReadableByteChannel) {
       final int unread = outBuffer.remaining();
       if (unread > 0) { // Have unread decrypted data in buffer.
         int toRead = buf.remaining();
@@ -466,7 +476,8 @@ public class CryptoInputStream extends FilterInputStream implements
       }
       
       final int pos = buf.position();
-      final int n = ((ByteBufferReadable) in).read(buf);
+      final int n = isByteBufferReadable ? ((ByteBufferReadable) in).read(buf) : 
+            ((ReadableByteChannel) in).read(buf);
       if (n > 0) {
         streamOffset += n; // Read n bytes
         decrypt(buf, n, pos);
@@ -481,10 +492,22 @@ public class CryptoInputStream extends FilterInputStream implements
           return unread;
         }
       }
+    } else {
+      int n = 0;
+      if (buf.hasArray()) {
+        n = read(buf.array(), buf.position(), buf.remaining());
+        if (n > 0) {
+          buf.position(buf.position() + n);
+        }
+      } else {
+        byte[] tmp = new byte[buf.remaining()];
+        n = read(tmp);
+        if (n > 0) {
+          buf.put(tmp, 0, n);
+        }
+      }
+      return n;
     }
-
-    throw new UnsupportedOperationException("ByteBuffer read unsupported " +
-        "by input stream.");
   }
   
   /**
@@ -686,4 +709,9 @@ public class CryptoInputStream extends FilterInputStream implements
       decryptorPool.add(decryptor);
     }
   }
+
+  @Override
+  public boolean isOpen() {
+    return !closed;
+  }
 }

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java

@@ -83,6 +83,7 @@ public class CryptoOutputStream extends FilterOutputStream implements
       int bufferSize, byte[] key, byte[] iv, long streamOffset) 
       throws IOException {
     super(out);
+    CryptoStreamUtils.checkCodec(codec);
     this.bufferSize = CryptoStreamUtils.checkBufferSize(codec, bufferSize);
     this.codec = codec;
     this.key = key.clone();

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java

@@ -49,6 +49,13 @@ public class CryptoStreamUtils {
         HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_DEFAULT);
   }
   
+  /** AES/CTR/NoPadding is required */
+  public static void checkCodec(CryptoCodec codec) {
+    if (codec.getCipherSuite() != CipherSuite.AES_CTR_NOPADDING) {
+      throw new UnsupportedCodecException("AES/CTR/NoPadding is required");
+    }
+  }
+
   /** Check and floor buffer size */
   public static int checkBufferSize(CryptoCodec codec, int bufferSize) {
     Preconditions.checkArgument(bufferSize >= MIN_BUFFER_SIZE, 

+ 60 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/UnsupportedCodecException.java

@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto;
+
+/**
+ * Thrown to indicate that the specific codec is not supported.
+ */
+public class UnsupportedCodecException extends RuntimeException {
+
+  /** Default constructor */
+  public UnsupportedCodecException() {
+  }
+
+  /**
+   * Constructs an UnsupportedCodecException with the specified
+   * detail message.
+   * 
+   * @param message the detail message
+   */
+  public UnsupportedCodecException(String message) {
+    super(message);
+  }
+
+  /**
+   * Constructs a new exception with the specified detail message and
+   * cause.
+   * 
+   * @param message the detail message
+   * @param cause the cause
+   */
+  public UnsupportedCodecException(String message, Throwable cause) {
+    super(message, cause);
+  }
+
+  /**
+   * Constructs a new exception with the specified cause.
+   * 
+   * @param cause the cause
+   */
+  public UnsupportedCodecException(Throwable cause) {
+    super(cause);
+  }
+
+  private static final long serialVersionUID = 6713920435487942224L;
+}

+ 5 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.crypto.key;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -143,13 +144,8 @@ public class JavaKeyStoreProvider extends KeyProvider {
           // Provided Password file does not exist
           throw new IOException("Password file does not exists");
         }
-        if (pwdFile != null) {
-          InputStream is = pwdFile.openStream();
-          try {
-            password = IOUtils.toCharArray(is);
-          } finally {
-            is.close();
-          }
+        try (InputStream is = pwdFile.openStream()) {
+          password = IOUtils.toString(is).trim().toCharArray();
         }
       }
     }
@@ -423,6 +419,8 @@ public class JavaKeyStoreProvider extends KeyProvider {
   @Override
   public KeyVersion createKey(String name, byte[] material,
                                Options options) throws IOException {
+    Preconditions.checkArgument(name.equals(name.toLowerCase()),
+        "Uppercase key names are unsupported: %s", name);
     writeLock.lock();
     try {
       try {

+ 12 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java

@@ -32,6 +32,7 @@ import java.util.Map;
 
 import com.google.gson.stream.JsonReader;
 import com.google.gson.stream.JsonWriter;
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -207,7 +208,8 @@ public abstract class KeyProvider {
      */
     protected byte[] serialize() throws IOException {
       ByteArrayOutputStream buffer = new ByteArrayOutputStream();
-      JsonWriter writer = new JsonWriter(new OutputStreamWriter(buffer));
+      JsonWriter writer = new JsonWriter(
+          new OutputStreamWriter(buffer, Charsets.UTF_8));
       try {
         writer.beginObject();
         if (cipher != null) {
@@ -251,7 +253,7 @@ public abstract class KeyProvider {
       String description = null;
       Map<String, String> attributes = null;
       JsonReader reader = new JsonReader(new InputStreamReader
-        (new ByteArrayInputStream(bytes)));
+        (new ByteArrayInputStream(bytes), Charsets.UTF_8));
       try {
         reader.beginObject();
         while (reader.hasNext()) {
@@ -533,6 +535,14 @@ public abstract class KeyProvider {
                                              byte[] material
                                             ) throws IOException;
 
+  /**
+   * Can be used by implementing classes to close any resources
+   * that require closing
+   */
+  public void close() throws IOException {
+    // NOP
+  }
+
   /**
    * Roll a new version of the given key generating the material for it.
    * <p/>

+ 8 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java

@@ -408,6 +408,13 @@ public class KeyProviderCryptoExtension extends
                          ? (CryptoExtension) keyProvider
                          : new DefaultCryptoExtension(keyProvider);
     return new KeyProviderCryptoExtension(keyProvider, cryptoExtension);
-  }  
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (getKeyProvider() != null) {
+      getKeyProvider().close();
+    }
+  }
 
 }

+ 17 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java

@@ -47,7 +47,7 @@ public class KeyShell extends Configured implements Tool {
       "   [" + ListCommand.USAGE + "]\n";
   private static final String LIST_METADATA = "keyShell.list.metadata";
 
-  private boolean interactive = false;
+  private boolean interactive = true;
   private Command command = null;
 
   /** allows stdout to be captured if necessary */
@@ -169,8 +169,8 @@ public class KeyShell extends Configured implements Tool {
         getConf().set(KeyProviderFactory.KEY_PROVIDER_PATH, args[++i]);
       } else if ("-metadata".equals(args[i])) {
         getConf().setBoolean(LIST_METADATA, true);
-      } else if ("-i".equals(args[i]) || ("-interactive".equals(args[i]))) {
-        interactive = true;
+      } else if ("-f".equals(args[i]) || ("-force".equals(args[i]))) {
+        interactive = false;
       } else if ("-help".equals(args[i])) {
         printKeyShellUsage();
         return 1;
@@ -298,7 +298,7 @@ public class KeyShell extends Configured implements Tool {
         }
       } catch (IOException e) {
         out.println("Cannot list keys for KeyProvider: " + provider
-            + ": " + e.getMessage());
+            + ": " + e.toString());
         throw e;
       }
     }
@@ -350,12 +350,12 @@ public class KeyShell extends Configured implements Tool {
           printProviderWritten();
         } catch (NoSuchAlgorithmException e) {
           out.println("Cannot roll key: " + keyName + " within KeyProvider: "
-              + provider);
+              + provider + ". " + e.toString());
           throw e;
         }
       } catch (IOException e1) {
         out.println("Cannot roll key: " + keyName + " within KeyProvider: "
-            + provider);
+            + provider + ". " + e1.toString());
         throw e1;
       }
     }
@@ -367,11 +367,13 @@ public class KeyShell extends Configured implements Tool {
   }
 
   private class DeleteCommand extends Command {
-    public static final String USAGE = "delete <keyname> [-provider <provider>] [-help]";
+    public static final String USAGE =
+        "delete <keyname> [-provider <provider>] [-f] [-help]";
     public static final String DESC =
         "The delete subcommand deletes all versions of the key\n" +
         "specified by the <keyname> argument from within the\n" +
-        "provider specified -provider.";
+        "provider specified -provider. The command asks for\n" +
+        "user confirmation unless -f is specified.";
 
     String keyName = null;
     boolean cont = true;
@@ -397,10 +399,10 @@ public class KeyShell extends Configured implements Tool {
         try {
           cont = ToolRunner
               .confirmPrompt("You are about to DELETE all versions of "
-                  + " key: " + keyName + " from KeyProvider "
-                  + provider + ". Continue?:");
+                  + " key " + keyName + " from KeyProvider "
+                  + provider + ". Continue? ");
           if (!cont) {
-            out.println("Nothing has been be deleted.");
+            out.println(keyName + " has not been deleted.");
           }
           return cont;
         } catch (IOException e) {
@@ -422,7 +424,7 @@ public class KeyShell extends Configured implements Tool {
           out.println(keyName + " has been successfully deleted.");
           printProviderWritten();
         } catch (IOException e) {
-          out.println(keyName + " has not been deleted.");
+          out.println(keyName + " has not been deleted. " + e.toString());
           throw e;
         }
       }
@@ -484,13 +486,13 @@ public class KeyShell extends Configured implements Tool {
             + options.toString() + ".");
         printProviderWritten();
       } catch (InvalidParameterException e) {
-        out.println(keyName + " has not been created. " + e.getMessage());
+        out.println(keyName + " has not been created. " + e.toString());
         throw e;
       } catch (IOException e) {
-        out.println(keyName + " has not been created. " + e.getMessage());
+        out.println(keyName + " has not been created. " + e.toString());
         throw e;
       } catch (NoSuchAlgorithmException e) {
-        out.println(keyName + " has not been created. " + e.getMessage());
+        out.println(keyName + " has not been created. " + e.toString());
         throw e;
       }
     }

+ 66 - 23
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.crypto.key.kms;
 
 import org.apache.commons.codec.binary.Base64;
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
@@ -26,8 +27,10 @@ import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.ProviderUtils;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
@@ -48,6 +51,7 @@ import java.io.OutputStreamWriter;
 import java.io.Writer;
 import java.lang.reflect.UndeclaredThrowableException;
 import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -68,6 +72,7 @@ import java.util.concurrent.ExecutionException;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
 /**
@@ -77,6 +82,8 @@ import com.google.common.base.Preconditions;
 public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     KeyProviderDelegationTokenExtension.DelegationTokenExtension {
 
+  private static final String INVALID_SIGNATURE = "Invalid signature";
+
   private static final String ANONYMOUS_REQUESTS_DISALLOWED = "Anonymous requests are disallowed";
 
   public static final String TOKEN_KIND = "kms-dt";
@@ -203,7 +210,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
   }
 
   private static void writeJson(Map map, OutputStream os) throws IOException {
-    Writer writer = new OutputStreamWriter(os);
+    Writer writer = new OutputStreamWriter(os, Charsets.UTF_8);
     ObjectMapper jsonMapper = new ObjectMapper();
     jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, map);
   }
@@ -247,8 +254,8 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
   private SSLFactory sslFactory;
   private ConnectionConfigurator configurator;
   private DelegationTokenAuthenticatedURL.Token authToken;
-  private UserGroupInformation loginUgi;
   private final int authRetry;
+  private final UserGroupInformation actualUgi;
 
   @Override
   public String toString() {
@@ -332,7 +339,11 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
                     KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
             new EncryptedQueueRefiller());
     authToken = new DelegationTokenAuthenticatedURL.Token();
-    loginUgi = UserGroupInformation.getCurrentUser();
+    actualUgi =
+        (UserGroupInformation.getCurrentUser().getAuthenticationMethod() ==
+        UserGroupInformation.AuthenticationMethod.PROXY) ? UserGroupInformation
+            .getCurrentUser().getRealUser() : UserGroupInformation
+            .getCurrentUser();
   }
 
   private String createServiceURL(URL url) throws IOException {
@@ -403,7 +414,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
                               ? currentUgi.getShortUserName() : null;
 
       // creating the HTTP connection using the current UGI at constructor time
-      conn = loginUgi.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
+      conn = actualUgi.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
         @Override
         public HttpURLConnection run() throws Exception {
           DelegationTokenAuthenticatedURL authUrl =
@@ -445,7 +456,8 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       throw ex;
     }
     if ((conn.getResponseCode() == HttpURLConnection.HTTP_FORBIDDEN
-        && conn.getResponseMessage().equals(ANONYMOUS_REQUESTS_DISALLOWED))
+        && (conn.getResponseMessage().equals(ANONYMOUS_REQUESTS_DISALLOWED) ||
+            conn.getResponseMessage().contains(INVALID_SIGNATURE)))
         || conn.getResponseCode() == HttpURLConnection.HTTP_UNAUTHORIZED) {
       // Ideally, this should happen only when there is an Authentication
       // failure. Unfortunately, the AuthenticationFilter returns 403 when it
@@ -453,8 +465,6 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       // WWW-Authenticate header as well)..
       KMSClientProvider.this.authToken =
           new DelegationTokenAuthenticatedURL.Token();
-      KMSClientProvider.this.loginUgi =
-          UserGroupInformation.getCurrentUser();
       if (authRetryCount > 0) {
         String contentType = conn.getRequestProperty(CONTENT_TYPE);
         String requestMethod = conn.getRequestMethod();
@@ -471,9 +481,6 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       // Ignore the AuthExceptions.. since we are just using the method to
       // extract and set the authToken.. (Workaround till we actually fix
       // AuthenticatedURL properly to set authToken post initialization)
-    } finally {
-      KMSClientProvider.this.loginUgi =
-          UserGroupInformation.getCurrentUser();
     }
     HttpExceptionUtils.validateResponse(conn, expectedResponse);
     if (APPLICATION_JSON_MIME.equalsIgnoreCase(conn.getContentType())
@@ -770,25 +777,61 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     encKeyVersionQueue.drain(keyName);
   }
 
+  @VisibleForTesting
+  public int getEncKeyQueueSize(String keyName) throws IOException {
+    try {
+      return encKeyVersionQueue.getSize(keyName);
+    } catch (ExecutionException e) {
+      throw new IOException(e);
+    }
+  }
+
   @Override
   public Token<?>[] addDelegationTokens(String renewer,
       Credentials credentials) throws IOException {
-    Token<?>[] tokens;
-    URL url = createURL(null, null, null, null);
-    DelegationTokenAuthenticatedURL authUrl =
-        new DelegationTokenAuthenticatedURL(configurator);
-    try {
-      Token<?> token = authUrl.getDelegationToken(url, authToken, renewer);
-      if (token != null) {
-        credentials.addToken(token.getService(), token);
-        tokens = new Token<?>[] { token };
-      } else {
-        throw new IOException("Got NULL as delegation token");
+    Token<?>[] tokens = null;
+    Text dtService = getDelegationTokenService();
+    Token<?> token = credentials.getToken(dtService);
+    if (token == null) {
+      URL url = createURL(null, null, null, null);
+      DelegationTokenAuthenticatedURL authUrl =
+          new DelegationTokenAuthenticatedURL(configurator);
+      try {
+        token = authUrl.getDelegationToken(url, authToken, renewer);
+        if (token != null) {
+          credentials.addToken(token.getService(), token);
+          tokens = new Token<?>[] { token };
+        } else {
+          throw new IOException("Got NULL as delegation token");
+        }
+      } catch (AuthenticationException ex) {
+        throw new IOException(ex);
       }
-    } catch (AuthenticationException ex) {
-      throw new IOException(ex);
     }
     return tokens;
   }
+  
+  private Text getDelegationTokenService() throws IOException {
+    URL url = new URL(kmsUrl);
+    InetSocketAddress addr = new InetSocketAddress(url.getHost(),
+        url.getPort());
+    Text dtService = SecurityUtil.buildTokenService(addr);
+    return dtService;
+  }
 
+  /**
+   * Shuts down the valueQueue executor threads. The SSL factory, if any,
+   * is destroyed even when the queue shutdown fails.
+   */
+  @Override
+  public void close() throws IOException {
+    try {
+      encKeyVersionQueue.shutdown();
+    } catch (Exception e) {
+      throw new IOException(e);
+    } finally {
+      if (sslFactory != null) {
+        sslFactory.destroy();
+      }
+    }
+  }
 }

+ 21 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java

@@ -75,6 +75,8 @@ public class ValueQueue <E> {
   private final int numValues;
   private final float lowWatermark;
 
+  private volatile boolean executorThreadsStarted = false;
+
   /**
    * A <code>Runnable</code> which takes a string name.
    */
@@ -187,9 +189,6 @@ public class ValueQueue <E> {
             TimeUnit.MILLISECONDS, queue, new ThreadFactoryBuilder()
                 .setDaemon(true)
                 .setNameFormat(REFILL_THREAD).build());
-    // To ensure all requests are first queued, make coreThreads = maxThreads
-    // and pre-start all the Core Threads.
-    executor.prestartAllCoreThreads();
   }
 
   public ValueQueue(final int numValues, final float lowWaterMark, long expiry,
@@ -240,6 +239,16 @@ public class ValueQueue <E> {
     }
   }
 
+  /**
+   * Get the size of the queue for the given key name.
+   * @param keyName the key name
+   * @return int queue size
+   * @throws ExecutionException if the queue for the key could not be loaded
+   */
+  public int getSize(String keyName) throws ExecutionException {
+    return keyQueues.get(keyName).size();
+  }
+
   /**
    * This removes the "num" values currently at the head of the Queue for the
    * provided key. Will immediately fire the Queue filler function if key
@@ -297,6 +306,15 @@ public class ValueQueue <E> {
 
   private void submitRefillTask(final String keyName,
       final Queue<E> keyQueue) throws InterruptedException {
+    if (!executorThreadsStarted) {
+      synchronized (this) {
+        // To ensure all requests are first queued, make coreThreads =
+        // maxThreads and pre-start all the Core Threads.
+        executor.prestartAllCoreThreads();
+        executorThreadsStarted = true;
+      }
+    }
     // The submit/execute method of the ThreadPoolExecutor is bypassed and
     // the Runnable is directly put in the backing BlockingQueue so that we
     // can control exactly how the runnable is inserted into the queue.

+ 7 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java

@@ -148,11 +148,14 @@ public abstract class AbstractFileSystem {
    */
   public static AbstractFileSystem createFileSystem(URI uri, Configuration conf)
       throws UnsupportedFileSystemException {
-    Class<?> clazz = conf.getClass("fs.AbstractFileSystem." + 
-                                uri.getScheme() + ".impl", null);
+    final String fsImplConf = String.format("fs.AbstractFileSystem.%s.impl",
+        uri.getScheme());
+
+    Class<?> clazz = conf.getClass(fsImplConf, null);
     if (clazz == null) {
-      throw new UnsupportedFileSystemException(
-          "No AbstractFileSystem for scheme: " + uri.getScheme());
+      throw new UnsupportedFileSystemException(String.format(
+          "%s=null: No AbstractFileSystem configured for scheme: %s",
+          fsImplConf, uri.getScheme()));
     }
     return (AbstractFileSystem) newInstance(clazz, uri, conf);
   }

+ 5 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferReadable.java

@@ -29,9 +29,8 @@ public interface ByteBufferReadable {
    * Reads up to buf.remaining() bytes into buf. Callers should use
    * buf.limit(..) to control the size of the desired read.
    * <p/>
-   * After a successful call, buf.position() and buf.limit() should be
-   * unchanged, and therefore any data can be immediately read from buf.
-   * buf.mark() may be cleared or updated.
+   * After a successful call, buf.position() will be advanced by the number 
+   * of bytes read and buf.limit() should be unchanged.
    * <p/>
    * In the case of an exception, the values of buf.position() and buf.limit()
    * are undefined, and callers should be prepared to recover from this
@@ -45,9 +44,9 @@ public interface ByteBufferReadable {
    * signal an error upon their receipt.
    *
    * @param buf
-   *          the ByteBuffer to receive the results of the read operation. Up to
-   *          buf.limit() - buf.position() bytes may be read.
-   * @return the number of bytes available to read from buf
+   *          the ByteBuffer to receive the results of the read operation.
+   * @return the number of bytes read, possibly zero, or -1 if 
+   *         reach end-of-stream
    * @throws IOException
    *           if there is some error performing the read
    */

+ 10 - 11
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java

@@ -379,17 +379,19 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
                           int bufferSize,
                           short replication,
                           long blockSize,
-                          Progressable progress)
+                          Progressable progress,
+                          FsPermission permission)
       throws IOException {
       super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32,
           fs.getBytesPerSum()));
       int bytesPerSum = fs.getBytesPerSum();
-      this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize, 
-                                         replication, blockSize, progress);
+      this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
+                                         bufferSize, replication, blockSize,
+                                         progress);
       int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
-      this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file), true, 
-                                               sumBufferSize, replication,
-                                               blockSize);
+      this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
+                                               permission, true, sumBufferSize,
+                                               replication, blockSize, null);
       sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
       sums.writeInt(bytesPerSum);
     }
@@ -448,7 +450,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
     if (writeChecksum) {
       out = new FSDataOutputStream(
           new ChecksumFSOutputSummer(this, f, overwrite, bufferSize, replication,
-              blockSize, progress), null);
+              blockSize, progress, permission), null);
     } else {
       out = fs.create(f, permission, overwrite, bufferSize, replication,
           blockSize, progress);
@@ -458,9 +460,6 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
         fs.delete(checkFile, true);
       }
     }
-    if (permission != null) {
-      setPermission(f, permission);
-    }
     return out;
   }
 
@@ -562,7 +561,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    * 
    * @param f
    *          given path
-   * @return the statuses of the files/directories in the given patch
+   * @return the statuses of the files/directories in the given path
    * @throws IOException
    */
   @Override

+ 10 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java

@@ -51,7 +51,7 @@ abstract public class FSOutputSummer extends OutputStream {
   protected FSOutputSummer(DataChecksum sum) {
     this.sum = sum;
     this.buf = new byte[sum.getBytesPerChecksum() * BUFFER_NUM_CHUNKS];
-    this.checksum = new byte[sum.getChecksumSize() * BUFFER_NUM_CHUNKS];
+    this.checksum = new byte[getChecksumSize() * BUFFER_NUM_CHUNKS];
     this.count = 0;
   }
   
@@ -188,7 +188,12 @@ abstract public class FSOutputSummer extends OutputStream {
   protected synchronized int getBufferedDataSize() {
     return count;
   }
-  
+
+  /** @return the size for a checksum. */
+  protected int getChecksumSize() {
+    return sum.getChecksumSize();
+  }
+
   /** Generate checksums for the given data chunks and output chunks & checksums
    * to the underlying output stream.
    */
@@ -197,9 +202,8 @@ abstract public class FSOutputSummer extends OutputStream {
     sum.calculateChunkedSums(b, off, len, checksum, 0);
     for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
       int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
-      int ckOffset = i / sum.getBytesPerChecksum() * sum.getChecksumSize();
-      writeChunk(b, off + i, chunkLen, checksum, ckOffset,
-          sum.getChecksumSize());
+      int ckOffset = i / sum.getBytesPerChecksum() * getChecksumSize();
+      writeChunk(b, off + i, chunkLen, checksum, ckOffset, getChecksumSize());
     }
   }
 
@@ -226,8 +230,7 @@ abstract public class FSOutputSummer extends OutputStream {
    */
   protected synchronized void setChecksumBufSize(int size) {
     this.buf = new byte[size];
-    this.checksum = new byte[((size - 1) / sum.getBytesPerChecksum() + 1) *
-        sum.getChecksumSize()];
+    this.checksum = new byte[sum.getChecksumSize(size)];
     this.count = 0;
   }
 

+ 13 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -161,7 +161,7 @@ import org.apache.hadoop.util.ShutdownHookManager;
 
 @InterfaceAudience.Public
 @InterfaceStability.Evolving /*Evolving for a release,to be changed to Stable */
-public final class FileContext {
+public class FileContext {
   
   public static final Log LOG = LogFactory.getLog(FileContext.class);
   /**
@@ -430,6 +430,9 @@ public final class FileContext {
       final Configuration aConf) throws UnsupportedFileSystemException {
     UserGroupInformation currentUser = null;
     AbstractFileSystem defaultAfs = null;
+    if (defaultFsUri.getScheme() == null) {
+      return getFileContext(aConf);
+    }
     try {
       currentUser = UserGroupInformation.getCurrentUser();
       defaultAfs = getAbstractFileSystem(currentUser, defaultFsUri, aConf);
@@ -454,9 +457,15 @@ public final class FileContext {
    */
   public static FileContext getFileContext(final Configuration aConf)
       throws UnsupportedFileSystemException {
-    return getFileContext(
-      URI.create(aConf.get(FS_DEFAULT_NAME_KEY, FS_DEFAULT_NAME_DEFAULT)), 
-      aConf);
+    final URI defaultFsUri = URI.create(aConf.get(FS_DEFAULT_NAME_KEY,
+        FS_DEFAULT_NAME_DEFAULT));
+    if (   defaultFsUri.getScheme() != null
+        && !defaultFsUri.getScheme().trim().isEmpty()) {
+      return getFileContext(defaultFsUri, aConf);
+    }
+    throw new UnsupportedFileSystemException(String.format(
+        "%s: URI configured via %s carries no scheme",
+        defaultFsUri, FS_DEFAULT_NAME_KEY));
   }
 
   /**

+ 30 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -1700,6 +1700,36 @@ public abstract class FileSystem extends Configured implements Closeable {
     };
   }
 
+  /**
+   * Returns a remote iterator so that followup calls are made on demand
+   * while consuming the entries. Each file system implementation should
+   * override this method and provide a more efficient implementation, if
+   * possible. 
+   *
+   * @param p target path
+   * @return remote iterator
+   */
+  public RemoteIterator<FileStatus> listStatusIterator(final Path p)
+  throws FileNotFoundException, IOException {
+    return new RemoteIterator<FileStatus>() {
+      private final FileStatus[] stats = listStatus(p);
+      private int i = 0;
+
+      @Override
+      public boolean hasNext() {
+        return i<stats.length;
+      }
+
+      @Override
+      public FileStatus next() throws IOException {
+        if (!hasNext()) {
+          throw new NoSuchElementException("No more entry in " + p);
+        }
+        return stats[i++];
+      }
+    };
+  }
+
   /**
    * List the statuses and block locations of the files in the given path.
    * 
@@ -2588,9 +2618,6 @@ public abstract class FileSystem extends Configured implements Closeable {
   private static FileSystem createFileSystem(URI uri, Configuration conf
       ) throws IOException {
     Class<?> clazz = getFileSystemClass(uri.getScheme(), conf);
-    if (clazz == null) {
-      throw new IOException("No FileSystem for scheme: " + uri.getScheme());
-    }
     FileSystem fs = (FileSystem)ReflectionUtils.newInstance(clazz, conf);
     fs.initialize(uri, conf);
     return fs;

+ 24 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java

@@ -707,7 +707,7 @@ public class FileUtil {
       TarArchiveEntry entry, File outputDir) throws IOException {
     if (entry.isDirectory()) {
       File subDir = new File(outputDir, entry.getName());
-      if (!subDir.mkdir() && !subDir.isDirectory()) {
+      if (!subDir.mkdirs() && !subDir.isDirectory()) {
         throw new IOException("Mkdirs failed to create tar internal dir "
             + outputDir);
       }
@@ -720,8 +720,8 @@ public class FileUtil {
     }
 
     File outputFile = new File(outputDir, entry.getName());
-    if (!outputDir.exists()) {
-      if (!outputDir.mkdirs()) {
+    if (!outputFile.getParentFile().exists()) {
+      if (!outputFile.getParentFile().mkdirs()) {
         throw new IOException("Mkdirs failed to create tar internal dir "
             + outputDir);
       }
@@ -1186,6 +1186,11 @@ public class FileUtil {
     return fileNames;
   }  
   
+  public static String[] createJarWithClassPath(String inputClassPath, Path pwd,
+      Map<String, String> callerEnv) throws IOException {
+    return createJarWithClassPath(inputClassPath, pwd, pwd, callerEnv);
+  }
+  
   /**
    * Create a jar file at the given path, containing a manifest with a classpath
    * that references all specified entries.
@@ -1210,12 +1215,15 @@ public class FileUtil {
    * 
    * @param inputClassPath String input classpath to bundle into the jar manifest
    * @param pwd Path to working directory to save jar
+   * @param targetDir Path to the working directory of the jar at execution
+   *   time; relative classpath entries are resolved against it
    * @param callerEnv Map<String, String> caller's environment variables to use
    *   for expansion
-   * @return String absolute path to new jar
+   * @return String[] with absolute path to new jar in position 0 and
+   *   unexpanded wild card entry path in position 1
    * @throws IOException if there is an I/O error while writing the jar file
    */
-  public static String createJarWithClassPath(String inputClassPath, Path pwd,
+  public static String[] createJarWithClassPath(String inputClassPath, Path pwd,
+      Path targetDir,
       Map<String, String> callerEnv) throws IOException {
     // Replace environment variables, case-insensitive on Windows
     @SuppressWarnings("unchecked")
@@ -1235,6 +1243,7 @@ public class FileUtil {
       LOG.debug("mkdirs false for " + workingDir + ", execution will continue");
     }
 
+    StringBuilder unexpandedWildcardClasspath = new StringBuilder();
     // Append all entries
     List<String> classPathEntryList = new ArrayList<String>(
       classPathEntries.length);
@@ -1243,21 +1252,27 @@ public class FileUtil {
         continue;
       }
       if (classPathEntry.endsWith("*")) {
+        boolean foundWildCardJar = false;
         // Append all jars that match the wildcard
         Path globPath = new Path(classPathEntry).suffix("{.jar,.JAR}");
         FileStatus[] wildcardJars = FileContext.getLocalFSFileContext().util()
           .globStatus(globPath);
         if (wildcardJars != null) {
           for (FileStatus wildcardJar: wildcardJars) {
+            foundWildCardJar = true;
             classPathEntryList.add(wildcardJar.getPath().toUri().toURL()
               .toExternalForm());
           }
         }
+        if (!foundWildCardJar) {
+          unexpandedWildcardClasspath.append(File.pathSeparator);
+          unexpandedWildcardClasspath.append(classPathEntry);
+        }
       } else {
         // Append just this entry
         File fileCpEntry = null;
         if(!new Path(classPathEntry).isAbsolute()) {
-          fileCpEntry = new File(workingDir, classPathEntry);
+          fileCpEntry = new File(targetDir.toString(), classPathEntry);
         }
         else {
           fileCpEntry = new File(classPathEntry);
@@ -1300,7 +1315,8 @@ public class FileUtil {
     } finally {
       IOUtils.cleanup(LOG, jos, bos, fos);
     }
-
-    return classPathJar.getCanonicalPath();
+    String[] jarCp = {classPathJar.getCanonicalPath(),
+                        unexpandedWildcardClasspath.toString()};
+    return jarCp;
   }
 }

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -251,6 +251,13 @@ public class FilterFileSystem extends FileSystem {
     return fs.listLocatedStatus(f);
   }
   
+  /** Return a remote iterator for listing in a directory */
+  @Override
+  public RemoteIterator<FileStatus> listStatusIterator(Path f)
+  throws IOException {
+    return fs.listStatusIterator(f);
+   }
+
   @Override
   public Path getHomeDirectory() {
     return fs.getHomeDirectory();

+ 4 - 19
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java

@@ -220,12 +220,7 @@ public class HarFileSystem extends FileSystem {
       return FileSystem.getDefaultUri(conf);
     }
     String authority = rawURI.getAuthority();
-    if (authority == null) {
-      throw new IOException("URI: " + rawURI
-          + " is an invalid Har URI since authority==null."
-          + "  Expecting har://<scheme>-<host>/<path>.");
-    }
- 
+
     int i = authority.indexOf('-');
     if (i < 0) {
       throw new IOException("URI: " + rawURI
@@ -489,19 +484,12 @@ public class HarFileSystem extends FileSystem {
   }
   
   static class Store {
-    public Store() {
-      begin = end = startHash = endHash = 0;
-    }
-    public Store(long begin, long end, int startHash, int endHash) {
+    public Store(long begin, long end) {
       this.begin = begin;
       this.end = end;
-      this.startHash = startHash;
-      this.endHash = endHash;
     }
     public long begin;
     public long end;
-    public int startHash;
-    public int endHash;
   }
   
   /**
@@ -594,7 +582,7 @@ public class HarFileSystem extends FileSystem {
     public HarStatus(String harString) throws UnsupportedEncodingException {
       String[] splits = harString.split(" ");
       this.name = decodeFileName(splits[0]);
-      this.isDir = "dir".equals(splits[1]) ? true: false;
+      this.isDir = "dir".equals(splits[1]);
       // this is equal to "none" if its a directory
       this.partName = splits[2];
       this.startIndex = Long.parseLong(splits[3]);
@@ -1167,11 +1155,8 @@ public class HarFileSystem extends FileSystem {
           int b = lin.readLine(line);
           read += b;
           readStr = line.toString().split(" ");
-          int startHash = Integer.parseInt(readStr[0]);
-          int endHash  = Integer.parseInt(readStr[1]);
           stores.add(new Store(Long.parseLong(readStr[2]), 
-              Long.parseLong(readStr[3]), startHash,
-              endHash));
+              Long.parseLong(readStr[3])));
           line.clear();
         }
       } catch (IOException ioe) {

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java

@@ -372,7 +372,7 @@ public class LocalDirAllocator {
         // Keep rolling the wheel till we get a valid path
         Random r = new java.util.Random();
         while (numDirsSearched < numDirs && returnPath == null) {
-          long randomPosition = Math.abs(r.nextLong()) % totalAvailable;
+          long randomPosition = (r.nextLong() >>> 1) % totalAvailable;
           int dir = 0;
           while (randomPosition > availableOnDisk[dir]) {
             randomPosition -= availableOnDisk[dir];

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java

@@ -143,13 +143,13 @@ public class MD5MD5CRC32FileChecksum extends FileChecksum {
       switch (finalCrcType) {
         case CRC32:
           return new MD5MD5CRC32GzipFileChecksum(
-              Integer.valueOf(bytesPerCRC),
-              Integer.valueOf(crcPerBlock),
+              Integer.parseInt(bytesPerCRC),
+              Integer.parseInt(crcPerBlock),
               new MD5Hash(md5));
         case CRC32C:
           return new MD5MD5CRC32CastagnoliFileChecksum(
-              Integer.valueOf(bytesPerCRC),
-              Integer.valueOf(crcPerBlock),
+              Integer.parseInt(bytesPerCRC),
+              Integer.parseInt(crcPerBlock),
               new MD5Hash(md5));
         default:
           // we should never get here since finalCrcType will

+ 12 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java

@@ -234,15 +234,14 @@ public final class Options {
    * This is used in FileSystem and FileContext to specify checksum options.
    */
   public static class ChecksumOpt {
-    private final int crcBlockSize;
-    private final DataChecksum.Type crcType;
+    private final DataChecksum.Type checksumType;
+    private final int bytesPerChecksum;
 
     /**
      * Create a uninitialized one
      */
     public ChecksumOpt() {
-      crcBlockSize = -1;
-      crcType = DataChecksum.Type.DEFAULT;
+      this(DataChecksum.Type.DEFAULT, -1);
     }
 
     /**
@@ -251,16 +250,21 @@ public final class Options {
      * @param size bytes per checksum
      */
     public ChecksumOpt(DataChecksum.Type type, int size) {
-      crcBlockSize = size;
-      crcType = type;
+      checksumType = type;
+      bytesPerChecksum = size;
     }
 
     public int getBytesPerChecksum() {
-      return crcBlockSize;
+      return bytesPerChecksum;
     }
 
     public DataChecksum.Type getChecksumType() {
-      return crcType;
+      return checksumType;
+    }
+    
+    @Override
+    public String toString() {
+      return checksumType + ":" + bytesPerChecksum;
     }
 
     /**

+ 0 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java

@@ -60,7 +60,6 @@ public class Path implements Comparable {
 
   /**
    * Pathnames with scheme and relative path are illegal.
-   * @param path to be checked
    */
   void checkNotSchemeWithRelative() {
     if (toUri().isAbsolute() && !isUriPathAbsolute()) {

+ 91 - 34
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -41,7 +41,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.io.nativeio.NativeIOException;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
@@ -207,8 +209,28 @@ public class RawLocalFileSystem extends FileSystem {
   class LocalFSFileOutputStream extends OutputStream {
     private FileOutputStream fos;
     
-    private LocalFSFileOutputStream(Path f, boolean append) throws IOException {
-      this.fos = new FileOutputStream(pathToFile(f), append);
+    private LocalFSFileOutputStream(Path f, boolean append,
+        FsPermission permission) throws IOException {
+      File file = pathToFile(f);
+      if (permission == null) {
+        this.fos = new FileOutputStream(file, append);
+      } else {
+        if (Shell.WINDOWS && NativeIO.isAvailable()) {
+          this.fos = NativeIO.Windows.createFileOutputStreamWithMode(file,
+              append, permission.toShort());
+        } else {
+          this.fos = new FileOutputStream(file, append);
+          boolean success = false;
+          try {
+            setPermission(f, permission);
+            success = true;
+          } finally {
+            if (!success) {
+              IOUtils.cleanup(LOG, this.fos);
+            }
+          }
+        }
+      }
     }
     
     /*
@@ -247,19 +269,20 @@ public class RawLocalFileSystem extends FileSystem {
       throw new IOException("Cannot append to a diretory (=" + f + " )");
     }
     return new FSDataOutputStream(new BufferedOutputStream(
-        new LocalFSFileOutputStream(f, true), bufferSize), statistics);
+        createOutputStreamWithMode(f, true, null), bufferSize), statistics);
   }
 
   @Override
   public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
     short replication, long blockSize, Progressable progress)
     throws IOException {
-    return create(f, overwrite, true, bufferSize, replication, blockSize, progress);
+    return create(f, overwrite, true, bufferSize, replication, blockSize,
+        progress, null);
   }
 
   private FSDataOutputStream create(Path f, boolean overwrite,
       boolean createParent, int bufferSize, short replication, long blockSize,
-      Progressable progress) throws IOException {
+      Progressable progress, FsPermission permission) throws IOException {
     if (exists(f) && !overwrite) {
       throw new FileAlreadyExistsException("File already exists: " + f);
     }
@@ -268,7 +291,18 @@ public class RawLocalFileSystem extends FileSystem {
       throw new IOException("Mkdirs failed to create " + parent.toString());
     }
     return new FSDataOutputStream(new BufferedOutputStream(
-        new LocalFSFileOutputStream(f, false), bufferSize), statistics);
+        createOutputStreamWithMode(f, false, permission), bufferSize),
+        statistics);
+  }
+  
+  protected OutputStream createOutputStream(Path f, boolean append) 
+      throws IOException {
+    return createOutputStreamWithMode(f, append, null);
+  }
+
+  protected OutputStream createOutputStreamWithMode(Path f, boolean append,
+      FsPermission permission) throws IOException {
+    return new LocalFSFileOutputStream(f, append, permission);
   }
   
   @Override
@@ -280,7 +314,8 @@ public class RawLocalFileSystem extends FileSystem {
       throw new FileAlreadyExistsException("File already exists: " + f);
     }
     return new FSDataOutputStream(new BufferedOutputStream(
-        new LocalFSFileOutputStream(f, false), bufferSize), statistics);
+        createOutputStreamWithMode(f, false, permission), bufferSize),
+            statistics);
   }
 
   @Override
@@ -288,9 +323,8 @@ public class RawLocalFileSystem extends FileSystem {
     boolean overwrite, int bufferSize, short replication, long blockSize,
     Progressable progress) throws IOException {
 
-    FSDataOutputStream out = create(f,
-        overwrite, bufferSize, replication, blockSize, progress);
-    setPermission(f, permission);
+    FSDataOutputStream out = create(f, overwrite, true, bufferSize, replication,
+        blockSize, progress, permission);
     return out;
   }
 
@@ -299,9 +333,8 @@ public class RawLocalFileSystem extends FileSystem {
       boolean overwrite,
       int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {
-    FSDataOutputStream out = create(f,
-        overwrite, false, bufferSize, replication, blockSize, progress);
-    setPermission(f, permission);
+    FSDataOutputStream out = create(f, overwrite, false, bufferSize, replication,
+        blockSize, progress, permission);
     return out;
   }
 
@@ -406,6 +439,37 @@ public class RawLocalFileSystem extends FileSystem {
     }
     return Arrays.copyOf(results, j);
   }
+  
+  protected boolean mkOneDir(File p2f) throws IOException {
+    return mkOneDirWithMode(new Path(p2f.getAbsolutePath()), p2f, null);
+  }
+
+  protected boolean mkOneDirWithMode(Path p, File p2f, FsPermission permission)
+      throws IOException {
+    if (permission == null) {
+      return p2f.mkdir();
+    } else {
+      if (Shell.WINDOWS && NativeIO.isAvailable()) {
+        try {
+          NativeIO.Windows.createDirectoryWithMode(p2f, permission.toShort());
+          return true;
+        } catch (IOException e) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(String.format(
+                "NativeIO.createDirectoryWithMode error, path = %s, mode = %o",
+                p2f, permission.toShort()), e);
+          }
+          return false;
+        }
+      } else {
+        boolean b = p2f.mkdir();
+        if (b) {
+          setPermission(p, permission);
+        }
+        return b;
+      }
+    }
+  }
 
   /**
    * Creates the specified directory hierarchy. Does not
@@ -413,13 +477,24 @@ public class RawLocalFileSystem extends FileSystem {
    */
   @Override
   public boolean mkdirs(Path f) throws IOException {
+    return mkdirsWithOptionalPermission(f, null);
+  }
+
+  @Override
+  public boolean mkdirs(Path f, FsPermission permission) throws IOException {
+    return mkdirsWithOptionalPermission(f, permission);
+  }
+
+  private boolean mkdirsWithOptionalPermission(Path f, FsPermission permission)
+      throws IOException {
     if(f == null) {
       throw new IllegalArgumentException("mkdirs path arg is null");
     }
     Path parent = f.getParent();
     File p2f = pathToFile(f);
+    File parent2f = null;
     if(parent != null) {
-      File parent2f = pathToFile(parent);
+      parent2f = pathToFile(parent);
       if(parent2f != null && parent2f.exists() && !parent2f.isDirectory()) {
         throw new ParentNotDirectoryException("Parent path is not a directory: "
             + parent);
@@ -429,26 +504,8 @@ public class RawLocalFileSystem extends FileSystem {
       throw new FileNotFoundException("Destination exists" +
               " and is not a directory: " + p2f.getCanonicalPath());
     }
-    return (parent == null || mkdirs(parent)) &&
-      (p2f.mkdir() || p2f.isDirectory());
-  }
-
-  @Override
-  public boolean mkdirs(Path f, FsPermission permission) throws IOException {
-    boolean b = mkdirs(f);
-    if(b) {
-      setPermission(f, permission);
-    }
-    return b;
-  }
-  
-
-  @Override
-  protected boolean primitiveMkdir(Path f, FsPermission absolutePermission)
-    throws IOException {
-    boolean b = mkdirs(f);
-    setPermission(f, absolutePermission);
-    return b;
+    return (parent == null || parent2f.exists() || mkdirs(parent)) &&
+      (mkOneDirWithMode(f, p2f, permission) || p2f.isDirectory());
   }
   
   

+ 8 - 13
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java

@@ -23,6 +23,7 @@ import java.io.InputStream;
 import java.net.ConnectException;
 import java.net.URI;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.net.ftp.FTP;
@@ -101,17 +102,12 @@ public class FTPFileSystem extends FileSystem {
     if (userAndPassword == null) {
       userAndPassword = (conf.get("fs.ftp.user." + host, null) + ":" + conf
           .get("fs.ftp.password." + host, null));
-      if (userAndPassword == null) {
-        throw new IOException("Invalid user/passsword specified");
-      }
     }
     String[] userPasswdInfo = userAndPassword.split(":");
+    Preconditions.checkState(userPasswdInfo.length > 1,
+                             "Invalid username / password");
     conf.set(FS_FTP_USER_PREFIX + host, userPasswdInfo[0]);
-    if (userPasswdInfo.length > 1) {
-      conf.set(FS_FTP_PASSWORD_PREFIX + host, userPasswdInfo[1]);
-    } else {
-      conf.set(FS_FTP_PASSWORD_PREFIX + host, null);
-    }
+    conf.set(FS_FTP_PASSWORD_PREFIX + host, userPasswdInfo[1]);
     setConf(conf);
     this.uri = uri;
   }
@@ -293,7 +289,8 @@ public class FTPFileSystem extends FileSystem {
    */
   private boolean exists(FTPClient client, Path file) throws IOException {
     try {
-      return getFileStatus(client, file) != null;
+      getFileStatus(client, file);
+      return true;
     } catch (FileNotFoundException fnfe) {
       return false;
     }
@@ -333,10 +330,8 @@ public class FTPFileSystem extends FileSystem {
     if (dirEntries != null && dirEntries.length > 0 && !(recursive)) {
       throw new IOException("Directory: " + file + " is not empty.");
     }
-    if (dirEntries != null) {
-      for (int i = 0; i < dirEntries.length; i++) {
-        delete(client, new Path(absolute, dirEntries[i].getPath()), recursive);
-      }
+    for (FileStatus dirEntry : dirEntries) {
+      delete(client, new Path(absolute, dirEntry.getPath()), recursive);
     }
     return client.removeDirectory(pathName);
   }

+ 0 - 66
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AccessControlException.java

@@ -1,66 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.permission;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * An exception class for access control related issues.
- * @deprecated Use {@link org.apache.hadoop.security.AccessControlException} 
- *             instead.
- */
-@Deprecated
-@InterfaceAudience.Public
-@InterfaceStability.Stable
-public class AccessControlException extends IOException {
-  //Required by {@link java.io.Serializable}.
-  private static final long serialVersionUID = 1L;
-
-  /**
-   * Default constructor is needed for unwrapping from 
-   * {@link org.apache.hadoop.ipc.RemoteException}.
-   */
-  public AccessControlException() {
-    super("Permission denied.");
-  }
-
-  /**
-   * Constructs an {@link AccessControlException}
-   * with the specified detail message.
-   * @param s the detail message.
-   */
-  public AccessControlException(String s) {
-    super(s);
-  }
-  
-  /**
-   * Constructs a new exception with the specified cause and a detail
-   * message of <tt>(cause==null ? null : cause.toString())</tt> (which
-   * typically contains the class and detail message of <tt>cause</tt>).
-   * @param  cause the cause (which is saved for later retrieval by the
-   *         {@link #getCause()} method).  (A <tt>null</tt> value is
-   *         permitted, and indicates that the cause is nonexistent or
-   *         unknown.)
-   */
-  public AccessControlException(Throwable cause) {
-    super(cause);
-  }
-}

+ 3 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java

@@ -146,7 +146,9 @@ public class AclEntry {
      * @return Builder this builder, for call chaining
      */
     public Builder setName(String name) {
-      this.name = name;
+      if (name != null && !name.isEmpty()) {
+        this.name = name;
+      }
       return this;
     }
 

+ 77 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java

@@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
 import com.google.common.base.Objects;
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
 /**
@@ -36,6 +37,7 @@ public class AclStatus {
   private final String group;
   private final boolean stickyBit;
   private final List<AclEntry> entries;
+  private final FsPermission permission;
 
   /**
    * Returns the file owner.
@@ -73,6 +75,14 @@ public class AclStatus {
     return entries;
   }
 
+  /**
+   * Returns the permission set for the path
+   * @return {@link FsPermission} for the path
+   */
+  public FsPermission getPermission() {
+    return permission;
+  }
+
   @Override
   public boolean equals(Object o) {
     if (o == null) {
@@ -113,6 +123,7 @@ public class AclStatus {
     private String group;
     private boolean stickyBit;
     private List<AclEntry> entries = Lists.newArrayList();
+    private FsPermission permission = null;
 
     /**
      * Sets the file owner.
@@ -172,13 +183,22 @@ public class AclStatus {
       return this;
     }
 
+    /**
+     * Sets the permission for the file.
+     * @param permission
+     */
+    public Builder setPermission(FsPermission permission) {
+      this.permission = permission;
+      return this;
+    }
+
     /**
      * Builds a new AclStatus populated with the set properties.
      *
      * @return AclStatus new AclStatus
      */
     public AclStatus build() {
-      return new AclStatus(owner, group, stickyBit, entries);
+      return new AclStatus(owner, group, stickyBit, entries, permission);
     }
   }
 
@@ -190,12 +210,67 @@ public class AclStatus {
    * @param group String file group
    * @param stickyBit the sticky bit
    * @param entries the ACL entries
+   * @param permission permission of the path
    */
   private AclStatus(String owner, String group, boolean stickyBit,
-      Iterable<AclEntry> entries) {
+      Iterable<AclEntry> entries, FsPermission permission) {
     this.owner = owner;
     this.group = group;
     this.stickyBit = stickyBit;
     this.entries = Lists.newArrayList(entries);
+    this.permission = permission;
+  }
+
+  /**
+   * Get the effective permission for the AclEntry
+   * @param entry AclEntry to get the effective action
+   */
+  public FsAction getEffectivePermission(AclEntry entry) {
+    return getEffectivePermission(entry, permission);
+  }
+
+  /**
+   * Get the effective permission for the AclEntry. <br>
+   * Recommended to use this API ONLY if client communicates with the old
+   * NameNode, needs to pass the Permission for the path to get effective
+   * permission, else use {@link AclStatus#getEffectivePermission(AclEntry)}.
+   * @param entry AclEntry to get the effective action
+   * @param permArg Permission for the path. However if the client is NOT
+   *          communicating with old namenode, then this argument will not have
+   *          any preference.
+   * @return Returns the effective permission for the entry.
+   * @throws IllegalArgumentException If the client communicating with old
+   *           namenode and permission is not passed as an argument.
+   */
+  public FsAction getEffectivePermission(AclEntry entry, FsPermission permArg)
+      throws IllegalArgumentException {
+    // At least one permission bits should be available.
+    Preconditions.checkArgument(this.permission != null || permArg != null,
+        "Permission bits are not available to calculate effective permission");
+    if (this.permission != null) {
+      // permission bits from server response will have the priority for
+      // accuracy.
+      permArg = this.permission;
+    }
+    if ((entry.getName() != null || entry.getType() == AclEntryType.GROUP)) {
+      if (entry.getScope() == AclEntryScope.ACCESS) {
+        FsAction entryPerm = entry.getPermission();
+        return entryPerm.and(permArg.getGroupAction());
+      } else {
+        Preconditions.checkArgument(this.entries.contains(entry)
+            && this.entries.size() >= 3,
+            "Passed default ACL entry not found in the list of ACLs");
+        // default mask entry for effective permission calculation will be the
+        // penultimate entry. This can be mask entry in case of extended ACLs.
+        // In case of minimal ACL, this is the owner group entry, and we end up
+        // intersecting group FsAction with itself, which is a no-op.
+        FsAction defaultMask = this.entries.get(this.entries.size() - 2)
+            .getPermission();
+        FsAction entryPerm = entry.getPermission();
+        return entryPerm.and(defaultMask);
+      }
+    } else {
+      return entry.getPermission();
+    }
   }
 }

+ 18 - 14
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java

@@ -86,22 +86,26 @@ class AclCommands extends FsCommand {
           (perm.getOtherAction().implies(FsAction.EXECUTE) ? "t" : "T"));
       }
 
-      List<AclEntry> entries = perm.getAclBit() ?
-        item.fs.getAclStatus(item.path).getEntries() :
-        Collections.<AclEntry>emptyList();
+      AclStatus aclStatus = item.fs.getAclStatus(item.path);
+      List<AclEntry> entries = perm.getAclBit() ? aclStatus.getEntries()
+          : Collections.<AclEntry> emptyList();
       ScopedAclEntries scopedEntries = new ScopedAclEntries(
         AclUtil.getAclFromPermAndEntries(perm, entries));
-      printAclEntriesForSingleScope(scopedEntries.getAccessEntries());
-      printAclEntriesForSingleScope(scopedEntries.getDefaultEntries());
+      printAclEntriesForSingleScope(aclStatus, perm,
+          scopedEntries.getAccessEntries());
+      printAclEntriesForSingleScope(aclStatus, perm,
+          scopedEntries.getDefaultEntries());
       out.println();
     }
 
     /**
      * Prints all the ACL entries in a single scope.
-     *
+     * @param aclStatus AclStatus for the path
+     * @param fsPerm FsPermission for the path
      * @param entries List<AclEntry> containing ACL entries of file
      */
-    private void printAclEntriesForSingleScope(List<AclEntry> entries) {
+    private void printAclEntriesForSingleScope(AclStatus aclStatus,
+        FsPermission fsPerm, List<AclEntry> entries) {
       if (entries.isEmpty()) {
         return;
       }
@@ -110,10 +114,8 @@ class AclCommands extends FsCommand {
           out.println(entry);
         }
       } else {
-        // ACL sort order guarantees mask is the second-to-last entry.
-        FsAction maskPerm = entries.get(entries.size() - 2).getPermission();
         for (AclEntry entry: entries) {
-          printExtendedAclEntry(entry, maskPerm);
+          printExtendedAclEntry(aclStatus, fsPerm, entry);
         }
       }
     }
@@ -123,14 +125,16 @@ class AclCommands extends FsCommand {
      * permissions of the entry, then also prints the restricted version as the
      * effective permissions.  The mask applies to all named entries and also
      * the unnamed group entry.
-     *
+     * @param aclStatus AclStatus for the path
+     * @param fsPerm FsPermission for the path
      * @param entry AclEntry extended ACL entry to print
-     * @param maskPerm FsAction permissions in the ACL's mask entry
      */
-    private void printExtendedAclEntry(AclEntry entry, FsAction maskPerm) {
+    private void printExtendedAclEntry(AclStatus aclStatus,
+        FsPermission fsPerm, AclEntry entry) {
       if (entry.getName() != null || entry.getType() == AclEntryType.GROUP) {
         FsAction entryPerm = entry.getPermission();
-        FsAction effectivePerm = entryPerm.and(maskPerm);
+        FsAction effectivePerm = aclStatus
+            .getEffectivePermission(entry, fsPerm);
         if (entryPerm != effectivePerm) {
           out.println(String.format("%s\t#effective:%s", entry,
             effectivePerm.SYMBOL));

+ 27 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java

@@ -65,6 +65,8 @@ abstract public class Command extends Configured {
   public PrintStream out = System.out;
   /** allows stderr to be captured if necessary */
   public PrintStream err = System.err;
+  /** allows the command factory to be used if necessary */
+  private CommandFactory commandFactory = null;
 
   /** Constructor */
   protected Command() {
@@ -121,6 +123,15 @@ abstract public class Command extends Configured {
     return exitCode;
   }
 
+  /** sets the command factory for later use */
+  public void setCommandFactory(CommandFactory factory) {
+    this.commandFactory = factory;
+  }
+  /** retrieves the command factory */
+  protected CommandFactory getCommandFactory() {
+    return this.commandFactory;
+  }
+
   /**
    * Invokes the command handler.  The default behavior is to process options,
    * expand arguments, and then process each argument.
@@ -308,7 +319,7 @@ abstract public class Command extends Configured {
     for (PathData item : items) {
       try {
         processPath(item);
-        if (recursive && item.stat.isDirectory()) {
+        if (recursive && isPathRecursable(item)) {
           recursePath(item);
         }
         postProcessPath(item);
@@ -318,6 +329,21 @@ abstract public class Command extends Configured {
     }
   }
 
+  /**
+   * Determines whether a {@link PathData} item is recursable. Default
+   * implementation is to recurse directories but can be overridden to recurse
+   * through symbolic links.
+   *
+   * @param item
+   *          a {@link PathData} object
+   * @return true if the item is recursable, false otherwise
+   * @throws IOException
+   *           if anything goes wrong in the user-implementation
+   */
+  protected boolean isPathRecursable(PathData item) throws IOException {
+    return item.stat.isDirectory();
+  }
+
   /**
    * Hook for commands to implement an operation to be applied on each
    * path for the command.  Note implementation of this method is optional

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandFactory.java

@@ -124,6 +124,7 @@ public class CommandFactory extends Configured {
       if (cmdClass != null) {
         instance = ReflectionUtils.newInstance(cmdClass, conf);
         instance.setName(cmdName);
+        instance.setCommandFactory(this);
       }
     }
     return instance;

+ 7 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java

@@ -32,6 +32,7 @@ import org.apache.avro.generic.GenericDatumWriter;
 import org.apache.avro.io.DatumWriter;
 import org.apache.avro.io.EncoderFactory;
 import org.apache.avro.io.JsonEncoder;
+import org.apache.commons.io.Charsets;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -195,11 +196,11 @@ class Display extends FsCommand {
 
       FileChecksum checksum = item.fs.getFileChecksum(item.path);
       if (checksum == null) {
-        out.printf("%s\tNONE\t\n", item.toString());
+        out.printf("%s\tNONE\t%n", item.toString());
       } else {
         String checksumString = StringUtils.byteToHexString(
             checksum.getBytes(), 0, checksum.getLength());
-        out.printf("%s\t%s\t%s\n",
+        out.printf("%s\t%s\t%s%n",
             item.toString(), checksum.getAlgorithmName(),
             checksumString);
       }
@@ -234,10 +235,10 @@ class Display extends FsCommand {
         if (!r.next(key, val)) {
           return -1;
         }
-        byte[] tmp = key.toString().getBytes();
+        byte[] tmp = key.toString().getBytes(Charsets.UTF_8);
         outbuf.write(tmp, 0, tmp.length);
         outbuf.write('\t');
-        tmp = val.toString().getBytes();
+        tmp = val.toString().getBytes(Charsets.UTF_8);
         outbuf.write(tmp, 0, tmp.length);
         outbuf.write('\n');
         inbuf.reset(outbuf.getData(), outbuf.getLength());
@@ -299,7 +300,8 @@ class Display extends FsCommand {
       encoder.flush();
       if (!fileReader.hasNext()) {
         // Write a new line after the last Avro record.
-        output.write(System.getProperty("line.separator").getBytes());
+        output.write(System.getProperty("line.separator")
+                         .getBytes(Charsets.UTF_8));
         output.flush();
       }
       pos = 0;

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsCommand.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FsShellPermissions;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.shell.find.Find;
 
 /**
  * Base class for all "hadoop fs" commands
@@ -48,6 +49,7 @@ abstract public class FsCommand extends Command {
     factory.registerCommands(Count.class);
     factory.registerCommands(Delete.class);
     factory.registerCommands(Display.class);
+    factory.registerCommands(Find.class);
     factory.registerCommands(FsShellPermissions.class);
     factory.registerCommands(FsUsage.class);
     factory.registerCommands(Ls.class);

+ 7 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java

@@ -26,6 +26,7 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.StringUtils;
@@ -117,7 +118,7 @@ class FsUsage extends FsCommand {
     "Note that, even without the -s option, this only shows size summaries " +
     "one level deep into a directory.\n\n" +
     "The output is in the form \n" + 
-    "\tsize\tname(full path)\n"; 
+    "\tsize\tdisk space consumed\tname(full path)\n";
 
     protected boolean summary = false;
     
@@ -132,7 +133,7 @@ class FsUsage extends FsCommand {
 
     @Override
     protected void processPathArgument(PathData item) throws IOException {
-      usagesTable = new TableBuilder(2);
+      usagesTable = new TableBuilder(3);
       // go one level deep on dirs from cmdline unless in summary mode
       if (!summary && item.stat.isDirectory()) {
         recursePath(item);
@@ -144,16 +145,12 @@ class FsUsage extends FsCommand {
 
     @Override
     protected void processPath(PathData item) throws IOException {
-      long length;
-      if (item.stat.isDirectory()) {
-        length = item.fs.getContentSummary(item.path).getLength();
-      } else {
-        length = item.stat.getLen();
-      }
-      usagesTable.addRow(formatSize(length), item);
+      ContentSummary contentSummary = item.fs.getContentSummary(item.path);
+      long length = contentSummary.getLength();
+      long spaceConsumed = contentSummary.getSpaceConsumed();
+      usagesTable.addRow(formatSize(length), formatSize(spaceConsumed), item);
     }
   }
-
   /** show disk usage summary */
   public static class Dus extends Du {
     public static final String NAME = "dus";

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java

@@ -57,7 +57,7 @@ class Ls extends FsCommand {
 		  
   
 
-  protected static final SimpleDateFormat dateFormat = 
+  protected final SimpleDateFormat dateFormat =
     new SimpleDateFormat("yyyy-MM-dd HH:mm");
 
   protected int maxRepl = 3, maxLen = 10, maxOwner = 0, maxGroup = 0;

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java

@@ -55,8 +55,8 @@ class Stat extends FsCommand {
     "in the specified format. Format accepts filesize in blocks (%b), group name of owner(%g), " +
     "filename (%n), block size (%o), replication (%r), user name of owner(%u), modification date (%y, %Y)\n";
 
-  protected static final SimpleDateFormat timeFmt;
-  static {
+  protected final SimpleDateFormat timeFmt;
+  {
     timeFmt = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
     timeFmt.setTimeZone(TimeZone.getTimeZone("UTC"));
   }

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java

@@ -83,6 +83,8 @@ class Test extends FsCommand {
       case 'z':
         test = (item.stat.getLen() == 0);
         break;
+      default:
+        break;
     }
     if (!test) exitCode = 1;
   }

+ 84 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/And.java

@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.shell.find;
+
+import java.io.IOException;
+import java.util.Deque;
+
+import org.apache.hadoop.fs.shell.PathData;
+
+/**
+ * Implements the -a (and) operator for the
+ * {@link org.apache.hadoop.fs.shell.find.Find} command.
+ */
+final class And extends BaseExpression {
+  /** Registers this expression with the specified factory. */
+  public static void registerExpression(ExpressionFactory factory)
+      throws IOException {
+    factory.addClass(And.class, "-a");
+    factory.addClass(And.class, "-and");
+  }
+
+  private static final String[] USAGE = { "expression -a expression",
+      "expression -and expression", "expression expression" };
+  private static final String[] HELP = {
+      "Logical AND operator for joining two expressions. Returns",
+      "true if both child expressions return true. Implied by the",
+      "juxtaposition of two expressions and so does not need to be",
+      "explicitly specified. The second expression will not be",
+      "applied if the first fails." };
+
+  public And() {
+    super();
+    setUsage(USAGE);
+    setHelp(HELP);
+  }
+
+  /**
+   * Applies child expressions to the {@link PathData} item. If all pass then
+   * returns {@link Result#PASS} else returns the result of the first
+   * non-passing expression.
+   */
+  @Override
+  public Result apply(PathData item, int depth) throws IOException {
+    Result result = Result.PASS;
+    for (Expression child : getChildren()) {
+      Result childResult = child.apply(item, -1);
+      result = result.combine(childResult);
+      if (!result.isPass()) {
+        return result;
+      }
+    }
+    return result;
+  }
+
+  @Override
+  public boolean isOperator() {
+    return true;
+  }
+
+  @Override
+  public int getPrecedence() {
+    return 200;
+  }
+
+  @Override
+  public void addChildren(Deque<Expression> expressions) {
+    addChildren(expressions, 2);
+  }
+}

+ 302 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/BaseExpression.java

@@ -0,0 +1,302 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.shell.find;
+
+import java.io.IOException;
+import java.util.Deque;
+import java.util.LinkedList;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.shell.PathData;
+
+/**
+ * Abstract expression for use in the
+ * {@link org.apache.hadoop.fs.shell.find.Find} command. Provides default
+ * behavior for a no-argument primary expression.
+ */
+public abstract class BaseExpression implements Expression, Configurable {
+  private String[] usage = { "Not yet implemented" };
+  private String[] help = { "Not yet implemented" };
+
+  /** Sets the usage text for this {@link Expression} */
+  protected void setUsage(String[] usage) {
+    this.usage = usage;
+  }
+
+  /** Sets the help text for this {@link Expression} */
+  protected void setHelp(String[] help) {
+    this.help = help;
+  }
+
+  @Override
+  public String[] getUsage() {
+    return this.usage;
+  }
+
+  @Override
+  public String[] getHelp() {
+    return this.help;
+  }
+
+  @Override
+  public void setOptions(FindOptions options) throws IOException {
+    this.options = options;
+    for (Expression child : getChildren()) {
+      child.setOptions(options);
+    }
+  }
+
+  @Override
+  public void prepare() throws IOException {
+    for (Expression child : getChildren()) {
+      child.prepare();
+    }
+  }
+
+  @Override
+  public void finish() throws IOException {
+    for (Expression child : getChildren()) {
+      child.finish();
+    }
+  }
+
+  /** Options passed in from the {@link Find} command. */
+  private FindOptions options;
+
+  /** Hadoop configuration. */
+  private Configuration conf;
+
+  /** Arguments for this expression. */
+  private LinkedList<String> arguments = new LinkedList<String>();
+
+  /** Children of this expression. */
+  private LinkedList<Expression> children = new LinkedList<Expression>();
+
+  /** Return the options to be used by this expression. */
+  protected FindOptions getOptions() {
+    return (this.options == null) ? new FindOptions() : this.options;
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return this.conf;
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append(getClass().getSimpleName());
+    sb.append("(");
+    boolean firstArg = true;
+    for (String arg : getArguments()) {
+      if (!firstArg) {
+        sb.append(",");
+      } else {
+        firstArg = false;
+      }
+      sb.append(arg);
+    }
+    sb.append(";");
+    firstArg = true;
+    for (Expression child : getChildren()) {
+      if (!firstArg) {
+        sb.append(",");
+      } else {
+        firstArg = false;
+      }
+      sb.append(child.toString());
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  @Override
+  public boolean isAction() {
+    for (Expression child : getChildren()) {
+      if (child.isAction()) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  @Override
+  public boolean isOperator() {
+    return false;
+  }
+
+  /**
+   * Returns the arguments of this expression
+   *
+   * @return list of argument strings
+   */
+  protected List<String> getArguments() {
+    return this.arguments;
+  }
+
+  /**
+   * Returns the argument at the given position (starting from 1).
+   *
+   * @param position
+   *          argument to be returned
+   * @return requested argument
+   * @throws IOException
+   *           if the argument doesn't exist or is null
+   */
+  protected String getArgument(int position) throws IOException {
+    if (position > this.arguments.size()) {
+      throw new IOException("Missing argument at " + position);
+    }
+    String argument = this.arguments.get(position - 1);
+    if (argument == null) {
+      throw new IOException("Null argument at position " + position);
+    }
+    return argument;
+  }
+
+  /**
+   * Returns the children of this expression.
+   *
+   * @return list of child expressions
+   */
+  protected List<Expression> getChildren() {
+    return this.children;
+  }
+
+  @Override
+  public int getPrecedence() {
+    return 0;
+  }
+
+  @Override
+  public void addChildren(Deque<Expression> exprs) {
+    // no children by default, will be overridden by specific expressions.
+  }
+
+  /**
+   * Add a specific number of children to this expression. The children are
+   * popped off the head of the expressions.
+   *
+   * @param exprs
+   *          deque of expressions from which to take the children
+   * @param count
+   *          number of children to be added
+   */
+  protected void addChildren(Deque<Expression> exprs, int count) {
+    for (int i = 0; i < count; i++) {
+      addChild(exprs.pop());
+    }
+  }
+
+  /**
+   * Add a single argument to this expression. The argument is popped off the
+   * head of the expressions.
+   *
+   * @param expr
+   *          child to add to the expression
+   */
+  private void addChild(Expression expr) {
+    children.push(expr);
+  }
+
+  @Override
+  public void addArguments(Deque<String> args) {
+    // no children by default, will be overridden by specific expressions.
+  }
+
+  /**
+   * Add a specific number of arguments to this expression. The children are
+   * popped off the head of the expressions.
+   *
+   * @param args
+   *          deque of arguments from which to take the argument
+   * @param count
+   *          number of children to be added
+   */
+  protected void addArguments(Deque<String> args, int count) {
+    for (int i = 0; i < count; i++) {
+      addArgument(args.pop());
+    }
+  }
+
+  /**
+   * Add a single argument to this expression. The argument is popped off the
+   * head of the expressions.
+   *
+   * @param arg
+   *          argument to add to the expression
+   */
+  protected void addArgument(String arg) {
+    arguments.add(arg);
+  }
+
+  /**
+   * Returns the {@link FileStatus} from the {@link PathData} item. If the
+   * current options require links to be followed then the returned file status
+   * is that of the linked file.
+   *
+   * @param item
+   *          PathData
+   * @param depth
+   *          current depth in the process directories
+   * @return FileStatus
+   */
+  protected FileStatus getFileStatus(PathData item, int depth)
+      throws IOException {
+    FileStatus fileStatus = item.stat;
+    if (fileStatus.isSymlink()) {
+      if (options.isFollowLink() || (options.isFollowArgLink() &&
+          (depth == 0))) {
+        Path linkedFile = item.fs.resolvePath(fileStatus.getSymlink());
+        fileStatus = getFileSystem(item).getFileStatus(linkedFile);
+      }
+    }
+    return fileStatus;
+  }
+
+  /**
+   * Returns the {@link Path} from the {@link PathData} item.
+   *
+   * @param item
+   *          PathData
+   * @return Path
+   */
+  protected Path getPath(PathData item) throws IOException {
+    return item.path;
+  }
+
+  /**
+   * Returns the {@link FileSystem} associated with the {@link PathData} item.
+   *
+   * @param item PathData
+   * @return FileSystem
+   */
+  protected FileSystem getFileSystem(PathData item) throws IOException {
+    return item.fs;
+  }
+}

+ 107 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Expression.java

@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.shell.find;
+
+import java.io.IOException;
+import java.util.Deque;
+
+import org.apache.hadoop.fs.shell.PathData;
+
+/**
+ * Interface describing an expression to be used in the
+ * {@link org.apache.hadoop.fs.shell.find.Find} command.
+ */
+public interface Expression {
+  /**
+   * Set the options for this expression, called once before processing any
+   * items.
+   *
+   * @param options {@link FindOptions} to be applied
+   * @throws IOException if the options cannot be applied
+   */
+  public void setOptions(FindOptions options) throws IOException;
+
+  /**
+   * Prepares the expression for execution, called once after setting options
+   * and before processing any items.
+   *
+   * @throws IOException if preparation fails
+   */
+  public void prepare() throws IOException;
+
+  /**
+   * Apply the expression to the specified item, called once for each item.
+   *
+   * @param item {@link PathData} item to be processed
+   * @param depth distance of the item from the command line argument
+   * @return {@link Result} of applying the expression to the item
+   * @throws IOException if the item cannot be processed
+   */
+  public Result apply(PathData item, int depth) throws IOException;
+
+  /**
+   * Finishes the expression, called once after processing all items.
+   *
+   * @throws IOException if cleanup fails
+   */
+  public void finish() throws IOException;
+
+  /**
+   * Returns brief usage instructions for this expression. Multiple items
+   * should be returned if there are multiple ways to use this expression.
+   *
+   * @return array of usage instructions
+   */
+  public String[] getUsage();
+
+  /**
+   * Returns a description of the expression for use in help. Multiple lines
+   * should be returned as separate array items. Lines should be formatted to
+   * 60 characters or less.
+   *
+   * @return array of description lines
+   */
+  public String[] getHelp();
+
+  /**
+   * Indicates whether this expression performs an action, i.e. provides
+   * output back to the user.
+   *
+   * @return true if this expression is an action
+   */
+  public boolean isAction();
+
+  /**
+   * Identifies the expression as an operator rather than a primary.
+   *
+   * @return true if this expression is an operator
+   */
+  public boolean isOperator();
+
+  /**
+   * Returns the precedence of this expression
+   * (only applicable to operators).
+   *
+   * @return operator precedence; higher binds tighter
+   */
+  public int getPrecedence();
+
+  /**
+   * Adds children to this expression. Children are popped from the head of the
+   * deque.
+   *
+   * @param expressions
+   *          deque of expressions from which to take the children
+   */
+  public void addChildren(Deque<Expression> expressions);
+
+  /**
+   * Adds arguments to this expression. Arguments are popped from the head of
+   * the deque, in order.
+   *
+   * @param args deque of arguments from which to take expression arguments
+   */
+  public void addArguments(Deque<String> args);
+}

+ 156 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/ExpressionFactory.java

@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.shell.find;
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * Factory class for registering and searching for expressions for use in the
+ * {@link org.apache.hadoop.fs.shell.find.Find} command.
+ */
+final class ExpressionFactory {
+  private static final String REGISTER_EXPRESSION_METHOD = "registerExpression";
+
+  /** Maps an expression name (e.g. "-name") to its implementing class. */
+  private Map<String, Class<? extends Expression>> expressionMap =
+      new HashMap<String, Class<? extends Expression>>();
+
+  private static final ExpressionFactory INSTANCE = new ExpressionFactory();
+
+  static ExpressionFactory getExpressionFactory() {
+    return INSTANCE;
+  }
+
+  /**
+   * Private constructor to ensure singleton.
+   */
+  private ExpressionFactory() {
+  }
+
+  /**
+   * Invokes "static void registerExpression(FindExpressionFactory)" on the
+   * given class. This method abstracts the contract between the factory and the
+   * expression class. Do not assume that directly invoking registerExpression
+   * on the given class will have the same effect.
+   *
+   * @param expressionClass
+   *          class to allow an opportunity to register
+   */
+  void registerExpression(Class<? extends Expression> expressionClass) {
+    try {
+      // getMethod never returns null; it throws NoSuchMethodException
+      // when the method is absent, so no null check is needed.
+      Method register = expressionClass.getMethod(REGISTER_EXPRESSION_METHOD,
+          ExpressionFactory.class);
+      register.invoke(null, this);
+    } catch (Exception e) {
+      // Chain the original exception so the full stack trace is preserved.
+      throw new RuntimeException(StringUtils.stringifyException(e), e);
+    }
+  }
+
+  /**
+   * Register the given class as handling the given list of expression names.
+   *
+   * @param expressionClass
+   *          the class implementing the expression names
+   * @param names
+   *          one or more command names that will invoke this class
+   * @throws IOException
+   *           if the expression is not of an expected type
+   */
+  void addClass(Class<? extends Expression> expressionClass,
+      String... names) throws IOException {
+    for (String name : names) {
+      expressionMap.put(name, expressionClass);
+    }
+  }
+
+  /**
+   * Determines whether the given expression name represents an actual
+   * expression.
+   *
+   * @param expressionName
+   *          name of the expression
+   * @return true if expressionName represents an expression
+   */
+  boolean isExpression(String expressionName) {
+    return expressionMap.containsKey(expressionName);
+  }
+
+  /**
+   * Get an instance of the requested expression.
+   *
+   * @param expressionName
+   *          name of the command to lookup
+   * @param conf
+   *          the Hadoop configuration
+   * @return the {@link Expression} or null if the expression is unknown
+   */
+  Expression getExpression(String expressionName, Configuration conf) {
+    if (conf == null) {
+      throw new NullPointerException("configuration is null");
+    }
+    Class<? extends Expression> expressionClass = expressionMap
+        .get(expressionName);
+    return createExpression(expressionClass, conf);
+  }
+
+  /**
+   * Creates an instance of the requested {@link Expression} class.
+   *
+   * @param expressionClass
+   *          {@link Expression} class to be instantiated
+   * @param conf
+   *          the Hadoop configuration
+   * @return a new instance of the requested {@link Expression} class, or null
+   *         if expressionClass is null
+   */
+  Expression createExpression(
+      Class<? extends Expression> expressionClass, Configuration conf) {
+    Expression instance = null;
+    if (expressionClass != null) {
+      instance = ReflectionUtils.newInstance(expressionClass, conf);
+    }
+    return instance;
+  }
+
+  /**
+   * Creates an instance of the requested {@link Expression} class.
+   *
+   * @param expressionClassname
+   *          name of the {@link Expression} class to be instantiated
+   * @param conf
+   *          the Hadoop configuration
+   * @return a new instance of the requested {@link Expression} class
+   */
+  Expression createExpression(String expressionClassname,
+      Configuration conf) {
+    try {
+      Class<? extends Expression> expressionClass = Class.forName(
+          expressionClassname).asSubclass(Expression.class);
+      return createExpression(expressionClass, conf);
+    } catch (ClassNotFoundException e) {
+      // Chain the cause so callers can see why the lookup failed.
+      throw new IllegalArgumentException("Invalid classname "
+          + expressionClassname, e);
+    }
+  }
+}

+ 144 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/FilterExpression.java

@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.shell.find;
+
+import java.io.IOException;
+import java.util.Deque;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.shell.PathData;
+
+/**
+ * Provides an abstract composition filter for the {@link Expression} interface.
+ * Allows other {@link Expression} implementations to be reused without
+ * inheritance. All methods delegate to the wrapped expression when it is
+ * non-null and otherwise return a benign default.
+ */
+public abstract class FilterExpression implements Expression, Configurable {
+  /** The wrapped expression; may be null. */
+  protected Expression expression;
+
+  protected FilterExpression(Expression expression) {
+    this.expression = expression;
+  }
+
+  @Override
+  public void setOptions(FindOptions options) throws IOException {
+    if (expression != null) {
+      expression.setOptions(options);
+    }
+  }
+
+  @Override
+  public void prepare() throws IOException {
+    if (expression != null) {
+      expression.prepare();
+    }
+  }
+
+  @Override
+  public Result apply(PathData item, int depth) throws IOException {
+    if (expression != null) {
+      // Delegate with the caller's depth. A hard-coded -1 was previously
+      // passed here, which broke any wrapped expression whose behavior
+      // depends on the item's depth (e.g. follow-arg-link handling at
+      // depth 0).
+      return expression.apply(item, depth);
+    }
+    return Result.PASS;
+  }
+
+  @Override
+  public void finish() throws IOException {
+    if (expression != null) {
+      expression.finish();
+    }
+  }
+
+  @Override
+  public String[] getUsage() {
+    if (expression != null) {
+      return expression.getUsage();
+    }
+    return null;
+  }
+
+  @Override
+  public String[] getHelp() {
+    if (expression != null) {
+      return expression.getHelp();
+    }
+    return null;
+  }
+
+  @Override
+  public boolean isAction() {
+    if (expression != null) {
+      return expression.isAction();
+    }
+    return false;
+  }
+
+  @Override
+  public boolean isOperator() {
+    if (expression != null) {
+      return expression.isOperator();
+    }
+    return false;
+  }
+
+  @Override
+  public int getPrecedence() {
+    if (expression != null) {
+      return expression.getPrecedence();
+    }
+    return -1;
+  }
+
+  @Override
+  public void addChildren(Deque<Expression> expressions) {
+    if (expression != null) {
+      expression.addChildren(expressions);
+    }
+  }
+
+  @Override
+  public void addArguments(Deque<String> args) {
+    if (expression != null) {
+      expression.addArguments(args);
+    }
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    if (expression instanceof Configurable) {
+      ((Configurable) expression).setConf(conf);
+    }
+  }
+
+  @Override
+  public Configuration getConf() {
+    if (expression instanceof Configurable) {
+      return ((Configurable) expression).getConf();
+    }
+    return null;
+  }
+
+  @Override
+  public String toString() {
+    if (expression != null) {
+      return getClass().getSimpleName() + "-" + expression.toString();
+    }
+    return getClass().getSimpleName();
+  }
+}

+ 444 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Find.java

@@ -0,0 +1,444 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.shell.find;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.Deque;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.shell.CommandFactory;
+import org.apache.hadoop.fs.shell.CommandFormat;
+import org.apache.hadoop.fs.shell.FsCommand;
+import org.apache.hadoop.fs.shell.PathData;
+
+/**
+ * Implements a Hadoop find command.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class Find extends FsCommand {
+  /**
+   * Register the names for the find command.
+   *
+   * @param factory the command factory that will instantiate this class
+   */
+  public static void registerCommands(CommandFactory factory) {
+    factory.addClass(Find.class, "-find");
+  }
+
+  public static final String NAME = "find";
+  public static final String USAGE = "<path> ... <expression> ...";
+  public static final String DESCRIPTION;
+  private static final String[] HELP =
+  { "Finds all files that match the specified expression and",
+      "applies selected actions to them. If no <path> is specified",
+      "then defaults to the current working directory. If no",
+      "expression is specified then defaults to -print."
+  };
+
+  private static final String OPTION_FOLLOW_LINK = "L";
+  private static final String OPTION_FOLLOW_ARG_LINK = "H";
+
+  /** List of expressions recognized by this command. */
+  @SuppressWarnings("rawtypes")
+  private static final Class[] EXPRESSIONS;
+
+  static {
+    // Initialize the static variables.
+    EXPRESSIONS = new Class[] {
+        // Operator Expressions
+        And.class,
+        // Action Expressions
+        Print.class,
+        // Navigation Expressions
+        // Matcher Expressions
+        Name.class };
+    DESCRIPTION = buildDescription(ExpressionFactory.getExpressionFactory());
+
+    // Register the expressions with the expression factory.
+    registerExpressions(ExpressionFactory.getExpressionFactory());
+  }
+
+  /** Options for use in this command */
+  private FindOptions options;
+
+  /** Root expression for this instance of the command. */
+  private Expression rootExpression;
+
+  /** Set of path items returning a {@link Result#STOP} result. */
+  private HashSet<Path> stopPaths = new HashSet<Path>();
+
+  /** Register the expressions with the expression factory. */
+  @SuppressWarnings("unchecked")
+  private static void registerExpressions(ExpressionFactory factory) {
+    for (Class<? extends Expression> exprClass : EXPRESSIONS) {
+      factory.registerExpression(exprClass);
+    }
+  }
+
+  /** Build the description used by the help command. */
+  @SuppressWarnings("unchecked")
+  private static String buildDescription(ExpressionFactory factory) {
+    ArrayList<Expression> operators = new ArrayList<Expression>();
+    ArrayList<Expression> primaries = new ArrayList<Expression>();
+    for (Class<? extends Expression> exprClass : EXPRESSIONS) {
+      Expression expr = factory.createExpression(exprClass, null);
+      if (expr.isOperator()) {
+        operators.add(expr);
+      } else {
+        primaries.add(expr);
+      }
+    }
+    // Sort both lists by class name so that help output is stable.
+    Comparator<Expression> byClassName = new Comparator<Expression>() {
+      @Override
+      public int compare(Expression arg0, Expression arg1) {
+        return arg0.getClass().getName().compareTo(arg1.getClass().getName());
+      }
+    };
+    Collections.sort(operators, byClassName);
+    Collections.sort(primaries, byClassName);
+
+    StringBuilder sb = new StringBuilder();
+    for (String line : HELP) {
+      sb.append(line).append("\n");
+    }
+    sb.append("\n");
+    sb.append("The following primary expressions are recognised:\n");
+    for (Expression expr : primaries) {
+      for (String line : expr.getUsage()) {
+        sb.append("  ").append(line).append("\n");
+      }
+      for (String line : expr.getHelp()) {
+        sb.append("    ").append(line).append("\n");
+      }
+      sb.append("\n");
+    }
+    sb.append("The following operators are recognised:\n");
+    for (Expression expr : operators) {
+      for (String line : expr.getUsage()) {
+        sb.append("  ").append(line).append("\n");
+      }
+      for (String line : expr.getHelp()) {
+        sb.append("    ").append(line).append("\n");
+      }
+      sb.append("\n");
+    }
+    return sb.toString();
+  }
+
+  /** Default constructor for the Find command. */
+  public Find() {
+    setRecursive(true);
+  }
+
+  @Override
+  protected void processOptions(LinkedList<String> args) throws IOException {
+    CommandFormat cf =
+        new CommandFormat(1, Integer.MAX_VALUE, OPTION_FOLLOW_LINK,
+            OPTION_FOLLOW_ARG_LINK, null);
+    cf.parse(args);
+
+    // -L takes precedence over -H when both are given.
+    if (cf.getOpt(OPTION_FOLLOW_LINK)) {
+      getOptions().setFollowLink(true);
+    } else if (cf.getOpt(OPTION_FOLLOW_ARG_LINK)) {
+      getOptions().setFollowArgLink(true);
+    }
+
+    // search for first non-path argument (ie starts with a "-") and capture and
+    // remove the remaining arguments as expressions
+    LinkedList<String> expressionArgs = new LinkedList<String>();
+    Iterator<String> it = args.iterator();
+    boolean isPath = true;
+    while (it.hasNext()) {
+      String arg = it.next();
+      if (isPath) {
+        if (arg.startsWith("-")) {
+          isPath = false;
+        }
+      }
+      if (!isPath) {
+        expressionArgs.add(arg);
+        it.remove();
+      }
+    }
+
+    if (args.isEmpty()) {
+      args.add(Path.CUR_DIR);
+    }
+
+    Expression expression = parseExpression(expressionArgs);
+    if (!expression.isAction()) {
+      // No action specified, so default to "-a -print" (like Unix find).
+      Expression and = getExpression(And.class);
+      Deque<Expression> children = new LinkedList<Expression>();
+      children.add(getExpression(Print.class));
+      children.add(expression);
+      and.addChildren(children);
+      expression = and;
+    }
+
+    setRootExpression(expression);
+  }
+
+  /**
+   * Set the root expression for this find.
+   *
+   * @param expression the root expression to be applied to each item
+   */
+  @InterfaceAudience.Private
+  void setRootExpression(Expression expression) {
+    this.rootExpression = expression;
+  }
+
+  /**
+   * Return the root expression for this find.
+   *
+   * @return the root expression
+   */
+  @InterfaceAudience.Private
+  Expression getRootExpression() {
+    return this.rootExpression;
+  }
+
+  /** Returns the current find options, creating them if necessary. */
+  @InterfaceAudience.Private
+  FindOptions getOptions() {
+    if (options == null) {
+      options = createOptions();
+    }
+    return options;
+  }
+
+  /** Create a new set of find options. */
+  private FindOptions createOptions() {
+    FindOptions options = new FindOptions();
+    options.setOut(out);
+    options.setErr(err);
+    options.setIn(System.in);
+    options.setCommandFactory(getCommandFactory());
+    options.setConfiguration(getConf());
+    return options;
+  }
+
+  /** Add the {@link PathData} item to the stop set. */
+  private void addStop(PathData item) {
+    stopPaths.add(item.path);
+  }
+
+  /** Returns true if the {@link PathData} item is in the stop set. */
+  private boolean isStop(PathData item) {
+    return stopPaths.contains(item.path);
+  }
+
+  /**
+   * Parse a list of arguments to extract the {@link Expression} elements.
+   * The input Deque will be modified to remove the used elements.
+   *
+   * @param args arguments to be parsed
+   * @return list of {@link Expression} elements applicable to this command
+   * @throws IOException if list can not be parsed
+   */
+  private Expression parseExpression(Deque<String> args) throws IOException {
+    Deque<Expression> primaries = new LinkedList<Expression>();
+    Deque<Expression> operators = new LinkedList<Expression>();
+    Expression prevExpr = getExpression(And.class);
+    while (!args.isEmpty()) {
+      String arg = args.pop();
+      if ("(".equals(arg)) {
+        Expression expr = parseExpression(args);
+        primaries.add(expr);
+        prevExpr = new BaseExpression() {
+          @Override
+          public Result apply(PathData item, int depth) throws IOException {
+            return Result.PASS;
+          }
+        }; // stub the previous expression to be a non-op
+      } else if (")".equals(arg)) {
+        break;
+      } else if (isExpression(arg)) {
+        Expression expr = getExpression(arg);
+        expr.addArguments(args);
+        if (expr.isOperator()) {
+          // Shunting-yard style: reduce pending operators of equal or
+          // higher precedence before pushing this one.
+          while (!operators.isEmpty()) {
+            if (operators.peek().getPrecedence() >= expr.getPrecedence()) {
+              Expression op = operators.pop();
+              op.addChildren(primaries);
+              primaries.push(op);
+            } else {
+              break;
+            }
+          }
+          operators.push(expr);
+        } else {
+          // Two adjacent primaries have an implicit "and" between them.
+          if (!prevExpr.isOperator()) {
+            Expression and = getExpression(And.class);
+            while (!operators.isEmpty()) {
+              if (operators.peek().getPrecedence() >= and.getPrecedence()) {
+                Expression op = operators.pop();
+                op.addChildren(primaries);
+                primaries.push(op);
+              } else {
+                break;
+              }
+            }
+            operators.push(and);
+          }
+          primaries.push(expr);
+        }
+        prevExpr = expr;
+      } else {
+        throw new IOException("Unexpected argument: " + arg);
+      }
+    }
+
+    while (!operators.isEmpty()) {
+      Expression operator = operators.pop();
+      operator.addChildren(primaries);
+      primaries.push(operator);
+    }
+
+    return primaries.isEmpty() ? getExpression(Print.class) : primaries.pop();
+  }
+
+  /** Returns true if the target is an ancestor of the source. */
+  private boolean isAncestor(PathData source, PathData target) {
+    for (Path parent = source.path; (parent != null) && !parent.isRoot();
+        parent = parent.getParent()) {
+      if (parent.equals(target.path)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  @Override
+  protected void recursePath(PathData item) throws IOException {
+    if (isStop(item)) {
+      // this item returned a stop result so don't recurse any further
+      return;
+    }
+    if (getDepth() >= getOptions().getMaxDepth()) {
+      // reached the maximum depth so don't go any further.
+      return;
+    }
+    if (item.stat.isSymlink() && getOptions().isFollowLink()) {
+      PathData linkedItem =
+          new PathData(item.stat.getSymlink().toString(), getConf());
+      if (isAncestor(item, linkedItem)) {
+        getOptions().getErr().println(
+            "Infinite loop ignored: " + item.toString() + " -> "
+                + linkedItem.toString());
+        return;
+      }
+      if (linkedItem.exists) {
+        item = linkedItem;
+      }
+    }
+    if (item.stat.isDirectory()) {
+      super.recursePath(item);
+    }
+  }
+
+  @Override
+  protected boolean isPathRecursable(PathData item) throws IOException {
+    if (item.stat.isDirectory()) {
+      return true;
+    }
+    if (item.stat.isSymlink()) {
+      PathData linkedItem =
+          new PathData(item.fs.resolvePath(item.stat.getSymlink()).toString(),
+              getConf());
+      if (linkedItem.stat.isDirectory()) {
+        if (getOptions().isFollowLink()) {
+          return true;
+        }
+        if (getOptions().isFollowArgLink() && (getDepth() == 0)) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  @Override
+  protected void processPath(PathData item) throws IOException {
+    if (getOptions().isDepthFirst()) {
+      // depth first so leave until post processing
+      return;
+    }
+    applyItem(item);
+  }
+
+  @Override
+  protected void postProcessPath(PathData item) throws IOException {
+    if (!getOptions().isDepthFirst()) {
+      // not depth first so already processed
+      return;
+    }
+    applyItem(item);
+  }
+
+  /** Apply the root expression to the item, recording any STOP result. */
+  private void applyItem(PathData item) throws IOException {
+    if (getDepth() >= getOptions().getMinDepth()) {
+      Result result = getRootExpression().apply(item, getDepth());
+      if (Result.STOP.equals(result)) {
+        addStop(item);
+      }
+    }
+  }
+
+  @Override
+  protected void processArguments(LinkedList<PathData> args)
+      throws IOException {
+    Expression expr = getRootExpression();
+    expr.setOptions(getOptions());
+    expr.prepare();
+    super.processArguments(args);
+    expr.finish();
+  }
+
+  /** Gets a named expression from the factory. */
+  private Expression getExpression(String expressionName) {
+    return ExpressionFactory.getExpressionFactory().getExpression(
+        expressionName, getConf());
+  }
+
+  /** Gets an instance of an expression from the factory. */
+  private Expression getExpression(
+      Class<? extends Expression> expressionClass) {
+    return ExpressionFactory.getExpressionFactory().createExpression(
+        expressionClass, getConf());
+  }
+
+  /** Asks the factory whether an expression is recognized. */
+  private boolean isExpression(String expressionName) {
+    return ExpressionFactory.getExpressionFactory()
+        .isExpression(expressionName);
+  }
+}

+ 271 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/FindOptions.java

@@ -0,0 +1,271 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.shell.find;
+
+import java.io.InputStream;
+import java.io.PrintStream;
+import java.util.Date;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.shell.CommandFactory;
+
+/**
+ * Options to be used by the {@link Find} command and its {@link Expression}s.
+ */
+public class FindOptions {
+  /** Output stream to be used. */
+  private PrintStream out;
+
+  /** Error stream to be used. */
+  private PrintStream err;
+
+  /** Input stream to be used. */
+  private InputStream in;
+
+  /**
+   * Indicates whether the expression should be applied to the directory tree
+   * depth first.
+   */
+  private boolean depthFirst = false;
+
+  /** Indicates whether symbolic links should be followed. */
+  private boolean followLink = false;
+
+  /**
+   * Indicates whether symbolic links specified as command arguments should be
+   * followed.
+   */
+  private boolean followArgLink = false;
+
+  /** Start time of the find process, in milliseconds since the epoch. */
+  private long startTime = new Date().getTime();
+
+  /**
+   * Depth at which to start applying expressions.
+   */
+  private int minDepth = 0;
+
+  /**
+   * Depth at which to stop applying expressions.
+   */
+  private int maxDepth = Integer.MAX_VALUE;
+
+  /** Factory for retrieving command classes. */
+  private CommandFactory commandFactory;
+
+  /** Configuration object; defaults to a freshly constructed one. */
+  private Configuration configuration = new Configuration();
+
+  /**
+   * Sets the output stream to be used.
+   *
+   * @param out output stream to be used
+   */
+  public void setOut(PrintStream out) {
+    this.out = out;
+  }
+
+  /**
+   * Returns the output stream to be used.
+   *
+   * @return output stream to be used
+   */
+  public PrintStream getOut() {
+    return this.out;
+  }
+
+  /**
+   * Sets the error stream to be used.
+   *
+   * @param err error stream to be used
+   */
+  public void setErr(PrintStream err) {
+    this.err = err;
+  }
+
+  /**
+   * Returns the error stream to be used.
+   *
+   * @return error stream to be used
+   */
+  public PrintStream getErr() {
+    return this.err;
+  }
+
+  /**
+   * Sets the input stream to be used.
+   *
+   * @param in input stream to be used
+   */
+  public void setIn(InputStream in) {
+    this.in = in;
+  }
+
+  /**
+   * Returns the input stream to be used.
+   *
+   * @return input stream to be used
+   */
+  public InputStream getIn() {
+    return this.in;
+  }
+
+  /**
+   * Sets flag indicating whether the expression should be applied to the
+   * directory tree depth first.
+   *
+   * @param depthFirst true indicates depth first traversal
+   */
+  public void setDepthFirst(boolean depthFirst) {
+    this.depthFirst = depthFirst;
+  }
+
+  /**
+   * Should the directory tree be traversed depth first?
+   *
+   * @return true indicates depth first traversal
+   */
+  public boolean isDepthFirst() {
+    return this.depthFirst;
+  }
+
+  /**
+   * Sets flag indicating whether symbolic links should be followed.
+   *
+   * @param followLink true indicates follow links
+   */
+  public void setFollowLink(boolean followLink) {
+    this.followLink = followLink;
+  }
+
+  /**
+   * Should symbolic links be followed?
+   *
+   * @return true indicates links should be followed
+   */
+  public boolean isFollowLink() {
+    return this.followLink;
+  }
+
+  /**
+   * Sets flag indicating whether command line symbolic links should be
+   * followed.
+   *
+   * @param followArgLink true indicates follow links
+   */
+  public void setFollowArgLink(boolean followArgLink) {
+    this.followArgLink = followArgLink;
+  }
+
+  /**
+   * Should command line symbolic links be followed?
+   *
+   * @return true indicates links should be followed
+   */
+  public boolean isFollowArgLink() {
+    return this.followArgLink;
+  }
+
+  /**
+   * Returns the start time of this {@link Find} command.
+   *
+   * @return start time (in milliseconds since epoch)
+   */
+  public long getStartTime() {
+    return this.startTime;
+  }
+
+  /**
+   * Set the start time of this {@link Find} command.
+   *
+   * @param time start time (in milliseconds since epoch)
+   */
+  public void setStartTime(long time) {
+    this.startTime = time;
+  }
+
+  /**
+   * Returns the minimum depth for applying expressions.
+   *
+   * @return min depth
+   */
+  public int getMinDepth() {
+    return this.minDepth;
+  }
+
+  /**
+   * Sets the minimum depth for applying expressions.
+   *
+   * @param minDepth minimum depth
+   */
+  public void setMinDepth(int minDepth) {
+    this.minDepth = minDepth;
+  }
+
+  /**
+   * Returns the maximum depth for applying expressions.
+   *
+   * @return maximum depth
+   */
+  public int getMaxDepth() {
+    return this.maxDepth;
+  }
+
+  /**
+   * Sets the maximum depth for applying expressions.
+   *
+   * @param maxDepth maximum depth
+   */
+  public void setMaxDepth(int maxDepth) {
+    this.maxDepth = maxDepth;
+  }
+
+  /**
+   * Set the command factory.
+   *
+   * @param factory {@link CommandFactory}
+   */
+  public void setCommandFactory(CommandFactory factory) {
+    this.commandFactory = factory;
+  }
+
+  /**
+   * Return the command factory.
+   *
+   * @return {@link CommandFactory}
+   */
+  public CommandFactory getCommandFactory() {
+    return this.commandFactory;
+  }
+
+  /**
+   * Set the {@link Configuration}.
+   *
+   * @param configuration {@link Configuration}
+   */
+  public void setConfiguration(Configuration configuration) {
+    this.configuration = configuration;
+  }
+
+  /**
+   * Return the {@link Configuration}.
+   *
+   * @return the {@link Configuration}
+   */
+  public Configuration getConfiguration() {
+    return this.configuration;
+  }
+}

+ 100 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Name.java

@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.shell.find;
+
import java.io.IOException;
import java.util.Deque;
import java.util.Locale;

import org.apache.hadoop.fs.GlobPattern;
import org.apache.hadoop.fs.shell.PathData;
+
+/**
+ * Implements the -name expression for the
+ * {@link org.apache.hadoop.fs.shell.find.Find} command.
+ */
+final class Name extends BaseExpression {
+  /** Registers this expression with the specified factory. */
+  public static void registerExpression(ExpressionFactory factory)
+      throws IOException {
+    factory.addClass(Name.class, "-name");
+    factory.addClass(Iname.class, "-iname");
+  }
+
+  private static final String[] USAGE = { "-name pattern", "-iname pattern" };
+  private static final String[] HELP = {
+      "Evaluates as true if the basename of the file matches the",
+      "pattern using standard file system globbing.",
+      "If -iname is used then the match is case insensitive." };
+  private GlobPattern globPattern;
+  private boolean caseSensitive = true;
+
+  /** Creates a case sensitive name expression. */
+  public Name() {
+    this(true);
+  }
+
+  /**
+   * Construct a Name {@link Expression} with a specified case sensitivity.
+   *
+   * @param caseSensitive if true the comparisons are case sensitive.
+   */
+  private Name(boolean caseSensitive) {
+    super();
+    setUsage(USAGE);
+    setHelp(HELP);
+    setCaseSensitive(caseSensitive);
+  }
+
+  private void setCaseSensitive(boolean caseSensitive) {
+    this.caseSensitive = caseSensitive;
+  }
+
+  @Override
+  public void addArguments(Deque<String> args) {
+    addArguments(args, 1);
+  }
+
+  @Override
+  public void prepare() throws IOException {
+    String argPattern = getArgument(1);
+    if (!caseSensitive) {
+      argPattern = argPattern.toLowerCase();
+    }
+    globPattern = new GlobPattern(argPattern);
+  }
+
+  @Override
+  public Result apply(PathData item, int depth) throws IOException {
+    String name = getPath(item).getName();
+    if (!caseSensitive) {
+      name = name.toLowerCase();
+    }
+    if (globPattern.matches(name)) {
+      return Result.PASS;
+    } else {
+      return Result.FAIL;
+    }
+  }
+
+  /** Case insensitive version of the -name expression. */
+  static class Iname extends FilterExpression {
+    public Iname() {
+      super(new Name(false));
+    }
+  }
+}

+ 76 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Print.java

@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.shell.find;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.shell.PathData;
+
+/**
+ * Implements the -print expression for the
+ * {@link org.apache.hadoop.fs.shell.find.Find} command.
+ */
+final class Print extends BaseExpression {
+  /** Registers this expression with the specified factory. */
+  public static void registerExpression(ExpressionFactory factory)
+      throws IOException {
+    factory.addClass(Print.class, "-print");
+    factory.addClass(Print0.class, "-print0");
+  }
+
+  private static final String[] USAGE = { "-print", "-print0" };
+  private static final String[] HELP = {
+      "Always evaluates to true. Causes the current pathname to be",
+      "written to standard output followed by a newline. If the -print0",
+      "expression is used then an ASCII NULL character is appended rather",
+      "than a newline." };
+
+  private final String suffix;
+
+  public Print() {
+    this("\n");
+  }
+
+  /**
+   * Construct a Print {@link Expression} with the specified suffix.
+   */
+  private Print(String suffix) {
+    super();
+    setUsage(USAGE);
+    setHelp(HELP);
+    this.suffix = suffix;
+  }
+
+  @Override
+  public Result apply(PathData item, int depth) throws IOException {
+    getOptions().getOut().print(item.toString() + suffix);
+    return Result.PASS;
+  }
+
+  @Override
+  public boolean isAction() {
+    return true;
+  }
+
+  /** Implements the -print0 expression. */
+  final static class Print0 extends FilterExpression {
+    public Print0() {
+      super(new Print("\0"));
+    }
+  }
+}

+ 88 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/find/Result.java

@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.shell.find;
+
/**
 * Immutable value describing the outcome of applying an {@link Expression}
 * to a path: whether processing should continue (pass/fail) and whether the
 * walk should descend into further directories.
 */
public final class Result {
  /** Result indicating {@link Expression} processing should continue. */
  public static final Result PASS = new Result(true, true);
  /** Result indicating {@link Expression} processing should stop. */
  public static final Result FAIL = new Result(false, true);
  /**
   * Result indicating {@link Expression} processing should not descend any more
   * directories.
   */
  public static final Result STOP = new Result(true, false);

  // Final fields: Result is an immutable value object.
  private final boolean descend;
  private final boolean success;

  private Result(boolean success, boolean recurse) {
    this.success = success;
    this.descend = recurse;
  }

  /** Should further directories be descended. */
  public boolean isDescend() {
    return this.descend;
  }

  /** Should processing continue. */
  public boolean isPass() {
    return this.success;
  }

  /**
   * Returns the combination of this and another result: the logical AND of
   * both the pass and descend flags.
   */
  public Result combine(Result other) {
    return new Result(this.isPass() && other.isPass(), this.isDescend()
        && other.isDescend());
  }

  /** Negate this result (inverts pass, preserves descend). */
  public Result negate() {
    return new Result(!this.isPass(), this.isDescend());
  }

  @Override
  public String toString() {
    return "success=" + isPass() + "; recurse=" + isDescend();
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + (descend ? 1231 : 1237);
    result = prime * result + (success ? 1231 : 1237);
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null || getClass() != obj.getClass()) {
      return false;
    }
    Result other = (Result) obj;
    return descend == other.descend && success == other.success;
  }
}

+ 3 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -1064,7 +1064,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     public void process(WatchedEvent event) {
       hasReceivedEvent.countDown();
       try {
-        hasSetZooKeeper.await(zkSessionTimeout, TimeUnit.MILLISECONDS);
+        if (!hasSetZooKeeper.await(zkSessionTimeout, TimeUnit.MILLISECONDS)) {
+          LOG.debug("Event received with stale zk");
+        }
         ActiveStandbyElector.this.processWatchEvent(
             zk, event);
       } catch (Throwable t) {

+ 0 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java

@@ -168,12 +168,6 @@ public abstract class HAAdmin extends Configured implements Tool {
   private boolean isOtherTargetNodeActive(String targetNodeToActivate, boolean forceActive)
       throws IOException  {
     Collection<String> targetIds = getTargetIds(targetNodeToActivate);
-    if(targetIds == null) {
-      errOut.println("transitionToActive: No target node in the "
-          + "current configuration");
-      printUsage(errOut, "-transitionToActive");
-      return true;
-    }
     targetIds.remove(targetNodeToActivate);
     for(String targetId : targetIds) {
       HAServiceTarget target = resolveTarget(targetId);

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java

@@ -310,6 +310,8 @@ public class SshFenceByTcpPort extends Configured
       case com.jcraft.jsch.Logger.FATAL:
         LOG.fatal(message);
         break;
+      default:
+        break;
       }
     }
   }

이 변경점에서 너무 많은 파일들이 변경되어 몇몇 파일들은 표시되지 않았습니다.