
merge the rest of trunk to branch HDFS-4949

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1532967 13f79535-47bb-0310-9956-ffa450edef68
Andrew Wang 11 years ago
parent
commit
da1f4419e3
100 changed files with 6237 additions and 505 deletions
  1. 1 0
      .gitattributes
  2. 22 3
      BUILDING.txt
  3. 45 0
      hadoop-assemblies/src/main/resources/assemblies/hadoop-sls.xml
  4. 11 0
      hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
  5. 0 4
      hadoop-client/pom.xml
  6. 15 0
      hadoop-common-project/hadoop-annotations/pom.xml
  7. 144 7
      hadoop-common-project/hadoop-common/CHANGES.txt
  8. 17 1
      hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  9. 4 1
      hadoop-common-project/hadoop-common/pom.xml
  10. 1 0
      hadoop-common-project/hadoop-common/src/main/conf/ssl-server.xml.example
  11. 1760 0
      hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
  12. 4 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  13. 41 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DirectoryListingStartAfterNotFoundException.java
  14. 70 25
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
  15. 172 76
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java
  16. 25 17
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
  17. 3 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Seekable.java
  18. 7 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
  19. 8 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
  20. 9 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
  21. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java
  22. 3 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
  23. 5 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
  24. 23 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java
  25. 1 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  26. 30 7
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
  27. 41 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetriableException.java
  28. 26 21
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  29. 7 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java
  30. 8 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java
  31. 6 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java
  32. 7 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
  33. 10 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java
  34. 9 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  35. 12 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
  36. 24 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
  37. 19 17
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
  38. 21 7
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
  39. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java
  40. 1 1
      hadoop-common-project/hadoop-common/src/main/native/native.vcxproj
  41. 2 2
      hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto
  42. 1 1
      hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto
  43. 20 0
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  44. 7 3
      hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm
  45. 9 4
      hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm
  46. 1 1
      hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm
  47. 8 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java
  48. 8 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java
  49. 143 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
  50. 8 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java
  51. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java
  52. 288 12
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
  53. 83 16
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
  54. 176 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
  55. 47 10
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java
  56. 5 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextMainOperations.java
  57. 19 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
  58. 5 4
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java
  59. 8 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java
  60. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
  61. 78 22
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayFile.java
  62. 49 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayWritable.java
  63. 210 15
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBloomMapFile.java
  64. 24 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBooleanWritable.java
  65. 20 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBytesWritable.java
  66. 57 10
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestEnumSetWritable.java
  67. 610 37
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java
  68. 35 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSetFile.java
  69. 77 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
  70. 342 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java
  71. 39 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java
  72. 6 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java
  73. 4 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSwitchMapping.java
  74. 44 17
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
  75. 137 45
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java
  76. 97 6
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java
  77. 533 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java
  78. 240 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/TestBloomFilters.java
  79. 89 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/hash/TestHash.java
  80. 9 0
      hadoop-common-project/hadoop-common/src/test/resources/core-site.xml
  81. Binary
      hadoop-common-project/hadoop-common/src/test/resources/test.har/.part-0.crc
  82. 0 0
      hadoop-common-project/hadoop-common/src/test/resources/test.har/_SUCCESS
  83. 4 0
      hadoop-common-project/hadoop-common/src/test/resources/test.har/_index
  84. 2 0
      hadoop-common-project/hadoop-common/src/test/resources/test.har/_masterindex
  85. 0 0
      hadoop-common-project/hadoop-common/src/test/resources/test.har/part-0
  86. 15 3
      hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
  87. 0 2
      hadoop-common-project/hadoop-nfs/pom.xml
  88. 5 5
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java
  89. 4 0
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
  90. 12 18
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
  91. 5 6
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java
  92. 2 2
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java
  93. 2 2
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java
  94. 10 2
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java
  95. 3 2
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/ACCESS3Response.java
  96. 3 2
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/COMMIT3Response.java
  97. 3 2
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/CREATE3Response.java
  98. 3 2
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSINFO3Response.java
  99. 3 2
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSSTAT3Response.java
  100. 3 2
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/GETATTR3Response.java

+ 1 - 0
.gitattributes

@@ -15,5 +15,6 @@
 
 *.bat    text eol=crlf
 *.cmd    text eol=crlf
+*.vcxproj text merge=union eol=crlf
 *.csproj text merge=union eol=crlf
 *.sln    text merge=union eol=crlf

+ 22 - 3
BUILDING.txt

@@ -4,8 +4,8 @@ Build instructions for Hadoop
 Requirements:
 
 * Unix System
-* JDK 1.6
-* Maven 3.0
+* JDK 1.6+
+* Maven 3.0 or later
 * Findbugs 1.3.9 (if running findbugs)
 * ProtocolBuffer 2.5.0
 * CMake 2.6 or newer (if compiling native code)
@@ -149,13 +149,32 @@ Create a local staging version of the website (in /tmp/hadoop-site)
 
 ----------------------------------------------------------------------------------
 
+Handling out of memory errors in builds
+
+----------------------------------------------------------------------------------
+
+If the build process fails with an out of memory error, you should be able to fix
+it by increasing the memory used by maven -which can be done via the environment
+variable MAVEN_OPTS.
+
+Here is an example setting to allocate between 256 and 512 MB of heap space to
+Maven
+
+export MAVEN_OPTS="-Xms256m -Xmx512m"
+
+----------------------------------------------------------------------------------
+
 Building on OS/X
 
 ----------------------------------------------------------------------------------
 
-Hadoop does not build on OS/X with Java 7.
+A one-time manual step is required to enable building Hadoop OS X with Java 7
+every time the JDK is updated.
 see: https://issues.apache.org/jira/browse/HADOOP-9350
 
+$ sudo mkdir `/usr/libexec/java_home`/Classes
+$ sudo ln -s `/usr/libexec/java_home`/lib/tools.jar `/usr/libexec/java_home`/Classes/classes.jar
+
 ----------------------------------------------------------------------------------
 
 Building on Windows

+ 45 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-sls.xml

@@ -0,0 +1,45 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+  
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<assembly>
+  <id>hadoop-sls</id>
+  <formats>
+    <format>dir</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+
+  <fileSets>
+    <fileSet>
+      <directory>${basedir}/src/main/bin</directory>
+      <outputDirectory>sls/bin</outputDirectory>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/html</directory>
+      <outputDirectory>sls/html</outputDirectory>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/sample-conf</directory>
+      <outputDirectory>sls/sample-conf</outputDirectory>
+    </fileSet>
+    <fileSet>
+      <directory>${basedir}/src/main/data</directory>
+      <outputDirectory>sls/sample-data</outputDirectory>
+    </fileSet>
+  </fileSets>
+
+</assembly>

+ 11 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml

@@ -93,6 +93,17 @@
         <include>*-sources.jar</include>
       </includes>
     </fileSet>
+    <fileSet>
+      <directory>../hadoop-sls/target</directory>
+      <outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
+      <includes>
+        <include>*-sources.jar</include>
+      </includes>
+    </fileSet>
+    <fileSet>
+      <directory>../hadoop-sls/target/hadoop-sls-${project.version}/sls</directory>
+      <outputDirectory>/share/hadoop/${hadoop.component}/sls</outputDirectory>
+    </fileSet>
   </fileSets>
   <dependencySets>
     <dependencySet>

+ 0 - 4
hadoop-client/pom.xml

@@ -39,10 +39,6 @@
       <artifactId>hadoop-common</artifactId>
       <scope>compile</scope>
       <exclusions>
-        <exclusion>
-          <groupId>commons-httpclient</groupId>
-          <artifactId>commons-httpclient</artifactId>
-        </exclusion>
         <exclusion>
           <groupId>tomcat</groupId>
           <artifactId>jasper-compiler</artifactId>

+ 15 - 0
hadoop-common-project/hadoop-annotations/pom.xml

@@ -56,6 +56,21 @@
         </dependency>
       </dependencies>
     </profile>
+    <profile>
+      <id>jdk1.7</id>
+      <activation>
+        <jdk>1.7</jdk>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>jdk.tools</groupId>
+          <artifactId>jdk.tools</artifactId>
+          <version>1.7</version>
+          <scope>system</scope>
+          <systemPath>${java.home}/../lib/tools.jar</systemPath>
+        </dependency>
+      </dependencies>
+    </profile>
   </profiles>
 
 </project>

+ 144 - 7
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -289,6 +289,9 @@ Release 2.3.0 - UNRELEASED
 
   NEW FEATURES
 
+    HADOOP-8545. Filesystem Implementation for OpenStack Swift
+    (Dmitry Mezhensky, David Dobbins, Stevel via stevel)
+
   IMPROVEMENTS
 
     HADOOP-9784. Add a builder for HttpServer. (Junping Du via llu)
@@ -312,9 +315,6 @@ Release 2.3.0 - UNRELEASED
     HADOOP-9435.  Support building the JNI code against the IBM JVM.
     (Tian Hong Wang via Colin Patrick McCabe)
 
-    HADOOP-9758.  Provide configuration option for FileSystem/FileContext
-    symlink resolution.  (Andrew Wang via Colin Patrick McCabe)
-
     HADOOP-9848. Create a MiniKDC for use with security testing. 
     (ywskycn via tucu)
 
@@ -333,18 +333,48 @@ Release 2.3.0 - UNRELEASED
     HADOOP-9915.  o.a.h.fs.Stat support on Mac OS X  (Binglin Chang via Colin
     Patrick McCabe)
 
+    HADOOP-9998.  Provide methods to clear only part of the DNSToSwitchMapping.
+    (Junping Du via Colin Patrick McCabe)
+
+    HADOOP-10006. Compilation failure in trunk for
+    o.a.h.fs.swift.util.JSONUtil (Junping Du via stevel)
+
+    HADOOP-9063. enhance unit-test coverage of class
+    org.apache.hadoop.fs.FileUtil (Ivan A. Veselovsky via jlowe)
+
+    HADOOP-9254. Cover packages org.apache.hadoop.util.bloom,
+    org.apache.hadoop.util.hash (Vadim Bondarev via jlowe)
+
+    HADOOP-9225. Cover package org.apache.hadoop.compress.Snappy (Vadim
+    Bondarev, Andrey Klochkov and Nathan Roberts via jlowe)
+
+    HADOOP-9199. Cover package org.apache.hadoop.io with unit tests (Andrey
+    Klochkov via jeagles)
+
+    HADOOP-9470. eliminate duplicate FQN tests in different Hadoop modules
+    (Ivan A. Veselovsky via daryn)
+
+    HADOOP-9494. Excluded auto-generated and examples code from clover reports
+    (Andrey Klochkov via jeagles)
+
+    HADOOP-9897. Add method to get path start position without drive specifier in
+    o.a.h.fs.Path. (Binglin Chang via cnauroth)
+
+    HADOOP-9078. enhance unit-test coverage of class
+    org.apache.hadoop.fs.FileContext (Ivan A. Veselovsky via jeagles)
+
   OPTIMIZATIONS
 
     HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn)
 
   BUG FIXES
 
+    HADOOP-9964. Fix deadlocks in TestHttpServer by synchronize
+    ReflectionUtils.printThreadInfo. (Junping Du via llu)
+
     HADOOP-9582. Non-existent file to "hadoop fs -conf" doesn't throw error
     (Ashwin Shankar via jlowe)
 
-    HADOOP-9761.  ViewFileSystem#rename fails when using DistributedFileSystem.
-    (Andrew Wang via Colin Patrick McCabe)
-
     HADOOP-9817. FileSystem#globStatus and FileContext#globStatus need to work
     with symlinks. (Colin Patrick McCabe via Andrew Wang)
 
@@ -363,12 +393,93 @@ Release 2.3.0 - UNRELEASED
 
     HADOOP-9908. Fix NPE when versioninfo properties file is missing (todd)
 
-Release 2.1.1-beta - UNRELEASED
+    HADOOP-9350. Hadoop not building against Java7 on OSX
+    (Robert Kanter via stevel)
+
+    HADOOP-9929. Insufficient permissions for a path reported as file not found.
+    (Contributed by Colin Patrick McCabe)
+
+    HADOOP-9791. Add a test case covering long paths for new FileUtil access
+    check methods (ivanmi)
+
+    HADOOP-9981. globStatus should minimize its listStatus and getFileStatus
+    calls.  (Contributed by Colin Patrick McCabe)
+
+Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
   NEW FEATURES
 
+  IMPROVEMENTS
+
+    HADOOP-10046. Print a log message when SSL is enabled.
+    (David S. Wang via wang)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HADOOP-10028. Malformed ssl-server.xml.example. (Haohui Mai via jing9)
+
+    HADOOP-10030. FsShell -put/copyFromLocal should support Windows local path.
+    (Chuan Liu via cnauroth)
+
+    HADOOP-10031. FsShell -get/copyToLocal/moveFromLocal should support Windows
+    local path. (Chuan Liu via cnauroth)
+
+    HADOOP-10039. Add Hive to the list of projects using 
+    AbstractDelegationTokenSecretManager. (Haohui Mai via jing9)
+
+    HADOOP-10040. hadoop.cmd in UNIX format and would not run by default on
+    Windows. (cnauroth)
+
+Release 2.2.0 - 2013-10-13
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    HADOOP-9948. Add a config value to CLITestHelper to skip tests on Windows.
+    (Chuan Liu via cnauroth)
+
+    HADOOP-9976. Different versions of avro and avro-maven-plugin (Karthik
+    Kambatla via Sandy Ryza)
+
+    HADOOP-9758.  Provide configuration option for FileSystem/FileContext
+    symlink resolution.  (Andrew Wang via Colin Patrick McCabe)
+
+    HADOOP-8315. Support SASL-authenticated ZooKeeper in ActiveStandbyElector
+    (todd)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HADOOP-9776. HarFileSystem.listStatus() returns invalid authority if port
+    number is empty. (Shanyu Zhao via ivanmi)
+
+    HADOOP-9761.  ViewFileSystem#rename fails when using DistributedFileSystem.
+    (Andrew Wang via Colin Patrick McCabe)
+
+    HADOOP-10003. HarFileSystem.listLocatedStatus() fails.
+    (Jason Dere and suresh via suresh)
+
+    HADOOP-10017. Fix NPE in DFSClient#getDelegationToken when doing Distcp 
+    from a secured cluster to an insecured cluster. (Haohui Mai via jing9)
+
+Release 2.1.1-beta - 2013-09-23
+
+  INCOMPATIBLE CHANGES
+
+    HADOOP-9944. Fix RpcRequestHeaderProto.callId to be sint32 rather than
+    uint32 since ipc.Client.CONNECTION_CONTEXT_CALL_ID is signed (i.e. -3) 
+    (acmurthy)
+
+  NEW FEATURES
+
   IMPROVEMENTS
 
     HADOOP-9910. proxy server start and stop documentation wrong
@@ -405,6 +516,15 @@ Release 2.1.1-beta - UNRELEASED
     HADOOP-9918. Add addIfService to CompositeService (Karthik Kambatla via
     Sandy Ryza)
 
+    HADOOP-9945. HAServiceState should have a state for stopped services.
+    (Karthik Kambatla via atm)
+
+    HADOOP-9962. in order to avoid dependency divergence within Hadoop itself 
+    lets enable DependencyConvergence. (rvs via tucu)
+
+    HADOOP-9669. Reduce the number of byte array creations and copies in
+    XDR data manipulation. (Haohui Mai via brandonli)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -454,6 +574,23 @@ Release 2.1.1-beta - UNRELEASED
 
     HADOOP-9932. Improper synchronization in RetryCache. (kihwal)
 
+    HADOOP-9958. Add old constructor back to DelegationTokenInformation to
+    unbreak downstream builds. (Andrew Wang)
+
+    HADOOP-9960. Upgrade Jersey version to 1.9. (Karthik Kambatla via atm)
+
+    HADOOP-9557. hadoop-client excludes commons-httpclient. (Lohit Vijayarenu via
+    cnauroth)
+
+    HADOOP-9961. versions of a few transitive dependencies diverged between hadoop 
+    subprojects. (rvs via tucu)
+
+    HADOOP-9977. Hadoop services won't start with different keypass and
+    keystorepass when https is enabled. (cnauroth)
+
+    HADOOP-10005. No need to check INFO severity level is enabled or not.
+    (Jackie Chang via suresh)
+
 Release 2.1.0-beta - 2013-08-22
 
   INCOMPATIBLE CHANGES

+ 17 - 1
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -348,4 +348,20 @@
        <Method name="waitForServiceToStop" />
        <Bug code="JLM" />
      </Match>
- </FindBugsFilter>
+
+  <!--
+  OpenStack Swift FS module -closes streams in a different method
+  from where they are opened.
+  -->
+    <Match>
+      <Class name="org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream"/>
+      <Method name="uploadFileAttempt"/>
+      <Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
+    </Match>
+    <Match>
+      <Class name="org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream"/>
+      <Method name="uploadFilePartAttempt"/>
+      <Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
+    </Match>
+
+</FindBugsFilter>

+ 4 - 1
hadoop-common-project/hadoop-common/pom.xml

@@ -250,7 +250,6 @@
     <dependency>
       <groupId>org.apache.commons</groupId>
       <artifactId>commons-compress</artifactId>
-      <version>1.4</version>
     </dependency>
   </dependencies>
 
@@ -465,6 +464,10 @@
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c</exclude>
             <exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h</exclude>
             <exclude>src/test/java/org/apache/hadoop/fs/test-untar.tgz</exclude>
+            <exclude>src/test/resources/test.har/_SUCCESS</exclude>
+            <exclude>src/test/resources/test.har/_index</exclude>
+            <exclude>src/test/resources/test.har/_masterindex</exclude>
+            <exclude>src/test/resources/test.har/part-0</exclude>
           </excludes>
         </configuration>
       </plugin>

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/conf/ssl-server.xml.example

@@ -44,6 +44,7 @@
   <value>10000</value>
   <description>Truststore reload check interval, in milliseconds.
   Default value is 10000 (10 seconds).
+  </description>
 </property>
 
 <property>

+ 1760 - 0
hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html

@@ -1,3 +1,1763 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<title>Hadoop  2.2.0 Release Notes</title>
+<STYLE type="text/css">
+	H1 {font-family: sans-serif}
+	H2 {font-family: sans-serif; margin-left: 7mm}
+	TABLE {margin-left: 7mm}
+</STYLE>
+</head>
+<body>
+<h1>Hadoop  2.2.0 Release Notes</h1>
+These release notes include new developer and user-facing incompatibilities, features, and major improvements. 
+<a name="changes"/>
+<h2>Changes since Hadoop 2.1.1-beta</h2>
+<ul>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1278">YARN-1278</a>.
+     Blocker bug reported by Yesha Vora and fixed by Hitesh Shah <br>
+     <b>New AM does not start after rm restart</b><br>
+     <blockquote>The new AM fails to start after RM restarts. It fails to start new Application master and job fails with below error.
+
+ /usr/bin/mapred job -status job_1380985373054_0001
+13/10/05 15:04:04 INFO client.RMProxy: Connecting to ResourceManager at hostname
+Job: job_1380985373054_0001
+Job File: /user/abc/.staging/job_1380985373054_0001/job.xml
+Job Tracking URL : http://hostname:8088/cluster/app/application_1380985373054_0001
+Uber job : false
+Number of maps: 0
+Number of reduces: 0
+map() completion: 0.0
+reduce() completion: 0.0
+Job state: FAILED
+retired: false
+reason for failure: There are no failed tasks for the job. Job is failed due to some other reason and reason can be found in the logs.
+Counters: 0</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1277">YARN-1277</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Omkar Vinit Joshi <br>
+     <b>Add http policy support for YARN daemons</b><br>
+     <blockquote>This YARN part of HADOOP-10022.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1274">YARN-1274</a>.
+     Blocker bug reported by Alejandro Abdelnur and fixed by Siddharth Seth (nodemanager)<br>
+     <b>LCE fails to run containers that don't have resources to localize</b><br>
+     <blockquote>LCE container launch assumes the usercache/USER directory exists and it is owned by the user running the container process.
+
+But the directory is created only if there are resources to localize by the LCE localization command, if there are not resourcdes to localize, LCE localization never executes and launching fails reporting 255 exit code and the NM logs have something like:
+
+{code}
+2013-10-04 14:07:56,425 INFO org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor: main : command provided 1
+2013-10-04 14:07:56,425 INFO org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor: main : user is llama
+2013-10-04 14:07:56,425 INFO org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor: Can't create directory llama in /yarn/nm/usercache/llama/appcache/application_1380853306301_0004/container_1380853306301_0004_01_000004 - Permission denied
+{code}
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1273">YARN-1273</a>.
+     Major bug reported by Hitesh Shah and fixed by Hitesh Shah <br>
+     <b>Distributed shell does not account for start container failures reported asynchronously.</b><br>
+     <blockquote>2013-10-04 22:09:15,234 ERROR [org.apache.hadoop.yarn.client.api.async.impl.NMClientAsyncImpl #1] distributedshell.ApplicationMaster (ApplicationMaster.java:onStartContainerError(719)) - Failed to start Container container_1380920347574_0018_01_000006</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1271">YARN-1271</a>.
+     Major bug reported by Sandy Ryza and fixed by Sandy Ryza (nodemanager)<br>
+     <b>"Text file busy" errors launching containers again</b><br>
+     <blockquote>The error is shown below in the comments.
+
+MAPREDUCE-2374 fixed this by removing "-c" when running the container launch script.  It looks like the "-c" got brought back during the windows branch merge, so we should remove it again.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1262">YARN-1262</a>.
+     Major bug reported by Sandy Ryza and fixed by Karthik Kambatla <br>
+     <b>TestApplicationCleanup relies on all containers assigned in a single heartbeat</b><br>
+     <blockquote>TestApplicationCleanup submits container requests and waits for allocations to come in.  It only sends a single node heartbeat to the node, expecting multiple containers to be assigned on this heartbeat, which not all schedulers do by default.
+
+This is causing the test to fail when run with the Fair Scheduler.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1260">YARN-1260</a>.
+     Major sub-task reported by Yesha Vora and fixed by Omkar Vinit Joshi <br>
+     <b>RM_HOME link breaks when webapp.https.address related properties are not specified</b><br>
+     <blockquote>This issue happens in multiple node cluster where resource manager and node manager are running on different machines.
+
+Steps to reproduce:
+1) set yarn.resourcemanager.hostname = &lt;resourcemanager host&gt; in yarn-site.xml
+2) set hadoop.ssl.enabled = true in core-site.xml
+3) Do not specify below property in yarn-site.xml
+yarn.nodemanager.webapp.https.address and yarn.resourcemanager.webapp.https.address
+Here, the default value of above two property will be considered.
+4) Go to nodemanager web UI "https://&lt;nodemanager host&gt;:8044/node"
+5) Click on RM_HOME link 
+This link redirects to "https://&lt;nodemanager host&gt;:8090/cluster" instead "https://&lt;resourcemanager host&gt;:8090/cluster"
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1256">YARN-1256</a>.
+     Critical sub-task reported by Bikas Saha and fixed by Xuan Gong <br>
+     <b>NM silently ignores non-existent service in StartContainerRequest</b><br>
+     <blockquote>A container can set token service metadata for a service, say shuffle_service. If that service does not exist then the errors is silently ignored. Later, when the next container wants to access data written to shuffle_service by the first task, then it fails because the service does not have the token that was supposed to be set by the first task.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1254">YARN-1254</a>.
+     Major sub-task reported by Vinod Kumar Vavilapalli and fixed by Omkar Vinit Joshi <br>
+     <b>NM is polluting container's credentials</b><br>
+     <blockquote>Before launching the container, NM is using the same credential object and so is polluting what container should see. We should fix this.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1251">YARN-1251</a>.
+     Major bug reported by Junping Du and fixed by Xuan Gong (applications/distributed-shell)<br>
+     <b>TestDistributedShell#TestDSShell failed with timeout</b><br>
+     <blockquote>TestDistributedShell#TestDSShell on trunk Jenkins are failed consistently recently.
+The Stacktrace is:
+{code}
+java.lang.Exception: test timed out after 90000 milliseconds
+	at com.google.protobuf.LiteralByteString.&lt;init&gt;(LiteralByteString.java:234)
+	at com.google.protobuf.ByteString.copyFromUtf8(ByteString.java:255)
+	at org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos$RequestHeaderProto.getMethodNameBytes(ProtobufRpcEngineProtos.java:286)
+	at org.apache.hadoop.ipc.protobuf.ProtobufRpcEngineProtos$RequestHeaderProto.getSerializedSize(ProtobufRpcEngineProtos.java:462)
+	at com.google.protobuf.AbstractMessageLite.writeDelimitedTo(AbstractMessageLite.java:84)
+	at org.apache.hadoop.ipc.ProtobufRpcEngine$RpcMessageWithHeader.write(ProtobufRpcEngine.java:302)
+	at org.apache.hadoop.ipc.Client$Connection.sendRpcRequest(Client.java:989)
+	at org.apache.hadoop.ipc.Client.call(Client.java:1377)
+	at org.apache.hadoop.ipc.Client.call(Client.java:1357)
+	at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:206)
+	at $Proxy70.getApplicationReport(Unknown Source)
+	at org.apache.hadoop.yarn.api.impl.pb.client.ApplicationClientProtocolPBClientImpl.getApplicationReport(ApplicationClientProtocolPBClientImpl.java:137)
+	at sun.reflect.GeneratedMethodAccessor40.invoke(Unknown Source)
+	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
+	at java.lang.reflect.Method.invoke(Method.java:597)
+	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:185)
+	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:101)
+	at $Proxy71.getApplicationReport(Unknown Source)
+	at org.apache.hadoop.yarn.client.api.impl.YarnClientImpl.getApplicationReport(YarnClientImpl.java:195)
+	at org.apache.hadoop.yarn.applications.distributedshell.Client.monitorApplication(Client.java:622)
+	at org.apache.hadoop.yarn.applications.distributedshell.Client.run(Client.java:597)
+	at org.apache.hadoop.yarn.applications.distributedshell.TestDistributedShell.testDSShell(TestDistributedShell.java:125)
+{code}
+For details, please refer:
+https://builds.apache.org/job/PreCommit-YARN-Build/2039//testReport/</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1247">YARN-1247</a>.
+     Major bug reported by Roman Shaposhnik and fixed by Roman Shaposhnik (nodemanager)<br>
+     <b>test-container-executor has gotten out of sync with the changes to container-executor</b><br>
+     <blockquote>If run under the super-user account test-container-executor.c fails in multiple different places. It would be nice to fix it so that we have better testing of LCE functionality.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1246">YARN-1246</a>.
+     Minor improvement reported by Arpit Gupta and fixed by Arpit Gupta <br>
+     <b>Log application status in the rm log when app is done running</b><br>
+     <blockquote>Since there is no yarn history server it becomes difficult to determine what the status of an old application is. One has to be familiar with the state transition in yarn to know what means a success.
+
+We should add a log at info level that captures what the finalStatus of an app is. This would be helpful while debugging applications if the RM has restarted and we no longer can use the UI.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1236">YARN-1236</a>.
+     Major bug reported by Sandy Ryza and fixed by Sandy Ryza (resourcemanager)<br>
+     <b>FairScheduler setting queue name in RMApp is not working </b><br>
+     <blockquote>The fair scheduler sometimes picks a different queue than the one an application was submitted to, such as when user-as-default-queue is turned on.  It needs to update the queue name in the RMApp so that this choice will be reflected in the UI.
+
+This isn't working because the scheduler is looking up the RMApp by application attempt id instead of app id and failing to find it.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1229">YARN-1229</a>.
+     Blocker bug reported by Tassapol Athiapinya and fixed by Xuan Gong (nodemanager)<br>
+     <b>Define constraints on Auxiliary Service names. Change ShuffleHandler service name from mapreduce.shuffle to mapreduce_shuffle.</b><br>
+     <blockquote>I run sleep job. If AM fails to start, this exception could occur:
+
+13/09/20 11:00:23 INFO mapreduce.Job: Job job_1379673267098_0020 failed with state FAILED due to: Application application_1379673267098_0020 failed 1 times due to AM Container for appattempt_1379673267098_0020_000001 exited with  exitCode: 1 due to: Exception from container-launch:
+org.apache.hadoop.util.Shell$ExitCodeException: /myappcache/application_1379673267098_0020/container_1379673267098_0020_01_000001/launch_container.sh: line 12: export: `NM_AUX_SERVICE_mapreduce.shuffle=AAA0+gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
+': not a valid identifier
+
+at org.apache.hadoop.util.Shell.runCommand(Shell.java:464)
+at org.apache.hadoop.util.Shell.run(Shell.java:379)
+at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:589)
+at org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:195)
+at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:270)
+at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:78)
+at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303)
+at java.util.concurrent.FutureTask.run(FutureTask.java:138)
+at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886)
+at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908)
+at java.lang.Thread.run(Thread.java:662)
+.Failing this attempt.. Failing the application.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1228">YARN-1228</a>.
+     Major improvement reported by Sandy Ryza and fixed by Sandy Ryza (scheduler)<br>
+     <b>Clean up Fair Scheduler configuration loading</b><br>
+     <blockquote>Currently the Fair Scheduler is configured in two ways
+* An allocations file that has a different format than the standard Hadoop configuration file, which makes it easier to specify hierarchical objects like queues and their properties. 
+* With properties like yarn.scheduler.fair.max.assign that are specified in the standard Hadoop configuration format.
+
+The standard and default way of configuring it is to use fair-scheduler.xml as the allocations file and to put the yarn.scheduler properties in yarn-site.xml.
+
+It is also possible to specify a different file as the allocations file, and to place the yarn.scheduler properties in fair-scheduler.xml, which will be interpreted as in the standard Hadoop configuration format.  This flexibility is both confusing and unnecessary.
+
+Additionally, the allocation file is loaded as fair-scheduler.xml from the classpath if it is not specified, but is loaded as a File if it is.  This causes two problems
+1. We see different behavior when not setting the yarn.scheduler.fair.allocation.file, and setting it to fair-scheduler.xml, which is its default.
+2. Classloaders may choose to cache resources, which can break the reload logic when yarn.scheduler.fair.allocation.file is not specified.
+
+We should never allow the yarn.scheduler properties to go into fair-scheduler.xml.  And we should always load the allocations file as a file, not as a resource on the classpath.  To preserve existing behavior and allow loading files from the classpath, we can look for files on the classpath, but strip of their scheme and interpret them as Files.
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1221">YARN-1221</a>.
+     Major bug reported by Sandy Ryza and fixed by Siqi Li (resourcemanager , scheduler)<br>
+     <b>With Fair Scheduler, reserved MB reported in RM web UI increases indefinitely</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1219">YARN-1219</a>.
+     Major bug reported by shanyu zhao and fixed by shanyu zhao (nodemanager)<br>
+     <b>FSDownload changes file suffix making FileUtil.unTar() throw exception</b><br>
+     <blockquote>While running a Hive join operation on Yarn, I saw exception as described below. This is caused by FSDownload copy the files into a temp file and change the suffix into ".tmp" before unpacking it. In unpack(), it uses FileUtil.unTar() which will determine if the file is "gzipped" by looking at the file suffix:
+{code}
+boolean gzipped = inFile.toString().endsWith("gz");
+{code}
+
+To fix this problem, we can remove the ".tmp" in the temp file name.
+
+Here is the detailed exception:
+
+org.apache.commons.compress.archivers.tar.TarArchiveInputStream.getNextTarEntry(TarArchiveInputStream.java:240)
+	at org.apache.hadoop.fs.FileUtil.unTarUsingJava(FileUtil.java:676)
+	at org.apache.hadoop.fs.FileUtil.unTar(FileUtil.java:625)
+	at org.apache.hadoop.yarn.util.FSDownload.unpack(FSDownload.java:203)
+	at org.apache.hadoop.yarn.util.FSDownload.call(FSDownload.java:287)
+	at org.apache.hadoop.yarn.util.FSDownload.call(FSDownload.java:50)
+	at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334)
+	at java.util.concurrent.FutureTask.run(FutureTask.java:166)
+	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
+	at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334)
+	at java.util.concurrent.FutureTask.run(FutureTask.java:166)
+	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1110)
+	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:603)
+
+at java.lang.Thread.run(Thread.java:722)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1215">YARN-1215</a>.
+     Major bug reported by Chuan Liu and fixed by Chuan Liu (api)<br>
+     <b>Yarn URL should include userinfo</b><br>
+     <blockquote>In the {{org.apache.hadoop.yarn.api.records.URL}} class, we don't have an userinfo as part of the URL. When converting a {{java.net.URI}} object into the YARN URL object in {{ConverterUtils.getYarnUrlFromURI()}} method, we will set uri host as the url host. If the uri has a userinfo part, the userinfo is discarded. This will lead to information loss if the original uri has the userinfo, e.g. foo://username:password@example.com will be converted to foo://example.com and username/password information is lost during the conversion.
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1214">YARN-1214</a>.
+     Critical sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+     <b>Register ClientToken MasterKey in SecretManager after it is saved</b><br>
+     <blockquote>Currently, app attempt ClientToken master key is registered before it is saved. This can cause problem that before the master key is saved, client gets the token and RM also crashes, RM cannot reloads the master key back after it restarts as it is not saved. As a result, client is holding an invalid token.
+
+We can register the client token master key after it is saved in the store.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1213">YARN-1213</a>.
+     Major improvement reported by Sandy Ryza and fixed by Sandy Ryza (scheduler)<br>
+     <b>Restore config to ban submitting to undeclared pools in the Fair Scheduler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1204">YARN-1204</a>.
+     Major sub-task reported by Yesha Vora and fixed by Omkar Vinit Joshi <br>
+     <b>Need to add https port related property in Yarn</b><br>
+     <blockquote>There is no yarn property available to configure https port for Resource manager, nodemanager and history server. Currently, Yarn services uses the port defined for http [defined by 'mapreduce.jobhistory.webapp.address','yarn.nodemanager.webapp.address', 'yarn.resourcemanager.webapp.address'] for running services on https protocol.
+
+Yarn should have list of property to assign https port for RM, NM and JHS.
+It can be like below.
+yarn.nodemanager.webapp.https.address
+yarn.resourcemanager.webapp.https.address
+mapreduce.jobhistory.webapp.https.address </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1203">YARN-1203</a>.
+     Major sub-task reported by Yesha Vora and fixed by Omkar Vinit Joshi <br>
+     <b>Application Manager UI does not appear with Https enabled</b><br>
+     <blockquote>Need to add support to disable 'hadoop.ssl.enabled' for MR jobs.
+
+A job should be able to run on http protocol by setting 'hadoop.ssl.enabled' property at job level.
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1167">YARN-1167</a>.
+     Major bug reported by Tassapol Athiapinya and fixed by Xuan Gong (applications/distributed-shell)<br>
+     <b>Submitted distributed shell application shows appMasterHost = empty</b><br>
+     <blockquote>Submit distributed shell application. Once the application turns to be RUNNING state, app master host should not be empty. In reality, it is empty.
+
+==console logs==
+distributedshell.Client: Got application report from ASM for, appId=12, clientToAMToken=null, appDiagnostics=, appMasterHost=, appQueue=default, appMasterRpcPort=0, appStartTime=1378505161360, yarnAppState=RUNNING, distributedFinalState=UNDEFINED, 
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1157">YARN-1157</a>.
+     Major bug reported by Tassapol Athiapinya and fixed by Xuan Gong (resourcemanager)<br>
+     <b>ResourceManager UI has invalid tracking URL link for distributed shell application</b><br>
+     <blockquote>Submit YARN distributed shell application. Goto ResourceManager Web UI. The application definitely appears. In Tracking UI column, there will be history link. Click on that link. Instead of showing application master web UI, HTTP error 500 would appear.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1149">YARN-1149</a>.
+     Major bug reported by Ramya Sunil and fixed by Xuan Gong <br>
+     <b>NM throws InvalidStateTransitonException: Invalid event: APPLICATION_LOG_HANDLING_FINISHED at RUNNING</b><br>
+     <blockquote>When nodemanager receives a kill signal when an application has finished execution but log aggregation has not kicked in, InvalidStateTransitonException: Invalid event: APPLICATION_LOG_HANDLING_FINISHED at RUNNING is thrown
+
+{noformat}
+2013-08-25 20:45:00,875 INFO  logaggregation.AppLogAggregatorImpl (AppLogAggregatorImpl.java:finishLogAggregation(254)) - Application just finished : application_1377459190746_0118
+2013-08-25 20:45:00,876 INFO  logaggregation.AppLogAggregatorImpl (AppLogAggregatorImpl.java:uploadLogsForContainer(105)) - Starting aggregate log-file for app application_1377459190746_0118 at /app-logs/foo/logs/application_1377459190746_0118/&lt;host&gt;_45454.tmp
+2013-08-25 20:45:00,876 INFO  logaggregation.LogAggregationService (LogAggregationService.java:stopAggregators(151)) - Waiting for aggregation to complete for application_1377459190746_0118
+2013-08-25 20:45:00,891 INFO  logaggregation.AppLogAggregatorImpl (AppLogAggregatorImpl.java:uploadLogsForContainer(122)) - Uploading logs for container container_1377459190746_0118_01_000004. Current good log dirs are /tmp/yarn/local
+2013-08-25 20:45:00,915 INFO  logaggregation.AppLogAggregatorImpl (AppLogAggregatorImpl.java:doAppLogAggregation(182)) - Finished aggregate log-file for app application_1377459190746_0118
+2013-08-25 20:45:00,925 WARN  application.Application (ApplicationImpl.java:handle(427)) - Can't handle this event at current state
+org.apache.hadoop.yarn.state.InvalidStateTransitonException: Invalid event: APPLICATION_LOG_HANDLING_FINISHED at RUNNING
+        at org.apache.hadoop.yarn.state.StateMachineFactory.doTransition(StateMachineFactory.java:305) 
+        at org.apache.hadoop.yarn.state.StateMachineFactory.access$300(StateMachineFactory.java:46)
+        at org.apache.hadoop.yarn.state.StateMachineFactory$InternalStateMachine.doTransition(StateMachineFactory.java:448)
+        at org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl.handle(ApplicationImpl.java:425)
+        at org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl.handle(ApplicationImpl.java:59)
+        at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl$ApplicationEventDispatcher.handle(ContainerManagerImpl.java:697)
+        at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl$ApplicationEventDispatcher.handle(ContainerManagerImpl.java:689)
+        at org.apache.hadoop.yarn.event.AsyncDispatcher.dispatch(AsyncDispatcher.java:134)
+        at org.apache.hadoop.yarn.event.AsyncDispatcher$1.run(AsyncDispatcher.java:81)   
+        at java.lang.Thread.run(Thread.java:662)
+2013-08-25 20:45:00,926 INFO  application.Application (ApplicationImpl.java:handle(430)) - Application application_1377459190746_0118 transitioned from RUNNING to null
+2013-08-25 20:45:00,927 WARN  monitor.ContainersMonitorImpl (ContainersMonitorImpl.java:run(463)) - org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl is interrupted. Exiting.
+2013-08-25 20:45:00,938 INFO  ipc.Server (Server.java:stop(2437)) - Stopping server on 8040
+{noformat}
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1141">YARN-1141</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Updating resource requests should be decoupled with updating blacklist</b><br>
+     <blockquote>Currently, in CapacityScheduler and FifoScheduler, blacklist is updated together with resource requests, only when the incoming resource requests are not empty. Therefore, when the incoming resource requests are empty, the blacklist will not be updated even when blacklist additions and removals are not empty.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1131">YARN-1131</a>.
+     Minor sub-task reported by Tassapol Athiapinya and fixed by Siddharth Seth (client)<br>
+     <b>$yarn logs command should return an appropriate error message if YARN application is still running</b><br>
+     <blockquote>In the case when log aggregation is enabled, if a user submits MapReduce job and runs $ yarn logs -applicationId &lt;app ID&gt; while the YARN application is running, the command will return no message and return user back to shell. It is nice to tell the user that log aggregation is in progress.
+
+{code}
+-bash-4.1$ /usr/bin/yarn logs -applicationId application_1377900193583_0002
+-bash-4.1$
+{code}
+
+At the same time, if invalid application ID is given, YARN CLI should say that the application ID is incorrect rather than throwing NoSuchElementException.
+{code}
+$ /usr/bin/yarn logs -applicationId application_00000
+Exception in thread "main" java.util.NoSuchElementException
+at com.google.common.base.AbstractIterator.next(AbstractIterator.java:75)
+at org.apache.hadoop.yarn.util.ConverterUtils.toApplicationId(ConverterUtils.java:124)
+at org.apache.hadoop.yarn.util.ConverterUtils.toApplicationId(ConverterUtils.java:119)
+at org.apache.hadoop.yarn.logaggregation.LogDumper.run(LogDumper.java:110)
+at org.apache.hadoop.yarn.logaggregation.LogDumper.main(LogDumper.java:255)
+
+{code}
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1128">YARN-1128</a>.
+     Major bug reported by Sandy Ryza and fixed by Karthik Kambatla (scheduler)<br>
+     <b>FifoPolicy.computeShares throws NPE on empty list of Schedulables</b><br>
+     <blockquote>FifoPolicy gives all of a queue's share to the earliest-scheduled application.
+
+{code}
+    Schedulable earliest = null;
+    for (Schedulable schedulable : schedulables) {
+      if (earliest == null ||
+          schedulable.getStartTime() &lt; earliest.getStartTime()) {
+        earliest = schedulable;
+      }
+    }
+    earliest.setFairShare(Resources.clone(totalResources));
+{code}
+
+If the queue has no schedulables in it, earliest will be left null, leading to an NPE on the last line.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1090">YARN-1090</a>.
+     Major bug reported by Yesha Vora and fixed by Jian He <br>
+     <b>Job does not get into Pending State</b><br>
+     <blockquote>When there is no resource available to run a job, next job should go in pending state. RM UI should show next job as pending app and the counter for the pending app should be incremented.
+
+But Currently. Next job stays in ACCEPTED state and No AM has been assigned to this job.Though Pending App count is not incremented. 
+Running 'job status &lt;nextjob&gt;' shows job state=PREP. 
+
+$ mapred job -status job_1377122233385_0002
+13/08/21 21:59:23 INFO client.RMProxy: Connecting to ResourceManager at host1/ip1
+
+Job: job_1377122233385_0002
+Job File: /ABC/.staging/job_1377122233385_0002/job.xml
+Job Tracking URL : http://host1:port1/application_1377122233385_0002/
+Uber job : false
+Number of maps: 0
+Number of reduces: 0
+map() completion: 0.0
+reduce() completion: 0.0
+Job state: PREP
+retired: false
+reason for failure:</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1070">YARN-1070</a>.
+     Major sub-task reported by Hitesh Shah and fixed by Zhijie Shen (nodemanager)<br>
+     <b>ContainerImpl State Machine: Invalid event: CONTAINER_KILLED_ON_REQUEST at CONTAINER_CLEANEDUP_AFTER_KILL</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1032">YARN-1032</a>.
+     Critical bug reported by Lohit Vijayarenu and fixed by Lohit Vijayarenu <br>
+     <b>NPE in RackResolve</b><br>
+     <blockquote>We found a case where our rack resolve script was not returning rack due to problem with resolving host address. This exception was see in RackResolver.java as NPE, ultimately caught in RMContainerAllocator. 
+
+{noformat}
+2013-08-01 07:11:37,708 ERROR [RMCommunicator Allocator] org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator: ERROR IN CONTACTING RM. 
+java.lang.NullPointerException
+	at org.apache.hadoop.yarn.util.RackResolver.coreResolve(RackResolver.java:99)
+	at org.apache.hadoop.yarn.util.RackResolver.resolve(RackResolver.java:92)
+	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator$ScheduledRequests.assignMapsWithLocality(RMContainerAllocator.java:1039)
+	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator$ScheduledRequests.assignContainers(RMContainerAllocator.java:925)
+	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator$ScheduledRequests.assign(RMContainerAllocator.java:861)
+	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator$ScheduledRequests.access$400(RMContainerAllocator.java:681)
+	at org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator.heartbeat(RMContainerAllocator.java:219)
+	at org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator$1.run(RMCommunicator.java:243)
+	at java.lang.Thread.run(Thread.java:722)
+
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-899">YARN-899</a>.
+     Major sub-task reported by Sandy Ryza and fixed by Xuan Gong (scheduler)<br>
+     <b>Get queue administration ACLs working</b><br>
+     <blockquote>The Capacity Scheduler documents the yarn.scheduler.capacity.root.&lt;queue-path&gt;.acl_administer_queue config option for controlling who can administer a queue, but it is not hooked up to anything.  The Fair Scheduler could make use of a similar option as well.  This is a feature-parity regression from MR1.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-890">YARN-890</a>.
+     Major bug reported by Trupti Dhavle and fixed by Xuan Gong (resourcemanager)<br>
+     <b>The roundup for memory values on resource manager UI is misleading</b><br>
+     <blockquote>
+From the yarn-site.xml, I see following values-
+&lt;property&gt;
+&lt;name&gt;yarn.nodemanager.resource.memory-mb&lt;/name&gt;
+&lt;value&gt;4192&lt;/value&gt;
+&lt;/property&gt;
+&lt;property&gt;
+&lt;name&gt;yarn.scheduler.maximum-allocation-mb&lt;/name&gt;
+&lt;value&gt;4192&lt;/value&gt;
+&lt;/property&gt;
+&lt;property&gt;
+&lt;name&gt;yarn.scheduler.minimum-allocation-mb&lt;/name&gt;
+&lt;value&gt;1024&lt;/value&gt;
+&lt;/property&gt;
+
+However, the ResourceManager UI shows the total memory as 5MB.
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-876">YARN-876</a>.
+     Major bug reported by PengZhang and fixed by PengZhang (resourcemanager)<br>
+     <b>Node resource is added twice when node comes back from unhealthy to healthy</b><br>
+     <blockquote>When an unhealthy node restarts, its resource may be added twice in the scheduler.
+The first time is at the node's reconnection, while the node's final state is still "UNHEALTHY".
+The second time is at the node's update, when the node's state changes from "UNHEALTHY" to "HEALTHY".</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-621">YARN-621</a>.
+     Critical sub-task reported by Allen Wittenauer and fixed by Omkar Vinit Joshi (resourcemanager)<br>
+     <b>RM triggers web auth failure before first job</b><br>
+     <blockquote>On a secure YARN setup, before the first job is executed, going to the web interface of the resource manager triggers authentication errors.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-49">YARN-49</a>.
+     Major sub-task reported by Hitesh Shah and fixed by Vinod Kumar Vavilapalli (applications/distributed-shell)<br>
+     <b>Improve distributed shell application to work on a secure cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5562">MAPREDUCE-5562</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>MR AM should exit when unregister() throws exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5554">MAPREDUCE-5554</a>.
+     Minor bug reported by Robert Kanter and fixed by Robert Kanter (test)<br>
+     <b>hdfs-site.xml included in hadoop-mapreduce-client-jobclient tests jar is breaking tests for downstream components</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5551">MAPREDUCE-5551</a>.
+     Blocker sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Binary Incompatibility of O.A.H.U.mapred.SequenceFileAsBinaryOutputFormat.WritableValueBytes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5545">MAPREDUCE-5545</a>.
+     Major bug reported by Robert Kanter and fixed by Robert Kanter <br>
+     <b>org.apache.hadoop.mapred.TestTaskAttemptListenerImpl.testCommitWindow times out</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5544">MAPREDUCE-5544</a>.
+     Major bug reported by Sandy Ryza and fixed by Sandy Ryza <br>
+     <b>JobClient#getJob loads job conf twice</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5538">MAPREDUCE-5538</a>.
+     Blocker sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>MRAppMaster#shutDownJob shouldn't send job end notification before checking isLastRetry</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5536">MAPREDUCE-5536</a>.
+     Blocker bug reported by Yesha Vora and fixed by Omkar Vinit Joshi <br>
+     <b>mapreduce.jobhistory.webapp.https.address property is not respected</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5533">MAPREDUCE-5533</a>.
+     Major bug reported by Tassapol Athiapinya and fixed by Xuan Gong (applicationmaster)<br>
+     <b>Speculative execution does not function for reduce</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5531">MAPREDUCE-5531</a>.
+     Blocker sub-task reported by Robert Kanter and fixed by Robert Kanter (mrv1 , mrv2)<br>
+     <b>Binary and source incompatibility in mapreduce.TaskID and mapreduce.TaskAttemptID between branch-1 and branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5530">MAPREDUCE-5530</a>.
+     Blocker sub-task reported by Robert Kanter and fixed by Robert Kanter (mrv1 , mrv2)<br>
+     <b>Binary and source incompatibility in mapred.lib.CombineFileInputFormat between branch-1 and branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5529">MAPREDUCE-5529</a>.
+     Blocker sub-task reported by Robert Kanter and fixed by Robert Kanter (mrv1 , mrv2)<br>
+     <b>Binary incompatibilities in mapred.lib.TotalOrderPartitioner between branch-1 and branch-2</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5525">MAPREDUCE-5525</a>.
+     Minor test reported by Chuan Liu and fixed by Chuan Liu (mrv2 , test)<br>
+     <b>Increase timeout of TestDFSIO.testAppend and TestMRJobsWithHistoryService.testJobHistoryData</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5523">MAPREDUCE-5523</a>.
+     Major bug reported by Omkar Vinit Joshi and fixed by Omkar Vinit Joshi <br>
+     <b>Need to add https port related property in Job history server</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5515">MAPREDUCE-5515</a>.
+     Major bug reported by Omkar Vinit Joshi and fixed by Omkar Vinit Joshi <br>
+     <b>Application Manager UI does not appear with Https enabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5513">MAPREDUCE-5513</a>.
+     Major bug reported by Jason Lowe and fixed by Robert Parker <br>
+     <b>ConcurrentModificationException in JobControl</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5505">MAPREDUCE-5505</a>.
+     Critical sub-task reported by Jian He and fixed by Zhijie Shen <br>
+     <b>Clients should be notified job finished only after job successfully unregistered </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5503">MAPREDUCE-5503</a>.
+     Blocker bug reported by Jason Lowe and fixed by Jian He (mrv2)<br>
+     <b>TestMRJobClient.testJobClient is failing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5489">MAPREDUCE-5489</a>.
+     Critical bug reported by Yesha Vora and fixed by Zhijie Shen <br>
+     <b>MR jobs hangs as it does not use the node-blacklisting feature in RM requests</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5488">MAPREDUCE-5488</a>.
+     Major bug reported by Arpit Gupta and fixed by Jian He <br>
+     <b>Job recovery fails after killing all the running containers for the app</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5459">MAPREDUCE-5459</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Update the doc of running MRv1 examples jar on YARN</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5442">MAPREDUCE-5442</a>.
+     Major bug reported by Yingda Chen and fixed by Yingda Chen (client)<br>
+     <b>$HADOOP_MAPRED_HOME/$HADOOP_CONF_DIR setting not working on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5170">MAPREDUCE-5170</a>.
+     Trivial bug reported by Sangjin Lee and fixed by Sangjin Lee (mrv2)<br>
+     <b>incorrect exception message if min node size &gt; min rack size</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5308">HDFS-5308</a>.
+     Major improvement reported by Haohui Mai and fixed by Haohui Mai <br>
+     <b>Replace HttpConfig#getSchemePrefix with implicit schemes in HDFS JSP </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5306">HDFS-5306</a>.
+     Major sub-task reported by Suresh Srinivas and fixed by Suresh Srinivas (datanode , namenode)<br>
+     <b>Datanode https port is not available at the namenode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5300">HDFS-5300</a>.
+     Major bug reported by Vinay and fixed by Vinay (namenode)<br>
+     <b>FSNameSystem#deleteSnapshot() should not check owner in case of permissions disabled</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5299">HDFS-5299</a>.
+     Blocker bug reported by Vinay and fixed by Vinay (namenode)<br>
+     <b>DFS client hangs in updatePipeline RPC when failover happened</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5289">HDFS-5289</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (test)<br>
+     <b>Race condition in TestRetryCacheWithHA#testCreateSymlink causes spurious test failure</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5279">HDFS-5279</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode)<br>
+     <b>Guard against NullPointerException in NameNode JSP pages before initialization of FSNamesystem.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5268">HDFS-5268</a>.
+     Major bug reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>NFS write commit verifier is not set in a few places</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5265">HDFS-5265</a>.
+     Major bug reported by Haohui Mai and fixed by Haohui Mai <br>
+     <b>Namenode fails to start when dfs.https.port is unspecified</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5259">HDFS-5259</a>.
+     Major sub-task reported by Yesha Vora and fixed by Brandon Li (nfs)<br>
+     <b>Support client which combines appended data with old data before sending it to NFS server</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5258">HDFS-5258</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chuan Liu (test)<br>
+     <b>Skip tests in TestHDFSCLI that are not applicable on Windows.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5256">HDFS-5256</a>.
+     Major improvement reported by Haohui Mai and fixed by Haohui Mai (nfs)<br>
+     <b>Use guava LoadingCache to implement DFSClientCache</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5255">HDFS-5255</a>.
+     Major bug reported by Yesha Vora and fixed by Arpit Agarwal <br>
+     <b>Distcp job fails with hsftp when https is enabled in insecure cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5251">HDFS-5251</a>.
+     Major bug reported by Haohui Mai and fixed by Haohui Mai <br>
+     <b>Race between the initialization of NameNode and the http server</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5246">HDFS-5246</a>.
+     Major sub-task reported by Jinghui Wang and fixed by Jinghui Wang (nfs)<br>
+     <b>Make Hadoop nfs server port and mount daemon port configurable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5230">HDFS-5230</a>.
+     Major sub-task reported by Haohui Mai and fixed by Haohui Mai (nfs)<br>
+     <b>Introduce RpcInfo to decouple XDR classes from the RPC API</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5228">HDFS-5228</a>.
+     Blocker bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (hdfs-client)<br>
+     <b>The RemoteIterator returned by DistributedFileSystem.listFiles(..) may throw NPE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5186">HDFS-5186</a>.
+     Minor test reported by Chuan Liu and fixed by Chuan Liu (namenode , test)<br>
+     <b>TestFileJournalManager fails on Windows due to file handle leaks</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5139">HDFS-5139</a>.
+     Major improvement reported by Arpit Agarwal and fixed by Arpit Agarwal (tools)<br>
+     <b>Remove redundant -R option from setrep</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5031">HDFS-5031</a>.
+     Blocker bug reported by Vinay and fixed by Vinay (datanode)<br>
+     <b>BlockScanner scans the block multiple times and on restart scans everything</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4817">HDFS-4817</a>.
+     Minor improvement reported by Colin Patrick McCabe and fixed by Colin Patrick McCabe (hdfs-client)<br>
+     <b>make HDFS advisory caching configurable on a per-file basis</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10020">HADOOP-10020</a>.
+     Blocker sub-task reported by Colin Patrick McCabe and fixed by Sanjay Radia (fs)<br>
+     <b>disable symlinks temporarily</b><br>
+     <blockquote>During review of symbolic links, many issues were found related to their impact on the semantics of existing APIs such as FileSystem#listStatus, FileSystem#globStatus, etc. There were also many issues brought up about symbolic links and their impact on the security and functionality of HDFS. All these issues will be addressed in the upcoming release 2.3. Until then the feature is temporarily disabled.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10017">HADOOP-10017</a>.
+     Major sub-task reported by Jing Zhao and fixed by Haohui Mai <br>
+     <b>Fix NPE in DFSClient#getDelegationToken when doing Distcp from a secured cluster to an insecure cluster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10012">HADOOP-10012</a>.
+     Blocker bug reported by Arpit Gupta and fixed by Suresh Srinivas (ha)<br>
+     <b>Secure Oozie jobs fail with delegation token renewal exception in Namenode HA setup</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10003">HADOOP-10003</a>.
+     Major bug reported by Jason Dere and fixed by  (fs)<br>
+     <b>HarFileSystem.listLocatedStatus() fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9976">HADOOP-9976</a>.
+     Major bug reported by Karthik Kambatla and fixed by Karthik Kambatla <br>
+     <b>Different versions of avro and avro-maven-plugin</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9948">HADOOP-9948</a>.
+     Minor test reported by Chuan Liu and fixed by Chuan Liu (test)<br>
+     <b>Add a config value to CLITestHelper to skip tests on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9776">HADOOP-9776</a>.
+     Major bug reported by shanyu zhao and fixed by shanyu zhao (fs)<br>
+     <b>HarFileSystem.listStatus() returns invalid authority if port number is empty</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9761">HADOOP-9761</a>.
+     Blocker bug reported by Andrew Wang and fixed by Andrew Wang (viewfs)<br>
+     <b>ViewFileSystem#rename fails when using DistributedFileSystem</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9758">HADOOP-9758</a>.
+     Major improvement reported by Andrew Wang and fixed by Andrew Wang <br>
+     <b>Provide configuration option for FileSystem/FileContext symlink resolution</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8315">HADOOP-8315</a>.
+     Major improvement reported by Todd Lipcon and fixed by Todd Lipcon (auto-failover , ha)<br>
+     <b>Support SASL-authenticated ZooKeeper in ActiveStandbyElector</b><br>
+     <blockquote></blockquote></li>
+</ul>
+</body></html>
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<title>Hadoop  2.1.1-beta Release Notes</title>
+<STYLE type="text/css">
+	H1 {font-family: sans-serif}
+	H2 {font-family: sans-serif; margin-left: 7mm}
+	TABLE {margin-left: 7mm}
+</STYLE>
+</head>
+<body>
+<h1>Hadoop  2.1.1-beta Release Notes</h1>
+These release notes include new developer and user-facing incompatibilities, features, and major improvements. 
+<a name="changes"/>
+<h2>Changes since Hadoop 2.1.0-beta</h2>
+<ul>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1194">YARN-1194</a>.
+     Minor bug reported by Roman Shaposhnik and fixed by Roman Shaposhnik (nodemanager)<br>
+     <b>TestContainerLogsPage fails with native builds</b><br>
+     <blockquote>Running TestContainerLogsPage on trunk while Native IO is enabled makes it fail</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1189">YARN-1189</a>.
+     Blocker bug reported by Jason Lowe and fixed by Omkar Vinit Joshi <br>
+     <b>NMTokenSecretManagerInNM is not being told when applications have finished </b><br>
+     <blockquote>The {{appFinished}} method is not being called when applications have finished.  This causes a couple of leaks as {{oldMasterKeys}} and {{appToAppAttemptMap}} are never being pruned.
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1184">YARN-1184</a>.
+     Major bug reported by J.Andreina and fixed by Chris Douglas (capacityscheduler , resourcemanager)<br>
+     <b>ClassCastException is thrown during preemption when a huge job is submitted to queue B whose resources are used by a job in queue A</b><br>
+     <blockquote>Preemption is enabled.
+Queues = a, b
+a capacity = 30%
+b capacity = 70%
+
+Step 1: Assign a big job to queue a (so that job_a will utilize some resources from queue b).
+Step 2: Assign a big job to queue b.
+
+The following exception is thrown at the Resource Manager:
+{noformat}
+2013-09-12 10:42:32,535 ERROR [SchedulingMonitor (ProportionalCapacityPreemptionPolicy)] yarn.YarnUncaughtExceptionHandler (YarnUncaughtExceptionHandler.java:uncaughtException(68)) - Thread Thread[SchedulingMonitor (ProportionalCapacityPreemptionPolicy),5,main] threw an Exception.
+java.lang.ClassCastException: java.util.Collections$UnmodifiableSet cannot be cast to java.util.NavigableSet
+	at org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.getContainersToPreempt(ProportionalCapacityPreemptionPolicy.java:403)
+	at org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.containerBasedPreemptOrKill(ProportionalCapacityPreemptionPolicy.java:202)
+	at org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.editSchedule(ProportionalCapacityPreemptionPolicy.java:173)
+	at org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingMonitor.invokePolicy(SchedulingMonitor.java:72)
+	at org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingMonitor$PreemptionChecker.run(SchedulingMonitor.java:82)
+	at java.lang.Thread.run(Thread.java:662)
+
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1176">YARN-1176</a>.
+     Critical bug reported by Thomas Graves and fixed by Jonathan Eagles (resourcemanager)<br>
+     <b>RM web services ClusterMetricsInfo total nodes doesn't include unhealthy nodes</b><br>
+     <blockquote>In the web services API for cluster/metrics, the totalNodes reported doesn't include the unhealthy nodes.
+
+this.totalNodes = activeNodes + lostNodes + decommissionedNodes
+	        + rebootedNodes;</blockquote></li>
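+A minimal sketch of the expected accounting, reusing the field names from the snippet above (the unhealthyNodes counter is assumed, not taken from the actual patch):
+{code}
+// Illustrative only: count unhealthy nodes in the cluster total as well.
+this.totalNodes = activeNodes + lostNodes + decommissionedNodes
+    + rebootedNodes + unhealthyNodes;
+{code}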
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1170">YARN-1170</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Binglin Chang <br>
+     <b>yarn proto definitions should specify package as 'hadoop.yarn'</b><br>
+     <blockquote>yarn proto definitions should specify package as 'hadoop.yarn' similar to protos with 'hadoop.common' &amp; 'hadoop.hdfs' in Common &amp; HDFS respectively.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1152">YARN-1152</a>.
+     Blocker bug reported by Jason Lowe and fixed by Jason Lowe (resourcemanager)<br>
+     <b>Invalid key to HMAC computation error when getting application report for completed app attempt</b><br>
+     <blockquote>On a secure cluster, an invalid key to HMAC error is thrown when trying to get an application report for an application with an attempt that has unregistered.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1144">YARN-1144</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (resourcemanager)<br>
+     <b>Unmanaged AMs registering a tracking URI should not be proxy-fied</b><br>
+     <blockquote>Unmanaged AMs do not run in the cluster, so their tracking URL should not be proxy-fied.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1137">YARN-1137</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Roman Shaposhnik (nodemanager)<br>
+     <b>Add support whitelist for system users to Yarn container-executor.c</b><br>
+     <blockquote>Currently container-executor.c has a banned set of users (mapred, hdfs &amp; bin) and configurable min.user.id (defaulting to 1000).
+
+This presents a problem for systems that run as system users (below 1000) if these systems want to start containers.
+
+Systems like Impala fit in this category. A (local) 'impala' system user is created when installing Impala on the nodes. 
+
+Note that the same thing happens when installing systems like HDFS, YARN, and Oozie from packages (Bigtop); local system users are created.
+
+For Impala to be able to run containers in a secure cluster, the 'impala' system user must be whitelisted. 
+
+For this, adding an 'allowed.system.users' configuration option to container-executor.cfg, together with the corresponding logic in container-executor.c, would allow the usernames in that list.
+
+Because system users are not guaranteed to have the same UID in different machines, the 'allowed.system.users' property should use usernames and not UIDs.
+</blockquote></li>
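+An illustrative container-executor.cfg fragment along the lines proposed above (the property name comes from the description; the username listed is only an example):
+{noformat}
+min.user.id=1000
+allowed.system.users=impala
+{noformat}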
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1124">YARN-1124</a>.
+     Blocker bug reported by Omkar Vinit Joshi and fixed by Xuan Gong <br>
+     <b>By default yarn application -list should display all the applications in a state other than FINISHED / FAILED</b><br>
+     <blockquote>Today we are just listing applications in the RUNNING state by default for "yarn application -list". Instead we should show all the applications which are either submitted, accepted, or running.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1120">YARN-1120</a>.
+     Minor bug reported by Chuan Liu and fixed by Chuan Liu <br>
+     <b>Make ApplicationConstants.Environment.USER definition OS neutral</b><br>
+     <blockquote>In YARN-557, we added some code to make {{ApplicationConstants.Environment.USER}} have an OS-specific definition in order to fix the unit test TestUnmanagedAMLauncher. In YARN-571, the relevant test code was corrected. In YARN-602, we will actually set the environment variables for the child containers explicitly. With these changes, I think we can revert the YARN-557 change to make {{ApplicationConstants.Environment.USER}} OS neutral. The main benefit is that we can use the same method over the Enum constants. This should also fix the TestContainerLaunch#testContainerEnvVariables failure on Windows. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1117">YARN-1117</a>.
+     Major improvement reported by Tassapol Athiapinya and fixed by Xuan Gong (client)<br>
+     <b>Improve help message for $ yarn applications and $ yarn node</b><br>
+     <blockquote>There is standardization of the help message in YARN-1080. It would be nice to have similar changes for $ yarn applications and $ yarn node</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1116">YARN-1116</a>.
+     Major sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+     <b>Populate AMRMTokens back to AMRMTokenSecretManager after RM restarts</b><br>
+     <blockquote>The AMRMTokens are now only saved in the RMStateStore and not populated back to the AMRMTokenSecretManager after the RM restarts. This is needed more now since the AMRMToken is also used in non-secure environments.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1107">YARN-1107</a>.
+     Blocker bug reported by Arpit Gupta and fixed by Omkar Vinit Joshi (resourcemanager)<br>
+     <b>Job submitted with Delegation token in secured environment causes RM to fail during RM restart</b><br>
+     <blockquote>If a secure RM with recovery enabled is restarted while Oozie jobs are running, the RM fails to come up.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1101">YARN-1101</a>.
+     Major bug reported by Robert Parker and fixed by Robert Parker (resourcemanager)<br>
+     <b>Active nodes can be decremented below 0</b><br>
+     <blockquote>The issue is in RMNodeImpl, where both the RUNNING and UNHEALTHY states transition to a deactivated state (LOST, DECOMMISSIONED, REBOOTED) using the same DeactivateNodeTransition class. The DeactivateNodeTransition class naturally decrements the active-node count; however, in cases where the node has transitioned to UNHEALTHY, the active count has already been decremented.</blockquote></li>
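+A rough sketch of the guard this implies; the metric and field names below are illustrative, not the actual patch:
+{code}
+// Only decrement the active-node count if the node was actually counted as
+// active; a node that was already UNHEALTHY has been decremented earlier.
+if (initialState != NodeState.UNHEALTHY) {
+  ClusterMetrics.getMetrics().decrNumActiveNodes();
+}
+{code}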
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1094">YARN-1094</a>.
+     Blocker bug reported by Yesha Vora and fixed by Vinod Kumar Vavilapalli <br>
+     <b>RM restart throws Null pointer Exception in Secure Env</b><br>
+     <blockquote>Enable the RM restart feature and restart the Resource Manager while a job is running.
+
+The Resource Manager fails to start with the error below:
+
+2013-08-23 17:57:40,705 INFO  resourcemanager.RMAppManager (RMAppManager.java:recover(370)) - Recovering application application_1377280618693_0001
+2013-08-23 17:57:40,763 ERROR resourcemanager.ResourceManager (ResourceManager.java:serviceStart(617)) - Failed to load/recover state
+java.lang.NullPointerException
+        at org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer.setTimerForTokenRenewal(DelegationTokenRenewer.java:371)
+        at org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer.addApplication(DelegationTokenRenewer.java:307)
+        at org.apache.hadoop.yarn.server.resourcemanager.RMAppManager.submitApplication(RMAppManager.java:291)
+        at org.apache.hadoop.yarn.server.resourcemanager.RMAppManager.recover(RMAppManager.java:371)
+        at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.recover(ResourceManager.java:819)
+        at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceStart(ResourceManager.java:613)
+        at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
+        at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.main(ResourceManager.java:832)
+2013-08-23 17:57:40,766 INFO  util.ExitUtil (ExitUtil.java:terminate(124)) - Exiting with status 1
+                                                                                                    
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1093">YARN-1093</a>.
+     Major bug reported by Wing Yew Poon and fixed by  (documentation)<br>
+     <b>Corrections to Fair Scheduler documentation</b><br>
+     <blockquote>The fair scheduler is still evolving, but the current documentation contains some inaccuracies.
+
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1085">YARN-1085</a>.
+     Blocker task reported by Jaimin D Jetly and fixed by Omkar Vinit Joshi (nodemanager , resourcemanager)<br>
+     <b>Yarn and MRv2 should do HTTP client authentication in kerberos setup.</b><br>
+     <blockquote>In a Kerberos setup, an HTTP client is expected to authenticate to Kerberos before the user is allowed to browse any information.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1083">YARN-1083</a>.
+     Major bug reported by Yesha Vora and fixed by Zhijie Shen (resourcemanager)<br>
+     <b>ResourceManager should fail when yarn.nm.liveness-monitor.expiry-interval-ms is set less than heartbeat interval</b><br>
+     <blockquote>If 'yarn.nm.liveness-monitor.expiry-interval-ms' is set to less than the heartbeat interval, all the node managers will be added to 'Lost Nodes'.
+
+Instead, the Resource Manager should validate these properties and fail to start if their combination is invalid.</blockquote></li>
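+A minimal sketch of such a startup validation, assuming the standard Configuration API (the first property name is taken from the description; the heartbeat property, defaults, and helper class are illustrative):
+{code}
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+
+final class LivenessConfigCheck {
+  // Illustrative helper, not the actual patch.
+  static void validate(Configuration conf) {
+    long expiryMs =
+        conf.getLong("yarn.nm.liveness-monitor.expiry-interval-ms", 600000L);
+    long heartbeatMs =
+        conf.getLong("yarn.resourcemanager.nodemanagers.heartbeat-interval-ms", 1000L);
+    if (expiryMs &lt;= heartbeatMs) {
+      throw new YarnRuntimeException(
+          "yarn.nm.liveness-monitor.expiry-interval-ms (" + expiryMs
+              + ") must be greater than the nodemanager heartbeat interval ("
+              + heartbeatMs + ")");
+    }
+  }
+}
+{code}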
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1082">YARN-1082</a>.
+     Blocker bug reported by Arpit Gupta and fixed by Vinod Kumar Vavilapalli (resourcemanager)<br>
+     <b>Secure RM with recovery enabled and rm state store on hdfs fails with gss exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1081">YARN-1081</a>.
+     Minor improvement reported by Tassapol Athiapinya and fixed by Akira AJISAKA (client)<br>
+     <b>Minor improvement to output header for $ yarn node -list</b><br>
+     <blockquote>The output of $ yarn node -list shows the number of running containers at each node. I found a case where a new user of YARN thought this was a container ID, used it later in other YARN commands, and got an error due to the misunderstanding.
+
+{code:title=current output}
+2013-07-31 04:00:37,814|beaver.machine|INFO|RUNNING: /usr/bin/yarn node -list
+2013-07-31 04:00:38,746|beaver.machine|INFO|Total Nodes:1
+2013-07-31 04:00:38,747|beaver.machine|INFO|Node-Id	Node-State	Node-Http-Address	Running-Containers
+2013-07-31 04:00:38,747|beaver.machine|INFO|myhost:45454	   RUNNING	myhost:50060	   2
+{code}
+
+{code:title=proposed output}
+2013-07-31 04:00:37,814|beaver.machine|INFO|RUNNING: /usr/bin/yarn node -list
+2013-07-31 04:00:38,746|beaver.machine|INFO|Total Nodes:1
+2013-07-31 04:00:38,747|beaver.machine|INFO|Node-Id	Node-State	Node-Http-Address	Number-of-Running-Containers
+2013-07-31 04:00:38,747|beaver.machine|INFO|myhost:45454	   RUNNING	myhost:50060	   2
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1080">YARN-1080</a>.
+     Major improvement reported by Tassapol Athiapinya and fixed by Xuan Gong (client)<br>
+     <b>Improve help message for $ yarn logs</b><br>
+     <blockquote>There are 2 parts I am proposing in this jira. They can be fixed together in one patch.
+
+1. Standardize help message for required parameter of $ yarn logs
+YARN CLI has a command "logs" ($ yarn logs). The command always requires a parameter of "-applicationId &lt;arg&gt;". However, the help message of the command does not make this clear: it lists -applicationId as an optional parameter. If I don't set it, the YARN CLI will complain that it is missing. It is better to use the standard required-parameter notation used in other Linux commands for the help message. Any user familiar with such commands can then more easily understand that this parameter is needed.
+
+{code:title=current help message}
+-bash-4.1$ yarn logs
+usage: general options are:
+ -applicationId &lt;arg&gt;   ApplicationId (required)
+ -appOwner &lt;arg&gt;        AppOwner (assumed to be current user if not
+                        specified)
+ -containerId &lt;arg&gt;     ContainerId (must be specified if node address is
+                        specified)
+ -nodeAddress &lt;arg&gt;     NodeAddress in the format nodename:port (must be
+                        specified if container id is specified)
+{code}
+
+{code:title=proposed help message}
+-bash-4.1$ yarn logs
+usage: yarn logs -applicationId &lt;application ID&gt; [OPTIONS]
+general options are:
+ -appOwner &lt;arg&gt;        AppOwner (assumed to be current user if not
+                        specified)
+ -containerId &lt;arg&gt;     ContainerId (must be specified if node address is
+                        specified)
+ -nodeAddress &lt;arg&gt;     NodeAddress in the format nodename:port (must be
+                        specified if container id is specified)
+{code}
+
+2. Add a description to the help command. As far as I know, a user cannot get logs for a running job. Since I spent some time trying to get logs of running applications, it would be nice to say this in the command description.
+{code:title=proposed help}
+Retrieve logs for completed/killed YARN application
+usage: general options are...
+{code}
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1078">YARN-1078</a>.
+     Minor bug reported by Chuan Liu and fixed by Chuan Liu <br>
+     <b>TestNodeManagerResync, TestNodeManagerShutdown, and TestNodeStatusUpdater fail on Windows</b><br>
+     <blockquote>The three unit tests fail on Windows due to host name resolution differences on Windows, i.e. 127.0.0.1 does not resolve to host name "localhost".
+
+{noformat}
+org.apache.hadoop.security.token.SecretManager$InvalidToken: Given Container container_0_0000_01_000000 identifier is not valid for current Node manager. Expected : 127.0.0.1:12345 Found : localhost:12345
+{noformat}
+
+{noformat}
+testNMConnectionToRM(org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater)  Time elapsed: 8343 sec  &lt;&lt;&lt; FAILURE!
+org.junit.ComparisonFailure: expected:&lt;[localhost]:12345&gt; but was:&lt;[127.0.0.1]:12345&gt;
+	at org.junit.Assert.assertEquals(Assert.java:125)
+	at org.junit.Assert.assertEquals(Assert.java:147)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater$MyResourceTracker6.registerNodeManager(TestNodeStatusUpdater.java:712)
+	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
+	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
+	at java.lang.reflect.Method.invoke(Method.java:597)
+	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187)
+	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:101)
+	at $Proxy26.registerNodeManager(Unknown Source)
+	at org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl.registerWithRM(NodeStatusUpdaterImpl.java:212)
+	at org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl.serviceStart(NodeStatusUpdaterImpl.java:149)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater$MyNodeStatusUpdater4.serviceStart(TestNodeStatusUpdater.java:369)
+	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
+	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:101)
+	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.serviceStart(NodeManager.java:213)
+	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater.testNMConnectionToRM(TestNodeStatusUpdater.java:985)
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1077">YARN-1077</a>.
+     Minor bug reported by Chuan Liu and fixed by Chuan Liu <br>
+     <b>TestContainerLaunch fails on Windows</b><br>
+     <blockquote>Several cases in this unit test fail on Windows. (Error log appended at the end.)
+
+testInvalidEnvSyntaxDiagnostics fails because of the difference between cmd and bash script error handling. If some command fails in a cmd script, cmd will continue to execute the rest of the script commands. Error handling needs to be explicitly carried out in the script file. The error code of the last command will be returned as the error code of the whole script. In this test, an error happens in the middle of the cmd script, and the test expects an exception and a non-zero error code. In the cmd script, the intermediate errors are ignored; the last command "call" succeeds and there is no exception.
+
+testContainerLaunchStdoutAndStderrDiagnostics fails due to wrong cmd commands used by the test.
+
+testContainerEnvVariables and testDelayedKill fail due to a regression from YARN-906.
+
+{noformat}
+-------------------------------------------------------------------------------
+Test set: org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch
+-------------------------------------------------------------------------------
+Tests run: 7, Failures: 4, Errors: 0, Skipped: 0, Time elapsed: 11.526 sec &lt;&lt;&lt; FAILURE!
+testInvalidEnvSyntaxDiagnostics(org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch)  Time elapsed: 583 sec  &lt;&lt;&lt; FAILURE!
+junit.framework.AssertionFailedError: Should catch exception
+	at junit.framework.Assert.fail(Assert.java:50)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch.testInvalidEnvSyntaxDiagnostics(TestContainerLaunch.java:269)
+...
+
+testContainerLaunchStdoutAndStderrDiagnostics(org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch)  Time elapsed: 561 sec  &lt;&lt;&lt; FAILURE!
+junit.framework.AssertionFailedError: Should catch exception
+	at junit.framework.Assert.fail(Assert.java:50)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch.testContainerLaunchStdoutAndStderrDiagnostics(TestContainerLaunch.java:314)
+...
+
+testContainerEnvVariables(org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch)  Time elapsed: 4136 sec  &lt;&lt;&lt; FAILURE!
+junit.framework.AssertionFailedError: expected:&lt;137&gt; but was:&lt;143&gt;
+	at junit.framework.Assert.fail(Assert.java:50)
+	at junit.framework.Assert.failNotEquals(Assert.java:287)
+	at junit.framework.Assert.assertEquals(Assert.java:67)
+	at junit.framework.Assert.assertEquals(Assert.java:199)
+	at junit.framework.Assert.assertEquals(Assert.java:205)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch.testContainerEnvVariables(TestContainerLaunch.java:500)
+...
+
+testDelayedKill(org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch)  Time elapsed: 2744 sec  &lt;&lt;&lt; FAILURE!
+junit.framework.AssertionFailedError: expected:&lt;137&gt; but was:&lt;143&gt;
+	at junit.framework.Assert.fail(Assert.java:50)
+	at junit.framework.Assert.failNotEquals(Assert.java:287)
+	at junit.framework.Assert.assertEquals(Assert.java:67)
+	at junit.framework.Assert.assertEquals(Assert.java:199)
+	at junit.framework.Assert.assertEquals(Assert.java:205)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch.testDelayedKill(TestContainerLaunch.java:601)
+...
+{noformat}
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1074">YARN-1074</a>.
+     Major improvement reported by Tassapol Athiapinya and fixed by Xuan Gong (client)<br>
+     <b>Clean up YARN CLI app list to show only running apps.</b><br>
+     <blockquote>Once a user brings up the YARN daemon and runs jobs, the jobs will stay in the output returned by $ yarn application -list even after they have already completed. We want the YARN command line to clean up this list. Specifically, we want to remove applications with the FINISHED state (not Final-State) or KILLED state from the result.
+
+{code}
+[user1@host1 ~]$ yarn application -list
+Total Applications:150
+                Application-Id	    Application-Name	    Application-Type	      User	     Queue	             State       Final-State	       Progress	                       Tracking-URL
+application_1374638600275_0109	           Sleep job	           MAPREDUCE	    user1	   default	            KILLED            KILLED	           100%	   host1:54059
+application_1374638600275_0121	           Sleep job	           MAPREDUCE	    user1	   default	          FINISHED         SUCCEEDED	           100%	host1:19888/jobhistory/job/job_1374638600275_0121
+application_1374638600275_0020	           Sleep job	           MAPREDUCE	    user1	   default	          FINISHED         SUCCEEDED	           100%	host1:19888/jobhistory/job/job_1374638600275_0020
+application_1374638600275_0038	           Sleep job	           MAPREDUCE	    user1	   default	
+....
+{code}
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1049">YARN-1049</a>.
+     Blocker bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (api)<br>
+     <b>ContainerExitStatus should define a status for preempted containers</b><br>
+     <blockquote>With the current behavior it is impossible to determine whether a container has been preempted or lost due to an NM crash.
+
+Adding a PREEMPTED exit status (-102) will help an AM determine that a container has been preempted.
+
+Note the change of scope from the original summary/description. The original scope proposed API/behavior changes. Because we are past 2.1.0-beta, I'm reducing the scope of this JIRA.</blockquote></li>
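+A sketch of how an AM could use such a status once it exists; the handler below only mirrors the general callback shape and is illustrative, not the actual AMRMClientAsync contract:
+{code}
+import java.util.List;
+import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+
+class PreemptionAwareHandler {
+  // Illustrative: distinguish preempted containers from genuine failures.
+  void onContainersCompleted(List&lt;ContainerStatus&gt; statuses) {
+    for (ContainerStatus status : statuses) {
+      if (status.getExitStatus() == ContainerExitStatus.PREEMPTED) {
+        // The container was preempted, not lost: reschedule its work rather
+        // than counting it towards application failures.
+      }
+    }
+  }
+}
+{code}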
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1034">YARN-1034</a>.
+     Trivial task reported by Sandy Ryza and fixed by Karthik Kambatla (documentation , scheduler)<br>
+     <b>Remove "experimental" in the Fair Scheduler documentation</b><br>
+     <blockquote>The YARN Fair Scheduler is largely stable now, and should no longer be declared experimental.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1025">YARN-1025</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (nodemanager , resourcemanager)<br>
+     <b>ResourceManager and NodeManager do not load native libraries on Windows.</b><br>
+     <blockquote>ResourceManager and NodeManager do not have the correct setting for java.library.path when launched on Windows.  This prevents the processes from loading native code from hadoop.dll.  The native code is required for correct functioning on Windows (not optional), so this ultimately can cause failures.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1008">YARN-1008</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (nodemanager)<br>
+     <b>MiniYARNCluster with multiple nodemanagers, all nodes have same key for allocations</b><br>
+     <blockquote>While the NMs are keyed using the NodeId, the allocation is done based on the hostname. 
+
+This makes the different nodes indistinguishable to the scheduler.
+
+There should be an option to enable using host:port instead of just the hostname for allocations. The nodes reported to the AM should report the 'key' (host or host:port). 
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1006">YARN-1006</a>.
+     Major bug reported by Jian He and fixed by Xuan Gong <br>
+     <b>Nodes list web page on the RM web UI is broken</b><br>
+     <blockquote>The nodes web page, which lists all the connected nodes of the cluster, is broken.
+
+1. The page is not showing in the correct format/style.
+2. If we restart the NM, the node list is not refreshed; it just adds the newly started NM to the list. The old NMs' information still remains.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1001">YARN-1001</a>.
+     Blocker task reported by Srimanth Gunturi and fixed by Zhijie Shen (api)<br>
+     <b>YARN should provide per application-type and state statistics</b><br>
+     <blockquote>In Ambari we plan to show for MR2 the number of applications finished, running, waiting, etc. It would be efficient if YARN could provide per application-type and state aggregated counts.
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-994">YARN-994</a>.
+     Major bug reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>HeartBeat thread in AMRMClientAsync does not handle runtime exception correctly</b><br>
+     <blockquote>YARN-654 performs sanity checks for parameters of public methods in AMRMClient. Those may throw runtime exceptions. 
+Currently, the heartbeat thread in AMRMClientAsync only catches IOException and YarnException, and will not handle runtime exceptions properly. 
+A possible solution: the heartbeat thread catches Throwable and notifies the callback-handler thread via the existing savedException.</blockquote></li>
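+A simplified sketch of that proposal (the fields and the surrounding loop are illustrative rather than the exact AMRMClientAsync internals):
+{code}
+try {
+  response = client.allocate(progress);
+} catch (YarnException e) {
+  savedException = e;
+} catch (IOException e) {
+  savedException = new YarnException(e);
+} catch (Throwable t) {
+  // Runtime exceptions from the argument sanity checks land here instead of
+  // silently killing the heartbeat thread; the callback handler gets notified.
+  savedException = new YarnException(t);
+}
+{code}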
+<li> <a href="https://issues.apache.org/jira/browse/YARN-981">YARN-981</a>.
+     Major bug reported by Xuan Gong and fixed by Jian He <br>
+     <b>YARN/MR2/Job-history /logs link does not have correct content</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-966">YARN-966</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>The thread of ContainerLaunch#call will fail without any signal if getLocalizedResources() is called when the container is not at LOCALIZED</b><br>
+     <blockquote>In ContainerImpl.getLocalizedResources(), there's:
+{code}
+assert ContainerState.LOCALIZED == getContainerState(); // TODO: FIXME!!
+{code}
+
+ContainerImpl.getLocalizedResources() is called in ContainerLaunch.call(), which is scheduled on a separate thread. If the container is not at LOCALIZED (e.g. it is at KILLING, see YARN-906), an AssertionError will be thrown, failing the thread without notifying the NM. Therefore, the container cannot receive more events, which are supposed to be sent from ContainerLaunch.call(), and cannot move towards completion. </blockquote></li>
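+One possible shape of a fix, shown only as an illustration: replace the bare assert with an explicit check so the launcher thread fails with a reportable exception instead of an unobserved AssertionError.
+{code}
+if (ContainerState.LOCALIZED != getContainerState()) {
+  throw new YarnRuntimeException("Container " + containerId + " is at "
+      + getContainerState() + " instead of LOCALIZED");
+}
+{code}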
+<li> <a href="https://issues.apache.org/jira/browse/YARN-957">YARN-957</a>.
+     Blocker bug reported by Omkar Vinit Joshi and fixed by Omkar Vinit Joshi <br>
+     <b>Capacity Scheduler tries to reserve the memory more than what node manager reports.</b><br>
+     <blockquote>I have 2 node managers.
+* one with 1024MB memory (nm1)
+* a second with 2048MB memory (nm2)
+I am submitting a simple MapReduce application with 1 mapper and 1 reducer of 1024MB each. The steps to reproduce this are:
+* Stop nm2 with 2048MB memory. (This is done to make sure that this node's heartbeat doesn't reach the RM first.)
+* Now submit the application. As soon as the RM receives the first node's (nm1) heartbeat, it will try to reserve memory for the AM container (2048MB). However, that node has only 1024MB of memory.
+* Now start nm2 with 2048MB memory.
+
+It hangs forever. This shows two potential issues:
+* It should not try to reserve memory on a node manager that is never going to provide the requested memory; i.e., the node manager's current max capability is 1024MB, but 2048MB is reserved on it anyway.
+* Say 2048MB is reserved on nm1 but nm2 comes back with 2048MB of available memory. In this case, if the original request was made without any locality, the scheduler should unreserve the memory on nm1 and allocate the requested 2048MB container on nm2.
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-948">YARN-948</a>.
+     Major bug reported by Omkar Vinit Joshi and fixed by Omkar Vinit Joshi <br>
+     <b>RM should validate the release container list before actually releasing them</b><br>
+     <blockquote>At present we are blindly passing the allocate request containing containers to be released to the scheduler. This may result in one application releasing another application's container.
+
+{code}
+  @Override
+  @Lock(Lock.NoLock.class)
+  public Allocation allocate(ApplicationAttemptId applicationAttemptId,
+      List&lt;ResourceRequest&gt; ask, List&lt;ContainerId&gt; release, 
+      List&lt;String&gt; blacklistAdditions, List&lt;String&gt; blacklistRemovals) {
+
+    FiCaSchedulerApp application = getApplication(applicationAttemptId);
+....
+....
+    // Release containers
+    for (ContainerId releasedContainerId : release) {
+      RMContainer rmContainer = getRMContainer(releasedContainerId);
+      if (rmContainer == null) {
+         RMAuditLogger.logFailure(application.getUser(),
+             AuditConstants.RELEASE_CONTAINER, 
+             "Unauthorized access or invalid container", "CapacityScheduler",
+             "Trying to release container not owned by app or with invalid id",
+             application.getApplicationId(), releasedContainerId);
+      }
+      completedContainer(rmContainer,
+          SchedulerUtils.createAbnormalContainerStatus(
+              releasedContainerId, 
+              SchedulerUtils.RELEASED_CONTAINER),
+          RMContainerEventType.RELEASED);
+    }
+{code}
+
+The current checks are not sufficient and we should prevent this.</blockquote></li>
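+A sketch of the missing ownership check, reusing the names from the quoted snippet (the actual patch may differ):
+{code}
+for (ContainerId releasedContainerId : release) {
+  RMContainer rmContainer = getRMContainer(releasedContainerId);
+  if (rmContainer == null
+      || !rmContainer.getApplicationAttemptId().equals(applicationAttemptId)) {
+    RMAuditLogger.logFailure(application.getUser(),
+        AuditConstants.RELEASE_CONTAINER,
+        "Unauthorized access or invalid container", "CapacityScheduler",
+        "Trying to release container not owned by app or with invalid id",
+        application.getApplicationId(), releasedContainerId);
+    continue;  // never release a container owned by another application
+  }
+  completedContainer(rmContainer,
+      SchedulerUtils.createAbnormalContainerStatus(
+          releasedContainerId, SchedulerUtils.RELEASED_CONTAINER),
+      RMContainerEventType.RELEASED);
+}
+{code}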
+<li> <a href="https://issues.apache.org/jira/browse/YARN-942">YARN-942</a>.
+     Major bug reported by Sandy Ryza and fixed by Akira AJISAKA (scheduler)<br>
+     <b>In Fair Scheduler documentation, inconsistency on which properties have prefix</b><br>
+     <blockquote>locality.threshold.node and locality.threshold.rack should have the yarn.scheduler.fair prefix like the items before them
+
+http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/FairScheduler.html</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-910">YARN-910</a>.
+     Major improvement reported by Sandy Ryza and fixed by Alejandro Abdelnur (nodemanager)<br>
+     <b>Allow auxiliary services to listen for container starts and completions</b><br>
+     <blockquote>Making container start and completion events available to auxiliary services would allow them to be resource-aware.  The auxiliary service would be able to notify a co-located service that is opportunistically using free capacity of allocation changes.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-906">YARN-906</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Cancelling ContainerLaunch#call at KILLING causes that the container cannot be completed</b><br>
+     <blockquote>See https://builds.apache.org/job/PreCommit-YARN-Build/1435//testReport/org.apache.hadoop.yarn.client.api.impl/TestNMClient/testNMClientNoCleanupOnStop/</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-903">YARN-903</a>.
+     Major bug reported by Abhishek Kapoor and fixed by Omkar Vinit Joshi (applications/distributed-shell)<br>
+     <b>DistributedShell throwing Errors in logs after successful completion</b><br>
+     <blockquote>I have tried running DistributedShell and also used its ApplicationMaster for my test.
+The application runs successfully, though it logs some errors which would be useful to fix.
+Below are the logs from the NodeManager and ApplicationMaster node.
+
+Log Snippet for NodeManager
+=============================
+2013-07-07 13:39:18,787 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Connecting to ResourceManager at localhost/127.0.0.1:9990. current no. of attempts is 1
+2013-07-07 13:39:19,050 INFO org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager: Rolling master-key for container-tokens, got key with id -325382586
+2013-07-07 13:39:19,052 INFO org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM: Rolling master-key for nm-tokens, got key with id :1005046570
+2013-07-07 13:39:19,053 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Registered with ResourceManager as sunny-Inspiron:9993 with total resource of &lt;memory:10240, vCores:8&gt;
+2013-07-07 13:39:19,053 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Notifying ContainerManager to unblock new container-requests
+2013-07-07 13:39:35,256 INFO SecurityLogger.org.apache.hadoop.ipc.Server: Auth successful for appattempt_1373184544832_0001_000001 (auth:SIMPLE)
+2013-07-07 13:39:35,492 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Start request for container_1373184544832_0001_01_000001 by user sunny
+2013-07-07 13:39:35,507 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Creating a new application reference for app application_1373184544832_0001
+2013-07-07 13:39:35,511 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=sunny	IP=127.0.0.1	OPERATION=Start Container Request	TARGET=ContainerManageImpl	RESULT=SUCCESS	APPID=application_1373184544832_0001	CONTAINERID=container_1373184544832_0001_01_000001
+2013-07-07 13:39:35,511 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Application application_1373184544832_0001 transitioned from NEW to INITING
+2013-07-07 13:39:35,512 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Adding container_1373184544832_0001_01_000001 to application application_1373184544832_0001
+2013-07-07 13:39:35,518 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Application application_1373184544832_0001 transitioned from INITING to RUNNING
+2013-07-07 13:39:35,528 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000001 transitioned from NEW to LOCALIZING
+2013-07-07 13:39:35,540 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource: Resource hdfs://localhost:9000/application/test.jar transitioned from INIT to DOWNLOADING
+2013-07-07 13:39:35,540 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService: Created localizer for container_1373184544832_0001_01_000001
+2013-07-07 13:39:35,675 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService: Writing credentials to the nmPrivate file /home/sunny/Hadoop2/hadoopdata/nodemanagerdata/nmPrivate/container_1373184544832_0001_01_000001.tokens. Credentials list: 
+2013-07-07 13:39:35,694 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: Initializing user sunny
+2013-07-07 13:39:35,803 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: Copying from /home/sunny/Hadoop2/hadoopdata/nodemanagerdata/nmPrivate/container_1373184544832_0001_01_000001.tokens to /home/sunny/Hadoop2/hadoopdata/nodemanagerdata/usercache/sunny/appcache/application_1373184544832_0001/container_1373184544832_0001_01_000001.tokens
+2013-07-07 13:39:35,803 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: CWD set to /home/sunny/Hadoop2/hadoopdata/nodemanagerdata/usercache/sunny/appcache/application_1373184544832_0001 = file:/home/sunny/Hadoop2/hadoopdata/nodemanagerdata/usercache/sunny/appcache/application_1373184544832_0001
+2013-07-07 13:39:36,136 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000, 
+2013-07-07 13:39:36,406 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource: Resource hdfs://localhost:9000/application/test.jar transitioned from DOWNLOADING to LOCALIZED
+2013-07-07 13:39:36,409 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000001 transitioned from LOCALIZING to LOCALIZED
+2013-07-07 13:39:36,524 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000001 transitioned from LOCALIZED to RUNNING
+2013-07-07 13:39:36,692 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: launchContainer: [bash, -c, /home/sunny/Hadoop2/hadoopdata/nodemanagerdata/usercache/sunny/appcache/application_1373184544832_0001/container_1373184544832_0001_01_000001/default_container_executor.sh]
+2013-07-07 13:39:37,144 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000, 
+2013-07-07 13:39:38,147 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000, 
+2013-07-07 13:39:39,151 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000, 
+2013-07-07 13:39:39,209 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Starting resource-monitoring for container_1373184544832_0001_01_000001
+2013-07-07 13:39:39,259 WARN org.apache.hadoop.yarn.util.ProcfsBasedProcessTree: Unexpected: procfs stat file is not in the expected format for process with pid 11552
+2013-07-07 13:39:39,264 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Memory usage of ProcessTree 29524 for container-id container_1373184544832_0001_01_000001: 79.9 MB of 1 GB physical memory used; 2.2 GB of 2.1 GB virtual memory used
+2013-07-07 13:39:39,645 INFO SecurityLogger.org.apache.hadoop.ipc.Server: Auth successful for appattempt_1373184544832_0001_000001 (auth:SIMPLE)
+2013-07-07 13:39:39,651 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Start request for container_1373184544832_0001_01_000002 by user sunny
+2013-07-07 13:39:39,651 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=sunny	IP=127.0.0.1	OPERATION=Start Container Request	TARGET=ContainerManageImpl	RESULT=SUCCESS	APPID=application_1373184544832_0001	CONTAINERID=container_1373184544832_0001_01_000002
+2013-07-07 13:39:39,651 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Adding container_1373184544832_0001_01_000002 to application application_1373184544832_0001
+2013-07-07 13:39:39,652 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000002 transitioned from NEW to LOCALIZED
+2013-07-07 13:39:39,660 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Getting container-status for container_1373184544832_0001_01_000002
+2013-07-07 13:39:39,661 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Returning container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 2, }, state: C_RUNNING, diagnostics: "", exit_status: -1000, 
+2013-07-07 13:39:39,728 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000002 transitioned from LOCALIZED to RUNNING
+2013-07-07 13:39:39,873 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: launchContainer: [bash, -c, /home/sunny/Hadoop2/hadoopdata/nodemanagerdata/usercache/sunny/appcache/application_1373184544832_0001/container_1373184544832_0001_01_000002/default_container_executor.sh]
+2013-07-07 13:39:39,898 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch: Container container_1373184544832_0001_01_000002 succeeded 
+2013-07-07 13:39:39,899 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000002 transitioned from RUNNING to EXITED_WITH_SUCCESS
+2013-07-07 13:39:39,900 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch: Cleaning up container container_1373184544832_0001_01_000002
+2013-07-07 13:39:39,942 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=sunny	OPERATION=Container Finished - Succeeded	TARGET=ContainerImpl	RESULT=SUCCESS	APPID=application_1373184544832_0001	CONTAINERID=container_1373184544832_0001_01_000002
+2013-07-07 13:39:39,943 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000002 transitioned from EXITED_WITH_SUCCESS to DONE
+2013-07-07 13:39:39,944 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Removing container_1373184544832_0001_01_000002 from application application_1373184544832_0001
+2013-07-07 13:39:40,155 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000, 
+2013-07-07 13:39:40,157 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 2, }, state: C_COMPLETE, diagnostics: "", exit_status: 0, 
+2013-07-07 13:39:40,158 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Removed completed container container_1373184544832_0001_01_000002
+2013-07-07 13:39:40,683 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Getting container-status for container_1373184544832_0001_01_000002
+2013-07-07 13:39:40,686 ERROR org.apache.hadoop.security.UserGroupInformation: PriviledgedActionException as:appattempt_1373184544832_0001_000001 (auth:TOKEN) cause:org.apache.hadoop.yarn.exceptions.YarnException: Container container_1373184544832_0001_01_000002 is not handled by this NodeManager
+2013-07-07 13:39:40,687 INFO org.apache.hadoop.ipc.Server: IPC Server handler 4 on 9993, call org.apache.hadoop.yarn.api.ContainerManagementProtocolPB.stopContainer from 127.0.0.1:51085: error: org.apache.hadoop.yarn.exceptions.YarnException: Container container_1373184544832_0001_01_000002 is not handled by this NodeManager
+org.apache.hadoop.yarn.exceptions.YarnException: Container container_1373184544832_0001_01_000002 is not handled by this NodeManager
+	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:45)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.authorizeGetAndStopContainerRequest(ContainerManagerImpl.java:614)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.stopContainer(ContainerManagerImpl.java:538)
+	at org.apache.hadoop.yarn.api.impl.pb.service.ContainerManagementProtocolPBServiceImpl.stopContainer(ContainerManagementProtocolPBServiceImpl.java:88)
+	at org.apache.hadoop.yarn.proto.ContainerManagementProtocol$ContainerManagementProtocolService$2.callBlockingMethod(ContainerManagementProtocol.java:85)
+	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:605)
+	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1033)
+	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1868)
+	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1864)
+	at java.security.AccessController.doPrivileged(Native Method)
+	at javax.security.auth.Subject.doAs(Subject.java:396)
+	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1489)
+	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1862)
+2013-07-07 13:39:41,162 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000, 
+2013-07-07 13:39:41,691 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch: Container container_1373184544832_0001_01_000001 succeeded 
+2013-07-07 13:39:41,692 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000001 transitioned from RUNNING to EXITED_WITH_SUCCESS
+2013-07-07 13:39:41,692 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch: Cleaning up container container_1373184544832_0001_01_000001
+2013-07-07 13:39:41,714 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=sunny	OPERATION=Container Finished - Succeeded	TARGET=ContainerImpl	RESULT=SUCCESS	APPID=application_1373184544832_0001	CONTAINERID=container_1373184544832_0001_01_000001
+2013-07-07 13:39:41,714 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000001 transitioned from EXITED_WITH_SUCCESS to DONE
+2013-07-07 13:39:41,714 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Removing container_1373184544832_0001_01_000001 from application application_1373184544832_0001
+2013-07-07 13:39:42,166 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 1, }, state: C_COMPLETE, diagnostics: "", exit_status: 0, 
+2013-07-07 13:39:42,166 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Removed completed container container_1373184544832_0001_01_000001
+2013-07-07 13:39:42,191 INFO SecurityLogger.org.apache.hadoop.ipc.Server: Auth successful for appattempt_1373184544832_0001_000001 (auth:SIMPLE)
+2013-07-07 13:39:42,195 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Getting container-status for container_1373184544832_0001_01_000001
+2013-07-07 13:39:42,196 ERROR org.apache.hadoop.security.UserGroupInformation: PriviledgedActionException as:appattempt_1373184544832_0001_000001 (auth:TOKEN) cause:org.apache.hadoop.yarn.exceptions.YarnException: Container container_1373184544832_0001_01_000001 is not handled by this NodeManager
+2013-07-07 13:39:42,196 INFO org.apache.hadoop.ipc.Server: IPC Server handler 5 on 9993, call org.apache.hadoop.yarn.api.ContainerManagementProtocolPB.stopContainer from 127.0.0.1:51086: error: org.apache.hadoop.yarn.exceptions.YarnException: Container container_1373184544832_0001_01_000001 is not handled by this NodeManager
+org.apache.hadoop.yarn.exceptions.YarnException: Container container_1373184544832_0001_01_000001 is not handled by this NodeManager
+	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:45)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.authorizeGetAndStopContainerRequest(ContainerManagerImpl.java:614)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.stopContainer(ContainerManagerImpl.java:538)
+	at org.apache.hadoop.yarn.api.impl.pb.service.ContainerManagementProtocolPBServiceImpl.stopContainer(ContainerManagementProtocolPBServiceImpl.java:88)
+	at org.apache.hadoop.yarn.proto.ContainerManagementProtocol$ContainerManagementProtocolService$2.callBlockingMethod(ContainerManagementProtocol.java:85)
+	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:605)
+	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1033)
+	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1868)
+	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1864)
+	at java.security.AccessController.doPrivileged(Native Method)
+	at javax.security.auth.Subject.doAs(Subject.java:396)
+	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1489)
+	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1862)
+2013-07-07 13:39:42,264 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Starting resource-monitoring for container_1373184544832_0001_01_000002
+2013-07-07 13:39:42,265 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Stopping resource-monitoring for container_1373184544832_0001_01_000002
+2013-07-07 13:39:42,265 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Stopping resource-monitoring for container_1373184544832_0001_01_000001
+2013-07-07 13:39:43,173 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Application application_1373184544832_0001 transitioned from RUNNING to APPLICATION_RESOURCES_CLEANINGUP
+2013-07-07 13:39:43,174 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices: Got event APPLICATION_STOP for appId application_1373184544832_0001
+2013-07-07 13:39:43,180 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Application application_1373184544832_0001 transitioned from APPLICATION_RESOURCES_CLEANINGUP to FINISHED
+2013-07-07 13:39:43,180 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler: Scheduling Log Deletion for application: application_1373184544832_0001, with delay of 10800 seconds
+
+
+Log Snippet for Application Manager
+==================================
+13/07/07 13:39:36 INFO client.SimpleApplicationMaster: Initializing ApplicationMaster
+13/07/07 13:39:37 INFO client.SimpleApplicationMaster: Application master for app, appId=1, clustertimestamp=1373184544832, attemptId=1
+13/07/07 13:39:37 INFO client.SimpleApplicationMaster: Starting ApplicationMaster
+13/07/07 13:39:37 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
+13/07/07 13:39:37 INFO impl.NMClientAsyncImpl: Upper bound of the thread pool size is 500
+13/07/07 13:39:37 INFO impl.ContainerManagementProtocolProxy: yarn.client.max-nodemanagers-proxies : 500
+13/07/07 13:39:37 INFO client.SimpleApplicationMaster: Max mem capabililty of resources in this cluster 8192
+13/07/07 13:39:37 INFO client.SimpleApplicationMaster: Requested container ask: Capability[&lt;memory:100, vCores:0&gt;]Priority[0]ContainerCount[1]
+13/07/07 13:39:39 INFO client.SimpleApplicationMaster: Got response from RM for container ask, allocatedCnt=1
+13/07/07 13:39:39 INFO client.SimpleApplicationMaster: Launching shell command on a new container., containerId=container_1373184544832_0001_01_000002, containerNode=sunny-Inspiron:9993, containerNodeURI=sunny-Inspiron:8042, containerResourceMemory1024
+13/07/07 13:39:39 INFO client.SimpleApplicationMaster: Setting up container launch container for containerid=container_1373184544832_0001_01_000002
+13/07/07 13:39:39 INFO impl.NMClientAsyncImpl: Processing Event EventType: START_CONTAINER for Container container_1373184544832_0001_01_000002
+13/07/07 13:39:39 INFO impl.ContainerManagementProtocolProxy: Opening proxy : sunny-Inspiron:9993
+13/07/07 13:39:39 INFO client.SimpleApplicationMaster: Succeeded to start Container container_1373184544832_0001_01_000002
+13/07/07 13:39:39 INFO impl.NMClientAsyncImpl: Processing Event EventType: QUERY_CONTAINER for Container container_1373184544832_0001_01_000002
+13/07/07 13:39:40 INFO client.SimpleApplicationMaster: Got response from RM for container ask, completedCnt=1
+13/07/07 13:39:40 INFO client.SimpleApplicationMaster: Got container status for containerID=container_1373184544832_0001_01_000002, state=COMPLETE, exitStatus=0, diagnostics=
+13/07/07 13:39:40 INFO client.SimpleApplicationMaster: Container completed successfully., containerId=container_1373184544832_0001_01_000002
+13/07/07 13:39:40 INFO client.SimpleApplicationMaster: Application completed. Stopping running containers
+13/07/07 13:39:40 ERROR impl.NMClientImpl: Failed to stop Container container_1373184544832_0001_01_000002when stopping NMClientImpl
+13/07/07 13:39:40 INFO impl.ContainerManagementProtocolProxy: Closing proxy : sunny-Inspiron:9993
+13/07/07 13:39:40 INFO client.SimpleApplicationMaster: Application completed. Signalling finish to RM
+13/07/07 13:39:41 INFO impl.AMRMClientAsyncImpl: Interrupted while waiting for queue
+java.lang.InterruptedException
+	at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.reportInterruptAfterWait(AbstractQueuedSynchronizer.java:1899)
+	at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1934)
+	at java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:399)
+	at org.apache.hadoop.yarn.client.api.async.impl.AMRMClientAsyncImpl$CallbackHandlerThread.run(AMRMClientAsyncImpl.java:281)
+13/07/07 13:39:41 INFO client.SimpleApplicationMaster: Application Master completed successfully. exiting
+
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-881">YARN-881</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>Priority#compareTo method seems to be wrong.</b><br>
+     <blockquote>if lower int value means higher priority, shouldn't we "return other.getPriority() - this.getPriority() " </blockquote></li>
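To make the ordering question in YARN-881 concrete, here is a minimal, hypothetical sketch (not the actual YARN Priority class) of a comparable type where a lower integer value means higher priority, implemented the way the report suggests:
{code:java}
// Hypothetical sketch only; names do not come from the YARN source.
public class SimplePriority implements Comparable<SimplePriority> {
  private final int priority;

  public SimplePriority(int priority) {
    this.priority = priority;
  }

  public int getPriority() {
    return priority;
  }

  @Override
  public int compareTo(SimplePriority other) {
    // Lower numeric value = higher priority, so the suggested
    // "other - this" makes higher-priority instances compare as greater.
    // Integer.compare(other.getPriority(), this.getPriority()) avoids
    // overflow for extreme values and is usually preferable.
    return other.getPriority() - this.getPriority();
  }
}
{code}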
+<li> <a href="https://issues.apache.org/jira/browse/YARN-771">YARN-771</a>.
+     Major sub-task reported by Bikas Saha and fixed by Junping Du <br>
+     <b>AMRMClient  support for resource blacklisting</b><br>
+     <blockquote>After YARN-750 AMRMClient should support blacklisting via the new YARN API's</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-758">YARN-758</a>.
+     Minor improvement reported by Bikas Saha and fixed by Karthik Kambatla <br>
+     <b>Augment MockNM to use multiple cores</b><br>
+     <blockquote>YARN-757 got fixed by changing the scheduler from Fair to default (which is capacity).</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-707">YARN-707</a>.
+     Blocker improvement reported by Bikas Saha and fixed by Jason Lowe <br>
+     <b>Add user info in the YARN ClientToken</b><br>
+     <blockquote>If user info is present in the client token then it can be used to do limited authz in the AM.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-696">YARN-696</a>.
+     Major improvement reported by Trevor Lorimer and fixed by Trevor Lorimer (resourcemanager)<br>
+     <b>Enable multiple states to be specified in Resource Manager apps REST call</b><br>
+     <blockquote>Within the YARN Resource Manager REST API the GET call which returns all Applications can be filtered by a single State query parameter (http://&lt;rm http address:port&gt;/ws/v1/cluster/apps). 
+
+There are 8 possible states (New, Submitted, Accepted, Running, Finishing, Finished, Failed, Killed), if no state parameter is specified all states are returned, however if a sub-set of states is required then multiple REST calls are required (max. of 7).
+
+The proposal is to be able to specify multiple states in a single REST call.</blockquote></li>
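As a purely illustrative sketch of what such a multi-state query could look like from a client, the snippet below issues one request with a comma-separated list; the <tt>states</tt> parameter name and the default RM web port 8088 are assumptions, not taken from the issue text:
{code:java}
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class RmAppsByStates {
  public static void main(String[] args) throws Exception {
    // Hypothetical multi-state filter in a single REST call.
    URL url = new URL(
        "http://localhost:8088/ws/v1/cluster/apps?states=RUNNING,FINISHED,KILLED");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    conn.setRequestProperty("Accept", "application/json");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line); // JSON listing of the matching applications
      }
    } finally {
      conn.disconnect();
    }
  }
}
{code}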
+<li> <a href="https://issues.apache.org/jira/browse/YARN-643">YARN-643</a>.
+     Major bug reported by Jian He and fixed by Xuan Gong <br>
+     <b>WHY appToken is removed both in BaseFinalTransition and AMUnregisteredTransition AND clientToken is removed in FinalTransition and not BaseFinalTransition</b><br>
+     <blockquote>The jira is tracking why appToken and clientToAMToken is removed separately, and why they are distributed in different transitions, ideally there may be a common place where these two tokens can be removed at the same time. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-602">YARN-602</a>.
+     Major bug reported by Xuan Gong and fixed by Kenji Kikushima <br>
+     <b>NodeManager should mandatorily set some Environment variables into every container that it launches</b><br>
+     <blockquote>NodeManager should mandatorily set some Environment variables into every container that it launches, such as Environment.user, Environment.pwd. If both the user and the NodeManager set those variables, the value set by the NM should be used.</blockquote></li>
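For context only, a tiny hypothetical sketch of how code running inside a launched container would read such variables once the NodeManager exports them; the exact variable names used below are an assumption, not taken from this release note:
{code:java}
public class ContainerEnvProbe {
  public static void main(String[] args) {
    // "USER" and "PWD" are assumed names for the user and working-directory
    // variables discussed above.
    String user = System.getenv("USER");
    String pwd = System.getenv("PWD");
    System.out.println("Container user: " + user);
    System.out.println("Container working dir: " + pwd);
  }
}
{code}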
+<li> <a href="https://issues.apache.org/jira/browse/YARN-589">YARN-589</a>.
+     Major improvement reported by Sandy Ryza and fixed by Sandy Ryza (scheduler)<br>
+     <b>Expose a REST API for monitoring the fair scheduler</b><br>
+     <blockquote>The fair scheduler should have an HTTP interface that exposes information such as applications per queue, fair shares, demands, current allocations.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-573">YARN-573</a>.
+     Critical sub-task reported by Omkar Vinit Joshi and fixed by Omkar Vinit Joshi <br>
+     <b>Shared data structures in Public Localizer and Private Localizer are not Thread safe.</b><br>
+     <blockquote>PublicLocalizer
+1) pending accessed by addResource (part of event handling) and run method (as a part of PublicLocalizer.run() ).
+
+PrivateLocalizer
+1) pending accessed by addResource (part of event handling) and findNextResource (i.remove()). Also update method should be fixed. It too is sharing pending list.
+</blockquote></li>
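As a generic illustration of the kind of change such a report calls for (this is not the localizer source), a pending list shared between an event-handler thread and a worker thread can be guarded like this:
{code:java}
import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch: one thread adds resources, another drains them.
public class PendingResources<T> {
  private final List<T> pending = new ArrayList<T>();

  // Called from the event-handling thread (cf. addResource above).
  public void addResource(T resource) {
    synchronized (pending) {
      pending.add(resource);
    }
  }

  // Called from the worker's run/findNextResource loop;
  // returns null when nothing is pending.
  public T takeNextResource() {
    synchronized (pending) {
      return pending.isEmpty() ? null : pending.remove(0);
    }
  }
}
{code}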
+<li> <a href="https://issues.apache.org/jira/browse/YARN-540">YARN-540</a>.
+     Major sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+     <b>Race condition causing RM to potentially relaunch already unregistered AMs on RM restart</b><br>
+     <blockquote>When job succeeds and successfully call finishApplicationMaster, RM shutdown and restart-dispatcher is stopped before it can process REMOVE_APP event. The next time RM comes back, it will reload the existing state files even though the job is succeeded</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-502">YARN-502</a>.
+     Major sub-task reported by Lohit Vijayarenu and fixed by Mayank Bansal <br>
+     <b>RM crash with NPE on NODE_REMOVED event with FairScheduler</b><br>
+     <blockquote>While running some test and adding/removing nodes, we see RM crashed with the below exception. We are testing with fair scheduler and running hadoop-2.0.3-alpha
+
+{noformat}
+2013-03-22 18:54:27,015 INFO org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl: Deactivating Node YYYY:55680 as it is now LOST
+2013-03-22 18:54:27,015 INFO org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl: YYYY:55680 Node Transitioned from UNHEALTHY to LOST
+2013-03-22 18:54:27,015 FATAL org.apache.hadoop.yarn.server.resourcemanager.ResourceManager: Error in handling event type NODE_REMOVED to the scheduler
+java.lang.NullPointerException
+        at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.removeNode(FairScheduler.java:619)
+        at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.handle(FairScheduler.java:856)
+        at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.handle(FairScheduler.java:98)
+        at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessor.run(ResourceManager.java:375)
+        at java.lang.Thread.run(Thread.java:662)
+2013-03-22 18:54:27,016 INFO org.apache.hadoop.yarn.server.resourcemanager.ResourceManager: Exiting, bbye..
+2013-03-22 18:54:27,020 INFO org.mortbay.log: Stopped SelectChannelConnector@XXXX:50030
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-337">YARN-337</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (resourcemanager)<br>
+     <b>RM handles killed application tracking URL poorly</b><br>
+     <blockquote>When the ResourceManager kills an application, it leaves the proxy URL redirecting to the original tracking URL for the application even though the ApplicationMaster is no longer there to service it.  It should redirect it somewhere more useful, like the RM's web page for the application, where the user can find that the application was killed and links to the AM logs.
+
+In addition, sometimes the AM during teardown from the kill can attempt to unregister and provide an updated tracking URL, but unfortunately the RM has "forgotten" the AM due to the kill and refuses to process the unregistration.  Instead it logs:
+
+{noformat}
+2013-01-09 17:37:49,671 [IPC Server handler 2 on 8030] ERROR
+org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService: AppAttemptId doesnt exist in cache appattempt_1357575694478_28614_000001
+{noformat}
+
+It should go ahead and process the unregistration to update the tracking URL since the application offered it.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-292">YARN-292</a>.
+     Major sub-task reported by Devaraj K and fixed by Zhijie Shen (resourcemanager)<br>
+     <b>ResourceManager throws ArrayIndexOutOfBoundsException while handling CONTAINER_ALLOCATED for application attempt</b><br>
+     <blockquote>{code:xml}
+2012-12-26 08:41:15,030 ERROR org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler: Calling allocate on removed or non existant application appattempt_1356385141279_49525_000001
+2012-12-26 08:41:15,031 ERROR org.apache.hadoop.yarn.server.resourcemanager.ResourceManager: Error in handling event type CONTAINER_ALLOCATED for applicationAttempt application_1356385141279_49525
+java.lang.ArrayIndexOutOfBoundsException: 0
+	at java.util.Arrays$ArrayList.get(Arrays.java:3381)
+	at org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl$AMContainerAllocatedTransition.transition(RMAppAttemptImpl.java:655)
+	at org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl$AMContainerAllocatedTransition.transition(RMAppAttemptImpl.java:644)
+	at org.apache.hadoop.yarn.state.StateMachineFactory$SingleInternalArc.doTransition(StateMachineFactory.java:357)
+	at org.apache.hadoop.yarn.state.StateMachineFactory.doTransition(StateMachineFactory.java:298)
+	at org.apache.hadoop.yarn.state.StateMachineFactory.access$300(StateMachineFactory.java:43)
+	at org.apache.hadoop.yarn.state.StateMachineFactory$InternalStateMachine.doTransition(StateMachineFactory.java:443)
+	at org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl.handle(RMAppAttemptImpl.java:490)
+	at org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl.handle(RMAppAttemptImpl.java:80)
+	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$ApplicationAttemptEventDispatcher.handle(ResourceManager.java:433)
+	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$ApplicationAttemptEventDispatcher.handle(ResourceManager.java:414)
+	at org.apache.hadoop.yarn.event.AsyncDispatcher.dispatch(AsyncDispatcher.java:126)
+	at org.apache.hadoop.yarn.event.AsyncDispatcher$1.run(AsyncDispatcher.java:75)
+	at java.lang.Thread.run(Thread.java:662)
+ {code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-107">YARN-107</a>.
+     Major bug reported by Devaraj K and fixed by Xuan Gong (resourcemanager)<br>
+     <b>ClientRMService.forceKillApplication() should handle the non-RUNNING applications properly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5497">MAPREDUCE-5497</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>'5s sleep'  in MRAppMaster.shutDownJob is only needed before stopping ClientService</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5493">MAPREDUCE-5493</a>.
+     Blocker bug reported by Jason Lowe and fixed by Jason Lowe (mrv2)<br>
+     <b>In-memory map outputs can be leaked after shuffle completes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5483">MAPREDUCE-5483</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Robert Kanter (distcp)<br>
+     <b>revert MAPREDUCE-5357</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5478">MAPREDUCE-5478</a>.
+     Minor improvement reported by Sandy Ryza and fixed by Sandy Ryza (examples)<br>
+     <b>TeraInputFormat unnecessarily defines its own FileSplit subclass</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5476">MAPREDUCE-5476</a>.
+     Blocker bug reported by Jian He and fixed by Jian He <br>
+     <b>Job can fail when RM restarts after staging dir is cleaned but before MR successfully unregister with RM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5475">MAPREDUCE-5475</a>.
+     Blocker bug reported by Jason Lowe and fixed by Jason Lowe (mr-am , mrv2)<br>
+     <b>MRClientService does not verify ACLs properly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5470">MAPREDUCE-5470</a>.
+     Major bug reported by Chris Nauroth and fixed by Sandy Ryza <br>
+     <b>LocalJobRunner does not work on Windows.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5468">MAPREDUCE-5468</a>.
+     Blocker bug reported by Yesha Vora and fixed by Vinod Kumar Vavilapalli <br>
+     <b>AM recovery does not work for map only jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5466">MAPREDUCE-5466</a>.
+     Blocker bug reported by Yesha Vora and fixed by Jian He <br>
+     <b>Historyserver does not refresh the result of restarted jobs after RM restart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5462">MAPREDUCE-5462</a>.
+     Major sub-task reported by Sandy Ryza and fixed by Sandy Ryza (performance , task)<br>
+     <b>In map-side sort, swap entire meta entries instead of indexes for better cache performance </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5454">MAPREDUCE-5454</a>.
+     Major bug reported by Karthik Kambatla and fixed by Karthik Kambatla (test)<br>
+     <b>TestDFSIO fails intermittently on JDK7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5446">MAPREDUCE-5446</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (mrv2 , test)<br>
+     <b>TestJobHistoryEvents and TestJobHistoryParsing have race conditions</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5441">MAPREDUCE-5441</a>.
+     Major bug reported by Rohith Sharma K S and fixed by Jian He (applicationmaster , client)<br>
+     <b>JobClient exit whenever RM issue Reboot command to 1st attempt App Master.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5440">MAPREDUCE-5440</a>.
+     Major bug reported by Robert Parker and fixed by Robert Parker (mrv2)<br>
+     <b>TestCopyCommitter Fails on JDK7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5428">MAPREDUCE-5428</a>.
+     Major bug reported by Jason Lowe and fixed by Karthik Kambatla (jobhistoryserver , mrv2)<br>
+     <b>HistoryFileManager doesn't stop threads when service is stopped</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5425">MAPREDUCE-5425</a>.
+     Major bug reported by Ashwin Shankar and fixed by Robert Parker (jobhistoryserver)<br>
+     <b>Junit in TestJobHistoryServer failing in jdk 7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5414">MAPREDUCE-5414</a>.
+     Major bug reported by Nemon Lou and fixed by Nemon Lou (test)<br>
+     <b>TestTaskAttempt fails jdk7 with NullPointerException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5385">MAPREDUCE-5385</a>.
+     Blocker bug reported by Omkar Vinit Joshi and fixed by Omkar Vinit Joshi <br>
+     <b>JobContext cache files api are broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5379">MAPREDUCE-5379</a>.
+     Major improvement reported by Sandy Ryza and fixed by Karthik Kambatla (job submission , security)<br>
+     <b>Include token tracking ids in jobconf</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5367">MAPREDUCE-5367</a>.
+     Major improvement reported by Sandy Ryza and fixed by Sandy Ryza <br>
+     <b>Local jobs all use same local working directory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5358">MAPREDUCE-5358</a>.
+     Major bug reported by Devaraj K and fixed by Devaraj K (mr-am)<br>
+     <b>MRAppMaster throws invalid transitions for JobImpl</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5317">MAPREDUCE-5317</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash (mrv2)<br>
+     <b>Stale files left behind for failed jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5251">MAPREDUCE-5251</a>.
+     Major bug reported by Jason Lowe and fixed by Ashwin Shankar (mrv2)<br>
+     <b>Reducer should not implicate map attempt if it has insufficient space to fetch map output</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5164">MAPREDUCE-5164</a>.
+     Major bug reported by Nemon Lou and fixed by Nemon Lou <br>
+     <b>command  "mapred job" and "mapred queue" omit HADOOP_CLIENT_OPTS </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5020">MAPREDUCE-5020</a>.
+     Major bug reported by Trevor Robinson and fixed by Trevor Robinson (client)<br>
+     <b>Compile failure with JDK8</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5001">MAPREDUCE-5001</a>.
+     Major bug reported by Brock Noland and fixed by Sandy Ryza <br>
+     <b>LocalJobRunner has race condition resulting in job failures </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3193">MAPREDUCE-3193</a>.
+     Major bug reported by Ramgopal N and fixed by Devaraj K (mrv1 , mrv2)<br>
+     <b>FileInputFormat doesn't read files recursively in the input path dir</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1981">MAPREDUCE-1981</a>.
+     Major improvement reported by Hairong Kuang and fixed by Hairong Kuang (job submission)<br>
+     <b>Improve getSplits performance by using listLocatedStatus</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5199">HDFS-5199</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Add more debug trace for NFS READ and WRITE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5192">HDFS-5192</a>.
+     Minor bug reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>NameNode may fail to start when dfs.client.test.drop.namenode.response.number is set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5159">HDFS-5159</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (namenode)<br>
+     <b>Secondary NameNode fails to checkpoint if error occurs downloading edits on first checkpoint</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5150">HDFS-5150</a>.
+     Blocker bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Allow per NN SPN for internal SPNEGO.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5140">HDFS-5140</a>.
+     Blocker bug reported by Arpit Gupta and fixed by Jing Zhao (ha)<br>
+     <b>Too many safemode monitor threads being created in the standby namenode causing it to fail with out of memory error</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5136">HDFS-5136</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>MNT EXPORT should give the full group list which can mount the exports</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5132">HDFS-5132</a>.
+     Blocker bug reported by Arpit Gupta and fixed by Kihwal Lee (namenode)<br>
+     <b>Deadlock in NameNode between SafeModeMonitor#run and DatanodeManager#handleHeartbeat</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5128">HDFS-5128</a>.
+     Critical improvement reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Allow multiple net interfaces to be used with HA namenode RPC server</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5124">HDFS-5124</a>.
+     Blocker bug reported by Deepesh Khandelwal and fixed by Daryn Sharp (namenode)<br>
+     <b>DelegationTokenSecretManager#retrievePassword can cause deadlock in NameNode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5118">HDFS-5118</a>.
+     Major new feature reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>Provide testing support for DFSClient to drop RPC responses</b><br>
+     <blockquote>Used for testing when NameNode HA is enabled. Users can use a new configuration property "dfs.client.test.drop.namenode.response.number" to specify the number of responses that DFSClient will drop in each RPC call. This feature can help testing functionalities such as NameNode retry cache.</blockquote></li>
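Since the note names the property directly, a minimal client-side sketch of enabling it might look like the following; the value 3 is only an example, and treating the value as an int is an assumption:
{code:java}
import org.apache.hadoop.conf.Configuration;

public class DropResponsesForTesting {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Ask DFSClient to drop a number of NameNode responses per RPC call,
    // e.g. to exercise the NameNode retry cache as described above.
    conf.setInt("dfs.client.test.drop.namenode.response.number", 3);
    // A FileSystem/DFSClient created from this conf would then drop responses.
  }
}
{code}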
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5111">HDFS-5111</a>.
+     Minor bug reported by Jing Zhao and fixed by Jing Zhao (snapshots)<br>
+     <b>Remove duplicated error message for snapshot commands when processing invalid arguments</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5110">HDFS-5110</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Change FSDataOutputStream to HdfsDataOutputStream for opened streams to fix type cast error</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5107">HDFS-5107</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Fix array copy error in Readdir and Readdirplus responses</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5106">HDFS-5106</a>.
+     Minor bug reported by Chuan Liu and fixed by Chuan Liu (test)<br>
+     <b>TestDatanodeBlockScanner fails on Windows due to incorrect path format</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5105">HDFS-5105</a>.
+     Minor bug reported by Chuan Liu and fixed by Chuan Liu <br>
+     <b>TestFsck fails on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5104">HDFS-5104</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Support dotdot name in NFS LOOKUP operation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5103">HDFS-5103</a>.
+     Minor bug reported by Chuan Liu and fixed by Chuan Liu (test)<br>
+     <b>TestDirectoryScanner fails on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5102">HDFS-5102</a>.
+     Major bug reported by Aaron T. Myers and fixed by Jing Zhao (snapshots)<br>
+     <b>Snapshot names should not be allowed to contain slash characters</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5100">HDFS-5100</a>.
+     Minor bug reported by Chuan Liu and fixed by Chuan Liu (test)<br>
+     <b>TestNamenodeRetryCache fails on Windows due to incorrect cleanup</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5099">HDFS-5099</a>.
+     Major bug reported by Chuan Liu and fixed by Chuan Liu (namenode)<br>
+     <b>Namenode#copyEditLogSegmentsToSharedDir should close EditLogInputStreams upon finishing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5091">HDFS-5091</a>.
+     Minor bug reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>Support for spnego keytab separate from the JournalNode keytab for secure HA</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5085">HDFS-5085</a>.
+     Major sub-task reported by Brandon Li and fixed by Jing Zhao (nfs)<br>
+     <b>Refactor o.a.h.nfs to support different types of authentications</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5080">HDFS-5080</a>.
+     Major bug reported by Jing Zhao and fixed by Jing Zhao (ha , qjm)<br>
+     <b>BootstrapStandby not working with QJM when the existing NN is active</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5078">HDFS-5078</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Support file append in NFSv3 gateway to enable data streaming to HDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5076">HDFS-5076</a>.
+     Minor new feature reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>Add MXBean methods to query NN's transaction information and JournalNode's journal status</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5071">HDFS-5071</a>.
+     Major sub-task reported by Kihwal Lee and fixed by Brandon Li (nfs)<br>
+     <b>Change hdfs-nfs parent project to hadoop-project</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5069">HDFS-5069</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Include hadoop-nfs and hadoop-hdfs-nfs into hadoop dist for NFS deployment</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5067">HDFS-5067</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Support symlink operations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5061">HDFS-5061</a>.
+     Major improvement reported by Arpit Agarwal and fixed by Arpit Agarwal (namenode)<br>
+     <b>Make FSNameSystem#auditLoggers an unmodifiable list</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5055">HDFS-5055</a>.
+     Blocker bug reported by Allen Wittenauer and fixed by Vinay (namenode)<br>
+     <b>nn fails to download checkpointed image from snn in some setups</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5047">HDFS-5047</a>.
+     Major bug reported by Kihwal Lee and fixed by Robert Parker (namenode)<br>
+     <b>Suppress logging of full stack trace of quota and lease exceptions</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5045">HDFS-5045</a>.
+     Minor improvement reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>Add more unit tests for retry cache to cover all AtMostOnce methods</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5043">HDFS-5043</a>.
+     Major bug reported by Brandon Li and fixed by Brandon Li <br>
+     <b>For HdfsFileStatus, set default value of childrenNum to -1 instead of 0 to avoid confusing applications</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5028">HDFS-5028</a>.
+     Major bug reported by zhaoyunjiong and fixed by zhaoyunjiong <br>
+     <b>LeaseRenewer throw java.util.ConcurrentModificationException when timeout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4993">HDFS-4993</a>.
+     Major bug reported by Kihwal Lee and fixed by Robert Parker <br>
+     <b>fsck can fail if a file is renamed or deleted</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4962">HDFS-4962</a>.
+     Minor sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (nfs)<br>
+     <b>Use enum for nfs constants</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4947">HDFS-4947</a>.
+     Major sub-task reported by Brandon Li and fixed by Jing Zhao (nfs)<br>
+     <b>Add NFS server export table to control export by hostname or IP range</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4926">HDFS-4926</a>.
+     Trivial improvement reported by Joseph Lorenzini and fixed by Vivek Ganesan (namenode)<br>
+     <b>namenode webserver's page has a tooltip that is inconsistent with the datanode HTML link</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4905">HDFS-4905</a>.
+     Minor improvement reported by Arpit Agarwal and fixed by Arpit Agarwal (tools)<br>
+     <b>Add appendToFile command to "hdfs dfs"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4898">HDFS-4898</a>.
+     Minor bug reported by Eric Sirianni and fixed by Tsz Wo (Nicholas), SZE (namenode)<br>
+     <b>BlockPlacementPolicyWithNodeGroup.chooseRemoteRack() fails to properly fallback to local rack</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4763">HDFS-4763</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Add script changes/utility for starting NFS gateway</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4680">HDFS-4680</a>.
+     Major bug reported by Andrew Wang and fixed by Andrew Wang (namenode , security)<br>
+     <b>Audit logging of delegation tokens for MR tracing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4632">HDFS-4632</a>.
+     Major bug reported by Chris Nauroth and fixed by Chuan Liu (test)<br>
+     <b>globStatus using backslash for escaping does not work on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4594">HDFS-4594</a>.
+     Minor bug reported by Arpit Gupta and fixed by Chris Nauroth (webhdfs)<br>
+     <b>WebHDFS open sets Content-Length header to what is specified by length parameter rather than how much data is actually returned. </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4329">HDFS-4329</a>.
+     Major bug reported by Andy Isaacson and fixed by Cristina L. Abad (hdfs-client)<br>
+     <b>DFSShell issues with directories with spaces in name</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3245">HDFS-3245</a>.
+     Major improvement reported by Todd Lipcon and fixed by Ravi Prakash (namenode)<br>
+     <b>Add metrics and web UI for cluster version summary</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2933">HDFS-2933</a>.
+     Major improvement reported by Philip Zeyliger and fixed by Vivek Ganesan (datanode)<br>
+     <b>Improve DataNode Web UI Index Page</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9962">HADOOP-9962</a>.
+     Major improvement reported by Roman Shaposhnik and fixed by Roman Shaposhnik (build)<br>
+     <b>in order to avoid dependency divergence within Hadoop itself lets enable DependencyConvergence</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9961">HADOOP-9961</a>.
+     Minor bug reported by Roman Shaposhnik and fixed by Roman Shaposhnik (build)<br>
+     <b>versions of a few transitive dependencies diverged between hadoop subprojects</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9960">HADOOP-9960</a>.
+     Blocker bug reported by Brock Noland and fixed by Karthik Kambatla <br>
+     <b>Upgrade Jersey version to 1.9</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9958">HADOOP-9958</a>.
+     Major bug reported by Andrew Wang and fixed by Andrew Wang <br>
+     <b>Add old constructor back to DelegationTokenInformation to unbreak downstream builds</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9945">HADOOP-9945</a>.
+     Minor improvement reported by Karthik Kambatla and fixed by Karthik Kambatla (ha)<br>
+     <b>HAServiceState should have a state for stopped services</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9944">HADOOP-9944</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Arun C Murthy <br>
+     <b>RpcRequestHeaderProto defines callId as uint32 while ipc.Client.CONNECTION_CONTEXT_CALL_ID is signed (-3)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9932">HADOOP-9932</a>.
+     Blocker bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Improper synchronization in RetryCache</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9924">HADOOP-9924</a>.
+     Major bug reported by shanyu zhao and fixed by shanyu zhao (fs)<br>
+     <b>FileUtil.createJarWithClassPath() does not generate relative classpath correctly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9918">HADOOP-9918</a>.
+     Minor improvement reported by Karthik Kambatla and fixed by Karthik Kambatla <br>
+     <b>Add addIfService() to CompositeService</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9916">HADOOP-9916</a>.
+     Minor bug reported by Binglin Chang and fixed by Binglin Chang <br>
+     <b>Race condition in ipc.Client causes TestIPC timeout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9910">HADOOP-9910</a>.
+     Minor bug reported by Andr&#233; Kelpe and fixed by  <br>
+     <b>proxy server start and stop documentation wrong</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9906">HADOOP-9906</a>.
+     Minor bug reported by Karthik Kambatla and fixed by Karthik Kambatla (ha)<br>
+     <b>Move HAZKUtil to o.a.h.util.ZKUtil and make inner-classes public</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9899">HADOOP-9899</a>.
+     Minor bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (security)<br>
+     <b>Remove the debug message added by HADOOP-8855</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9886">HADOOP-9886</a>.
+     Minor improvement reported by Arpit Gupta and fixed by Arpit Gupta <br>
+     <b>Turn warning message in RetryInvocationHandler to debug</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9880">HADOOP-9880</a>.
+     Blocker bug reported by Kihwal Lee and fixed by Daryn Sharp <br>
+     <b>SASL changes from HADOOP-9421 breaks Secure HA NN </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9879">HADOOP-9879</a>.
+     Minor improvement reported by Karthik Kambatla and fixed by Karthik Kambatla (build)<br>
+     <b>Move the version info of zookeeper dependencies to hadoop-project/pom</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9868">HADOOP-9868</a>.
+     Blocker bug reported by Daryn Sharp and fixed by Daryn Sharp (ipc)<br>
+     <b>Server must not advertise kerberos realm</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9858">HADOOP-9858</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (fs)<br>
+     <b>Remove unused private RawLocalFileSystem#execCommand method from branch-2.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9857">HADOOP-9857</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (build , test)<br>
+     <b>Tests block and sometimes timeout on Windows due to invalid entropy source.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9833">HADOOP-9833</a>.
+     Minor improvement reported by Steve Loughran and fixed by Kousuke Saruta (build)<br>
+     <b>move slf4j to version 1.7.5</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9831">HADOOP-9831</a>.
+     Minor improvement reported by Chris Nauroth and fixed by Chris Nauroth (bin)<br>
+     <b>Make checknative shell command accessible on Windows.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9821">HADOOP-9821</a>.
+     Minor improvement reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA <br>
+     <b>ClientId should have getMsb/getLsb methods</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9820">HADOOP-9820</a>.
+     Blocker bug reported by Daryn Sharp and fixed by Daryn Sharp (ipc , security)<br>
+     <b>RPCv9 wire protocol is insufficient to support multiplexing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9806">HADOOP-9806</a>.
+     Major bug reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>PortmapInterface should check if the procedure is out-of-range</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9803">HADOOP-9803</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (ipc)<br>
+     <b>Add generic type parameter to RetryInvocationHandler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9802">HADOOP-9802</a>.
+     Major improvement reported by Chris Nauroth and fixed by Chris Nauroth (io)<br>
+     <b>Support Snappy codec on Windows.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9801">HADOOP-9801</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (conf)<br>
+     <b>Configuration#writeXml uses platform defaulting encoding, which may mishandle multi-byte characters.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9789">HADOOP-9789</a>.
+     Critical new feature reported by Daryn Sharp and fixed by Daryn Sharp (ipc , security)<br>
+     <b>Support server advertised kerberos principals</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9774">HADOOP-9774</a>.
+     Major bug reported by shanyu zhao and fixed by shanyu zhao (fs)<br>
+     <b>RawLocalFileSystem.listStatus() return absolute paths when input path is relative on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9768">HADOOP-9768</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (fs)<br>
+     <b>chown and chgrp reject users and groups with spaces on platforms where spaces are otherwise acceptable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9757">HADOOP-9757</a>.
+     Major bug reported by Jason Lowe and fixed by Cristina L. Abad (fs)<br>
+     <b>Har metadata cache can grow without limit</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9686">HADOOP-9686</a>.
+     Major improvement reported by Jason Lowe and fixed by Jason Lowe (conf)<br>
+     <b>Easy access to final parameters in Configuration</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9672">HADOOP-9672</a>.
+     Major improvement reported by Sandy Ryza and fixed by Sandy Ryza <br>
+     <b>Upgrade Avro dependency to 1.7.4</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9557">HADOOP-9557</a>.
+     Major bug reported by Lohit Vijayarenu and fixed by Lohit Vijayarenu (build)<br>
+     <b>hadoop-client excludes commons-httpclient</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9446">HADOOP-9446</a>.
+     Major improvement reported by Yu Gao and fixed by Yu Gao (security)<br>
+     <b>Support Kerberos HTTP SPNEGO authentication for non-SUN JDK</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9435">HADOOP-9435</a>.
+     Major bug reported by Tian Hong Wang and fixed by Tian Hong Wang (build)<br>
+     <b>Support building the JNI code against the IBM JVM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9381">HADOOP-9381</a>.
+     Trivial bug reported by Keegan Witt and fixed by Keegan Witt <br>
+     <b>Document dfs cp -f option</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9315">HADOOP-9315</a>.
+     Major bug reported by Dennis Y and fixed by Chris Nauroth (build)<br>
+     <b>Port HADOOP-9249 hadoop-maven-plugins Clover fix to branch-2 to fix build failures</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8814">HADOOP-8814</a>.
+     Minor improvement reported by Brandon Li and fixed by Brandon Li (conf , fs , fs/s3 , ha , io , metrics , performance , record , security , util)<br>
+     <b>Inefficient comparison with the empty string. Use isEmpty() instead</b><br>
+     <blockquote></blockquote></li>
+</ul>
+</body></html>
 <META http-equiv="Content-Type" content="text/html; charset=UTF-8">
 <title>Hadoop  2.1.0-beta Release Notes</title>
 <STYLE type="text/css">

+ 4 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -264,5 +264,9 @@ public class CommonConfigurationKeysPublic {
   /** Default value for HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN */
   public static final int HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT =
           60;
+
+  // HTTP policies to be used in configuration
+  public static final String HTTP_POLICY_HTTP_ONLY = "HTTP_ONLY";
+  public static final String HTTP_POLICY_HTTPS_ONLY = "HTTPS_ONLY";
 }
 

+ 41 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DirectoryListingStartAfterNotFoundException.java

@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/** 
+ * Thrown when the startAfter can't be found when listing a directory.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+@InterfaceStability.Stable
+public class DirectoryListingStartAfterNotFoundException extends IOException {
+  private static final long serialVersionUID = 1L;
+
+  public DirectoryListingStartAfterNotFoundException() {
+    super();
+  }
+
+  public DirectoryListingStartAfterNotFoundException(String msg) {
+    super(msg);
+  }
+}

+ 70 - 25
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.fs;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -50,26 +51,26 @@ class Globber {
     this.filter = filter;
   }
 
-  private FileStatus getFileStatus(Path path) {
+  private FileStatus getFileStatus(Path path) throws IOException {
     try {
       if (fs != null) {
         return fs.getFileStatus(path);
       } else {
         return fc.getFileStatus(path);
       }
-    } catch (IOException e) {
+    } catch (FileNotFoundException e) {
       return null;
     }
   }
 
-  private FileStatus[] listStatus(Path path) {
+  private FileStatus[] listStatus(Path path) throws IOException {
     try {
       if (fs != null) {
         return fs.listStatus(path);
       } else {
         return fc.util().listStatus(path);
       }
-    } catch (IOException e) {
+    } catch (FileNotFoundException e) {
       return new FileStatus[0];
     }
   }
@@ -82,6 +83,15 @@ class Globber {
     }
   }
 
+  /**
+   * Convert a path component that contains backslash escape sequences to a
+   * literal string.  This is necessary when you want to explicitly refer to a
+   * path that contains globber metacharacters.
+   */
+  private static String unescapePathComponent(String name) {
+    return name.replaceAll("\\\\(.)", "$1");
+  }
+
   /**
    * Translate an absolute path into a list of path components.
    * We merge double slashes into a single slash here.
@@ -165,37 +175,72 @@ class Globber {
             new Path(scheme, authority, Path.SEPARATOR)));
       }
       
-      for (String component : components) {
+      for (int componentIdx = 0; componentIdx < components.size();
+          componentIdx++) {
         ArrayList<FileStatus> newCandidates =
             new ArrayList<FileStatus>(candidates.size());
-        GlobFilter globFilter = new GlobFilter(component);
+        GlobFilter globFilter = new GlobFilter(components.get(componentIdx));
+        String component = unescapePathComponent(components.get(componentIdx));
         if (globFilter.hasPattern()) {
           sawWildcard = true;
         }
         if (candidates.isEmpty() && sawWildcard) {
+          // Optimization: if there are no more candidates left, stop examining 
+          // the path components.  We can only do this if we've already seen
+          // a wildcard component-- otherwise, we still need to visit all path 
+          // components in case one of them is a wildcard.
           break;
         }
-        for (FileStatus candidate : candidates) {
-          FileStatus resolvedCandidate = candidate;
-          if (candidate.isSymlink()) {
-            // We have to resolve symlinks, because otherwise we don't know
-            // whether they are directories.
-            resolvedCandidate = getFileStatus(candidate.getPath());
+        if ((componentIdx < components.size() - 1) &&
+            (!globFilter.hasPattern())) {
+          // Optimization: if this is not the terminal path component, and we 
+          // are not matching against a glob, assume that it exists.  If it 
+          // doesn't exist, we'll find out later when resolving a later glob
+          // or the terminal path component.
+          for (FileStatus candidate : candidates) {
+            candidate.setPath(new Path(candidate.getPath(), component));
           }
-          if (resolvedCandidate == null ||
-              resolvedCandidate.isDirectory() == false) {
-            continue;
-          }
-          FileStatus[] children = listStatus(candidate.getPath());
-          for (FileStatus child : children) {
-            // Set the child path based on the parent path.
-            // This keeps the symlinks in our path.
-            child.setPath(new Path(candidate.getPath(),
-                    child.getPath().getName()));
-            if (globFilter.accept(child.getPath())) {
-              newCandidates.add(child);
+          continue;
+        }
+        for (FileStatus candidate : candidates) {
+          if (globFilter.hasPattern()) {
+            FileStatus[] children = listStatus(candidate.getPath());
+            if (children.length == 1) {
+              // If we get back only one result, this could be either a listing
+              // of a directory with one entry, or it could reflect the fact
+              // that what we listed resolved to a file.
+              //
+              // Unfortunately, we can't just compare the returned paths to
+              // figure this out.  Consider the case where you have /a/b, where
+              // b is a symlink to "..".  In that case, listing /a/b will give
+              // back "/a/b" again.  If we just went by returned pathname, we'd
+              // incorrectly conclude that /a/b was a file and should not match
+              // /a/*/*.  So we use getFileStatus of the path we just listed to
+              // disambiguate.
+              if (!getFileStatus(candidate.getPath()).isDirectory()) {
+                continue;
+              }
             }
-          }
+            for (FileStatus child : children) {
+              // Set the child path based on the parent path.
+              child.setPath(new Path(candidate.getPath(),
+                      child.getPath().getName()));
+              if (globFilter.accept(child.getPath())) {
+                newCandidates.add(child);
+              }
+            }
+          } else {
+            // When dealing with non-glob components, use getFileStatus 
+            // instead of listStatus.  This is an optimization, but it also
+            // is necessary for correctness in HDFS, since there are some
+            // special HDFS directories like .reserved and .snapshot that are
+            // not visible to listStatus, but which do exist.  (See HADOOP-9877)
+            FileStatus childStatus = getFileStatus(
+                new Path(candidate.getPath(), component));
+            if (childStatus != null) {
+              newCandidates.add(childStatus);
+            }
+          }
         }
         candidates = newCandidates;
       }
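
To make the escaping concrete: the replacement above strips one backslash and keeps whatever character it protected, so a component that was escaped to survive glob expansion becomes a literal name again. A standalone sketch (class name hypothetical, not part of this patch):

public class UnescapeDemo {
  // Same idea as Globber#unescapePathComponent: drop the backslash and keep
  // the escaped character, so "\*" becomes "*" and "\\" becomes "\".
  static String unescapePathComponent(String name) {
    return name.replaceAll("\\\\(.)", "$1");
  }

  public static void main(String[] args) {
    System.out.println(unescapePathComponent("data\\*set"));  // prints data*set
    System.out.println(unescapePathComponent("a\\[1\\]"));    // prints a[1]
  }
}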

+ 172 - 76
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java

@@ -17,20 +17,6 @@
  */
 package org.apache.hadoop.fs;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.net.URLDecoder;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.LinkedHashMap;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.HashMap;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -40,6 +26,14 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.util.LineReader;
 import org.apache.hadoop.util.Progressable;
 
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URLDecoder;
+import java.util.*;
+
 /**
  * This is an implementation of the Hadoop Archive 
  * Filesystem. This archive Filesystem has index files
@@ -53,7 +47,7 @@ import org.apache.hadoop.util.Progressable;
  * index for ranges of hashcodes.
  */
 
-public class HarFileSystem extends FilterFileSystem {
+public class HarFileSystem extends FileSystem {
 
   private static final Log LOG = LogFactory.getLog(HarFileSystem.class);
 
@@ -75,11 +69,13 @@ public class HarFileSystem extends FilterFileSystem {
   // pointer into the static metadata cache
   private HarMetaData metadata;
 
+  private FileSystem fs;
+
   /**
    * public construction of harfilesystem
-   *
    */
   public HarFileSystem() {
+    // Must call #initialize() method to set the underlying file system
   }
 
   /**
@@ -96,10 +92,11 @@ public class HarFileSystem extends FilterFileSystem {
   /**
    * Constructor to create a HarFileSystem with an
    * underlying filesystem.
-   * @param fs
+   * @param fs underlying file system
    */
   public HarFileSystem(FileSystem fs) {
-    super(fs);
+    this.fs = fs;
+    this.statistics = fs.statistics;
   }
  
   private synchronized void initializeMetadataCache(Configuration conf) {
@@ -171,6 +168,11 @@ public class HarFileSystem extends FilterFileSystem {
     }
   }
 
+  @Override
+  public Configuration getConf() {
+    return fs.getConf();
+  }
+
   // get the version of the filesystem from the masterindex file
   // the version is currently not useful since its the first version
   // of archives
@@ -236,8 +238,7 @@ public class HarFileSystem extends FilterFileSystem {
       throw new IOException("query component in Path not supported  " + rawURI);
     }
  
-    URI tmp = null;
- 
+    URI tmp;
     try {
       // convert <scheme>-<host> to <scheme>://<host>
       URI baseUri = new URI(authority.replaceFirst("-", "://"));
@@ -256,7 +257,7 @@ public class HarFileSystem extends FilterFileSystem {
     return URLDecoder.decode(str, "UTF-8");
   }
 
-  private String decodeFileName(String fname) 
+  private String decodeFileName(String fname)
     throws UnsupportedEncodingException {
     int version = metadata.getVersion();
     if (version == 2 || version == 3){
@@ -272,19 +273,30 @@ public class HarFileSystem extends FilterFileSystem {
   public Path getWorkingDirectory() {
     return new Path(uri.toString());
   }
-  
+
+  @Override
+  public Path getInitialWorkingDirectory() {
+    return getWorkingDirectory();
+  }
+
+  @Override
+  public FsStatus getStatus(Path p) throws IOException {
+    return fs.getStatus(p);
+  }
+
   /**
    * Create a har specific auth 
    * har-underlyingfs:port
-   * @param underLyingURI the uri of underlying
+   * @param underLyingUri the uri of underlying
    * filesystem
    * @return har specific auth
    */
   private String getHarAuth(URI underLyingUri) {
     String auth = underLyingUri.getScheme() + "-";
     if (underLyingUri.getHost() != null) {
-      auth += underLyingUri.getHost() + ":";
+      auth += underLyingUri.getHost();
       if (underLyingUri.getPort() != -1) {
+        auth += ":";
         auth +=  underLyingUri.getPort();
       }
     }
@@ -293,7 +305,21 @@ public class HarFileSystem extends FilterFileSystem {
     }
     return auth;
   }
-  
+
+  /**
+   * Used for delegation token related functionality. Must delegate to
+   * underlying file system.
+   */
+  @Override
+  protected URI getCanonicalUri() {
+    return fs.getCanonicalUri();
+  }
+
+  @Override
+  protected URI canonicalizeUri(URI uri) {
+    return fs.canonicalizeUri(uri);
+  }
+
   /**
    * Returns the uri of this filesystem.
    * The uri is of the form 
@@ -304,6 +330,16 @@ public class HarFileSystem extends FilterFileSystem {
     return this.uri;
   }
   
+  @Override
+  protected void checkPath(Path path) {
+    fs.checkPath(path);
+  }
+
+  @Override
+  public Path resolvePath(Path p) throws IOException {
+    return fs.resolvePath(p);
+  }
+
   /**
    * this method returns the path 
    * inside the har filesystem.
@@ -418,7 +454,7 @@ public class HarFileSystem extends FilterFileSystem {
   /**
    * Get block locations from the underlying fs and fix their
    * offsets and lengths.
-   * @param file the input filestatus to get block locations
+   * @param file the input file status to get block locations
    * @param start the start of the desired range in the contained file
    * @param len the length of the desired range
    * @return block locations for this segment of file
@@ -440,8 +476,7 @@ public class HarFileSystem extends FilterFileSystem {
   }
   
   /**
-   * the hash of the path p inside iniside
-   * the filesystem
+   * the hash of the path p inside  the filesystem
    * @param p the path in the harfilesystem
    * @return the hash code of the path.
    */
@@ -474,13 +509,9 @@ public class HarFileSystem extends FilterFileSystem {
    *          the parent path directory
    * @param statuses
    *          the list to add the children filestatuses to
-   * @param children
-   *          the string list of children for this parent
-   * @param archiveIndexStat
-   *          the archive index filestatus
    */
-  private void fileStatusesInIndex(HarStatus parent, List<FileStatus> statuses,
-      List<String> children) throws IOException {
+  private void fileStatusesInIndex(HarStatus parent, List<FileStatus> statuses)
+          throws IOException {
     String parentString = parent.getName();
     if (!parentString.endsWith(Path.SEPARATOR)){
         parentString += Path.SEPARATOR;
@@ -546,7 +577,7 @@ public class HarFileSystem extends FilterFileSystem {
   // stored in a single line in the index files 
   // the format is of the form 
   // filename "dir"/"file" partFileName startIndex length 
-  // <space seperated children>
+  // <space separated children>
   private class HarStatus {
     boolean isDir;
     String name;
@@ -665,7 +696,6 @@ public class HarFileSystem extends FilterFileSystem {
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     // get the fs DataInputStream for the underlying file
     HarStatus hstatus = getFileHarStatus(f);
-    // we got it.. woo hooo!!! 
     if (hstatus.isDir()) {
       throw new FileNotFoundException(f + " : not a file in " +
                 archivePath);
@@ -674,20 +704,39 @@ public class HarFileSystem extends FilterFileSystem {
         hstatus.getPartName()),
         hstatus.getStartIndex(), hstatus.getLength(), bufferSize);
   }
- 
+
+  /**
+   * Used for delegation token related functionality. Must delegate to
+   * underlying file system.
+   */
+  @Override
+  public FileSystem[] getChildFileSystems() {
+    return new FileSystem[]{fs};
+  }
+
   @Override
-  public FSDataOutputStream create(Path f,
-      FsPermission permission,
-      boolean overwrite,
-      int bufferSize,
-      short replication,
-      long blockSize,
+  public FSDataOutputStream create(Path f, FsPermission permission,
+      boolean overwrite, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {
     throw new IOException("Har: create not allowed.");
   }
-  
+
+  @SuppressWarnings("deprecation")
+  @Override
+  public FSDataOutputStream createNonRecursive(Path f, boolean overwrite,
+      int bufferSize, short replication, long blockSize, Progressable progress)
+      throws IOException {
+    throw new IOException("Har: create not allowed.");
+  }
+
+  @Override
+  public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException {
+    throw new IOException("Har: append not allowed.");
+  }
+
   @Override
   public void close() throws IOException {
+    super.close();
     if (fs != null) {
       try {
         fs.close();
@@ -703,9 +752,19 @@ public class HarFileSystem extends FilterFileSystem {
    */
   @Override
   public boolean setReplication(Path src, short replication) throws IOException{
-    throw new IOException("Har: setreplication not allowed");
+    throw new IOException("Har: setReplication not allowed");
   }
-  
+
+  @Override
+  public boolean rename(Path src, Path dst) throws IOException {
+    throw new IOException("Har: rename not allowed");
+  }
+
+  @Override
+  public FSDataOutputStream append(Path f) throws IOException {
+    throw new IOException("Har: append not allowed");
+  }
+
   /**
    * Not implemented.
    */
@@ -713,7 +772,7 @@ public class HarFileSystem extends FilterFileSystem {
   public boolean delete(Path f, boolean recursive) throws IOException { 
     throw new IOException("Har: delete not allowed");
   }
-  
+
   /**
    * liststatus returns the children of a directory 
    * after looking up the index files.
@@ -732,7 +791,7 @@ public class HarFileSystem extends FilterFileSystem {
       throw new FileNotFoundException("File " + f + " not found in " + archivePath);
     }
     if (hstatus.isDir()) {
-      fileStatusesInIndex(hstatus, statuses, hstatus.children);
+      fileStatusesInIndex(hstatus, statuses);
     } else {
       statuses.add(toFileStatus(hstatus, null));
     }
@@ -747,7 +806,7 @@ public class HarFileSystem extends FilterFileSystem {
   public Path getHomeDirectory() {
     return new Path(uri.toString());
   }
-  
+
   @Override
   public void setWorkingDirectory(Path newDir) {
     //does nothing.
@@ -765,11 +824,17 @@ public class HarFileSystem extends FilterFileSystem {
    * not implemented.
    */
   @Override
-  public void copyFromLocalFile(boolean delSrc, Path src, Path dst) throws 
-        IOException {
+  public void copyFromLocalFile(boolean delSrc, boolean overwrite,
+      Path src, Path dst) throws IOException {
     throw new IOException("Har: copyfromlocalfile not allowed");
   }
-  
+
+  @Override
+  public void copyFromLocalFile(boolean delSrc, boolean overwrite,
+      Path[] srcs, Path dst) throws IOException {
+    throw new IOException("Har: copyfromlocalfile not allowed");
+  }
+
   /**
    * copies the file in the har filesystem to a local file.
    */
@@ -806,11 +871,16 @@ public class HarFileSystem extends FilterFileSystem {
     throw new IOException("Har: setowner not allowed");
   }
 
+  @Override
+  public void setTimes(Path p, long mtime, long atime) throws IOException {
+    throw new IOException("Har: setTimes not allowed");
+  }
+
   /**
    * Not implemented.
    */
   @Override
-  public void setPermission(Path p, FsPermission permisssion) 
+  public void setPermission(Path p, FsPermission permission)
     throws IOException {
     throw new IOException("Har: setPermission not allowed");
   }
@@ -899,7 +969,7 @@ public class HarFileSystem extends FilterFileSystem {
           newlen = (int) (end - position);
         }
         // end case
-        if (newlen == 0) 
+        if (newlen == 0)
           return ret;
         ret = underLyingStream.read(b, offset, newlen);
         position += ret;
@@ -936,8 +1006,8 @@ public class HarFileSystem extends FilterFileSystem {
 
       @Override
       public boolean seekToNewSource(long targetPos) throws IOException {
-        //do not need to implement this
-        // hdfs in itself does seektonewsource 
+        // do not need to implement this
+        // hdfs in itself does seektonewsource
         // while reading.
         return false;
       }
@@ -973,14 +1043,12 @@ public class HarFileSystem extends FilterFileSystem {
       }
 
       @Override
-      public void setReadahead(Long readahead)
-          throws IOException, UnsupportedEncodingException {
+      public void setReadahead(Long readahead) throws IOException {
         underLyingStream.setReadahead(readahead);
       }
 
       @Override
-      public void setDropBehind(Boolean dropBehind)
-          throws IOException, UnsupportedEncodingException {
+      public void setDropBehind(Boolean dropBehind) throws IOException {
         underLyingStream.setDropBehind(dropBehind);
       }
     }
@@ -998,19 +1066,6 @@ public class HarFileSystem extends FilterFileSystem {
         long length, int bufsize) throws IOException {
         super(new HarFsInputStream(fs, p, start, length, bufsize));
     }
-
-    /**
-     * constructor for har input stream.
-     * @param fs the underlying filesystem
-     * @param p the path in the underlying file system
-     * @param start the start position in the part file
-     * @param length the length of valid data in the part file.
-     * @throws IOException
-     */
-    public HarFSDataInputStream(FileSystem fs, Path  p, long start, long length)
-      throws IOException {
-        super(new HarFsInputStream(fs, p, start, length, 0));
-    }
   }
 
   private class HarMetaData {
@@ -1057,7 +1112,7 @@ public class HarFileSystem extends FilterFileSystem {
     }
 
     private void parseMetaData() throws IOException {
-      Text line;
+      Text line = new Text();
       long read;
       FSDataInputStream in = null;
       LineReader lin = null;
@@ -1067,7 +1122,6 @@ public class HarFileSystem extends FilterFileSystem {
         FileStatus masterStat = fs.getFileStatus(masterIndexPath);
         masterIndexTimestamp = masterStat.getModificationTime();
         lin = new LineReader(in, getConf());
-        line = new Text();
         read = lin.readLine(line);
 
         // the first line contains the version of the index file
@@ -1081,7 +1135,7 @@ public class HarFileSystem extends FilterFileSystem {
         }
 
         // each line contains a hashcode range and the index file name
-        String[] readStr = null;
+        String[] readStr;
         while(read < masterStat.getLen()) {
           int b = lin.readLine(line);
           read += b;
@@ -1093,6 +1147,9 @@ public class HarFileSystem extends FilterFileSystem {
               endHash));
           line.clear();
         }
+      } catch (IOException ioe) {
+        LOG.warn("Encountered exception ", ioe);
+        throw ioe;
       } finally {
         IOUtils.cleanup(LOG, lin, in);
       }
@@ -1144,4 +1201,43 @@ public class HarFileSystem extends FilterFileSystem {
         return size() > MAX_ENTRIES;
     }
   }
+
+  @SuppressWarnings("deprecation")
+  @Override
+  public FsServerDefaults getServerDefaults() throws IOException {
+    return fs.getServerDefaults();
+  }
+
+  @Override
+  public FsServerDefaults getServerDefaults(Path f) throws IOException {
+    return fs.getServerDefaults(f);
+  }
+
+  @Override
+  public long getUsed() throws IOException{
+    return fs.getUsed();
+  }
+
+  @SuppressWarnings("deprecation")
+  @Override
+  public long getDefaultBlockSize() {
+    return fs.getDefaultBlockSize();
+  }
+
+  @SuppressWarnings("deprecation")
+  @Override
+  public long getDefaultBlockSize(Path f) {
+    return fs.getDefaultBlockSize(f);
+  }
+
+  @SuppressWarnings("deprecation")
+  @Override
+  public short getDefaultReplication() {
+    return fs.getDefaultReplication();
+  }
+
+  @Override
+  public short getDefaultReplication(Path f) {
+    return fs.getDefaultReplication(f);
+  }
 }
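
Since HarFileSystem now extends FileSystem directly rather than FilterFileSystem, every call that must reach the underlying file system is delegated explicitly above. From a client's point of view nothing changes; a minimal read-only sketch (archive path and file names are hypothetical) might look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HarReadDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Reads go through the har scheme; writes such as create(), append()
    // and rename() throw IOException, as the overrides above show.
    Path inHar = new Path("har:///user/alice/logs.har/2013/10/part-00000");
    FileSystem harFs = inHar.getFileSystem(conf);
    FSDataInputStream in = harFs.open(inHar);
    try {
      byte[] buf = new byte[4096];
      int n = in.read(buf);
      System.out.println("read " + n + " bytes");
    } finally {
      in.close();
    }
  }
}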

+ 25 - 17
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java

@@ -218,10 +218,13 @@ public class Path implements Comparable {
    */
   public static Path mergePaths(Path path1, Path path2) {
     String path2Str = path2.toUri().getPath();
-    if(hasWindowsDrive(path2Str)) {
-      path2Str = path2Str.substring(path2Str.indexOf(':')+1);
-    }
-    return new Path(path1 + path2Str);
+    path2Str = path2Str.substring(startPositionWithoutWindowsDrive(path2Str));
+    // Add path components explicitly, because simply concatenating two path
+    // strings is not safe, for example:
+    // "/" + "/foo" yields "//foo", which will be parsed as authority in Path
+    return new Path(path1.toUri().getScheme(), 
+        path1.toUri().getAuthority(), 
+        path1.toUri().getPath() + path2Str);
   }
 
   /**
@@ -247,8 +250,8 @@ public class Path implements Comparable {
     }
     
     // trim trailing slash from non-root path (ignoring windows drive)
-    int minLength = hasWindowsDrive(path) ? 4 : 1;
-    if (path.length() > minLength && path.endsWith("/")) {
+    int minLength = startPositionWithoutWindowsDrive(path) + 1;
+    if (path.length() > minLength && path.endsWith(SEPARATOR)) {
       path = path.substring(0, path.length()-1);
     }
     
@@ -259,6 +262,14 @@ public class Path implements Comparable {
     return (WINDOWS && hasDriveLetterSpecifier.matcher(path).find());
   }
 
+  private static int startPositionWithoutWindowsDrive(String path) {
+    if (hasWindowsDrive(path)) {
+      return path.charAt(0) ==  SEPARATOR_CHAR ? 3 : 2;
+    } else {
+      return 0;
+    }
+  }
+  
   /**
    * Determine whether a given path string represents an absolute path on
    * Windows. e.g. "C:/a/b" is an absolute path. "C:a/b" is not.
@@ -270,13 +281,11 @@ public class Path implements Comparable {
    */
   public static boolean isWindowsAbsolutePath(final String pathString,
                                               final boolean slashed) {
-    int start = (slashed ? 1 : 0);
-
-    return
-        hasWindowsDrive(pathString) &&
-        pathString.length() >= (start + 3) &&
-        ((pathString.charAt(start + 2) == SEPARATOR_CHAR) ||
-          (pathString.charAt(start + 2) == '\\'));
+    int start = startPositionWithoutWindowsDrive(pathString);
+    return start > 0
+        && pathString.length() > start
+        && ((pathString.charAt(start) == SEPARATOR_CHAR) ||
+            (pathString.charAt(start) == '\\'));
   }
 
   /** Convert this to a URI. */
@@ -300,7 +309,7 @@ public class Path implements Comparable {
    *  True if the path component (i.e. directory) of this URI is absolute.
    */
   public boolean isUriPathAbsolute() {
-    int start = hasWindowsDrive(uri.getPath()) ? 3 : 0;
+    int start = startPositionWithoutWindowsDrive(uri.getPath());
     return uri.getPath().startsWith(SEPARATOR, start);
    }
   
@@ -334,7 +343,7 @@ public class Path implements Comparable {
   public Path getParent() {
     String path = uri.getPath();
     int lastSlash = path.lastIndexOf('/');
-    int start = hasWindowsDrive(path) ? 3 : 0;
+    int start = startPositionWithoutWindowsDrive(path);
     if ((path.length() == start) ||               // empty path
         (lastSlash == start && path.length() == start+1)) { // at root
       return null;
@@ -343,8 +352,7 @@ public class Path implements Comparable {
     if (lastSlash==-1) {
       parent = CUR_DIR;
     } else {
-      int end = hasWindowsDrive(path) ? 3 : 0;
-      parent = path.substring(0, lastSlash==end?end+1:lastSlash);
+      parent = path.substring(0, lastSlash==start?start+1:lastSlash);
     }
     return new Path(uri.getScheme(), uri.getAuthority(), parent);
   }
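
A short sketch of the hazard the rewritten mergePaths avoids. Concatenating the raw strings "/" and "/foo" produces "//foo", which the Path string constructor parses as an authority, while the component-wise merge keeps it a plain directory (example values are arbitrary):

import org.apache.hadoop.fs.Path;

public class MergePathsDemo {
  public static void main(String[] args) {
    // Component-wise merge: the double slash is normalized away.
    System.out.println(Path.mergePaths(new Path("/"), new Path("/foo")));  // /foo

    // Old-style string concatenation: "foo" ends up as the authority.
    Path naive = new Path("/" + "/foo");
    System.out.println(naive.toUri().getAuthority());  // foo
  }
}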

+ 3 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Seekable.java

@@ -22,7 +22,9 @@ import java.io.*;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
-/** Stream that permits seeking. */
+/**
+ *  Stream that permits seeking.
+ */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public interface Seekable {

+ 7 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java

@@ -84,11 +84,16 @@ abstract class CommandWithDestination extends FsCommand {
    */
   protected void getLocalDestination(LinkedList<String> args)
   throws IOException {
+    String pathString = (args.size() < 2) ? Path.CUR_DIR : args.removeLast();
     try {
-      String pathString = (args.size() < 2) ? Path.CUR_DIR : args.removeLast();
       dst = new PathData(new URI(pathString), getConf());
     } catch (URISyntaxException e) {
-      throw new IOException("unexpected URISyntaxException", e);
+      if (Path.WINDOWS) {
+        // Unlike URI, PathData knows how to parse Windows drive-letter paths.
+        dst = new PathData(pathString, getConf());
+      } else {
+        throw new IOException("unexpected URISyntaxException", e);
+      }
     }
   }
 

+ 8 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java

@@ -204,13 +204,18 @@ class CopyCommands {
     // commands operating on local paths have no need for glob expansion
     @Override
     protected List<PathData> expandArgument(String arg) throws IOException {
+      List<PathData> items = new LinkedList<PathData>();
       try {
-        List<PathData> items = new LinkedList<PathData>();
         items.add(new PathData(new URI(arg), getConf()));
-        return items;
       } catch (URISyntaxException e) {
-        throw new IOException("unexpected URISyntaxException", e);
+        if (Path.WINDOWS) {
+          // Unlike URI, PathData knows how to parse Windows drive-letter paths.
+          items.add(new PathData(arg, getConf()));
+        } else {
+          throw new IOException("unexpected URISyntaxException", e);
+        }
       }
+      return items;
     }
 
     @Override
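
The Windows fallback above exists because a drive-letter path is not a valid URI, so the URI constructor rejects it before PathData ever sees it. A tiny sketch of the failure the catch block handles (the path is hypothetical):

import java.net.URI;
import java.net.URISyntaxException;

public class WindowsPathDemo {
  public static void main(String[] args) {
    try {
      // Backslashes are illegal URI characters, so this throws.
      new URI("C:\\Users\\alice\\data.txt");
    } catch (URISyntaxException e) {
      System.out.println("not a URI: " + e.getReason());
    }
  }
}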

+ 9 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java

@@ -39,11 +39,14 @@ class SetReplication extends FsCommand {
   }
   
   public static final String NAME = "setrep";
-  public static final String USAGE = "[-R] [-w] <rep> <path/file> ...";
+  public static final String USAGE = "[-R] [-w] <rep> <path> ...";
   public static final String DESCRIPTION =
-    "Set the replication level of a file.\n" +
-    "The -R flag requests a recursive change of replication level\n" +
-    "for an entire tree.";
+    "Set the replication level of a file. If <path> is a directory\n" +
+    "then the command recursively changes the replication factor of\n" +
+    "all files under the directory tree rooted at <path>.\n" +
+    "The -w flag requests that the command wait for the replication\n" +
+    "to complete. This can potentially take a very long time.\n" +
+    "The -R flag is accepted for backwards compatibility. It has no effect.";
   
   protected short newRep = 0;
   protected List<PathData> waitList = new LinkedList<PathData>();
@@ -54,7 +57,7 @@ class SetReplication extends FsCommand {
     CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "R", "w");
     cf.parse(args);
     waitOpt = cf.getOpt("w");
-    setRecursive(cf.getOpt("R"));
+    setRecursive(true);
     
     try {
       newRep = Short.parseShort(args.removeFirst());
@@ -126,4 +129,4 @@ class SetReplication extends FsCommand {
       out.println(" done");
     }
   }
-}
+}

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java

@@ -68,7 +68,7 @@ class SnapshotCommands extends FsCommand {
         throw new IllegalArgumentException("<snapshotDir> is missing.");
       } 
       if (args.size() > 2) {
-        throw new IllegalArgumentException("Too many arguements.");
+        throw new IllegalArgumentException("Too many arguments.");
       }
       if (args.size() == 2) {
         snapshotName = args.removeLast();
@@ -110,7 +110,7 @@ class SnapshotCommands extends FsCommand {
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
       if (args.size() != 2) {
-        throw new IOException("args number not 2: " + args.size());
+        throw new IllegalArgumentException("Incorrect number of arguments.");
       }
       snapshotName = args.removeLast();
     }
@@ -150,7 +150,7 @@ class SnapshotCommands extends FsCommand {
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
       if (args.size() != 3) {
-        throw new IOException("args number not 3: " + args.size());
+        throw new IllegalArgumentException("Incorrect number of arguments.");
       }
       newName = args.removeLast();
       oldName = args.removeLast();

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -568,6 +568,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
         enterNeutralMode();
         reJoinElection(0);
         break;
+      case SaslAuthenticated:
+        LOG.info("Successfully authenticated to ZooKeeper using SASL.");
+        break;
       default:
         fatalError("Unexpected Zookeeper watch event state: "
             + event.getState());

+ 5 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java

@@ -43,13 +43,15 @@ public interface HAServiceProtocol {
   public static final long versionID = 1L;
 
   /**
-   * An HA service may be in active or standby state. During
-   * startup, it is in an unknown INITIALIZING state.
+   * An HA service may be in active or standby state. During startup, it is in
+   * an unknown INITIALIZING state. During shutdown, it is in the STOPPING state
+   * and can no longer return to active/standby states.
    */
   public enum HAServiceState {
     INITIALIZING("initializing"),
     ACTIVE("active"),
-    STANDBY("standby");
+    STANDBY("standby"),
+    STOPPING("stopping");
 
     private String name;
 

+ 23 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.http;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -29,26 +28,41 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class HttpConfig {
-  private static boolean sslEnabled;
+  private static Policy policy;
+  public enum Policy {
+    HTTP_ONLY,
+    HTTPS_ONLY;
+
+    public static Policy fromString(String value) {
+      if (value.equalsIgnoreCase(CommonConfigurationKeysPublic
+              .HTTP_POLICY_HTTPS_ONLY)) {
+        return HTTPS_ONLY;
+      }
+      return HTTP_ONLY;
+    }
+  }
 
   static {
     Configuration conf = new Configuration();
-    sslEnabled = conf.getBoolean(
-        CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY,
-        CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT);
+    boolean sslEnabled = conf.getBoolean(
+            CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY,
+            CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT);
+    policy = sslEnabled ? Policy.HTTPS_ONLY : Policy.HTTP_ONLY;
   }
 
-  @VisibleForTesting
-  static void setSecure(boolean secure) {
-    sslEnabled = secure;
+  public static void setPolicy(Policy policy) {
+    HttpConfig.policy = policy;
   }
 
   public static boolean isSecure() {
-    return sslEnabled;
+    return policy == Policy.HTTPS_ONLY;
   }
 
   public static String getSchemePrefix() {
     return (isSecure()) ? "https://" : "http://";
   }
 
+  public static String getScheme(Policy policy) {
+    return policy == Policy.HTTPS_ONLY ? "https://" : "http://";
+  }
 }
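
A short sketch of the new policy API; HttpConfig is annotated private and unstable, so this is for illustration only and the string value is hypothetical:

import org.apache.hadoop.http.HttpConfig;

public class HttpPolicyDemo {
  public static void main(String[] args) {
    // fromString() is case-insensitive; anything other than HTTPS_ONLY
    // falls back to HTTP_ONLY.
    HttpConfig.Policy p = HttpConfig.Policy.fromString("https_only");
    System.out.println(HttpConfig.getScheme(p));  // https://
    HttpConfig.setPolicy(p);
    System.out.println(HttpConfig.isSecure());    // true
  }
}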

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -341,6 +341,7 @@ public class HttpServer implements FilterContainer {
       }
       listener.setHost(bindAddress);
       listener.setPort(port);
+      LOG.info("SSL is enabled on " + toString());
     } else {
       listenerStartedExternally = true;
       listener = connector;

+ 30 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java

@@ -34,6 +34,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.ConnectTimeoutException;
 
@@ -531,6 +532,15 @@ public class RetryPolicies {
       this.maxDelayBase = maxDelayBase;
     }
 
+    /**
+     * @return 0 if this is our first failover/retry (i.e., retry immediately),
+     *         sleep exponentially otherwise
+     */
+    private long getFailoverOrRetrySleepTime(int times) {
+      return times == 0 ? 0 : 
+        calculateExponentialTime(delayMillis, times, maxDelayBase);
+    }
+    
     @Override
     public RetryAction shouldRetry(Exception e, int retries,
         int failovers, boolean isIdempotentOrAtMostOnce) throws Exception {
@@ -546,11 +556,8 @@ public class RetryPolicies {
           e instanceof StandbyException ||
           e instanceof ConnectTimeoutException ||
           isWrappedStandbyException(e)) {
-        return new RetryAction(
-            RetryAction.RetryDecision.FAILOVER_AND_RETRY,
-            // retry immediately if this is our first failover, sleep otherwise
-            failovers == 0 ? 0 :
-                calculateExponentialTime(delayMillis, failovers, maxDelayBase));
+        return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY,
+            getFailoverOrRetrySleepTime(failovers));
       } else if (e instanceof SocketException ||
                  (e instanceof IOException && !(e instanceof RemoteException))) {
         if (isIdempotentOrAtMostOnce) {
@@ -561,8 +568,14 @@ public class RetryPolicies {
               "whether it was invoked");
         }
       } else {
-        return fallbackPolicy.shouldRetry(e, retries, failovers,
-            isIdempotentOrAtMostOnce);
+        RetriableException re = getWrappedRetriableException(e);
+        if (re != null) {
+          return new RetryAction(RetryAction.RetryDecision.RETRY,
+              getFailoverOrRetrySleepTime(retries));
+        } else {
+          return fallbackPolicy.shouldRetry(e, retries, failovers,
+              isIdempotentOrAtMostOnce);
+        }
       }
     }
     
@@ -596,4 +609,14 @@ public class RetryPolicies {
         StandbyException.class);
     return unwrapped instanceof StandbyException;
   }
+  
+  private static RetriableException getWrappedRetriableException(Exception e) {
+    if (!(e instanceof RemoteException)) {
+      return null;
+    }
+    Exception unwrapped = ((RemoteException)e).unwrapRemoteException(
+        RetriableException.class);
+    return unwrapped instanceof RetriableException ? 
+        (RetriableException) unwrapped : null;
+  }
 }

+ 41 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetriableException.java

@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Exception thrown by a server, typically to indicate that the server is in a
+ * state where the request temporarily cannot be processed (for example, the
+ * server is still starting up). The client may retry the request. Once the
+ * service is up, the server may be able to process a retried request.
+ */
+@InterfaceStability.Evolving
+public class RetriableException extends IOException {
+  private static final long serialVersionUID = 1915561725516487301L;
+  
+  public RetriableException(Exception e) {
+    super(e);
+  }
+  
+  public RetriableException(String msg) {
+    super(msg);
+  }
+}
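
The unwrapping path in RetryPolicies above recovers this exception by class name from the RemoteException that carried it over the wire. A minimal sketch (the message is made up):

import java.io.IOException;

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RetriableException;

public class UnwrapDemo {
  public static void main(String[] args) {
    // What the client sees after an RPC failure: the server-side exception
    // reduced to its class name and message.
    RemoteException re = new RemoteException(
        RetriableException.class.getName(), "NameNode still starting up");
    IOException unwrapped = re.unwrapRemoteException(RetriableException.class);
    System.out.println(unwrapped instanceof RetriableException);  // true
  }
}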

+ 26 - 21
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -1295,6 +1295,29 @@ public abstract class Server {
       }
     }
 
+    private Throwable getCauseForInvalidToken(IOException e) {
+      Throwable cause = e;
+      while (cause != null) {
+        if (cause instanceof RetriableException) {
+          return (RetriableException) cause;
+        } else if (cause instanceof StandbyException) {
+          return (StandbyException) cause;
+        } else if (cause instanceof InvalidToken) {
+          // FIXME: hadoop method signatures are restricting the SASL
+          // callbacks to only returning InvalidToken, but some services
+          // need to throw other exceptions (ex. NN + StandyException),
+          // so for now we'll tunnel the real exceptions via an
+          // InvalidToken's cause which normally is not set 
+          if (cause.getCause() != null) {
+            cause = cause.getCause();
+          }
+          return cause;
+        }
+        cause = cause.getCause();
+      }
+      return e;
+    }
+    
     private void saslProcess(RpcSaslProto saslMessage)
         throws WrappedRpcServerException, IOException, InterruptedException {
       if (saslContextEstablished) {
@@ -1307,29 +1330,11 @@ public abstract class Server {
         try {
           saslResponse = processSaslMessage(saslMessage);
         } catch (IOException e) {
-          IOException sendToClient = e;
-          Throwable cause = e;
-          while (cause != null) {
-            if (cause instanceof InvalidToken) {
-              // FIXME: hadoop method signatures are restricting the SASL
-              // callbacks to only returning InvalidToken, but some services
-              // need to throw other exceptions (ex. NN + StandyException),
-              // so for now we'll tunnel the real exceptions via an
-              // InvalidToken's cause which normally is not set 
-              if (cause.getCause() != null) {
-                cause = cause.getCause();
-              }
-              sendToClient = (IOException) cause;
-              break;
-            }
-            cause = cause.getCause();
-          }
           rpcMetrics.incrAuthenticationFailures();
-          String clientIP = this.toString();
           // attempting user could be null
-          AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + attemptingUser +
-            " (" + e.getLocalizedMessage() + ")");
-          throw sendToClient;
+          AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
+              + attemptingUser + " (" + e.getLocalizedMessage() + ")");
+          throw (IOException) getCauseForInvalidToken(e);
         }
         
         if (saslServer != null && saslServer.isComplete()) {
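
The FIXME above describes a tunnelling convention: the real error is carried as the cause of an InvalidToken so that the narrow SASL callback signatures are satisfied, and getCauseForInvalidToken() digs it back out. A minimal sketch of what a secret manager following that convention produces (messages are hypothetical):

import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;

public class TunnelDemo {
  public static void main(String[] args) {
    // The real condition (a standby NameNode) is hidden inside InvalidToken.
    InvalidToken outer = new InvalidToken("token validation deferred");
    outer.initCause(new StandbyException("NameNode is in standby state"));

    // What the server surfaces to the client after unwrapping.
    Throwable cause = outer.getCause();
    System.out.println(cause instanceof StandbyException);  // true
  }
}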

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java

@@ -154,4 +154,11 @@ public class CachedDNSToSwitchMapping extends AbstractDNSToSwitchMapping {
   public void reloadCachedMappings() {
     cache.clear();
   }
+
+  @Override
+  public void reloadCachedMappings(List<String> names) {
+    for (String name : names) {
+      cache.remove(name);
+    }
+  }
 }

+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java

@@ -59,4 +59,12 @@ public interface DNSToSwitchMapping {
    * will get a chance to see the new data.
    */
   public void reloadCachedMappings();
+  
+  /**
+   * Reload cached mappings on specific nodes.
+   *
+   * If there is a cache on these nodes, this method will clear it, so that 
+   * future accesses will see updated data.
+   */
+  public void reloadCachedMappings(List<String> names);
 }
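
A hypothetical implementation showing how the new per-node hook differs from the existing full reload; the class name and rack value are made up:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.net.DNSToSwitchMapping;

public class StaticRackMapping implements DNSToSwitchMapping {
  private final Map<String, String> cache = new HashMap<String, String>();

  @Override
  public List<String> resolve(List<String> names) {
    List<String> racks = new ArrayList<String>(names.size());
    for (String name : names) {
      String rack = cache.get(name);
      if (rack == null) {
        rack = "/default-rack";        // pretend this came from an external source
        cache.put(name, rack);
      }
      racks.add(rack);
    }
    return racks;
  }

  @Override
  public void reloadCachedMappings() {
    cache.clear();                     // forget everything
  }

  @Override
  public void reloadCachedMappings(List<String> names) {
    cache.keySet().removeAll(names);   // forget only the listed nodes
  }
}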

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java

@@ -269,5 +269,11 @@ public final class ScriptBasedMapping extends CachedDNSToSwitchMapping {
       // Nothing to do here, since RawScriptBasedMapping has no cache, and
       // does not inherit from CachedDNSToSwitchMapping
     }
+
+    @Override
+    public void reloadCachedMappings(List<String> names) {
+      // Nothing to do here, since RawScriptBasedMapping has no cache, and
+      // does not inherit from CachedDNSToSwitchMapping
+    }
   }
 }

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java

@@ -162,5 +162,12 @@ public class TableMapping extends CachedDNSToSwitchMapping {
         }
       }
     }
+
+    @Override
+    public void reloadCachedMappings(List<String> names) {
+      // TableMapping has to reload all mappings at once, so no chance to 
+      // reload mappings on specific nodes
+      reloadCachedMappings();
+    }
   }
 }

+ 10 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java

@@ -45,11 +45,13 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.Server.Connection;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.token.SecretManager;
-import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.security.token.TokenIdentifier;
 
 /**
  * A utility class for dealing with SASL on RPC server
@@ -267,13 +269,15 @@ public class SaslRpcServer {
       this.connection = connection;
     }
 
-    private char[] getPassword(TokenIdentifier tokenid) throws InvalidToken {
-      return encodePassword(secretManager.retrievePassword(tokenid));
+    private char[] getPassword(TokenIdentifier tokenid) throws InvalidToken,
+        StandbyException, RetriableException, IOException {
+      return encodePassword(secretManager.retriableRetrievePassword(tokenid));
     }
 
     @Override
     public void handle(Callback[] callbacks) throws InvalidToken,
-        UnsupportedCallbackException {
+        UnsupportedCallbackException, StandbyException, RetriableException,
+        IOException {
       NameCallback nc = null;
       PasswordCallback pc = null;
       AuthorizeCallback ac = null;
@@ -292,7 +296,8 @@ public class SaslRpcServer {
         }
       }
       if (pc != null) {
-        TokenIdentifier tokenIdentifier = getIdentifier(nc.getDefaultName(), secretManager);
+        TokenIdentifier tokenIdentifier = getIdentifier(nc.getDefaultName(),
+            secretManager);
         char[] password = getPassword(tokenIdentifier);
         UserGroupInformation user = null;
         user = tokenIdentifier.getUser(); // may throw exception

+ 9 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -33,6 +33,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -1333,7 +1334,14 @@ public class UserGroupInformation {
    * @return Credentials of tokens associated with this user
    */
   public synchronized Credentials getCredentials() {
-    return new Credentials(getCredentialsInternal());
+    Credentials creds = new Credentials(getCredentialsInternal());
+    Iterator<Token<?>> iter = creds.getAllTokens().iterator();
+    while (iter.hasNext()) {
+      if (iter.next() instanceof Token.PrivateToken) {
+        iter.remove();
+      }
+    }
+    return creds;
   }
   
   /**

+ 12 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java

@@ -53,6 +53,8 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
     "ssl.{0}.keystore.location";
   public static final String SSL_KEYSTORE_PASSWORD_TPL_KEY =
     "ssl.{0}.keystore.password";
+  public static final String SSL_KEYSTORE_KEYPASSWORD_TPL_KEY =
+    "ssl.{0}.keystore.keypassword";
   public static final String SSL_KEYSTORE_TYPE_TPL_KEY =
     "ssl.{0}.keystore.type";
 
@@ -136,7 +138,7 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
       conf.get(resolvePropertyName(mode, SSL_KEYSTORE_TYPE_TPL_KEY),
                DEFAULT_KEYSTORE_TYPE);
     KeyStore keystore = KeyStore.getInstance(keystoreType);
-    String keystorePassword = null;
+    String keystoreKeyPassword = null;
     if (requireClientCert || mode == SSLFactory.Mode.SERVER) {
       String locationProperty =
         resolvePropertyName(mode, SSL_KEYSTORE_LOCATION_TPL_KEY);
@@ -147,11 +149,17 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
       }
       String passwordProperty =
         resolvePropertyName(mode, SSL_KEYSTORE_PASSWORD_TPL_KEY);
-      keystorePassword = conf.get(passwordProperty, "");
+      String keystorePassword = conf.get(passwordProperty, "");
       if (keystorePassword.isEmpty()) {
         throw new GeneralSecurityException("The property '" + passwordProperty +
           "' has not been set in the ssl configuration file.");
       }
+      String keyPasswordProperty =
+        resolvePropertyName(mode, SSL_KEYSTORE_KEYPASSWORD_TPL_KEY);
+      // Key password defaults to the same value as store password for
+      // compatibility with legacy configurations that did not use a separate
+      // configuration property for key password.
+      keystoreKeyPassword = conf.get(keyPasswordProperty, keystorePassword);
       LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation);
 
       InputStream is = new FileInputStream(keystoreLocation);
@@ -167,8 +175,8 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
     KeyManagerFactory keyMgrFactory = KeyManagerFactory
         .getInstance(SSLFactory.SSLCERTIFICATE);
       
-    keyMgrFactory.init(keystore, (keystorePassword != null) ?
-                                 keystorePassword.toCharArray() : null);
+    keyMgrFactory.init(keystore, (keystoreKeyPassword != null) ?
+                                 keystoreKeyPassword.toCharArray() : null);
     keyManagers = keyMgrFactory.getKeyManagers();
 
     //trust store
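
For server mode the property templates above resolve to ssl.server.keystore.keypassword and ssl.server.keystore.password. A sketch of the fallback behaviour (values are hypothetical, and only the lookup is shown, not the full factory):

import org.apache.hadoop.conf.Configuration;

public class KeyPasswordDemo {
  public static void main(String[] args) {
    Configuration sslConf = new Configuration(false);
    sslConf.set("ssl.server.keystore.location", "/etc/hadoop/server.jks");
    sslConf.set("ssl.server.keystore.password", "storepass");
    // Comment this line out and keyPassword falls back to "storepass",
    // preserving compatibility with configurations that predate the new key.
    sslConf.set("ssl.server.keystore.keypassword", "keypass");

    String storePassword = sslConf.get("ssl.server.keystore.password", "");
    String keyPassword = sslConf.get("ssl.server.keystore.keypassword",
        storePassword);
    System.out.println(keyPassword);  // keypass
  }
}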

+ 24 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java

@@ -29,6 +29,7 @@ import javax.crypto.spec.SecretKeySpec;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
 
 
@@ -66,7 +67,29 @@ public abstract class SecretManager<T extends TokenIdentifier> {
    * @return the password to use
    * @throws InvalidToken the token was invalid
    */
-  public abstract byte[] retrievePassword(T identifier) throws InvalidToken;
+  public abstract byte[] retrievePassword(T identifier)
+      throws InvalidToken;
+  
+  /**
+   * The same functionality as {@link #retrievePassword}, except that this
+   * method can throw a {@link RetriableException} or a {@link StandbyException}
+   * to indicate that the client can retry/failover the same operation because
+   * of a temporary issue on the server side.
+   * 
+   * @param identifier the identifier to validate
+   * @return the password to use
+   * @throws InvalidToken the token was invalid
+   * @throws StandbyException the server is in standby state, the client can
+   *         try other servers
+   * @throws RetriableException the token was invalid, and the server thinks 
+   *         this may be a temporary issue and suggests that the client retry
+   * @throws IOException to allow future exceptions to be added without breaking
+   *         compatibility        
+   */
+  public byte[] retriableRetrievePassword(T identifier)
+      throws InvalidToken, StandbyException, RetriableException, IOException {
+    return retrievePassword(identifier);
+  }
   
   /**
    * Create an empty token identifier.

+ 19 - 17
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java

@@ -19,31 +19,20 @@
 package org.apache.hadoop.security.token;
 
 import com.google.common.collect.Maps;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInput;
-import java.io.DataInputStream;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.Map;
-import java.util.ServiceLoader;
-
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-  
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparator;
-import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.io.*;
 import org.apache.hadoop.util.ReflectionUtils;
 
+import java.io.*;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.ServiceLoader;
+
 /**
  * The client-side form of the token.
  */
@@ -195,6 +184,19 @@ public class Token<T extends TokenIdentifier> implements Writable {
     service = newService;
   }
 
+  /**
+   * Indicates whether the token is a clone. Used by the HA failover proxy
+   * to indicate that a token should not be visible to the user via
+   * UGI.getCredentials().
+   */
+  @InterfaceAudience.Private
+  @InterfaceStability.Unstable
+  public static class PrivateToken<T extends TokenIdentifier> extends Token<T> {
+    public PrivateToken(Token<T> token) {
+      super(token);
+    }
+  }
+
   @Override
   public void readFields(DataInput in) throws IOException {
     int len = WritableUtils.readVInt(in);

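A short, hypothetical illustration of the intended use: an HA failover proxy clones a token as a PrivateToken before adding it to the current user's credentials, so RPC can still use it while callers of UGI.getCredentials() never see it. The helper name is illustrative:

    import java.io.IOException;

    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    public final class PrivateTokenSketch {
      private PrivateTokenSketch() {}

      // Wrap and register a token without exposing it through UGI.getCredentials().
      public static <T extends TokenIdentifier> void addHiddenToken(Token<T> original)
          throws IOException {
        Token<T> hidden = new Token.PrivateToken<T>(original);
        UserGroupInformation.getCurrentUser().addToken(hidden);
      }
    }
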
+ 21 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java

@@ -45,7 +45,7 @@ import org.apache.hadoop.util.Time;
 
 import com.google.common.base.Preconditions;
 
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "Hive"})
 @InterfaceStability.Evolving
 public abstract 
 class AbstractDelegationTokenSecretManager<TokenIdent 
@@ -289,20 +289,30 @@ extends AbstractDelegationTokenIdentifier>
         + tokenRenewInterval, password, getTrackingIdIfEnabled(identifier)));
     return password;
   }
-
-  @Override
-  public synchronized byte[] retrievePassword(TokenIdent identifier)
+  
+  /**
+   * Find the DelegationTokenInformation for the given token id, and verify
+   * whether the token has expired. Note that this method should be called
+   * while holding the secret manager's monitor.
+   */
+  protected DelegationTokenInformation checkToken(TokenIdent identifier)
       throws InvalidToken {
+    assert Thread.holdsLock(this);
     DelegationTokenInformation info = currentTokens.get(identifier);
     if (info == null) {
       throw new InvalidToken("token (" + identifier.toString()
           + ") can't be found in cache");
     }
-    long now = Time.now();
-    if (info.getRenewDate() < now) {
+    if (info.getRenewDate() < Time.now()) {
       throw new InvalidToken("token (" + identifier.toString() + ") is expired");
     }
-    return info.getPassword();
+    return info;
+  }
+  
+  @Override
+  public synchronized byte[] retrievePassword(TokenIdent identifier)
+      throws InvalidToken {
+    return checkToken(identifier).getPassword();
   }
 
   protected String getTrackingIdIfEnabled(TokenIdent ident) {
@@ -444,6 +454,10 @@ extends AbstractDelegationTokenIdentifier>
     byte[] password;
     String trackingId;
 
+    public DelegationTokenInformation(long renewDate, byte[] password) {
+      this(renewDate, password, null);
+    }
+
     public DelegationTokenInformation(long renewDate, byte[] password,
         String trackingId) {
       this.renewDate = renewDate;

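A hedged sketch of how a subclass might build on this refactoring, combining the new checkToken() helper with the retriable hook from SecretManager; the standby check is a placeholder and the constructor simply forwards the standard intervals:

    import java.io.IOException;

    import org.apache.hadoop.ipc.RetriableException;
    import org.apache.hadoop.ipc.StandbyException;
    import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
    import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;

    public abstract class HADelegationTokenSecretManagerSketch<TokenIdent extends AbstractDelegationTokenIdentifier>
        extends AbstractDelegationTokenSecretManager<TokenIdent> {

      protected HADelegationTokenSecretManagerSketch(long keyUpdateInterval,
          long tokenMaxLifetime, long tokenRenewInterval, long removerScanInterval) {
        super(keyUpdateInterval, tokenMaxLifetime, tokenRenewInterval,
            removerScanInterval);
      }

      // Placeholder for real HA state tracking.
      protected abstract boolean isStandby();

      @Override
      public synchronized byte[] retriableRetrievePassword(TokenIdent identifier)
          throws InvalidToken, StandbyException, RetriableException, IOException {
        if (isStandby()) {
          throw new StandbyException("delegation token secret manager is not active");
        }
        // retrievePassword() delegates to checkToken(), which validates presence
        // and expiry while the monitor is held; the synchronized override keeps
        // that invariant.
        return retrievePassword(identifier);
      }
    }
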
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java

@@ -154,7 +154,7 @@ public class ReflectionUtils {
    * @param stream the stream to
    * @param title a string title for the stack trace
    */
-  public static void printThreadInfo(PrintWriter stream,
+  public synchronized static void printThreadInfo(PrintWriter stream,
                                      String title) {
     final int STACK_DEPTH = 20;
     boolean contention = threadBean.isThreadContentionMonitoringEnabled();

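A trivial usage sketch of the now-synchronized helper (the title string is arbitrary); concurrent callers no longer interleave their output:

    import java.io.PrintWriter;

    import org.apache.hadoop.util.ReflectionUtils;

    public class ThreadDumpSketch {
      public static void main(String[] args) {
        // Writes stack traces of all live threads to the given writer; the
        // method-level lock keeps dumps from different threads separate.
        ReflectionUtils.printThreadInfo(new PrintWriter(System.out, true), "diagnostics");
      }
    }
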
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/native/native.vcxproj

@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?>
+<?xml version="1.0" encoding="utf-8"?>
 
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto

@@ -60,8 +60,8 @@ message RequestHeaderProto {
    * ProtocolInfoProto) since they reuse the connection; in this case
    * the declaringClassProtocolName field is set to the ProtocolInfoProto
    */
-  required string declaringClassProtocolName = 3;
+  required string declaringClassProtocolName = 2;
   
   /** protocol version of class declaring the called method */
-  required uint64 clientProtocolVersion = 4;
+  required uint64 clientProtocolVersion = 3;
 }

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto

@@ -62,7 +62,7 @@ message RpcRequestHeaderProto { // the header for the RpcRequest
 
   optional RpcKindProto rpcKind = 1;
   optional OperationProto rpcOp = 2;
-  required uint32 callId = 3; // a sequence number that is sent back in response
+  required sint32 callId = 3; // a sequence number that is sent back in response
   required bytes clientId = 4; // Globally unique client ID
   // clientId + callId uniquely identifies a request
   // retry count, 1 means this is the first retry

+ 20 - 0
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -500,6 +500,11 @@
   </description>
 </property>
 
+<property>
+  <name>fs.swift.impl</name>
+  <value>org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem</value>
+  <description>The implementation class of the OpenStack Swift Filesystem</description>
+</property>
 
 <property>
   <name>fs.automatic.close</name>
@@ -1226,4 +1231,19 @@
   </description>
 </property>
 
+<property>
+  <name>nfs3.server.port</name>
+  <value>2049</value>
+  <description>
+      Specify the port number used by Hadoop NFS.
+  </description>
+</property>
+
+<property>
+  <name>nfs3.mountd.port</name>
+  <value>4242</value>
+  <description>
+      Specify the port number used by Hadoop mount daemon.
+  </description>
+</property>
 </configuration>

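A small sketch reading the new defaults through the Configuration API; the key names come from the hunk above, and the fallback values mirror the shipped defaults:

    import org.apache.hadoop.conf.Configuration;

    public class NewCoreDefaultsSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(); // loads core-default.xml
        int nfsPort = conf.getInt("nfs3.server.port", 2049);
        int mountdPort = conf.getInt("nfs3.mountd.port", 4242);
        String swiftImpl = conf.get("fs.swift.impl");
        System.out.println("nfs3.server.port=" + nfsPort
            + ", nfs3.mountd.port=" + mountdPort
            + ", fs.swift.impl=" + swiftImpl);
      }
    }
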
+ 7 - 3
hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm

@@ -311,7 +311,7 @@ Hadoop MapReduce Next Generation - Cluster Setup
 | | | Only applicable if log-aggregation is enabled. |
 *-------------------------+-------------------------+------------------------+
 | <<<yarn.nodemanager.aux-services>>> | | |
-| | mapreduce.shuffle  | |
+| | mapreduce_shuffle  | |
 | | | Shuffle service that needs to be set for Map Reduce applications. |
 *-------------------------+-------------------------+------------------------+
 
@@ -854,8 +854,10 @@ KVNO Timestamp         Principal
 | | The container process has the same Unix user as the NodeManager.  |
 *--------------------------------------+--------------------------------------+
 | <<<LinuxContainerExecutor>>>               | |
-| | Supported only on GNU/Linux, this executor runs the containers as the |
-| | user who submitted the application. It requires all user accounts to be |
+| | Supported only on GNU/Linux, this executor runs the containers as either the |
+| | YARN user who submitted the application (when full security is enabled) or |
+| | as a dedicated user (defaults to nobody) when full security is not enabled. |
+| | When full security is enabled, this executor requires all user accounts to be |
 | | created on the cluster nodes where the containers are launched. It uses |
 | | a <setuid> executable that is included in the Hadoop distribution. |
 | | The NodeManager uses this executable to launch and kill containers. |
@@ -929,6 +931,8 @@ KVNO Timestamp         Principal
 *-------------------------+-------------------------+------------------------+
| <<<banned.users>>> | hdfs,yarn,mapred,bin | Banned users. |
 *-------------------------+-------------------------+------------------------+
+| <<<allowed.system.users>>> | foo,bar | Allowed system users. |
+*-------------------------+-------------------------+------------------------+
 | <<<min.user.id>>> | 1000 | Prevent other super-users. |
 *-------------------------+-------------------------+------------------------+
 

+ 9 - 4
hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm

@@ -381,17 +381,22 @@ rmr
 
 setrep
 
-   Usage: <<<hdfs dfs -setrep [-R] <path> >>>
+   Usage: <<<hdfs dfs -setrep [-R] [-w] <numReplicas> <path> >>>
 
-   Changes the replication factor of a file.
+   Changes the replication factor of a file. If <path> is a directory then
+   the command recursively changes the replication factor of all files under
+   the directory tree rooted at <path>.
 
    Options:
 
-     * The -R option will recursively increase the replication factor of files within a directory.
+     * The -w flag requests that the command wait for the replication
+       to complete. This can potentially take a very long time.
+
+     * The -R flag is accepted for backwards compatibility. It has no effect.
 
    Example:
 
-     * <<<hdfs dfs -setrep -w 3 -R /user/hadoop/dir1>>>
+     * <<<hdfs dfs -setrep -w 3 /user/hadoop/dir1>>>
 
    Exit Code:
 

+ 1 - 1
hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm

@@ -140,7 +140,7 @@ Add the following configs to your <<<yarn-site.xml>>>
 
   <property>
     <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce.shuffle</value>
+    <value>mapreduce_shuffle</value>
     <description>shuffle service that needs to be set for Map Reduce to run </description>
   </property>
 +---+

+ 8 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java

@@ -21,11 +21,10 @@ package org.apache.hadoop.cli;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.cli.util.*;
-import org.apache.hadoop.cli.util.CLITestCmd;
-import org.apache.hadoop.cli.util.CLICommand;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -369,6 +368,7 @@ public class CLITestHelper {
     CLITestData td = null;
     ArrayList<CLICommand> testCommands = null;
     ArrayList<CLICommand> cleanupCommands = null;
+    boolean runOnWindows = true;
     
     @Override
     public void startDocument() throws SAXException {
@@ -399,6 +399,8 @@ public class CLITestHelper {
         throws SAXException {
       if (qName.equals("description")) {
         td.setTestDesc(charString);
+      } else if (qName.equals("windows")) {
+          runOnWindows = Boolean.parseBoolean(charString);
       } else if (qName.equals("test-commands")) {
         td.setTestCommands(testCommands);
         testCommands = null;
@@ -420,8 +422,11 @@ public class CLITestHelper {
       } else if (qName.equals("expected-output")) {
         comparatorData.setExpectedOutput(charString);
       } else if (qName.equals("test")) {
-        testsFromConfigFile.add(td);
+        if (!Shell.WINDOWS || runOnWindows) {
+          testsFromConfigFile.add(td);
+        }
         td = null;
+        runOnWindows = true;
       } else if (qName.equals("mode")) {
         testMode = charString;
         if (!testMode.equals(TESTMODE_NOCOMPARE) &&

+ 8 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSMainOperationsBaseTest.java

@@ -944,14 +944,20 @@ public abstract class FSMainOperationsBaseTest extends FileSystemTestHelper {
       rename(src, dst, false, true, false, Rename.NONE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException e) {
-      Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
+      IOException ioException = unwrapException(e);
+      if (!(ioException instanceof FileNotFoundException)) {
+        throw ioException;
+      }
     }
 
     try {
       rename(src, dst, false, true, false, Rename.OVERWRITE);
       Assert.fail("Expected exception was not thrown");
     } catch (IOException e) {
-      Assert.assertTrue(unwrapException(e) instanceof FileNotFoundException);
+      IOException ioException = unwrapException(e);
+      if (!(ioException instanceof FileNotFoundException)) {
+        throw ioException;
+      }
     }
   }
 

+ 143 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java

@@ -22,6 +22,7 @@ import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.EnumSet;
+import java.util.NoSuchElementException;
 
 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -30,6 +31,7 @@ import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.junit.After;
 import org.junit.Assert;
+import static org.junit.Assert.*;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -92,7 +94,7 @@ public abstract class FileContextMainOperationsBaseTest  {
     }     
   };
   
-  private static byte[] data = getFileData(numBlocks,
+  private static final byte[] data = getFileData(numBlocks,
       getDefaultBlockSize());
   
   @Before
@@ -107,7 +109,8 @@ public abstract class FileContextMainOperationsBaseTest  {
   
   @After
   public void tearDown() throws Exception {
-    fc.delete(new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc), new Path("test")), true);
+    boolean del = fc.delete(new Path(fileContextTestHelper.getAbsoluteTestRootPath(fc), new Path("test")), true);
+    assertTrue(del);
     fc.delete(localFsRootPath, true);
   }
   
@@ -194,6 +197,14 @@ public abstract class FileContextMainOperationsBaseTest  {
     fc.setWorkingDirectory(absoluteDir);
     Assert.assertEquals(absoluteDir, fc.getWorkingDirectory());
 
+    Path aRegularFile = new Path("aRegularFile");
+    createFile(aRegularFile);
+    try {
+      fc.setWorkingDirectory(aRegularFile);
+      fail("An IOException expected.");
+    } catch (IOException ioe) {
+      // okay
+    }
   }
   
   @Test
@@ -1195,6 +1206,136 @@ public abstract class FileContextMainOperationsBaseTest  {
         return true;
       }
     return false;
+ }
+
+  @Test
+  public void testOpen2() throws IOException {
+    final Path rootPath = getTestRootPath(fc, "test");
+    //final Path rootPath = getAbsoluteTestRootPath(fc);
+    final Path path = new Path(rootPath, "zoo");
+    createFile(path);
+    final long length = fc.getFileStatus(path).getLen();
+    FSDataInputStream fsdis = fc.open(path, 2048);
+    try {
+      byte[] bb = new byte[(int)length];
+      fsdis.readFully(bb);
+      assertArrayEquals(data, bb);
+    } finally {
+      fsdis.close();
+    }
+  }
+
+  @Test
+  public void testSetVerifyChecksum() throws IOException {
+    final Path rootPath = getTestRootPath(fc, "test");
+    final Path path = new Path(rootPath, "zoo");
+
+    FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE),
+        Options.CreateOpts.createParent());
+    try {
+      // instruct FS to verify checksum through the FileContext:
+      fc.setVerifyChecksum(true, path);
+      out.write(data, 0, data.length);
+    } finally {
+      out.close();
+    }
+
+    // NB: underlying FS may be different (this is an abstract test),
+    // so we cannot assert .zoo.crc existence.
+    // Instead, we check that the file is read correctly:
+    FileStatus fileStatus = fc.getFileStatus(path);
+    final long len = fileStatus.getLen();
+    assertTrue(len == data.length);
+    byte[] bb = new byte[(int)len];
+    FSDataInputStream fsdis = fc.open(path);
+    try {
+      fsdis.read(bb);
+    } finally {
+      fsdis.close();
+    }
+    assertArrayEquals(data, bb);
+  }
+
+  @Test
+  public void testListCorruptFileBlocks() throws IOException {
+    final Path rootPath = getTestRootPath(fc, "test");
+    final Path path = new Path(rootPath, "zoo");
+    createFile(path);
+    try {
+      final RemoteIterator<Path> remoteIterator = fc
+          .listCorruptFileBlocks(path);
+      if (listCorruptedBlocksSupported()) {
+        assertTrue(remoteIterator != null);
+        Path p;
+        while (remoteIterator.hasNext()) {
+          p = remoteIterator.next();
+          System.out.println("corrupted block: " + p);
+        }
+        try {
+          remoteIterator.next();
+          fail();
+        } catch (NoSuchElementException nsee) {
+          // okay
+        }
+      } else {
+        fail();
+      }
+    } catch (UnsupportedOperationException uoe) {
+      if (listCorruptedBlocksSupported()) {
+        fail(uoe.toString());
+      } else {
+        // okay
+      }
+    }
+  }
+
+  protected abstract boolean listCorruptedBlocksSupported();
+
+  @Test
+  public void testDeleteOnExitUnexisting() throws IOException {
+    final Path rootPath = getTestRootPath(fc, "test");
+    final Path path = new Path(rootPath, "zoo");
+    boolean registered = fc.deleteOnExit(path);
+    // because "zoo" does not exist:
+    assertTrue(!registered);
+  }
+
+  @Test
+  public void testFileContextStatistics() throws IOException {
+    FileContext.clearStatistics();
+
+    final Path rootPath = getTestRootPath(fc, "test");
+    final Path path = new Path(rootPath, "zoo");
+    createFile(path);
+    byte[] bb = new byte[data.length];
+    FSDataInputStream fsdis = fc.open(path);
+    try {
+      fsdis.read(bb);
+    } finally {
+      fsdis.close();
+    }
+    assertArrayEquals(data, bb);
+
+    FileContext.printStatistics();
+  }
+
+  @Test
+  /*
+   * Test method
+   *  org.apache.hadoop.fs.FileContext.getFileContext(AbstractFileSystem)
+   */
+  public void testGetFileContext1() throws IOException {
+    final Path rootPath = getTestRootPath(fc, "test");
+    AbstractFileSystem asf = fc.getDefaultFileSystem();
+    // create FileContext using the protected #getFileContext(1) method:
+    FileContext fc2 = FileContext.getFileContext(asf);
+    // Now just check that this context can do something reasonable:
+    final Path path = new Path(rootPath, "zoo");
+    FSDataOutputStream out = fc2.create(path, EnumSet.of(CREATE),
+        Options.CreateOpts.createParent());
+    out.close();
+    Path pathResolved = fc2.resolvePath(path);
+    assertEquals(pathResolved.toUri().getPath(), path.toUri().getPath());
   }
   
   private Path getTestRootPath(FileContext fc, String pathString) {

+ 8 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextPermissionBase.java

@@ -35,6 +35,7 @@ import org.junit.Test;
 
 import static org.apache.hadoop.fs.FileContextTestHelper.*;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 
 /**
  * <p>
@@ -174,6 +175,13 @@ public abstract class FileContextPermissionBase {
         System.out.println("Not testing changing the group since user " +
                            "belongs to only one group.");
       }
+      
+      try {
+        fc.setOwner(f, null, null);
+        fail("Exception expected.");
+      } catch (IllegalArgumentException iae) {
+        // okay
+      }
     } 
     finally {cleanupFile(fc, f);}
   }

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemContractBaseTest.java

@@ -291,7 +291,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
   
   public void testWriteInNonExistentDirectory() throws IOException {
     Path path = path("/test/hadoop/file");
-    assertFalse("Parent doesn't exist", fs.exists(path.getParent()));
+    assertFalse("Parent exists", fs.exists(path.getParent()));
     createFile(path);
     
     assertTrue("Exists", fs.exists(path));
@@ -301,7 +301,7 @@ public abstract class FileSystemContractBaseTest extends TestCase {
 
   public void testDeleteNonExistentFile() throws IOException {
     Path path = path("/test/hadoop/file");    
-    assertFalse("Doesn't exist", fs.exists(path));
+    assertFalse("Path exists: " + path, fs.exists(path));
     assertFalse("No deletion", fs.delete(path, true));
   }
   

+ 288 - 12
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java

@@ -24,6 +24,8 @@ import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.FileReader;
 import java.io.IOException;
+import java.io.OutputStream;
+import java.net.URI;
 import java.io.PrintWriter;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -32,15 +34,20 @@ import java.util.List;
 import java.util.jar.Attributes;
 import java.util.jar.JarFile;
 import java.util.jar.Manifest;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipOutputStream;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.tools.tar.TarEntry;
+import org.apache.tools.tar.TarOutputStream;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Test;
+import static org.junit.Assert.*;
 
 public class TestFileUtil {
   private static final Log LOG = LogFactory.getLog(TestFileUtil.class);
@@ -48,14 +55,14 @@ public class TestFileUtil {
   private static final String TEST_ROOT_DIR = System.getProperty(
       "test.build.data", "/tmp") + "/fu";
   private static final File TEST_DIR = new File(TEST_ROOT_DIR);
-  private static String FILE = "x";
-  private static String LINK = "y";
-  private static String DIR = "dir";
-  private File del = new File(TEST_DIR, "del");
-  private File tmp = new File(TEST_DIR, "tmp");
-  private File dir1 = new File(del, DIR + "1");
-  private File dir2 = new File(del, DIR + "2");
-  private File partitioned = new File(TEST_DIR, "partitioned");
+  private static final String FILE = "x";
+  private static final String LINK = "y";
+  private static final String DIR = "dir";
+  private final File del = new File(TEST_DIR, "del");
+  private final File tmp = new File(TEST_DIR, "tmp");
+  private final File dir1 = new File(del, DIR + "1");
+  private final File dir2 = new File(del, DIR + "2");
+  private final File partitioned = new File(TEST_DIR, "partitioned");
 
   /**
    * Creates multiple directories for testing.
@@ -116,17 +123,17 @@ public class TestFileUtil {
    * @param contents String non-null file contents.
    * @throws IOException if an I/O error occurs.
    */
-  private void createFile(File directory, String name, String contents)
+  private File createFile(File directory, String name, String contents)
       throws IOException {
     File newFile = new File(directory, name);
     PrintWriter pw = new PrintWriter(newFile);
-
     try {
       pw.println(contents);
     }
     finally {
       pw.close();
     }
+    return newFile;
   }
 
   @Test (timeout = 30000)
@@ -553,14 +560,283 @@ public class TestFileUtil {
    * @throws IOException
    */
   @Test (timeout = 30000)
-  public void testGetDU() throws IOException {
+  public void testGetDU() throws Exception {
     setupDirs();
 
     long du = FileUtil.getDU(TEST_DIR);
     // Only two files (in partitioned).  Each has 3 characters + system-specific
     // line separator.
-    long expected = 2 * (3 + System.getProperty("line.separator").length());
+    final long expected = 2 * (3 + System.getProperty("line.separator").length());
     Assert.assertEquals(expected, du);
+    
+    // target file does not exist:
+    final File doesNotExist = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
+    long duDoesNotExist = FileUtil.getDU(doesNotExist);
+    assertEquals(0, duDoesNotExist);
+    
+    // target file is not a directory:
+    File notADirectory = new File(partitioned, "part-r-00000");
+    long duNotADirectoryActual = FileUtil.getDU(notADirectory);
+    long duNotADirectoryExpected = 3 + System.getProperty("line.separator").length();
+    assertEquals(duNotADirectoryExpected, duNotADirectoryActual);
+    
+    try {
+      // one of target files is not accessible, but the containing directory
+      // is accessible:
+      try {
+        FileUtil.chmod(notADirectory.getAbsolutePath(), "0000");
+      } catch (InterruptedException ie) {
+        // should never happen since that method never throws InterruptedException.      
+        assertNull(ie);  
+      }
+      assertFalse(notADirectory.canRead());
+      final long du3 = FileUtil.getDU(partitioned);
+      assertEquals(expected, du3);
+
+      // some target files and containing directory are not accessible:
+      try {
+        FileUtil.chmod(partitioned.getAbsolutePath(), "0000");
+      } catch (InterruptedException ie) {
+        // should never happen since that method never throws InterruptedException.      
+        assertNull(ie);  
+      }
+      assertFalse(partitioned.canRead());
+      final long du4 = FileUtil.getDU(partitioned);
+      assertEquals(0, du4);
+    } finally {
+      // Restore the permissions so that we can delete the folder 
+      // in @After method:
+      FileUtil.chmod(partitioned.getAbsolutePath(), "0777", true/*recursive*/);
+    }
+  }
+  
+  @Test (timeout = 30000)
+  public void testUnTar() throws IOException {
+    setupDirs();
+    
+    // make a simple tar:
+    final File simpleTar = new File(del, FILE);
+    OutputStream os = new FileOutputStream(simpleTar); 
+    TarOutputStream tos = new TarOutputStream(os);
+    try {
+      TarEntry te = new TarEntry("foo");
+      byte[] data = "some-content".getBytes("UTF-8");
+      te.setSize(data.length);
+      tos.putNextEntry(te);
+      tos.write(data);
+      tos.closeEntry();
+      tos.flush();
+      tos.finish();
+    } finally {
+      tos.close();
+    }
+
+    // successfully untar it into an existing dir:
+    FileUtil.unTar(simpleTar, tmp);
+    // check result:
+    assertTrue(new File(tmp, "foo").exists());
+    assertEquals(12, new File(tmp, "foo").length());
+    
+    final File regularFile = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
+    regularFile.createNewFile();
+    assertTrue(regularFile.exists());
+    try {
+      FileUtil.unTar(simpleTar, regularFile);
+      assertTrue("An IOException expected.", false);
+    } catch (IOException ioe) {
+      // okay
+    }
+  }
+  
+  @Test (timeout = 30000)
+  public void testReplaceFile() throws IOException {
+    setupDirs();
+    final File srcFile = new File(tmp, "src");
+    
+    // src exists, and target does not exist:
+    srcFile.createNewFile();
+    assertTrue(srcFile.exists());
+    final File targetFile = new File(tmp, "target");
+    assertTrue(!targetFile.exists());
+    FileUtil.replaceFile(srcFile, targetFile);
+    assertTrue(!srcFile.exists());
+    assertTrue(targetFile.exists());
+
+    // src exists and target is a regular file: 
+    srcFile.createNewFile();
+    assertTrue(srcFile.exists());
+    FileUtil.replaceFile(srcFile, targetFile);
+    assertTrue(!srcFile.exists());
+    assertTrue(targetFile.exists());
+    
+    // src exists, and target is a non-empty directory: 
+    srcFile.createNewFile();
+    assertTrue(srcFile.exists());
+    targetFile.delete();
+    targetFile.mkdirs();
+    File obstacle = new File(targetFile, "obstacle");
+    obstacle.createNewFile();
+    assertTrue(obstacle.exists());
+    assertTrue(targetFile.exists() && targetFile.isDirectory());
+    try {
+      FileUtil.replaceFile(srcFile, targetFile);
+      assertTrue(false);
+    } catch (IOException ioe) {
+      // okay
+    }
+    // check up the post-condition: nothing is deleted:
+    assertTrue(srcFile.exists());
+    assertTrue(targetFile.exists() && targetFile.isDirectory());
+    assertTrue(obstacle.exists());
+  }
+  
+  @Test (timeout = 30000)
+  public void testCreateLocalTempFile() throws IOException {
+    setupDirs();
+    final File baseFile = new File(tmp, "base");
+    File tmp1 = FileUtil.createLocalTempFile(baseFile, "foo", false);
+    File tmp2 = FileUtil.createLocalTempFile(baseFile, "foo", true);
+    assertFalse(tmp1.getAbsolutePath().equals(baseFile.getAbsolutePath()));
+    assertFalse(tmp2.getAbsolutePath().equals(baseFile.getAbsolutePath()));
+    assertTrue(tmp1.exists() && tmp2.exists());
+    assertTrue(tmp1.canWrite() && tmp2.canWrite());
+    assertTrue(tmp1.canRead() && tmp2.canRead());
+    tmp1.delete();
+    tmp2.delete();
+    assertTrue(!tmp1.exists() && !tmp2.exists());
+  }
+  
+  @Test (timeout = 30000)
+  public void testUnZip() throws IOException {
+    setupDirs();
+    
+    // make a simple zip:
+    final File simpleZip = new File(del, FILE);
+    OutputStream os = new FileOutputStream(simpleZip); 
+    ZipOutputStream tos = new ZipOutputStream(os);
+    try {
+      ZipEntry ze = new ZipEntry("foo");
+      byte[] data = "some-content".getBytes("UTF-8");
+      ze.setSize(data.length);
+      tos.putNextEntry(ze);
+      tos.write(data);
+      tos.closeEntry();
+      tos.flush();
+      tos.finish();
+    } finally {
+      tos.close();
+    }
+    
+    // successfully unzip it into an existing dir:
+    FileUtil.unZip(simpleZip, tmp);
+    // check result:
+    assertTrue(new File(tmp, "foo").exists());
+    assertEquals(12, new File(tmp, "foo").length());
+    
+    final File regularFile = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
+    regularFile.createNewFile();
+    assertTrue(regularFile.exists());
+    try {
+      FileUtil.unZip(simpleZip, regularFile);
+      assertTrue("An IOException expected.", false);
+    } catch (IOException ioe) {
+      // okay
+    }
+  }  
+  
+  @Test (timeout = 30000)
+  /*
+   * Test method copy(FileSystem srcFS, Path src, File dst, boolean deleteSource, Configuration conf)
+   */
+  public void testCopy5() throws IOException {
+    setupDirs();
+    
+    URI uri = tmp.toURI();
+    Configuration conf = new Configuration();
+    FileSystem fs = FileSystem.newInstance(uri, conf);
+    final String content = "some-content";
+    File srcFile = createFile(tmp, "src", content);
+    Path srcPath = new Path(srcFile.toURI());
+    
+    // copy regular file:
+    final File dest = new File(del, "dest");
+    boolean result = FileUtil.copy(fs, srcPath, dest, false, conf);
+    assertTrue(result);
+    assertTrue(dest.exists());
+    assertEquals(content.getBytes().length 
+        + System.getProperty("line.separator").getBytes().length, dest.length());
+    assertTrue(srcFile.exists()); // should not be deleted
+    
+    // copy regular file, delete src:
+    dest.delete();
+    assertTrue(!dest.exists());
+    result = FileUtil.copy(fs, srcPath, dest, true, conf);
+    assertTrue(result);
+    assertTrue(dest.exists());
+    assertEquals(content.getBytes().length 
+        + System.getProperty("line.separator").getBytes().length, dest.length());
+    assertTrue(!srcFile.exists()); // should be deleted
+    
+    // copy a dir:
+    dest.delete();
+    assertTrue(!dest.exists());
+    srcPath = new Path(partitioned.toURI());
+    result = FileUtil.copy(fs, srcPath, dest, true, conf);
+    assertTrue(result);
+    assertTrue(dest.exists() && dest.isDirectory());
+    File[] files = dest.listFiles();
+    assertTrue(files != null);
+    assertEquals(2, files.length);
+    for (File f: files) {
+      assertEquals(3 
+          + System.getProperty("line.separator").getBytes().length, f.length());
+    }
+    assertTrue(!partitioned.exists()); // should be deleted
+  }  
+
+  @Test (timeout = 30000)
+  public void testStat2Paths1() {
+    assertNull(FileUtil.stat2Paths(null));
+    
+    FileStatus[] fileStatuses = new FileStatus[0]; 
+    Path[] paths = FileUtil.stat2Paths(fileStatuses);
+    assertEquals(0, paths.length);
+    
+    Path path1 = new Path("file://foo");
+    Path path2 = new Path("file://moo");
+    fileStatuses = new FileStatus[] { 
+        new FileStatus(3, false, 0, 0, 0, path1), 
+        new FileStatus(3, false, 0, 0, 0, path2) 
+        };
+    paths = FileUtil.stat2Paths(fileStatuses);
+    assertEquals(2, paths.length);
+    assertEquals(paths[0], path1);
+    assertEquals(paths[1], path2);
+  }
+  
+  @Test (timeout = 30000)
+  public void testStat2Paths2()  {
+    Path defaultPath = new Path("file://default");
+    Path[] paths = FileUtil.stat2Paths(null, defaultPath);
+    assertEquals(1, paths.length);
+    assertEquals(defaultPath, paths[0]);
+
+    paths = FileUtil.stat2Paths(null, null);
+    assertTrue(paths != null);
+    assertEquals(1, paths.length);
+    assertEquals(null, paths[0]);
+    
+    Path path1 = new Path("file://foo");
+    Path path2 = new Path("file://moo");
+    FileStatus[] fileStatuses = new FileStatus[] { 
+        new FileStatus(3, false, 0, 0, 0, path1), 
+        new FileStatus(3, false, 0, 0, 0, path2) 
+        };
+    paths = FileUtil.stat2Paths(fileStatuses, defaultPath);
+    assertEquals(2, paths.length);
+    assertEquals(paths[0], path1);
+    assertEquals(paths[1], path2);
   }
 
   @Test (timeout = 30000)

+ 83 - 16
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java

@@ -19,7 +19,9 @@
 package org.apache.hadoop.fs;
 
 import static org.junit.Assert.*;
+import static org.junit.Assume.assumeTrue;
 
+import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -106,7 +108,7 @@ public class TestFsShellCopy {
     Path targetDir = new Path(testRoot, "target");    
     Path filePath = new Path(testRoot, new Path("srcFile"));
     lfs.create(filePath).close();
-    checkPut(filePath, targetDir);
+    checkPut(filePath, targetDir, false);
   }
 
   @Test
@@ -119,10 +121,42 @@ public class TestFsShellCopy {
     Path dirPath = new Path(testRoot, new Path("srcDir"));
     lfs.mkdirs(dirPath);
     lfs.create(new Path(dirPath, "srcFile")).close();
-    checkPut(dirPath, targetDir);
+    checkPut(dirPath, targetDir, false);
   }
+
+  @Test
+  public void testCopyFileFromWindowsLocalPath() throws Exception {
+    assumeTrue(Path.WINDOWS);
+    String windowsTestRootPath = (new File(testRootDir.toUri().getPath()
+        .toString())).getAbsolutePath();
+    Path testRoot = new Path(windowsTestRootPath, "testPutFile");
+    lfs.delete(testRoot, true);
+    lfs.mkdirs(testRoot);
+
+    Path targetDir = new Path(testRoot, "target");
+    Path filePath = new Path(testRoot, new Path("srcFile"));
+    lfs.create(filePath).close();
+    checkPut(filePath, targetDir, true);
+  }
+
+  @Test
+  public void testCopyDirFromWindowsLocalPath() throws Exception {
+    assumeTrue(Path.WINDOWS);
+    String windowsTestRootPath = (new File(testRootDir.toUri().getPath()
+        .toString())).getAbsolutePath();
+    Path testRoot = new Path(windowsTestRootPath, "testPutDir");
+    lfs.delete(testRoot, true);
+    lfs.mkdirs(testRoot);
+
+    Path targetDir = new Path(testRoot, "target");
+    Path dirPath = new Path(testRoot, new Path("srcDir"));
+    lfs.mkdirs(dirPath);
+    lfs.create(new Path(dirPath, "srcFile")).close();
+    checkPut(dirPath, targetDir, true);
+  }
+
   
-  private void checkPut(Path srcPath, Path targetDir)
+  private void checkPut(Path srcPath, Path targetDir, boolean useWindowsPath)
   throws Exception {
     lfs.delete(targetDir, true);
     lfs.mkdirs(targetDir);    
@@ -134,37 +168,37 @@ public class TestFsShellCopy {
     
     // copy to new file, then again
     prepPut(dstPath, false, false);
-    checkPut(0, srcPath, dstPath);
+    checkPut(0, srcPath, dstPath, useWindowsPath);
     if (lfs.isFile(srcPath)) {
-      checkPut(1, srcPath, dstPath);
+      checkPut(1, srcPath, dstPath, useWindowsPath);
     } else { // directory works because it copies into the dir
       // clear contents so the check won't think there are extra paths
       prepPut(dstPath, true, true);
-      checkPut(0, srcPath, dstPath);
+      checkPut(0, srcPath, dstPath, useWindowsPath);
     }
 
     // copy to non-existent subdir
     prepPut(childPath, false, false);
-    checkPut(1, srcPath, dstPath);
+    checkPut(1, srcPath, dstPath, useWindowsPath);
 
     // copy into dir, then with another name
     prepPut(dstPath, true, true);
-    checkPut(0, srcPath, dstPath);
+    checkPut(0, srcPath, dstPath, useWindowsPath);
     prepPut(childPath, true, true);
-    checkPut(0, srcPath, childPath);
+    checkPut(0, srcPath, childPath, useWindowsPath);
 
     // try to put to pwd with existing dir
     prepPut(targetDir, true, true);
-    checkPut(0, srcPath, null);
+    checkPut(0, srcPath, null, useWindowsPath);
     prepPut(targetDir, true, true);
-    checkPut(0, srcPath, new Path("."));
+    checkPut(0, srcPath, new Path("."), useWindowsPath);
 
     // try to put to pwd with non-existent cwd
     prepPut(dstPath, false, true);
     lfs.setWorkingDirectory(dstPath);
-    checkPut(1, srcPath, null);
+    checkPut(1, srcPath, null, useWindowsPath);
     prepPut(dstPath, false, true);
-    checkPut(1, srcPath, new Path("."));
+    checkPut(1, srcPath, new Path("."), useWindowsPath);
   }
 
   private void prepPut(Path dst, boolean create,
@@ -183,12 +217,17 @@ public class TestFsShellCopy {
     }
   }
   
-  private void checkPut(int exitCode, Path src, Path dest) throws Exception {
+  private void checkPut(int exitCode, Path src, Path dest,
+      boolean useWindowsPath) throws Exception {
     String argv[] = null;
+    String srcPath = src.toString();
+    if (useWindowsPath) {
+      srcPath = (new File(srcPath)).getAbsolutePath();
+    }
     if (dest != null) {
-      argv = new String[]{ "-put", src.toString(), pathAsString(dest) };
+      argv = new String[]{ "-put", srcPath, pathAsString(dest) };
     } else {
-      argv = new String[]{ "-put", src.toString() };
+      argv = new String[]{ "-put", srcPath };
       dest = new Path(Path.CUR_DIR);
     }
     
@@ -418,6 +457,34 @@ public class TestFsShellCopy {
     assertTrue(lfs.exists(srcDir));
   }
   
+  @Test
+  public void testMoveFromWindowsLocalPath() throws Exception {
+    assumeTrue(Path.WINDOWS);
+    Path testRoot = new Path(testRootDir, "testPutFile");
+    lfs.delete(testRoot, true);
+    lfs.mkdirs(testRoot);
+
+    Path target = new Path(testRoot, "target");
+    Path srcFile = new Path(testRoot, new Path("srcFile"));
+    lfs.createNewFile(srcFile);
+
+    String winSrcFile = (new File(srcFile.toUri().getPath()
+        .toString())).getAbsolutePath();
+    shellRun(0, "-moveFromLocal", winSrcFile, target.toString());
+    assertFalse(lfs.exists(srcFile));
+    assertTrue(lfs.exists(target));
+    assertTrue(lfs.isFile(target));
+  }
+
+  @Test
+  public void testGetWindowsLocalPath() throws Exception {
+    assumeTrue(Path.WINDOWS);
+    String winDstFile = (new File(dstPath.toUri().getPath()
+        .toString())).getAbsolutePath();
+    shellRun(0, "-get", srcPath.toString(), winDstFile);
+    checkPath(dstPath, false);
+  }
+  
   private void createFile(Path ... paths) throws IOException {
     for (Path path : paths) {
       FSDataOutputStream out = lfs.create(path);

+ 176 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java

@@ -18,14 +18,155 @@
 
 package org.apache.hadoop.fs;
 
-import java.io.IOException;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Progressable;
 import org.junit.Assert;
-import static org.junit.Assert.*;
 import org.junit.Test;
 
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.EnumSet;
+import java.util.Iterator;
+
+import static org.apache.hadoop.fs.Options.ChecksumOpt;
+import static org.apache.hadoop.fs.Options.CreateOpts;
+import static org.apache.hadoop.fs.Options.Rename;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+@SuppressWarnings("deprecation")
 public class TestHarFileSystem {
+  public static final Log LOG = LogFactory.getLog(TestHarFileSystem.class);
+
+  /**
+   * FileSystem methods that must not be overridden by
+   * {@link HarFileSystem}, either because a default implementation is
+   * already available or because the method is not relevant.
+   */
+  @SuppressWarnings("deprecation")
+  private interface MustNotImplement {
+    public BlockLocation[] getFileBlockLocations(Path p, long start, long len);
+    public long getLength(Path f);
+    public FSDataOutputStream append(Path f, int bufferSize);
+    public void rename(Path src, Path dst, Rename... options);
+    public boolean exists(Path f);
+    public boolean isDirectory(Path f);
+    public boolean isFile(Path f);
+    public boolean createNewFile(Path f);
+
+    public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
+        boolean overwrite, int bufferSize, short replication, long blockSize,
+        Progressable progress) throws IOException;
+
+    public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
+        EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
+        Progressable progress) throws IOException;
+
+    public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
+        EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
+        Progressable progress, ChecksumOpt checksumOpt);
+
+    public boolean mkdirs(Path f);
+    public FSDataInputStream open(Path f);
+    public FSDataOutputStream create(Path f);
+    public FSDataOutputStream create(Path f, boolean overwrite);
+    public FSDataOutputStream create(Path f, Progressable progress);
+    public FSDataOutputStream create(Path f, short replication);
+    public FSDataOutputStream create(Path f, short replication,
+        Progressable progress);
+
+    public FSDataOutputStream create(Path f, boolean overwrite,
+        int bufferSize);
+
+    public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
+        Progressable progress);
+
+    public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
+        short replication, long blockSize);
+
+    public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
+        short replication, long blockSize, Progressable progress);
+
+    public FSDataOutputStream create(Path f, FsPermission permission,
+        EnumSet<CreateFlag> flags, int bufferSize, short replication,
+        long blockSize, Progressable progress) throws IOException;
+
+    public FSDataOutputStream create(Path f, FsPermission permission,
+        EnumSet<CreateFlag> flags, int bufferSize, short replication,
+        long blockSize, Progressable progress, ChecksumOpt checksumOpt)
+        throws IOException;
+
+    public String getName();
+    public boolean delete(Path f);
+    public short getReplication(Path src);
+    public void processDeleteOnExit();
+    public ContentSummary getContentSummary(Path f);
+    public FsStatus getStatus();
+    public FileStatus[] listStatus(Path f, PathFilter filter);
+    public FileStatus[] listStatus(Path[] files);
+    public FileStatus[] listStatus(Path[] files, PathFilter filter);
+    public FileStatus[] globStatus(Path pathPattern);
+    public FileStatus[] globStatus(Path pathPattern, PathFilter filter);
+
+    public Iterator<LocatedFileStatus> listFiles(Path path,
+        boolean isRecursive);
+
+    public Iterator<LocatedFileStatus> listLocatedStatus(Path f);
+    public Iterator<LocatedFileStatus> listLocatedStatus(Path f,
+        PathFilter filter);
+    public void copyFromLocalFile(Path src, Path dst);
+    public void moveFromLocalFile(Path[] srcs, Path dst);
+    public void moveFromLocalFile(Path src, Path dst);
+    public void copyToLocalFile(Path src, Path dst);
+    public void copyToLocalFile(boolean delSrc, Path src, Path dst,
+        boolean useRawLocalFileSystem);
+    public void moveToLocalFile(Path src, Path dst);
+    public long getBlockSize(Path f);
+    public FSDataOutputStream primitiveCreate(Path f,
+        EnumSet<CreateFlag> createFlag, CreateOpts... opts);
+    public void primitiveMkdir(Path f, FsPermission absolutePermission,
+        boolean createParent);
+    public int getDefaultPort();
+    public String getCanonicalServiceName();
+    public Token<?> getDelegationToken(String renewer) throws IOException;
+    public boolean deleteOnExit(Path f) throws IOException;
+    public boolean cancelDeleteOnExit(Path f) throws IOException;
+    public Token<?>[] addDelegationTokens(String renewer, Credentials creds)
+        throws IOException;
+    public Path fixRelativePart(Path p);
+    public void concat(Path trg, Path [] psrcs) throws IOException;
+    public FSDataOutputStream primitiveCreate(Path f,
+        FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
+        short replication, long blockSize, Progressable progress,
+        ChecksumOpt checksumOpt) throws IOException;
+    public boolean primitiveMkdir(Path f, FsPermission absolutePermission)
+        throws IOException;
+    public RemoteIterator<Path> listCorruptFileBlocks(Path path)
+        throws IOException;
+    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+        throws IOException;
+    public void createSymlink(Path target, Path link, boolean createParent)
+        throws IOException;
+    public FileStatus getFileLinkStatus(Path f) throws IOException;
+    public boolean supportsSymlinks();
+    public Path getLinkTarget(Path f) throws IOException;
+    public Path resolveLink(Path f) throws IOException;
+    public void setVerifyChecksum(boolean verifyChecksum);
+    public void setWriteChecksum(boolean writeChecksum);
+    public Path createSnapshot(Path path, String snapshotName) throws
+        IOException;
+    public void renameSnapshot(Path path, String snapshotOldName,
+        String snapshotNewName) throws IOException;
+    public void deleteSnapshot(Path path, String snapshotName)
+        throws IOException;
+  }
+
   @Test
   public void testHarUri() {
     final Configuration conf = new Configuration();
@@ -44,8 +185,7 @@ public class TestHarFileSystem {
       p.getFileSystem(conf);
       Assert.fail(p + " is an invalid path.");
     } catch (IOException e) {
-      System.out.println("GOOD: Got an exception.");
-      e.printStackTrace(System.out);
+      // Expected
     }
   }
 
@@ -133,6 +273,37 @@ public class TestHarFileSystem {
       assertEquals(b[1].getOffset(), 128);
       assertEquals(b[1].getLength(), 384);
     }
+  }
 
+  @Test
+  public void testInheritedMethodsImplemented() throws Exception {
+    int errors = 0;
+    for (Method m : FileSystem.class.getDeclaredMethods()) {
+      if (Modifier.isStatic(m.getModifiers()) ||
+          Modifier.isPrivate(m.getModifiers()) ||
+          Modifier.isFinal(m.getModifiers())) {
+        continue;
+      }
+
+      try {
+        MustNotImplement.class.getMethod(m.getName(), m.getParameterTypes());
+        try {
+          HarFileSystem.class.getDeclaredMethod(m.getName(), m.getParameterTypes());
+          LOG.error("HarFileSystem MUST not implement " + m);
+          errors++;
+        } catch (NoSuchMethodException ex) {
+          // Expected
+        }
+      } catch (NoSuchMethodException exc) {
+        try {
+          HarFileSystem.class.getDeclaredMethod(m.getName(), m.getParameterTypes());
+        } catch (NoSuchMethodException exc2) {
+          LOG.error("HarFileSystem MUST implement " + m);
+          errors++;
+        }
+      }
+    }
+    assertTrue((errors + " methods were not overridden correctly - see log"),
+        errors <= 0);
   }
 }

+ 47 - 10
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java

@@ -18,14 +18,6 @@
 
 package org.apache.hadoop.fs;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertFalse;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.URI;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Shell;
@@ -34,6 +26,14 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.junit.Assert.*;
+
 /**
  * This test class checks basic operations with {@link HarFileSystem} including
  * various initialization cases, getters, and modification methods.
@@ -69,7 +69,7 @@ public class TestHarFileSystemBasics {
   /*
    * creates and returns fully initialized HarFileSystem
    */
-  private HarFileSystem createHarFileSysten(final Configuration conf)
+  private HarFileSystem createHarFileSystem(final Configuration conf)
       throws Exception {
     localFileSystem = FileSystem.getLocal(conf);
     localFileSystem.initialize(new URI("file:///"), conf);
@@ -130,7 +130,7 @@ public class TestHarFileSystemBasics {
     }
     // create Har to test:
     conf = new Configuration();
-    harFileSystem = createHarFileSysten(conf);
+    harFileSystem = createHarFileSystem(conf);
   }
 
   @After
@@ -221,6 +221,43 @@ public class TestHarFileSystemBasics {
     hfs.initialize(uri, new Configuration());
   }
 
+  @Test
+  public void testPositiveListFilesNotEndInColon() throws Exception {
+    // re-initialize the har file system with a host name
+    // make sure the qualified path name does not append ":" at the end of the host name
+    final URI uri = new URI("har://file-localhost" + harPath.toString());
+    harFileSystem.initialize(uri, conf);
+    Path p1 = new Path("har://file-localhost" + harPath.toString());
+    Path p2 = harFileSystem.makeQualified(p1);
+    assertTrue(p2.toUri().toString().startsWith("har://file-localhost/"));
+  }
+
+  @Test
+  public void testListLocatedStatus() throws Exception {
+    String testHarPath = this.getClass().getResource("/test.har").getPath();
+    URI uri = new URI("har://" + testHarPath);
+    HarFileSystem hfs = new HarFileSystem(localFileSystem);
+    hfs.initialize(uri, new Configuration());
+
+    // test.har has the following contents:
+    //   dir1/1.txt
+    //   dir1/2.txt
+    Set<String> expectedFileNames = new HashSet<String>();
+    expectedFileNames.add("1.txt");
+    expectedFileNames.add("2.txt");
+
+    // List contents of dir, and ensure we find all expected files
+    Path path = new Path("dir1");
+    RemoteIterator<LocatedFileStatus> fileList = hfs.listLocatedStatus(path);
+    while (fileList.hasNext()) {
+      String fileName = fileList.next().getPath().getName();
+      assertTrue(fileName + " not in expected files list", expectedFileNames.contains(fileName));
+      expectedFileNames.remove(fileName);
+    }
+    assertEquals("Didn't find all of the expected file names: " + expectedFileNames,
+                 0, expectedFileNames.size());
+  }
+
   // ========== Negative:
 
   @Test

+ 5 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFSFileContextMainOperations.java

@@ -49,6 +49,11 @@ public class TestLocalFSFileContextMainOperations extends FileContextMainOperati
     FileContext fc1 = FileContext.getLocalFSFileContext();
     Assert.assertTrue(fc1 != fc);
   }
+  
+  @Override
+  protected boolean listCorruptedBlocksSupported() {
+    return false;
+  }
 
   @Test
   public void testDefaultFilePermission() throws IOException {

+ 19 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java

@@ -460,6 +460,13 @@ public class TestPath extends TestCase {
       Path.mergePaths(new Path("/C:/foo"),
         new Path("/C:/bar")));
 
+    assertEquals(new Path(Shell.WINDOWS ? "/C:/bar" : "/C:/C:/bar"),
+        Path.mergePaths(new Path("/C:/"),
+          new Path("/C:/bar")));
+
+    assertEquals(new Path("/bar"),
+        Path.mergePaths(new Path("/"), new Path("/bar")));
+
     assertEquals(new Path("viewfs:///foo/bar"),
       Path.mergePaths(new Path("viewfs:///foo"),
         new Path("file:///bar")));
@@ -468,4 +475,16 @@ public class TestPath extends TestCase {
       Path.mergePaths(new Path("viewfs://vfsauthority/foo"),
         new Path("file://fileauthority/bar")));
   }
+
+  @Test (timeout = 30000)
+  public void testIsWindowsAbsolutePath() {
+    if (!Shell.WINDOWS) return;
+    assertTrue(Path.isWindowsAbsolutePath("C:\\test", false));
+    assertTrue(Path.isWindowsAbsolutePath("C:/test", false));
+    assertTrue(Path.isWindowsAbsolutePath("/C:/test", true));
+    assertFalse(Path.isWindowsAbsolutePath("/test", false));
+    assertFalse(Path.isWindowsAbsolutePath("/test", true));
+    assertFalse(Path.isWindowsAbsolutePath("C:test", false));
+    assertFalse(Path.isWindowsAbsolutePath("/C:test", true));
+  }
 }

+ 5 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java

@@ -18,13 +18,9 @@
 package org.apache.hadoop.fs.viewfs;
 
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextMainOperationsBaseTest;
-import org.apache.hadoop.fs.FileContextTestHelper;
-import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.viewfs.ConfigUtil;
 
 import org.junit.After;
 import org.junit.Before;
@@ -49,4 +45,9 @@ public class TestFcMainOperationsLocalFs  extends
     super.tearDown();
     ViewFsTestSetup.tearDownForViewFsLocalFs(fileContextTestHelper);
   }
+  
+  @Override
+  protected boolean listCorruptedBlocksSupported() {
+    return false;
+  }
 }

+ 8 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/ClientBaseWithFixes.java

@@ -53,6 +53,8 @@ import org.junit.Before;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Copy-paste of ClientBase from ZooKeeper, but without any of the
  * JMXEnv verification. There seems to be a bug ZOOKEEPER-1438
@@ -111,7 +113,9 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
         synchronized boolean isConnected() {
             return connected;
         }
-        synchronized void waitForConnected(long timeout) throws InterruptedException, TimeoutException {
+        @VisibleForTesting
+        public synchronized void waitForConnected(long timeout)
+            throws InterruptedException, TimeoutException {
             long expire = Time.now() + timeout;
             long left = timeout;
             while(!connected && left > 0) {
@@ -123,7 +127,9 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
 
             }
         }
-        synchronized void waitForDisconnected(long timeout) throws InterruptedException, TimeoutException {
+        @VisibleForTesting
+        public synchronized void waitForDisconnected(long timeout)
+            throws InterruptedException, TimeoutException {
             long expire = Time.now() + timeout;
             long left = timeout;
             while(connected && left > 0) {

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java

@@ -54,7 +54,7 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
 
   @Before
   public void setup() throws Exception {
-    HttpConfig.setSecure(true);
+    HttpConfig.setPolicy(HttpConfig.Policy.HTTPS_ONLY);
     File base = new File(BASEDIR);
     FileUtil.fullyDelete(base);
     base.mkdirs();
@@ -89,7 +89,7 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
     String classpathDir =
         KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
     new File(classpathDir, CONFIG_SITE_XML).delete();
-    HttpConfig.setSecure(false);
+    HttpConfig.setPolicy(HttpConfig.Policy.HTTP_ONLY);
   }
   
 

+ 78 - 22
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayFile.java

@@ -19,18 +19,23 @@
 package org.apache.hadoop.io;
 
 import java.io.*;
+
 import junit.framework.TestCase;
 
 import org.apache.commons.logging.*;
-
 import org.apache.hadoop.fs.*;
+import org.apache.hadoop.io.SequenceFile.CompressionType;
+import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.conf.*;
 
 /** Support for flat files of binary key/value pairs. */
 public class TestArrayFile extends TestCase {
   private static final Log LOG = LogFactory.getLog(TestArrayFile.class);
-  private static String FILE =
-    System.getProperty("test.build.data",".") + "/test.array";
+  
+  private static final Path TEST_DIR = new Path(
+      System.getProperty("test.build.data", "/tmp"),
+      TestMapFile.class.getSimpleName());
+  private static String TEST_FILE = new Path(TEST_DIR, "test.array").toString();
 
   public TestArrayFile(String name) { 
     super(name); 
@@ -40,15 +45,15 @@ public class TestArrayFile extends TestCase {
     Configuration conf = new Configuration();
     FileSystem fs = FileSystem.getLocal(conf);
     RandomDatum[] data = generate(10000);
-    writeTest(fs, data, FILE);
-    readTest(fs, data, FILE, conf);
+    writeTest(fs, data, TEST_FILE);
+    readTest(fs, data, TEST_FILE, conf);
   }
 
   public void testEmptyFile() throws Exception {
     Configuration conf = new Configuration();
     FileSystem fs = FileSystem.getLocal(conf);
-    writeTest(fs, new RandomDatum[0], FILE);
-    ArrayFile.Reader reader = new ArrayFile.Reader(fs, FILE, conf);
+    writeTest(fs, new RandomDatum[0], TEST_FILE);
+    ArrayFile.Reader reader = new ArrayFile.Reader(fs, TEST_FILE, conf);
     assertNull(reader.get(0, new RandomDatum()));
     reader.close();
   }
@@ -87,31 +92,75 @@ public class TestArrayFile extends TestCase {
       LOG.debug("reading " + data.length + " debug");
     }
     ArrayFile.Reader reader = new ArrayFile.Reader(fs, file, conf);
-    for (int i = 0; i < data.length; i++) {       // try forwards
-      reader.get(i, v);
-      if (!v.equals(data[i])) {
-        throw new RuntimeException("wrong value at " + i);
+    try {
+      for (int i = 0; i < data.length; i++) {       // try forwards
+        reader.get(i, v);
+        if (!v.equals(data[i])) {
+          throw new RuntimeException("wrong value at " + i);
+        }
       }
-    }
-    for (int i = data.length-1; i >= 0; i--) {    // then backwards
-      reader.get(i, v);
-      if (!v.equals(data[i])) {
-        throw new RuntimeException("wrong value at " + i);
+      for (int i = data.length-1; i >= 0; i--) {    // then backwards
+        reader.get(i, v);
+        if (!v.equals(data[i])) {
+          throw new RuntimeException("wrong value at " + i);
+        }
       }
-    }
-    reader.close();
-    if(LOG.isDebugEnabled()) {
-      LOG.debug("done reading " + data.length + " debug");
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("done reading " + data.length + " debug");
+      }
+    } finally {
+      reader.close();
     }
   }
 
-
+  /**
+   * Test {@link ArrayFile.Reader} iteration methods
+   * {@code next()} and {@code seek()}, both in and out of range.
+   */
+  public void testArrayFileIteration() {
+    int SIZE = 10;
+    Configuration conf = new Configuration();    
+    try {
+      FileSystem fs = FileSystem.get(conf);
+      ArrayFile.Writer writer = new ArrayFile.Writer(conf, fs, TEST_FILE, 
+          LongWritable.class, CompressionType.RECORD, defaultProgressable);
+      assertNotNull("testArrayFileIteration error !!!", writer);
+      
+      for (int i = 0; i < SIZE; i++)
+        writer.append(new LongWritable(i));
+      
+      writer.close();
+      
+      ArrayFile.Reader reader = new ArrayFile.Reader(fs, TEST_FILE, conf);
+      LongWritable nextWritable = new LongWritable(0);
+      
+      for (int i = 0; i < SIZE; i++) {
+        nextWritable = (LongWritable)reader.next(nextWritable);
+        assertEquals(nextWritable.get(), i);
+      }
+        
+      assertTrue("testArrayFileIteration seek error !!!",
+          reader.seek(new LongWritable(6)));
+      nextWritable = (LongWritable) reader.next(nextWritable);
+      assertTrue("testArrayFileIteration error !!!", reader.key() == 7);
+      assertTrue("testArrayFileIteration error !!!",
+          nextWritable.equals(new LongWritable(7)));
+      assertFalse("testArrayFileIteration error !!!",
+          reader.seek(new LongWritable(SIZE + 5)));
+      reader.close();
+    } catch (Exception ex) {
+      fail("testArrayFileWriterConstruction error !!!");
+    }
+  }
+ 
   /** For debugging and testing. */
   public static void main(String[] args) throws Exception {
     int count = 1024 * 1024;
     boolean create = true;
     boolean check = true;
-    String file = FILE;
+    String file = TEST_FILE;
     String usage = "Usage: TestArrayFile [-count N] [-nocreate] [-nocheck] file";
       
     if (args.length == 0) {
@@ -160,4 +209,11 @@ public class TestArrayFile extends TestCase {
       fs.close();
     }
   }
+  
+  private static final Progressable defaultProgressable = new Progressable() {
+    @Override
+    public void progress() {      
+    }
+  };
+  
 }
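
The new testArrayFileIteration covers the positional access methods of ArrayFile. A minimal write/read sketch (assuming ArrayFile's plain Writer(conf, fs, file, valueClass) constructor, which is not shown in this hunk; the path is hypothetical):

    FileSystem fs = FileSystem.getLocal(conf);
    String file = "/tmp/example.array";                      // hypothetical location
    ArrayFile.Writer writer = new ArrayFile.Writer(conf, fs, file, LongWritable.class);
    for (long i = 0; i < 10; i++) {
      writer.append(new LongWritable(i));                    // stored at index i
    }
    writer.close();

    ArrayFile.Reader reader = new ArrayFile.Reader(fs, file, conf);
    LongWritable value = new LongWritable();
    reader.get(5, value);                                    // random access by index
    reader.seek(new LongWritable(6));                        // position the reader at index 6
    reader.next(value);                                      // sequential read from the current position
    reader.close();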

+ 49 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestArrayWritable.java

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -20,6 +20,8 @@ package org.apache.hadoop.io;
 
 import java.io.*;
 
+import org.junit.Assert;
+
 import junit.framework.TestCase;
 
 /** Unit tests for ArrayWritable */
@@ -61,4 +63,50 @@ public class TestArrayWritable extends TestCase {
       assertEquals(destElements[i],elements[i]);
     }
   }
+  
+ /**
+  * test {@link ArrayWritable} toArray() method 
+  */
+  public void testArrayWritableToArray() {
+    Text[] elements = {new Text("zero"), new Text("one"), new Text("two")};
+    TextArrayWritable arrayWritable = new TextArrayWritable();
+    arrayWritable.set(elements);
+    Object array = arrayWritable.toArray();
+  
+    assertTrue("TestArrayWritable testArrayWritableToArray error!!! ", array instanceof Text[]);
+    Text[] destElements = (Text[]) array;
+  
+    for (int i = 0; i < elements.length; i++) {
+      assertEquals(destElements[i], elements[i]);
+    }
+  }
+  
+  /**
+   * test {@link ArrayWritable} constructor with null
+   */
+  public void testNullArgument() {
+    try {
+      Class<? extends Writable> valueClass = null;
+      new ArrayWritable(valueClass);
+      fail("testNullArgument error !!!");
+    } catch (IllegalArgumentException exp) {
+      // expected; the test passes when this is thrown
+    } catch (Exception e) {
+      fail("testNullArgument error !!!");
+    }
+  }
+
+  /**
+   * test {@link ArrayWritable} constructor with {@code String[]} as a parameter
+   */
+  @SuppressWarnings("deprecation")
+  public void testArrayWritableStringConstructor() {
+    String[] original = { "test1", "test2", "test3" };
+    ArrayWritable arrayWritable = new ArrayWritable(original);
+    assertEquals("testArrayWritableStringConstructor class error!!!", 
+        UTF8.class, arrayWritable.getValueClass());
+    Assert.assertArrayEquals("testArrayWritableStringConstructor toString error!!!",
+      original, arrayWritable.toStrings());
+  }
+  
 }

+ 210 - 15
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBloomMapFile.java

@@ -18,28 +18,53 @@
 
 package org.apache.hadoop.io;
 
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
+import junit.framework.TestCase;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
-
-import junit.framework.TestCase;
+import org.apache.hadoop.io.SequenceFile.CompressionType;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.CompressionInputStream;
+import org.apache.hadoop.io.compress.CompressionOutputStream;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.util.Progressable;
+import org.junit.Assert;
 
 public class TestBloomMapFile extends TestCase {
   private static Configuration conf = new Configuration();
+  private static final Path TEST_ROOT = new Path(
+      System.getProperty("test.build.data", "/tmp"),
+      TestMapFile.class.getSimpleName());
+  private static final Path TEST_DIR = new Path(TEST_ROOT, "testfile");
+  private static final Path TEST_FILE = new Path(TEST_ROOT, "testfile");
+
+  @Override
+  public void setUp() throws Exception {
+    LocalFileSystem fs = FileSystem.getLocal(conf);
+    if (fs.exists(TEST_ROOT) && !fs.delete(TEST_ROOT, true)) {
+      Assert.fail("Can't clean up test root dir");
+    }
+    fs.mkdirs(TEST_ROOT);
+  }
   
+  @SuppressWarnings("deprecation")
   public void testMembershipTest() throws Exception {
     // write the file
-    Path dirName = new Path(System.getProperty("test.build.data",".") +
-        getName() + ".bloommapfile"); 
     FileSystem fs = FileSystem.getLocal(conf);
-    Path qualifiedDirName = fs.makeQualified(dirName);
+    Path qualifiedDirName = fs.makeQualified(TEST_DIR);
     conf.setInt("io.mapfile.bloom.size", 2048);
     BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, fs,
-      qualifiedDirName.toString(), IntWritable.class, Text.class);
+        qualifiedDirName.toString(), IntWritable.class, Text.class);
     IntWritable key = new IntWritable();
     Text value = new Text();
     for (int i = 0; i < 2000; i += 2) {
@@ -48,7 +73,7 @@ public class TestBloomMapFile extends TestCase {
       writer.append(key, value);
     }
     writer.close();
-    
+
     BloomMapFile.Reader reader = new BloomMapFile.Reader(fs,
         qualifiedDirName.toString(), conf);
     // check false positives rate
@@ -58,9 +83,11 @@ public class TestBloomMapFile extends TestCase {
       key.set(i);
       boolean exists = reader.probablyHasKey(key);
       if (i % 2 == 0) {
-        if (!exists) falseNeg++;
+        if (!exists)
+          falseNeg++;
       } else {
-        if (exists) falsePos++;
+        if (exists)
+          falsePos++;
       }
     }
     reader.close();
@@ -71,13 +98,13 @@ public class TestBloomMapFile extends TestCase {
     assertTrue(falsePos < 2);
   }
 
-  private void checkMembershipVaryingSizedKeys(String name, List<Text> keys) throws Exception {
-    Path dirName = new Path(System.getProperty("test.build.data",".") +
-        name + ".bloommapfile"); 
+  @SuppressWarnings("deprecation")
+  private void checkMembershipVaryingSizedKeys(String name, List<Text> keys)
+      throws Exception {
     FileSystem fs = FileSystem.getLocal(conf);
-    Path qualifiedDirName = fs.makeQualified(dirName);
+    Path qualifiedDirName = fs.makeQualified(TEST_DIR);
     BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, fs,
-      qualifiedDirName.toString(), Text.class, NullWritable.class);
+        qualifiedDirName.toString(), Text.class, NullWritable.class);
     for (Text key : keys) {
       writer.append(key, NullWritable.get());
     }
@@ -88,7 +115,8 @@ public class TestBloomMapFile extends TestCase {
         qualifiedDirName.toString(), conf);
     Collections.reverse(keys);
     for (Text key : keys) {
-      assertTrue("False negative for existing key " + key, reader.probablyHasKey(key));
+      assertTrue("False negative for existing key " + key,
+          reader.probablyHasKey(key));
     }
     reader.close();
     fs.delete(qualifiedDirName, true);
@@ -108,4 +136,171 @@ public class TestBloomMapFile extends TestCase {
     checkMembershipVaryingSizedKeys(getName(), list);
   }
 
+  /**
+   * test {@code BloomMapFile.delete()} method
+   */
+  public void testDeleteFile() {
+    try {
+      FileSystem fs = FileSystem.getLocal(conf);
+      BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, TEST_FILE,
+          MapFile.Writer.keyClass(IntWritable.class),
+          MapFile.Writer.valueClass(Text.class));
+      assertNotNull("testDeleteFile error !!!", writer);
+      BloomMapFile.delete(fs, "." + TEST_FILE);
+    } catch (Exception ex) {
+      fail("unexpect ex in testDeleteFile !!!");
+    }
+  }
+  
+  /**
+   * test {@link BloomMapFile.Reader} constructor with 
+   * IOException
+   */
+  public void testIOExceptionInWriterConstructor() {
+    Path dirNameSpy = org.mockito.Mockito.spy(TEST_FILE);
+    try {
+      BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, TEST_FILE,
+          MapFile.Writer.keyClass(IntWritable.class),
+          MapFile.Writer.valueClass(Text.class));
+      writer.append(new IntWritable(1), new Text("123124142"));
+      writer.close();
+
+      org.mockito.Mockito.when(dirNameSpy.getFileSystem(conf)).thenThrow(
+          new IOException());
+      BloomMapFile.Reader reader = new BloomMapFile.Reader(dirNameSpy, conf,
+          MapFile.Reader.comparator(new WritableComparator(IntWritable.class)));
+
+      assertNull("testIOExceptionInWriterConstructor error !!!",
+          reader.getBloomFilter());
+      reader.close();
+    } catch (Exception ex) {
+      fail("unexpect ex in testIOExceptionInWriterConstructor !!!");
+    }
+  }
+
+  /**
+   * test {@code BloomMapFile.Reader.get()} method
+   */
+  public void testGetBloomMapFile() {
+    int SIZE = 10;
+    try {
+      BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, TEST_FILE,
+          MapFile.Writer.keyClass(IntWritable.class),
+          MapFile.Writer.valueClass(Text.class));
+
+      for (int i = 0; i < SIZE; i++) {
+        writer.append(new IntWritable(i), new Text());
+      }
+      writer.close();
+
+      BloomMapFile.Reader reader = new BloomMapFile.Reader(TEST_FILE, conf,
+          MapFile.Reader.comparator(new WritableComparator(IntWritable.class)));
+
+      for (int i = 0; i < SIZE; i++) {
+        assertNotNull("testGetBloomMapFile error !!!",
+            reader.get(new IntWritable(i), new Text()));
+      }
+            
+      assertNull("testGetBloomMapFile error !!!",
+          reader.get(new IntWritable(SIZE + 5), new Text()));
+      reader.close();
+    } catch (Exception ex) {
+      fail("unexpect ex in testGetBloomMapFile !!!");
+    }
+  }
+
+  /**
+   * test {@code BloomMapFile.Writer} constructors
+   */
+  @SuppressWarnings("deprecation")
+  public void testBloomMapFileConstructors() {
+    try {
+      FileSystem ts = FileSystem.get(conf);
+      String testFileName = TEST_FILE.toString();
+      BloomMapFile.Writer writer1 = new BloomMapFile.Writer(conf, ts,
+          testFileName, IntWritable.class, Text.class, CompressionType.BLOCK,
+          defaultCodec, defaultProgress);
+      assertNotNull("testBloomMapFileConstructors error !!!", writer1);
+      BloomMapFile.Writer writer2 = new BloomMapFile.Writer(conf, ts,
+          testFileName, IntWritable.class, Text.class, CompressionType.BLOCK,
+          defaultProgress);
+      assertNotNull("testBloomMapFileConstructors error !!!", writer2);
+      BloomMapFile.Writer writer3 = new BloomMapFile.Writer(conf, ts,
+          testFileName, IntWritable.class, Text.class, CompressionType.BLOCK);
+      assertNotNull("testBloomMapFileConstructors error !!!", writer3);
+      BloomMapFile.Writer writer4 = new BloomMapFile.Writer(conf, ts,
+          testFileName, IntWritable.class, Text.class, CompressionType.RECORD,
+          defaultCodec, defaultProgress);
+      assertNotNull("testBloomMapFileConstructors error !!!", writer4);
+      BloomMapFile.Writer writer5 = new BloomMapFile.Writer(conf, ts,
+          testFileName, IntWritable.class, Text.class, CompressionType.RECORD,
+          defaultProgress);
+      assertNotNull("testBloomMapFileConstructors error !!!", writer5);
+      BloomMapFile.Writer writer6 = new BloomMapFile.Writer(conf, ts,
+          testFileName, IntWritable.class, Text.class, CompressionType.RECORD);
+      assertNotNull("testBloomMapFileConstructors error !!!", writer6);
+      BloomMapFile.Writer writer7 = new BloomMapFile.Writer(conf, ts,
+          testFileName, WritableComparator.get(Text.class), Text.class);
+      assertNotNull("testBloomMapFileConstructors error !!!", writer7);
+    } catch (Exception ex) {
+      fail("testBloomMapFileConstructors error !!!");
+    }
+  }
+
+  static final Progressable defaultProgress = new Progressable() {
+    @Override
+    public void progress() {
+    }
+  };
+
+  static final CompressionCodec defaultCodec = new CompressionCodec() {
+    @Override
+    public String getDefaultExtension() {
+      return null;
+    }
+
+    @Override
+    public Class<? extends Decompressor> getDecompressorType() {
+      return null;
+    }
+
+    @Override
+    public Class<? extends Compressor> getCompressorType() {
+      return null;
+    }
+
+    @Override
+    public CompressionOutputStream createOutputStream(OutputStream out,
+        Compressor compressor) throws IOException {
+      return null;
+    }
+
+    @Override
+    public CompressionOutputStream createOutputStream(OutputStream out)
+        throws IOException {
+      return null;
+    }
+
+    @Override
+    public CompressionInputStream createInputStream(InputStream in,
+        Decompressor decompressor) throws IOException {
+      return null;
+    }
+
+    @Override
+    public CompressionInputStream createInputStream(InputStream in)
+        throws IOException {
+      return null;
+    }
+
+    @Override
+    public Decompressor createDecompressor() {
+      return null;
+    }
+
+    @Override
+    public Compressor createCompressor() {
+      return null;
+    }
+  };
 }
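
The falseNeg/falsePos checks above rely on the bloom-filter semantics: probablyHasKey() can report false positives but never false negatives. A minimal membership-probe sketch with the option-based constructors exercised in this patch (the path is illustrative only):

    Path dir = new Path("/tmp/example.bloommap");            // hypothetical location
    BloomMapFile.Writer writer = new BloomMapFile.Writer(conf, dir,
        MapFile.Writer.keyClass(IntWritable.class),
        MapFile.Writer.valueClass(Text.class));
    writer.append(new IntWritable(42), new Text("answer"));
    writer.close();

    BloomMapFile.Reader reader = new BloomMapFile.Reader(dir, conf);
    IntWritable key = new IntWritable(42);
    if (reader.probablyHasKey(key)) {                        // cheap bloom check, may be a false positive
      Text value = (Text) reader.get(key, new Text());       // definitive lookup against the map file
    }
    reader.close();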

+ 24 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBooleanWritable.java

@@ -50,4 +50,28 @@ public class TestBooleanWritable {
     out.flush();
     return out;
   }
+  
+  /**
+   * test {@link BooleanWritable} methods hashCode(), equals(), compareTo() 
+   */
+  @Test
+  public void testCommonMethods() {    
+    assertTrue("testCommonMethods1 error !!!", newInstance(true).equals(newInstance(true)));
+    assertTrue("testCommonMethods2 error  !!!", newInstance(false).equals(newInstance(false)));
+    assertFalse("testCommonMethods3 error !!!", newInstance(false).equals(newInstance(true)));
+    assertTrue("testCommonMethods4 error !!!", checkHashCode(newInstance(true), newInstance(true)));
+    assertFalse("testCommonMethods5 error !!! ", checkHashCode(newInstance(true), newInstance(false)));
+    assertTrue("testCommonMethods6 error !!!", newInstance(true).compareTo(newInstance(false)) > 0 );
+    assertTrue("testCommonMethods7 error !!!", newInstance(false).compareTo(newInstance(true)) < 0 );
+    assertTrue("testCommonMethods8 error !!!", newInstance(false).compareTo(newInstance(false)) == 0 );
+    assertEquals("testCommonMethods9 error !!!", "true", newInstance(true).toString());
+  }
+  
+  private boolean checkHashCode(BooleanWritable f, BooleanWritable s) {
+    return f.hashCode() == s.hashCode();
+  }    
+  
+  private static BooleanWritable newInstance(boolean flag) {
+    return new BooleanWritable(flag);
+  }
 }

+ 20 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestBytesWritable.java

@@ -133,5 +133,24 @@ public class TestBytesWritable {
     assertTrue("buffer created with (array, len) has bad length",
         zeroBuf.getLength() == copyBuf.getLength());
   }
+    
+  /**
+   * test {@link ByteWritable} 
+   * methods compareTo(), toString(), equals()
+   */
+  @Test
+  public void testObjectCommonMethods() {    
+    byte b = 0x9;
+    ByteWritable bw = new ByteWritable();
+    bw.set(b);
+    assertTrue("testSetByteWritable error", bw.get() == b);
+    assertTrue("testSetByteWritable error < 0", bw.compareTo(new ByteWritable((byte)0xA)) < 0);
+    assertTrue("testSetByteWritable error > 0", bw.compareTo(new ByteWritable((byte)0x8)) > 0);
+    assertTrue("testSetByteWritable error == 0", bw.compareTo(new ByteWritable((byte)0x9)) == 0);
+    assertTrue("testSetByteWritable equals error !!!", bw.equals(new ByteWritable((byte)0x9)));
+    assertTrue("testSetByteWritable equals error !!!", ! bw.equals(new ByteWritable((byte)0xA)));
+    assertTrue("testSetByteWritable equals error !!!", ! bw.equals(new IntWritable(1)));
+    assertEquals("testSetByteWritable error ", "9", bw.toString());    
+  }
+  
 }
-

+ 57 - 10
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestEnumSetWritable.java

@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -20,6 +20,7 @@ package org.apache.hadoop.io;
 
 import java.io.IOException;
 import java.util.EnumSet;
+import java.util.Iterator;
 import java.lang.reflect.Type;
 
 import junit.framework.TestCase;
@@ -32,8 +33,8 @@ public class TestEnumSetWritable extends TestCase {
   }
 
   EnumSet<TestEnumSet> nonEmptyFlag = EnumSet.of(TestEnumSet.APPEND);
-  EnumSetWritable<TestEnumSet> nonEmptyFlagWritable = new EnumSetWritable<TestEnumSet>(
-      nonEmptyFlag);
+  EnumSetWritable<TestEnumSet> nonEmptyFlagWritable = 
+      new EnumSetWritable<TestEnumSet>(nonEmptyFlag);
 
   @SuppressWarnings("unchecked")
   public void testSerializeAndDeserializeNonEmpty() throws IOException {
@@ -60,11 +61,12 @@ public class TestEnumSetWritable extends TestCase {
     }
 
     assertTrue(
-        "Instantiate empty EnumSetWritable with no element type class providesd should throw exception.",
+        "Instantiation of empty EnumSetWritable with no element type class "
+        + "provided should throw exception.",
         gotException);
 
-    EnumSetWritable<TestEnumSet> emptyFlagWritable = new EnumSetWritable<TestEnumSet>(
-        emptyFlag, TestEnumSet.class);
+    EnumSetWritable<TestEnumSet> emptyFlagWritable = 
+        new EnumSetWritable<TestEnumSet>(emptyFlag, TestEnumSet.class);
     DataOutputBuffer out = new DataOutputBuffer();
     ObjectWritable.writeObject(out, emptyFlagWritable, emptyFlagWritable
         .getClass(), null);
@@ -86,11 +88,12 @@ public class TestEnumSetWritable extends TestCase {
     }
 
     assertTrue(
-        "Instantiate empty EnumSetWritable with no element type class providesd should throw exception.",
+        "Instantiation of empty EnumSetWritable with no element type class "
+        + "provided should throw exception",
         gotException);
 
-    EnumSetWritable<TestEnumSet> nullFlagWritable = new EnumSetWritable<TestEnumSet>(
-        null, TestEnumSet.class);
+    EnumSetWritable<TestEnumSet> nullFlagWritable = 
+        new EnumSetWritable<TestEnumSet>(null, TestEnumSet.class);
 
     DataOutputBuffer out = new DataOutputBuffer();
     ObjectWritable.writeObject(out, nullFlagWritable, nullFlagWritable
@@ -105,10 +108,54 @@ public class TestEnumSetWritable extends TestCase {
   public EnumSetWritable<TestEnumSet> testField;
 
   public void testAvroReflect() throws Exception {
-    String schema = "{\"type\":\"array\",\"items\":{\"type\":\"enum\",\"name\":\"TestEnumSet\",\"namespace\":\"org.apache.hadoop.io.TestEnumSetWritable$\",\"symbols\":[\"CREATE\",\"OVERWRITE\",\"APPEND\"]},\"java-class\":\"org.apache.hadoop.io.EnumSetWritable\"}";
+    String schema = "{\"type\":\"array\",\"items\":{\"type\":\"enum\","
+        + "\"name\":\"TestEnumSet\","
+        + "\"namespace\":\"org.apache.hadoop.io.TestEnumSetWritable$\","
+        + "\"symbols\":[\"CREATE\",\"OVERWRITE\",\"APPEND\"]},"
+        + "\"java-class\":\"org.apache.hadoop.io.EnumSetWritable\"}";
     Type type =
       TestEnumSetWritable.class.getField("testField").getGenericType();
     AvroTestUtil.testReflect(nonEmptyFlagWritable, type, schema);
+  }    
+  
+  /**
+   * test {@link EnumSetWritable} equals() method
+   */
+  public void testEnumSetWritableEquals() {
+    EnumSetWritable<TestEnumSet> eset1 = new EnumSetWritable<TestEnumSet>(
+        EnumSet.of(TestEnumSet.APPEND, TestEnumSet.CREATE), TestEnumSet.class);
+    EnumSetWritable<TestEnumSet> eset2 = new EnumSetWritable<TestEnumSet>(
+        EnumSet.of(TestEnumSet.APPEND, TestEnumSet.CREATE), TestEnumSet.class);
+    assertTrue("testEnumSetWritableEquals error !!!", eset1.equals(eset2));
+    assertFalse("testEnumSetWritableEquals error !!!",
+        eset1.equals(new EnumSetWritable<TestEnumSet>(EnumSet.of(
+            TestEnumSet.APPEND, TestEnumSet.CREATE, TestEnumSet.OVERWRITE),
+            TestEnumSet.class)));
+    assertTrue("testEnumSetWritableEquals getElementType error !!!", eset1
+        .getElementType().equals(TestEnumSet.class));
   }
+  
+  /** 
+   * test {@code EnumSetWritable.write(DataOutputBuffer out)} 
+   * and iteration over the deserialized set via {@code iterator()}.
+   */
+  public void testEnumSetWritableWriteRead() throws Exception {
+    EnumSetWritable<TestEnumSet> srcSet = new EnumSetWritable<TestEnumSet>(
+        EnumSet.of(TestEnumSet.APPEND, TestEnumSet.CREATE), TestEnumSet.class);
+    DataOutputBuffer out = new DataOutputBuffer();
+    srcSet.write(out);
 
+    EnumSetWritable<TestEnumSet> dstSet = new EnumSetWritable<TestEnumSet>();
+    DataInputBuffer in = new DataInputBuffer();
+    in.reset(out.getData(), out.getLength());
+    dstSet.readFields(in);
+
+    EnumSet<TestEnumSet> result = dstSet.get();
+    Iterator<TestEnumSet> dstIter = result.iterator();
+    Iterator<TestEnumSet> srcIter = srcSet.iterator();
+    while (dstIter.hasNext() && srcIter.hasNext()) {
+      assertEquals("testEnumSetWritableWriteRead error !!!", dstIter.next(),
+          srcIter.next());
+    }
+  }
 }

+ 610 - 37
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestMapFile.java

@@ -17,29 +17,592 @@
  */
 package org.apache.hadoop.io;
 
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.SequenceFile.CompressionType;
+import org.apache.hadoop.io.compress.CompressionCodec;
+import org.apache.hadoop.io.compress.CompressionInputStream;
+import org.apache.hadoop.io.compress.CompressionOutputStream;
+import org.apache.hadoop.io.compress.Compressor;
+import org.apache.hadoop.io.compress.Decompressor;
+import org.apache.hadoop.util.Progressable;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
 
-import junit.framework.TestCase;
+import static org.mockito.Mockito.*;
 
-public class TestMapFile extends TestCase {
+public class TestMapFile {
+  
+  private static final Path TEST_DIR = new Path(
+      System.getProperty("test.build.data", "/tmp"),
+      TestMapFile.class.getSimpleName());
+  
   private static Configuration conf = new Configuration();
 
+  @Before
+  public void setup() throws Exception {
+    LocalFileSystem fs = FileSystem.getLocal(conf);
+    if (fs.exists(TEST_DIR) && !fs.delete(TEST_DIR, true)) {
+      Assert.fail("Can't clean up test root dir");
+    }
+    fs.mkdirs(TEST_DIR);
+  }
+  
+  private static final Progressable defaultProgressable = new Progressable() {
+    @Override
+    public void progress() {
+    }
+  };
+
+  private static final CompressionCodec defaultCodec = new CompressionCodec() {
+    @Override
+    public CompressionOutputStream createOutputStream(OutputStream out)
+        throws IOException {
+      return null;
+    }
+
+    @Override
+    public CompressionOutputStream createOutputStream(OutputStream out,
+        Compressor compressor) throws IOException {
+      return null;
+    }
+
+    @Override
+    public Class<? extends Compressor> getCompressorType() {
+      return null;
+    }
+
+    @Override
+    public Compressor createCompressor() {
+      return null;
+    }
+
+    @Override
+    public CompressionInputStream createInputStream(InputStream in)
+        throws IOException {
+      return null;
+    }
+
+    @Override
+    public CompressionInputStream createInputStream(InputStream in,
+        Decompressor decompressor) throws IOException {
+      return null;
+    }
+
+    @Override
+    public Class<? extends Decompressor> getDecompressorType() {
+      return null;
+    }
+
+    @Override
+    public Decompressor createDecompressor() {
+      return null;
+    }
+
+    @Override
+    public String getDefaultExtension() {
+      return null;
+    }
+  };
+
+  private MapFile.Writer createWriter(String fileName,
+      Class<? extends WritableComparable<?>> keyClass,
+      Class<? extends Writable> valueClass) throws IOException {
+    Path dirName = new Path(TEST_DIR, fileName);
+    MapFile.Writer.setIndexInterval(conf, 4);
+    return new MapFile.Writer(conf, dirName, MapFile.Writer.keyClass(keyClass),
+        MapFile.Writer.valueClass(valueClass));
+  }
+
+  private MapFile.Reader createReader(String fileName,
+      Class<? extends WritableComparable<?>> keyClass) throws IOException {
+    Path dirName = new Path(TEST_DIR, fileName);
+    return new MapFile.Reader(dirName, conf,
+        MapFile.Reader.comparator(new WritableComparator(keyClass)));
+  }
+  
+  /**
+   * test {@code MapFile.Reader.getClosest()} method 
+   *
+   */
+  @Test
+  public void testGetClosestOnCurrentApi() throws Exception {
+    final String TEST_PREFIX = "testGetClosestOnCurrentApi.mapfile";
+    MapFile.Writer writer = createWriter(TEST_PREFIX, Text.class, Text.class);
+    int FIRST_KEY = 1;
+    // Test keys: 1, 11, 21, ..., 91
+    for (int i = FIRST_KEY; i < 100; i += 10) {      
+      Text t = new Text(Integer.toString(i));
+      writer.append(t, t);
+    }
+    writer.close();
+
+    MapFile.Reader reader = createReader(TEST_PREFIX, Text.class);
+    Text key = new Text("55");
+    Text value = new Text();
+
+    // Test get closest with step forward
+    Text closest = (Text) reader.getClosest(key, value);
+    assertEquals(new Text("61"), closest);
+
+    // Test get closest with step back
+    closest = (Text) reader.getClosest(key, value, true);
+    assertEquals(new Text("51"), closest);
+
+    // Test get closest when we pass explicit key
+    final Text explicitKey = new Text("21");
+    closest = (Text) reader.getClosest(explicitKey, value);
+    assertEquals(new Text("21"), explicitKey);
+
+    // Test what happens at boundaries. Assert if searching a key that is
+    // less than first key in the mapfile, that the first key is returned.
+    key = new Text("00");
+    closest = (Text) reader.getClosest(key, value);
+    assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));
+
+    // Assert that null is returned if key is > last entry in mapfile.
+    key = new Text("92");
+    closest = (Text) reader.getClosest(key, value);
+    assertNull("Not null key in testGetClosestWithNewCode", closest);
+
+    // If we were looking for the key before, we should get the last key
+    closest = (Text) reader.getClosest(key, value, true);
+    assertEquals(new Text("91"), closest);
+  }
+  
+  /**
+   * test {@code MapFile.Reader.midKey() } method 
+   */
+  @Test
+  public void testMidKeyOnCurrentApi() throws Exception {
+    // Write a mapfile of simple data: keys are
+    final String TEST_PREFIX = "testMidKeyOnCurrentApi.mapfile";
+    MapFile.Writer writer = createWriter(TEST_PREFIX, IntWritable.class,
+        IntWritable.class);
+    // 0,1,....9
+    int SIZE = 10;
+    for (int i = 0; i < SIZE; i++)
+      writer.append(new IntWritable(i), new IntWritable(i));
+    writer.close();
+
+    MapFile.Reader reader = createReader(TEST_PREFIX, IntWritable.class);
+    assertEquals(new IntWritable((SIZE - 1) / 2), reader.midKey());
+  }
+  
+  /**
+   * test {@code MapFile.rename()} method 
+   */
+  @Test
+  public void testRename() {
+    final String NEW_FILE_NAME = "test-new.mapfile";
+    final String OLD_FILE_NAME = "test-old.mapfile";
+    try {
+      FileSystem fs = FileSystem.getLocal(conf);
+      MapFile.Writer writer = createWriter(OLD_FILE_NAME, IntWritable.class,
+          IntWritable.class);
+      writer.close();
+      MapFile.rename(fs, new Path(TEST_DIR, OLD_FILE_NAME).toString(), 
+          new Path(TEST_DIR, NEW_FILE_NAME).toString());
+      MapFile.delete(fs, new Path(TEST_DIR, NEW_FILE_NAME).toString());
+    } catch (IOException ex) {
+      fail("testRename error " + ex);
+    }
+  }
+  
+  /**
+   * test {@code MapFile.rename()} 
+   *  method with throwing {@code IOException}  
+   */
+  @Test
+  public void testRenameWithException() {
+    final String ERROR_MESSAGE = "Can't rename file";
+    final String NEW_FILE_NAME = "test-new.mapfile";
+    final String OLD_FILE_NAME = "test-old.mapfile";
+    try {
+      FileSystem fs = FileSystem.getLocal(conf);
+      FileSystem spyFs = spy(fs);
+
+      MapFile.Writer writer = createWriter(OLD_FILE_NAME, IntWritable.class,
+          IntWritable.class);
+      writer.close();
+
+      Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
+      Path newDir = new Path(TEST_DIR, NEW_FILE_NAME);
+      when(spyFs.rename(oldDir, newDir)).thenThrow(
+          new IOException(ERROR_MESSAGE));
+
+      MapFile.rename(spyFs, oldDir.toString(), newDir.toString());
+      fail("testRenameWithException no exception error !!!");
+    } catch (IOException ex) {
+      assertEquals("testRenameWithException invalid IOExceptionMessage !!!",
+          ex.getMessage(), ERROR_MESSAGE);
+    }
+  }
+
+  @Test
+  public void testRenameWithFalse() {
+    final String ERROR_MESSAGE = "Could not rename";
+    final String NEW_FILE_NAME = "test-new.mapfile";
+    final String OLD_FILE_NAME = "test-old.mapfile";
+    try {
+      FileSystem fs = FileSystem.getLocal(conf);
+      FileSystem spyFs = spy(fs);
+
+      MapFile.Writer writer = createWriter(OLD_FILE_NAME, IntWritable.class,
+          IntWritable.class);
+      writer.close();
+
+      Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME);
+      Path newDir = new Path(TEST_DIR, NEW_FILE_NAME);
+      when(spyFs.rename(oldDir, newDir)).thenReturn(false);
+
+      MapFile.rename(spyFs, oldDir.toString(), newDir.toString());
+      fail("testRenameWithException no exception error !!!");
+    } catch (IOException ex) {
+      assertTrue("testRenameWithFalse invalid IOExceptionMessage error !!!", ex
+          .getMessage().startsWith(ERROR_MESSAGE));
+    }
+  }
+  
+  /**
+   * test throwing {@code IOException} in {@code MapFile.Writer} constructor    
+   */
+  @Test
+  public void testWriteWithFailDirCreation() {
+    String ERROR_MESSAGE = "Mkdirs failed to create directory";
+    Path dirName = new Path(TEST_DIR, "fail.mapfile");
+    MapFile.Writer writer = null;
+    try {
+      FileSystem fs = FileSystem.getLocal(conf);
+      FileSystem spyFs = spy(fs);
+      Path pathSpy = spy(dirName);
+      when(pathSpy.getFileSystem(conf)).thenReturn(spyFs);
+      when(spyFs.mkdirs(dirName)).thenReturn(false);
+
+      writer = new MapFile.Writer(conf, pathSpy,
+          MapFile.Writer.keyClass(IntWritable.class),
+          MapFile.Writer.valueClass(Text.class));
+      fail("testWriteWithFailDirCreation error !!!");
+    } catch (IOException ex) {
+      assertTrue("testWriteWithFailDirCreation ex error !!!", ex.getMessage()
+          .startsWith(ERROR_MESSAGE));
+    } finally {
+      if (writer != null)
+        try {
+          writer.close();
+        } catch (IOException e) {
+        }
+    }
+  }
+
+  /**
+   * test {@code MapFile.Reader.finalKey()} method
+   */
+  @Test
+  public void testOnFinalKey() {
+    final String TEST_METHOD_KEY = "testOnFinalKey.mapfile";
+    int SIZE = 10;
+    try {
+      MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
+          IntWritable.class);
+      for (int i = 0; i < SIZE; i++)
+        writer.append(new IntWritable(i), new IntWritable(i));
+      writer.close();
+
+      MapFile.Reader reader = createReader(TEST_METHOD_KEY, IntWritable.class);
+      IntWritable expectedKey = new IntWritable(0);
+      reader.finalKey(expectedKey);
+      assertEquals("testOnFinalKey not same !!!", expectedKey, new IntWritable(
+          9));
+    } catch (IOException ex) {
+      fail("testOnFinalKey error !!!");
+    }
+  }
+  
+  /**
+   * test {@code MapFile.Writer} constructor with key, value
+   * and validate it with {@code keyClass(), valueClass()} methods 
+   */
+  @Test
+  public void testKeyValueClasses() {
+    Class<? extends WritableComparable<?>> keyClass = IntWritable.class;
+    Class<?> valueClass = Text.class;
+    try {
+      createWriter("testKeyValueClasses.mapfile", IntWritable.class, Text.class);
+      assertNotNull("writer key class null error !!!",
+          MapFile.Writer.keyClass(keyClass));
+      assertNotNull("writer value class null error !!!",
+          MapFile.Writer.valueClass(valueClass));
+    } catch (IOException ex) {
+      fail(ex.getMessage());
+    }
+  }
+  
+  /**
+   * test {@code MapFile.Reader.getClosest()} with a key of the wrong class
+   */
+  @Test
+  public void testReaderGetClosest() throws Exception {
+    final String TEST_METHOD_KEY = "testReaderWithWrongKeyClass.mapfile";
+    try {
+      MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
+          Text.class);
+
+      for (int i = 0; i < 10; i++)
+        writer.append(new IntWritable(i), new Text("value" + i));
+      writer.close();
+
+      MapFile.Reader reader = createReader(TEST_METHOD_KEY, Text.class);
+      reader.getClosest(new Text("2"), new Text(""));
+      fail("no excepted exception in testReaderWithWrongKeyClass !!!");
+    } catch (IOException ex) {
+      /* Should be thrown to pass the test */
+    }
+  }
+  
+  /**
+   * test {@code MapFile.Writer.append()} with wrong value class
+   */
+  @Test
+  public void testReaderWithWrongValueClass() {
+    final String TEST_METHOD_KEY = "testReaderWithWrongValueClass.mapfile";
+    try {
+      MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
+          Text.class);
+      writer.append(new IntWritable(0), new IntWritable(0));
+      fail("no excepted exception in testReaderWithWrongKeyClass !!!");
+    } catch (IOException ex) {
+      /* Should be thrown to pass the test */
+    }
+  }
+  
+  /**
+   * test {@code MapFile.Reader.next(key, value)} for iteration.
+   */
+  @Test
+  public void testReaderKeyIteration() {
+    final String TEST_METHOD_KEY = "testReaderKeyIteration.mapfile";
+    int SIZE = 10;
+    int ITERATIONS = 5;
+    try {
+      MapFile.Writer writer = createWriter(TEST_METHOD_KEY, IntWritable.class,
+          Text.class);
+      int start = 0;
+      for (int i = 0; i < SIZE; i++)
+        writer.append(new IntWritable(i), new Text("Value:" + i));
+      writer.close();
+
+      MapFile.Reader reader = createReader(TEST_METHOD_KEY, IntWritable.class);
+      // test iteration
+      Writable startValue = new Text("Value:" + start);
+      int i = 0;
+      while (i++ < ITERATIONS) {
+        IntWritable key = new IntWritable(start);
+        Writable value = startValue;
+        while (reader.next(key, value)) {
+          assertNotNull(key);
+          assertNotNull(value);
+        }
+        reader.reset();
+      }
+      assertTrue("reader seek error !!!",
+          reader.seek(new IntWritable(SIZE / 2)));
+      assertFalse("reader seek error !!!",
+          reader.seek(new IntWritable(SIZE * 2)));
+    } catch (IOException ex) {
+      fail("reader seek error !!!");
+    }
+  }
+
+  /**
+   * test {@code MapFile.fix()} method
+   */
+  @Test
+  public void testFix() {
+    final String INDEX_LESS_MAP_FILE = "testFix.mapfile";
+    int PAIR_SIZE = 20;
+    try {
+      FileSystem fs = FileSystem.getLocal(conf);
+      Path dir = new Path(TEST_DIR, INDEX_LESS_MAP_FILE);
+      MapFile.Writer writer = createWriter(INDEX_LESS_MAP_FILE,
+          IntWritable.class, Text.class);
+      for (int i = 0; i < PAIR_SIZE; i++)
+        writer.append(new IntWritable(0), new Text("value"));
+      writer.close();
+
+      File indexFile = new File(".", "." + INDEX_LESS_MAP_FILE + "/index");
+      boolean isDeleted = false;
+      if (indexFile.exists())
+        isDeleted = indexFile.delete();
+
+      if (isDeleted)
+        assertTrue("testFix error !!!",
+            MapFile.fix(fs, dir, IntWritable.class, Text.class, true, conf) == PAIR_SIZE);
+    } catch (Exception ex) {
+      fail("testFix error !!!");
+    }
+  }
+  /**
+   * test all available constructor for {@code MapFile.Writer}
+   */
+  @Test
+  @SuppressWarnings("deprecation")
+  public void testDeprecatedConstructors() {
+    String path = new Path(TEST_DIR, "writes.mapfile").toString();
+    try {
+      FileSystem fs = FileSystem.getLocal(conf);
+      MapFile.Writer writer = new MapFile.Writer(conf, fs, path,
+          IntWritable.class, Text.class, CompressionType.RECORD);
+      assertNotNull(writer);
+      writer = new MapFile.Writer(conf, fs, path, IntWritable.class,
+          Text.class, CompressionType.RECORD, defaultProgressable);
+      assertNotNull(writer);
+      writer = new MapFile.Writer(conf, fs, path, IntWritable.class,
+          Text.class, CompressionType.RECORD, defaultCodec, defaultProgressable);
+      assertNotNull(writer);
+      writer = new MapFile.Writer(conf, fs, path,
+          WritableComparator.get(Text.class), Text.class);
+      assertNotNull(writer);
+      writer = new MapFile.Writer(conf, fs, path,
+          WritableComparator.get(Text.class), Text.class,
+          SequenceFile.CompressionType.RECORD);
+      assertNotNull(writer);
+      writer = new MapFile.Writer(conf, fs, path,
+          WritableComparator.get(Text.class), Text.class,
+          CompressionType.RECORD, defaultProgressable);
+      assertNotNull(writer);
+      writer.close();
+
+      MapFile.Reader reader = new MapFile.Reader(fs, path,
+          WritableComparator.get(IntWritable.class), conf);
+      assertNotNull(reader);
+      assertNotNull("reader key is null !!!", reader.getKeyClass());
+      assertNotNull("reader value in null", reader.getValueClass());
+
+    } catch (IOException e) {
+      fail(e.getMessage());
+    }
+  }
+  
+  /**
+   * test {@code MapFile.Writer} constructor 
+   * with IllegalArgumentException  
+   *  
+   */
+  @Test
+  public void testKeyLessWriterCreation() {
+    MapFile.Writer writer = null;
+    try {
+      writer = new MapFile.Writer(conf, TEST_DIR);
+      fail("fail in testKeyLessWriterCreation !!!");
+    } catch (IllegalArgumentException ex) {
+    } catch (Exception e) {
+      fail("fail in testKeyLessWriterCreation. Other ex !!!");
+    } finally {
+      if (writer != null)
+        try {
+          writer.close();
+        } catch (IOException e) {
+        }
+    }
+  }
+  /**
+   * test {@code MapFile.Writer} constructor with IOException
+   */
+  @Test
+  public void testPathExplosionWriterCreation() {
+    Path path = new Path(TEST_DIR, "testPathExplosionWriterCreation.mapfile");
+    String TEST_ERROR_MESSAGE = "Mkdirs failed to create directory "
+        + path.getName();
+    MapFile.Writer writer = null;
+    try {
+      FileSystem fsSpy = spy(FileSystem.get(conf));
+      Path pathSpy = spy(path);
+      when(fsSpy.mkdirs(path)).thenThrow(new IOException(TEST_ERROR_MESSAGE));
+
+      when(pathSpy.getFileSystem(conf)).thenReturn(fsSpy);
+
+      writer = new MapFile.Writer(conf, pathSpy,
+          MapFile.Writer.keyClass(IntWritable.class),
+          MapFile.Writer.valueClass(IntWritable.class));
+      fail("fail in testPathExplosionWriterCreation !!!");
+    } catch (IOException ex) {
+      assertEquals("testPathExplosionWriterCreation ex message error !!!",
+          ex.getMessage(), TEST_ERROR_MESSAGE);
+    } catch (Exception e) {
+      fail("fail in testPathExplosionWriterCreation. Other ex !!!");
+    } finally {
+      if (writer != null)
+        try {
+          writer.close();
+        } catch (IOException e) {
+        }
+    }
+  }
+
+  /**
+   * test {@code MapFile.Writer.append} method with desc order  
+   */
+  @Test
+  public void testDescOrderWithThrowExceptionWriterAppend() {
+    try {
+      MapFile.Writer writer = createWriter(".mapfile", IntWritable.class,
+          Text.class);
+      writer.append(new IntWritable(2), new Text("value: " + 1));
+      writer.append(new IntWritable(2), new Text("value: " + 2));
+      writer.append(new IntWritable(2), new Text("value: " + 4));
+      writer.append(new IntWritable(1), new Text("value: " + 3));
+      fail("testDescOrderWithThrowExceptionWriterAppend not expected exception error !!!");
+    } catch (IOException ex) {
+    } catch (Exception e) {
+      fail("testDescOrderWithThrowExceptionWriterAppend other ex throw !!!");
+    }
+  }
+
+  @Test
+  public void testMainMethodMapFile() {
+    String path = new Path(TEST_DIR, "mainMethodMapFile.mapfile").toString();
+    String inFile = "mainMethodMapFile.mapfile";
+    String outFile = "mainMethodMapFile.mapfile";
+    String[] args = { path, outFile };
+    try {
+      MapFile.Writer writer = createWriter(inFile, IntWritable.class,
+          Text.class);
+      writer.append(new IntWritable(1), new Text("test_text1"));
+      writer.append(new IntWritable(2), new Text("test_text2"));
+      writer.close();
+      MapFile.main(args);
+    } catch (Exception ex) {
+      fail("testMainMethodMapFile error !!!");
+    }
+  }
+
   /**
    * Test getClosest feature.
+   * 
    * @throws Exception
    */
+  @Test
+  @SuppressWarnings("deprecation")
   public void testGetClosest() throws Exception {
-    // Write a mapfile of simple data: keys are 
-    Path dirName = new Path(System.getProperty("test.build.data",".") +
-      getName() + ".mapfile"); 
+    // Write a mapfile of simple data: keys are
+    Path dirName = new Path(TEST_DIR, "testGetClosest.mapfile");
     FileSystem fs = FileSystem.getLocal(conf);
     Path qualifiedDirName = fs.makeQualified(dirName);
     // Make an index entry for every third insertion.
     MapFile.Writer.setIndexInterval(conf, 3);
     MapFile.Writer writer = new MapFile.Writer(conf, fs,
-      qualifiedDirName.toString(), Text.class, Text.class);
+        qualifiedDirName.toString(), Text.class, Text.class);
     // Assert that the index interval is 1
     assertEquals(3, writer.getIndexInterval());
     // Add entries up to 100 in intervals of ten.
@@ -51,74 +614,84 @@ public class TestMapFile extends TestCase {
     }
     writer.close();
     // Now do getClosest on created mapfile.
-    MapFile.Reader reader = new MapFile.Reader(fs, qualifiedDirName.toString(),
-      conf);
+    MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
+    try {
     Text key = new Text("55");
     Text value = new Text();
-    Text closest = (Text)reader.getClosest(key, value);
+    Text closest = (Text) reader.getClosest(key, value);
     // Assert that closest after 55 is 60
     assertEquals(new Text("60"), closest);
     // Get closest that falls before the passed key: 50
-    closest = (Text)reader.getClosest(key, value, true);
+    closest = (Text) reader.getClosest(key, value, true);
     assertEquals(new Text("50"), closest);
     // Test get closest when we pass explicit key
     final Text TWENTY = new Text("20");
-    closest = (Text)reader.getClosest(TWENTY, value);
+    closest = (Text) reader.getClosest(TWENTY, value);
     assertEquals(TWENTY, closest);
-    closest = (Text)reader.getClosest(TWENTY, value, true);
+    closest = (Text) reader.getClosest(TWENTY, value, true);
     assertEquals(TWENTY, closest);
-    // Test what happens at boundaries.  Assert if searching a key that is
+    // Test what happens at boundaries. Assert if searching a key that is
     // less than first key in the mapfile, that the first key is returned.
     key = new Text("00");
-    closest = (Text)reader.getClosest(key, value);
+    closest = (Text) reader.getClosest(key, value);
     assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));
-    
-    // If we're looking for the first key before, and we pass in a key before 
+
+    // If we're looking for the first key before, and we pass in a key before
     // the first key in the file, we should get null
-    closest = (Text)reader.getClosest(key, value, true);
+    closest = (Text) reader.getClosest(key, value, true);
     assertNull(closest);
-    
+
     // Assert that null is returned if key is > last entry in mapfile.
     key = new Text("99");
-    closest = (Text)reader.getClosest(key, value);
+    closest = (Text) reader.getClosest(key, value);
     assertNull(closest);
 
     // If we were looking for the key before, we should get the last key
-    closest = (Text)reader.getClosest(key, value, true);
+    closest = (Text) reader.getClosest(key, value, true);
     assertEquals(new Text("90"), closest);
+    } finally {
+      reader.close();
+    }
   }
 
+  @Test
+  @SuppressWarnings("deprecation")
   public void testMidKey() throws Exception {
-    // Write a mapfile of simple data: keys are 
-    Path dirName = new Path(System.getProperty("test.build.data",".") +
-      getName() + ".mapfile"); 
+    // Write a mapfile of simple data: keys are
+    Path dirName = new Path(TEST_DIR, "testMidKey.mapfile");
     FileSystem fs = FileSystem.getLocal(conf);
     Path qualifiedDirName = fs.makeQualified(dirName);
- 
+
     MapFile.Writer writer = new MapFile.Writer(conf, fs,
-      qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
+        qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
     writer.append(new IntWritable(1), new IntWritable(1));
     writer.close();
     // Now do getClosest on created mapfile.
-    MapFile.Reader reader = new MapFile.Reader(fs, qualifiedDirName.toString(),
-      conf);
-    assertEquals(new IntWritable(1), reader.midKey());
+    MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
+    try {
+      assertEquals(new IntWritable(1), reader.midKey());
+    } finally {
+      reader.close();
+    }
   }
 
-
+  @Test
+  @SuppressWarnings("deprecation")
   public void testMidKeyEmpty() throws Exception {
-    // Write a mapfile of simple data: keys are 
-    Path dirName = new Path(System.getProperty("test.build.data",".") +
-      getName() + ".mapfile"); 
+    // Write a mapfile of simple data: keys are
+    Path dirName = new Path(TEST_DIR, "testMidKeyEmpty.mapfile");
     FileSystem fs = FileSystem.getLocal(conf);
     Path qualifiedDirName = fs.makeQualified(dirName);
- 
+
     MapFile.Writer writer = new MapFile.Writer(conf, fs,
-      qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
+        qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
     writer.close();
     // Now do getClosest on created mapfile.
-    MapFile.Reader reader = new MapFile.Reader(fs, qualifiedDirName.toString(),
-      conf);
-    assertEquals(null, reader.midKey());
+    MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
+    try {
+      assertEquals(null, reader.midKey()); 
+    } finally {
+      reader.close();
+    }
   }
 }
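
Most of the new coverage above targets the option-based MapFile.Writer/Reader constructors; the FileSystem/String-path variants are the ones flagged with @SuppressWarnings("deprecation"). A minimal sketch of the non-deprecated API, mirroring the createWriter/createReader helpers in this test (the path is illustrative only):

    Path dir = new Path("/tmp/example.mapfile");             // hypothetical location
    MapFile.Writer writer = new MapFile.Writer(conf, dir,
        MapFile.Writer.keyClass(IntWritable.class),
        MapFile.Writer.valueClass(Text.class));
    writer.append(new IntWritable(1), new Text("one"));      // keys must arrive in ascending order
    writer.close();

    MapFile.Reader reader = new MapFile.Reader(dir, conf);
    Text value = new Text();
    reader.get(new IntWritable(1), value);                   // keyed lookup via the index
    reader.close();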

+ 35 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSetFile.java

@@ -20,6 +20,8 @@ package org.apache.hadoop.io;
 
 import java.io.*;
 import java.util.*;
+import java.util.concurrent.atomic.AtomicReference;
+
 import junit.framework.TestCase;
 
 import org.apache.commons.logging.*;
@@ -51,6 +53,39 @@ public class TestSetFile extends TestCase {
       fs.close();
     }
   }
+  
+  /**
+   * test {@code SetFile.Reader} methods
+   * {@code next()} and {@code get()} in combination
+   */
+  public void testSetFileAccessMethods() {    
+    try {             
+      FileSystem fs = FileSystem.getLocal(conf);
+      int size = 10;
+      writeData(fs, size);
+      SetFile.Reader reader = createReader(fs);
+      assertTrue("testSetFileWithConstruction1 error !!!", reader.next(new IntWritable(0)));
+      // don't know why reader.get(i) return i+1
+      assertEquals("testSetFileWithConstruction2 error !!!", new IntWritable(size/2 + 1), reader.get(new IntWritable(size/2)));      
+      assertNull("testSetFileWithConstruction3 error !!!", reader.get(new IntWritable(size*2)));
+    } catch (Exception ex) {
+      fail("testSetFileWithConstruction error !!!");    
+    }
+  }
+
+  private SetFile.Reader createReader(FileSystem fs) throws IOException  {
+    return new SetFile.Reader(fs, FILE, 
+        WritableComparator.get(IntWritable.class), conf);    
+  }
+  
+  @SuppressWarnings("deprecation")
+  private void writeData(FileSystem fs, int elementSize) throws IOException {
+    MapFile.delete(fs, FILE);    
+    SetFile.Writer writer = new SetFile.Writer(fs, FILE, IntWritable.class);
+    for (int i = 0; i < elementSize; i++)
+      writer.append(new IntWritable(i));
+    writer.close();    
+  }
 
   private static RandomDatum[] generate(int count) {
     LOG.info("generating " + count + " records in memory");

+ 77 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java

@@ -19,11 +19,12 @@
 package org.apache.hadoop.io;
 
 import junit.framework.TestCase;
-
 import java.io.IOException;
+import java.nio.BufferUnderflowException;
 import java.nio.ByteBuffer;
 import java.nio.charset.CharacterCodingException;
 import java.util.Random;
+import com.google.common.primitives.Bytes;
 
 /** Unit tests for LargeUTF8. */
 public class TestText extends TestCase {
@@ -321,7 +322,81 @@ public class TestText extends TestCase {
       (new Text("foo"),
        "{\"type\":\"string\",\"java-class\":\"org.apache.hadoop.io.Text\"}");
   }
-
+  
+  /**
+   * test {@code Text.charAt()} method
+   */
+  public void testCharAt() {
+    String line = "adsawseeeeegqewgasddga";
+    Text text = new Text(line);
+    for (int i = 0; i < line.length(); i++) {
+      assertTrue("testCharAt error1 !!!", text.charAt(i) == line.charAt(i));
+    }    
+    assertEquals("testCharAt error2 !!!", -1, text.charAt(-1));    
+    assertEquals("testCharAt error3 !!!", -1, text.charAt(100));
+  }    
+  
+  /**
+   * test {@code Text} readFields/write operations
+   */
+  public void testReadWriteOperations() {
+    String line = "adsawseeeeegqewgasddga";
+    byte[] inputBytes = line.getBytes();       
+    inputBytes = Bytes.concat(new byte[] {(byte)22}, inputBytes);        
+    
+    DataInputBuffer in = new DataInputBuffer();
+    DataOutputBuffer out = new DataOutputBuffer();
+    Text text = new Text(line);
+    try {      
+      in.reset(inputBytes, inputBytes.length);
+      text.readFields(in);      
+    } catch(Exception ex) {
+      fail("testReadFields error !!!");
+    }    
+    try {
+      text.write(out);
+    } catch(IOException ex) {      
+    } catch(Exception ex) {
+      fail("testReadWriteOperations error !!!");
+    }        
+  }
+  
+  /**
+   * test {@code Text.bytesToCodePoint(bytes) } 
+   * with {@code BufferUnderflowException}
+   * 
+   */
+  public void testBytesToCodePoint() {
+    try {
+      ByteBuffer bytes = ByteBuffer.wrap(new byte[] {-2, 45, 23, 12, 76, 89});                                      
+      Text.bytesToCodePoint(bytes);      
+      assertTrue("testBytesToCodePoint error !!!", bytes.position() == 6 );                      
+    } catch (BufferUnderflowException ex) {
+      fail("testBytesToCodePoint unexp exception");
+    } catch (Exception e) {
+      fail("testBytesToCodePoint unexp exception");
+    }    
+  }
+  
+  public void testbytesToCodePointWithInvalidUTF() {
+    try {                 
+      Text.bytesToCodePoint(ByteBuffer.wrap(new byte[] {-2}));
+      fail("testbytesToCodePointWithInvalidUTF error unexp exception !!!");
+    } catch (BufferUnderflowException ex) {      
+    } catch(Exception e) {
+      fail("testbytesToCodePointWithInvalidUTF error unexp exception !!!");
+    }
+  }
+  
+  public void testUtf8Length() {         
+    assertEquals("testUtf8Length1 error   !!!", 1, Text.utf8Length(new String(new char[]{(char)1})));
+    assertEquals("testUtf8Length127 error !!!", 1, Text.utf8Length(new String(new char[]{(char)127})));
+    assertEquals("testUtf8Length128 error !!!", 2, Text.utf8Length(new String(new char[]{(char)128})));
+    assertEquals("testUtf8Length193 error !!!", 2, Text.utf8Length(new String(new char[]{(char)193})));    
+    assertEquals("testUtf8Length225 error !!!", 2, Text.utf8Length(new String(new char[]{(char)225})));
+    assertEquals("testUtf8Length254 error !!!", 2, Text.utf8Length(new String(new char[]{(char)254})));                 
+  }
+  
   public static void main(String[] args)  throws Exception
   {
     TestText test = new TestText("main");

+ 342 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/snappy/TestSnappyCompressorDecompressor.java

@@ -0,0 +1,342 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.compress.snappy;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.lang.reflect.Array;
+import java.util.Random;
+
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.compress.BlockCompressorStream;
+import org.apache.hadoop.io.compress.BlockDecompressorStream;
+import org.apache.hadoop.io.compress.CompressionInputStream;
+import org.apache.hadoop.io.compress.CompressionOutputStream;
+import org.apache.hadoop.io.compress.SnappyCodec;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assume.*;
+
+public class TestSnappyCompressorDecompressor {
+
+  @Before
+  public void before() {
+    assumeTrue(SnappyCodec.isNativeCodeLoaded());
+  }
+
+  @Test
+  public void testSnappyCompressorSetInputNullPointerException() {
+    try {
+      SnappyCompressor compressor = new SnappyCompressor();
+      compressor.setInput(null, 0, 10);
+      fail("testSnappyCompressorSetInputNullPointerException error !!!");
+    } catch (NullPointerException ex) {
+      // expected
+    } catch (Exception ex) {
+      fail("testSnappyCompressorSetInputNullPointerException ex error !!!");
+    }
+  }
+
+  @Test
+  public void testSnappyDecompressorSetInputNullPointerException() {
+    try {
+      SnappyDecompressor decompressor = new SnappyDecompressor();
+      decompressor.setInput(null, 0, 10);
+      fail("testSnappyDecompressorSetInputNullPointerException error !!!");
+    } catch (NullPointerException ex) {
+      // expected
+    } catch (Exception e) {
+      fail("testSnappyDecompressorSetInputNullPointerException ex error !!!");
+    }
+  }
+
+  @Test
+  public void testSnappyCompressorSetInputAIOBException() {
+    try {
+      SnappyCompressor compressor = new SnappyCompressor();
+      compressor.setInput(new byte[] {}, -5, 10);
+      fail("testSnappyCompressorSetInputAIOBException error !!!");
+    } catch (ArrayIndexOutOfBoundsException ex) {
+      // expected
+    } catch (Exception ex) {
+      fail("testSnappyCompressorSetInputAIOBException ex error !!!");
+    }
+  }
+
+  @Test
+  public void testSnappyDecompressorSetInputAIOUBException() {
+    try {
+      SnappyDecompressor decompressor = new SnappyDecompressor();
+      decompressor.setInput(new byte[] {}, -5, 10);
+      fail("testSnappyDecompressorSetInputAIOUBException error !!!");
+    } catch (ArrayIndexOutOfBoundsException ex) {
+      // expected
+    } catch (Exception e) {
+      fail("testSnappyDecompressorSetInputAIOUBException ex error !!!");
+    }
+  }
+
+  @Test
+  public void testSnappyCompressorCompressNullPointerException() {
+    try {
+      SnappyCompressor compressor = new SnappyCompressor();
+      byte[] bytes = BytesGenerator.get(1024 * 6);
+      compressor.setInput(bytes, 0, bytes.length);
+      compressor.compress(null, 0, 0);
+      fail("testSnappyCompressorCompressNullPointerException error !!!");
+    } catch (NullPointerException ex) {
+      // expected
+    } catch (Exception e) {
+      fail("testSnappyCompressorCompressNullPointerException ex error !!!");
+    }
+  }
+
+  @Test
+  public void testSnappyDecompressorCompressNullPointerException() {
+    try {
+      SnappyDecompressor decompressor = new SnappyDecompressor();
+      byte[] bytes = BytesGenerator.get(1024 * 6);
+      decompressor.setInput(bytes, 0, bytes.length);
+      decompressor.decompress(null, 0, 0);
+      fail("testSnappyDecompressorCompressNullPointerException error !!!");
+    } catch (NullPointerException ex) {
+      // expected
+    } catch (Exception e) {
+      fail("testSnappyDecompressorCompressNullPointerException ex error !!!");
+    }
+  }
+
+  @Test
+  public void testSnappyCompressorCompressAIOBException() {
+    try {
+      SnappyCompressor compressor = new SnappyCompressor();
+      byte[] bytes = BytesGenerator.get(1024 * 6);
+      compressor.setInput(bytes, 0, bytes.length);
+      compressor.compress(new byte[] {}, 0, -1);
+      fail("testSnappyCompressorCompressAIOBException error !!!");
+    } catch (ArrayIndexOutOfBoundsException ex) {
+      // expected
+    } catch (Exception e) {
+      fail("testSnappyCompressorCompressAIOBException ex error !!!");
+    }
+  }
+
+  @Test
+  public void testSnappyDecompressorCompressAIOBException() {
+    try {
+      SnappyDecompressor decompressor = new SnappyDecompressor();
+      byte[] bytes = BytesGenerator.get(1024 * 6);
+      decompressor.setInput(bytes, 0, bytes.length);
+      decompressor.decompress(new byte[] {}, 0, -1);
+      fail("testSnappyDecompressorCompressAIOBException error !!!");
+    } catch (ArrayIndexOutOfBoundsException ex) {
+      // expected
+    } catch (Exception e) {
+      fail("testSnappyDecompressorCompressAIOBException ex error !!!");
+    }
+  }    
+
+  @Test
+  public void testSnappyCompressDecompress() {
+    int BYTE_SIZE = 1024 * 54;
+    byte[] bytes = BytesGenerator.get(BYTE_SIZE);
+    SnappyCompressor compressor = new SnappyCompressor();
+    try {
+      compressor.setInput(bytes, 0, bytes.length);
+      assertTrue("SnappyCompressDecompress getBytesRead error !!!",
+          compressor.getBytesRead() > 0);
+      assertTrue(
+          "SnappyCompressDecompress getBytesWritten before compress error !!!",
+          compressor.getBytesWritten() == 0);
+
+      byte[] compressed = new byte[BYTE_SIZE];
+      int cSize = compressor.compress(compressed, 0, compressed.length);
+      assertTrue(
+          "SnappyCompressDecompress getBytesWritten after compress error !!!",
+          compressor.getBytesWritten() > 0);
+
+      SnappyDecompressor decompressor = new SnappyDecompressor(BYTE_SIZE);
+      // set only the compressed data (cSize bytes) as input for the decompressor
+      decompressor.setInput(compressed, 0, cSize);
+      byte[] decompressed = new byte[BYTE_SIZE];
+      decompressor.decompress(decompressed, 0, decompressed.length);
+
+      assertTrue("testSnappyCompressDecompress finished error !!!",
+          decompressor.finished());
+      Assert.assertArrayEquals(bytes, decompressed);
+      compressor.reset();
+      decompressor.reset();
+      assertTrue("decompressor getRemaining error !!!",
+          decompressor.getRemaining() == 0);
+    } catch (Exception e) {
+      fail("testSnappyCompressDecompress ex error!!!");
+    }
+  }
+
+  @Test
+  public void testCompressorDecompressorEmptyStreamLogic() {
+    ByteArrayInputStream bytesIn = null;
+    ByteArrayOutputStream bytesOut = null;
+    byte[] buf = null;
+    BlockDecompressorStream blockDecompressorStream = null;
+    try {
+      // compress empty stream
+      bytesOut = new ByteArrayOutputStream();
+      BlockCompressorStream blockCompressorStream = new BlockCompressorStream(
+          bytesOut, new SnappyCompressor(), 1024, 0);
+      // close without write
+      blockCompressorStream.close();
+
+      // check compressed output
+      buf = bytesOut.toByteArray();
+      assertEquals("empty stream compressed output size != 4", 4, buf.length);
+
+      // use compressed output as input for decompression
+      bytesIn = new ByteArrayInputStream(buf);
+
+      // create decompression stream
+      blockDecompressorStream = new BlockDecompressorStream(bytesIn,
+          new SnappyDecompressor(), 1024);
+
+      // no byte is available because the stream was closed without any data written
+      assertEquals("return value is not -1", -1, blockDecompressorStream.read());
+    } catch (Exception e) {
+      fail("testCompressorDecompressorEmptyStreamLogic ex error !!!"
+          + e.getMessage());
+    } finally {
+      if (blockDecompressorStream != null)
+        try {
+          bytesIn.close();
+          bytesOut.close();
+          blockDecompressorStream.close();
+        } catch (IOException e) {
+        }
+    }
+  }
+
+  @Test
+  public void testSnappyBlockCompression() {
+    int BYTE_SIZE = 1024 * 50;
+    int BLOCK_SIZE = 512;
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    byte[] block = new byte[BLOCK_SIZE];
+    byte[] bytes = BytesGenerator.get(BYTE_SIZE);
+    try {
+      // Use default of 512 as bufferSize and compressionOverhead of
+      // (1% of bufferSize + 12 bytes) = 18 bytes (zlib algorithm).
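+      // (for a 512-byte buffer that is 512 / 100 + 12 = 17 bytes, rounded up
+      // to the 18 bytes subtracted from BLOCK_SIZE below)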
+      SnappyCompressor compressor = new SnappyCompressor();
+      int off = 0;
+      int len = BYTE_SIZE;
+      int maxSize = BLOCK_SIZE - 18;
+      if (BYTE_SIZE > maxSize) {
+        do {
+          int bufLen = Math.min(len, maxSize);
+          compressor.setInput(bytes, off, bufLen);
+          compressor.finish();
+          while (!compressor.finished()) {
+            compressor.compress(block, 0, block.length);
+            out.write(block);
+          }
+          compressor.reset();
+          off += bufLen;
+          len -= bufLen;
+        } while (len > 0);
+      }
+      assertTrue("testSnappyBlockCompression error !!!",
+          out.toByteArray().length > 0);
+    } catch (Exception ex) {
+      fail("testSnappyBlockCompression ex error !!!");
+    }
+  }
+
+  @Test
+  public void testSnappyCompressorDecompressorLogicWithCompressionStreams() {
+    int BYTE_SIZE = 1024 * 100;
+    byte[] bytes = BytesGenerator.get(BYTE_SIZE);
+    int bufferSize = 262144;
+    int compressionOverhead = (bufferSize / 6) + 32;
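+    // bufferSize / 6 + 32 mirrors snappy's documented worst-case expansion
+    // (maxCompressedLength is roughly 32 + length + length / 6)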
+    DataOutputStream deflateOut = null;
+    DataInputStream inflateIn = null;
+    try {
+      DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
+      CompressionOutputStream deflateFilter = new BlockCompressorStream(
+          compressedDataBuffer, new SnappyCompressor(bufferSize), bufferSize,
+          compressionOverhead);
+      deflateOut = new DataOutputStream(new BufferedOutputStream(deflateFilter));
+
+      deflateOut.write(bytes, 0, bytes.length);
+      deflateOut.flush();
+      deflateFilter.finish();
+
+      DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
+      deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
+          compressedDataBuffer.getLength());
+
+      CompressionInputStream inflateFilter = new BlockDecompressorStream(
+          deCompressedDataBuffer, new SnappyDecompressor(bufferSize),
+          bufferSize);
+
+      inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter));
+
+      byte[] result = new byte[BYTE_SIZE];
+      inflateIn.read(result);
+
+      Assert.assertArrayEquals(
+          "original array not equals compress/decompressed array", result,
+          bytes);
+    } catch (IOException e) {
+      fail("testSnappyCompressorDecopressorLogicWithCompressionStreams ex error !!!");
+    } finally {
+      try {
+        if (deflateOut != null)
+          deflateOut.close();
+        if (inflateIn != null)
+          inflateIn.close();
+      } catch (Exception e) {
+      }
+    }
+  }
+
+  static final class BytesGenerator {
+    private BytesGenerator() {
+    }
+
+    private static final byte[] CACHE = new byte[] { 0x0, 0x1, 0x2, 0x3, 0x4,
+        0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF };
+    private static final Random rnd = new Random(12345l);
+
+    public static byte[] get(int size) {
+      byte[] array = (byte[]) Array.newInstance(byte.class, size);
+      for (int i = 0; i < size; i++)
+        array[i] = CACHE[rnd.nextInt(CACHE.length)];
+      return array;
+    }
+  }
+}

+ 39 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java

@@ -259,6 +259,45 @@ public class TestNativeIO {
     File testFile = new File(TEST_DIR, "testfileaccess");
     assertTrue(testFile.createNewFile());
 
+    // Validate ACCESS_READ
+    FileUtil.setReadable(testFile, false);
+    assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_READ));
+
+    FileUtil.setReadable(testFile, true);
+    assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_READ));
+
+    // Validate ACCESS_WRITE
+    FileUtil.setWritable(testFile, false);
+    assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_WRITE));
+
+    FileUtil.setWritable(testFile, true);
+    assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_WRITE));
+
+    // Validate ACCESS_EXECUTE
+    FileUtil.setExecutable(testFile, false);
+    assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_EXECUTE));
+
+    FileUtil.setExecutable(testFile, true);
+    assertTrue(NativeIO.Windows.access(testFile.getAbsolutePath(),
+        NativeIO.Windows.AccessRight.ACCESS_EXECUTE));
+
+    // Validate that access checks work as expected for long paths
+
+    // Assemble a path longer than 260 chars (MAX_PATH)
+    String testFileRelativePath = "";
+    for (int i = 0; i < 15; ++i) {
+      testFileRelativePath += "testfileaccessfolder\\";
+    }
+    testFileRelativePath += "testfileaccess";
+    testFile = new File(TEST_DIR, testFileRelativePath);
+    assertTrue(testFile.getParentFile().mkdirs());
+    assertTrue(testFile.createNewFile());
+
     // Validate ACCESS_READ
     FileUtil.setReadable(testFile, false);
     assertFalse(NativeIO.Windows.access(testFile.getAbsolutePath(),

+ 6 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java

@@ -152,4 +152,10 @@ public class StaticMapping extends AbstractDNSToSwitchMapping  {
     // reloadCachedMappings does nothing for StaticMapping; there is
     // nowhere to reload from since all data is in memory.
   }
+
+  @Override
+  public void reloadCachedMappings(List<String> names) {
+    // reloadCachedMappings does nothing for StaticMapping; there is
+    // nowhere to reload from since all data is in memory.
+  }
 }

+ 4 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSwitchMapping.java

@@ -120,5 +120,9 @@ public class TestSwitchMapping extends Assert {
     @Override
     public void reloadCachedMappings() {
     }
+
+    @Override
+    public void reloadCachedMappings(List<String> names) {
+    }
   }
 }

+ 44 - 17
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java

@@ -16,11 +16,21 @@
  */
 package org.apache.hadoop.security;
 
-import static org.junit.Assert.*;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.TestSaslRPC;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.Shell;
 import org.junit.*;
 
-import static org.mockito.Mockito.*;
-
+import javax.security.auth.Subject;
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.LoginContext;
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
@@ -30,21 +40,13 @@ import java.util.Collection;
 import java.util.LinkedHashSet;
 import java.util.Set;
 
-import javax.security.auth.Subject;
-import javax.security.auth.login.AppConfigurationEntry;
-import javax.security.auth.login.LoginContext;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.metrics2.MetricsRecordBuilder;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.apache.hadoop.security.authentication.util.KerberosName;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import static org.apache.hadoop.test.MetricsAsserts.*;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
-import org.apache.hadoop.util.Shell;
+import static org.apache.hadoop.ipc.TestSaslRPC.*;
+import static org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
+import static org.apache.hadoop.test.MetricsAsserts.*;
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 public class TestUserGroupInformation {
   final private static String USER_NAME = "user1@HADOOP.APACHE.ORG";
@@ -786,4 +788,29 @@ public class TestUserGroupInformation {
     UserGroupInformation.setLoginUser(ugi);
     assertEquals(ugi, UserGroupInformation.getLoginUser());
   }
+
+  /**
+   * In some scenarios, such as HA, delegation tokens are associated with a
+   * logical name. The tokens are cloned and are associated with the
+   * physical address of the server where the service is provided.
+   * This test ensures that cloned delegation tokens are only used locally
+   * and are not returned by {@link UserGroupInformation#getCredentials()}.
+   */
+  @Test
+  public void testPrivateTokenExclusion() throws Exception  {
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    TestTokenIdentifier tokenId = new TestTokenIdentifier();
+    Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(
+            tokenId.getBytes(), "password".getBytes(),
+            tokenId.getKind(), null);
+    ugi.addToken(new Text("regular-token"), token);
+
+    // Now add cloned private token
+    ugi.addToken(new Text("private-token"), new Token.PrivateToken<TestTokenIdentifier>(token));
+    ugi.addToken(new Text("private-token1"), new Token.PrivateToken<TestTokenIdentifier>(token));
+
+    // Ensure only non-private tokens are returned
+    Collection<Token<? extends TokenIdentifier>> tokens = ugi.getCredentials().getAllTokens();
+    assertEquals(1, tokens.size());
+  }
 }

+ 137 - 45
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/KeyStoreTestUtil.java

@@ -145,6 +145,28 @@ public class KeyStoreTestUtil {
     saveKeyStore(ks, filename, password);
   }
 
+  /**
+   * Creates a keystore with a single key and saves it to a file.
+   * 
+   * @param filename String file to save
+   * @param password String store password to set on keystore
+   * @param keyPassword String key password to set on key
+   * @param alias String alias to use for the key
+   * @param privateKey Key to save in keystore
+   * @param cert Certificate to use as certificate chain associated to key
+   * @throws GeneralSecurityException for any error with the security APIs
+   * @throws IOException if there is an I/O error saving the file
+   */
+  public static void createKeyStore(String filename,
+                                    String password, String keyPassword, String alias,
+                                    Key privateKey, Certificate cert)
+    throws GeneralSecurityException, IOException {
+    KeyStore ks = createEmptyKeyStore();
+    ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(),
+                   new Certificate[]{cert});
+    saveKeyStore(ks, filename, password);
+  }
+
   public static void createTrustStore(String filename,
                                       String password, String alias,
                                       Certificate cert)
@@ -178,6 +200,19 @@ public class KeyStoreTestUtil {
     f.delete();
   }
 
+  /**
+   * Performs complete setup of SSL configuration in preparation for testing an
+   * SSLFactory.  This includes keys, certs, keystores, truststores, the server
+   * SSL configuration file, the client SSL configuration file, and the master
+   * configuration file read by the SSLFactory.
+   * 
+   * @param keystoresDir String directory to save keystores
+   * @param sslConfDir String directory to save SSL configuration files
+   * @param conf Configuration master configuration to be used by an SSLFactory,
+   *   which will be mutated by this method
+   * @param useClientCert boolean true to make the client present a cert in the
+   *   SSL handshake
+   */
   public static void setupSSLConfig(String keystoresDir, String sslConfDir,
                                     Configuration conf, boolean useClientCert)
     throws Exception {
@@ -213,58 +248,115 @@ public class KeyStoreTestUtil {
 
     KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs);
 
-    Configuration clientSSLConf = new Configuration(false);
-    clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.CLIENT,
-      FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY), clientKS);
-    clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.CLIENT,
-      FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY), clientPassword);
-    clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.CLIENT,
-      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY), trustKS);
-    clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.CLIENT,
-      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), trustPassword);
-    clientSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.CLIENT,
-      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000");
+    Configuration clientSSLConf = createClientSSLConfig(clientKS, clientPassword,
+      clientPassword, trustKS);
+    Configuration serverSSLConf = createServerSSLConfig(serverKS, serverPassword,
+      serverPassword, trustKS);
 
-    Configuration serverSSLConf = new Configuration(false);
-    serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.SERVER,
-      FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY), serverKS);
-    serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.SERVER,
-      FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY), serverPassword);
-    serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.SERVER,
-      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY), trustKS);
-    serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.SERVER,
-      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), trustPassword);
-    serverSSLConf.set(FileBasedKeyStoresFactory.resolvePropertyName(
-      SSLFactory.Mode.SERVER,
-      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000");
+    saveConfig(sslClientConfFile, clientSSLConf);
+    saveConfig(sslServerConfFile, serverSSLConf);
 
-    Writer writer = new FileWriter(sslClientConfFile);
-    try {
-      clientSSLConf.writeXml(writer);
-    } finally {
-      writer.close();
+    conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "ALLOW_ALL");
+    conf.set(SSLFactory.SSL_CLIENT_CONF_KEY, sslClientConfFile.getName());
+    conf.set(SSLFactory.SSL_SERVER_CONF_KEY, sslServerConfFile.getName());
+    conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, useClientCert);
+  }
+
+  /**
+   * Creates SSL configuration for a client.
+   * 
+   * @param clientKS String client keystore file
+   * @param password String store password, or null to avoid setting store
+   *   password
+   * @param keyPassword String key password, or null to avoid setting key
+   *   password
+   * @param trustKS String truststore file
+   * @return Configuration for client SSL
+   */
+  public static Configuration createClientSSLConfig(String clientKS,
+      String password, String keyPassword, String trustKS) {
+    Configuration clientSSLConf = createSSLConfig(SSLFactory.Mode.CLIENT,
+      clientKS, password, keyPassword, trustKS);
+    return clientSSLConf;
+  }
+
+  /**
+   * Creates SSL configuration for a server.
+   * 
+   * @param serverKS String server keystore file
+   * @param password String store password, or null to avoid setting store
+   *   password
+   * @param keyPassword String key password, or null to avoid setting key
+   *   password
+   * @param trustKS String truststore file
+   * @return Configuration for server SSL
+   */
+  public static Configuration createServerSSLConfig(String serverKS,
+      String password, String keyPassword, String trustKS) throws IOException {
+    Configuration serverSSLConf = createSSLConfig(SSLFactory.Mode.SERVER,
+      serverKS, password, keyPassword, trustKS);
+    return serverSSLConf;
+  }
+
+  /**
+   * Creates SSL configuration.
+   * 
+   * @param mode SSLFactory.Mode mode to configure
+   * @param keystore String keystore file
+   * @param password String store password, or null to avoid setting store
+   *   password
+   * @param keyPassword String key password, or null to avoid setting key
+   *   password
+   * @param trustKS String truststore file
+   * @return Configuration for SSL
+   */
+  private static Configuration createSSLConfig(SSLFactory.Mode mode,
+      String keystore, String password, String keyPassword, String trustKS) {
+    String trustPassword = "trustP";
+
+    Configuration sslConf = new Configuration(false);
+    if (keystore != null) {
+      sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
+        FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY), keystore);
+    }
+    if (password != null) {
+      sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
+        FileBasedKeyStoresFactory.SSL_KEYSTORE_PASSWORD_TPL_KEY), password);
+    }
+    if (keyPassword != null) {
+      sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
+        FileBasedKeyStoresFactory.SSL_KEYSTORE_KEYPASSWORD_TPL_KEY),
+        keyPassword);
+    }
+    if (trustKS != null) {
+      sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
+        FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY), trustKS);
     }
+    if (trustPassword != null) {
+      sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
+        FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY),
+        trustPassword);
+    }
+    sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode,
+      FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000");
+
+    return sslConf;
+  }
 
-    writer = new FileWriter(sslServerConfFile);
+  /**
+   * Saves configuration to a file.
+   * 
+   * @param file File to save
+   * @param conf Configuration contents to write to file
+   * @throws IOException if there is an I/O error saving the file
+   */
+  public static void saveConfig(File file, Configuration conf)
+      throws IOException {
+    Writer writer = new FileWriter(file);
     try {
-      serverSSLConf.writeXml(writer);
+      conf.writeXml(writer);
     } finally {
       writer.close();
     }
-
-    conf.set(SSLFactory.SSL_HOSTNAME_VERIFIER_KEY, "ALLOW_ALL");
-    conf.set(SSLFactory.SSL_CLIENT_CONF_KEY, sslClientConfFile.getName());
-    conf.set(SSLFactory.SSL_SERVER_CONF_KEY, sslServerConfFile.getName());
-    conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, useClientCert);
   }
-
 }

+ 97 - 6
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestSSLFactory.java

@@ -29,12 +29,19 @@ import javax.net.ssl.HttpsURLConnection;
 import java.io.File;
 import java.net.URL;
 import java.security.GeneralSecurityException;
+import java.security.KeyPair;
+import java.security.cert.X509Certificate;
+import java.util.Collections;
+import java.util.Map;
 
 public class TestSSLFactory {
 
   private static final String BASEDIR =
     System.getProperty("test.build.dir", "target/test-dir") + "/" +
     TestSSLFactory.class.getSimpleName();
+  private static final String KEYSTORES_DIR =
+    new File(BASEDIR).getAbsolutePath();
+  private String sslConfsDir;
 
   @BeforeClass
   public static void setUp() throws Exception {
@@ -46,18 +53,16 @@ public class TestSSLFactory {
   private Configuration createConfiguration(boolean clientCert)
     throws Exception {
     Configuration conf = new Configuration();
-    String keystoresDir = new File(BASEDIR).getAbsolutePath();
-    String sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class);
-    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfsDir, conf, clientCert);
+    KeyStoreTestUtil.setupSSLConfig(KEYSTORES_DIR, sslConfsDir, conf,
+      clientCert);
     return conf;
   }
 
   @After
   @Before
   public void cleanUp() throws Exception {
-    String keystoresDir = new File(BASEDIR).getAbsolutePath();
-    String sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class);
-    KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfsDir);
+    sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class);
+    KeyStoreTestUtil.cleanupSSLConfig(KEYSTORES_DIR, sslConfsDir);
   }
 
   @Test(expected = IllegalStateException.class)
@@ -181,4 +186,90 @@ public class TestSSLFactory {
     }
   }
 
+  @Test
+  public void testServerDifferentPasswordAndKeyPassword() throws Exception {
+    checkSSLFactoryInitWithPasswords(SSLFactory.Mode.SERVER, "password",
+      "keyPassword", "password", "keyPassword");
+  }
+
+  @Test
+  public void testServerKeyPasswordDefaultsToPassword() throws Exception {
+    checkSSLFactoryInitWithPasswords(SSLFactory.Mode.SERVER, "password",
+      "password", "password", null);
+  }
+
+  @Test
+  public void testClientDifferentPasswordAndKeyPassword() throws Exception {
+    checkSSLFactoryInitWithPasswords(SSLFactory.Mode.CLIENT, "password",
+      "keyPassword", "password", "keyPassword");
+  }
+
+  @Test
+  public void testClientKeyPasswordDefaultsToPassword() throws Exception {
+    checkSSLFactoryInitWithPasswords(SSLFactory.Mode.CLIENT, "password",
+      "password", "password", null);
+  }
+
+  /**
+   * Checks that SSLFactory initialization is successful with the given
+   * arguments.  This is a helper method for writing test cases that cover
+   * different combinations of settings for the store password and key password.
+   * It takes care of bootstrapping a keystore, a truststore, and SSL client or
+   * server configuration.  Then, it initializes an SSLFactory.  If no exception
+   * is thrown, then initialization was successful.
+   * 
+   * @param mode SSLFactory.Mode mode to test
+   * @param password String store password to set on keystore
+   * @param keyPassword String key password to set on keystore
+   * @param confPassword String store password to set in SSL config file, or null
+   *   to avoid setting in SSL config file
+   * @param confKeyPassword String key password to set in SSL config file, or
+   *   null to avoid setting in SSL config file
+   * @throws Exception for any error
+   */
+  private void checkSSLFactoryInitWithPasswords(SSLFactory.Mode mode,
+      String password, String keyPassword, String confPassword,
+      String confKeyPassword) throws Exception {
+    String keystore = new File(KEYSTORES_DIR, "keystore.jks").getAbsolutePath();
+    String truststore = new File(KEYSTORES_DIR, "truststore.jks")
+      .getAbsolutePath();
+    String trustPassword = "trustP";
+
+    // Create keys, certs, keystore, and truststore.
+    KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA");
+    X509Certificate cert = KeyStoreTestUtil.generateCertificate("CN=Test",
+      keyPair, 30, "SHA1withRSA");
+    KeyStoreTestUtil.createKeyStore(keystore, password, keyPassword, "Test",
+      keyPair.getPrivate(), cert);
+    Map<String, X509Certificate> certs = Collections.singletonMap("server",
+      cert);
+    KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs);
+
+    // Create SSL configuration file, for either server or client.
+    final String sslConfFileName;
+    final Configuration sslConf;
+    if (mode == SSLFactory.Mode.SERVER) {
+      sslConfFileName = "ssl-server.xml";
+      sslConf = KeyStoreTestUtil.createServerSSLConfig(keystore, confPassword,
+        confKeyPassword, truststore);
+    } else {
+      sslConfFileName = "ssl-client.xml";
+      sslConf = KeyStoreTestUtil.createClientSSLConfig(keystore, confPassword,
+        confKeyPassword, truststore);
+    }
+    KeyStoreTestUtil.saveConfig(new File(sslConfsDir, sslConfFileName), sslConf);
+
+    // Create the master configuration for use by the SSLFactory, which by
+    // default refers to the ssl-server.xml or ssl-client.xml created above.
+    Configuration conf = new Configuration();
+    conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, true);
+
+    // Try initializing an SSLFactory.
+    SSLFactory sslFactory = new SSLFactory(mode, conf);
+    try {
+      sslFactory.init();
+    } finally {
+      sslFactory.destroy();
+    }
+  }
 }

+ 533 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/BloomFilterCommonTester.java

@@ -0,0 +1,533 @@
+package org.apache.hadoop.util.bloom;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.AbstractCollection;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Random;
+
+import org.junit.Assert;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.util.hash.Hash;
+import org.apache.log4j.Logger;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+
+public class BloomFilterCommonTester<T extends Filter> {
+
+  private static final double LN2 = Math.log(2);
+  private static final double LN2_SQUARED = LN2 * LN2;
+
+  private final int hashType;
+  private final int numInsertions;
+
+  private final ImmutableList.Builder<T> builder = ImmutableList.builder();
+
+  private ImmutableSet<BloomFilterTestStrategy> filterTestStrateges;
+
+  private final PreAssertionHelper preAssertionHelper;
+
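+  // Standard Bloom filter sizing: for n insertions at a target false-positive
+  // rate p, the optimal bit array size is m = -n * ln(p) / (ln 2)^2.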
+  static int optimalNumOfBits(int n, double p) {
+    return (int) (-n * Math.log(p) / LN2_SQUARED);
+  }
+  
+  public static <T extends Filter> BloomFilterCommonTester<T> of(int hashId,
+      int numInsertions) {
+    return new BloomFilterCommonTester<T>(hashId, numInsertions);
+  }
+
+  public BloomFilterCommonTester<T> withFilterInstance(T filter) {
+    builder.add(filter);
+    return this;
+  }
+
+  private BloomFilterCommonTester(int hashId, int numInsertions) {
+    this.hashType = hashId;
+    this.numInsertions = numInsertions;
+
+    this.preAssertionHelper = new PreAssertionHelper() {
+
+      @Override
+      public ImmutableSet<Integer> falsePositives(int hashId) {
+        switch (hashId) {
+        case Hash.JENKINS_HASH: {
+          // false positives for odd and even keys under 1000
+          return ImmutableSet.of(99, 963);
+        }
+        case Hash.MURMUR_HASH: {
+          // false positives for odd and even keys under 1000
+          return ImmutableSet.of(769, 772, 810, 874);
+        }
+        default: {
+          // fail fast with unknown hash error !!!
+          Assert.assertFalse("unknown hash error", true);
+          return ImmutableSet.of();
+        }
+        }
+      }
+    };
+  }
+
+  public BloomFilterCommonTester<T> withTestCases(
+      ImmutableSet<BloomFilterTestStrategy> filterTestStrateges) {
+    this.filterTestStrateges = ImmutableSet.copyOf(filterTestStrateges);
+    return this;
+  }
+
+  @SuppressWarnings("unchecked")
+  public void test() {
+    final ImmutableList<T> filtersList = builder.build();
+    final ImmutableSet<Integer> falsePositives = preAssertionHelper
+        .falsePositives(hashType);
+
+    for (T filter : filtersList) {
+      for (BloomFilterTestStrategy strategy : filterTestStrateges) {
+        strategy.getStrategy().assertWhat(filter, numInsertions, hashType, falsePositives);
+        // create fresh instance for next test iteration 
+        filter = (T) getSymmetricFilter(filter.getClass(), numInsertions, hashType);                
+      }
+    }
+  }
+
+  interface FilterTesterStrategy {
+    final Logger logger = Logger.getLogger(FilterTesterStrategy.class);
+
+    void assertWhat(Filter filter, int numInsertions, int hashId,
+        ImmutableSet<Integer> falsePositives);
+  }
+
+  private static Filter getSymmetricFilter(Class<?> filterClass,
+      int numInsertions, int hashType) {
+    int bitSetSize = optimalNumOfBits(numInsertions, 0.03);
+    int hashFunctionNumber = 5; 
+    
+    if (filterClass == BloomFilter.class) {
+      return new BloomFilter(bitSetSize, hashFunctionNumber, hashType);
+    } else if (filterClass == CountingBloomFilter.class) {
+      return new CountingBloomFilter(bitSetSize, hashFunctionNumber, hashType);
+    } else if (filterClass == RetouchedBloomFilter.class) {
+      return new RetouchedBloomFilter(bitSetSize, hashFunctionNumber, hashType);
+    } else if (filterClass == DynamicBloomFilter.class) {
+      return new DynamicBloomFilter(bitSetSize, hashFunctionNumber, hashType, 3);
+    } else {
+      //fail fast
+      assertFalse("unexpected filterClass", true);
+      return null;
+    } 
+  }
+
+  public enum BloomFilterTestStrategy {
+
+    ADD_KEYS_STRATEGY(new FilterTesterStrategy() {
+
+      private final ImmutableList<Key> keys = ImmutableList.of(new Key(
+          new byte[] { 49, 48, 48 }), new Key(new byte[] { 50, 48, 48 }));
+
+      @Override
+      public void assertWhat(Filter filter, int numInsertions, int hashId,
+          ImmutableSet<Integer> falsePositives) {
+
+        filter.add(keys);
+
+        assertTrue(" might contain key error ",
+            filter.membershipTest(new Key("100".getBytes())));
+        assertTrue(" might contain key error ",
+            filter.membershipTest(new Key("200".getBytes())));
+
+        filter.add(keys.toArray(new Key[] {}));
+
+        assertTrue(" might contain key error ",
+            filter.membershipTest(new Key("100".getBytes())));
+        assertTrue(" might contain key error ",
+            filter.membershipTest(new Key("200".getBytes())));
+
+        filter.add(new AbstractCollection<Key>() {
+
+          @Override
+          public Iterator<Key> iterator() {
+            return keys.iterator();
+          }
+
+          @Override
+          public int size() {
+            return keys.size();
+          }
+
+        });
+
+        assertTrue(" might contain key error ",
+            filter.membershipTest(new Key("100".getBytes())));
+        assertTrue(" might contain key error ",
+            filter.membershipTest(new Key("200".getBytes())));
+      }
+    }),
+
+    KEY_TEST_STRATEGY(new FilterTesterStrategy() {
+
+      private void checkOnKeyMethods() {
+        String line = "werabsdbe";
+
+        Key key = new Key(line.getBytes());
+        assertTrue("default key weight error ", key.getWeight() == 1d);
+
+        key.set(line.getBytes(), 2d);
+        assertTrue(" setted key weight error ", key.getWeight() == 2d);
+
+        Key sKey = new Key(line.getBytes(), 2d);
+        assertTrue("equals error", key.equals(sKey));
+        assertTrue("hashcode error", key.hashCode() == sKey.hashCode());
+
+        sKey = new Key(line.concat("a").getBytes(), 2d);
+        assertFalse("equals error", key.equals(sKey));
+        assertFalse("hashcode error", key.hashCode() == sKey.hashCode());
+
+        sKey = new Key(line.getBytes(), 3d);
+        assertFalse("equals error", key.equals(sKey));
+        assertFalse("hashcode error", key.hashCode() == sKey.hashCode());
+
+        key.incrementWeight();
+        assertTrue("weight error", key.getWeight() == 3d);
+
+        key.incrementWeight(2d);
+        assertTrue("weight error", key.getWeight() == 5d);
+      }
+
+      private void checkOnReadWrite() {
+        String line = "qryqeb354645rghdfvbaq23312fg";
+        DataOutputBuffer out = new DataOutputBuffer();
+        DataInputBuffer in = new DataInputBuffer();
+        Key originKey = new Key(line.getBytes(), 100d);
+        try {
+          originKey.write(out);
+          in.reset(out.getData(), out.getData().length);
+          Key restoredKey = new Key(new byte[] { 0 });
+          assertFalse("checkOnReadWrite equals error", restoredKey.equals(originKey));
+          restoredKey.readFields(in);
+          assertTrue("checkOnReadWrite equals error", restoredKey.equals(originKey));
+          out.reset();
+        } catch (Exception ioe) {
+          Assert.fail("checkOnReadWrite ex error");
+        }
+      }
+
+      private void checkSetOnIAE() {
+        Key key = new Key();
+        try {
+          key.set(null, 0);
+        } catch (IllegalArgumentException ex) {
+          // expected
+        } catch (Exception e) {
+          Assert.fail("checkSetOnIAE ex error");
+        }
+      }
+
+      @Override
+      public void assertWhat(Filter filter, int numInsertions, int hashId,
+          ImmutableSet<Integer> falsePositives) {
+        checkOnKeyMethods();
+        checkOnReadWrite();
+        checkSetOnIAE();
+      }
+    }),
+
+    EXCEPTIONS_CHECK_STRATEGY(new FilterTesterStrategy() {
+
+      @Override
+      public void assertWhat(Filter filter, int numInsertions, int hashId,
+          ImmutableSet<Integer> falsePositives) {
+        checkAddOnNPE(filter);
+        checkTestMembershipOnNPE(filter);
+        checkAndOnIAE(filter);       
+      }
+
+      private void checkAndOnIAE(Filter filter) {
+        Filter tfilter = null;
+
+        try {
+          Collection<Key> keys = null;
+          filter.add(keys);
+        } catch (IllegalArgumentException ex) {
+          //
+        } catch (Exception e) {
+          Assert.fail("" + e);
+        }
+
+        try {
+          Key[] keys = null;
+          filter.add(keys);
+        } catch (IllegalArgumentException ex) {
+          //
+        } catch (Exception e) {
+          Assert.fail("" + e);
+        }
+
+        try {
+          ImmutableList<Key> keys = null;
+          filter.add(keys);
+        } catch (IllegalArgumentException ex) {
+          //
+        } catch (Exception e) {
+          Assert.fail("" + e);
+        }
+
+        try {
+          filter.and(tfilter);
+        } catch (IllegalArgumentException ex) {
+          // expected
+        } catch (Exception e) {
+          Assert.fail("" + e);
+        }
+
+        try {
+          filter.or(tfilter);
+        } catch (IllegalArgumentException ex) {
+          // expected
+        } catch (Exception e) {
+          Assert.fail("" + e);
+        }
+
+        try {
+          filter.xor(tfilter);
+        } catch (IllegalArgumentException ex) {
+          // expected
+        } catch (UnsupportedOperationException unex) {
+          //
+        } catch (Exception e) {
+          Assert.fail("" + e);
+        }
+
+      }
+
+      private void checkTestMembershipOnNPE(Filter filter) {
+        try {
+          Key nullKey = null;
+          filter.membershipTest(nullKey);
+        } catch (NullPointerException ex) {
+          // expected
+        } catch (Exception e) {
+          Assert.fail("" + e);
+        }
+      }
+
+      private void checkAddOnNPE(Filter filter) {
+        try {
+          Key nullKey = null;
+          filter.add(nullKey);
+        } catch (NullPointerException ex) {
+          // expected
+        } catch (Exception e) {
+          Assert.fail("" + e);
+        }
+      }
+    }),
+
+    ODD_EVEN_ABSENT_STRATEGY(new FilterTesterStrategy() {
+
+      @Override
+      public void assertWhat(Filter filter, int numInsertions, int hashId,
+          ImmutableSet<Integer> falsePositives) {
+
+        // add all even keys
+        for (int i = 0; i < numInsertions; i += 2) {
+          filter.add(new Key(Integer.toString(i).getBytes()));
+        }
+
+        // check on present even key
+        for (int i = 0; i < numInsertions; i += 2) {
+          Assert.assertTrue(" filter might contains " + i,
+              filter.membershipTest(new Key(Integer.toString(i).getBytes())));
+        }
+
+        // check that odd keys are absent, except for known false positives
+        for (int i = 1; i < numInsertions; i += 2) {
+          if (!falsePositives.contains(i)) {
+            assertFalse(" filter should not contain " + i,
+                filter.membershipTest(new Key(Integer.toString(i).getBytes())));
+          }
+        }
+      }
+    }),
+
+    WRITE_READ_STRATEGY(new FilterTesterStrategy() {
+
+      private int slotSize = 10;
+
+      @Override
+      public void assertWhat(Filter filter, int numInsertions, int hashId,
+          ImmutableSet<Integer> falsePositives) {
+
+        final Random rnd = new Random();
+        final DataOutputBuffer out = new DataOutputBuffer();
+        final DataInputBuffer in = new DataInputBuffer();
+        try {
+          Filter tempFilter = getSymmetricFilter(filter.getClass(),
+              numInsertions, hashId);
+          ImmutableList.Builder<Integer> blist = ImmutableList.builder();
+          for (int i = 0; i < slotSize; i++) {
+            blist.add(rnd.nextInt(numInsertions * 2));
+          }
+
+          ImmutableList<Integer> list = blist.build();
+
+          // mark bits for later check
+          for (Integer slot : list) {
+            filter.add(new Key(String.valueOf(slot).getBytes()));
+          }
+
+          filter.write(out);
+          in.reset(out.getData(), out.getLength());
+          tempFilter.readFields(in);
+
+          for (Integer slot : list) {
+            assertTrue("read/write mask check filter error on " + slot,
+                filter.membershipTest(new Key(String.valueOf(slot).getBytes())));
+          }
+
+        } catch (IOException ex) {
+          Assert.fail("error ex !!!" + ex);
+        }
+      }
+    }),
+
+    FILTER_XOR_STRATEGY(new FilterTesterStrategy() {
+
+      @Override
+      public void assertWhat(Filter filter, int numInsertions, int hashId,
+          ImmutableSet<Integer> falsePositives) {
+        Filter symmetricFilter = getSymmetricFilter(filter.getClass(),
+            numInsertions, hashId);
+        try {
+          // 0 xor 0 -> 0
+          filter.xor(symmetricFilter);
+          // check that all keys are absent
+          for (int i = 0; i < numInsertions; i++) {
+            Assert.assertFalse(" filter might contains " + i,
+                filter.membershipTest(new Key(Integer.toString(i).getBytes())));
+          }
+
+          // add all even keys
+          for (int i = 0; i < numInsertions; i += 2) {
+            filter.add(new Key(Integer.toString(i).getBytes()));
+          }
+
+          // add the same even keys to the symmetric filter
+          for (int i = 0; i < numInsertions; i += 2) {
+            symmetricFilter.add(new Key(Integer.toString(i).getBytes()));
+          }
+
+          filter.xor(symmetricFilter);
+          // 1 xor 1 -> 0
+          // check that all keys are absent
+          for (int i = 0; i < numInsertions; i++) {
+            Assert.assertFalse(" filter might not contains " + i,
+                filter.membershipTest(new Key(Integer.toString(i).getBytes())));
+          }
+
+        } catch (UnsupportedOperationException ex) {
+          // not all Filter's implements this method
+          return;
+        }
+      }
+    }),
+
+    FILTER_AND_STRATEGY(new FilterTesterStrategy() {
+
+      @Override
+      public void assertWhat(Filter filter, int numInsertions, int hashId,
+          ImmutableSet<Integer> falsePositives) {
+
+        int startIntersection = numInsertions - (numInsertions - 100);
+        int endIntersection = numInsertions - 100;
+
+        Filter partialFilter = getSymmetricFilter(filter.getClass(),
+            numInsertions, hashId);
+
+        for (int i = 0; i < numInsertions; i++) {
+          String digit = Integer.toString(i);
+          filter.add(new Key(digit.getBytes()));
+          if (i >= startIntersection && i <= endIntersection) {
+            partialFilter.add(new Key(digit.getBytes()));
+          }
+        }
+
+        // do logic AND
+        filter.and(partialFilter);
+
+        for (int i = 0; i < numInsertions; i++) {
+          if (i >= startIntersection && i <= endIntersection) {
+            Assert.assertTrue(" filter might contains " + i,
+                filter.membershipTest(new Key(Integer.toString(i).getBytes())));
+          }
+        }        
+      }
+    }),
+
+    FILTER_OR_STRATEGY(new FilterTesterStrategy() {
+
+      @Override
+      public void assertWhat(Filter filter, int numInsertions, int hashId,
+          ImmutableSet<Integer> falsePositives) {
+        Filter evenFilter = getSymmetricFilter(filter.getClass(),
+            numInsertions, hashId);
+
+        // add all even
+        for (int i = 0; i < numInsertions; i += 2) {
+          evenFilter.add(new Key(Integer.toString(i).getBytes()));
+        }
+
+        // add all odd
+        for (int i = 1; i < numInsertions; i += 2) {
+          filter.add(new Key(Integer.toString(i).getBytes()));
+        }
+
+        // union odd with even
+        filter.or(evenFilter);
+
+        // check on present all key
+        for (int i = 0; i < numInsertions; i++) {
+          Assert.assertTrue(" filter might contains " + i,
+              filter.membershipTest(new Key(Integer.toString(i).getBytes())));
+        }        
+      }
+    });
+
+    private final FilterTesterStrategy testerStrategy;
+
+    BloomFilterTestStrategy(FilterTesterStrategy testerStrategy) {
+      this.testerStrategy = testerStrategy;
+    }
+
+    public FilterTesterStrategy getStrategy() {
+      return testerStrategy;
+    }
+
+  }
+
+  interface PreAssertionHelper {
+    public ImmutableSet<Integer> falsePositives(int hashId);
+  }
+
+}

+ 240 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/bloom/TestBloomFilters.java

@@ -0,0 +1,240 @@
+package org.apache.hadoop.util.bloom;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.AbstractCollection;
+import java.util.Iterator;
+
+import org.apache.hadoop.util.bloom.BloomFilterCommonTester.BloomFilterTestStrategy;
+import org.apache.hadoop.util.hash.Hash;
+import org.junit.Assert;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+
+public class TestBloomFilters {
+
+  int numInsertions = 1000;
+  int bitSize = BloomFilterCommonTester.optimalNumOfBits(numInsertions, 0.03);
+  int hashFunctionNumber = 5;
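+  // for numInsertions = 1000 and p = 0.03 this works out to roughly 7300 bits;
+  // 5 hash functions is close to the optimal k = (bitSize / numInsertions) * ln 2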
+
+  private static final ImmutableMap<Integer, ? extends AbstractCollection<Key>> FALSE_POSITIVE_UNDER_1000 = ImmutableMap
+      .of(Hash.JENKINS_HASH, new AbstractCollection<Key>() {
+        final ImmutableList<Key> falsePositive = ImmutableList.<Key> of(
+            new Key("99".getBytes()), new Key("963".getBytes()));
+
+        @Override
+        public Iterator<Key> iterator() {
+          return falsePositive.iterator();
+        }
+
+        @Override
+        public int size() {
+          return falsePositive.size();
+        }
+      }, Hash.MURMUR_HASH, new AbstractCollection<Key>() {
+        final ImmutableList<Key> falsePositive = ImmutableList.<Key> of(
+            new Key("769".getBytes()), new Key("772".getBytes()),
+            new Key("810".getBytes()), new Key("874".getBytes()));
+
+        @Override
+        public Iterator<Key> iterator() {
+          return falsePositive.iterator();
+        }
+
+        @Override
+        public int size() {
+          return falsePositive.size();
+        }
+      });
+
+  private enum Digits {
+    ODD(1), EVEN(0);
+
+    int start;
+
+    Digits(int start) {
+      this.start = start;
+    }
+
+    int getStart() {
+      return start;
+    }
+  }
+  
+  @Test
+  public void testDynamicBloomFilter() {
+    int hashId = Hash.JENKINS_HASH;    
+    Filter filter = new DynamicBloomFilter(bitSize, hashFunctionNumber,
+        Hash.JENKINS_HASH, 3);    
+    BloomFilterCommonTester.of(hashId, numInsertions)
+        .withFilterInstance(filter)
+        .withTestCases(ImmutableSet.of(BloomFilterTestStrategy.KEY_TEST_STRATEGY,
+                BloomFilterTestStrategy.ADD_KEYS_STRATEGY,
+                BloomFilterTestStrategy.EXCEPTIONS_CHECK_STRATEGY,
+                BloomFilterTestStrategy.WRITE_READ_STRATEGY,
+                BloomFilterTestStrategy.ODD_EVEN_ABSENT_STRATEGY))
+                .test();
+    
+    assertNotNull("testDynamicBloomFilter error ", filter.toString());
+  }
+
+  @Test
+  public void testCountingBloomFilter() {
+    int hashId = Hash.JENKINS_HASH;
+
+    CountingBloomFilter filter = new CountingBloomFilter(bitSize,
+        hashFunctionNumber, hashId);
+
+    Key key = new Key(new byte[] { 48, 48 });
+
+    filter.add(key);
+    assertTrue("CountingBloomFilter.membership error ",
+        filter.membershipTest(key));
+    assertTrue("CountingBloomFilter.approximateCount error",
+        filter.approximateCount(key) == 1);
+
+    filter.add(key);
+    assertTrue("CountingBloomFilter.approximateCount error",
+        filter.approximateCount(key) == 2);
+
+    filter.delete(key);
+    assertTrue("CountingBloomFilter.membership error ",
+        filter.membershipTest(key));
+
+    filter.delete(key);
+    assertFalse("CountingBloomFilter.membership error ",
+        filter.membershipTest(key));
+    assertTrue("CountingBloomFilter.approximateCount error",
+        filter.approximateCount(key) == 0);
+
+    BloomFilterCommonTester.of(hashId, numInsertions)
+        .withFilterInstance(filter)
+        .withTestCases(ImmutableSet.of(BloomFilterTestStrategy.KEY_TEST_STRATEGY,
+                BloomFilterTestStrategy.ADD_KEYS_STRATEGY,
+                BloomFilterTestStrategy.EXCEPTIONS_CHECK_STRATEGY,
+                BloomFilterTestStrategy.ODD_EVEN_ABSENT_STRATEGY,
+                BloomFilterTestStrategy.WRITE_READ_STRATEGY,
+                BloomFilterTestStrategy.FILTER_OR_STRATEGY,
+                BloomFilterTestStrategy.FILTER_XOR_STRATEGY)).test();
+  }
+
+  @Test
+  public void testRetouchedBloomFilterSpecific() {
+    int numInsertions = 1000;
+    int hashFunctionNumber = 5;
+
+    ImmutableSet<Integer> hashes = ImmutableSet.of(Hash.MURMUR_HASH,
+        Hash.JENKINS_HASH);
+
+    for (Integer hashId : hashes) {      
+      RetouchedBloomFilter filter = new RetouchedBloomFilter(bitSize,
+          hashFunctionNumber, hashId);
+
+      checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.ODD,
+          RemoveScheme.MAXIMUM_FP);
+      filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
+
+      checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.EVEN,
+          RemoveScheme.MAXIMUM_FP);
+      filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
+
+      checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.ODD,
+          RemoveScheme.MINIMUM_FN);
+      filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
+
+      checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.EVEN,
+          RemoveScheme.MINIMUM_FN);
+      filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
+
+      checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.ODD,
+          RemoveScheme.RATIO);
+      filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
+
+      checkOnAbsentFalsePositive(hashId, numInsertions, filter, Digits.EVEN,
+          RemoveScheme.RATIO);
+      filter.and(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId));
+    }
+  }
+
+  private void checkOnAbsentFalsePositive(int hashId, int numInsertions,
+      final RetouchedBloomFilter filter, Digits digits, short removeSchema) {
+    AbstractCollection<Key> falsePositives = FALSE_POSITIVE_UNDER_1000
+        .get(hashId);
+
+    if (falsePositives == null)
+      Assert.fail(String.format("false positives for hash %d not founded",
+          hashId));
+
+    filter.addFalsePositive(falsePositives);
+
+    for (int i = digits.getStart(); i < numInsertions; i += 2) {
+      filter.add(new Key(Integer.toString(i).getBytes()));
+    }
+
+    for (Key key : falsePositives) {
+      filter.selectiveClearing(key, removeSchema);
+    }
+
+    for (int i = 1 - digits.getStart(); i < numInsertions; i += 2) {
+      assertFalse(" testRetouchedBloomFilterAddFalsePositive error " + i,
+          filter.membershipTest(new Key(Integer.toString(i).getBytes())));
+    }
+  }
+
+  @Test
+  public void testFiltersWithJenkinsHash() {
+    int hashId = Hash.JENKINS_HASH;
+
+    BloomFilterCommonTester.of(hashId, numInsertions)
+        .withFilterInstance(new BloomFilter(bitSize, hashFunctionNumber, hashId))
+        .withFilterInstance(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId))
+        .withTestCases(ImmutableSet.of(BloomFilterTestStrategy.KEY_TEST_STRATEGY,
+                BloomFilterTestStrategy.ADD_KEYS_STRATEGY,
+                BloomFilterTestStrategy.EXCEPTIONS_CHECK_STRATEGY,
+                BloomFilterTestStrategy.ODD_EVEN_ABSENT_STRATEGY,
+                BloomFilterTestStrategy.WRITE_READ_STRATEGY,
+                BloomFilterTestStrategy.FILTER_OR_STRATEGY,
+                BloomFilterTestStrategy.FILTER_AND_STRATEGY,
+                BloomFilterTestStrategy.FILTER_XOR_STRATEGY)).test();
+  }
+
+  @Test
+  public void testFiltersWithMurmurHash() {
+    int hashId = Hash.MURMUR_HASH;
+
+    BloomFilterCommonTester.of(hashId, numInsertions)
+        .withFilterInstance(new BloomFilter(bitSize, hashFunctionNumber, hashId))
+        .withFilterInstance(new RetouchedBloomFilter(bitSize, hashFunctionNumber, hashId))
+        .withTestCases(ImmutableSet.of(BloomFilterTestStrategy.KEY_TEST_STRATEGY,
+                BloomFilterTestStrategy.ADD_KEYS_STRATEGY,
+                BloomFilterTestStrategy.EXCEPTIONS_CHECK_STRATEGY,
+                BloomFilterTestStrategy.ODD_EVEN_ABSENT_STRATEGY,
+                BloomFilterTestStrategy.WRITE_READ_STRATEGY,
+                BloomFilterTestStrategy.FILTER_OR_STRATEGY,
+                BloomFilterTestStrategy.FILTER_AND_STRATEGY,
+                BloomFilterTestStrategy.FILTER_XOR_STRATEGY)).test();
+  }
+}
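
Note: the filters exercised above come from org.apache.hadoop.util.bloom and can also be used directly outside the test harness. A minimal standalone sketch, assuming hadoop-common is on the classpath (the vector size of 1024 and the 5 hash functions are illustrative values, not the constants defined by the test class):

import org.apache.hadoop.util.bloom.BloomFilter;
import org.apache.hadoop.util.bloom.Key;
import org.apache.hadoop.util.hash.Hash;

public class BloomFilterSketch {
  public static void main(String[] args) {
    // Illustrative sizing; real callers derive these from the expected key
    // count and the acceptable false-positive rate.
    BloomFilter filter = new BloomFilter(1024, 5, Hash.MURMUR_HASH);
    filter.add(new Key("alpha".getBytes()));

    // Inserted keys always test true; absent keys test false except for the
    // occasional false positive inherent to Bloom filters.
    System.out.println(filter.membershipTest(new Key("alpha".getBytes())));
    System.out.println(filter.membershipTest(new Key("beta".getBytes())));
  }
}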

+ 89 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/hash/TestHash.java

@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util.hash;
+
+import static org.junit.Assert.*;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+public class TestHash {
+  static final String LINE = "34563@45kjkksdf/ljfdb9d8fbusd*89uggjsk<dfgjsdfh@sddc2q3esc";
+
+  @Test
+  public void testHash() {
+    int iterations = 30;
+    assertTrue("testHash jenkins error !!!",
+        Hash.JENKINS_HASH == Hash.parseHashType("jenkins"));
+    assertTrue("testHash murmur error !!!",
+        Hash.MURMUR_HASH == Hash.parseHashType("murmur"));
+    assertTrue("testHash undefined",
+        Hash.INVALID_HASH == Hash.parseHashType("undefined"));
+
+    Configuration cfg = new Configuration();
+    cfg.set("hadoop.util.hash.type", "murmur");
+    assertTrue("testHash", MurmurHash.getInstance() == Hash.getInstance(cfg));
+
+    cfg = new Configuration();
+    cfg.set("hadoop.util.hash.type", "jenkins");
+    assertTrue("testHash jenkins configuration error !!!",
+        JenkinsHash.getInstance() == Hash.getInstance(cfg));
+
+    cfg = new Configuration();
+    assertTrue("testHash undefine configuration error !!!",
+        MurmurHash.getInstance() == Hash.getInstance(cfg));
+
+    assertTrue("testHash error jenkin getInstance !!!",
+        JenkinsHash.getInstance() == Hash.getInstance(Hash.JENKINS_HASH));
+    assertTrue("testHash error murmur getInstance !!!",
+        MurmurHash.getInstance() == Hash.getInstance(Hash.MURMUR_HASH));
+
+    assertNull("testHash error invalid getInstance !!!",
+        Hash.getInstance(Hash.INVALID_HASH));
+
+    int murmurHash = Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes());
+    for (int i = 0; i < iterations; i++) {
+      assertTrue("multiple evaluation murmur hash error !!!",
+          murmurHash == Hash.getInstance(Hash.MURMUR_HASH)
+              .hash(LINE.getBytes()));
+    }
+
+    murmurHash = Hash.getInstance(Hash.MURMUR_HASH).hash(LINE.getBytes(), 67);
+    for (int i = 0; i < iterations; i++) {
+      assertTrue(
+          "multiple evaluation murmur hash error !!!",
+          murmurHash == Hash.getInstance(Hash.MURMUR_HASH).hash(
+              LINE.getBytes(), 67));
+    }
+
+    int jenkinsHash = Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes());
+    for (int i = 0; i < iterations; i++) {
+      assertTrue(
+          "multiple evaluation jenkins hash error !!!",
+          jenkinsHash == Hash.getInstance(Hash.JENKINS_HASH).hash(
+              LINE.getBytes()));
+    }
+
+    jenkinsHash = Hash.getInstance(Hash.JENKINS_HASH).hash(LINE.getBytes(), 67);
+    for (int i = 0; i < iterations; i++) {
+      assertTrue(
+          "multiple evaluation jenkins hash error !!!",
+          jenkinsHash == Hash.getInstance(Hash.JENKINS_HASH).hash(
+              LINE.getBytes(), 67));
+    }   
+  } 
+}
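
Note: the Hash API verified above is straightforward to use standalone. A minimal sketch, assuming hadoop-common is on the classpath (the seed value 17 is arbitrary):

import org.apache.hadoop.util.hash.Hash;

public class HashSketch {
  public static void main(String[] args) {
    byte[] data = "example-record".getBytes();
    // Hash.getInstance returns a shared instance for the requested hash type.
    int murmur = Hash.getInstance(Hash.MURMUR_HASH).hash(data);
    // The two-argument form seeds the hash with an initial value.
    int jenkins = Hash.getInstance(Hash.JENKINS_HASH).hash(data, 17);
    System.out.println("murmur  = " + murmur);
    System.out.println("jenkins = " + jenkins);
  }
}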

+ 9 - 0
hadoop-common-project/hadoop-common/src/test/resources/core-site.xml

@@ -69,4 +69,13 @@
   <value>simple</value>
 </property>
 
+<property>
+  <name>nfs3.server.port</name>
+  <value>2079</value>
+</property>
+
+<property>
+  <name>nfs3.mountd.port</name>
+  <value>4272</value>
+</property>
 </configuration>
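
Note: these test-only port overrides are read through the standard Configuration lookup. A minimal sketch of such a lookup, assuming this core-site.xml is on the classpath (the fallbacks 2049 and 4242 are only illustrative defaults for the sketch, not values taken from the NFS code):

import org.apache.hadoop.conf.Configuration;

public class NfsPortLookupSketch {
  public static void main(String[] args) {
    // new Configuration() loads core-site.xml from the classpath; the second
    // argument to getInt is the fallback used when the property is not set.
    Configuration conf = new Configuration();
    int serverPort = conf.getInt("nfs3.server.port", 2049);
    int mountdPort = conf.getInt("nfs3.mountd.port", 4242);
    System.out.println("nfs3.server.port = " + serverPort);
    System.out.println("nfs3.mountd.port = " + mountdPort);
  }
}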

Binary
hadoop-common-project/hadoop-common/src/test/resources/test.har/.part-0.crc


+ 0 - 0
hadoop-common-project/hadoop-common/src/test/resources/test.har/_SUCCESS


+ 4 - 0
hadoop-common-project/hadoop-common/src/test/resources/test.har/_index

@@ -0,0 +1,4 @@
+%2F dir 1380270822000+511+root+wheel 0 0 dir1 
+%2Fdir1 dir 1380270441000+493+jdere+wheel 0 0 1.txt 2.txt 
+%2Fdir1%2F1.txt file part-0 0 0 1380270439000+420+jdere+wheel 
+%2Fdir1%2F2.txt file part-0 0 0 1380270441000+420+jdere+wheel 

+ 2 - 0
hadoop-common-project/hadoop-common/src/test/resources/test.har/_masterindex

@@ -0,0 +1,2 @@
+3 
+0 1210114968 0 232 

+ 0 - 0
hadoop-common-project/hadoop-common/src/test/resources/test.har/part-0


+ 15 - 3
hadoop-common-project/hadoop-common/src/test/resources/testConf.xml

@@ -601,16 +601,28 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-setrep \[-R\] \[-w\] &lt;rep&gt; &lt;path/file&gt; \.\.\.:( |\t)*Set the replication level of a file.( )*</expected-output>
+          <expected-output>^-setrep \[-R\] \[-w\] &lt;rep&gt; &lt;path&gt; \.\.\.:( |\t)*Set the replication level of a file. If &lt;path&gt; is a directory( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*The -R flag requests a recursive change of replication level( )*</expected-output>
+          <expected-output>^( |\t)*then the command recursively changes the replication factor of( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*for an entire tree.( )*</expected-output>
+          <expected-output>^( |\t)*all files under the directory tree rooted at &lt;path&gt;\.( )*</expected-output>
         </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*The -w flag requests that the command wait for the replication( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*to complete. This can potentially take a very long time\.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*The -R flag is accepted for backwards compatibility\. It has no effect\.( )*</expected-output>
+        </comparator>
       </comparators>
     </test>
 

+ 0 - 2
hadoop-common-project/hadoop-nfs/pom.xml

@@ -86,13 +86,11 @@
     <dependency>
       <groupId>io.netty</groupId>
       <artifactId>netty</artifactId>
-      <version>3.6.2.Final</version>
       <scope>compile</scope>
     </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
-      <version>11.0.2</version>
     </dependency>
   </dependencies>
 

+ 5 - 5
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java

@@ -22,6 +22,7 @@ import java.util.List;
 import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
 
 /**
@@ -37,11 +38,10 @@ public class MountResponse {
   /** Response for RPC call {@link MountInterface.MNTPROC#MNT} */
   public static XDR writeMNTResponse(int status, XDR xdr, int xid,
       byte[] handle) {
-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     xdr.writeInt(status);
     if (status == MNT_OK) {
-      xdr.writeInt(handle.length);
-      xdr.writeFixedOpaque(handle);
+      xdr.writeVariableOpaque(handle);
       // Only MountV3 returns a list of supported authFlavors
       xdr.writeInt(1);
       xdr.writeInt(AuthFlavor.AUTH_SYS.getValue());
@@ -51,7 +51,7 @@ public class MountResponse {
 
   /** Response for RPC call {@link MountInterface.MNTPROC#DUMP} */
   public static XDR writeMountList(XDR xdr, int xid, List<MountEntry> mounts) {
-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     for (MountEntry mountEntry : mounts) {
       xdr.writeBoolean(true); // Value follows yes
       xdr.writeString(mountEntry.host());
@@ -66,7 +66,7 @@ public class MountResponse {
       List<NfsExports> hostMatcher) {
     assert (exports.size() == hostMatcher.size());
 
-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     for (int i = 0; i < exports.size(); i++) {
       xdr.writeBoolean(true); // Value follows - yes
       xdr.writeString(exports.get(i));

+ 4 - 0
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java

@@ -149,6 +149,8 @@ public class IdUserGroup {
     checkAndUpdateMaps();
     String uname = uidNameMap.get(uid);
     if (uname == null) {
+      LOG.warn("Can't find user name for uid " + uid
+          + ". Use default user name " + unknown);
       uname = unknown;
     }
     return uname;
@@ -158,6 +160,8 @@ public class IdUserGroup {
     checkAndUpdateMaps();
     String gname = gidNameMap.get(gid);
     if (gname == null) {
+      LOG.warn("Can't find group name for gid " + gid
+          + ". Use default group name " + unknown);
       gname = unknown;
     }
     return gname;

+ 12 - 18
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java

@@ -19,15 +19,11 @@ package org.apache.hadoop.nfs.nfs3;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mount.MountdBase;
-import org.apache.hadoop.oncrpc.RpcFrameDecoder;
 import org.apache.hadoop.oncrpc.RpcProgram;
 import org.apache.hadoop.oncrpc.SimpleTcpServer;
-import org.apache.hadoop.oncrpc.SimpleTcpServerHandler;
 import org.apache.hadoop.portmap.PortmapMapping;
-import org.jboss.netty.channel.ChannelPipeline;
-import org.jboss.netty.channel.ChannelPipelineFactory;
-import org.jboss.netty.channel.Channels;
 
 /**
  * Nfs server. Supports NFS v3 using {@link RpcProgram}.
@@ -38,6 +34,7 @@ public abstract class Nfs3Base {
   public static final Log LOG = LogFactory.getLog(Nfs3Base.class);
   private final MountdBase mountd;
   private final RpcProgram rpcProgram;
+  private final int nfsPort;
   
   public MountdBase getMountBase() {
     return mountd;
@@ -47,9 +44,17 @@ public abstract class Nfs3Base {
     return rpcProgram;
   }
 
+  protected Nfs3Base(MountdBase mountd, RpcProgram program, Configuration conf) {
+    this.mountd = mountd;
+    this.rpcProgram = program;
+    this.nfsPort = conf.getInt("nfs3.server.port", Nfs3Constant.PORT);
+    LOG.info("NFS server port set to: "+nfsPort);
+  }
+
   protected Nfs3Base(MountdBase mountd, RpcProgram program) {
     this.mountd = mountd;
     this.rpcProgram = program;
+    this.nfsPort = Nfs3Constant.PORT;
   }
 
   public void start(boolean register) {
@@ -61,19 +66,8 @@ public abstract class Nfs3Base {
   }
 
   private void startTCPServer() {
-    SimpleTcpServer tcpServer = new SimpleTcpServer(Nfs3Constant.PORT,
-        rpcProgram, 0) {
-      @Override
-      public ChannelPipelineFactory getPipelineFactory() {
-        return new ChannelPipelineFactory() {
-          @Override
-          public ChannelPipeline getPipeline() {
-            return Channels.pipeline(new RpcFrameDecoder(),
-                new SimpleTcpServerHandler(rpcProgram));
-          }
-        };
-      }
-    };
+    SimpleTcpServer tcpServer = new SimpleTcpServer(nfsPort,
+        rpcProgram, 0);
     tcpServer.run();
   }
 }

+ 5 - 6
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java

@@ -72,19 +72,18 @@ public class Nfs3FileAttributes {
   }
    
   public Nfs3FileAttributes() {
-    this(false, 0, (short)0, 0, 0, 0, 0, 0, 0, 0);
+    this(NfsFileType.NFSREG, 0, (short)0, 0, 0, 0, 0, 0, 0, 0);
   }
 
-  public Nfs3FileAttributes(boolean isDir, int nlink, short mode, int uid,
+  public Nfs3FileAttributes(NfsFileType nfsType, int nlink, short mode, int uid,
       int gid, long size, long fsid, long fileid, long mtime, long atime) {
-    this.type = isDir ? NfsFileType.NFSDIR.toValue() : NfsFileType.NFSREG
-        .toValue();
+    this.type = nfsType.toValue();
     this.mode = mode;
-    this.nlink = isDir ? (nlink + 2) : 1;
+    this.nlink = (type == NfsFileType.NFSDIR.toValue()) ? (nlink + 2) : 1;
     this.uid = uid;
     this.gid = gid;
     this.size = size;
-    if(isDir) {
+    if(type == NfsFileType.NFSDIR.toValue()) {
       this.size = getDirSize(nlink);
     }
     this.used = this.size;

+ 2 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java

@@ -97,6 +97,6 @@ public interface Nfs3Interface {
       InetAddress client);
 
   /** COMMIT: Commit cached data on a server to stable storage */
-  public NFS3Response commit(XDR xdr, SecurityHandler securityHandler,
-      InetAddress client);
+  public NFS3Response commit(XDR xdr, Channel channel, int xid,
+      SecurityHandler securityHandler, InetAddress client);
 }

+ 2 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java

@@ -25,9 +25,9 @@ import org.apache.hadoop.oncrpc.XDR;
  * SYMLINK3 Request
  */
 public class SYMLINK3Request extends RequestWithHandle {
-  private final String name;
+  private final String name;     // The name of the link
   private final SetAttr3 symAttr;
-  private final String symData;
+  private final String symData;  // The symlink target path
   
   public SYMLINK3Request(XDR xdr) throws IOException {
     super(xdr);

+ 10 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java

@@ -28,8 +28,8 @@ import org.apache.hadoop.oncrpc.XDR;
  * WRITE3 Request
  */
 public class WRITE3Request extends RequestWithHandle {
-  private final long offset;
-  private final int count;
+  private long offset;
+  private int count;
   private final WriteStableHow stableHow;
   private final ByteBuffer data;
 
@@ -54,10 +54,18 @@ public class WRITE3Request extends RequestWithHandle {
     return this.offset;
   }
 
+  public void setOffset(long offset) {
+    this.offset = offset;
+  }
+  
   public int getCount() {
     return this.count;
   }
 
+  public void setCount(int count) {
+    this.count = count;
+  }
+  
   public WriteStableHow getStableHow() {
     return this.stableHow;
   }

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/ACCESS3Response.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * ACCESS3 Response 
@@ -43,8 +44,8 @@ public class ACCESS3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     out.writeBoolean(true);
     postOpAttr.serialize(out);
     if (this.getStatus() == Nfs3Status.NFS3_OK) {

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/COMMIT3Response.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * COMMIT3 Response
@@ -47,8 +48,8 @@ public class COMMIT3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     fileWcc.serialize(out);
     if (getStatus() == Nfs3Status.NFS3_OK) {
       out.writeLongAsHyper(verf);

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/CREATE3Response.java

@@ -21,6 +21,7 @@ import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * CREATE3 Response
@@ -55,8 +56,8 @@ public class CREATE3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     if (getStatus() == Nfs3Status.NFS3_OK) {
       out.writeBoolean(true); // Handle follows
       objHandle.serialize(out);

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSINFO3Response.java

@@ -21,6 +21,7 @@ import org.apache.hadoop.nfs.NfsTime;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * FSINFO3 Response
@@ -109,8 +110,8 @@ public class FSINFO3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     out.writeBoolean(true);
     postOpAttr.serialize(out);
 

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSSTAT3Response.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * FSSTAT3 Response
@@ -90,8 +91,8 @@ public class FSSTAT3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     out.writeBoolean(true);
     if (postOpAttr == null) {
       postOpAttr = new Nfs3FileAttributes();

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/GETATTR3Response.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * GETATTR3 Response
@@ -40,8 +41,8 @@ public class GETATTR3Response extends NFS3Response {
   }
   
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     if (getStatus() == Nfs3Status.NFS3_OK) {
       postOpAttr.serialize(out);
     }

Some files were not shown because too many files changed in this diff