
Preparing for hadoop-2.1.0-beta release.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2.1.0-beta@1508397 13f79535-47bb-0310-9956-ffa450edef68
Arun Murthy 12 years ago
Parent
Commit
28715282c0
100 changed files with 6187 additions and 1041 deletions
  1. hadoop-assemblies/pom.xml (+2 -2)
  2. hadoop-client/pom.xml (+2 -2)
  3. hadoop-common-project/hadoop-annotations/pom.xml (+2 -2)
  4. hadoop-common-project/hadoop-auth-examples/pom.xml (+2 -2)
  5. hadoop-common-project/hadoop-auth/pom.xml (+2 -2)
  6. hadoop-common-project/hadoop-common/CHANGES.txt (+88 -2)
  7. hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml (+6 -0)
  8. hadoop-common-project/hadoop-common/pom.xml (+5 -2)
  9. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java (+1 -1)
  10. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java (+4 -2)
  11. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java (+5 -0)
  12. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSLinkResolver.java (+104 -0)
  13. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java (+9 -99)
  14. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java (+77 -4)
  15. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemLinkResolver.java (+99 -0)
  16. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java (+27 -0)
  17. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java (+4 -2)
  18. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java (+27 -0)
  19. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java (+27 -1)
  20. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java (+5 -2)
  21. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java (+6 -15)
  22. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java (+5 -0)
  23. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AtMostOnce.java (+41 -0)
  24. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/FailoverProxyProvider.java (+2 -2)
  25. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java (+45 -9)
  26. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java (+11 -11)
  27. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java (+13 -12)
  28. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java (+8 -7)
  29. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java (+137 -144)
  30. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientId.java (+79 -0)
  31. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java (+2 -2)
  32. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java (+3 -104)
  33. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java (+329 -0)
  34. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcConstants.java (+53 -0)
  35. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java (+388 -213)
  36. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java (+4 -3)
  37. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java (+4 -4)
  38. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RefreshUserMappingsProtocol.java (+4 -2)
  39. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java (+260 -85)
  40. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java (+2 -2)
  41. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java (+2 -0)
  42. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java (+3 -2)
  43. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolPB.java (+2 -2)
  44. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java (+3 -3)
  45. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java (+3 -3)
  46. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolPB.java (+2 -2)
  47. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java (+5 -5)
  48. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java (+2 -0)
  49. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java (+3 -3)
  50. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolPB.java (+2 -2)
  51. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolServerSideTranslatorPB.java (+3 -3)
  52. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java (+5 -1)
  53. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSetByHashMap.java (+1 -1)
  54. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java (+238 -0)
  55. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java (+2 -5)
  56. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java (+6 -2)
  57. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java (+35 -0)
  58. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java (+12 -1)
  59. hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c (+32 -0)
  60. hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c (+6 -6)
  61. hadoop-common-project/hadoop-common/src/main/proto/GetUserMappingsProtocol.proto (+3 -4)
  62. hadoop-common-project/hadoop-common/src/main/proto/RefreshAuthorizationPolicyProtocol.proto (+2 -2)
  63. hadoop-common-project/hadoop-common/src/main/proto/RefreshUserMappingsProtocol.proto (+2 -2)
  64. hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto (+8 -2)
  65. hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm (+1 -1)
  66. hadoop-common-project/hadoop-common/src/site/apt/SingleNodeSetup.apt.vm (+1 -1)
  67. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java (+140 -0)
  68. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSWrapper.java (+112 -0)
  69. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestWrapper.java (+335 -0)
  70. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestWrapper.java (+401 -0)
  71. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java (+1373 -0)
  72. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java (+1 -0)
  73. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java (+82 -0)
  74. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java (+1 -1)
  75. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java (+34 -1)
  76. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java (+3 -3)
  77. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java (+2 -3)
  78. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java (+315 -38)
  79. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java (+8 -5)
  80. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java (+1 -1)
  81. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java (+32 -5)
  82. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java (+36 -38)
  83. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java (+3 -3)
  84. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRetryCache.java (+215 -0)
  85. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java (+170 -124)
  86. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java (+1 -1)
  87. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java (+1 -1)
  88. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGSet.java (+5 -6)
  89. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightCache.java (+457 -0)
  90. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestProtoUtil.java (+16 -0)
  91. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java (+48 -0)
  92. hadoop-common-project/hadoop-nfs/pom.xml (+2 -2)
  93. hadoop-common-project/pom.xml (+2 -2)
  94. hadoop-dist/pom.xml (+2 -2)
  95. hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml (+3 -2)
  96. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSKerberosAuthenticator.java (+0 -1)
  97. hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenIdentifier.java (+2 -3)
  98. hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml (+2 -2)
  99. hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (+99 -4)
  100. hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml (+5 -0)

+ 2 - 2
hadoop-assemblies/pom.xml

@@ -23,12 +23,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>2.1.0-beta</version>
+    <version>2.1.1-SNAPSHOT</version>
    <relativePath>../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-assemblies</artifactId>
-  <version>2.1.0-beta</version>
+  <version>2.1.1-SNAPSHOT</version>
  <name>Apache Hadoop Assemblies</name>
  <description>Apache Hadoop Assemblies</description>


+ 2 - 2
hadoop-client/pom.xml

@@ -18,12 +18,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-    <version>2.1.0-beta</version>
+    <version>2.1.1-SNAPSHOT</version>
    <relativePath>../hadoop-project-dist</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-client</artifactId>
-  <version>2.1.0-beta</version>
+  <version>2.1.1-SNAPSHOT</version>
  <packaging>jar</packaging>

  <description>Apache Hadoop Client</description>

+ 2 - 2
hadoop-common-project/hadoop-annotations/pom.xml

@@ -20,12 +20,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>2.1.0-beta</version>
+    <version>2.1.1-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-annotations</artifactId>
-  <version>2.1.0-beta</version>
+  <version>2.1.1-SNAPSHOT</version>
  <description>Apache Hadoop Annotations</description>
  <name>Apache Hadoop Annotations</name>
  <packaging>jar</packaging>

+ 2 - 2
hadoop-common-project/hadoop-auth-examples/pom.xml

@@ -20,12 +20,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>2.1.0-beta</version>
+    <version>2.1.1-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-auth-examples</artifactId>
-  <version>2.1.0-beta</version>
+  <version>2.1.1-SNAPSHOT</version>
  <packaging>war</packaging>

  <name>Apache Hadoop Auth Examples</name>

+ 2 - 2
hadoop-common-project/hadoop-auth/pom.xml

@@ -20,12 +20,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>2.1.0-beta</version>
+    <version>2.1.1-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-auth</artifactId>
-  <version>2.1.0-beta</version>
+  <version>2.1.1-SNAPSHOT</version>
  <packaging>jar</packaging>

  <name>Apache Hadoop Auth</name>

+ 88 - 2
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -6,6 +6,10 @@ Release 2.1.1-beta - UNRELEASED

  NEW FEATURES

+    HADOOP-9509. Implement ONCRPC and XDR. (brandonli)
+
+    HADOOP-9515. Add general interface for NFS and Mount. (brandonli)
+
  IMPROVEMENTS

  OPTIMIZATIONS
@@ -35,6 +39,12 @@ Release 2.1.0-beta - 2013-07-02
    HADOOP-9421. [RPC v9] Convert SASL to use ProtoBuf and provide
    negotiation capabilities (daryn)

+    HADOOP-9688. Add globally unique Client ID to RPC requests. (suresh)
+
+    HADOOP-9683. [RPC v9] Wrap IpcConnectionContext in RPC headers (daryn)
+
+    HADOOP-9698. [RPC v9] Client must honor server's SASL negotiate response (daryn)
+
  NEW FEATURES

    HADOOP-9283. Add support for running the Hadoop client on AIX. (atm)
@@ -55,9 +65,14 @@ Release 2.1.0-beta - 2013-07-02
    HADOOP-8470. Add NetworkTopologyWithNodeGroup, a 4-layer implementation
    of NetworkTopology.  (Junping Du via szetszwo)

-    HADOOP-9509. Implement ONCRPC and XDR. (brandonli)
+    HADOOP-9763. Extends LightWeightGSet to support eviction of expired
+    elements. (Tsz Wo (Nicholas) SZE via jing9)

-    HADOOP-9515. Add general interface for NFS and Mount. (brandonli)
+    HADOOP-9762. RetryCache utility for implementing RPC retries.
+    (Suresh Srinivas via jing9)
+
+    HADOOP-9792. Retry the methods that are tagged @AtMostOnce along
+    with @Idempotent. (suresh)

  IMPROVEMENTS

@@ -137,6 +152,57 @@ Release 2.1.0-beta - 2013-07-02

    HADOOP-9619 Mark stability of .proto files (sanjay Radia)

+    HADOOP-9676.  Make maximum RPC buffer size configurable (Colin Patrick
+    McCabe)
+
+    HADOOP-9691. RPC clients can generate call ID using AtomicInteger instead of
+    synchronizing on the Client instance. (cnauroth)
+
+    HADOOP-9661. Allow metrics sources to be extended. (sandyr via tucu)
+
+    HADOOP-9370.  Write FSWrapper class to wrap FileSystem and FileContext for
+    better test coverage.  (Andrew Wang via Colin Patrick McCabe)
+
+    HADOOP-9355.  Abstract symlink tests to use either FileContext or
+    FileSystem.  (Andrew Wang via Colin Patrick McCabe)
+
+    HADOOP-9673.  NetworkTopology: when a node can't be added, print out its
+    location for diagnostic purposes.  (Colin Patrick McCabe)
+
+    HADOOP-9414.  Refactor out FSLinkResolver and relevant helper methods.
+    (Andrew Wang via Colin Patrick McCabe)
+
+    HADOOP-9416.  Add new symlink resolution methods in FileSystem and
+    FileSystemLinkResolver.  (Andrew Wang via Colin Patrick McCabe)
+
+    HADOOP-9720. Rename Client#uuid to Client#clientId. (Arpit Agarwal via
+    suresh)
+
+    HADOOP-9734. Common protobuf definitions for GetUserMappingsProtocol,
+    RefreshAuthorizationPolicyProtocol and RefreshUserMappingsProtocol (jlowe)
+
+    HADOOP-9716. Rpc retries should use the same call ID as the original call.
+    (szetszwo)
+
+    HADOOP-9717. Add retry attempt count to the RPC requests. (jing9)
+
+    HADOOP-9751. Add clientId and retryCount to RpcResponseHeaderProto.
+    (szetszwo)
+
+    HADOOP-9754. Remove unnecessary "throws IOException/InterruptedException",
+    and fix generic and other javac warnings.  (szetszwo)
+
+    HADOOP-9760. Move GSet and related classes to common from HDFS.
+    (suresh)
+
+    HADOOP-9756. Remove the deprecated getServer(..) methods from RPC.
+    (Junping Du via szetszwo)
+
+    HADOOP-9770. Make RetryCache#state non volatile. (suresh)
+
+    HADOOP-9786. RetryInvocationHandler#isRpcInvocation should support 
+    ProtocolTranslator. (suresh and jing9)
+
  OPTIMIZATIONS

    HADOOP-9150. Avoid unnecessary DNS resolution attempts for logical URIs
@@ -283,6 +349,20 @@ Release 2.1.0-beta - 2013-07-02
    HADOOP-9656. Gridmix unit tests fail on Windows and Linux. (Chuan Liu via
    cnauroth)

+    HADOOP-9707. Fix register lists for crc32c inline assembly. (todd via
+    kihwal)
+
+    HADOOP-9738. TestDistCh fails. (jing9 via kihwal)
+
+    HADOOP-9759. Add support for NativeCodeLoader#getLibraryName on Windows.
+    (Chuan Liu via cnauroth)
+
+    HADOOP-9773. TestLightWeightCache should not set size limit to zero when
+    testing it.  (szetszwo)
+
+    HADOOP-9507. LocalFileSystem rename() is broken in some cases when
+    destination exists. (cnauroth)
+
  BREAKDOWN OF HADOOP-8562 SUBTASKS AND RELATED JIRAS

    HADOOP-8924. Hadoop Common creating package-info.java must not depend on
@@ -490,6 +570,12 @@ Release 2.1.0-beta - 2013-07-02
    HADOOP-8440. HarFileSystem.decodeHarURI fails for URIs whose host contains
    numbers. (Ivan Mitic via cnauroth)

+    HADOOP-9643. org.apache.hadoop.security.SecurityUtil calls 
+    toUpperCase(Locale.getDefault()) as well as toLowerCase(Locale.getDefault()) 
+    on hadoop.security.authentication value. (markrmiller@gmail.com via tucu)
+
+    HADOOP-9701. mvn site ambiguous links in hadoop-common. (kkambatl via tucu)
+
Release 2.0.5-alpha - 06/06/2013

  INCOMPATIBLE CHANGES

+ 6 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -18,6 +18,12 @@
     <Match>
       <Package name="org.apache.hadoop.record.compiler.generated" />
     </Match>
+     <Match>
+       <Package name="org.apache.hadoop.security.proto" />
+     </Match>
+     <Match>
+       <Package name="org.apache.hadoop.tools.proto" />
+     </Match>
     <Match>
       <Bug pattern="EI_EXPOSE_REP" />
     </Match>

+ 5 - 2
hadoop-common-project/hadoop-common/pom.xml

@@ -20,12 +20,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-    <version>2.1.0-beta</version>
+    <version>2.1.1-SNAPSHOT</version>
    <relativePath>../../hadoop-project-dist</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-common</artifactId>
-  <version>2.1.0-beta</version>
+  <version>2.1.1-SNAPSHOT</version>
  <description>Apache Hadoop Common</description>
  <name>Apache Hadoop Common</name>
  <packaging>jar</packaging>
@@ -321,6 +321,9 @@
                  <include>ZKFCProtocol.proto</include>
                  <include>ProtobufRpcEngine.proto</include>
                  <include>Security.proto</include>
+                  <include>GetUserMappingsProtocol.proto</include>
+                  <include>RefreshAuthorizationPolicyProtocol.proto</include>
+                  <include>RefreshUserMappingsProtocol.proto</include>
                </includes>
              </source>
              <output>${project.build.directory}/generated-sources/java</output>

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -2220,7 +2220,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    doc.appendChild(conf);
    conf.appendChild(doc.createTextNode("\n"));
    handleDeprecation(); //ensure properties is set and deprecation is handled
-    for (Enumeration e = properties.keys(); e.hasMoreElements();) {
+    for (Enumeration<Object> e = properties.keys(); e.hasMoreElements();) {
      String name = (String)e.nextElement();
      Object object = properties.get(name);
      String value = null;

+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java

@@ -729,6 +729,7 @@ public abstract class AbstractFileSystem {

  /**
   * Returns true if the file system supports symlinks, false otherwise.
+   * @return true if filesystem supports symlinks
   */
  public boolean supportsSymlinks() {
    return false;
@@ -744,8 +745,9 @@
  }

  /**
-   * The specification of this method matches that of
-   * {@link FileContext#getLinkTarget(Path)};
+   * Partially resolves the path. This is used during symlink resolution in
+   * {@link FSLinkResolver}, and differs from the similarly named method
+   * {@link FileContext#getLinkTarget(Path)}.
   */
  public Path getLinkTarget(final Path f) throws IOException {
    /* We should never get here. Any file system that threw an

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -64,6 +64,11 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
    "ipc.server.read.threadpool.size";
  /** Default value for IPC_SERVER_RPC_READ_THREADS_KEY */
  public static final int     IPC_SERVER_RPC_READ_THREADS_DEFAULT = 1;
+
+  public static final String IPC_MAXIMUM_DATA_LENGTH =
+      "ipc.maximum.data.length";
+
+  public static final int IPC_MAXIMUM_DATA_LENGTH_DEFAULT = 64 * 1024 * 1024;

  /** How many calls per handler are allowed in the queue. */
  public static final String  IPC_SERVER_HANDLER_QUEUE_SIZE_KEY =
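
Note: the two keys added above correspond to the configurable RPC buffer size noted in CHANGES.txt (HADOOP-9676). A minimal sketch of how a server-side component might consult them; the class and variable names below are hypothetical and not part of this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class MaxDataLengthCheck {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Reads ipc.maximum.data.length, falling back to the 64 MB default.
        int maxDataLength = conf.getInt(
            CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH,
            CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT);

        int requestedLength = 128 * 1024 * 1024; // pretend a request advertised 128 MB
        if (requestedLength > maxDataLength) {
          // A server would reject the call instead of buffering an oversized request.
          System.err.println("Requested length " + requestedLength
              + " exceeds ipc.maximum.data.length (" + maxDataLength + ")");
        }
      }
    }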

+ 104 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSLinkResolver.java

@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.net.URI;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Used primarily by {@link FileContext} to operate on and resolve
+ * symlinks in a path. Operations can potentially span multiple
+ * {@link AbstractFileSystem}s.
+ * 
+ * @see FileSystemLinkResolver
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public abstract class FSLinkResolver<T> {
+
+  /**
+   * Return a fully-qualified version of the given symlink target if it
+   * has no scheme and authority. Partially and fully-qualified paths
+   * are returned unmodified.
+   * @param pathURI URI of the filesystem of pathWithLink
+   * @param pathWithLink Path that contains the symlink
+   * @param target The symlink's absolute target
+   * @return Fully qualified version of the target.
+   */
+  public static Path qualifySymlinkTarget(final URI pathURI,
+      Path pathWithLink, Path target) {
+    // NB: makeQualified uses the target's scheme and authority, if
+    // specified, and the scheme and authority of pathURI, if not.
+    final URI targetUri = target.toUri();
+    final String scheme = targetUri.getScheme();
+    final String auth = targetUri.getAuthority();
+    return (scheme == null && auth == null) ? target.makeQualified(pathURI,
+        pathWithLink.getParent()) : target;
+  }
+
+  /**
+   * Generic helper function overridden on instantiation to perform a
+   * specific operation on the given file system using the given path
+   * which may result in an UnresolvedLinkException.
+   * @param fs AbstractFileSystem to perform the operation on.
+   * @param p Path given the file system.
+   * @return Generic type determined by the specific implementation.
+   * @throws UnresolvedLinkException If symbolic link <code>path</code> could
+   *           not be resolved
+   * @throws IOException an I/O error occurred
+   */
+  abstract public T next(final AbstractFileSystem fs, final Path p)
+      throws IOException, UnresolvedLinkException;
+
+  /**
+   * Performs the operation specified by the next function, calling it
+   * repeatedly until all symlinks in the given path are resolved.
+   * @param fc FileContext used to access file systems.
+   * @param path The path to resolve symlinks on.
+   * @return Generic type determined by the implementation of next.
+   * @throws IOException
+   */
+  public T resolve(final FileContext fc, final Path path) throws IOException {
+    int count = 0;
+    T in = null;
+    Path p = path;
+    // NB: More than one AbstractFileSystem can match a scheme, eg 
+    // "file" resolves to LocalFs but could have come by RawLocalFs.
+    AbstractFileSystem fs = fc.getFSofPath(p);
+
+    // Loop until all symlinks are resolved or the limit is reached
+    for (boolean isLink = true; isLink;) {
+      try {
+        in = next(fs, p);
+        isLink = false;
+      } catch (UnresolvedLinkException e) {
+        if (count++ > FsConstants.MAX_PATH_LINKS) {
+          throw new IOException("Possible cyclic loop while " +
+                                "following symbolic link " + path);
+        }
+        // Resolve the first unresolved path component
+        p = qualifySymlinkTarget(fs.getUri(), p, fs.getLinkTarget(p));
+        fs = fc.getFSofPath(p);
+      }
+    }
+    return in;
+  }
+}
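
As a usage illustration (a hedged sketch, not code from this commit; the wrapper class and method names are hypothetical), callers subclass FSLinkResolver anonymously and let resolve() keep invoking next() until every symlink component in the path is resolved:

    import java.io.IOException;
    import org.apache.hadoop.fs.AbstractFileSystem;
    import org.apache.hadoop.fs.FSLinkResolver;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.UnresolvedLinkException;

    public class FSLinkResolverSketch {
      // Fetches the FileStatus of 'absF', following symlinks across file systems.
      static FileStatus statusFollowingLinks(FileContext fc, Path absF)
          throws IOException {
        return new FSLinkResolver<FileStatus>() {
          @Override
          public FileStatus next(final AbstractFileSystem fs, final Path p)
              throws IOException, UnresolvedLinkException {
            // Throws UnresolvedLinkException while p still contains a symlink,
            // which makes resolve() qualify the link target and retry.
            return fs.getFileStatus(p);
          }
        }.resolve(fc, absF);
      }
    }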

+ 9 - 99
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -292,17 +292,6 @@ public final class FileContext {
      DELETE_ON_EXIT.clear();
    }
  }
-  
-  /**
-   * Pathnames with scheme and relative path are illegal.
-   * @param path to be checked
-   */
-  private static void checkNotSchemeWithRelative(final Path path) {
-    if (path.toUri().isAbsolute() && !path.isUriPathAbsolute()) {
-      throw new HadoopIllegalArgumentException(
-          "Unsupported name: has scheme but relative path-part");
-    }
-  }

  /**
   * Get the file system of supplied path.
@@ -315,13 +304,10 @@ public final class FileContext {
   * @throws IOExcepton If the file system for <code>absOrFqPath</code> could
   *         not be instantiated.
   */
-  private AbstractFileSystem getFSofPath(final Path absOrFqPath)
+  protected AbstractFileSystem getFSofPath(final Path absOrFqPath)
      throws UnsupportedFileSystemException, IOException {
-    checkNotSchemeWithRelative(absOrFqPath);
-    if (!absOrFqPath.isAbsolute() && absOrFqPath.toUri().getScheme() == null) {
-      throw new HadoopIllegalArgumentException(
-          "FileContext Bug: path is relative");
-    }
+    absOrFqPath.checkNotSchemeWithRelative();
+    absOrFqPath.checkNotRelative();

    try { 
      // Is it the default FS for this FileContext?
@@ -523,7 +509,7 @@ public final class FileContext {
   *           </ul>
   */
  public void setWorkingDirectory(final Path newWDir) throws IOException {
-    checkNotSchemeWithRelative(newWDir);
+    newWDir.checkNotSchemeWithRelative();
    /* wd is stored as a fully qualified path. We check if the given 
     * path is not relative first since resolve requires and returns 
     * an absolute path.
@@ -1128,26 +1114,6 @@ public final class FileContext {
    }.resolve(this, absF);
  }

-  /**
-   * Return a fully qualified version of the given symlink target if it
-   * has no scheme and authority. Partially and fully qualified paths 
-   * are returned unmodified.
-   * @param pathFS The AbstractFileSystem of the path
-   * @param pathWithLink Path that contains the symlink
-   * @param target The symlink's absolute target
-   * @return Fully qualified version of the target.
-   */
-  private Path qualifySymlinkTarget(final AbstractFileSystem pathFS,
-    Path pathWithLink, Path target) {
-    // NB: makeQualified uses the target's scheme and authority, if
-    // specified, and the scheme and authority of pathFS, if not.
-    final String scheme = target.toUri().getScheme();
-    final String auth   = target.toUri().getAuthority();
-    return (scheme == null && auth == null)
-      ? target.makeQualified(pathFS.getUri(), pathWithLink.getParent())
-      : target;
-  }
-  
  /**
   * Return a file status object that represents the path. If the path 
   * refers to a symlink then the FileStatus of the symlink is returned.
@@ -1172,7 +1138,8 @@ public final class FileContext {
        throws IOException, UnresolvedLinkException {
        FileStatus fi = fs.getFileLinkStatus(p);
        if (fi.isSymlink()) {
-          fi.setSymlink(qualifySymlinkTarget(fs, p, fi.getSymlink()));
+          fi.setSymlink(FSLinkResolver.qualifySymlinkTarget(fs.getUri(), p,
+              fi.getSymlink()));
        }
        return fi;
      }
@@ -2166,9 +2133,9 @@ public final class FileContext {
        boolean overwrite) throws AccessControlException,
        FileAlreadyExistsException, FileNotFoundException,
        ParentNotDirectoryException, UnsupportedFileSystemException, 
-	IOException {
-      checkNotSchemeWithRelative(src);
-      checkNotSchemeWithRelative(dst);
+        IOException {
+      src.checkNotSchemeWithRelative();
+      dst.checkNotSchemeWithRelative();
      Path qSrc = makeQualified(src);
      Path qDst = makeQualified(dst);
      checkDest(qSrc.getName(), qDst, overwrite);
@@ -2334,64 +2301,7 @@ public final class FileContext {
    }.resolve(this, absF);
    return result;
  }
-  
-  /**
-   * Class used to perform an operation on and resolve symlinks in a
-   * path. The operation may potentially span multiple file systems.  
-   */
-  protected abstract class FSLinkResolver<T> {
-    // The maximum number of symbolic link components in a path
-    private static final int MAX_PATH_LINKS = 32;
-
-    /**
-     * Generic helper function overridden on instantiation to perform a 
-     * specific operation on the given file system using the given path
-     * which may result in an UnresolvedLinkException. 
-     * @param fs AbstractFileSystem to perform the operation on.
-     * @param p Path given the file system.
-     * @return Generic type determined by the specific implementation.
-     * @throws UnresolvedLinkException If symbolic link <code>path</code> could 
-     *           not be resolved
-     * @throws IOException an I/O error occured
-     */
-    public abstract T next(final AbstractFileSystem fs, final Path p) 
-      throws IOException, UnresolvedLinkException;  
-        
-    /**
-     * Performs the operation specified by the next function, calling it
-     * repeatedly until all symlinks in the given path are resolved.
-     * @param fc FileContext used to access file systems.
-     * @param p The path to resolve symlinks in.
-     * @return Generic type determined by the implementation of next.
-     * @throws IOException
-     */
-    public T resolve(final FileContext fc, Path p) throws IOException {
-      int count = 0;
-      T in = null;
-      Path first = p;
-      // NB: More than one AbstractFileSystem can match a scheme, eg 
-      // "file" resolves to LocalFs but could have come by RawLocalFs.
-      AbstractFileSystem fs = fc.getFSofPath(p);      
 
 
-      // Loop until all symlinks are resolved or the limit is reached
-      for (boolean isLink = true; isLink;) {
-        try {
-          in = next(fs, p);
-          isLink = false;
-        } catch (UnresolvedLinkException e) {
-          if (count++ > MAX_PATH_LINKS) {
-            throw new IOException("Possible cyclic loop while " +
-                                  "following symbolic link " + first);
-          }
-          // Resolve the first unresolved path component
-          p = qualifySymlinkTarget(fs, p, fs.getLinkTarget(p));
-          fs = fc.getFSofPath(p);
-        }
-      }
-      return in;
-    }
-  }
-  
  /**
   * Get the statistics for a particular file system
   * 

+ 77 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -53,6 +53,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -262,6 +263,16 @@ public abstract class FileSystem extends Configured implements Closeable {
    return 0;
  }

+  protected static FileSystem getFSofPath(final Path absOrFqPath,
+      final Configuration conf)
+      throws UnsupportedFileSystemException, IOException {
+    absOrFqPath.checkNotSchemeWithRelative();
+    absOrFqPath.checkNotRelative();
+
+    // Uses the default file system if not fully qualified
+    return get(absOrFqPath.toUri(), conf);
+  }
+
  /**
   * Get a canonical service name for this file system.  The token cache is
   * the only user of the canonical service name, and uses it to lookup this
@@ -811,7 +822,9 @@ public abstract class FileSystem extends Configured implements Closeable {
  public FSDataOutputStream create(Path f, short replication, 
      Progressable progress) throws IOException {
    return create(f, true, 
-                  getConf().getInt("io.file.buffer.size", 4096),
+                  getConf().getInt(
+                      CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
+                      CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT),
                  replication,
                  getDefaultBlockSize(f), progress);
  }
@@ -1243,7 +1256,7 @@ public abstract class FileSystem extends Configured implements Closeable {
  protected void rename(final Path src, final Path dst,
      final Rename... options) throws IOException {
    // Default implementation
-    final FileStatus srcStatus = getFileStatus(src);
+    final FileStatus srcStatus = getFileLinkStatus(src);
    if (srcStatus == null) {
      throw new FileNotFoundException("rename source " + src + " not found.");
    }
@@ -1259,7 +1272,7 @@

    FileStatus dstStatus;
    try {
-      dstStatus = getFileStatus(dst);
+      dstStatus = getFileLinkStatus(dst);
    } catch (IOException e) {
      dstStatus = null;
    }
@@ -2174,6 +2187,65 @@ public abstract class FileSystem extends Configured implements Closeable {
   */
  public abstract FileStatus getFileStatus(Path f) throws IOException;

+  /**
+   * See {@link FileContext#fixRelativePart}
+   */
+  protected Path fixRelativePart(Path p) {
+    if (p.isUriPathAbsolute()) {
+      return p;
+    } else {
+      return new Path(getWorkingDirectory(), p);
+    }
+  }
+
+  /**
+   * See {@link FileContext#createSymlink(Path, Path, boolean)}
+   */
+  public void createSymlink(final Path target, final Path link,
+      final boolean createParent) throws AccessControlException,
+      FileAlreadyExistsException, FileNotFoundException,
+      ParentNotDirectoryException, UnsupportedFileSystemException, 
+      IOException {
+    // Supporting filesystems should override this method
+    throw new UnsupportedOperationException(
+        "Filesystem does not support symlinks!");
+  }
+
+  /**
+   * See {@link FileContext#getFileLinkStatus(Path)}
+   */
+  public FileStatus getFileLinkStatus(final Path f)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    // Supporting filesystems should override this method
+    return getFileStatus(f);
+  }
+
+  /**
+   * See {@link AbstractFileSystem#supportsSymlinks()}
+   */
+  public boolean supportsSymlinks() {
+    return false;
+  }
+
+  /**
+   * See {@link FileContext#getLinkTarget(Path)}
+   */
+  public Path getLinkTarget(Path f) throws IOException {
+    // Supporting filesystems should override this method
+    throw new UnsupportedOperationException(
+        "Filesystem does not support symlinks!");
+  }
+
+  /**
+   * See {@link AbstractFileSystem#getLinkTarget(Path)}
+   */
+  protected Path resolveLink(Path f) throws IOException {
+    // Supporting filesystems should override this method
+    throw new UnsupportedOperationException(
+        "Filesystem does not support symlinks!");
+  }
+
  /**
   * Get the checksum of a file.
   *
@@ -2397,7 +2469,8 @@ public abstract class FileSystem extends Configured implements Closeable {
        }
        
        // now insert the new file system into the map
-        if (map.isEmpty() ) {
+        if (map.isEmpty()
+                && !ShutdownHookManager.get().isShutdownInProgress()) {
          ShutdownHookManager.get().addShutdownHook(clientFinalizer, SHUTDOWN_HOOK_PRIORITY);
        }
        fs.key = key;
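
The new symlink surface added to FileSystem above defaults to "unsupported": createSymlink, getLinkTarget and resolveLink throw UnsupportedOperationException unless a subclass overrides them, and supportsSymlinks() returns false. A hedged usage sketch (the paths are hypothetical, and the calls are guarded so this is a no-op on file systems without symlink support):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SymlinkSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path target = new Path("/tmp/data.txt");    // hypothetical existing file
        Path link = new Path("/tmp/data-link");
        if (fs.supportsSymlinks()) {
          fs.createSymlink(target, link, false /* createParent */);
          // getFileLinkStatus does not follow the link, so it reports the link itself.
          FileStatus linkStatus = fs.getFileLinkStatus(link);
          System.out.println(link + " is a symlink: " + linkStatus.isSymlink());
          System.out.println("link target: " + fs.getLinkTarget(link));
        }
      }
    }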

+ 99 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemLinkResolver.java

@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * FileSystem-specific class used to operate on and resolve symlinks in a path.
+ * Operation can potentially span multiple {@link FileSystem}s.
+ * 
+ * @see FSLinkResolver
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public abstract class FileSystemLinkResolver<T> {
+
+  /**
+   * FileSystem subclass-specific implementation of superclass method.
+   * Overridden on instantiation to perform the actual method call, which throws
+   * an UnresolvedLinkException if called on an unresolved {@link Path}.
+   * @param p Path on which to perform an operation
+   * @return Generic type returned by operation
+   * @throws IOException
+   * @throws UnresolvedLinkException
+   */
+  abstract public T doCall(final Path p) throws IOException,
+      UnresolvedLinkException;
+
+  /**
+   * Calls the abstract FileSystem call equivalent to the specialized subclass
+   * implementation in {@link #doCall(Path)}. This is used when retrying the
+   * call with a newly resolved Path and corresponding new FileSystem.
+   * 
+   * @param fs
+   *          FileSystem with which to retry call
+   * @param p
+   *          Resolved Target of path
+   * @return Generic type determined by implementation
+   * @throws IOException
+   */
+  abstract public T next(final FileSystem fs, final Path p) throws IOException;
+
+  /**
+   * Attempt calling overridden {@link #doCall(Path)} method with
+   * specified {@link FileSystem} and {@link Path}. If the call fails with an
+   * UnresolvedLinkException, it will try to resolve the path and retry the call
+   * by calling {@link #next(FileSystem, Path)}.
+   * @param filesys FileSystem with which to try call
+   * @param path Path with which to try call
+   * @return Generic type determined by implementation
+   * @throws IOException
+   */
+  public T resolve(final FileSystem filesys, final Path path)
+      throws IOException {
+    int count = 0;
+    T in = null;
+    Path p = path;
+    FileSystem fs = FileSystem.getFSofPath(p, filesys.getConf());
+    for (boolean isLink = true; isLink;) {
+      try {
+        in = doCall(p);
+        isLink = false;
+      } catch (UnresolvedLinkException e) {
+        if (count++ > FsConstants.MAX_PATH_LINKS) {
+          throw new IOException("Possible cyclic loop while " +
+                                "following symbolic link " + path);
+        }
+        // Resolve the first unresolved path component
+        p = FSLinkResolver.qualifySymlinkTarget(fs.getUri(), p,
+            filesys.resolveLink(p));
+        fs = FileSystem.getFSofPath(p, filesys.getConf());
+        // Have to call next if it's a new FS
+        if (!fs.equals(filesys)) {
+          return next(fs, p);
+        }
+        // Else, we keep resolving with this filesystem
+      }
+    }
+    // Successful call, path was fully resolved
+    return in;
+  }
+}
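
For contrast with FSLinkResolver, the sketch below (a hypothetical helper, not part of this commit) wraps a plain FileSystem call: doCall() runs against the original FileSystem, and next() is only invoked when a symlink resolves onto a different FileSystem instance:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.FileSystemLinkResolver;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.UnresolvedLinkException;

    public class FileSystemLinkResolverSketch {
      static FileStatus statusViaResolver(final FileSystem fs, Path p)
          throws IOException {
        return new FileSystemLinkResolver<FileStatus>() {
          @Override
          public FileStatus doCall(final Path path)
              throws IOException, UnresolvedLinkException {
            return fs.getFileStatus(path);          // may throw UnresolvedLinkException
          }
          @Override
          public FileStatus next(final FileSystem otherFs, final Path path)
              throws IOException {
            return otherFs.getFileStatus(path);     // retry on the resolved FileSystem
          }
        }.resolve(fs, p);
      }
    }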

+ 27 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
 
 /****************************************************************
@@ -397,6 +398,32 @@ public class FilterFileSystem extends FileSystem {
    return fs.getFileStatus(f);
  }

+  public void createSymlink(final Path target, final Path link,
+      final boolean createParent) throws AccessControlException,
+      FileAlreadyExistsException, FileNotFoundException,
+      ParentNotDirectoryException, UnsupportedFileSystemException, 
+      IOException {
+    fs.createSymlink(target, link, createParent);
+  }
+
+  public FileStatus getFileLinkStatus(final Path f)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    return fs.getFileLinkStatus(f);
+  }
+
+  public boolean supportsSymlinks() {
+    return fs.supportsSymlinks();
+  }
+
+  public Path getLinkTarget(Path f) throws IOException {
+    return fs.getLinkTarget(f);
+  }
+
+  protected Path resolveLink(Path f) throws IOException {
+    return fs.resolveLink(f);
+  }
+
  @Override
  public FileChecksum getFileChecksum(Path f) throws IOException {
    return fs.getFileChecksum(f);

+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsConstants.java

@@ -33,8 +33,10 @@ public interface FsConstants {
  
  // URI scheme for FTP
  public static final String FTP_SCHEME = "ftp";
-  
-  
+
+  // Maximum number of symlinks to recursively resolve in a path
+  static final int MAX_PATH_LINKS = 32;
+
  /**
   * ViewFs: viewFs file system (ie the mount file system on client side)
   */

+ 27 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java

@@ -25,6 +25,7 @@ import java.util.regex.Pattern;

 import org.apache.avro.reflect.Stringable;
 import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -57,6 +58,32 @@ public class Path implements Comparable {

  private URI uri;                                // a hierarchical uri

+  /**
+   * Pathnames with scheme and relative path are illegal.
+   * @param path to be checked
+   */
+  void checkNotSchemeWithRelative() {
+    if (toUri().isAbsolute() && !isUriPathAbsolute()) {
+      throw new HadoopIllegalArgumentException(
+          "Unsupported name: has scheme but relative path-part");
+    }
+  }
+
+  void checkNotRelative() {
+    if (!isAbsolute() && toUri().getScheme() == null) {
+      throw new HadoopIllegalArgumentException("Path is relative");
+    }
+  }
+
+  public static Path getPathWithoutSchemeAndAuthority(Path path) {
+    // This code depends on Path.toString() to remove the leading slash before
+    // the drive specification on Windows.
+    Path newPath = path.isUriPathAbsolute() ?
+      new Path(null, null, path.toUri().getPath()) :
+      path;
+    return newPath;
+  }
+
  /** Resolve a child path against a parent path. */
  public Path(String parent, String child) {
    this(new Path(parent), new Path(child));
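
A quick illustration of the helper added above (hypothetical standalone snippet): getPathWithoutSchemeAndAuthority keeps only the path part, which RawLocalFs relies on below when shelling out to create symlinks:

    import org.apache.hadoop.fs.Path;

    public class PathSketch {
      public static void main(String[] args) {
        Path p = new Path("hdfs://namenode:8020/user/alice/data");
        // Prints "/user/alice/data": the scheme and authority are dropped.
        System.out.println(Path.getPathWithoutSchemeAndAuthority(p));
      }
    }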

+ 27 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -319,9 +319,35 @@

  @Override
  public boolean rename(Path src, Path dst) throws IOException {
-    if (pathToFile(src).renameTo(pathToFile(dst))) {
+    // Attempt rename using Java API.
+    File srcFile = pathToFile(src);
+    File dstFile = pathToFile(dst);
+    if (srcFile.renameTo(dstFile)) {
      return true;
    }
+
+    // Enforce POSIX rename behavior that a source directory replaces an existing
+    // destination if the destination is an empty directory.  On most platforms,
+    // this is already handled by the Java API call above.  Some platforms
+    // (notably Windows) do not provide this behavior, so the Java API call above
+    // fails.  Delete destination and attempt rename again.
+    if (this.exists(dst)) {
+      FileStatus sdst = this.getFileStatus(dst);
+      if (sdst.isDirectory() && dstFile.list().length == 0) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Deleting empty destination and renaming " + src + " to " +
+            dst);
+        }
+        if (this.delete(dst, false) && srcFile.renameTo(dstFile)) {
+          return true;
+        }
+      }
+    }
+
+    // The fallback behavior accomplishes the rename by a full copy.
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Falling through to a copy of " + src + " to " + dst);
+    }
    return FileUtil.copy(this, src, this, dst, true, getConf());
  }
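
The rewritten rename() above enforces POSIX semantics (a directory may replace an existing empty destination directory) before falling back to copy-and-delete. A hedged sketch of the observable behavior, with hypothetical local paths:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class LocalRenameSketch {
      public static void main(String[] args) throws Exception {
        FileSystem local = FileSystem.getLocal(new Configuration());
        Path src = new Path("/tmp/rename-src");
        Path dst = new Path("/tmp/rename-dst");
        local.mkdirs(src);
        local.mkdirs(dst);                        // empty destination directory
        // On platforms where Java's renameTo refuses to replace the empty
        // destination (notably Windows), the destination is deleted and the
        // rename is retried before any full copy is attempted.
        boolean renamed = local.rename(src, dst);
        System.out.println("rename succeeded: " + renamed);
      }
    }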
   
   

+ 5 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java

@@ -89,8 +89,11 @@ public class TrashPolicyDefault extends TrashPolicy {
    this.emptierInterval = (long)(conf.getFloat(
        FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
        * MSECS_PER_MINUTE);
-  }
-  
+    LOG.info("Namenode trash configuration: Deletion interval = " +
+             this.deletionInterval + " minutes, Emptier interval = " +
+             this.emptierInterval + " minutes.");
+   }
+
  private Path makeTrashRelativePath(Path basePath, Path rmFilePath) {
    return Path.mergePaths(basePath, rmFilePath);
  }

+ 6 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java

@@ -17,20 +17,20 @@
  */
 package org.apache.hadoop.fs.local;
 
-import java.io.IOException;
-import java.io.File;
 import java.io.FileNotFoundException;
+import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.DelegateToFileSystem;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Shell;
@@ -91,8 +91,8 @@ public class RawLocalFs extends DelegateToFileSystem {
     // NB: Use createSymbolicLink in java.nio.file.Path once available
     try {
       Shell.execCommand(Shell.getSymlinkCommand(
-        getPathWithoutSchemeAndAuthority(target).getPath(),
-        getPathWithoutSchemeAndAuthority(link).getPath()));
+        Path.getPathWithoutSchemeAndAuthority(target).toString(),
+        Path.getPathWithoutSchemeAndAuthority(link).toString()));
     } catch (IOException x) {
       throw new IOException("Unable to create symlink: "+x.getMessage());
     }
@@ -175,13 +175,4 @@ public class RawLocalFs extends DelegateToFileSystem {
      */
     throw new AssertionError();
   }
-
-  private static File getPathWithoutSchemeAndAuthority(Path path) {
-    Path newPath = path.isUriPathAbsolute() ?
-      new Path(null, null, path.toUri().getPath()) :
-      path;
-
-    // Path.toString() removes leading slash before drive spec on Windows.
-    return new File(newPath.toString());
-  }
 }

+ 5 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.ha;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.KerberosInfo;
 
@@ -106,6 +107,7 @@ public interface HAServiceProtocol {
    * @throws IOException
    *           if other errors happen
    */
+  @Idempotent
   public void monitorHealth() throws HealthCheckFailedException,
                                      AccessControlException,
                                      IOException;
@@ -121,6 +123,7 @@ public interface HAServiceProtocol {
    * @throws IOException
    *           if other errors happen
    */
+  @Idempotent
   public void transitionToActive(StateChangeRequestInfo reqInfo)
                                    throws ServiceFailedException,
                                           AccessControlException,
@@ -137,6 +140,7 @@ public interface HAServiceProtocol {
    * @throws IOException
    *           if other errors happen
    */
+  @Idempotent
   public void transitionToStandby(StateChangeRequestInfo reqInfo)
                                     throws ServiceFailedException,
                                            AccessControlException,
@@ -152,6 +156,7 @@ public interface HAServiceProtocol {
    * @throws IOException
    *           if other errors happen
    */
+  @Idempotent
   public HAServiceStatus getServiceStatus() throws AccessControlException,
                                                    IOException;
 }

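Marking these methods @Idempotent matters because the client-side retry machinery inspects the protocol interface reflectively before deciding whether a failed call may be retried after a failover. A small sketch of that check; monitorHealth takes no arguments, so getMethod needs no parameter types:

    import java.lang.reflect.Method;
    import org.apache.hadoop.ha.HAServiceProtocol;
    import org.apache.hadoop.io.retry.Idempotent;

    public class IdempotentCheckExample {
      public static void main(String[] args) throws NoSuchMethodException {
        Method m = HAServiceProtocol.class.getMethod("monitorHealth");
        // true once the annotation added above is in place
        System.out.println(m.isAnnotationPresent(Idempotent.class));
      }
    }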
+ 41 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AtMostOnce.java

@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.retry;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Inherited;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Used to mark certain methods of an interface with at-most-once semantics.
+ * 
+ * Server must guarantee that methods are executed at most once, by keeping
+ * a retry cache. The previous response must be returned when duplicate 
+ * requests are received. Because of these guarantees, a client can retry
+ * this request on failover and other network failure conditions.
+ */
+@Inherited
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.METHOD)
+@InterfaceStability.Evolving
+public @interface AtMostOnce {
+
+}

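As the javadoc explains, @AtMostOnce is for calls that the server deduplicates through a retry cache, while @Idempotent remains the right marker for calls that can simply be re-executed. A hypothetical protocol interface showing how the two annotations are meant to be applied (the interface and its methods are invented for illustration):

    import java.io.IOException;
    import org.apache.hadoop.io.retry.AtMostOnce;
    import org.apache.hadoop.io.retry.Idempotent;

    public interface ExampleProtocol {
      @Idempotent                 // safe to re-run; the result is the same
      long getLength(String src) throws IOException;

      @AtMostOnce                 // server must keep a retry cache and replay the response
      void rename(String src, String dst) throws IOException;
    }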
+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/FailoverProxyProvider.java

@@ -51,8 +51,8 @@ public interface FailoverProxyProvider<T> extends Closeable {
   /**
    * Return a reference to the interface this provider's proxy objects actually
    * implement. If any of the methods on this interface are annotated as being
-   * {@link Idempotent}, then this fact will be passed to the
-   * {@link RetryPolicy#shouldRetry(Exception, int, int, boolean)} method on
+   * {@link Idempotent} or {@link AtMostOnce}, then this fact will be passed to
+   * the {@link RetryPolicy#shouldRetry(Exception, int, int, boolean)} method on
    * error, for use in determining whether or not failover should be attempted.
    * 
    * @return the interface implemented by the proxy objects returned by

+ 45 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java

@@ -18,21 +18,34 @@
 package org.apache.hadoop.io.retry;
 
 import java.io.IOException;
+import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
 import java.util.Collections;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
-import org.apache.hadoop.util.ThreadUtil;
+import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.Client.ConnectionId;
+import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RpcConstants;
 import org.apache.hadoop.ipc.RpcInvocationHandler;
+import org.apache.hadoop.util.ThreadUtil;
 
-class RetryInvocationHandler implements RpcInvocationHandler {
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * This class implements RpcInvocationHandler and supports retry on the client 
+ * side.
+ */
+@InterfaceAudience.Private
+public class RetryInvocationHandler implements RpcInvocationHandler {
   public static final Log LOG = LogFactory.getLog(RetryInvocationHandler.class);
   private final FailoverProxyProvider proxyProvider;
 
@@ -45,13 +58,13 @@ class RetryInvocationHandler implements RpcInvocationHandler {
   private final RetryPolicy defaultPolicy;
   private final Map<String,RetryPolicy> methodNameToPolicyMap;
   private Object currentProxy;
-  
-  public RetryInvocationHandler(FailoverProxyProvider proxyProvider,
+
+  protected RetryInvocationHandler(FailoverProxyProvider proxyProvider,
       RetryPolicy retryPolicy) {
     this(proxyProvider, retryPolicy, Collections.<String, RetryPolicy>emptyMap());
   }
 
-  public RetryInvocationHandler(FailoverProxyProvider proxyProvider,
+  RetryInvocationHandler(FailoverProxyProvider proxyProvider,
       RetryPolicy defaultPolicy,
       Map<String, RetryPolicy> methodNameToPolicyMap) {
     this.proxyProvider = proxyProvider;
@@ -70,6 +83,8 @@ class RetryInvocationHandler implements RpcInvocationHandler {
     
     
     // The number of times this method invocation has been failed over.
     int invocationFailoverCount = 0;
+    final boolean isRpc = isRpcInvocation(currentProxy);
+    final int callId = isRpc? Client.nextCallId(): RpcConstants.INVALID_CALL_ID;
     int retries = 0;
     int retries = 0;
     while (true) {
     while (true) {
       // The number of times this invocation handler has ever been failed over,
       // The number of times this invocation handler has ever been failed over,
@@ -79,16 +94,25 @@ class RetryInvocationHandler implements RpcInvocationHandler {
       synchronized (proxyProvider) {
         invocationAttemptFailoverCount = proxyProviderFailoverCount;
       }
+
+      if (isRpc) {
+        Client.setCallIdAndRetryCount(callId, retries);
+      }
       try {
         Object ret = invokeMethod(method, args);
         hasMadeASuccessfulCall = true;
         return ret;
       } catch (Exception e) {
-        boolean isMethodIdempotent = proxyProvider.getInterface()
+        boolean isIdempotentOrAtMostOnce = proxyProvider.getInterface()
             .getMethod(method.getName(), method.getParameterTypes())
             .isAnnotationPresent(Idempotent.class);
-        RetryAction action = policy.shouldRetry(e, retries++, invocationFailoverCount,
-            isMethodIdempotent);
+        if (!isIdempotentOrAtMostOnce) {
+          isIdempotentOrAtMostOnce = proxyProvider.getInterface()
+              .getMethod(method.getName(), method.getParameterTypes())
+              .isAnnotationPresent(AtMostOnce.class);
+        }
+        RetryAction action = policy.shouldRetry(e, retries++,
+            invocationFailoverCount, isIdempotentOrAtMostOnce);
         if (action.action == RetryAction.RetryDecision.FAIL) {
           if (action.reason != null) {
             LOG.warn("Exception while invoking " + 
@@ -156,7 +180,7 @@ class RetryInvocationHandler implements RpcInvocationHandler {
     }
   }
   
-  private Object invokeMethod(Method method, Object[] args) throws Throwable {
+  protected Object invokeMethod(Method method, Object[] args) throws Throwable {
     try {
       if (!method.isAccessible()) {
         method.setAccessible(true);
@@ -167,6 +191,18 @@ class RetryInvocationHandler implements RpcInvocationHandler {
     }
   }
 
+  @VisibleForTesting
+  static boolean isRpcInvocation(Object proxy) {
+    if (proxy instanceof ProtocolTranslator) {
+      proxy = ((ProtocolTranslator) proxy).getUnderlyingProxyObject();
+    }
+    if (!Proxy.isProxyClass(proxy.getClass())) {
+      return false;
+    }
+    final InvocationHandler ih = Proxy.getInvocationHandler(proxy);
+    return ih instanceof RpcInvocationHandler;
+  }
+
   @Override
   public void close() throws IOException {
     proxyProvider.close();

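isRpcInvocation() above unwraps a ProtocolTranslator and then asks java.lang.reflect.Proxy whether the object is a dynamic proxy backed by an RpcInvocationHandler. A self-contained sketch of the same mechanics, with a plain marker interface standing in for the Hadoop handler type:

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.Proxy;

    public class ProxyCheckSketch {
      // Stand-in for RpcInvocationHandler; only used as a marker type here.
      interface MarkerHandler extends InvocationHandler { }

      static boolean looksLikeRpcProxy(Object proxy) {
        if (!Proxy.isProxyClass(proxy.getClass())) {
          return false;                               // not a dynamic proxy at all
        }
        return Proxy.getInvocationHandler(proxy) instanceof MarkerHandler;
      }
    }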
+ 11 - 11
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java

@@ -153,7 +153,7 @@ public class RetryPolicies {
   static class TryOnceThenFail implements RetryPolicy {
     @Override
     public RetryAction shouldRetry(Exception e, int retries, int failovers,
-        boolean isMethodIdempotent) throws Exception {
+        boolean isIdempotentOrAtMostOnce) throws Exception {
       return RetryAction.FAIL;
     }
   }
@@ -161,7 +161,7 @@ public class RetryPolicies {
   static class RetryForever implements RetryPolicy {
     @Override
     public RetryAction shouldRetry(Exception e, int retries, int failovers,
-        boolean isMethodIdempotent) throws Exception {
+        boolean isIdempotentOrAtMostOnce) throws Exception {
       return RetryAction.RETRY;
     }
   }
@@ -196,7 +196,7 @@ public class RetryPolicies {
 
 
     @Override
     public RetryAction shouldRetry(Exception e, int retries, int failovers,
-        boolean isMethodIdempotent) throws Exception {
+        boolean isIdempotentOrAtMostOnce) throws Exception {
       if (retries >= maxRetries) {
         return RetryAction.FAIL;
       }
@@ -305,7 +305,7 @@ public class RetryPolicies {
 
 
     @Override
     public RetryAction shouldRetry(Exception e, int curRetry, int failovers,
-        boolean isMethodIdempotent) throws Exception {
+        boolean isIdempotentOrAtMostOnce) throws Exception {
       final Pair p = searchPair(curRetry);
       if (p == null) {
         //no more retries.
@@ -435,12 +435,12 @@ public class RetryPolicies {
 
 
     @Override
     public RetryAction shouldRetry(Exception e, int retries, int failovers,
-        boolean isMethodIdempotent) throws Exception {
+        boolean isIdempotentOrAtMostOnce) throws Exception {
       RetryPolicy policy = exceptionToPolicyMap.get(e.getClass());
       if (policy == null) {
         policy = defaultPolicy;
       }
-      return policy.shouldRetry(e, retries, failovers, isMethodIdempotent);
+      return policy.shouldRetry(e, retries, failovers, isIdempotentOrAtMostOnce);
     }
     
   }
@@ -463,7 +463,7 @@ public class RetryPolicies {
 
 
     @Override
     public RetryAction shouldRetry(Exception e, int retries, int failovers,
-        boolean isMethodIdempotent) throws Exception {
+        boolean isIdempotentOrAtMostOnce) throws Exception {
       RetryPolicy policy = null;
       if (e instanceof RemoteException) {
         policy = exceptionNameToPolicyMap.get(
@@ -472,7 +472,7 @@ public class RetryPolicies {
       if (policy == null) {
         policy = defaultPolicy;
       }
-      return policy.shouldRetry(e, retries, failovers, isMethodIdempotent);
+      return policy.shouldRetry(e, retries, failovers, isIdempotentOrAtMostOnce);
     }
   }
   
   
@@ -533,7 +533,7 @@ public class RetryPolicies {
 
 
     @Override
     public RetryAction shouldRetry(Exception e, int retries,
-        int failovers, boolean isMethodIdempotent) throws Exception {
+        int failovers, boolean isIdempotentOrAtMostOnce) throws Exception {
       if (failovers >= maxFailovers) {
         return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
             "failovers (" + failovers + ") exceeded maximum allowed ("
@@ -553,7 +553,7 @@ public class RetryPolicies {
                 calculateExponentialTime(delayMillis, failovers, maxDelayBase));
       } else if (e instanceof SocketException ||
                  (e instanceof IOException && !(e instanceof RemoteException))) {
-        if (isMethodIdempotent) {
+        if (isIdempotentOrAtMostOnce) {
           return RetryAction.FAILOVER_AND_RETRY;
         } else {
           return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
@@ -562,7 +562,7 @@ public class RetryPolicies {
         }
       } else {
         return fallbackPolicy.shouldRetry(e, retries, failovers,
-            isMethodIdempotent);
+            isIdempotentOrAtMostOnce);
       }
     }
     
     

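Every policy now receives isIdempotentOrAtMostOnce instead of isMethodIdempotent; the flag is what lets a policy fail over on connection-level errors only when a duplicate execution is known to be safe. A hypothetical policy written against the same interface, just to show the intended use of the flag:

    import java.io.IOException;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;

    public class FailoverIfSafePolicy implements RetryPolicy {
      @Override
      public RetryAction shouldRetry(Exception e, int retries, int failovers,
          boolean isIdempotentOrAtMostOnce) throws Exception {
        if (e instanceof IOException && isIdempotentOrAtMostOnce) {
          return RetryAction.FAILOVER_AND_RETRY;   // duplicate execution is harmless
        }
        return RetryAction.FAIL;                   // the call may already have executed
      }
    }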
+ 13 - 12
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java

@@ -75,24 +75,25 @@ public interface RetryPolicy {
   
   
   /**
    * <p>
-   * Determines whether the framework should retry a
-   * method for the given exception, and the number
-   * of retries that have been made for that operation
+   * Determines whether the framework should retry a method for the given
+   * exception, and the number of retries that have been made for that operation
    * so far.
    * </p>
+   * 
    * @param e The exception that caused the method to fail
    * @param retries The number of times the method has been retried
    * @param failovers The number of times the method has failed over to a
-   *   different backend implementation
-   * @param isMethodIdempotent <code>true</code> if the method is idempotent
-   *   and so can reasonably be retried on failover when we don't know if the
-   *   previous attempt reached the server or not
+   *          different backend implementation
+   * @param isIdempotentOrAtMostOnce <code>true</code> if the method is
+   *          {@link Idempotent} or {@link AtMostOnce} and so can reasonably be
+   *          retried on failover when we don't know if the previous attempt
+   *          reached the server or not
    * @return <code>true</code> if the method should be retried,
-   *   <code>false</code> if the method should not be retried
-   *   but shouldn't fail with an exception (only for void methods)
-   * @throws Exception The re-thrown exception <code>e</code> indicating
-   *   that the method failed and should not be retried further
+   *         <code>false</code> if the method should not be retried but
+   *         shouldn't fail with an exception (only for void methods)
+   * @throws Exception The re-thrown exception <code>e</code> indicating that
+   *           the method failed and should not be retried further
    */
   public RetryAction shouldRetry(Exception e, int retries, int failovers,
-      boolean isMethodIdempotent) throws Exception;
+      boolean isIdempotentOrAtMostOnce) throws Exception;
 }

+ 8 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java

@@ -36,10 +36,10 @@ public class RetryProxy {
    * @param retryPolicy the policy for retrying method call failures
    * @return the retry proxy
    */
-  public static Object create(Class<?> iface, Object implementation,
+  public static <T> Object create(Class<T> iface, T implementation,
                               RetryPolicy retryPolicy) {
     return RetryProxy.create(iface,
-        new DefaultFailoverProxyProvider(iface, implementation),
+        new DefaultFailoverProxyProvider<T>(iface, implementation),
         retryPolicy);
   }
 
 
@@ -53,8 +53,8 @@ public class RetryProxy {
    * @param retryPolicy the policy for retrying or failing over method call failures
    * @return the retry proxy
    */
-  public static Object create(Class<?> iface, FailoverProxyProvider proxyProvider,
-      RetryPolicy retryPolicy) {
+  public static <T> Object create(Class<T> iface,
+      FailoverProxyProvider<T> proxyProvider, RetryPolicy retryPolicy) {
     return Proxy.newProxyInstance(
         proxyProvider.getInterface().getClassLoader(),
         new Class<?>[] { iface },
@@ -73,10 +73,10 @@ public class RetryProxy {
    * @param methodNameToPolicyMap a map of method names to retry policies
    * @return the retry proxy
    */
-  public static Object create(Class<?> iface, Object implementation,
+  public static <T> Object create(Class<T> iface, T implementation,
                               Map<String,RetryPolicy> methodNameToPolicyMap) {
     return create(iface,
-        new DefaultFailoverProxyProvider(iface, implementation),
+        new DefaultFailoverProxyProvider<T>(iface, implementation),
         methodNameToPolicyMap,
         RetryPolicies.TRY_ONCE_THEN_FAIL);
   }
@@ -92,7 +92,8 @@ public class RetryProxy {
    * @param methodNameToPolicyMap a map of method names to retry policies
    * @return the retry proxy
    */
-  public static Object create(Class<?> iface, FailoverProxyProvider proxyProvider,
+  public static <T> Object create(Class<T> iface,
+      FailoverProxyProvider<T> proxyProvider,
       Map<String,RetryPolicy> methodNameToPolicyMap,
       RetryPolicy defaultPolicy) {
     return Proxy.newProxyInstance(

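With the generified signatures, the interface class and the implementation must agree at compile time. A small usage sketch with an invented Echo interface; TRY_ONCE_THEN_FAIL is the policy constant referenced in the diff above:

    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryProxy;

    public class RetryProxyExample {
      interface Echo { String echo(String msg); }      // hypothetical protocol

      public static void main(String[] args) {
        Echo impl = new Echo() {
          @Override
          public String echo(String msg) { return msg; }
        };
        // The cast is still needed because create() returns Object, but a
        // non-matching implementation is now rejected at compile time.
        Echo proxy = (Echo) RetryProxy.create(Echo.class, impl,
            RetryPolicies.TRY_ONCE_THEN_FAIL);
        System.out.println(proxy.echo("hello"));
      }
    }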
+ 137 - 144
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -33,6 +33,7 @@ import java.net.Socket;
 import java.net.SocketTimeoutException;
 import java.net.UnknownHostException;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
 import java.util.Hashtable;
 import java.util.Iterator;
 import java.util.Map.Entry;
@@ -45,6 +46,7 @@ import java.util.concurrent.Future;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 
 import javax.net.SocketFactory;
@@ -62,7 +64,10 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
+import org.apache.hadoop.ipc.ProtobufRpcEngine.RpcRequestMessageWrapper;
+import org.apache.hadoop.ipc.RPC.RpcKind;
 import org.apache.hadoop.ipc.Server.AuthProtocol;
+import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto.OperationProto;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
@@ -75,15 +80,12 @@ import org.apache.hadoop.security.SaslRpcClient;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenInfo;
-import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 
+import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.google.protobuf.CodedOutputStream;
 
 
@@ -97,11 +99,26 @@ public class Client {
   
   
   public static final Log LOG = LogFactory.getLog(Client.class);
 
+  /** A counter for generating call IDs. */
+  private static final AtomicInteger callIdCounter = new AtomicInteger();
+
+  private static final ThreadLocal<Integer> callId = new ThreadLocal<Integer>();
+  private static final ThreadLocal<Integer> retryCount = new ThreadLocal<Integer>();
+
+  /** Set call id and retry count for the next call. */
+  public static void setCallIdAndRetryCount(int cid, int rc) {
+    Preconditions.checkArgument(cid != RpcConstants.INVALID_CALL_ID);
+    Preconditions.checkState(callId.get() == null);
+    Preconditions.checkArgument(rc != RpcConstants.INVALID_RETRY_COUNT);
+
+    callId.set(cid);
+    retryCount.set(rc);
+  }
+
   private Hashtable<ConnectionId, Connection> connections =
     new Hashtable<ConnectionId, Connection>();
 
   private Class<? extends Writable> valueClass;   // class of call values
-  private int counter;                            // counter for call ids
   private AtomicBoolean running = new AtomicBoolean(true); // if client runs
   final private Configuration conf;
 
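The thread-locals added above let a caller (the retry handler) pre-assign the call ID and retry count for the very next Call constructed on the same thread; otherwise a fresh ID comes from the atomic counter. A standalone sketch of that handoff pattern, with names invented for illustration:

    import java.util.concurrent.atomic.AtomicInteger;

    public class CallIdHandoffSketch {
      private static final AtomicInteger counter = new AtomicInteger();
      private static final ThreadLocal<Integer> presetId = new ThreadLocal<Integer>();

      static void presetNextId(int id) {
        presetId.set(id);              // e.g. set by a retry handler before re-invoking
      }

      static int allocateId() {
        Integer id = presetId.get();
        if (id != null) {
          presetId.remove();           // consumed exactly once, as in the Call constructor
          return id;
        }
        return counter.getAndIncrement();
      }
    }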
@@ -111,8 +128,9 @@ public class Client {
   private final int connectionTimeout;
 
   private final boolean fallbackAllowed;
+  private final byte[] clientId;
   
-  final static int PING_CALL_ID = -1;
+  final static int CONNECTION_CONTEXT_CALL_ID = -3;
   
   /**
    * Executor on which IPC calls' parameters are sent. Deferring
@@ -199,22 +217,57 @@ public class Client {
     return refCount==0;
   }
 
+  /** Check the rpc response header. */
+  void checkResponse(RpcResponseHeaderProto header) throws IOException {
+    if (header == null) {
+      throw new IOException("Response is null.");
+    }
+    if (header.hasClientId()) {
+      // check client IDs
+      final byte[] id = header.getClientId().toByteArray();
+      if (!Arrays.equals(id, RpcConstants.DUMMY_CLIENT_ID)) {
+        if (!Arrays.equals(id, clientId)) {
+          throw new IOException("Client IDs not matched: local ID="
+              + StringUtils.byteToHexString(clientId) + ", ID in response="
+              + StringUtils.byteToHexString(header.getClientId().toByteArray()));
+        }
+      }
+    }
+  }
+
+  Call createCall(RPC.RpcKind rpcKind, Writable rpcRequest) {
+    return new Call(rpcKind, rpcRequest);
+  }
+
   /** 
    * Class that represents an RPC call
    */
-  private class Call {
+  static class Call {
     final int id;               // call id
+    final int retry;           // retry count
     final Writable rpcRequest;  // the serialized rpc request
     Writable rpcResponse;       // null if rpc has error
     IOException error;          // exception, null if success
     final RPC.RpcKind rpcKind;      // Rpc EngineKind
     boolean done;               // true when call is done
 
-    protected Call(RPC.RpcKind rpcKind, Writable param) {
+    private Call(RPC.RpcKind rpcKind, Writable param) {
       this.rpcKind = rpcKind;
       this.rpcRequest = param;
-      synchronized (Client.this) {
-        this.id = counter++;
+
+      final Integer id = callId.get();
+      if (id == null) {
+        this.id = nextCallId();
+      } else {
+        callId.set(null);
+        this.id = id;
+      }
+      
+      final Integer rc = retryCount.get();
+      if (rc == null) {
+        this.retry = 0;
+      } else {
+        this.retry = rc;
       }
     }
 
@@ -255,10 +308,9 @@ public class Client {
    * socket: responses may be delivered out of order. */
    * socket: responses may be delivered out of order. */
   private class Connection extends Thread {
   private class Connection extends Thread {
     private InetSocketAddress server;             // server ip:port
     private InetSocketAddress server;             // server ip:port
-    private String serverPrincipal;  // server's krb5 principal name
     private final ConnectionId remoteId;                // connection id
     private final ConnectionId remoteId;                // connection id
     private AuthMethod authMethod; // authentication method
     private AuthMethod authMethod; // authentication method
-    private Token<? extends TokenIdentifier> token;
+    private AuthProtocol authProtocol;
     private int serviceClass;
     private int serviceClass;
     private SaslRpcClient saslRpcClient;
     private SaslRpcClient saslRpcClient;
     
     
@@ -305,45 +357,11 @@ public class Client {
       }
       }
 
 
       UserGroupInformation ticket = remoteId.getTicket();
       UserGroupInformation ticket = remoteId.getTicket();
-      Class<?> protocol = remoteId.getProtocol();
-      if (protocol != null) {
-        TokenInfo tokenInfo = SecurityUtil.getTokenInfo(protocol, conf);
-        if (tokenInfo != null) {
-          TokenSelector<? extends TokenIdentifier> tokenSelector = null;
-          try {
-            tokenSelector = tokenInfo.value().newInstance();
-          } catch (InstantiationException e) {
-            throw new IOException(e.toString());
-          } catch (IllegalAccessException e) {
-            throw new IOException(e.toString());
-          }
-          token = tokenSelector.selectToken(
-              SecurityUtil.buildTokenService(server),
-              ticket.getTokens());
-        }
-        KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol, conf);
-        if (krbInfo != null) {
-          serverPrincipal = remoteId.getServerPrincipal();
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("RPC Server's Kerberos principal name for protocol="
-                + protocol.getCanonicalName() + " is " + serverPrincipal);
-          }
-        }
-      }
-      
-      AuthenticationMethod authentication;
-      if (token != null) {
-        authentication = AuthenticationMethod.TOKEN;
-      } else if (ticket != null) {
-        authentication = ticket.getRealAuthenticationMethod();
-      } else { // this only happens in lazy tests
-        authentication = AuthenticationMethod.SIMPLE;
-      }
-      authMethod = authentication.getAuthMethod();
-      
-      if (LOG.isDebugEnabled())
-        LOG.debug("Use " + authMethod + " authentication for protocol "
-            + protocol.getSimpleName());
+      // try SASL if security is enabled or if the ugi contains tokens.
+      // this causes a SIMPLE client with tokens to attempt SASL
+      boolean trySasl = UserGroupInformation.isSecurityEnabled() ||
+                        (ticket != null && !ticket.getTokens().isEmpty());
+      this.authProtocol = trySasl ? AuthProtocol.SASL : AuthProtocol.NONE;
       
       
       this.setName("IPC Client (" + socketFactory.hashCode() +") connection to " +
       this.setName("IPC Client (" + socketFactory.hashCode() +") connection to " +
           server.toString() +
           server.toString() +
@@ -454,11 +472,10 @@ public class Client {
       return false;
       return false;
     }
     }
     
     
-    private synchronized boolean setupSaslConnection(final InputStream in2, 
-        final OutputStream out2) 
-        throws IOException {
-      saslRpcClient = new SaslRpcClient(authMethod, token, serverPrincipal,
-          fallbackAllowed);
+    private synchronized AuthMethod setupSaslConnection(final InputStream in2, 
+        final OutputStream out2) throws IOException, InterruptedException {
+      saslRpcClient = new SaslRpcClient(remoteId.getTicket(),
+          remoteId.getProtocol(), remoteId.getAddress(), conf);
       return saslRpcClient.saslConnect(in2, out2);
       return saslRpcClient.saslConnect(in2, out2);
     }
     }
 
 
@@ -496,7 +513,8 @@ public class Client {
            * client, to ensure Server matching address of the client connection
            * client, to ensure Server matching address of the client connection
            * to host name in principal passed.
            * to host name in principal passed.
            */
            */
-          if (UserGroupInformation.isSecurityEnabled()) {
+          UserGroupInformation ticket = remoteId.getTicket();
+          if (ticket != null && ticket.hasKerberosCredentials()) {
             KerberosInfo krbInfo = 
             KerberosInfo krbInfo = 
               remoteId.getProtocol().getAnnotation(KerberosInfo.class);
               remoteId.getProtocol().getAnnotation(KerberosInfo.class);
             if (krbInfo != null && krbInfo.clientPrincipal() != null) {
             if (krbInfo != null && krbInfo.clientPrincipal() != null) {
@@ -574,7 +592,7 @@ public class Client {
             } else {
             } else {
               String msg = "Couldn't setup connection for "
               String msg = "Couldn't setup connection for "
                   + UserGroupInformation.getLoginUser().getUserName() + " to "
                   + UserGroupInformation.getLoginUser().getUserName() + " to "
-                  + serverPrincipal;
+                  + remoteId;
               LOG.warn(msg);
               LOG.warn(msg);
               throw (IOException) new IOException(msg).initCause(ex);
               throw (IOException) new IOException(msg).initCause(ex);
             }
             }
@@ -594,7 +612,7 @@ public class Client {
      * a header to the server and starts
      * a header to the server and starts
      * the connection thread that waits for responses.
      * the connection thread that waits for responses.
      */
      */
-    private synchronized void setupIOstreams() throws InterruptedException {
+    private synchronized void setupIOstreams() {
       if (socket != null || shouldCloseConnection.get()) {
       if (socket != null || shouldCloseConnection.get()) {
         return;
         return;
       } 
       } 
@@ -610,19 +628,19 @@ public class Client {
           InputStream inStream = NetUtils.getInputStream(socket);
           InputStream inStream = NetUtils.getInputStream(socket);
           OutputStream outStream = NetUtils.getOutputStream(socket);
           OutputStream outStream = NetUtils.getOutputStream(socket);
           writeConnectionHeader(outStream);
           writeConnectionHeader(outStream);
-          if (authMethod != AuthMethod.SIMPLE) {
+          if (authProtocol == AuthProtocol.SASL) {
             final InputStream in2 = inStream;
             final InputStream in2 = inStream;
             final OutputStream out2 = outStream;
             final OutputStream out2 = outStream;
             UserGroupInformation ticket = remoteId.getTicket();
             UserGroupInformation ticket = remoteId.getTicket();
             if (ticket.getRealUser() != null) {
             if (ticket.getRealUser() != null) {
               ticket = ticket.getRealUser();
               ticket = ticket.getRealUser();
             }
             }
-            boolean continueSasl = false;
             try {
             try {
-              continueSasl = ticket
-                  .doAs(new PrivilegedExceptionAction<Boolean>() {
+              authMethod = ticket
+                  .doAs(new PrivilegedExceptionAction<AuthMethod>() {
                     @Override
                     @Override
-                    public Boolean run() throws IOException {
+                    public AuthMethod run()
+                        throws IOException, InterruptedException {
                       return setupSaslConnection(in2, out2);
                       return setupSaslConnection(in2, out2);
                     }
                     }
                   });
                   });
@@ -634,13 +652,15 @@ public class Client {
                   ticket);
                   ticket);
               continue;
               continue;
             }
             }
-            if (continueSasl) {
+            if (authMethod != AuthMethod.SIMPLE) {
               // Sasl connect is successful. Let's set up Sasl i/o streams.
               // Sasl connect is successful. Let's set up Sasl i/o streams.
               inStream = saslRpcClient.getInputStream(inStream);
               inStream = saslRpcClient.getInputStream(inStream);
               outStream = saslRpcClient.getOutputStream(outStream);
               outStream = saslRpcClient.getOutputStream(outStream);
-            } else {
-              // fall back to simple auth because server told us so.
-              authMethod = AuthMethod.SIMPLE;
+            } else if (UserGroupInformation.isSecurityEnabled() &&
+                       !fallbackAllowed) {
+              throw new IOException("Server asks us to fall back to SIMPLE " +
+                  "auth, but this client is configured to only allow secure " +
+                  "connections.");
             }
             }
           }
           }
         
         
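The new behavior is stricter: a security-enabled client refuses a server-initiated downgrade to SIMPLE auth unless the fallback switch is on. A minimal sketch of opting in, using the key constant referenced later in this diff (the value shown is an example, not a recommendation):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class FallbackOptInExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean(
            CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY, true);
      }
    }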
@@ -757,17 +777,9 @@ public class Client {
         throws IOException {
         throws IOException {
       DataOutputStream out = new DataOutputStream(new BufferedOutputStream(outStream));
       DataOutputStream out = new DataOutputStream(new BufferedOutputStream(outStream));
       // Write out the header, version and authentication method
       // Write out the header, version and authentication method
-      out.write(Server.HEADER.array());
-      out.write(Server.CURRENT_VERSION);
+      out.write(RpcConstants.HEADER.array());
+      out.write(RpcConstants.CURRENT_VERSION);
       out.write(serviceClass);
       out.write(serviceClass);
-      final AuthProtocol authProtocol;
-      switch (authMethod) {
-        case SIMPLE:
-          authProtocol = AuthProtocol.NONE;
-          break;
-        default:
-          authProtocol = AuthProtocol.SASL;
-      }
       out.write(authProtocol.callId);
       out.write(authProtocol.callId);
       out.flush();
       out.flush();
     }
     }
@@ -779,17 +791,20 @@ public class Client {
                                         AuthMethod authMethod)
                                         AuthMethod authMethod)
                                             throws IOException {
                                             throws IOException {
       // Write out the ConnectionHeader
       // Write out the ConnectionHeader
-      DataOutputBuffer buf = new DataOutputBuffer();
-      ProtoUtil.makeIpcConnectionContext(
+      IpcConnectionContextProto message = ProtoUtil.makeIpcConnectionContext(
           RPC.getProtocolName(remoteId.getProtocol()),
           RPC.getProtocolName(remoteId.getProtocol()),
           remoteId.getTicket(),
           remoteId.getTicket(),
-          authMethod).writeTo(buf);
+          authMethod);
+      RpcRequestHeaderProto connectionContextHeader = ProtoUtil
+          .makeRpcRequestHeader(RpcKind.RPC_PROTOCOL_BUFFER,
+              OperationProto.RPC_FINAL_PACKET, CONNECTION_CONTEXT_CALL_ID,
+              RpcConstants.INVALID_RETRY_COUNT, clientId);
+      RpcRequestMessageWrapper request =
+          new RpcRequestMessageWrapper(connectionContextHeader, message);
       
       
       // Write out the packet length
       // Write out the packet length
-      int bufLen = buf.getLength();
-
-      out.writeInt(bufLen);
-      out.write(buf.getData(), 0, bufLen);
+      out.writeInt(request.getLength());
+      request.write(out);
     }
     }
     
     
     /* wait till someone signals us to start reading RPC response or
     /* wait till someone signals us to start reading RPC response or
@@ -835,7 +850,7 @@ public class Client {
       if ( curTime - lastActivity.get() >= pingInterval) {
       if ( curTime - lastActivity.get() >= pingInterval) {
         lastActivity.set(curTime);
         lastActivity.set(curTime);
         synchronized (out) {
         synchronized (out) {
-          out.writeInt(PING_CALL_ID);
+          out.writeInt(RpcConstants.PING_CALL_ID);
           out.flush();
           out.flush();
         }
         }
       }
       }
@@ -890,7 +905,8 @@ public class Client {
       // Items '1' and '2' are prepared here. 
       // Items '1' and '2' are prepared here. 
       final DataOutputBuffer d = new DataOutputBuffer();
       final DataOutputBuffer d = new DataOutputBuffer();
       RpcRequestHeaderProto header = ProtoUtil.makeRpcRequestHeader(
       RpcRequestHeaderProto header = ProtoUtil.makeRpcRequestHeader(
-         call.rpcKind, OperationProto.RPC_FINAL_PACKET, call.id);
+          call.rpcKind, OperationProto.RPC_FINAL_PACKET, call.id, call.retry,
+          clientId);
       header.writeDelimitedTo(d);
       header.writeDelimitedTo(d);
       call.rpcRequest.write(d);
       call.rpcRequest.write(d);
 
 
@@ -955,9 +971,8 @@ public class Client {
         int totalLen = in.readInt();
         int totalLen = in.readInt();
         RpcResponseHeaderProto header = 
         RpcResponseHeaderProto header = 
             RpcResponseHeaderProto.parseDelimitedFrom(in);
             RpcResponseHeaderProto.parseDelimitedFrom(in);
-        if (header == null) {
-          throw new IOException("Response is null.");
-        }
+        checkResponse(header);
+
         int headerLen = header.getSerializedSize();
         int headerLen = header.getSerializedSize();
         headerLen += CodedOutputStream.computeRawVarint32Size(headerLen);
         headerLen += CodedOutputStream.computeRawVarint32Size(headerLen);
 
 
@@ -1090,6 +1105,7 @@ public class Client {
         CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT);
         CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT);
     this.fallbackAllowed = conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
     this.fallbackAllowed = conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
         CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
         CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_DEFAULT);
+    this.clientId = ClientId.getClientId();
   }
   }
 
 
   /**
   /**
@@ -1141,7 +1157,7 @@ public class Client {
    *  for RPC_BUILTIN
    *  for RPC_BUILTIN
    */
    */
   public Writable call(Writable param, InetSocketAddress address)
   public Writable call(Writable param, InetSocketAddress address)
-  throws InterruptedException, IOException {
+      throws IOException {
     return call(RPC.RpcKind.RPC_BUILTIN, param, address);
     return call(RPC.RpcKind.RPC_BUILTIN, param, address);
     
     
   }
   }
@@ -1153,7 +1169,7 @@ public class Client {
    */
    */
   @Deprecated
   @Deprecated
   public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress address)
   public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress address)
-  throws InterruptedException, IOException {
+  throws IOException {
       return call(rpcKind, param, address, null);
       return call(rpcKind, param, address, null);
   }
   }
   
   
@@ -1167,8 +1183,7 @@ public class Client {
    */
    */
   @Deprecated
   @Deprecated
   public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress addr, 
   public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress addr, 
-      UserGroupInformation ticket)  
-      throws InterruptedException, IOException {
+      UserGroupInformation ticket) throws IOException {
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, null, ticket, 0,
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, null, ticket, 0,
         conf);
         conf);
     return call(rpcKind, param, remoteId);
     return call(rpcKind, param, remoteId);
@@ -1186,8 +1201,7 @@ public class Client {
   @Deprecated
   @Deprecated
   public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress addr, 
   public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress addr, 
                        Class<?> protocol, UserGroupInformation ticket,
                        Class<?> protocol, UserGroupInformation ticket,
-                       int rpcTimeout)  
-                       throws InterruptedException, IOException {
+                       int rpcTimeout) throws IOException {
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
         ticket, rpcTimeout, conf);
         ticket, rpcTimeout, conf);
     return call(rpcKind, param, remoteId);
     return call(rpcKind, param, remoteId);
@@ -1201,8 +1215,7 @@ public class Client {
    */
    */
   public Writable call(Writable param, InetSocketAddress addr,
   public Writable call(Writable param, InetSocketAddress addr,
       Class<?> protocol, UserGroupInformation ticket,
       Class<?> protocol, UserGroupInformation ticket,
-      int rpcTimeout, Configuration conf)
-      throws InterruptedException, IOException {
+      int rpcTimeout, Configuration conf) throws IOException {
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
         ticket, rpcTimeout, conf);
         ticket, rpcTimeout, conf);
     return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId);
     return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId);
@@ -1216,7 +1229,7 @@ public class Client {
   public Writable call(Writable param, InetSocketAddress addr,
   public Writable call(Writable param, InetSocketAddress addr,
       Class<?> protocol, UserGroupInformation ticket,
       Class<?> protocol, UserGroupInformation ticket,
       int rpcTimeout, int serviceClass, Configuration conf)
       int rpcTimeout, int serviceClass, Configuration conf)
-      throws InterruptedException, IOException {
+      throws IOException {
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
         ticket, rpcTimeout, conf);
         ticket, rpcTimeout, conf);
     return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId, serviceClass);
     return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId, serviceClass);
@@ -1232,8 +1245,7 @@ public class Client {
    */
    */
   public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress addr, 
   public Writable call(RPC.RpcKind rpcKind, Writable param, InetSocketAddress addr, 
                        Class<?> protocol, UserGroupInformation ticket,
                        Class<?> protocol, UserGroupInformation ticket,
-                       int rpcTimeout, Configuration conf)  
-                       throws InterruptedException, IOException {
+                       int rpcTimeout, Configuration conf) throws IOException {
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
         ticket, rpcTimeout, conf);
         ticket, rpcTimeout, conf);
     return call(rpcKind, param, remoteId);
     return call(rpcKind, param, remoteId);
@@ -1243,8 +1255,8 @@ public class Client {
    * Same as {link {@link #call(RPC.RpcKind, Writable, ConnectionId)}
    * Same as {link {@link #call(RPC.RpcKind, Writable, ConnectionId)}
    * except the rpcKind is RPC_BUILTIN
    * except the rpcKind is RPC_BUILTIN
    */
    */
-  public Writable call(Writable param, ConnectionId remoteId)  
-      throws InterruptedException, IOException {
+  public Writable call(Writable param, ConnectionId remoteId)
+      throws IOException {
      return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId);
      return call(RPC.RpcKind.RPC_BUILTIN, param, remoteId);
   }
   }
   
   
@@ -1260,7 +1272,7 @@ public class Client {
    * threw an exception.
    * threw an exception.
    */
    */
   public Writable call(RPC.RpcKind rpcKind, Writable rpcRequest,
   public Writable call(RPC.RpcKind rpcKind, Writable rpcRequest,
-      ConnectionId remoteId) throws InterruptedException, IOException {
+      ConnectionId remoteId) throws IOException {
     return call(rpcKind, rpcRequest, remoteId, RPC.RPC_SERVICE_CLASS_DEFAULT);
     return call(rpcKind, rpcRequest, remoteId, RPC.RPC_SERVICE_CLASS_DEFAULT);
   }
   }
 
 
@@ -1277,9 +1289,8 @@ public class Client {
    * threw an exception.
    */
   public Writable call(RPC.RpcKind rpcKind, Writable rpcRequest,
-      ConnectionId remoteId, int serviceClass)
-      throws InterruptedException, IOException {
-    Call call = new Call(rpcKind, rpcRequest);
+      ConnectionId remoteId, int serviceClass) throws IOException {
+    final Call call = createCall(rpcKind, rpcRequest);
     Connection connection = getConnection(remoteId, call, serviceClass);
     try {
       connection.sendRpcRequest(call);                 // send the rpc request
@@ -1337,8 +1348,7 @@ public class Client {
   /** Get a connection from the pool, or create a new one and add it to the
    * pool.  Connections to a given ConnectionId are reused. */
   private Connection getConnection(ConnectionId remoteId,
-                                   Call call, int serviceClass)
-                                   throws IOException, InterruptedException {
+      Call call, int serviceClass) throws IOException {
     if (!running.get()) {
       // the client is stopped
       throw new IOException("The client is stopped");
@@ -1378,7 +1388,6 @@ public class Client {
     final Class<?> protocol;
     private static final int PRIME = 16777619;
     private final int rpcTimeout;
-    private final String serverPrincipal;
     private final int maxIdleTime; //connections will be culled if it was idle for 
     //maxIdleTime msecs
     private final RetryPolicy connectionRetryPolicy;
@@ -1389,15 +1398,13 @@ public class Client {
     private final int pingInterval; // how often sends ping to the server in msecs
     
     ConnectionId(InetSocketAddress address, Class<?> protocol, 
-                 UserGroupInformation ticket, int rpcTimeout,
-                 String serverPrincipal, int maxIdleTime, 
+                 UserGroupInformation ticket, int rpcTimeout, int maxIdleTime, 
                  RetryPolicy connectionRetryPolicy, int maxRetriesOnSocketTimeouts,
                  boolean tcpNoDelay, boolean doPing, int pingInterval) {
       this.protocol = protocol;
       this.address = address;
       this.ticket = ticket;
       this.rpcTimeout = rpcTimeout;
-      this.serverPrincipal = serverPrincipal;
       this.maxIdleTime = maxIdleTime;
       this.connectionRetryPolicy = connectionRetryPolicy;
       this.maxRetriesOnSocketTimeouts = maxRetriesOnSocketTimeouts;
@@ -1422,10 +1429,6 @@ public class Client {
       return rpcTimeout;
     }
     
-    String getServerPrincipal() {
-      return serverPrincipal;
-    }
-    
     int getMaxIdleTime() {
       return maxIdleTime;
     }
@@ -1475,11 +1478,9 @@ public class Client {
             max, 1, TimeUnit.SECONDS);
       }
 
-      String remotePrincipal = getRemotePrincipal(conf, addr, protocol);
       boolean doPing =
         conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
-      return new ConnectionId(addr, protocol, ticket,
-          rpcTimeout, remotePrincipal,
+      return new ConnectionId(addr, protocol, ticket, rpcTimeout,
           conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
               CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT),
           connectionRetryPolicy,
@@ -1492,25 +1493,6 @@ public class Client {
           (doPing ? Client.getPingInterval(conf) : 0));
     }
     
-    private static String getRemotePrincipal(Configuration conf,
-        InetSocketAddress address, Class<?> protocol) throws IOException {
-      if (!UserGroupInformation.isSecurityEnabled() || protocol == null) {
-        return null;
-      }
-      KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol, conf);
-      if (krbInfo != null) {
-        String serverKey = krbInfo.serverPrincipal();
-        if (serverKey == null) {
-          throw new IOException(
-              "Can't obtain server Kerberos config key from protocol="
-                  + protocol.getCanonicalName());
-        }
-        return SecurityUtil.getServerPrincipal(conf.get(serverKey), address
-            .getAddress());
-      }
-      return null;
-    }
-    
     static boolean isEqual(Object a, Object b) {
       return a == null ? b == null : a.equals(b);
     }
@@ -1529,7 +1511,6 @@ public class Client {
             && this.pingInterval == that.pingInterval
             && isEqual(this.protocol, that.protocol)
             && this.rpcTimeout == that.rpcTimeout
-            && isEqual(this.serverPrincipal, that.serverPrincipal)
             && this.tcpNoDelay == that.tcpNoDelay
             && isEqual(this.ticket, that.ticket);
       }
@@ -1545,8 +1526,6 @@ public class Client {
       result = PRIME * result + pingInterval;
       result = PRIME * result + ((protocol == null) ? 0 : protocol.hashCode());
       result = PRIME * result + rpcTimeout;
-      result = PRIME * result
-          + ((serverPrincipal == null) ? 0 : serverPrincipal.hashCode());
       result = PRIME * result + (tcpNoDelay ? 1231 : 1237);
       result = PRIME * result + ((ticket == null) ? 0 : ticket.hashCode());
       return result;
@@ -1554,7 +1533,21 @@ public class Client {
     
     
     @Override
     public String toString() {
-      return serverPrincipal + "@" + address;
+      return address.toString();
     }
   }  
+
+  /**
+   * Returns the next valid sequential call ID by incrementing an atomic counter
+   * and masking off the sign bit.  Valid call IDs are non-negative integers in
+   * the range [ 0, 2^31 - 1 ].  Negative numbers are reserved for special
+   * purposes.  The values can overflow back to 0 and be reused.  Note that prior
+   * versions of the client did not mask off the sign bit, so a server may still
+   * see a negative call ID if it receives connections from an old client.
+   * 
+   * @return next call ID
+   */
+  public static int nextCallId() {
+    return callIdCounter.getAndIncrement() & 0x7FFFFFFF;
+  }
 }
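
A quick illustration (not part of this commit) of the overflow behaviour described in the nextCallId() javadoc above: masking with 0x7FFFFFFF keeps the call ID non-negative even when the underlying AtomicInteger wraps past Integer.MAX_VALUE. The demo class below is purely hypothetical.

    import java.util.concurrent.atomic.AtomicInteger;

    public class CallIdDemo {
      // start near the overflow point to show the wrap-around
      private static final AtomicInteger callIdCounter = new AtomicInteger(Integer.MAX_VALUE);

      static int nextCallId() {
        // same masking as Client.nextCallId(): drop the sign bit
        return callIdCounter.getAndIncrement() & 0x7FFFFFFF;
      }

      public static void main(String[] args) {
        System.out.println(nextCallId()); // 2147483647
        System.out.println(nextCallId()); // 0  (counter overflowed to Integer.MIN_VALUE)
        System.out.println(nextCallId()); // 1
      }
    }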

+ 79 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientId.java

@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import java.nio.ByteBuffer;
+import java.util.UUID;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * A class defining a set of static helper methods to provide conversion between
+ * bytes and string for UUID-based client Id.
+ */
+@InterfaceAudience.Private
+public class ClientId {
+  
+  /** The byte array of a UUID should be 16 */ 
+  public static final int BYTE_LENGTH = 16;
+  
+  /**
+   * Return clientId as byte[]
+   */
+  public static byte[] getClientId() {
+    UUID uuid = UUID.randomUUID();
+    ByteBuffer buf = ByteBuffer.wrap(new byte[BYTE_LENGTH]);
+    buf.putLong(uuid.getMostSignificantBits());
+    buf.putLong(uuid.getLeastSignificantBits());
+    return buf.array();
+  }
+  
+  /** Convert a clientId byte[] to string */
+  public static String toString(byte[] clientId) {
+    // clientId can be null or an empty array
+    if (clientId == null || clientId.length == 0) {
+      return "";
+    }
+    // otherwise should be 16 bytes
+    Preconditions.checkArgument(clientId.length == BYTE_LENGTH);
+    long msb = 0;
+    long lsb = 0;
+    for (int i = 0; i < 8; i++) {
+      msb = (msb << 8) | (clientId[i] & 0xff);
+    }
+    for (int i = 8; i < 16; i++) {
+      lsb = (lsb << 8) | (clientId[i] & 0xff);
+    }
+    return (new UUID(msb, lsb)).toString();
+  }
+  
+  /** Convert a clientId string back to its byte[] representation */
+  public static byte[] toBytes(String id) {
+    if (id == null || "".equals(id)) {
+      return new byte[0];
+    }
+    UUID uuid = UUID.fromString(id);
+    ByteBuffer buf = ByteBuffer.wrap(new byte[BYTE_LENGTH]);
+    buf.putLong(uuid.getMostSignificantBits());
+    buf.putLong(uuid.getLeastSignificantBits());
+    return buf.array();
+  }
+
+}
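
A small usage sketch (not from the patch) of the helpers defined above, showing the intended byte[]/String round trip; only methods declared in ClientId.java are used.

    import java.util.Arrays;
    import org.apache.hadoop.ipc.ClientId;

    public class ClientIdDemo {
      public static void main(String[] args) {
        byte[] id = ClientId.getClientId();     // 16 UUID bytes
        String text = ClientId.toString(id);    // canonical UUID string
        byte[] back = ClientId.toBytes(text);   // same 16 bytes again
        System.out.println(text + " round-trips: " + Arrays.equals(id, back));
      }
    }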

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java

@@ -124,7 +124,7 @@ public class ProtobufRpcEngine implements RpcEngine {
     /**
      * This constructor takes a connectionId, instead of creating a new one.
      */
-    public Invoker(Class<?> protocol, Client.ConnectionId connId,
+    private Invoker(Class<?> protocol, Client.ConnectionId connId,
         Configuration conf, SocketFactory factory) {
       this.remoteId = connId;
       this.client = CLIENTS.getClient(conf, factory, RpcResponseWrapper.class);
@@ -192,7 +192,6 @@ public class ProtobufRpcEngine implements RpcEngine {
       }
 
       RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
-      RpcResponseWrapper val = null;
       
       if (LOG.isTraceEnabled()) {
         LOG.trace(Thread.currentThread().getId() + ": Call -> " +
@@ -202,6 +201,7 @@ public class ProtobufRpcEngine implements RpcEngine {
 
 
 
 
       Message theRequest = (Message) args[1];
+      final RpcResponseWrapper val;
       try {
         val = (RpcResponseWrapper) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
             new RpcRequestWrapper(rpcRequestHeader, theRequest), remoteId);

+ 3 - 104
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java

@@ -642,104 +642,6 @@ public class RPC {
             + proxy.getClass());
   }
 
-  /** Construct a server for a protocol implementation instance listening on a
-   * port and address.
-   * @deprecated Please use {@link Builder} to build the {@link Server}
-   */
-  @Deprecated
-  public static Server getServer(final Object instance, final String bindAddress, final int port, Configuration conf) 
-    throws IOException {
-    return getServer(instance, bindAddress, port, 1, false, conf);
-  }
-
-  /** Construct a server for a protocol implementation instance listening on a
-   * port and address.
-   * @deprecated Please use {@link Builder} to build the {@link Server}
-   */
-  @Deprecated
-  public static Server getServer(final Object instance, final String bindAddress, final int port,
-                                 final int numHandlers,
-                                 final boolean verbose, Configuration conf) 
-    throws IOException {
-    return getServer(instance.getClass(),         // use impl class for protocol
-                     instance, bindAddress, port, numHandlers, false, conf, null,
-                     null);
-  }
-
-  /** Construct a server for a protocol implementation instance.
-   *  @deprecated Please use {@link Builder} to build the {@link Server}
-   */
-  @Deprecated
-  public static Server getServer(Class<?> protocol,
-                                 Object instance, String bindAddress,
-                                 int port, Configuration conf) 
-    throws IOException {
-    return getServer(protocol, instance, bindAddress, port, 1, false, conf, null,
-        null);
-  }
-
-  /** Construct a server for a protocol implementation instance.
-   * @deprecated Please use {@link Builder} to build the {@link Server}
-   */
-  @Deprecated
-  public static Server getServer(Class<?> protocol,
-                                 Object instance, String bindAddress, int port,
-                                 int numHandlers,
-                                 boolean verbose, Configuration conf) 
-    throws IOException {
-    
-    return getServer(protocol, instance, bindAddress, port, numHandlers, verbose,
-                 conf, null, null);
-  }
-  
-  /** Construct a server for a protocol implementation instance. 
-   *  @deprecated Please use {@link Builder} to build the {@link Server}
-   */
-  @Deprecated
-  public static Server getServer(Class<?> protocol,
-                                 Object instance, String bindAddress, int port,
-                                 int numHandlers,
-                                 boolean verbose, Configuration conf,
-                                 SecretManager<? extends TokenIdentifier> secretManager) 
-    throws IOException {
-    return getServer(protocol, instance, bindAddress, port, numHandlers, verbose,
-        conf, secretManager, null);
-  }
-  
-  /**
-   *  @deprecated Please use {@link Builder} to build the {@link Server}
-   */
-  @Deprecated
-  public static Server getServer(Class<?> protocol,
-      Object instance, String bindAddress, int port,
-      int numHandlers,
-      boolean verbose, Configuration conf,
-      SecretManager<? extends TokenIdentifier> secretManager,
-      String portRangeConfig) 
-  throws IOException {
-    return getProtocolEngine(protocol, conf)
-      .getServer(protocol, instance, bindAddress, port, numHandlers, -1, -1,
-                 verbose, conf, secretManager, portRangeConfig);
-  }
-
-  /** Construct a server for a protocol implementation instance.
-   *  @deprecated Please use {@link Builder} to build the {@link Server}
-   */
-  @Deprecated
-  public static <PROTO extends VersionedProtocol, IMPL extends PROTO> 
-        Server getServer(Class<PROTO> protocol,
-                                 IMPL instance, String bindAddress, int port,
-                                 int numHandlers, int numReaders, int queueSizePerHandler,
-                                 boolean verbose, Configuration conf,
-                                 SecretManager<? extends TokenIdentifier> secretManager) 
-    throws IOException {
-    
-    return getProtocolEngine(protocol, conf)
-      .getServer(protocol, instance, bindAddress, port, numHandlers,
-                 numReaders, queueSizePerHandler, verbose, conf, secretManager,
-                 null);
-  }
-
   /**
    * Class to construct instances of RPC server with specific options.
    */
@@ -913,7 +815,7 @@ public class RPC {
    
    
    // Register  protocol and its impl for rpc calls
    void registerProtocolAndImpl(RpcKind rpcKind, Class<?> protocolClass, 
-       Object protocolImpl) throws IOException {
+       Object protocolImpl) {
      String protocolName = RPC.getProtocolName(protocolClass);
      long version;
      
@@ -943,8 +845,6 @@ public class RPC {
      }
    }
    
-   
-   @SuppressWarnings("unused") // will be useful later.
    VerProtocolImpl[] getSupportedProtocolVersions(RPC.RpcKind rpcKind,
        String protocolName) {
      VerProtocolImpl[] resultk = 
@@ -999,8 +899,7 @@ public class RPC {
       initProtocolMetaInfo(conf);
     }
     
-    private void initProtocolMetaInfo(Configuration conf)
-        throws IOException {
+    private void initProtocolMetaInfo(Configuration conf) {
       RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class,
           ProtobufRpcEngine.class);
       ProtocolMetaInfoServerSideTranslatorPB xlator = 
@@ -1018,7 +917,7 @@ public class RPC {
      * @return the server (for convenience)
      */
     public Server addProtocol(RpcKind rpcKind, Class<?> protocolClass,
-        Object protocolImpl) throws IOException {
+        Object protocolImpl) {
       registerProtocolAndImpl(rpcKind, protocolClass, protocolImpl);
       return this;
     }
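
Since the deprecated getServer() overloads removed above point callers at RPC.Builder, here is a rough sketch of the replacement pattern; the setter names follow the 2.x Builder API but should be read as an illustration rather than a definitive migration recipe.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.RPC;

    public class BuilderSketch {
      // protocol/impl are placeholders for a real RPC protocol interface and implementation
      static RPC.Server start(Class<?> protocol, Object impl, Configuration conf)
          throws IOException {
        return new RPC.Builder(conf)
            .setProtocol(protocol)
            .setInstance(impl)
            .setBindAddress("0.0.0.0")
            .setPort(0)            // 0 lets the server pick a free port
            .setNumHandlers(1)
            .setVerbose(false)
            .build();
      }
    }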

+ 329 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java

@@ -0,0 +1,329 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+
+import java.util.Arrays;
+import java.util.UUID;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.util.LightWeightCache;
+import org.apache.hadoop.util.LightWeightGSet;
+import org.apache.hadoop.util.LightWeightGSet.LinkedElement;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+/**
+ * Maintains a cache of non-idempotent requests that have been successfully
+ * processed by the RPC server implementation, to handle the retries. A request
+ * is uniquely identified by the unique client ID + call ID of the RPC request.
+ * On receiving a retried request, an entry will be found in the
+ * {@link RetryCache} and the previous response is sent back to the caller.
+ * <p>
+ * For an implementation using this cache, see the HDFS FSNamesystem class.
+ */
+@InterfaceAudience.Private
+public class RetryCache {
+  public static final Log LOG = LogFactory.getLog(RetryCache.class);
+  /**
+   * CacheEntry is tracked using unique client ID and callId of the RPC request
+   */
+  public static class CacheEntry implements LightWeightCache.Entry {
+    /**
+     * Processing state of the requests
+     */
+    private static byte INPROGRESS = 0;
+    private static byte SUCCESS = 1;
+    private static byte FAILED = 2;
+
+    private byte state = INPROGRESS;
+    
+    // Store uuid as two long for better memory utilization
+    private final long clientIdMsb; // Most significant bytes
+    private final long clientIdLsb; // Least significant bytes
+    
+    private final int callId;
+    private final long expirationTime;
+    private LightWeightGSet.LinkedElement next;
+
+    CacheEntry(byte[] clientId, int callId, long expirationTime) {
+      // ClientId must be a UUID - that is 16 octets.
+      Preconditions.checkArgument(clientId.length == ClientId.BYTE_LENGTH,
+          "Invalid clientId - length is " + clientId.length
+              + " expected length " + ClientId.BYTE_LENGTH);
+      // Convert UUID bytes to two longs
+      long tmp = 0;
+      for (int i=0; i<8; i++) {
+        tmp = (tmp << 8) | (clientId[i] & 0xff);
+      }
+      clientIdMsb = tmp;
+      tmp = 0;
+      for (int i=8; i<16; i++) {
+        tmp = (tmp << 8) | (clientId[i] & 0xff);
+      }
+      clientIdLsb = tmp;
+      this.callId = callId;
+      this.expirationTime = expirationTime;
+    }
+
+    private static int hashCode(long value) {
+      return (int)(value ^ (value >>> 32));
+    }
+    
+    @Override
+    public int hashCode() {
+      return (hashCode(clientIdMsb) * 31 + hashCode(clientIdLsb)) * 31 + callId;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      if (this == obj) {
+        return true;
+      }
+      if (!(obj instanceof CacheEntry)) {
+        return false;
+      }
+      CacheEntry other = (CacheEntry) obj;
+      return callId == other.callId && clientIdMsb == other.clientIdMsb
+          && clientIdLsb == other.clientIdLsb;
+    }
+
+    @Override
+    public void setNext(LinkedElement next) {
+      this.next = next;
+    }
+
+    @Override
+    public LinkedElement getNext() {
+      return next;
+    }
+
+    synchronized void completed(boolean success) {
+      state = success ? SUCCESS : FAILED;
+      this.notifyAll();
+    }
+
+    public synchronized boolean isSuccess() {
+      return state == SUCCESS;
+    }
+
+    @Override
+    public void setExpirationTime(long timeNano) {
+      // expiration time does not change
+    }
+
+    @Override
+    public long getExpirationTime() {
+      return expirationTime;
+    }
+    
+    @Override
+    public String toString() {
+      return (new UUID(this.clientIdMsb, this.clientIdLsb)).toString() + ":"
+          + this.callId + ":" + this.state;
+    }
+  }
+
+  /**
+   * CacheEntry with payload that tracks the previous response or parts of
+   * previous response to be used for generating response for retried requests.
+   */
+  public static class CacheEntryWithPayload extends CacheEntry {
+    private Object payload;
+
+    CacheEntryWithPayload(byte[] clientId, int callId, Object payload,
+        long expirationTime) {
+      super(clientId, callId, expirationTime);
+      this.payload = payload;
+    }
+
+    /** Override equals to avoid findbugs warnings */
+    @Override
+    public boolean equals(Object obj) {
+      return super.equals(obj);
+    }
+
+    /** Override hashcode to avoid findbugs warnings */
+    @Override
+    public int hashCode() {
+      return super.hashCode();
+    }
+
+    public Object getPayload() {
+      return payload;
+    }
+  }
+
+  private final LightWeightGSet<CacheEntry, CacheEntry> set;
+  private final long expirationTime;
+
+  /**
+   * Constructor
+   * @param cacheName name to identify the cache by
+   * @param percentage percentage of total java heap space used by this cache
+   * @param expirationTime time for an entry to expire in nanoseconds
+   */
+  public RetryCache(String cacheName, double percentage, long expirationTime) {
+    int capacity = LightWeightGSet.computeCapacity(percentage, cacheName);
+    capacity = capacity > 16 ? capacity : 16;
+    this.set = new LightWeightCache<CacheEntry, CacheEntry>(capacity, capacity,
+        expirationTime, 0);
+    this.expirationTime = expirationTime;
+  }
+
+  private static boolean skipRetryCache() {
+    // Do not track non RPC invocation or RPC requests with
+    // invalid callId or clientId in retry cache
+    return !Server.isRpcInvocation() || Server.getCallId() < 0
+        || Arrays.equals(Server.getClientId(), RpcConstants.DUMMY_CLIENT_ID);
+  }
+  
+  @VisibleForTesting
+  public LightWeightGSet<CacheEntry, CacheEntry> getCacheSet() {
+    return set;
+  }
+
+  /**
+   * This method handles the following conditions:
+   * <ul>
+   * <li>If retry is not to be processed, return null</li>
+   * <li>If there is no cache entry, add a new entry {@code newEntry} and return
+   * it.</li>
+   * <li>If there is an existing entry, wait for its completion. If the
+   * completion state is {@link CacheEntry#FAILED}, the expectation is that the
+   * thread that waited for completion, retries the request. the
+   * {@link CacheEntry} state is set to {@link CacheEntry#INPROGRESS} again.
+   * <li>If the completion state is {@link CacheEntry#SUCCESS}, the entry is
+   * returned so that the thread that waits for it can return the previous
+   * response.</li>
+   * </ul>
+   * 
+   * @return {@link CacheEntry}.
+   */
+  private CacheEntry waitForCompletion(CacheEntry newEntry) {
+    CacheEntry mapEntry = null;
+    synchronized (this) {
+      mapEntry = set.get(newEntry);
+      // If an entry in the cache does not exist, add a new one
+      if (mapEntry == null) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Adding Rpc request clientId "
+              + newEntry.clientIdMsb + newEntry.clientIdLsb + " callId "
+              + newEntry.callId + " to retryCache");
+        }
+        set.put(newEntry);
+        return newEntry;
+      }
+    }
+    // Entry already exists in cache. Wait for completion and return its state
+    Preconditions.checkNotNull(mapEntry,
+        "Entry from the cache should not be null");
+    // Wait for in progress request to complete
+    synchronized (mapEntry) {
+      while (mapEntry.state == CacheEntry.INPROGRESS) {
+        try {
+          mapEntry.wait();
+        } catch (InterruptedException ie) {
+          // Restore the interrupted status
+          Thread.currentThread().interrupt();
+        }
+      }
+      // Previous request has failed, the expectation is that it will be
+      // retried again.
+      if (mapEntry.state != CacheEntry.SUCCESS) {
+        mapEntry.state = CacheEntry.INPROGRESS;
+      }
+    }
+    return mapEntry;
+  }
+  
+  /** 
+   * Add a new cache entry into the retry cache. The cache entry consists of 
+   * clientId and callId extracted from editlog.
+   */
+  public void addCacheEntry(byte[] clientId, int callId) {
+    CacheEntry newEntry = new CacheEntry(clientId, callId, System.nanoTime()
+        + expirationTime);
+    newEntry.completed(true);
+    set.put(newEntry);
+  }
+  
+  public void addCacheEntryWithPayload(byte[] clientId, int callId,
+      Object payload) {
+    CacheEntry newEntry = new CacheEntryWithPayload(clientId, callId, payload,
+        System.nanoTime() + expirationTime);
+    // since the entry is loaded from editlog, we can assume it succeeded.    
+    newEntry.completed(true);
+    set.put(newEntry);
+  }
+
+  private static CacheEntry newEntry(long expirationTime) {
+    return new CacheEntry(Server.getClientId(), Server.getCallId(),
+        System.nanoTime() + expirationTime);
+  }
+
+  private static CacheEntryWithPayload newEntry(Object payload,
+      long expirationTime) {
+    return new CacheEntryWithPayload(Server.getClientId(), Server.getCallId(),
+        payload, System.nanoTime() + expirationTime);
+  }
+
+  /** Static method that provides null check for retryCache */
+  public static CacheEntry waitForCompletion(RetryCache cache) {
+    if (skipRetryCache()) {
+      return null;
+    }
+    return cache != null ? cache
+        .waitForCompletion(newEntry(cache.expirationTime)) : null;
+  }
+
+  /** Static method that provides null check for retryCache */
+  public static CacheEntryWithPayload waitForCompletion(RetryCache cache,
+      Object payload) {
+    if (skipRetryCache()) {
+      return null;
+    }
+    return (CacheEntryWithPayload) (cache != null ? cache
+        .waitForCompletion(newEntry(payload, cache.expirationTime)) : null);
+  }
+
+  public static void setState(CacheEntry e, boolean success) {
+    if (e == null) {
+      return;
+    }
+    e.completed(success);
+  }
+
+  public static void setState(CacheEntryWithPayload e, boolean success,
+      Object payload) {
+    if (e == null) {
+      return;
+    }
+    e.payload = payload;
+    e.completed(success);
+  }
+
+  public static void clear(RetryCache cache) {
+    if (cache != null) {
+      cache.set.clear();
+    }
+  }
+}
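
A hedged sketch of the server-side usage pattern implied by the waitForCompletion() javadoc above; the HDFS FSNamesystem follows roughly this shape, but doWork() and the cache sizing below are made up for illustration. Outside an actual RPC handler the returned entry is simply null and the operation runs normally.

    import org.apache.hadoop.ipc.RetryCache;
    import org.apache.hadoop.ipc.RetryCache.CacheEntry;

    public class RetryCacheUsageSketch {
      // hypothetical sizing: 0.03% of the heap, entries expire after 10 minutes (in nanoseconds)
      private final RetryCache retryCache =
          new RetryCache("exampleCache", 0.03, 10L * 60 * 1000 * 1000 * 1000);

      void nonIdempotentOperation() {
        CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
        if (cacheEntry != null && cacheEntry.isSuccess()) {
          return; // retried request that already succeeded: replay the old outcome
        }
        boolean success = false;
        try {
          doWork();            // placeholder for the real non-idempotent mutation
          success = true;
        } finally {
          RetryCache.setState(cacheEntry, success);
        }
      }

      private void doWork() { }
    }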

+ 53 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcConstants.java

@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class RpcConstants {
+  private RpcConstants() {
+    // Hidden Constructor
+  }
+  
+  public static final int PING_CALL_ID = -1;
+  
+  public static final byte[] DUMMY_CLIENT_ID = new byte[0];
+  
+  public static final int INVALID_CALL_ID = -2;
+  
+  public static final int INVALID_RETRY_COUNT = -1;
+  
+  /**
+   * The first four bytes of Hadoop RPC connections
+   */
+  public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes());
+  
+  // 1 : Introduce ping and server does not throw away RPCs
+  // 3 : Introduce the protocol into the RPC connection header
+  // 4 : Introduced SASL security layer
+  // 5 : Introduced use of {@link ArrayPrimitiveWritable$Internal}
+  //     in ObjectWritable to efficiently transmit arrays of primitives
+  // 6 : Made RPC Request header explicit
+  // 7 : Changed Ipc Connection Header to use Protocol buffers
+  // 8 : SASL server always sends a final response
+  // 9 : Changes to protocol for HADOOP-8990
+  public static final byte CURRENT_VERSION = 9;
+}
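
A minimal, hypothetical sketch of how the constants above could be used to validate a connection preamble. This is not Hadoop's actual Server code (the real connection header carries further bytes after the version); it only illustrates HEADER and CURRENT_VERSION.

    import java.io.DataInputStream;
    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.hadoop.ipc.RpcConstants;

    public class PreambleCheckSketch {
      static void checkPreamble(DataInputStream in) throws IOException {
        byte[] magic = new byte[RpcConstants.HEADER.remaining()];
        in.readFully(magic);                       // expect the 4-byte "hrpc" magic
        if (!Arrays.equals(magic, RpcConstants.HEADER.array())) {
          throw new IOException("Not a Hadoop RPC connection");
        }
        byte version = in.readByte();
        if (version != RpcConstants.CURRENT_VERSION) {
          throw new IOException("Unsupported RPC version " + version);
        }
      }
    }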

File diff content is too large to display
+ 388 - 213
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java


+ 4 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.metrics2.MetricsInfo;
 import org.apache.hadoop.metrics2.MetricsSource;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.util.ReflectionUtils;
 
 /**
  * Helper class to build metrics source object from annotations
@@ -54,10 +55,10 @@ public class MetricsSourceBuilder {
     Class<?> cls = source.getClass();
     registry = initRegistry(source);
 
-    for (Field field : cls.getDeclaredFields()) {
+    for (Field field : ReflectionUtils.getDeclaredFieldsIncludingInherited(cls)) {
       add(source, field);
     }
-    for (Method method : cls.getDeclaredMethods()) {
+    for (Method method : ReflectionUtils.getDeclaredMethodsIncludingInherited(cls)) {
       add(source, method);
     }
   }
@@ -88,7 +89,7 @@ public class MetricsSourceBuilder {
     Class<?> cls = source.getClass();
     MetricsRegistry r = null;
     // Get the registry if it already exists.
-    for (Field field : cls.getDeclaredFields()) {
+    for (Field field : ReflectionUtils.getDeclaredFieldsIncludingInherited(cls)) {
       if (field.getType() != MetricsRegistry.class) continue;
       try {
         field.setAccessible(true);
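
The switch above to ReflectionUtils.getDeclaredFieldsIncludingInherited / getDeclaredMethodsIncludingInherited means annotated members declared on parent classes are also discovered. The helper itself is not shown in this diff; the sketch below is only an assumption of what such a walk looks like, not the actual ReflectionUtils code.

    import java.lang.reflect.Field;
    import java.util.ArrayList;
    import java.util.List;

    public class InheritedFieldsSketch {
      // walk up the class hierarchy and collect declared fields from every level
      static List<Field> declaredFieldsIncludingInherited(Class<?> cls) {
        List<Field> fields = new ArrayList<Field>();
        for (Class<?> c = cls; c != null; c = c.getSuperclass()) {
          for (Field f : c.getDeclaredFields()) {
            fields.add(f);
          }
        }
        return fields;
      }
    }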

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java

@@ -397,10 +397,10 @@ public class NetworkTopology {
     netlock.writeLock().lock();
     try {
       if ((depthOfAllLeaves != -1) && (depthOfAllLeaves != newDepth)) {
-        LOG.error("Error: can't add leaf node at depth " +
-            newDepth + " to topology:\n" + oldTopoStr);
-        throw new InvalidTopologyException("Invalid network topology. " +
-            "You cannot have a rack and a non-rack node at the same " +
+        LOG.error("Error: can't add leaf node " + NodeBase.getPath(node) +
+            " at depth " + newDepth + " to topology:\n" + oldTopoStr);
+        throw new InvalidTopologyException("Failed to add " + NodeBase.getPath(node) +
+            ": You cannot have a rack and a non-rack node at the same " +
             "level of the network topology.");
       }
       Node rack = getNodeForNetworkLocation(node);

+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RefreshUserMappingsProtocol.java

@@ -22,6 +22,7 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.KerberosInfo;
 
 /**
@@ -43,12 +44,13 @@ public interface RefreshUserMappingsProtocol {
    * Refresh user to group mappings.
    * @throws IOException
    */
+  @Idempotent
   public void refreshUserToGroupsMappings() throws IOException;
   
   /**
    * Refresh superuser proxy group list
    * @throws IOException
    */
-  public void refreshSuperUserGroupsConfiguration() 
-  throws IOException;
+  @Idempotent
+  public void refreshSuperUserGroupsConfiguration() throws IOException;
 }
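
The @Idempotent annotations added above tell the client-side retry machinery that these refresh calls may be safely re-issued. As a hedged illustration (the interface below is invented, not part of Hadoop), a protocol typically mixes @Idempotent and @AtMostOnce methods like this:

    import java.io.IOException;
    import org.apache.hadoop.io.retry.AtMostOnce;
    import org.apache.hadoop.io.retry.Idempotent;

    public interface ExampleProtocol {
      @Idempotent   // safe to retry blindly after a timeout or failover
      String getStatus() throws IOException;

      @AtMostOnce   // retries must be deduplicated, e.g. via the server-side RetryCache
      void createSomething(String name) throws IOException;
    }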

+ 260 - 85
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java

@@ -25,6 +25,9 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;
 
 import javax.security.auth.callback.Callback;
@@ -32,6 +35,7 @@ import javax.security.auth.callback.CallbackHandler;
 import javax.security.auth.callback.NameCallback;
 import javax.security.auth.callback.PasswordCallback;
 import javax.security.auth.callback.UnsupportedCallbackException;
+import javax.security.auth.kerberos.KerberosPrincipal;
 import javax.security.sasl.RealmCallback;
 import javax.security.sasl.RealmChoiceCallback;
 import javax.security.sasl.Sasl;
@@ -42,10 +46,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.ProtobufRpcEngine.RpcRequestMessageWrapper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine.RpcResponseMessageWrapper;
 import org.apache.hadoop.ipc.RPC.RpcKind;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RpcConstants;
 import org.apache.hadoop.ipc.Server.AuthProtocol;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto.OperationProto;
@@ -57,6 +63,8 @@ import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.util.ProtoUtil;
 
 import com.google.protobuf.ByteString;
@@ -68,54 +76,136 @@ import com.google.protobuf.ByteString;
 public class SaslRpcClient {
   public static final Log LOG = LogFactory.getLog(SaslRpcClient.class);
 
-  private final AuthMethod authMethod;
-  private final SaslClient saslClient;
-  private final boolean fallbackAllowed;
-  private static final RpcRequestHeaderProto saslHeader =
-      ProtoUtil.makeRpcRequestHeader(RpcKind.RPC_PROTOCOL_BUFFER,
-          OperationProto.RPC_FINAL_PACKET, AuthProtocol.SASL.callId);
+  private final UserGroupInformation ugi;
+  private final Class<?> protocol;
+  private final InetSocketAddress serverAddr;  
+  private final Configuration conf;
+
+  private SaslClient saslClient;
+  
+  private static final RpcRequestHeaderProto saslHeader = ProtoUtil
+      .makeRpcRequestHeader(RpcKind.RPC_PROTOCOL_BUFFER,
+          OperationProto.RPC_FINAL_PACKET, AuthProtocol.SASL.callId,
+          RpcConstants.INVALID_RETRY_COUNT, RpcConstants.DUMMY_CLIENT_ID);
   private static final RpcSaslProto negotiateRequest =
       RpcSaslProto.newBuilder().setState(SaslState.NEGOTIATE).build();
   
   /**
-   * Create a SaslRpcClient for an authentication method
+   * Create a SaslRpcClient that can be used by a RPC client to negotiate
+   * SASL authentication with a RPC server
+   * @param ugi - connecting user
+   * @param protocol - RPC protocol
+   * @param serverAddr - InetSocketAddress of remote server
+   * @param conf - Configuration
+   */
+  public SaslRpcClient(UserGroupInformation ugi, Class<?> protocol,
+      InetSocketAddress serverAddr, Configuration conf) {
+    this.ugi = ugi;
+    this.protocol = protocol;
+    this.serverAddr = serverAddr;
+    this.conf = conf;
+  }
+  
+  /**
+   * Instantiate a sasl client for the first supported auth type in the
+   * given list.  The auth type must be defined, enabled, and the user
+   * must possess the required credentials, else the next auth is tried.
    * 
    * 
-   *          the requested authentication method
-   * @param token
-   *          token to use if needed by the authentication method
+   * @param authTypes to attempt in the given order
+   * @return SaslAuth of instantiated client
+   * @throws AccessControlException - client doesn't support any of the auths
+   * @throws IOException - misc errors
    */
    */
-      Token<? extends TokenIdentifier> token, String serverPrincipal,
-      boolean fallbackAllowed)
-      throws IOException {
-    this.authMethod = method;
-    this.fallbackAllowed = fallbackAllowed;
+  private SaslAuth selectSaslClient(List<SaslAuth> authTypes)
+      throws SaslException, AccessControlException, IOException {
+    SaslAuth selectedAuthType = null;
+    boolean switchToSimple = false;
+    for (SaslAuth authType : authTypes) {
+      if (!isValidAuthType(authType)) {
+        continue; // don't know what it is, try next
+      }
+      AuthMethod authMethod = AuthMethod.valueOf(authType.getMethod());
+      if (authMethod == AuthMethod.SIMPLE) {
+        switchToSimple = true;
+      } else {
+        saslClient = createSaslClient(authType);
+        if (saslClient == null) { // client lacks credentials, try next
+          continue;
+        }
+      }
+      selectedAuthType = authType;
+      break;
+    }
+    if (saslClient == null && !switchToSimple) {
+      List<String> serverAuthMethods = new ArrayList<String>();
+      for (SaslAuth authType : authTypes) {
+        serverAuthMethods.add(authType.getMethod());
+      }
+      throw new AccessControlException(
+          "Client cannot authenticate via:" + serverAuthMethods);
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Use " + selectedAuthType.getMethod() +
+          " authentication for protocol " + protocol.getSimpleName());
+    }
+    return selectedAuthType;
+  }
+  
+
+  private boolean isValidAuthType(SaslAuth authType) {
+    AuthMethod authMethod;
+    try {
+      authMethod = AuthMethod.valueOf(authType.getMethod());
+    } catch (IllegalArgumentException iae) { // unknown auth
+      authMethod = null;
+    }
+    // do we know what it is?  is it using our mechanism?
+    return authMethod != null &&
+           authMethod.getMechanismName().equals(authType.getMechanism());
+  }  
+  
+  /**
+   * Try to create a SaslClient for an authentication type.  May return
+   * null if the type isn't supported or the client lacks the required
+   * credentials.
+   * 
+   * @param authType - the requested authentication method
+   * @return SaslClient for the authType or null
+   * @throws SaslException - error instantiating client
+   * @throws IOException - misc errors
+   */
+  private SaslClient createSaslClient(SaslAuth authType)
+      throws SaslException, IOException {
     String saslUser = null;
-    String saslProtocol = null;
-    String saslServerName = null;
+    // SASL requires the client and server to use the same proto and serverId
+    // if necessary, auth types below will verify they are valid
+    final String saslProtocol = authType.getProtocol();
+    final String saslServerName = authType.getServerId();
     Map<String, String> saslProperties = SaslRpcServer.SASL_PROPS;
     CallbackHandler saslCallback = null;
     
+    final AuthMethod method = AuthMethod.valueOf(authType.getMethod());
     switch (method) {
       case TOKEN: {
-        saslProtocol = "";
-        saslServerName = SaslRpcServer.SASL_DEFAULT_REALM;
+        Token<?> token = getServerToken(authType);
+        if (token == null) {
+          return null; // tokens aren't supported or user doesn't have one
+        }
         saslCallback = new SaslClientCallbackHandler(token);
         break;
       }
       case KERBEROS: {
-        if (serverPrincipal == null || serverPrincipal.isEmpty()) {
-          throw new IOException(
-              "Failed to specify server's Kerberos principal name");
+        if (ugi.getRealAuthenticationMethod().getAuthMethod() !=
+            AuthMethod.KERBEROS) {
+          return null; // client isn't using kerberos
+        }
+        String serverPrincipal = getServerPrincipal(authType);
+        if (serverPrincipal == null) {
+          return null; // protocol doesn't use kerberos
         }
-        KerberosName name = new KerberosName(serverPrincipal);
-        saslProtocol = name.getServiceName();
-        saslServerName = name.getHostName();
-        if (saslServerName == null) {
-          throw new IOException(
-              "Kerberos principal name does NOT have the expected hostname part: "
-                  + serverPrincipal);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("RPC Server's Kerberos principal name for protocol="
+              + protocol.getCanonicalName() + " is " + serverPrincipal);
         }
         break;
       }
@@ -125,16 +215,85 @@ public class SaslRpcClient {
     
     
     String mechanism = method.getMechanismName();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Creating SASL " + mechanism + "(" + authMethod + ") "
+      LOG.debug("Creating SASL " + mechanism + "(" + method + ") "
           + " client to authenticate to service at " + saslServerName);
           + " client to authenticate to service at " + saslServerName);
     }
     }
-    saslClient = Sasl.createSaslClient(
+    return Sasl.createSaslClient(
         new String[] { mechanism }, saslUser, saslProtocol, saslServerName,
         saslProperties, saslCallback);
-    if (saslClient == null) {
-      throw new IOException("Unable to find SASL client implementation");
+  }
+  
+  /**
+   * Try to locate the required token for the server.
+   * 
+   * @param authType of the SASL client
+   * @return Token<?> for server, or null if no token available
+   * @throws IOException - token selector cannot be instantiated
+   */
+  private Token<?> getServerToken(SaslAuth authType) throws IOException {
+    TokenInfo tokenInfo = SecurityUtil.getTokenInfo(protocol, conf);
+    LOG.debug("Get token info proto:"+protocol+" info:"+tokenInfo);
+    if (tokenInfo == null) { // protocol has no support for tokens
+      return null;
+    }
+    TokenSelector<?> tokenSelector = null;
+    try {
+      tokenSelector = tokenInfo.value().newInstance();
+    } catch (InstantiationException e) {
+      throw new IOException(e.toString());
+    } catch (IllegalAccessException e) {
+      throw new IOException(e.toString());
+    }
+    return tokenSelector.selectToken(
+        SecurityUtil.buildTokenService(serverAddr), ugi.getTokens());
+  }
+  
+  /**
+   * Get the remote server's principal.  The value will be obtained from
+   * the config and cross-checked against the server's advertised principal.
+   * 
+   * @param authType of the SASL client
+   * @return String of the server's principal
+   * @throws IOException - error determining configured principal
+   */
+
+  // try to get the configured principal for the remote server
+  private String getServerPrincipal(SaslAuth authType) throws IOException {
+    KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol, conf);
+    LOG.debug("Get kerberos info proto:"+protocol+" info:"+krbInfo);
+    if (krbInfo == null) { // protocol has no support for kerberos
+      return null;
+    }
+    String serverKey = krbInfo.serverPrincipal();
+    if (serverKey == null) {
+      throw new IllegalArgumentException(
+          "Can't obtain server Kerberos config key from protocol="
+              + protocol.getCanonicalName());
+    }
+    // construct the expected principal from the config
+    String confPrincipal = SecurityUtil.getServerPrincipal(
+        conf.get(serverKey), serverAddr.getAddress());
+    if (confPrincipal == null || confPrincipal.isEmpty()) {
+      throw new IllegalArgumentException(
+          "Failed to specify server's Kerberos principal name");
+    }
+    // ensure it looks like a host-based service principal
+    KerberosName name = new KerberosName(confPrincipal);
+    if (name.getHostName() == null) {
+      throw new IllegalArgumentException(
+          "Kerberos principal name does NOT have the expected hostname part: "
+              + confPrincipal);
+    }
+    // check that the server advertised principal matches our conf
+    KerberosPrincipal serverPrincipal = new KerberosPrincipal(
+        authType.getProtocol() + "/" + authType.getServerId());
+    if (!serverPrincipal.getName().equals(confPrincipal)) {
+      throw new IllegalArgumentException(
+          "Server has invalid Kerberos principal: " + serverPrincipal);
     }
+    return confPrincipal;
   }
+  
 
   /**
    * Do client side SASL authentication with server via the given InputStream
@@ -144,18 +303,18 @@ public class SaslRpcClient {
    *          InputStream to use
    * @param outS
    *          OutputStream to use
-   * @return true if connection is set up, or false if needs to switch 
-   *             to simple Auth.
+   * @return AuthMethod used to negotiate the connection
    * @throws IOException
    */
-  public boolean saslConnect(InputStream inS, OutputStream outS)
+  public AuthMethod saslConnect(InputStream inS, OutputStream outS)
       throws IOException {
     DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS));
     DataOutputStream outStream = new DataOutputStream(new BufferedOutputStream(
         outS));
     
-    // track if SASL ever started, or server switched us to simple
-    boolean inSasl = false;
+    // redefined if/when a SASL negotiation completes
+    AuthMethod authMethod = AuthMethod.SIMPLE;
+    
     sendSaslMessage(outStream, negotiateRequest);
     
     // loop until sasl is complete or a rpc error occurs
@@ -189,50 +348,48 @@ public class SaslRpcClient {
       RpcSaslProto.Builder response = null;
       switch (saslMessage.getState()) {
         case NEGOTIATE: {
-          inSasl = true;
-          // TODO: should instantiate sasl client based on advertisement
-          // but just blindly use the pre-instantiated sasl client for now
-          String clientAuthMethod = authMethod.toString();
-          SaslAuth saslAuthType = null;
-          for (SaslAuth authType : saslMessage.getAuthsList()) {
-            if (clientAuthMethod.equals(authType.getMethod())) {
-              saslAuthType = authType;
-              break;
-            }
-          }
-          if (saslAuthType == null) {
-            saslAuthType = SaslAuth.newBuilder()
-                .setMethod(clientAuthMethod)
-                .setMechanism(saslClient.getMechanismName())
-                .build();
-          }
+          // create a compatible SASL client, throws if no supported auths
+          SaslAuth saslAuthType = selectSaslClient(saslMessage.getAuthsList());
+          authMethod = AuthMethod.valueOf(saslAuthType.getMethod());
           
           
-          byte[] challengeToken = null;
-          if (saslAuthType != null && saslAuthType.hasChallenge()) {
-            // server provided the first challenge
-            challengeToken = saslAuthType.getChallenge().toByteArray();
-            saslAuthType =
-              SaslAuth.newBuilder(saslAuthType).clearChallenge().build();
-          } else if (saslClient.hasInitialResponse()) {
-            challengeToken = new byte[0];
+          byte[] responseToken = null;
+          if (authMethod == AuthMethod.SIMPLE) { // switching to SIMPLE
+            done = true; // not going to wait for success ack
+          } else {
+            byte[] challengeToken = null;
+            if (saslAuthType.hasChallenge()) {
+              // server provided the first challenge
+              challengeToken = saslAuthType.getChallenge().toByteArray();
+              saslAuthType =
+                  SaslAuth.newBuilder(saslAuthType).clearChallenge().build();
+            } else if (saslClient.hasInitialResponse()) {
+              challengeToken = new byte[0];
+            }
+            responseToken = (challengeToken != null)
+                ? saslClient.evaluateChallenge(challengeToken)
+                    : new byte[0];
           }
           }
-              ? saslClient.evaluateChallenge(challengeToken)
-              : new byte[0];
-          
           response = createSaslReply(SaslState.INITIATE, responseToken);
           response = createSaslReply(SaslState.INITIATE, responseToken);
           response.addAuths(saslAuthType);
           response.addAuths(saslAuthType);
           break;
           break;
         }
         }
         case CHALLENGE: {
         case CHALLENGE: {
-          inSasl = true;
+          if (saslClient == null) {
+            // should probably instantiate a client to allow a server to
+            // demand a specific negotiation
+            throw new SaslException("Server sent unsolicited challenge");
+          }
           byte[] responseToken = saslEvaluateToken(saslMessage, false);
           byte[] responseToken = saslEvaluateToken(saslMessage, false);
           response = createSaslReply(SaslState.RESPONSE, responseToken);
           response = createSaslReply(SaslState.RESPONSE, responseToken);
           break;
           break;
         }
         }
         case SUCCESS: {
         case SUCCESS: {
-          if (inSasl && saslEvaluateToken(saslMessage, true) != null) {
-            throw new SaslException("SASL client generated spurious token");
+          // a simple-auth server sends an immediate success to a SASL client
+          // to switch it to simple auth
+          if (saslClient == null) {
+            authMethod = AuthMethod.SIMPLE;
+          } else {
+            saslEvaluateToken(saslMessage, true);
           }
           }
           done = true;
           done = true;
           break;
           break;
@@ -246,12 +403,7 @@ public class SaslRpcClient {
         sendSaslMessage(outStream, response.build());
         sendSaslMessage(outStream, response.build());
       }
       }
     } while (!done);
     } while (!done);
-    if (!inSasl && !fallbackAllowed) {
-      throw new IOException("Server asks us to fall back to SIMPLE " +
-          "auth, but this client is configured to only allow secure " +
-          "connections.");
-    }
-    return inSasl;
+    return authMethod;
   }
   }
   
   
   private void sendSaslMessage(DataOutputStream out, RpcSaslProto message)
   private void sendSaslMessage(DataOutputStream out, RpcSaslProto message)
@@ -266,17 +418,37 @@ public class SaslRpcClient {
     out.flush();    
     out.flush();    
   }
   }
   
   
+  /**
+   * Evaluate the server provided challenge.  The server must send a token
+   * if it's not done.  If the server is done, the challenge token is
+   * optional because not all mechanisms send a final token for the client to
+   * update its internal state.  The client must also be done after
+   * evaluating the optional token to ensure a malicious server doesn't
+   * prematurely end the negotiation with a phony success.
+   *  
+   * @param saslResponse - server message (challenge or success) to evaluate
+   * @param serverIsDone - server negotiation state
+   * @throws SaslException - any problems with negotiation
+   */
   private byte[] saslEvaluateToken(RpcSaslProto saslResponse,
   private byte[] saslEvaluateToken(RpcSaslProto saslResponse,
-      boolean done) throws SaslException {
+      boolean serverIsDone) throws SaslException {
     byte[] saslToken = null;
     byte[] saslToken = null;
     if (saslResponse.hasToken()) {
     if (saslResponse.hasToken()) {
       saslToken = saslResponse.getToken().toByteArray();
       saslToken = saslResponse.getToken().toByteArray();
       saslToken = saslClient.evaluateChallenge(saslToken);
       saslToken = saslClient.evaluateChallenge(saslToken);
-    } else if (!done) {
-      throw new SaslException("Challenge contains no token");
+    } else if (!serverIsDone) {
+      // the server may only omit a token when it's done
+      throw new SaslException("Server challenge contains no token");
     }
     }
-    if (done && !saslClient.isComplete()) {
-      throw new SaslException("Client is out of sync with server");
+    if (serverIsDone) {
+      // server tried to report success before our client completed
+      if (!saslClient.isComplete()) {
+        throw new SaslException("Client is out of sync with server");
+      }
+      // a client cannot generate a response to a success message
+      if (saslToken != null) {
+        throw new SaslException("Client generated spurious response");        
+      }
     }
     }
     return saslToken;
     return saslToken;
   }
   }
@@ -325,7 +497,10 @@ public class SaslRpcClient {
 
 
   /** Release resources used by wrapped saslClient */
   /** Release resources used by wrapped saslClient */
   public void dispose() throws SaslException {
   public void dispose() throws SaslException {
-    saslClient.dispose();
+    if (saslClient != null) {
+      saslClient.dispose();
+      saslClient = null;
+    }
   }
   }
 
 
   private static class SaslClientCallbackHandler implements CallbackHandler {
   private static class SaslClientCallbackHandler implements CallbackHandler {
@@ -375,4 +550,4 @@ public class SaslRpcClient {
       }
       }
     }
     }
   }
   }
-}
+}
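
With this change saslConnect() reports the negotiated AuthMethod instead of a boolean, so the decision about whether a fallback to SIMPLE is acceptable moves out of this method. Below is a minimal, hedged sketch of how a caller might enforce that policy; the actual enforcement presumably lives in ipc/Client.java (not shown in this hunk), and the fallbackAllowed flag is assumed to come from the caller's configuration.

import java.io.IOException;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;

public class SaslFallbackPolicy {
  /** Reject a server-requested fallback to SIMPLE unless explicitly allowed. */
  static void checkFallback(AuthMethod negotiated, boolean fallbackAllowed)
      throws IOException {
    if (negotiated == AuthMethod.SIMPLE && !fallbackAllowed) {
      throw new IOException("Server asks us to fall back to SIMPLE auth, "
          + "but this client is configured to only allow secure connections.");
    }
  }
}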

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java

@@ -672,7 +672,7 @@ public class SecurityUtil {
   public static AuthenticationMethod getAuthenticationMethod(Configuration conf) {
   public static AuthenticationMethod getAuthenticationMethod(Configuration conf) {
     String value = conf.get(HADOOP_SECURITY_AUTHENTICATION, "simple");
     String value = conf.get(HADOOP_SECURITY_AUTHENTICATION, "simple");
     try {
     try {
-      return Enum.valueOf(AuthenticationMethod.class, value.toUpperCase());
+      return Enum.valueOf(AuthenticationMethod.class, value.toUpperCase(Locale.ENGLISH));
     } catch (IllegalArgumentException iae) {
     } catch (IllegalArgumentException iae) {
       throw new IllegalArgumentException("Invalid attribute value for " +
       throw new IllegalArgumentException("Invalid attribute value for " +
           HADOOP_SECURITY_AUTHENTICATION + " of " + value);
           HADOOP_SECURITY_AUTHENTICATION + " of " + value);
@@ -685,6 +685,6 @@ public class SecurityUtil {
       authenticationMethod = AuthenticationMethod.SIMPLE;
       authenticationMethod = AuthenticationMethod.SIMPLE;
     }
     }
     conf.set(HADOOP_SECURITY_AUTHENTICATION,
     conf.set(HADOOP_SECURITY_AUTHENTICATION,
-             authenticationMethod.toString().toLowerCase());
+             authenticationMethod.toString().toLowerCase(Locale.ENGLISH));
   }
   }
 }
 }
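
Why the explicit Locale matters here: String.toUpperCase() and toLowerCase() follow the JVM's default locale, and in the Turkish locale the letter 'i' upper-cases to the dotted capital 'İ', so a configured value of "simple" would no longer match the SIMPLE enum constant. A small illustrative snippet (not part of the patch):

import java.util.Locale;

public class LocaleCaseDemo {
  public static void main(String[] args) {
    String value = "simple";
    // Turkish locale: 'i' -> 'İ' (U+0130), which does not equal "SIMPLE"
    System.out.println(value.toUpperCase(new Locale("tr")));
    // Locale.ENGLISH yields "SIMPLE" regardless of the platform default locale
    System.out.println(value.toUpperCase(Locale.ENGLISH));
  }
}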

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java

@@ -22,6 +22,7 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.KerberosInfo;
 
 
 /**
 /**
@@ -42,5 +43,6 @@ public interface RefreshAuthorizationPolicyProtocol {
    * Refresh the service-level authorization policy in-effect.
    * Refresh the service-level authorization policy in-effect.
    * @throws IOException
    * @throws IOException
    */
    */
+  @Idempotent
   void refreshServiceAcl() throws IOException;
   void refreshServiceAcl() throws IOException;
 }
 }

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java

@@ -16,17 +16,18 @@
  * limitations under the License.
  * limitations under the License.
  */
  */
 
 
-package org.apache.hadoop.hdfs.protocolPB;
+package org.apache.hadoop.security.protocolPB;
 
 
 import java.io.Closeable;
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.IOException;
 
 
-import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclRequestProto;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
+import org.apache.hadoop.security.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclRequestProto;
+import org.apache.hadoop.security.protocolPB.RefreshAuthorizationPolicyProtocolPB;
 
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import com.google.protobuf.ServiceException;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolPB.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolPB.java

@@ -16,14 +16,14 @@
  * limitations under the License.
  * limitations under the License.
  */
  */
 
 
-package org.apache.hadoop.hdfs.protocolPB;
+package org.apache.hadoop.security.protocolPB;
 
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshAuthorizationPolicyProtocolService;
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshAuthorizationPolicyProtocolService;
 
 
 @KerberosInfo(
 @KerberosInfo(
     serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
     serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java

@@ -16,13 +16,13 @@
  * limitations under the License.
  * limitations under the License.
  */
  */
 
 
-package org.apache.hadoop.hdfs.protocolPB;
+package org.apache.hadoop.security.protocolPB;
 
 
 import java.io.IOException;
 import java.io.IOException;
 
 
-import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclResponseProto;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
+import org.apache.hadoop.security.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclRequestProto;
+import org.apache.hadoop.security.proto.RefreshAuthorizationPolicyProtocolProtos.RefreshServiceAclResponseProto;
 
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import com.google.protobuf.ServiceException;

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java

@@ -16,18 +16,18 @@
  * limitations under the License.
  * limitations under the License.
  */
  */
 
 
-package org.apache.hadoop.hdfs.protocolPB;
+package org.apache.hadoop.security.protocolPB;
 
 
 import java.io.Closeable;
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.IOException;
 
 
-import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsRequestProto;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
+import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationRequestProto;
+import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsRequestProto;
 
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import com.google.protobuf.ServiceException;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolPB.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolPB.java

@@ -16,14 +16,14 @@
  * limitations under the License.
  * limitations under the License.
  */
  */
 
 
-package org.apache.hadoop.hdfs.protocolPB;
+package org.apache.hadoop.security.protocolPB;
 
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService;
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserMappingsProtocolService;
 
 
 @KerberosInfo(
 @KerberosInfo(
     serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
     serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java

@@ -16,15 +16,15 @@
  * limitations under the License.
  * limitations under the License.
  */
  */
 
 
-package org.apache.hadoop.hdfs.protocolPB;
+package org.apache.hadoop.security.protocolPB;
 
 
 import java.io.IOException;
 import java.io.IOException;
 
 
-import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsResponseProto;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
+import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationRequestProto;
+import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshSuperUserGroupsConfigurationResponseProto;
+import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsRequestProto;
+import org.apache.hadoop.security.proto.RefreshUserMappingsProtocolProtos.RefreshUserToGroupsMappingsResponseProto;
 
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import com.google.protobuf.ServiceException;

+ 2 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java

@@ -21,6 +21,7 @@ import java.io.IOException;
 
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.retry.Idempotent;
 
 
 /**
 /**
  * Protocol implemented by the Name Node and Job Tracker which maps users to
  * Protocol implemented by the Name Node and Job Tracker which maps users to
@@ -41,5 +42,6 @@ public interface GetUserMappingsProtocol {
    * @return The set of groups the user belongs to.
    * @return The set of groups the user belongs to.
    * @throws IOException
    * @throws IOException
    */
    */
+  @Idempotent
   public String[] getGroupsForUser(String user) throws IOException;
   public String[] getGroupsForUser(String user) throws IOException;
 }
 }
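
The @Idempotent annotation (from org.apache.hadoop.io.retry) tells the retry machinery that a method is safe to re-invoke, for example after a failover. A hedged sketch of how a protocol author would use it; the protocol interface below is hypothetical and not part of this patch:

import java.io.IOException;
import org.apache.hadoop.io.retry.Idempotent;

public interface ExampleAdminProtocol {
  /** Read-only query; retrying it against another server is harmless. */
  @Idempotent
  String[] listActiveUsers() throws IOException;

  /** Not annotated: blindly retrying a mutation could apply it twice. */
  void appendAuditRecord(String record) throws IOException;
}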

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolClientSideTranslatorPB.java

@@ -16,17 +16,17 @@
  * limitations under the License.
  * limitations under the License.
  */
  */
 
 
-package org.apache.hadoop.hdfs.protocolPB;
+package org.apache.hadoop.tools.protocolPB;
 
 
 import java.io.Closeable;
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.IOException;
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto;
 
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import com.google.protobuf.ServiceException;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolPB.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolPB.java

@@ -16,14 +16,14 @@
  * limitations under the License.
  * limitations under the License.
  */
  */
 
 
-package org.apache.hadoop.hdfs.protocolPB;
+package org.apache.hadoop.tools.protocolPB;
 
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
 
 
 @KerberosInfo(
 @KerberosInfo(
     serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
     serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/GetUserMappingsProtocolServerSideTranslatorPB.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/protocolPB/GetUserMappingsProtocolServerSideTranslatorPB.java

@@ -16,13 +16,13 @@
  * limitations under the License.
  * limitations under the License.
  */
  */
 
 
-package org.apache.hadoop.hdfs.protocolPB;
+package org.apache.hadoop.tools.protocolPB;
 
 
 import java.io.IOException;
 import java.io.IOException;
 
 
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetGroupsForUserRequestProto;
+import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetGroupsForUserResponseProto;
 
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import com.google.protobuf.ServiceException;

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/GSet.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java

@@ -15,8 +15,10 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License.
  * limitations under the License.
  */
  */
-package org.apache.hadoop.hdfs.util;
+package org.apache.hadoop.util;
 
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 
 
 /**
 /**
@@ -31,6 +33,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
  */
  */
 @InterfaceAudience.Private
 @InterfaceAudience.Private
 public interface GSet<K, E extends K> extends Iterable<E> {
 public interface GSet<K, E extends K> extends Iterable<E> {
+  static final Log LOG = LogFactory.getLog(GSet.class);
+
   /**
   /**
    * @return The size of this set.
    * @return The size of this set.
    */
    */

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/GSetByHashMap.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSetByHashMap.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License.
  * limitations under the License.
  */
  */
-package org.apache.hadoop.hdfs.util;
+package org.apache.hadoop.util;
 
 
 import java.util.HashMap;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Iterator;

+ 238 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java

@@ -0,0 +1,238 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.util.Comparator;
+import java.util.PriorityQueue;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+/**
+ * A low memory footprint cache which extends {@link LightWeightGSet}.
+ * An entry in the cache is expired if
+ * (1) it has been in the cache longer than the creation-expiration period, and
+ * (2) it has not been accessed for the access-expiration period.
+ * When an entry is expired, it may be evicted from the cache.
+ * When the size limit of the cache is set, the cache will evict the entries
+ * with the earliest expiration time, even if they are not yet expired.
+ * 
+ * It is guaranteed that the number of entries in the cache is less than or
+ * equal to the size limit.  However, it is not guaranteed that expired entries
+ * are evicted from the cache. An expired entry may possibly be accessed after
+ * its expiration time. In such a case, the expiration time may be updated.
+ *
+ * This class does not support null entries.
+ *
+ * This class is not thread safe.
+ *
+ * @param <K> Key type for looking up the entries
+ * @param <E> Entry type, which must be
+ *       (1) a subclass of K, and
+ *       (2) implement the {@link Entry} interface.
+ */
+@InterfaceAudience.Private
+public class LightWeightCache<K, E extends K> extends LightWeightGSet<K, E> {
+  /** Limit the number of entries in each eviction. */
+  private static final int EVICTION_LIMIT = 1 << 16;
+
+  /**
+   * Entries of {@link LightWeightCache}.
+   */
+  public static interface Entry extends LinkedElement {
+    /** Set the expiration time. */
+    public void setExpirationTime(long timeNano);
+
+    /** Get the expiration time. */
+    public long getExpirationTime();
+  }
+
+  /** Comparator for sorting entries by expiration time in ascending order. */
+  private static final Comparator<Entry> expirationTimeComparator
+      = new Comparator<Entry>() {
+    @Override
+    public int compare(Entry left, Entry right) {
+      final long l = left.getExpirationTime();
+      final long r = right.getExpirationTime();
+      return l > r? 1: l < r? -1: 0;
+    }
+  };
+
+  /** A clock for measuring time so that it can be mocked in unit tests. */
+  static class Clock {
+    /** @return the current time. */
+    long currentTime() {
+      return System.nanoTime();
+    }
+  }
+  
+  private static int updateRecommendedLength(int recommendedLength,
+      int sizeLimit) {
+    return sizeLimit > 0 && sizeLimit < recommendedLength?
+        (sizeLimit/4*3) // 0.75 load factor
+        : recommendedLength;
+  }
+
+  /*
+   * The memory footprint for java.util.PriorityQueue is low but the
+   * remove(Object) method runs in linear time. We may improve it by using a
+   * balanced tree. However, we do not yet have a low memory footprint balanced
+   * tree implementation.
+   */
+  private final PriorityQueue<Entry> queue;
+  private final long creationExpirationPeriod;
+  private final long accessExpirationPeriod;
+  private final int sizeLimit;
+  private final Clock clock;
+
+  /**
+   * @param recommendedLength Recommended size of the internal array.
+   * @param sizeLimit the limit of the size of the cache.
+   *            The limit is disabled if it is <= 0.
+   * @param creationExpirationPeriod the period C > 0 in nanoseconds after
+   *            which an entry expires, measured from the time it was added
+   *            to the cache.
+   * @param accessExpirationPeriod the period A >= 0 in nanoseconds after
+   *            which an entry's access expires, measured from the time it
+   *            was last accessed.
+   */
+  public LightWeightCache(final int recommendedLength,
+      final int sizeLimit,
+      final long creationExpirationPeriod,
+      final long accessExpirationPeriod) {
+    this(recommendedLength, sizeLimit,
+        creationExpirationPeriod, accessExpirationPeriod, new Clock());
+  }
+
+  @VisibleForTesting
+  LightWeightCache(final int recommendedLength,
+      final int sizeLimit,
+      final long creationExpirationPeriod,
+      final long accessExpirationPeriod,
+      final Clock clock) {
+    super(updateRecommendedLength(recommendedLength, sizeLimit));
+
+    this.sizeLimit = sizeLimit;
+
+    if (creationExpirationPeriod <= 0) {
+      throw new IllegalArgumentException("creationExpirationPeriod = "
+          + creationExpirationPeriod + " <= 0");
+    }
+    this.creationExpirationPeriod = creationExpirationPeriod;
+
+    if (accessExpirationPeriod < 0) {
+      throw new IllegalArgumentException("accessExpirationPeriod = "
+          + accessExpirationPeriod + " < 0");
+    }
+    this.accessExpirationPeriod = accessExpirationPeriod;
+
+    this.queue = new PriorityQueue<Entry>(
+        sizeLimit > 0? sizeLimit + 1: 1 << 10, expirationTimeComparator);
+    this.clock = clock;
+  }
+
+  void setExpirationTime(final Entry e, final long expirationPeriod) {
+    e.setExpirationTime(clock.currentTime() + expirationPeriod);
+  }
+
+  boolean isExpired(final Entry e, final long now) {
+    return now > e.getExpirationTime();
+  }
+
+  private E evict() {
+    @SuppressWarnings("unchecked")
+    final E polled = (E)queue.poll();
+    final E removed = super.remove(polled);
+    Preconditions.checkState(removed == polled);
+    return polled;
+  }
+
+  /** Evict expired entries. */
+  private void evictExpiredEntries() {
+    final long now = clock.currentTime();
+    for(int i = 0; i < EVICTION_LIMIT; i++) {
+      final Entry peeked = queue.peek();
+      if (peeked == null || !isExpired(peeked, now)) {
+        return;
+      }
+
+      final E evicted = evict();
+      Preconditions.checkState(evicted == peeked);
+    }
+  }
+
+  /** Evict entries in order to enforce the size limit of the cache. */
+  private void evictEntries() {
+    if (sizeLimit > 0) {
+      for(int i = size(); i > sizeLimit; i--) {
+        evict();
+      }
+    }
+  }
+  
+  @Override
+  public E get(K key) {
+    final E entry = super.get(key);
+    if (entry != null) {
+      if (accessExpirationPeriod > 0) {
+        // update expiration time
+        final Entry existing = (Entry)entry;
+        Preconditions.checkState(queue.remove(existing));
+        setExpirationTime(existing, accessExpirationPeriod);
+        queue.offer(existing);
+      }
+    }
+    return entry;
+  }
+
+  @Override
+  public E put(final E entry) {
+    if (!(entry instanceof Entry)) {
+      throw new HadoopIllegalArgumentException(
+          "!(entry instanceof Entry), entry.getClass()=" + entry.getClass());
+    }
+
+    evictExpiredEntries();
+
+    final E existing = super.put(entry);
+    if (existing != null) {
+      queue.remove(existing);
+    }
+
+    final Entry e = (Entry)entry;
+    setExpirationTime(e, creationExpirationPeriod);
+    queue.offer(e);
+    
+    evictEntries();
+    return existing;
+  }
+
+  @Override
+  public E remove(K key) {
+    evictExpiredEntries();
+
+    final E removed = super.remove(key);
+    if (removed != null) {
+      Preconditions.checkState(queue.remove(removed));
+    }
+    return removed;
+  }
+}
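
A hedged usage sketch for the new cache (not part of the patch): an entry type must be its own key and implement both LightWeightGSet.LinkedElement and LightWeightCache.Entry. The CacheEntry class and the sizes/periods chosen below are illustrative only.

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.LightWeightCache;
import org.apache.hadoop.util.LightWeightGSet;

public class LightWeightCacheDemo {
  /** Illustrative entry type: acts as both key and cached value. */
  static class CacheEntry implements LightWeightCache.Entry {
    private final long id;
    private long expirationTime;
    private LightWeightGSet.LinkedElement next;

    CacheEntry(long id) { this.id = id; }

    @Override public void setExpirationTime(long timeNano) { expirationTime = timeNano; }
    @Override public long getExpirationTime() { return expirationTime; }
    @Override public void setNext(LightWeightGSet.LinkedElement e) { next = e; }
    @Override public LightWeightGSet.LinkedElement getNext() { return next; }

    @Override public int hashCode() { return (int) id; }
    @Override public boolean equals(Object obj) {
      return obj instanceof CacheEntry && ((CacheEntry) obj).id == id;
    }
  }

  public static void main(String[] args) {
    // capacity hint 1024, at most 100 entries; entries expire 10s after
    // creation and also 5s after the most recent access
    LightWeightCache<CacheEntry, CacheEntry> cache =
        new LightWeightCache<CacheEntry, CacheEntry>(1024, 100,
            TimeUnit.SECONDS.toNanos(10), TimeUnit.SECONDS.toNanos(5));
    cache.put(new CacheEntry(1));
    CacheEntry hit = cache.get(new CacheEntry(1)); // refreshes access expiration
    System.out.println("found: " + (hit != null));
  }
}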

+ 2 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightGSet.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightGSet.java

@@ -15,17 +15,15 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License.
  * limitations under the License.
  */
  */
-package org.apache.hadoop.hdfs.util;
+package org.apache.hadoop.util;
 
 
 import java.io.PrintStream;
 import java.io.PrintStream;
 import java.util.ConcurrentModificationException;
 import java.util.ConcurrentModificationException;
 import java.util.Iterator;
 import java.util.Iterator;
 
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.HadoopIllegalArgumentException;
 
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.annotations.VisibleForTesting;
 
 
@@ -59,7 +57,6 @@ public class LightWeightGSet<K, E extends K> implements GSet<K, E> {
     public LinkedElement getNext();
     public LinkedElement getNext();
   }
   }
 
 
-  public static final Log LOG = LogFactory.getLog(GSet.class);
   static final int MAX_ARRAY_LENGTH = 1 << 30; //prevent int overflow problem
   static final int MAX_ARRAY_LENGTH = 1 << 30; //prevent int overflow problem
   static final int MIN_ARRAY_LENGTH = 1;
   static final int MIN_ARRAY_LENGTH = 1;
 
 

+ 6 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java

@@ -28,6 +28,8 @@ import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.*;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
 
+import com.google.protobuf.ByteString;
+
 public abstract class ProtoUtil {
 public abstract class ProtoUtil {
 
 
   /**
   /**
@@ -158,9 +160,11 @@ public abstract class ProtoUtil {
   }
   }
  
  
   public static RpcRequestHeaderProto makeRpcRequestHeader(RPC.RpcKind rpcKind,
   public static RpcRequestHeaderProto makeRpcRequestHeader(RPC.RpcKind rpcKind,
-      RpcRequestHeaderProto.OperationProto operation, int callId) {
+      RpcRequestHeaderProto.OperationProto operation, int callId,
+      int retryCount, byte[] uuid) {
     RpcRequestHeaderProto.Builder result = RpcRequestHeaderProto.newBuilder();
     RpcRequestHeaderProto.Builder result = RpcRequestHeaderProto.newBuilder();
-    result.setRpcKind(convert(rpcKind)).setRpcOp(operation).setCallId(callId);
+    result.setRpcKind(convert(rpcKind)).setRpcOp(operation).setCallId(callId)
+        .setRetryCount(retryCount).setClientId(ByteString.copyFrom(uuid));
     return result.build();
     return result.build();
   }
   }
 }
 }
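
A hedged sketch of the new call shape (not part of the patch): the caller now supplies a retry count and a globally unique client id alongside the call id. The ClientId.getClientId() helper is assumed from the new ipc/ClientId class listed in this commit; the call id and retry count values are illustrative.

import org.apache.hadoop.ipc.ClientId;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto;
import org.apache.hadoop.util.ProtoUtil;

public class RpcHeaderDemo {
  static RpcRequestHeaderProto buildHeader(int callId, int retryCount) {
    byte[] clientId = ClientId.getClientId(); // random UUID, stable per client
    return ProtoUtil.makeRpcRequestHeader(
        RPC.RpcKind.RPC_PROTOCOL_BUFFER,
        RpcRequestHeaderProto.OperationProto.RPC_FINAL_PACKET,
        callId, retryCount, clientId);
  }
}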

+ 35 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java

@@ -25,7 +25,10 @@ import java.lang.management.ManagementFactory;
 import java.lang.management.ThreadInfo;
 import java.lang.management.ThreadInfo;
 import java.lang.management.ThreadMXBean;
 import java.lang.management.ThreadMXBean;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Constructor;
+import java.lang.reflect.Field;
 import java.lang.reflect.Method;
 import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentHashMap;
 
 
@@ -302,4 +305,36 @@ public class ReflectionUtils {
     buffer.moveData();
     buffer.moveData();
     dst.readFields(buffer.inBuffer);
     dst.readFields(buffer.inBuffer);
   }
   }
+  
+  /**
+   * Gets all the declared fields of a class including fields declared in
+   * superclasses.
+   */
+  public static List<Field> getDeclaredFieldsIncludingInherited(Class<?> clazz) {
+    List<Field> fields = new ArrayList<Field>();
+    while (clazz != null) {
+      for (Field field : clazz.getDeclaredFields()) {
+        fields.add(field);
+      }
+      clazz = clazz.getSuperclass();
+    }
+    
+    return fields;
+  }
+  
+  /**
+   * Gets all the declared methods of a class including methods declared in
+   * superclasses.
+   */
+  public static List<Method> getDeclaredMethodsIncludingInherited(Class<?> clazz) {
+    List<Method> methods = new ArrayList<Method>();
+    while (clazz != null) {
+      for (Method method : clazz.getDeclaredMethods()) {
+        methods.add(method);
+      }
+      clazz = clazz.getSuperclass();
+    }
+    
+    return methods;
+  }
 }
 }
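
A hedged usage sketch of the new reflection helpers (not part of the patch): they walk the whole class hierarchy, so the results include members declared on superclasses, all the way up to Object.

import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.List;
import org.apache.hadoop.util.ReflectionUtils;

public class InheritedMembersDemo {
  static class Base { protected int count; void reset() { count = 0; } }
  static class Derived extends Base { private String name; void rename(String n) { name = n; } }

  public static void main(String[] args) {
    List<Field> fields =
        ReflectionUtils.getDeclaredFieldsIncludingInherited(Derived.class);
    List<Method> methods =
        ReflectionUtils.getDeclaredMethodsIncludingInherited(Derived.class);
    // fields contains Derived.name and Base.count; methods also includes
    // everything declared on Object, since the walk stops only at null
    System.out.println(fields.size() + " fields, " + methods.size() + " methods");
  }
}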

+ 12 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java

@@ -40,7 +40,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.util.Shell;
 
 
 import com.google.common.net.InetAddresses;
 import com.google.common.net.InetAddresses;
 
 
@@ -894,4 +893,16 @@ public class StringUtils {
     matcher.appendTail(sb);
     matcher.appendTail(sb);
     return sb.toString();
     return sb.toString();
   }
   }
+  
+  /**
+   * Get stack trace for a given thread.
+   */
+  public static String getStackTrace(Thread t) {
+    final StackTraceElement[] stackTrace = t.getStackTrace();
+    StringBuilder str = new StringBuilder();
+    for (StackTraceElement e : stackTrace) {
+      str.append(e.toString() + "\n");
+    }
+    return str.toString();
+  }
 }
 }
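
A hedged usage sketch for the new helper (not part of the patch): capture another thread's current stack, e.g. when logging a suspected hang. The worker thread below is purely illustrative.

import org.apache.hadoop.util.StringUtils;

public class StackTraceDemo {
  public static void main(String[] args) throws Exception {
    Thread worker = new Thread(new Runnable() {
      @Override
      public void run() {
        try { Thread.sleep(60000); } catch (InterruptedException ignored) { }
      }
    }, "worker");
    worker.start();
    Thread.sleep(100); // give the worker a moment to reach sleep()
    System.out.println(StringUtils.getStackTrace(worker));
    worker.interrupt();
  }
}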

+ 32 - 0
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c

@@ -38,9 +38,41 @@ JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_buildSup
 JNIEXPORT jstring JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_getLibraryName
 JNIEXPORT jstring JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_getLibraryName
   (JNIEnv *env, jclass clazz)
   (JNIEnv *env, jclass clazz)
 {
 {
+#ifdef UNIX
   Dl_info dl_info;
   Dl_info dl_info;
   int ret = dladdr(
   int ret = dladdr(
       Java_org_apache_hadoop_util_NativeCodeLoader_getLibraryName,
       Java_org_apache_hadoop_util_NativeCodeLoader_getLibraryName,
       &dl_info);
       &dl_info);
   return (*env)->NewStringUTF(env, ret==0 ? "Unavailable" : dl_info.dli_fname);
   return (*env)->NewStringUTF(env, ret==0 ? "Unavailable" : dl_info.dli_fname);
+#endif
+
+#ifdef WINDOWS
+  SIZE_T ret = 0;
+  DWORD size = MAX_PATH;
+  LPWSTR filename = NULL;
+  HMODULE mod = NULL;
+  DWORD err = ERROR_SUCCESS;
+
+  MEMORY_BASIC_INFORMATION mbi;
+  ret = VirtualQuery(Java_org_apache_hadoop_util_NativeCodeLoader_getLibraryName,
+    &mbi, sizeof(mbi));
+  if (ret == 0) goto cleanup;
+  mod = mbi.AllocationBase;
+
+  do {
+    filename = (LPWSTR) realloc(filename, size * sizeof(WCHAR));
+    if (filename == NULL) goto cleanup;
+    GetModuleFileName(mod, filename, size);
+    size <<= 1;
+    err = GetLastError();
+  } while (err == ERROR_INSUFFICIENT_BUFFER);
+  
+  if (err != ERROR_SUCCESS) goto cleanup;
+
+  return (*env)->NewString(env, filename, (jsize) wcslen(filename));
+
+cleanup:
+  if (filename != NULL) free(filename);
+  return (*env)->NewStringUTF(env, "Unavailable");
+#endif
 }
 }

+ 6 - 6
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/bulk_crc32.c

@@ -427,7 +427,7 @@ static void pipelined_crc32c(uint32_t *crc1, uint32_t *crc2, uint32_t *crc3, con
         "crc32q (%7,%6,1), %1;\n\t"
         "crc32q (%7,%6,1), %1;\n\t"
         "crc32q (%7,%6,2), %2;\n\t"
         "crc32q (%7,%6,2), %2;\n\t"
          : "=r"(c1), "=r"(c2), "=r"(c3)
          : "=r"(c1), "=r"(c2), "=r"(c3)
-         : "r"(c1), "r"(c2), "r"(c3), "r"(block_size), "r"(data)
+         : "0"(c1), "1"(c2), "2"(c3), "r"(block_size), "r"(data)
         );
         );
         data++;
         data++;
         counter--;
         counter--;
@@ -443,7 +443,7 @@ static void pipelined_crc32c(uint32_t *crc1, uint32_t *crc2, uint32_t *crc3, con
         "crc32b (%7,%6,1), %1;\n\t"
         "crc32b (%7,%6,1), %1;\n\t"
         "crc32b (%7,%6,2), %2;\n\t"
         "crc32b (%7,%6,2), %2;\n\t"
          : "=r"(c1), "=r"(c2), "=r"(c3)
          : "=r"(c1), "=r"(c2), "=r"(c3)
-         : "r"(c1), "r"(c2), "r"(c3), "r"(block_size), "r"(bdata)
+         : "0"(c1), "1"(c2), "2"(c3), "r"(block_size), "r"(bdata)
         );
         );
         bdata++;
         bdata++;
         remainder--;
         remainder--;
@@ -456,7 +456,7 @@ static void pipelined_crc32c(uint32_t *crc1, uint32_t *crc2, uint32_t *crc3, con
         "crc32q (%5), %0;\n\t"
         "crc32q (%5), %0;\n\t"
         "crc32q (%5,%4,1), %1;\n\t"
         "crc32q (%5,%4,1), %1;\n\t"
          : "=r"(c1), "=r"(c2) 
          : "=r"(c1), "=r"(c2) 
-         : "r"(c1), "r"(c2), "r"(block_size), "r"(data)
+         : "0"(c1), "1"(c2), "r"(block_size), "r"(data)
         );
         );
         data++;
         data++;
         counter--;
         counter--;
@@ -468,7 +468,7 @@ static void pipelined_crc32c(uint32_t *crc1, uint32_t *crc2, uint32_t *crc3, con
         "crc32b (%5), %0;\n\t"
         "crc32b (%5), %0;\n\t"
         "crc32b (%5,%4,1), %1;\n\t"
         "crc32b (%5,%4,1), %1;\n\t"
          : "=r"(c1), "=r"(c2) 
          : "=r"(c1), "=r"(c2) 
-         : "r"(c1), "r"(c2), "r"(block_size), "r"(bdata)
+         : "0"(c1), "1"(c2), "r"(block_size), "r"(bdata)
         );
         );
         bdata++;
         bdata++;
         remainder--;
         remainder--;
@@ -480,7 +480,7 @@ static void pipelined_crc32c(uint32_t *crc1, uint32_t *crc2, uint32_t *crc3, con
         __asm__ __volatile__(
         __asm__ __volatile__(
         "crc32q (%2), %0;\n\t"
         "crc32q (%2), %0;\n\t"
          : "=r"(c1) 
          : "=r"(c1) 
-         : "r"(c1), "r"(data)
+         : "0"(c1), "r"(data)
         );
         );
         data++;
         data++;
         counter--;
         counter--;
@@ -490,7 +490,7 @@ static void pipelined_crc32c(uint32_t *crc1, uint32_t *crc2, uint32_t *crc3, con
         __asm__ __volatile__(
         __asm__ __volatile__(
         "crc32b (%2), %0;\n\t"
         "crc32b (%2), %0;\n\t"
          : "=r"(c1) 
          : "=r"(c1) 
-         : "r"(c1), "r"(bdata)
+         : "0"(c1), "r"(bdata)
         );
         );
         bdata++;
         bdata++;
         remainder--;
         remainder--;

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/GetUserMappingsProtocol.proto → hadoop-common-project/hadoop-common/src/main/proto/GetUserMappingsProtocol.proto

@@ -22,11 +22,11 @@
  * for what changes are allowed for a *stable* .proto interface.
  * for what changes are allowed for a *stable* .proto interface.
  */
  */
 
 
-option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_package = "org.apache.hadoop.tools.proto";
 option java_outer_classname = "GetUserMappingsProtocolProtos";
 option java_outer_classname = "GetUserMappingsProtocolProtos";
 option java_generic_services = true;
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
+package hadoop.common;
 
 
 /**
 /**
  *  Get groups for user request.
  *  Get groups for user request.
@@ -44,8 +44,7 @@ message GetGroupsForUserResponseProto {
 
 
 
 
 /**
 /**
- * Protocol implemented by the Name Node and Job Tracker which maps users to
- * groups.
+ * Protocol which maps users to groups.
  */
  */
 service GetUserMappingsProtocolService {
 service GetUserMappingsProtocolService {
   /**
   /**

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshAuthorizationPolicyProtocol.proto → hadoop-common-project/hadoop-common/src/main/proto/RefreshAuthorizationPolicyProtocol.proto

@@ -22,11 +22,11 @@
  * for what changes are allowed for a *stable* .proto interface.
  * for what changes are allowed for a *stable* .proto interface.
  */
  */
 
 
-option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_package = "org.apache.hadoop.security.proto";
 option java_outer_classname = "RefreshAuthorizationPolicyProtocolProtos";
 option java_outer_classname = "RefreshAuthorizationPolicyProtocolProtos";
 option java_generic_services = true;
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
+package hadoop.common;
 
 
 /**
 /**
  *  Refresh service acl request.
  *  Refresh service acl request.

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/RefreshUserMappingsProtocol.proto → hadoop-common-project/hadoop-common/src/main/proto/RefreshUserMappingsProtocol.proto

@@ -22,11 +22,11 @@
  * for what changes are allowed for a *stable* .proto interface.
  * for what changes are allowed for a *stable* .proto interface.
  */
  */
 
 
-option java_package = "org.apache.hadoop.hdfs.protocol.proto";
+option java_package = "org.apache.hadoop.security.proto";
 option java_outer_classname = "RefreshUserMappingsProtocolProtos";
 option java_outer_classname = "RefreshUserMappingsProtocolProtos";
 option java_generic_services = true;
 option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 option java_generate_equals_and_hash = true;
-package hadoop.hdfs;
+package hadoop.common;
 
 
 /**
 /**
  *  Refresh user to group mappings request.
  *  Refresh user to group mappings request.

+ 8 - 2
hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto

@@ -62,7 +62,11 @@ message RpcRequestHeaderProto { // the header for the RpcRequest
 
 
   optional RpcKindProto rpcKind = 1;
   optional RpcKindProto rpcKind = 1;
   optional OperationProto rpcOp = 2;
   optional OperationProto rpcOp = 2;
-  required uint32 callId = 3; // each rpc has a callId that is also used in response
+  required uint32 callId = 3; // a sequence number that is sent back in response
+  required bytes clientId = 4; // Globally unique client ID
+  // clientId + callId uniquely identifies a request
+  // retry count, 1 means this is the first retry
+  optional sint32 retryCount = 5 [default = -1];
 }
 }
 
 
 
 
@@ -126,6 +130,8 @@ message RpcResponseHeaderProto {
   optional string exceptionClassName = 4;  // if request fails
   optional string exceptionClassName = 4;  // if request fails
  optional string errorMsg = 5;  // if request fails, often contains stack trace
  optional string errorMsg = 5;  // if request fails, often contains stack trace
   optional RpcErrorCodeProto errorDetail = 6; // in case of error
   optional RpcErrorCodeProto errorDetail = 6; // in case of error
+  optional bytes clientId = 7; // Globally unique client ID
+  optional sint32 retryCount = 8 [default = -1];
 }
 }
 
 
 message RpcSaslProto {
 message RpcSaslProto {
@@ -149,4 +155,4 @@ message RpcSaslProto {
   required SaslState state = 2;
   required SaslState state = 2;
   optional bytes token     = 3;
   optional bytes token     = 3;
   repeated SaslAuth auths  = 4;
   repeated SaslAuth auths  = 4;
-}
+}

+ 1 - 1
hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm

@@ -517,7 +517,7 @@ hand-in-hand to address this.
   * Annotations for interfaces as per interface classification
   * Annotations for interfaces as per interface classification
     schedule -
     schedule -
     {{{https://issues.apache.org/jira/browse/HADOOP-7391}HADOOP-7391}}
     {{{https://issues.apache.org/jira/browse/HADOOP-7391}HADOOP-7391}}
-    {{{InterfaceClassification.html}Hadoop Interface Classification}}
+    {{{./InterfaceClassification.html}Hadoop Interface Classification}}
 
 
   * Compatibility for Hadoop 1.x releases -
   * Compatibility for Hadoop 1.x releases -
     {{{https://issues.apache.org/jira/browse/HADOOP-5071}HADOOP-5071}}
     {{{https://issues.apache.org/jira/browse/HADOOP-5071}HADOOP-5071}}

+ 1 - 1
hadoop-common-project/hadoop-common/src/site/apt/SingleNodeSetup.apt.vm

@@ -221,7 +221,7 @@ Single Node Setup
 * Fully-Distributed Operation
 * Fully-Distributed Operation
 
 
    For information on setting up fully-distributed, non-trivial clusters
    For information on setting up fully-distributed, non-trivial clusters
-   see {{{Cluster Setup}}}.
+   see {{{./ClusterSetup.html}Cluster Setup}}.
 
 
    Java and JNI are trademarks or registered trademarks of Sun
    Java and JNI are trademarks or registered trademarks of Sun
    Microsystems, Inc. in the United States and other countries.
    Microsystems, Inc. in the United States and other countries.

+ 140 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSTestWrapper.java

@@ -0,0 +1,140 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+
+import org.apache.commons.lang.RandomStringUtils;
+import org.apache.hadoop.fs.Options.CreateOpts;
+
+/**
+ * Abstraction of filesystem functionality with additional helper methods
+ * commonly used in tests. This allows generic tests to be written which apply
+ * to the two filesystem abstractions in Hadoop: {@link FileSystem} and
+ * {@link FileContext}.
+ */
+public abstract class FSTestWrapper implements FSWrapper {
+
+  //
+  // Test helper methods taken from FileContextTestHelper
+  //
+
+  protected static final int DEFAULT_BLOCK_SIZE = 1024;
+  protected static final int DEFAULT_NUM_BLOCKS = 2;
+
+  protected String testRootDir = null;
+  protected String absTestRootDir = null;
+
+  public FSTestWrapper(String testRootDir) {
+    // Use default test dir if not provided
+    if (testRootDir == null || testRootDir.isEmpty()) {
+      testRootDir = System.getProperty("test.build.data", "build/test/data");
+    }
+    // salt test dir with some random characters for safe parallel runs
+    this.testRootDir = testRootDir + "/"
+        + RandomStringUtils.randomAlphanumeric(10);
+  }
+
+  public static byte[] getFileData(int numOfBlocks, long blockSize) {
+    byte[] data = new byte[(int) (numOfBlocks * blockSize)];
+    for (int i = 0; i < data.length; i++) {
+      data[i] = (byte) (i % 10);
+    }
+    return data;
+  }
+
+  public Path getTestRootPath() {
+    return makeQualified(new Path(testRootDir));
+  }
+
+  public Path getTestRootPath(String pathString) {
+    return makeQualified(new Path(testRootDir, pathString));
+  }
+
+  // the getAbsolutexxx method is needed because the root test dir
+  // can be messed up by changing the working dir.
+
+  public String getAbsoluteTestRootDir() throws IOException {
+    if (absTestRootDir == null) {
+      if (testRootDir.startsWith("/")) {
+        absTestRootDir = testRootDir;
+      } else {
+        absTestRootDir = getWorkingDirectory().toString() + "/"
+            + testRootDir;
+      }
+    }
+    return absTestRootDir;
+  }
+
+  public Path getAbsoluteTestRootPath() throws IOException {
+    return makeQualified(new Path(getAbsoluteTestRootDir()));
+  }
+
+  abstract public FSTestWrapper getLocalFSWrapper()
+      throws UnsupportedFileSystemException, IOException;
+
+  abstract public Path getDefaultWorkingDirectory() throws IOException;
+
+  /*
+   * Create files with numBlocks blocks each with block size blockSize.
+   */
+  abstract public long createFile(Path path, int numBlocks,
+      CreateOpts... options) throws IOException;
+
+  abstract public long createFile(Path path, int numBlocks, int blockSize)
+      throws IOException;
+
+  abstract public long createFile(Path path) throws IOException;
+
+  abstract public long createFile(String name) throws IOException;
+
+  abstract public long createFileNonRecursive(String name) throws IOException;
+
+  abstract public long createFileNonRecursive(Path path) throws IOException;
+
+  abstract public void appendToFile(Path path, int numBlocks,
+      CreateOpts... options) throws IOException;
+
+  abstract public boolean exists(Path p) throws IOException;
+
+  abstract public boolean isFile(Path p) throws IOException;
+
+  abstract public boolean isDir(Path p) throws IOException;
+
+  abstract public boolean isSymlink(Path p) throws IOException;
+
+  abstract public void writeFile(Path path, byte b[]) throws IOException;
+
+  abstract public byte[] readFile(Path path, int len) throws IOException;
+
+  abstract public FileStatus containsPath(Path path, FileStatus[] dirList)
+      throws IOException;
+
+  abstract public FileStatus containsPath(String path, FileStatus[] dirList)
+      throws IOException;
+
+  enum fileType {
+    isDir, isFile, isSymlink
+  };
+
+  abstract public void checkFileStatus(String path, fileType expectedType)
+      throws IOException;
+
+  abstract public void checkFileLinkStatus(String path, fileType expectedType)
+      throws IOException;
+}

+ 112 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FSWrapper.java

@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
+
+/**
+ * Abstraction of filesystem operations that is essentially an interface
+ * extracted from {@link FileContext}.
+ */
+public interface FSWrapper {
+
+  abstract public void setWorkingDirectory(final Path newWDir)
+      throws IOException;
+
+  abstract public Path getWorkingDirectory();
+
+  abstract public Path makeQualified(final Path path);
+
+  abstract public FSDataOutputStream create(final Path f,
+      final EnumSet<CreateFlag> createFlag, Options.CreateOpts... opts)
+      throws AccessControlException, FileAlreadyExistsException,
+      FileNotFoundException, ParentNotDirectoryException,
+      UnsupportedFileSystemException, IOException;
+
+  abstract public void mkdir(final Path dir, final FsPermission permission,
+      final boolean createParent) throws AccessControlException,
+      FileAlreadyExistsException, FileNotFoundException,
+      ParentNotDirectoryException, UnsupportedFileSystemException, IOException;
+
+  abstract public boolean delete(final Path f, final boolean recursive)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException;
+
+  abstract public FSDataInputStream open(final Path f)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException;
+
+  abstract public boolean setReplication(final Path f, final short replication)
+      throws AccessControlException, FileNotFoundException,
+      IOException;
+
+  abstract public void rename(final Path src, final Path dst,
+      final Options.Rename... options) throws AccessControlException,
+      FileAlreadyExistsException, FileNotFoundException,
+      ParentNotDirectoryException, UnsupportedFileSystemException, IOException;
+
+  abstract public void setPermission(final Path f, final FsPermission permission)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException;
+
+  abstract public void setOwner(final Path f, final String username,
+      final String groupname) throws AccessControlException,
+      UnsupportedFileSystemException, FileNotFoundException,
+      IOException;
+
+  abstract public void setTimes(final Path f, final long mtime, final long atime)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException;
+
+  abstract public FileChecksum getFileChecksum(final Path f)
+      throws AccessControlException, FileNotFoundException, IOException;
+
+  abstract public FileStatus getFileStatus(final Path f)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException;
+
+  abstract public FileStatus getFileLinkStatus(final Path f)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException;
+
+  abstract public Path getLinkTarget(final Path f)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException;
+
+  abstract public BlockLocation[] getFileBlockLocations(final Path f,
+      final long start, final long len) throws AccessControlException,
+      FileNotFoundException, UnsupportedFileSystemException, IOException;
+
+  abstract public void createSymlink(final Path target, final Path link,
+      final boolean createParent) throws AccessControlException,
+      FileAlreadyExistsException, FileNotFoundException,
+      ParentNotDirectoryException, UnsupportedFileSystemException, IOException;
+
+  abstract public RemoteIterator<FileStatus> listStatusIterator(final Path f)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException;
+
+  abstract public FileStatus[] listStatus(final Path f)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException;
+}

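The point of this interface is that one test body can be written once and run against both FileContext and FileSystem. A minimal sketch of that pattern, assuming the local file system; the class name and /tmp paths are hypothetical and not part of this patch:

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSTestWrapper;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestWrapper;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestWrapper;
import org.apache.hadoop.fs.Path;

public class WrapperRoundTripSketch {
  public static void main(String[] args) throws IOException {
    // One wrapper per API; both expose the same FSWrapper operations.
    FSTestWrapper[] wrappers = new FSTestWrapper[] {
        new FileContextTestWrapper(FileContext.getLocalFSFileContext()),
        new FileSystemTestWrapper(FileSystem.getLocal(new Configuration()))
    };
    int i = 0;
    for (FSTestWrapper w : wrappers) {
      Path p = new Path("/tmp/fswrapper-demo-" + i++);  // hypothetical paths
      w.writeFile(p, "hello".getBytes(StandardCharsets.UTF_8));
      byte[] back = w.readFile(p, 5);
      System.out.println(w.isFile(p) + " "
          + new String(back, StandardCharsets.UTF_8));
    }
  }
}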
+ 335 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextTestWrapper.java

@@ -0,0 +1,335 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.fs.Options.CreateOpts;
+import org.apache.hadoop.fs.Options.CreateOpts.BlockSize;
+import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
+import org.junit.Assert;
+
+/**
+ * Helper class for unit tests.
+ */
+public final class FileContextTestWrapper extends FSTestWrapper {
+
+  private final FileContext fc;
+
+  public FileContextTestWrapper(FileContext context) {
+    this(context, null);
+  }
+
+  public FileContextTestWrapper(FileContext context, String rootDir) {
+    super(rootDir);
+    this.fc = context;
+  }
+
+  public FSTestWrapper getLocalFSWrapper()
+      throws UnsupportedFileSystemException {
+    return new FileContextTestWrapper(FileContext.getLocalFSFileContext());
+  }
+
+  public Path getDefaultWorkingDirectory() throws IOException {
+    return getTestRootPath("/user/" + System.getProperty("user.name"))
+        .makeQualified(fc.getDefaultFileSystem().getUri(),
+            fc.getWorkingDirectory());
+  }
+
+  /*
+   * Create files with numBlocks blocks each with block size blockSize.
+   */
+  public long createFile(Path path, int numBlocks, CreateOpts... options)
+      throws IOException {
+    BlockSize blockSizeOpt =
+      (BlockSize) CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
+    long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
+        : DEFAULT_BLOCK_SIZE;
+    FSDataOutputStream out =
+      fc.create(path, EnumSet.of(CreateFlag.CREATE), options);
+    byte[] data = getFileData(numBlocks, blockSize);
+    out.write(data, 0, data.length);
+    out.close();
+    return data.length;
+  }
+
+  public long createFile(Path path, int numBlocks, int blockSize)
+      throws IOException {
+    return createFile(path, numBlocks, CreateOpts.blockSize(blockSize),
+        CreateOpts.createParent());
+  }
+
+  public long createFile(Path path) throws IOException {
+    return createFile(path, DEFAULT_NUM_BLOCKS, CreateOpts.createParent());
+  }
+
+  public long createFile(String name) throws IOException {
+    Path path = getTestRootPath(name);
+    return createFile(path);
+  }
+
+  public long createFileNonRecursive(String name) throws IOException {
+    Path path = getTestRootPath(name);
+    return createFileNonRecursive(path);
+  }
+
+  public long createFileNonRecursive(Path path) throws IOException {
+    return createFile(path, DEFAULT_NUM_BLOCKS, CreateOpts.donotCreateParent());
+  }
+
+  public void appendToFile(Path path, int numBlocks, CreateOpts... options)
+      throws IOException {
+    BlockSize blockSizeOpt =
+      (BlockSize) CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
+    long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
+        : DEFAULT_BLOCK_SIZE;
+    FSDataOutputStream out;
+    out = fc.create(path, EnumSet.of(CreateFlag.APPEND));
+    byte[] data = getFileData(numBlocks, blockSize);
+    out.write(data, 0, data.length);
+    out.close();
+  }
+
+  public boolean exists(Path p) throws IOException {
+    return fc.util().exists(p);
+  }
+
+  public boolean isFile(Path p) throws IOException {
+    try {
+      return fc.getFileStatus(p).isFile();
+    } catch (FileNotFoundException e) {
+      return false;
+    }
+  }
+
+  public boolean isDir(Path p) throws IOException {
+    try {
+      return fc.getFileStatus(p).isDirectory();
+    } catch (FileNotFoundException e) {
+      return false;
+    }
+  }
+
+  public boolean isSymlink(Path p) throws IOException {
+    try {
+      return fc.getFileLinkStatus(p).isSymlink();
+    } catch (FileNotFoundException e) {
+      return false;
+    }
+  }
+
+  public void writeFile(Path path, byte b[]) throws IOException {
+    FSDataOutputStream out =
+      fc.create(path, EnumSet.of(CreateFlag.CREATE), CreateOpts.createParent());
+    out.write(b);
+    out.close();
+  }
+
+  public byte[] readFile(Path path, int len) throws IOException {
+    DataInputStream dis = fc.open(path);
+    byte[] buffer = new byte[len];
+    IOUtils.readFully(dis, buffer, 0, len);
+    dis.close();
+    return buffer;
+  }
+
+  public FileStatus containsPath(Path path, FileStatus[] dirList)
+    throws IOException {
+    for (int i = 0; i < dirList.length; i++) {
+      if (path.equals(dirList[i].getPath()))
+        return dirList[i];
+    }
+    return null;
+  }
+
+  public FileStatus containsPath(String path, FileStatus[] dirList)
+     throws IOException {
+    return containsPath(new Path(path), dirList);
+  }
+
+  public void checkFileStatus(String path, fileType expectedType)
+      throws IOException {
+    FileStatus s = fc.getFileStatus(new Path(path));
+    Assert.assertNotNull(s);
+    if (expectedType == fileType.isDir) {
+      Assert.assertTrue(s.isDirectory());
+    } else if (expectedType == fileType.isFile) {
+      Assert.assertTrue(s.isFile());
+    } else if (expectedType == fileType.isSymlink) {
+      Assert.assertTrue(s.isSymlink());
+    }
+    Assert.assertEquals(fc.makeQualified(new Path(path)), s.getPath());
+  }
+
+  public void checkFileLinkStatus(String path, fileType expectedType)
+      throws IOException {
+    FileStatus s = fc.getFileLinkStatus(new Path(path));
+    Assert.assertNotNull(s);
+    if (expectedType == fileType.isDir) {
+      Assert.assertTrue(s.isDirectory());
+    } else if (expectedType == fileType.isFile) {
+      Assert.assertTrue(s.isFile());
+    } else if (expectedType == fileType.isSymlink) {
+      Assert.assertTrue(s.isSymlink());
+    }
+    Assert.assertEquals(fc.makeQualified(new Path(path)), s.getPath());
+  }
+
+  //
+  // FileContext wrappers
+  //
+
+  @Override
+  public Path makeQualified(Path path) {
+    return fc.makeQualified(path);
+  }
+
+  @Override
+  public void mkdir(Path dir, FsPermission permission, boolean createParent)
+      throws AccessControlException, FileAlreadyExistsException,
+      FileNotFoundException, ParentNotDirectoryException,
+      UnsupportedFileSystemException, IOException {
+    fc.mkdir(dir, permission, createParent);
+  }
+
+  @Override
+  public boolean delete(Path f, boolean recursive)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    return fc.delete(f, recursive);
+  }
+
+  @Override
+  public FileStatus getFileLinkStatus(Path f) throws AccessControlException,
+      FileNotFoundException, UnsupportedFileSystemException, IOException {
+    return fc.getFileLinkStatus(f);
+  }
+
+  @Override
+  public void createSymlink(Path target, Path link, boolean createParent)
+      throws AccessControlException, FileAlreadyExistsException,
+      FileNotFoundException, ParentNotDirectoryException,
+      UnsupportedFileSystemException, IOException {
+    fc.createSymlink(target, link, createParent);
+  }
+
+  @Override
+  public void setWorkingDirectory(Path newWDir) throws IOException {
+    fc.setWorkingDirectory(newWDir);
+  }
+
+  @Override
+  public Path getWorkingDirectory() {
+    return fc.getWorkingDirectory();
+  }
+
+  @Override
+  public FileStatus getFileStatus(Path f) throws AccessControlException,
+      FileNotFoundException, UnsupportedFileSystemException, IOException {
+    return fc.getFileStatus(f);
+  }
+
+  @Override
+  public FSDataOutputStream create(Path f, EnumSet<CreateFlag> createFlag,
+      CreateOpts... opts) throws AccessControlException,
+      FileAlreadyExistsException, FileNotFoundException,
+      ParentNotDirectoryException, UnsupportedFileSystemException, IOException {
+    return fc.create(f, createFlag, opts);
+  }
+
+  @Override
+  public FSDataInputStream open(Path f) throws AccessControlException,
+      FileNotFoundException, UnsupportedFileSystemException, IOException {
+    return fc.open(f);
+  }
+
+  @Override
+  public boolean setReplication(final Path f, final short replication)
+      throws AccessControlException, FileNotFoundException,
+      IOException {
+    return fc.setReplication(f, replication);
+  }
+
+  @Override
+  public Path getLinkTarget(Path f) throws AccessControlException,
+      FileNotFoundException, UnsupportedFileSystemException, IOException {
+    return fc.getLinkTarget(f);
+  }
+
+  @Override
+  public void rename(Path src, Path dst, Rename... options)
+      throws AccessControlException, FileAlreadyExistsException,
+      FileNotFoundException, ParentNotDirectoryException,
+      UnsupportedFileSystemException, IOException {
+    fc.rename(src, dst, options);
+  }
+
+  @Override
+  public BlockLocation[] getFileBlockLocations(Path f, long start, long len)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    return fc.getFileBlockLocations(f, start, len);
+  }
+
+  @Override
+  public FileChecksum getFileChecksum(Path f) throws AccessControlException,
+      FileNotFoundException, IOException {
+    return fc.getFileChecksum(f);
+  }
+
+  @Override
+  public RemoteIterator<FileStatus> listStatusIterator(Path f)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    return fc.listStatus(f);
+  }
+
+  @Override
+  public void setPermission(final Path f, final FsPermission permission)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    fc.setPermission(f, permission);
+  }
+
+  @Override
+  public void setOwner(final Path f, final String username,
+      final String groupname) throws AccessControlException,
+      UnsupportedFileSystemException, FileNotFoundException,
+      IOException {
+    fc.setOwner(f, username, groupname);
+  }
+
+  @Override
+  public void setTimes(Path f, long mtime, long atime)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    fc.setTimes(f, mtime, atime);
+  }
+
+  @Override
+  public FileStatus[] listStatus(Path f) throws AccessControlException,
+      FileNotFoundException, UnsupportedFileSystemException, IOException {
+    return fc.util().listStatus(f);
+  }
+}

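As a usage note, the createFile/readFile helpers above hide the CreateOpts plumbing behind a single call. A minimal sketch of driving them directly against the local FileContext; the class name and /tmp path are hypothetical and not part of this patch:

import java.io.IOException;

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestWrapper;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;

public class FileContextWrapperSketch {
  public static void main(String[] args) throws IOException {
    FileContextTestWrapper w =
        new FileContextTestWrapper(FileContext.getLocalFSFileContext());
    Path f = new Path("/tmp/fc-wrapper-demo");  // hypothetical test path
    // Write two 4 KB "blocks"; the helper returns the number of bytes written.
    long written = w.createFile(f, 2, CreateOpts.blockSize(4096),
        CreateOpts.createParent());
    byte[] data = w.readFile(f, (int) written);
    System.out.println("wrote " + written + " bytes, read " + data.length);
  }
}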
+ 401 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestWrapper.java

@@ -0,0 +1,401 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.DataInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.EnumSet;
+
+import org.apache.hadoop.fs.Options.CreateOpts;
+import org.apache.hadoop.fs.Options.CreateOpts.BlockSize;
+import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.util.Progressable;
+import org.junit.Assert;
+
+/**
+ * Helper class for unit tests.
+ */
+public final class FileSystemTestWrapper extends FSTestWrapper {
+
+  private final FileSystem fs;
+
+  public FileSystemTestWrapper(FileSystem fs) {
+    this(fs, null);
+  }
+
+  public FileSystemTestWrapper(FileSystem fs, String rootDir) {
+    super(rootDir);
+    this.fs = fs;
+  }
+
+  public FSTestWrapper getLocalFSWrapper()
+      throws IOException {
+    return new FileSystemTestWrapper(FileSystem.getLocal(fs.getConf()));
+  }
+
+  public Path getDefaultWorkingDirectory() throws IOException {
+    return getTestRootPath("/user/" + System.getProperty("user.name"))
+        .makeQualified(fs.getUri(),
+            fs.getWorkingDirectory());
+  }
+
+  /*
+   * Create files with numBlocks blocks each with block size blockSize.
+   */
+  public long createFile(Path path, int numBlocks, CreateOpts... options)
+      throws IOException {
+    BlockSize blockSizeOpt =
+      (BlockSize) CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
+    long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
+        : DEFAULT_BLOCK_SIZE;
+    FSDataOutputStream out =
+      create(path, EnumSet.of(CreateFlag.CREATE), options);
+    byte[] data = getFileData(numBlocks, blockSize);
+    out.write(data, 0, data.length);
+    out.close();
+    return data.length;
+  }
+
+  public long createFile(Path path, int numBlocks, int blockSize)
+      throws IOException {
+    return createFile(path, numBlocks, CreateOpts.blockSize(blockSize),
+        CreateOpts.createParent());
+  }
+
+  public long createFile(Path path) throws IOException {
+    return createFile(path, DEFAULT_NUM_BLOCKS, CreateOpts.createParent());
+  }
+
+  public long createFile(String name) throws IOException {
+    Path path = getTestRootPath(name);
+    return createFile(path);
+  }
+
+  public long createFileNonRecursive(String name) throws IOException {
+    Path path = getTestRootPath(name);
+    return createFileNonRecursive(path);
+  }
+
+  public long createFileNonRecursive(Path path) throws IOException {
+    return createFile(path, DEFAULT_NUM_BLOCKS, CreateOpts.donotCreateParent());
+  }
+
+  public void appendToFile(Path path, int numBlocks, CreateOpts... options)
+      throws IOException {
+    BlockSize blockSizeOpt =
+      (BlockSize) CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
+    long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
+        : DEFAULT_BLOCK_SIZE;
+    FSDataOutputStream out;
+    out = fs.append(path);
+    byte[] data = getFileData(numBlocks, blockSize);
+    out.write(data, 0, data.length);
+    out.close();
+  }
+
+  public boolean exists(Path p) throws IOException {
+    return fs.exists(p);
+  }
+
+  public boolean isFile(Path p) throws IOException {
+    try {
+      return fs.getFileStatus(p).isFile();
+    } catch (FileNotFoundException e) {
+      return false;
+    }
+  }
+
+  public boolean isDir(Path p) throws IOException {
+    try {
+      return fs.getFileStatus(p).isDirectory();
+    } catch (FileNotFoundException e) {
+      return false;
+    }
+  }
+
+  public boolean isSymlink(Path p) throws IOException {
+    try {
+      return fs.getFileLinkStatus(p).isSymlink();
+    } catch (FileNotFoundException e) {
+      return false;
+    }
+  }
+
+  public void writeFile(Path path, byte b[]) throws IOException {
+    FSDataOutputStream out =
+      create(path, EnumSet.of(CreateFlag.CREATE), CreateOpts.createParent());
+    out.write(b);
+    out.close();
+  }
+
+  public byte[] readFile(Path path, int len) throws IOException {
+    DataInputStream dis = fs.open(path);
+    byte[] buffer = new byte[len];
+    IOUtils.readFully(dis, buffer, 0, len);
+    dis.close();
+    return buffer;
+  }
+
+  public FileStatus containsPath(Path path, FileStatus[] dirList)
+    throws IOException {
+    for (int i = 0; i < dirList.length; i++) {
+      if (path.equals(dirList[i].getPath()))
+        return dirList[i];
+    }
+    return null;
+  }
+
+  public FileStatus containsPath(String path, FileStatus[] dirList)
+     throws IOException {
+    return containsPath(new Path(path), dirList);
+  }
+
+  public void checkFileStatus(String path, fileType expectedType)
+      throws IOException {
+    FileStatus s = fs.getFileStatus(new Path(path));
+    Assert.assertNotNull(s);
+    if (expectedType == fileType.isDir) {
+      Assert.assertTrue(s.isDirectory());
+    } else if (expectedType == fileType.isFile) {
+      Assert.assertTrue(s.isFile());
+    } else if (expectedType == fileType.isSymlink) {
+      Assert.assertTrue(s.isSymlink());
+    }
+    Assert.assertEquals(fs.makeQualified(new Path(path)), s.getPath());
+  }
+
+  public void checkFileLinkStatus(String path, fileType expectedType)
+      throws IOException {
+    FileStatus s = fs.getFileLinkStatus(new Path(path));
+    Assert.assertNotNull(s);
+    if (expectedType == fileType.isDir) {
+      Assert.assertTrue(s.isDirectory());
+    } else if (expectedType == fileType.isFile) {
+      Assert.assertTrue(s.isFile());
+    } else if (expectedType == fileType.isSymlink) {
+      Assert.assertTrue(s.isSymlink());
+    }
+    Assert.assertEquals(fs.makeQualified(new Path(path)), s.getPath());
+  }
+
+  //
+  // FileSystem wrappers
+  //
+
+  @Override
+  public Path makeQualified(Path path) {
+    return fs.makeQualified(path);
+  }
+
+  @Override
+  public void mkdir(Path dir, FsPermission permission, boolean createParent)
+      throws AccessControlException, FileAlreadyExistsException,
+      FileNotFoundException, ParentNotDirectoryException,
+      UnsupportedFileSystemException, IOException {
+    // Note that there is no "mkdir" in FileSystem, it always does
+    // "mkdir -p" (creating parent directories).
+    fs.mkdirs(dir, permission);
+  }
+
+  @Override
+  public boolean delete(Path f, boolean recursive)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    return fs.delete(f, recursive);
+  }
+
+  @Override
+  public FileStatus getFileLinkStatus(Path f) throws AccessControlException,
+      FileNotFoundException, UnsupportedFileSystemException, IOException {
+    return fs.getFileLinkStatus(f);
+  }
+
+  @Override
+  public void createSymlink(Path target, Path link, boolean createParent)
+      throws AccessControlException, FileAlreadyExistsException,
+      FileNotFoundException, ParentNotDirectoryException,
+      UnsupportedFileSystemException, IOException {
+    fs.createSymlink(target, link, createParent);
+  }
+
+  @Override
+  public void setWorkingDirectory(Path newWDir) throws IOException {
+    fs.setWorkingDirectory(newWDir);
+  }
+
+  @Override
+  public Path getWorkingDirectory() {
+    return fs.getWorkingDirectory();
+  }
+
+  @Override
+  public FileStatus getFileStatus(Path f) throws AccessControlException,
+      FileNotFoundException, UnsupportedFileSystemException, IOException {
+    return fs.getFileStatus(f);
+  }
+
+  @Override
+  public FSDataOutputStream create(Path f, EnumSet<CreateFlag> createFlag,
+      CreateOpts... opts) throws AccessControlException,
+      FileAlreadyExistsException, FileNotFoundException,
+      ParentNotDirectoryException, UnsupportedFileSystemException, IOException {
+
+    // Need to translate the FileContext-style options into FileSystem-style
+
+    // Permissions with umask
+    CreateOpts.Perms permOpt = (CreateOpts.Perms) CreateOpts.getOpt(
+        CreateOpts.Perms.class, opts);
+    FsPermission umask = FsPermission.getUMask(fs.getConf());
+    FsPermission permission = (permOpt != null) ? permOpt.getValue()
+        : FsPermission.getFileDefault().applyUMask(umask);
+    permission = permission.applyUMask(umask);
+    // Overwrite
+    boolean overwrite = createFlag.contains(CreateFlag.OVERWRITE);
+    // bufferSize
+    int bufferSize = fs.getConf().getInt(
+        CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
+        CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
+    CreateOpts.BufferSize bufOpt = (CreateOpts.BufferSize) CreateOpts.getOpt(
+        CreateOpts.BufferSize.class, opts);
+    bufferSize = (bufOpt != null) ? bufOpt.getValue() : bufferSize;
+    // replication
+    short replication = fs.getDefaultReplication(f);
+    CreateOpts.ReplicationFactor repOpt =
+        (CreateOpts.ReplicationFactor) CreateOpts.getOpt(
+            CreateOpts.ReplicationFactor.class, opts);
+    replication = (repOpt != null) ? repOpt.getValue() : replication;
+    // blockSize
+    long blockSize = fs.getDefaultBlockSize(f);
+    CreateOpts.BlockSize blockOpt = (CreateOpts.BlockSize) CreateOpts.getOpt(
+        CreateOpts.BlockSize.class, opts);
+    blockSize = (blockOpt != null) ? blockOpt.getValue() : blockSize;
+    // Progressable
+    Progressable progress = null;
+    CreateOpts.Progress progressOpt = (CreateOpts.Progress) CreateOpts.getOpt(
+        CreateOpts.Progress.class, opts);
+    progress = (progressOpt != null) ? progressOpt.getValue() : progress;
+    return fs.create(f, permission, overwrite, bufferSize, replication,
+        blockSize, progress);
+  }
+
+  @Override
+  public FSDataInputStream open(Path f) throws AccessControlException,
+      FileNotFoundException, UnsupportedFileSystemException, IOException {
+    return fs.open(f);
+  }
+
+  @Override
+  public Path getLinkTarget(Path f) throws AccessControlException,
+      FileNotFoundException, UnsupportedFileSystemException, IOException {
+    return fs.getLinkTarget(f);
+  }
+
+  @Override
+  public boolean setReplication(final Path f, final short replication)
+      throws AccessControlException, FileNotFoundException,
+      IOException {
+    return fs.setReplication(f, replication);
+  }
+
+  @SuppressWarnings("deprecation")
+  @Override
+  public void rename(Path src, Path dst, Rename... options)
+      throws AccessControlException, FileAlreadyExistsException,
+      FileNotFoundException, ParentNotDirectoryException,
+      UnsupportedFileSystemException, IOException {
+    fs.rename(src, dst, options);
+  }
+
+  @Override
+  public BlockLocation[] getFileBlockLocations(Path f, long start, long len)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    return fs.getFileBlockLocations(f, start, len);
+  }
+
+  @Override
+  public FileChecksum getFileChecksum(Path f) throws AccessControlException,
+      FileNotFoundException, IOException {
+    return fs.getFileChecksum(f);
+  }
+
+  private class FakeRemoteIterator<E> implements RemoteIterator<E> {
+
+    private E[] elements;
+    private int count;
+
+    FakeRemoteIterator(E[] elements) {
+      this.elements = elements;
+      count = 0;
+    }
+
+    @Override
+    public boolean hasNext() throws IOException {
+      return count < elements.length;
+    }
+
+    @Override
+    public E next() throws IOException {
+      if (hasNext()) {
+        return elements[count++];
+      }
+      return null;
+    }
+  }
+
+  @Override
+  public RemoteIterator<FileStatus> listStatusIterator(Path f)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    // Fake the RemoteIterator, because FileSystem has no such thing
+    FileStatus[] statuses = fs.listStatus(f);
+    return new FakeRemoteIterator<FileStatus>(statuses);
+  }
+
+  @Override
+  public void setPermission(final Path f, final FsPermission permission)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    fs.setPermission(f, permission);
+  }
+
+  @Override
+  public void setOwner(final Path f, final String username,
+      final String groupname) throws AccessControlException,
+      UnsupportedFileSystemException, FileNotFoundException,
+      IOException {
+    fs.setOwner(f, username, groupname);
+  }
+
+  @Override
+  public void setTimes(Path f, long mtime, long atime)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    fs.setTimes(f, mtime, atime);
+  }
+
+  @Override
+  public FileStatus[] listStatus(Path f) throws AccessControlException,
+      FileNotFoundException, UnsupportedFileSystemException, IOException {
+    return fs.listStatus(f);
+  }
+}

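The create() override above is where the FileContext-style CreateFlag set and CreateOpts are translated into the positional arguments of FileSystem.create() (permission with umask applied, overwrite, buffer size, replication, block size, progressable). A minimal sketch of exercising that translation; the class name, /tmp path, and option values are hypothetical and not part of this patch:

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestWrapper;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;

public class FileSystemWrapperCreateSketch {
  public static void main(String[] args) throws IOException {
    FileSystemTestWrapper w =
        new FileSystemTestWrapper(FileSystem.getLocal(new Configuration()));
    Path f = new Path("/tmp/fs-wrapper-create-demo");  // hypothetical test path
    // CREATE|OVERWRITE plus CreateOpts map onto
    // fs.create(path, perm, overwrite, bufferSize, replication, blockSize, progress).
    FSDataOutputStream out = w.create(f,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.repFac((short) 1), CreateOpts.blockSize(4096));
    out.writeBytes("data");
    out.close();
  }
}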
+ 1373 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/SymlinkBaseTest.java

@@ -0,0 +1,1373 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.*;
+import java.net.URI;
+import java.util.EnumSet;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Options.CreateOpts;
+import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import static org.junit.Assert.*;
+import static org.junit.Assume.assumeTrue;
+import org.junit.Test;
+import org.junit.Before;
+import org.junit.After;
+
+/**
+ * Base test for symbolic links
+ */
+public abstract class SymlinkBaseTest {
+  static final long seed = 0xDEADBEEFL;
+  static final int  blockSize =  8192;
+  static final int  fileSize  = 16384;
+  static final int numBlocks = fileSize / blockSize;
+
+  protected static FSTestWrapper wrapper;
+
+  abstract protected String getScheme();
+  abstract protected String testBaseDir1() throws IOException;
+  abstract protected String testBaseDir2() throws IOException;
+  abstract protected URI testURI();
+
+  protected IOException unwrapException(IOException e) {
+    return e;
+  }
+
+  protected static void createAndWriteFile(Path p) throws IOException {
+    createAndWriteFile(wrapper, p);
+  }
+
+  protected static void createAndWriteFile(FSTestWrapper wrapper, Path p)
+      throws IOException {
+    wrapper.createFile(p, numBlocks, CreateOpts.createParent(),
+        CreateOpts.repFac((short) 1), CreateOpts.blockSize(blockSize));
+  }
+
+  protected static void readFile(Path p) throws IOException {
+    wrapper.readFile(p, fileSize);
+  }
+
+  protected static void appendToFile(Path p) throws IOException {
+    wrapper.appendToFile(p, numBlocks,
+        CreateOpts.blockSize(blockSize));
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    wrapper.mkdir(new Path(testBaseDir1()), FileContext.DEFAULT_PERM, true);
+    wrapper.mkdir(new Path(testBaseDir2()), FileContext.DEFAULT_PERM, true);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    wrapper.delete(new Path(testBaseDir1()), true);
+    wrapper.delete(new Path(testBaseDir2()), true);
+  }
+
+  @Test(timeout=10000)
+  /** The root is not a symlink */
+  public void testStatRoot() throws IOException {
+    assertFalse(wrapper.getFileLinkStatus(new Path("/")).isSymlink());
+  }
+
+  @Test(timeout=10000)
+  /** Test that setWorkingDirectory does not resolve symlinks */
+  public void testSetWDNotResolvesLinks() throws IOException {
+    Path dir       = new Path(testBaseDir1());
+    Path linkToDir = new Path(testBaseDir1()+"/link");
+    wrapper.createSymlink(dir, linkToDir, false);
+    wrapper.setWorkingDirectory(linkToDir);
+    assertEquals(linkToDir.getName(), wrapper.getWorkingDirectory().getName());
+  }
+
+  @Test(timeout=10000)
+  /** Test create a dangling link */
+  public void testCreateDanglingLink() throws IOException {
+    Path file = new Path("/noSuchFile");
+    Path link = new Path(testBaseDir1()+"/link");
+    wrapper.createSymlink(file, link, false);
+    try {
+      wrapper.getFileStatus(link);
+      fail("Got file status of non-existant file");
+    } catch (FileNotFoundException f) {
+      // Expected
+    }
+    wrapper.delete(link, false);
+  }
+
+  @Test(timeout=10000)
+  /** Test create a link to null and empty path */
+  public void testCreateLinkToNullEmpty() throws IOException {
+    Path link = new Path(testBaseDir1()+"/link");
+    try {
+      wrapper.createSymlink(null, link, false);
+      fail("Can't create symlink to null");
+    } catch (java.lang.NullPointerException e) {
+      // Expected, create* with null yields NPEs
+    }
+    try {
+      wrapper.createSymlink(new Path(""), link, false);
+      fail("Can't create symlink to empty string");
+    } catch (java.lang.IllegalArgumentException e) {
+      // Expected, Path("") is invalid
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Create a link with createParent set */
+  public void testCreateLinkCanCreateParent() throws IOException {
+    Path file = new Path(testBaseDir1()+"/file");
+    Path link = new Path(testBaseDir2()+"/linkToFile");
+    createAndWriteFile(file);
+    wrapper.delete(new Path(testBaseDir2()), true);
+    try {
+      wrapper.createSymlink(file, link, false);
+      fail("Created link without first creating parent dir");
+    } catch (IOException x) {
+      // Expected. Need to create testBaseDir2() first.
+    }
+    assertFalse(wrapper.exists(new Path(testBaseDir2())));
+    wrapper.createSymlink(file, link, true);
+    readFile(link);
+  }
+
+  @Test(timeout=10000)
+  /** Try to create a directory given a path that refers to a symlink */
+  public void testMkdirExistingLink() throws IOException {
+    Path dir  = new Path(testBaseDir1()+"/link");
+    wrapper.createSymlink(new Path("/doesNotExist"), dir, false);
+    try {
+      wrapper.mkdir(dir, FileContext.DEFAULT_PERM, false);
+      fail("Created a dir where a symlink exists");
+    } catch (FileAlreadyExistsException e) {
+      // Expected. The symlink already exists.
+    } catch (IOException e) {
+      // LocalFs just throws an IOException
+      assertEquals("file", getScheme());
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Try to create a file with parent that is a dangling link */
+  public void testCreateFileViaDanglingLinkParent() throws IOException {
+    Path dir  = new Path(testBaseDir1()+"/dangling");
+    Path file = new Path(testBaseDir1()+"/dangling/file");
+    wrapper.createSymlink(new Path("/doesNotExist"), dir, false);
+    FSDataOutputStream out;
+    try {
+      out = wrapper.create(file, EnumSet.of(CreateFlag.CREATE),
+                      CreateOpts.repFac((short) 1),
+                      CreateOpts.blockSize(blockSize));
+      out.close();
+      fail("Created a link with dangling link parent");
+    } catch (FileNotFoundException e) {
+      // Expected. The parent is dangling.
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Delete a link */
+  public void testDeleteLink() throws IOException {
+    Path file = new Path(testBaseDir1()+"/file");
+    Path link = new Path(testBaseDir1()+"/linkToFile");
+    createAndWriteFile(file);
+    wrapper.createSymlink(file, link, false);
+    readFile(link);
+    wrapper.delete(link, false);
+    try {
+      readFile(link);
+      fail("Symlink should have been deleted");
+    } catch (IOException x) {
+      // Expected
+    }
+    // If we deleted the link we can put it back
+    wrapper.createSymlink(file, link, false);
+  }
+
+  @Test(timeout=10000)
+  /** Ensure open resolves symlinks */
+  public void testOpenResolvesLinks() throws IOException {
+    Path file = new Path(testBaseDir1()+"/noSuchFile");
+    Path link = new Path(testBaseDir1()+"/link");
+    wrapper.createSymlink(file, link, false);
+    try {
+      wrapper.open(link);
+      fail("link target does not exist");
+    } catch (FileNotFoundException x) {
+      // Expected
+    }
+    wrapper.delete(link, false);
+  }
+
+  @Test(timeout=10000)
+  /** Stat a link to a file */
+  public void testStatLinkToFile() throws IOException {
+    Path file = new Path(testBaseDir1()+"/file");
+    Path linkToFile = new Path(testBaseDir1()+"/linkToFile");
+    createAndWriteFile(file);
+    wrapper.createSymlink(file, linkToFile, false);
+    assertFalse(wrapper.getFileLinkStatus(linkToFile).isDirectory());
+    assertTrue(wrapper.isSymlink(linkToFile));
+    assertTrue(wrapper.isFile(linkToFile));
+    assertFalse(wrapper.isDir(linkToFile));
+    assertEquals(file.toUri().getPath(),
+                 wrapper.getLinkTarget(linkToFile).toString());
+    // The local file system does not fully resolve the link
+    // when obtaining the file status
+    if (!"file".equals(getScheme())) {
+      assertEquals(wrapper.getFileStatus(file),
+                   wrapper.getFileStatus(linkToFile));
+      assertEquals(wrapper.makeQualified(file),
+                   wrapper.getFileStatus(linkToFile).getPath());
+      assertEquals(wrapper.makeQualified(linkToFile),
+                   wrapper.getFileLinkStatus(linkToFile).getPath());
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Stat a relative link to a file */
+  public void testStatRelLinkToFile() throws IOException {
+    assumeTrue(!"file".equals(getScheme()));
+    Path file       = new Path(testBaseDir1(), "file");
+    Path linkToFile = new Path(testBaseDir1(), "linkToFile");
+    createAndWriteFile(file);
+    wrapper.createSymlink(new Path("file"), linkToFile, false);
+    assertEquals(wrapper.getFileStatus(file),
+                 wrapper.getFileStatus(linkToFile));
+    assertEquals(wrapper.makeQualified(file),
+                 wrapper.getFileStatus(linkToFile).getPath());
+    assertEquals(wrapper.makeQualified(linkToFile),
+                 wrapper.getFileLinkStatus(linkToFile).getPath());
+  }
+
+  @Test(timeout=10000)
+  /** Stat a link to a directory */
+  public void testStatLinkToDir() throws IOException {
+    Path dir  = new Path(testBaseDir1());
+    Path linkToDir = new Path(testBaseDir1()+"/linkToDir");
+    wrapper.createSymlink(dir, linkToDir, false);
+
+    assertFalse(wrapper.getFileStatus(linkToDir).isSymlink());
+    assertTrue(wrapper.isDir(linkToDir));
+    assertFalse(wrapper.getFileLinkStatus(linkToDir).isDirectory());
+    assertTrue(wrapper.getFileLinkStatus(linkToDir).isSymlink());
+
+    assertFalse(wrapper.isFile(linkToDir));
+    assertTrue(wrapper.isDir(linkToDir));
+
+    assertEquals(dir.toUri().getPath(),
+                 wrapper.getLinkTarget(linkToDir).toString());
+  }
+
+  @Test(timeout=10000)
+  /** Stat a dangling link */
+  public void testStatDanglingLink() throws IOException {
+    Path file = new Path("/noSuchFile");
+    Path link = new Path(testBaseDir1()+"/link");
+    wrapper.createSymlink(file, link, false);
+    assertFalse(wrapper.getFileLinkStatus(link).isDirectory());
+    assertTrue(wrapper.getFileLinkStatus(link).isSymlink());
+  }
+
+  @Test(timeout=10000)
+  /** Stat a non-existent file */
+  public void testStatNonExistentFiles() throws IOException {
+    Path fileAbs = new Path("/doesNotExist");
+    try {
+      wrapper.getFileLinkStatus(fileAbs);
+      fail("Got FileStatus for non-existant file");
+    } catch (FileNotFoundException f) {
+      // Expected
+    }
+    try {
+      wrapper.getLinkTarget(fileAbs);
+      fail("Got link target for non-existant file");
+    } catch (FileNotFoundException f) {
+      // Expected
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Test stat'ing a regular file and directory */
+  public void testStatNonLinks() throws IOException {
+    Path dir   = new Path(testBaseDir1());
+    Path file  = new Path(testBaseDir1()+"/file");
+    createAndWriteFile(file);
+    try {
+      wrapper.getLinkTarget(dir);
+      fail("Lstat'd a non-symlink");
+    } catch (IOException e) {
+      // Expected.
+    }
+    try {
+      wrapper.getLinkTarget(file);
+      fail("Lstat'd a non-symlink");
+    } catch (IOException e) {
+      // Expected.
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Test links that link to each other */
+  public void testRecursiveLinks() throws IOException {
+    Path link1 = new Path(testBaseDir1()+"/link1");
+    Path link2 = new Path(testBaseDir1()+"/link2");
+    wrapper.createSymlink(link1, link2, false);
+    wrapper.createSymlink(link2, link1, false);
+    try {
+      readFile(link1);
+      fail("Read recursive link");
+    } catch (FileNotFoundException f) {
+      // LocalFs throws a subclass of IOException, since File.exists
+      // returns false for a link to a link.
+    } catch (IOException x) {
+      assertEquals("Possible cyclic loop while following symbolic link "+
+                   link1.toString(), x.getMessage());
+    }
+  }
+
+  /* Assert that the given link to a file behaves as expected. */
+  private void checkLink(Path linkAbs, Path expectedTarget, Path targetQual)
+      throws IOException {
+    Path dir = new Path(testBaseDir1());
+    // isFile/Directory
+    assertTrue(wrapper.isFile(linkAbs));
+    assertFalse(wrapper.isDir(linkAbs));
+
+    // Check getFileStatus
+    assertFalse(wrapper.getFileStatus(linkAbs).isSymlink());
+    assertFalse(wrapper.getFileStatus(linkAbs).isDirectory());
+    assertEquals(fileSize, wrapper.getFileStatus(linkAbs).getLen());
+
+    // Check getFileLinkStatus
+    assertTrue(wrapper.isSymlink(linkAbs));
+    assertFalse(wrapper.getFileLinkStatus(linkAbs).isDirectory());
+
+    // Check getSymlink always returns a qualified target, except
+    // when partially qualified paths are used (see tests below).
+    assertEquals(targetQual.toString(),
+        wrapper.getFileLinkStatus(linkAbs).getSymlink().toString());
+    assertEquals(targetQual, wrapper.getFileLinkStatus(linkAbs).getSymlink());
+    // Check that the target is qualified using the file system of the
+    // path used to access the link (if the link target was not specified
+    // fully qualified, in that case we use the link target verbatim).
+    if (!"file".equals(getScheme())) {
+      FileContext localFc = FileContext.getLocalFSFileContext();
+      Path linkQual = new Path(testURI().toString(), linkAbs);
+      assertEquals(targetQual,
+                   localFc.getFileLinkStatus(linkQual).getSymlink());
+    }
+
+    // Check getLinkTarget
+    assertEquals(expectedTarget, wrapper.getLinkTarget(linkAbs));
+
+    // Now read using all path types..
+    wrapper.setWorkingDirectory(dir);
+    readFile(new Path("linkToFile"));
+    readFile(linkAbs);
+    // And fully qualified.. (NB: for local fs this is partially qualified)
+    readFile(new Path(testURI().toString(), linkAbs));
+    // And partially qualified..
+    boolean failureExpected = true;
+    // local files are special cased, no authority
+    if ("file".equals(getScheme())) {
+      failureExpected = false;
+    }
+    // FileSystem automatically adds missing authority if scheme matches default
+    else if (wrapper instanceof FileSystemTestWrapper) {
+      failureExpected = false;
+    }
+    try {
+      readFile(new Path(getScheme()+"://"+testBaseDir1()+"/linkToFile"));
+      assertFalse(failureExpected);
+    } catch (Exception e) {
+      if (!failureExpected) {
+        throw new IOException(e);
+      }
+      //assertTrue(failureExpected);
+    }
+
+    // Now read using a different file context (for HDFS at least)
+    if (wrapper instanceof FileContextTestWrapper
+        && !"file".equals(getScheme())) {
+      FSTestWrapper localWrapper = wrapper.getLocalFSWrapper();
+      localWrapper.readFile(new Path(testURI().toString(), linkAbs), fileSize);
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Test creating a symlink using relative paths */
+  public void testCreateLinkUsingRelPaths() throws IOException {
+    Path fileAbs = new Path(testBaseDir1(), "file");
+    Path linkAbs = new Path(testBaseDir1(), "linkToFile");
+    Path schemeAuth = new Path(testURI().toString());
+    Path fileQual = new Path(schemeAuth, testBaseDir1()+"/file");
+    createAndWriteFile(fileAbs);
+
+    wrapper.setWorkingDirectory(new Path(testBaseDir1()));
+    wrapper.createSymlink(new Path("file"), new Path("linkToFile"), false);
+    checkLink(linkAbs, new Path("file"), fileQual);
+
+    // Now rename the link's parent. Because the target was specified
+    // with a relative path the link should still resolve.
+    Path dir1        = new Path(testBaseDir1());
+    Path dir2        = new Path(testBaseDir2());
+    Path linkViaDir2 = new Path(testBaseDir2(), "linkToFile");
+    Path fileViaDir2 = new Path(schemeAuth, testBaseDir2()+"/file");
+    wrapper.rename(dir1, dir2, Rename.OVERWRITE);
+    FileStatus[] stats = wrapper.listStatus(dir2);
+    assertEquals(fileViaDir2,
+        wrapper.getFileLinkStatus(linkViaDir2).getSymlink());
+    readFile(linkViaDir2);
+  }
+
+  @Test(timeout=10000)
+  /** Test creating a symlink using absolute paths */
+  public void testCreateLinkUsingAbsPaths() throws IOException {
+    Path fileAbs = new Path(testBaseDir1()+"/file");
+    Path linkAbs = new Path(testBaseDir1()+"/linkToFile");
+    Path schemeAuth = new Path(testURI().toString());
+    Path fileQual = new Path(schemeAuth, testBaseDir1()+"/file");
+    createAndWriteFile(fileAbs);
+
+    wrapper.createSymlink(fileAbs, linkAbs, false);
+    checkLink(linkAbs, fileAbs, fileQual);
+
+    // Now rename the link's parent. The target doesn't change and
+    // now no longer exists so accessing the link should fail.
+    Path dir1        = new Path(testBaseDir1());
+    Path dir2        = new Path(testBaseDir2());
+    Path linkViaDir2 = new Path(testBaseDir2(), "linkToFile");
+    wrapper.rename(dir1, dir2, Rename.OVERWRITE);
+    assertEquals(fileQual, wrapper.getFileLinkStatus(linkViaDir2).getSymlink());
+    try {
+      readFile(linkViaDir2);
+      fail("The target should not exist");
+    } catch (FileNotFoundException x) {
+      // Expected
+    }
+  }
+
+  @Test(timeout=10000)
+  /**
+   * Test creating a symlink using fully and partially qualified paths.
+   * NB: For local fs this actually tests partially qualified paths,
+   * as they don't support fully qualified paths.
+   */
+  public void testCreateLinkUsingFullyQualPaths() throws IOException {
+    Path fileAbs  = new Path(testBaseDir1(), "file");
+    Path linkAbs  = new Path(testBaseDir1(), "linkToFile");
+    Path fileQual = new Path(testURI().toString(), fileAbs);
+    Path linkQual = new Path(testURI().toString(), linkAbs);
+    createAndWriteFile(fileAbs);
+
+    wrapper.createSymlink(fileQual, linkQual, false);
+    checkLink(linkAbs,
+              "file".equals(getScheme()) ? fileAbs : fileQual,
+              fileQual);
+
+    // Now rename the link's parent. The target doesn't change and
+    // now no longer exists so accessing the link should fail.
+    Path dir1        = new Path(testBaseDir1());
+    Path dir2        = new Path(testBaseDir2());
+    Path linkViaDir2 = new Path(testBaseDir2(), "linkToFile");
+    wrapper.rename(dir1, dir2, Rename.OVERWRITE);
+    assertEquals(fileQual, wrapper.getFileLinkStatus(linkViaDir2).getSymlink());
+    try {
+      readFile(linkViaDir2);
+      fail("The target should not exist");
+    } catch (FileNotFoundException x) {
+      // Expected
+    }
+  }
+
+  @Test(timeout=10000)
+  /**
+   * Test creating a symlink using partially qualified paths, ie a scheme
+   * but no authority and vice versa. We just test link targets here since
+   * creating using a partially qualified path is file system specific.
+   */
+  public void testCreateLinkUsingPartQualPath1() throws IOException {
+    // Partially qualified paths are covered for local file systems
+    // in the previous test.
+    assumeTrue(!"file".equals(getScheme()));
+    Path schemeAuth   = new Path(testURI().toString());
+    Path fileWoHost   = new Path(getScheme()+"://"+testBaseDir1()+"/file");
+    Path link         = new Path(testBaseDir1()+"/linkToFile");
+    Path linkQual     = new Path(schemeAuth, testBaseDir1()+"/linkToFile");
+    FSTestWrapper localWrapper = wrapper.getLocalFSWrapper();
+
+    wrapper.createSymlink(fileWoHost, link, false);
+    // Partially qualified path is stored
+    assertEquals(fileWoHost, wrapper.getLinkTarget(linkQual));
+    // NB: We do not add an authority
+    assertEquals(fileWoHost.toString(),
+      wrapper.getFileLinkStatus(link).getSymlink().toString());
+    assertEquals(fileWoHost.toString(),
+      wrapper.getFileLinkStatus(linkQual).getSymlink().toString());
+    // Ditto even from another file system
+    if (wrapper instanceof FileContextTestWrapper) {
+      assertEquals(fileWoHost.toString(),
+        localWrapper.getFileLinkStatus(linkQual).getSymlink().toString());
+    }
+    // Same as if we accessed a partially qualified path directly
+    try {
+      readFile(link);
+      fail("DFS requires URIs with schemes have an authority");
+    } catch (java.lang.RuntimeException e) {
+      assertTrue(wrapper instanceof FileContextTestWrapper);
+      // Expected
+    } catch (FileNotFoundException e) {
+      assertTrue(wrapper instanceof FileSystemTestWrapper);
+      GenericTestUtils.assertExceptionContains(
+          "File does not exist: /test1/file", e);
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Same as above but vice versa (authority but no scheme) */
+  public void testCreateLinkUsingPartQualPath2() throws IOException {
+    Path link         = new Path(testBaseDir1(), "linkToFile");
+    Path fileWoScheme = new Path("//"+testURI().getAuthority()+
+                                 testBaseDir1()+"/file");
+    if ("file".equals(getScheme())) {
+      return;
+    }
+    wrapper.createSymlink(fileWoScheme, link, false);
+    assertEquals(fileWoScheme, wrapper.getLinkTarget(link));
+    assertEquals(fileWoScheme.toString(),
+      wrapper.getFileLinkStatus(link).getSymlink().toString());
+    try {
+      readFile(link);
+      fail("Accessed a file with w/o scheme");
+    } catch (IOException e) {
+      // Expected
+      if (wrapper instanceof FileContextTestWrapper) {
+        assertEquals("No AbstractFileSystem for scheme: null", e.getMessage());
+      } else if (wrapper instanceof FileSystemTestWrapper) {
+        assertEquals("No FileSystem for scheme: null", e.getMessage());
+      }
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Lstat and readlink on a normal file and directory */
+  public void testLinkStatusAndTargetWithNonLink() throws IOException {
+    Path schemeAuth = new Path(testURI().toString());
+    Path dir        = new Path(testBaseDir1());
+    Path dirQual    = new Path(schemeAuth, dir.toString());
+    Path file       = new Path(testBaseDir1(), "file");
+    Path fileQual   = new Path(schemeAuth, file.toString());
+    createAndWriteFile(file);
+    assertEquals(wrapper.getFileStatus(file), wrapper.getFileLinkStatus(file));
+    assertEquals(wrapper.getFileStatus(dir), wrapper.getFileLinkStatus(dir));
+    try {
+      wrapper.getLinkTarget(file);
+      fail("Get link target on non-link should throw an IOException");
+    } catch (IOException x) {
+      assertEquals("Path "+fileQual+" is not a symbolic link", x.getMessage());
+    }
+    try {
+      wrapper.getLinkTarget(dir);
+      fail("Get link target on non-link should throw an IOException");
+    } catch (IOException x) {
+      assertEquals("Path "+dirQual+" is not a symbolic link", x.getMessage());
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Test create symlink to a directory */
+  public void testCreateLinkToDirectory() throws IOException {
+    Path dir1      = new Path(testBaseDir1());
+    Path file      = new Path(testBaseDir1(), "file");
+    Path linkToDir = new Path(testBaseDir2(), "linkToDir");
+    createAndWriteFile(file);
+    wrapper.createSymlink(dir1, linkToDir, false);
+    assertFalse(wrapper.isFile(linkToDir));
+    assertTrue(wrapper.isDir(linkToDir));
+    assertTrue(wrapper.getFileStatus(linkToDir).isDirectory());
+    assertTrue(wrapper.getFileLinkStatus(linkToDir).isSymlink());
+  }
+
+  @Test(timeout=10000)
+  /** Test create and remove a file through a symlink */
+  public void testCreateFileViaSymlink() throws IOException {
+    Path dir         = new Path(testBaseDir1());
+    Path linkToDir   = new Path(testBaseDir2(), "linkToDir");
+    Path fileViaLink = new Path(linkToDir, "file");
+    wrapper.createSymlink(dir, linkToDir, false);
+    createAndWriteFile(fileViaLink);
+    assertTrue(wrapper.isFile(fileViaLink));
+    assertFalse(wrapper.isDir(fileViaLink));
+    assertFalse(wrapper.getFileLinkStatus(fileViaLink).isSymlink());
+    assertFalse(wrapper.getFileStatus(fileViaLink).isDirectory());
+    readFile(fileViaLink);
+    wrapper.delete(fileViaLink, true);
+    assertFalse(wrapper.exists(fileViaLink));
+  }
+
+  @Test(timeout=10000)
+  /** Test make and delete directory through a symlink */
+  public void testCreateDirViaSymlink() throws IOException {
+    Path dir1          = new Path(testBaseDir1());
+    Path subDir        = new Path(testBaseDir1(), "subDir");
+    Path linkToDir     = new Path(testBaseDir2(), "linkToDir");
+    Path subDirViaLink = new Path(linkToDir, "subDir");
+    wrapper.createSymlink(dir1, linkToDir, false);
+    wrapper.mkdir(subDirViaLink, FileContext.DEFAULT_PERM, true);
+    assertTrue(wrapper.isDir(subDirViaLink));
+    wrapper.delete(subDirViaLink, false);
+    assertFalse(wrapper.exists(subDirViaLink));
+    assertFalse(wrapper.exists(subDir));
+  }
+
+  @Test(timeout=10000)
+  /** Create symlink through a symlink */
+  public void testCreateLinkViaLink() throws IOException {
+    Path dir1        = new Path(testBaseDir1());
+    Path file        = new Path(testBaseDir1(), "file");
+    Path linkToDir   = new Path(testBaseDir2(), "linkToDir");
+    Path fileViaLink = new Path(linkToDir, "file");
+    Path linkToFile  = new Path(linkToDir, "linkToFile");
+    /*
+     * /b2/linkToDir            -> /b1
+     * /b2/linkToDir/linkToFile -> /b2/linkToDir/file
+     */
+    createAndWriteFile(file);
+    wrapper.createSymlink(dir1, linkToDir, false);
+    wrapper.createSymlink(fileViaLink, linkToFile, false);
+    assertTrue(wrapper.isFile(linkToFile));
+    assertTrue(wrapper.getFileLinkStatus(linkToFile).isSymlink());
+    readFile(linkToFile);
+    assertEquals(fileSize, wrapper.getFileStatus(linkToFile).getLen());
+    assertEquals(fileViaLink, wrapper.getLinkTarget(linkToFile));
+  }
+
+  @Test(timeout=10000)
+  /** Test create symlink to a directory */
+  public void testListStatusUsingLink() throws IOException {
+    Path file  = new Path(testBaseDir1(), "file");
+    Path link  = new Path(testBaseDir1(), "link");
+    createAndWriteFile(file);
+    wrapper.createSymlink(new Path(testBaseDir1()), link, false);
+    // The size of the result is file system dependent, Hdfs is 2 (file
+    // and link) and LocalFs is 3 (file, link, file crc).
+    FileStatus[] stats = wrapper.listStatus(link);
+    assertTrue(stats.length == 2 || stats.length == 3);
+    RemoteIterator<FileStatus> statsItor = wrapper.listStatusIterator(link);
+    int dirLen = 0;
+    while(statsItor.hasNext()) {
+      statsItor.next();
+      dirLen++;
+    }
+    assertTrue(dirLen == 2 || dirLen == 3);
+  }
+
+  @Test(timeout=10000)
+  /** Test create symlink using the same path */
+  public void testCreateLinkTwice() throws IOException {
+    Path file = new Path(testBaseDir1(), "file");
+    Path link = new Path(testBaseDir1(), "linkToFile");
+    createAndWriteFile(file);
+    wrapper.createSymlink(file, link, false);
+    try {
+      wrapper.createSymlink(file, link, false);
+      fail("link already exists");
+    } catch (IOException x) {
+      // Expected
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Test access via a symlink to a symlink */
+  public void testCreateLinkToLink() throws IOException {
+    Path dir1        = new Path(testBaseDir1());
+    Path file        = new Path(testBaseDir1(), "file");
+    Path linkToDir   = new Path(testBaseDir2(), "linkToDir");
+    Path linkToLink  = new Path(testBaseDir2(), "linkToLink");
+    Path fileViaLink = new Path(testBaseDir2(), "linkToLink/file");
+    createAndWriteFile(file);
+    wrapper.createSymlink(dir1, linkToDir, false);
+    wrapper.createSymlink(linkToDir, linkToLink, false);
+    assertTrue(wrapper.isFile(fileViaLink));
+    assertFalse(wrapper.isDir(fileViaLink));
+    assertFalse(wrapper.getFileLinkStatus(fileViaLink).isSymlink());
+    assertFalse(wrapper.getFileStatus(fileViaLink).isDirectory());
+    readFile(fileViaLink);
+  }
+
+  @Test(timeout=10000)
+  /** Can not create a file with path that refers to a symlink */
+  public void testCreateFileDirExistingLink() throws IOException {
+    Path file = new Path(testBaseDir1(), "file");
+    Path link = new Path(testBaseDir1(), "linkToFile");
+    createAndWriteFile(file);
+    wrapper.createSymlink(file, link, false);
+    try {
+      createAndWriteFile(link);
+      fail("link already exists");
+    } catch (IOException x) {
+      // Expected
+    }
+    try {
+      wrapper.mkdir(link, FsPermission.getDefault(), false);
+      fail("link already exists");
+    } catch (IOException x) {
+      // Expected
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Test deleting and recreating a symlink */
+  public void testUseLinkAferDeleteLink() throws IOException {
+    Path file = new Path(testBaseDir1(), "file");
+    Path link = new Path(testBaseDir1(), "linkToFile");
+    createAndWriteFile(file);
+    wrapper.createSymlink(file, link, false);
+    wrapper.delete(link, false);
+    try {
+      readFile(link);
+      fail("link was deleted");
+    } catch (IOException x) {
+      // Expected
+    }
+    readFile(file);
+    wrapper.createSymlink(file, link, false);
+    readFile(link);
+  }
+
+  @Test(timeout=10000)
+  /** Test create symlink to . */
+  public void testCreateLinkToDot() throws IOException {
+    Path dir  = new Path(testBaseDir1());
+    Path file = new Path(testBaseDir1(), "file");
+    Path link = new Path(testBaseDir1(), "linkToDot");
+    createAndWriteFile(file);
+    wrapper.setWorkingDirectory(dir);
+    try {
+      wrapper.createSymlink(new Path("."), link, false);
+      fail("Created symlink to dot");
+    } catch (IOException x) {
+      // Expected. Path(".") resolves to "" because URI normalizes
+      // the dot away and AbstractFileSystem considers "" invalid.
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Test create symlink to .. */
+  public void testCreateLinkToDotDot() throws IOException {
+    Path file        = new Path(testBaseDir1(), "test/file");
+    Path dotDot      = new Path(testBaseDir1(), "test/..");
+    Path linkToDir   = new Path(testBaseDir2(), "linkToDir");
+    Path fileViaLink = new Path(linkToDir,      "test/file");
+    // Symlink to .. is not a problem since the .. is squashed early
+    assertEquals(testBaseDir1(), dotDot.toString());
+    createAndWriteFile(file);
+    wrapper.createSymlink(dotDot, linkToDir, false);
+    readFile(fileViaLink);
+    assertEquals(fileSize, wrapper.getFileStatus(fileViaLink).getLen());
+  }
+
+  @Test(timeout=10000)
+  /** Test create symlink to ../file */
+  public void testCreateLinkToDotDotPrefix() throws IOException {
+    Path file = new Path(testBaseDir1(), "file");
+    Path dir  = new Path(testBaseDir1(), "test");
+    Path link = new Path(testBaseDir1(), "test/link");
+    createAndWriteFile(file);
+    wrapper.mkdir(dir, FsPermission.getDefault(), false);
+    wrapper.setWorkingDirectory(dir);
+    wrapper.createSymlink(new Path("../file"), link, false);
+    readFile(link);
+    assertEquals(new Path("../file"), wrapper.getLinkTarget(link));
+  }
+
+  @Test(timeout=10000)
+  /** Test rename file using a path that contains a symlink. The rename should
+   * work as if the path did not contain a symlink */
+  public void testRenameFileViaSymlink() throws IOException {
+    Path dir            = new Path(testBaseDir1());
+    Path file           = new Path(testBaseDir1(), "file");
+    Path linkToDir      = new Path(testBaseDir2(), "linkToDir");
+    Path fileViaLink    = new Path(linkToDir, "file");
+    Path fileNewViaLink = new Path(linkToDir, "fileNew");
+    createAndWriteFile(file);
+    wrapper.createSymlink(dir, linkToDir, false);
+    wrapper.rename(fileViaLink, fileNewViaLink);
+    assertFalse(wrapper.exists(fileViaLink));
+    assertFalse(wrapper.exists(file));
+    assertTrue(wrapper.exists(fileNewViaLink));
+  }
+
+  @Test(timeout=10000)
+  /** Test rename a file through a symlink but this time only the
+   * destination path has an intermediate symlink. The rename should work
+   * as if the path did not contain a symlink */
+  public void testRenameFileToDestViaSymlink() throws IOException {
+    Path dir       = new Path(testBaseDir1());
+    Path file      = new Path(testBaseDir1(), "file");
+    Path linkToDir = new Path(testBaseDir2(), "linkToDir");
+    Path subDir    = new Path(linkToDir, "subDir");
+    createAndWriteFile(file);
+    wrapper.createSymlink(dir, linkToDir, false);
+    wrapper.mkdir(subDir, FileContext.DEFAULT_PERM, false);
+    try {
+      wrapper.rename(file, subDir);
+      fail("Renamed file to a directory");
+    } catch (IOException e) {
+      // Expected. Both must be directories.
+      assertTrue(unwrapException(e) instanceof IOException);
+    }
+    assertTrue(wrapper.exists(file));
+  }
+
+  @Test(timeout=10000)
+  /** Similar tests as the previous ones but rename a directory */
+  public void testRenameDirViaSymlink() throws IOException {
+    Path baseDir       = new Path(testBaseDir1());
+    Path dir           = new Path(baseDir, "dir");
+    Path linkToDir     = new Path(testBaseDir2(), "linkToDir");
+    Path dirViaLink    = new Path(linkToDir, "dir");
+    Path dirNewViaLink = new Path(linkToDir, "dirNew");
+    wrapper.mkdir(dir, FileContext.DEFAULT_PERM, false);
+    wrapper.createSymlink(baseDir, linkToDir, false);
+    assertTrue(wrapper.exists(dirViaLink));
+    wrapper.rename(dirViaLink, dirNewViaLink);
+    assertFalse(wrapper.exists(dirViaLink));
+    assertFalse(wrapper.exists(dir));
+    assertTrue(wrapper.exists(dirNewViaLink));
+  }
+
+  @Test(timeout=10000)
+  /** Similar tests as the previous ones but rename a symlink */
+  public void testRenameSymlinkViaSymlink() throws IOException {
+    Path baseDir        = new Path(testBaseDir1());
+    Path file           = new Path(testBaseDir1(), "file");
+    Path link           = new Path(testBaseDir1(), "link");
+    Path linkToDir      = new Path(testBaseDir2(), "linkToDir");
+    Path linkViaLink    = new Path(linkToDir, "link");
+    Path linkNewViaLink = new Path(linkToDir, "linkNew");
+    createAndWriteFile(file);
+    wrapper.createSymlink(file, link, false);
+    wrapper.createSymlink(baseDir, linkToDir, false);
+    wrapper.rename(linkViaLink, linkNewViaLink);
+    assertFalse(wrapper.exists(linkViaLink));
+    // Check that we didn't rename the link target
+    assertTrue(wrapper.exists(file));
+    assertTrue(wrapper.getFileLinkStatus(linkNewViaLink).isSymlink());
+    readFile(linkNewViaLink);
+  }
+
+  @Test(timeout=10000)
+  /** Test rename a directory to a symlink to a directory */
+  public void testRenameDirToSymlinkToDir() throws IOException {
+    Path dir1      = new Path(testBaseDir1());
+    Path subDir = new Path(testBaseDir2(), "subDir");
+    Path linkToDir = new Path(testBaseDir2(), "linkToDir");
+    wrapper.mkdir(subDir, FileContext.DEFAULT_PERM, false);
+    wrapper.createSymlink(subDir, linkToDir, false);
+    try {
+      wrapper.rename(dir1, linkToDir, Rename.OVERWRITE);
+      fail("Renamed directory to a symlink");
+    } catch (IOException e) {
+      // Expected. Both must be directories.
+      assertTrue(unwrapException(e) instanceof IOException);
+    }
+    assertTrue(wrapper.exists(dir1));
+    assertTrue(wrapper.exists(linkToDir));
+  }
+
+  @Test(timeout=10000)
+  /** Test rename a directory to a symlink to a file */
+  public void testRenameDirToSymlinkToFile() throws IOException {
+    Path dir1 = new Path(testBaseDir1());
+    Path file = new Path(testBaseDir2(), "file");
+    Path linkToFile = new Path(testBaseDir2(), "linkToFile");
+    createAndWriteFile(file);
+    wrapper.createSymlink(file, linkToFile, false);
+    try {
+      wrapper.rename(dir1, linkToFile, Rename.OVERWRITE);
+      fail("Renamed directory to a symlink");
+    } catch (IOException e) {
+      // Expected. Both must be directories.
+      assertTrue(unwrapException(e) instanceof IOException);
+    }
+    assertTrue(wrapper.exists(dir1));
+    assertTrue(wrapper.exists(linkToFile));
+  }
+
+  @Test(timeout=10000)
+  /** Test rename a directory to a dangling symlink */
+  public void testRenameDirToDanglingSymlink() throws IOException {
+    Path dir = new Path(testBaseDir1());
+    Path link = new Path(testBaseDir2(), "linkToFile");
+    wrapper.createSymlink(new Path("/doesNotExist"), link, false);
+    try {
+      wrapper.rename(dir, link, Rename.OVERWRITE);
+      fail("Renamed directory to a symlink");
+    } catch (IOException e) {
+      // Expected. Both must be directories.
+      assertTrue(unwrapException(e) instanceof IOException);
+    }
+    assertTrue(wrapper.exists(dir));
+    assertTrue(wrapper.getFileLinkStatus(link) != null);
+  }
+
+  @Test(timeout=10000)
+  /** Test rename a file to a symlink to a directory */
+  public void testRenameFileToSymlinkToDir() throws IOException {
+    Path file   = new Path(testBaseDir1(), "file");
+    Path subDir = new Path(testBaseDir1(), "subDir");
+    Path link   = new Path(testBaseDir1(), "link");
+    wrapper.mkdir(subDir, FileContext.DEFAULT_PERM, false);
+    wrapper.createSymlink(subDir, link, false);
+    createAndWriteFile(file);
+    try {
+      wrapper.rename(file, link);
+      fail("Renamed file to symlink w/o overwrite");
+    } catch (IOException e) {
+      // Expected
+      assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
+    }
+    wrapper.rename(file, link, Rename.OVERWRITE);
+    assertFalse(wrapper.exists(file));
+    assertTrue(wrapper.exists(link));
+    assertTrue(wrapper.isFile(link));
+    assertFalse(wrapper.getFileLinkStatus(link).isSymlink());
+  }
+
+  @Test(timeout=10000)
+  /** Test rename a file to a symlink to a file */
+  public void testRenameFileToSymlinkToFile() throws IOException {
+    Path file1 = new Path(testBaseDir1(), "file1");
+    Path file2 = new Path(testBaseDir1(), "file2");
+    Path link = new Path(testBaseDir1(), "linkToFile");
+    createAndWriteFile(file1);
+    createAndWriteFile(file2);
+    wrapper.createSymlink(file2, link, false);
+    try {
+      wrapper.rename(file1, link);
+      fail("Renamed file to symlink w/o overwrite");
+    } catch (IOException e) {
+      // Expected
+      assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
+    }
+    wrapper.rename(file1, link, Rename.OVERWRITE);
+    assertFalse(wrapper.exists(file1));
+    assertTrue(wrapper.exists(link));
+    assertTrue(wrapper.isFile(link));
+    assertFalse(wrapper.getFileLinkStatus(link).isSymlink());
+  }
+
+  @Test(timeout=10000)
+  /** Test rename a file to a dangling symlink */
+  public void testRenameFileToDanglingSymlink() throws IOException {
+    /* NB: Local file system doesn't handle dangling links correctly
+     * since File.exists(danglingLink) returns false. */
+    if ("file".equals(getScheme())) {
+      return;
+    }
+    Path file1 = new Path(testBaseDir1(), "file1");
+    Path link = new Path(testBaseDir1(), "linkToFile");
+    createAndWriteFile(file1);
+    wrapper.createSymlink(new Path("/doesNotExist"), link, false);
+    try {
+      wrapper.rename(file1, link);
+    } catch (IOException e) {
+      // Expected
+    }
+    wrapper.rename(file1, link, Rename.OVERWRITE);
+    assertFalse(wrapper.exists(file1));
+    assertTrue(wrapper.exists(link));
+    assertTrue(wrapper.isFile(link));
+    assertFalse(wrapper.getFileLinkStatus(link).isSymlink());
+  }
+
+  @Test(timeout=10000)
+  /** Rename a symlink to a new non-existent name */
+  public void testRenameSymlinkNonExistentDest() throws IOException {
+    Path file  = new Path(testBaseDir1(), "file");
+    Path link1 = new Path(testBaseDir1(), "linkToFile1");
+    Path link2 = new Path(testBaseDir1(), "linkToFile2");
+    createAndWriteFile(file);
+    wrapper.createSymlink(file, link1, false);
+    wrapper.rename(link1, link2);
+    assertTrue(wrapper.getFileLinkStatus(link2).isSymlink());
+    readFile(link2);
+    readFile(file);
+    assertFalse(wrapper.exists(link1));
+  }
+
+  @Test(timeout=10000)
+  /** Rename a symlink to a file that exists */
+  public void testRenameSymlinkToExistingFile() throws IOException {
+    Path file1 = new Path(testBaseDir1(), "file");
+    Path file2 = new Path(testBaseDir1(), "someFile");
+    Path link = new Path(testBaseDir1(), "linkToFile");
+    createAndWriteFile(file1);
+    createAndWriteFile(file2);
+    wrapper.createSymlink(file2, link, false);
+    try {
+      wrapper.rename(link, file1);
+      fail("Renamed w/o passing overwrite");
+    } catch (IOException e) {
+      // Expected
+      assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
+    }
+    wrapper.rename(link, file1, Rename.OVERWRITE);
+    assertFalse(wrapper.exists(link));
+    assertTrue(wrapper.getFileLinkStatus(file1).isSymlink());
+    assertEquals(file2, wrapper.getLinkTarget(file1));
+  }
+
+  @Test(timeout=10000)
+  /** Rename a symlink to a directory that exists */
+  public void testRenameSymlinkToExistingDir() throws IOException {
+    Path dir1   = new Path(testBaseDir1());
+    Path dir2   = new Path(testBaseDir2());
+    Path subDir = new Path(testBaseDir2(), "subDir");
+    Path link   = new Path(testBaseDir1(), "linkToDir");
+    wrapper.createSymlink(dir1, link, false);
+    try {
+      wrapper.rename(link, dir2);
+      fail("Renamed link to a directory");
+    } catch (IOException e) {
+      // Expected. Both must be directories.
+      assertTrue(unwrapException(e) instanceof IOException);
+    }
+    try {
+      wrapper.rename(link, dir2, Rename.OVERWRITE);
+      fail("Renamed link to a directory");
+    } catch (IOException e) {
+      // Expected. Both must be directories.
+      assertTrue(unwrapException(e) instanceof IOException);
+    }
+    // Also fails when dir2 has a sub-directory
+    wrapper.mkdir(subDir, FsPermission.getDefault(), false);
+    try {
+      wrapper.rename(link, dir2, Rename.OVERWRITE);
+      fail("Renamed link to a directory");
+    } catch (IOException e) {
+      // Expected. Both must be directories.
+      assertTrue(unwrapException(e) instanceof IOException);
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Rename a symlink to itself */
+  public void testRenameSymlinkToItself() throws IOException {
+    Path link = new Path(testBaseDir1(), "linkToFile1");
+    wrapper.createSymlink(new Path("/doestNotExist"), link, false);
+    try {
+      wrapper.rename(link, link);
+    } catch (IOException e) {
+      assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
+    }
+    // Fails with overwrite as well
+    try {
+      wrapper.rename(link, link, Rename.OVERWRITE);
+    } catch (IOException e) {
+      assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Rename a symlink */
+  public void testRenameSymlink() throws IOException {
+    Path file  = new Path(testBaseDir1(), "file");
+    Path link1 = new Path(testBaseDir1(), "linkToFile1");
+    Path link2 = new Path(testBaseDir1(), "linkToFile2");
+    createAndWriteFile(file);
+    wrapper.createSymlink(file, link1, false);
+    wrapper.rename(link1, link2);
+    assertTrue(wrapper.getFileLinkStatus(link2).isSymlink());
+    assertFalse(wrapper.getFileStatus(link2).isDirectory());
+    readFile(link2);
+    readFile(file);
+    try {
+      createAndWriteFile(link2);
+      fail("link was not renamed");
+    } catch (IOException x) {
+      // Expected
+    }
+  }
+
+  @Test(timeout=10000)
+  /** Rename a symlink to the file it links to */
+  public void testRenameSymlinkToFileItLinksTo() throws IOException {
+    /* NB: The rename is not atomic, so file is deleted before renaming
+     * linkToFile. In this interval linkToFile is dangling and local file
+     * system does not handle dangling links because File.exists returns
+     * false for dangling links. */
+    if ("file".equals(getScheme())) {
+      return;
+    }
+    Path file = new Path(testBaseDir1(), "file");
+    Path link = new Path(testBaseDir1(), "linkToFile");
+    createAndWriteFile(file);
+    wrapper.createSymlink(file, link, false);
+    try {
+      wrapper.rename(link, file);
+      fail("Renamed symlink to its target");
+    } catch (IOException e) {
+      assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
+    }
+    // Check the rename didn't happen
+    assertTrue(wrapper.isFile(file));
+    assertTrue(wrapper.exists(link));
+    assertTrue(wrapper.isSymlink(link));
+    assertEquals(file, wrapper.getLinkTarget(link));
+    try {
+      wrapper.rename(link, file, Rename.OVERWRITE);
+      fail("Renamed symlink to its target");
+    } catch (IOException e) {
+      assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
+    }
+    // Check the rename didn't happen
+    assertTrue(wrapper.isFile(file));
+    assertTrue(wrapper.exists(link));
+    assertTrue(wrapper.isSymlink(link));
+    assertEquals(file, wrapper.getLinkTarget(link));
+  }
+
+  @Test(timeout=10000)
+  /** Rename a symlink to the directory it links to */
+  public void testRenameSymlinkToDirItLinksTo() throws IOException {
+    /* NB: The rename is not atomic, so dir is deleted before renaming
+     * linkToDir. In this interval linkToDir is dangling and local file
+     * system does not handle dangling links because File.exists returns
+     * false for dangling links. */
+    if ("file".equals(getScheme())) {
+      return;
+    }
+    Path dir  = new Path(testBaseDir1(), "dir");
+    Path link = new Path(testBaseDir1(), "linkToDir");
+    wrapper.mkdir(dir, FileContext.DEFAULT_PERM, false);
+    wrapper.createSymlink(dir, link, false);
+    try {
+      wrapper.rename(link, dir);
+      fail("Renamed symlink to its target");
+    } catch (IOException e) {
+      assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
+    }
+    // Check the rename didn't happen
+    assertTrue(wrapper.isDir(dir));
+    assertTrue(wrapper.exists(link));
+    assertTrue(wrapper.isSymlink(link));
+    assertEquals(dir, wrapper.getLinkTarget(link));
+    try {
+      wrapper.rename(link, dir, Rename.OVERWRITE);
+      fail("Renamed symlink to its target");
+    } catch (IOException e) {
+      assertTrue(unwrapException(e) instanceof FileAlreadyExistsException);
+    }
+    // Check the rename didn't happen
+    assertTrue(wrapper.isDir(dir));
+    assertTrue(wrapper.exists(link));
+    assertTrue(wrapper.isSymlink(link));
+    assertEquals(dir, wrapper.getLinkTarget(link));
+  }
+
+  @Test(timeout=10000)
+  /** Test rename the symlink's target */
+  public void testRenameLinkTarget() throws IOException {
+    Path file    = new Path(testBaseDir1(), "file");
+    Path fileNew = new Path(testBaseDir1(), "fileNew");
+    Path link    = new Path(testBaseDir1(), "linkToFile");
+    createAndWriteFile(file);
+    wrapper.createSymlink(file, link, false);
+    wrapper.rename(file, fileNew, Rename.OVERWRITE);
+    try {
+      readFile(link);
+      fail("Link should be dangling");
+    } catch (IOException x) {
+      // Expected
+    }
+    wrapper.rename(fileNew, file, Rename.OVERWRITE);
+    readFile(link);
+  }
+
+  @Test(timeout=10000)
+  /** Test rename a file to path with destination that has symlink parent */
+  public void testRenameFileWithDestParentSymlink() throws IOException {
+    Path link  = new Path(testBaseDir1(), "link");
+    Path file1 = new Path(testBaseDir1(), "file1");
+    Path file2 = new Path(testBaseDir1(), "file2");
+    Path file3 = new Path(link, "file3");
+    Path dir2  = new Path(testBaseDir2());
+
+    // Renaming /dir1/file1 to non-existent file /dir1/link/file3 is OK
+    // if link points to a directory...
+    wrapper.createSymlink(dir2, link, false);
+    createAndWriteFile(file1);
+    wrapper.rename(file1, file3);
+    assertFalse(wrapper.exists(file1));
+    assertTrue(wrapper.exists(file3));
+    wrapper.rename(file3, file1);
+
+    // But fails if link is dangling...
+    wrapper.delete(link, false);
+    wrapper.createSymlink(file2, link, false);
+    try {
+      wrapper.rename(file1, file3);
+    } catch (IOException e) {
+      // Expected
+      assertTrue(unwrapException(e) instanceof FileNotFoundException);
+    }
+
+    // And if link points to a file...
+    createAndWriteFile(file2);
+    try {
+      wrapper.rename(file1, file3);
+    } catch (IOException e) {
+      // Expected
+      assertTrue(unwrapException(e) instanceof ParentNotDirectoryException);
+    }
+  }
+
+  @Test(timeout=10000)
+  /**
+   * Create, write, read, append, rename, get the block locations,
+   * checksums, and delete a file using a path with a symlink as an
+   * intermediate path component where the link target was specified
+   * using an absolute path. Rename is covered in more depth below.
+   */
+  public void testAccessFileViaInterSymlinkAbsTarget() throws IOException {
+    Path baseDir        = new Path(testBaseDir1());
+    Path file           = new Path(testBaseDir1(), "file");
+    Path fileNew        = new Path(baseDir, "fileNew");
+    Path linkToDir      = new Path(testBaseDir2(), "linkToDir");
+    Path fileViaLink    = new Path(linkToDir, "file");
+    Path fileNewViaLink = new Path(linkToDir, "fileNew");
+    wrapper.createSymlink(baseDir, linkToDir, false);
+    createAndWriteFile(fileViaLink);
+    assertTrue(wrapper.exists(fileViaLink));
+    assertTrue(wrapper.isFile(fileViaLink));
+    assertFalse(wrapper.isDir(fileViaLink));
+    assertFalse(wrapper.getFileLinkStatus(fileViaLink).isSymlink());
+    assertFalse(wrapper.isDir(fileViaLink));
+    assertEquals(wrapper.getFileStatus(file),
+                 wrapper.getFileLinkStatus(file));
+    assertEquals(wrapper.getFileStatus(fileViaLink),
+                 wrapper.getFileLinkStatus(fileViaLink));
+    readFile(fileViaLink);
+    appendToFile(fileViaLink);
+    wrapper.rename(fileViaLink, fileNewViaLink);
+    assertFalse(wrapper.exists(fileViaLink));
+    assertTrue(wrapper.exists(fileNewViaLink));
+    readFile(fileNewViaLink);
+    assertEquals(wrapper.getFileBlockLocations(fileNew, 0, 1).length,
+                 wrapper.getFileBlockLocations(fileNewViaLink, 0, 1).length);
+    assertEquals(wrapper.getFileChecksum(fileNew),
+                 wrapper.getFileChecksum(fileNewViaLink));
+    wrapper.delete(fileNewViaLink, true);
+    assertFalse(wrapper.exists(fileNewViaLink));
+  }
+
+  @Test(timeout=10000)
+  /**
+   * Operate on a file using a path with an intermediate symlink where
+   * the link target was specified as a fully qualified path.
+   */
+  public void testAccessFileViaInterSymlinkQualTarget() throws IOException {
+    Path baseDir        = new Path(testBaseDir1());
+    Path file           = new Path(testBaseDir1(), "file");
+    Path linkToDir      = new Path(testBaseDir2(), "linkToDir");
+    Path fileViaLink    = new Path(linkToDir, "file");
+    wrapper.createSymlink(wrapper.makeQualified(baseDir), linkToDir, false);
+    createAndWriteFile(fileViaLink);
+    assertEquals(wrapper.getFileStatus(file),
+                 wrapper.getFileLinkStatus(file));
+    assertEquals(wrapper.getFileStatus(fileViaLink),
+                 wrapper.getFileLinkStatus(fileViaLink));
+    readFile(fileViaLink);
+  }
+
+  @Test(timeout=10000)
+  /**
+   * Operate on a file using a path with an intermediate symlink where
+   * the link target was specified as a relative path.
+   */
+  public void testAccessFileViaInterSymlinkRelTarget() throws IOException {
+    assumeTrue(!"file".equals(getScheme()));
+    Path dir         = new Path(testBaseDir1(), "dir");
+    Path file        = new Path(dir, "file");
+    Path linkToDir   = new Path(testBaseDir1(), "linkToDir");
+    Path fileViaLink = new Path(linkToDir, "file");
+
+    wrapper.mkdir(dir, FileContext.DEFAULT_PERM, false);
+    wrapper.createSymlink(new Path("dir"), linkToDir, false);
+    createAndWriteFile(fileViaLink);
+    // Note that getFileStatus returns fully qualified paths even
+    // when called on an absolute path.
+    assertEquals(wrapper.makeQualified(file),
+                 wrapper.getFileStatus(file).getPath());
+    // In each case getFileLinkStatus returns the same FileStatus
+    // as getFileStatus since we're not calling it on a link and
+    // FileStatus objects are compared by Path.
+    assertEquals(wrapper.getFileStatus(file),
+                 wrapper.getFileLinkStatus(file));
+    assertEquals(wrapper.getFileStatus(fileViaLink),
+                 wrapper.getFileLinkStatus(fileViaLink));
+    assertEquals(wrapper.getFileStatus(fileViaLink),
+                 wrapper.getFileLinkStatus(file));
+  }
+
+  @Test(timeout=10000)
+  /** Test create, list, and delete a directory through a symlink */
+  public void testAccessDirViaSymlink() throws IOException {
+    Path baseDir    = new Path(testBaseDir1());
+    Path dir        = new Path(testBaseDir1(), "dir");
+    Path linkToDir  = new Path(testBaseDir2(), "linkToDir");
+    Path dirViaLink = new Path(linkToDir, "dir");
+    wrapper.createSymlink(baseDir, linkToDir, false);
+    wrapper.mkdir(dirViaLink, FileContext.DEFAULT_PERM, true);
+    assertTrue(wrapper.getFileStatus(dirViaLink).isDirectory());
+    FileStatus[] stats = wrapper.listStatus(dirViaLink);
+    assertEquals(0, stats.length);
+    RemoteIterator<FileStatus> statsItor = wrapper.listStatusIterator(dirViaLink);
+    assertFalse(statsItor.hasNext());
+    wrapper.delete(dirViaLink, false);
+    assertFalse(wrapper.exists(dirViaLink));
+    assertFalse(wrapper.exists(dir));
+  }
+
+  @Test(timeout=10000)
+  /** setTimes affects the target not the link */
+  public void testSetTimes() throws IOException {
+    Path file = new Path(testBaseDir1(), "file");
+    Path link = new Path(testBaseDir1(), "linkToFile");
+    createAndWriteFile(file);
+    wrapper.createSymlink(file, link, false);
+    long at = wrapper.getFileLinkStatus(link).getAccessTime();
+    wrapper.setTimes(link, 2L, 3L);
+    // NB: local file systems don't implement setTimes
+    if (!"file".equals(getScheme())) {
+      assertEquals(at, wrapper.getFileLinkStatus(link).getAccessTime());
+      assertEquals(3, wrapper.getFileStatus(file).getAccessTime());
+      assertEquals(2, wrapper.getFileStatus(file).getModificationTime());
+    }
+  }
+}

+ 1 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java

@@ -209,6 +209,7 @@ public class TestFilterFileSystem {
     public String getScheme() {
       return "dontcheck";
     }
+    public Path fixRelativePart(Path p) { return null; }
   }
   
   @Test

+ 82 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java

@@ -417,6 +417,88 @@ public class TestLocalFileSystem {
       stm.close();
     }
   }
+
+  /**
+   * Tests a simple rename of a directory.
+   */
+  @Test
+  public void testRenameDirectory() throws IOException {
+    Path src = new Path(TEST_ROOT_DIR, "dir1");
+    Path dst = new Path(TEST_ROOT_DIR, "dir2");
+    fileSys.delete(src, true);
+    fileSys.delete(dst, true);
+    assertTrue(fileSys.mkdirs(src));
+    assertTrue(fileSys.rename(src, dst));
+    assertTrue(fileSys.exists(dst));
+    assertFalse(fileSys.exists(src));
+  }
+
+  /**
+   * Tests that renaming a directory replaces the destination if the destination
+   * is an existing empty directory.
+   * 
+   * Before:
+   *   /dir1
+   *     /file1
+   *     /file2
+   *   /dir2
+   * 
+   * After rename("/dir1", "/dir2"):
+   *   /dir2
+   *     /file1
+   *     /file2
+   */
+  @Test
+  public void testRenameReplaceExistingEmptyDirectory() throws IOException {
+    Path src = new Path(TEST_ROOT_DIR, "dir1");
+    Path dst = new Path(TEST_ROOT_DIR, "dir2");
+    fileSys.delete(src, true);
+    fileSys.delete(dst, true);
+    assertTrue(fileSys.mkdirs(src));
+    writeFile(fileSys, new Path(src, "file1"), 1);
+    writeFile(fileSys, new Path(src, "file2"), 1);
+    assertTrue(fileSys.mkdirs(dst));
+    assertTrue(fileSys.rename(src, dst));
+    assertTrue(fileSys.exists(dst));
+    assertTrue(fileSys.exists(new Path(dst, "file1")));
+    assertTrue(fileSys.exists(new Path(dst, "file2")));
+    assertFalse(fileSys.exists(src));
+  }
+
+  /**
+   * Tests that renaming a directory to an existing directory that is not empty
+   * moves the source directory under the destination directory.
+   * 
+   * Before:
+   *   /dir1
+   *     /dir2
+   *       /dir3
+   *         /file1
+   *         /file2
+   * 
+   * After rename("/dir1/dir2/dir3", "/dir1"):
+   *   /dir1
+   *     /dir3
+   *       /file1
+   *       /file2
+   */
+  @Test
+  public void testRenameMoveToExistingNonEmptyDirectory() throws IOException {
+    Path src = new Path(TEST_ROOT_DIR, "dir1/dir2/dir3");
+    Path dst = new Path(TEST_ROOT_DIR, "dir1");
+    fileSys.delete(src, true);
+    fileSys.delete(dst, true);
+    assertTrue(fileSys.mkdirs(src));
+    writeFile(fileSys, new Path(src, "file1"), 1);
+    writeFile(fileSys, new Path(src, "file2"), 1);
+    assertTrue(fileSys.exists(dst));
+    assertTrue(fileSys.rename(src, dst));
+    assertTrue(fileSys.exists(dst));
+    assertTrue(fileSys.exists(new Path(dst, "dir3")));
+    assertTrue(fileSys.exists(new Path(dst, "dir3/file1")));
+    assertTrue(fileSys.exists(new Path(dst, "dir3/file2")));
+    assertFalse(fileSys.exists(src));
+  }
   
   private void verifyRead(FSDataInputStream stm, byte[] fileContents,
        int seekOff, int toRead) throws IOException {

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestFailoverProxy.java

@@ -78,7 +78,7 @@ public class TestFailoverProxy {
 
     @Override
     public RetryAction shouldRetry(Exception e, int retries, int failovers,
-        boolean isMethodIdempotent) {
+        boolean isIdempotentOrAtMostOnce) {
       return failovers < 1 ? RetryAction.FAILOVER_AND_RETRY : RetryAction.FAIL;
     }
     

+ 34 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java

@@ -35,6 +35,7 @@ import junit.framework.TestCase;
 
 import org.apache.hadoop.io.retry.UnreliableInterface.FatalException;
 import org.apache.hadoop.io.retry.UnreliableInterface.UnreliableException;
+import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RemoteException;
 
 public class TestRetryProxy extends TestCase {
@@ -58,6 +59,38 @@ public class TestRetryProxy extends TestCase {
     }
   }
   
+  /**
+   * Test for {@link RetryInvocationHandler#isRpcInvocation(Object)}
+   */
+  public void testRpcInvocation() throws Exception {
+    // For a proxy method should return true
+    final UnreliableInterface unreliable = (UnreliableInterface)
+      RetryProxy.create(UnreliableInterface.class, unreliableImpl, RETRY_FOREVER);
+    assertTrue(RetryInvocationHandler.isRpcInvocation(unreliable));
+    
+    // Embed the proxy in ProtocolTranslator
+    ProtocolTranslator xlator = new ProtocolTranslator() {
+      int count = 0;
+      @Override
+      public Object getUnderlyingProxyObject() {
+        count++;
+        return unreliable;
+      }
+      @Override
+      public String toString() {
+        return "" + count;
+      }
+    };
+    
+    // For a proxy wrapped in ProtocolTranslator method should return true
+    assertTrue(RetryInvocationHandler.isRpcInvocation(xlator));
+    // Ensure underlying proxy was looked at
+    assertEquals(xlator.toString(), "1");
+    
+    // For non-proxy the method must return false
+    assertFalse(RetryInvocationHandler.isRpcInvocation(new Object()));
+  }
+  
   public void testRetryForever() throws UnreliableException {
     UnreliableInterface unreliable = (UnreliableInterface)
       RetryProxy.create(UnreliableInterface.class, unreliableImpl, RETRY_FOREVER);
@@ -138,7 +171,7 @@ public class TestRetryProxy extends TestCase {
     }
   }
   
-  public void testRetryByRemoteException() throws UnreliableException {
+  public void testRetryByRemoteException() {
     Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
       Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(FatalException.class, TRY_ONCE_THEN_FAIL);
     

+ 3 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/MiniRPCBenchmark.java

@@ -189,7 +189,7 @@ public class MiniRPCBenchmark {
     MiniProtocol client = null;
     try {
       long start = Time.now();
-      client = (MiniProtocol) RPC.getProxy(MiniProtocol.class,
+      client = RPC.getProxy(MiniProtocol.class,
           MiniProtocol.versionID, addr, conf);
       long end = Time.now();
       return end - start;
@@ -211,7 +211,7 @@ public class MiniRPCBenchmark {
         client =  proxyUserUgi.doAs(new PrivilegedExceptionAction<MiniProtocol>() {
           @Override
           public MiniProtocol run() throws IOException {
-            MiniProtocol p = (MiniProtocol) RPC.getProxy(MiniProtocol.class,
+            MiniProtocol p = RPC.getProxy(MiniProtocol.class,
                 MiniProtocol.versionID, addr, conf);
             Token<TestDelegationTokenIdentifier> token;
             token = p.getDelegationToken(new Text(RENEWER));
@@ -239,7 +239,7 @@ public class MiniRPCBenchmark {
         client = currentUgi.doAs(new PrivilegedExceptionAction<MiniProtocol>() {
           @Override
           public MiniProtocol run() throws IOException {
-            return (MiniProtocol) RPC.getProxy(MiniProtocol.class,
+            return RPC.getProxy(MiniProtocol.class,
                 MiniProtocol.versionID, addr, conf);
           }
         });

+ 2 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java

@@ -31,7 +31,6 @@ import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.cli.OptionBuilder;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
-import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.RPC.Server;
@@ -55,7 +54,7 @@ import com.google.protobuf.BlockingService;
  * Benchmark for protobuf RPC.
  * Run with --help option for usage.
  */
-public class RPCCallBenchmark implements Tool, Configurable {
+public class RPCCallBenchmark implements Tool {
   private Configuration conf;
   private AtomicLong callCount = new AtomicLong(0);
   private static ThreadMXBean threadBean =
@@ -403,7 +402,7 @@ public class RPCCallBenchmark implements Tool, Configurable {
         }
       };
     } else if (opts.rpcEngine == WritableRpcEngine.class) {
-      final TestProtocol proxy = (TestProtocol)RPC.getProxy(
+      final TestProtocol proxy = RPC.getProxy(
           TestProtocol.class, TestProtocol.versionID, addr, conf);
       return new RpcServiceWrapper() {
         @Override

+ 315 - 38
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java

@@ -18,37 +18,55 @@
 
 package org.apache.hadoop.ipc;
 
-import org.apache.commons.logging.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
 
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.ipc.Server.Connection;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.net.ConnectTimeoutException;
-import org.apache.hadoop.net.NetUtils;
-
-import java.util.Random;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInput;
-import java.io.File;
 import java.io.DataOutput;
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
 import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.net.SocketTimeoutException;
-import javax.net.SocketFactory;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
 
-import org.junit.Test;
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.*;
+import javax.net.SocketFactory;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.ipc.Client.ConnectionId;
+import org.apache.hadoop.ipc.RPC.RpcKind;
+import org.apache.hadoop.ipc.Server.Connection;
+import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
+import org.apache.hadoop.net.ConnectTimeoutException;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.Assert;
 import org.junit.Assume;
+import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -83,6 +101,10 @@ public class TestIPC {
   private static final File FD_DIR = new File("/proc/self/fd");
 
   private static class TestServer extends Server {
+    // Tests can set callListener to run a piece of code each time the server
+    // receives a call.  This code executes on the server thread, so it has
+    // visibility of that thread's thread-local storage.
+    private Runnable callListener;
     private boolean sleep;
     private Class<? extends Writable> responseClass;
 
@@ -108,6 +130,9 @@ public class TestIPC {
           Thread.sleep(RANDOM.nextInt(PING_INTERVAL) + MIN_SLEEP_TIME);
         } catch (InterruptedException e) {}
       }
+      if (callListener != null) {
+        callListener.run();
+      }
       if (responseClass != null) {
         try {
           return responseClass.newInstance();
@@ -152,15 +177,54 @@ public class TestIPC {
     }
   }
 
+  /**
+   * A RpcInvocationHandler instance for test. Its invoke function uses the same
+   * {@link Client} instance, and will fail the first totalRetry times (by 
+   * throwing an IOException).
+   */
+  private static class TestInvocationHandler implements RpcInvocationHandler {
+    private static int retry = 0;
+    private final Client client;
+    private final Server server;
+    private final int total;
+    
+    TestInvocationHandler(Client client, Server server, int total) {
+      this.client = client;
+      this.server = server;
+      this.total = total;
+    }
+    
+    @Override
+    public Object invoke(Object proxy, Method method, Object[] args)
+        throws Throwable {
+      LongWritable param = new LongWritable(RANDOM.nextLong());
+      LongWritable value = (LongWritable) client.call(param,
+          NetUtils.getConnectAddress(server), null, null, 0, conf);
+      if (retry++ < total) {
+        throw new IOException("Fake IOException");
+      } else {
+        return value;
+      }
+    }
+
+    @Override
+    public void close() throws IOException {}
+    
+    @Override
+    public ConnectionId getConnectionId() {
+      return null;
+    }
+  }
+  
   @Test
-  public void testSerial() throws Exception {
+  public void testSerial() throws IOException, InterruptedException {
     testSerial(3, false, 2, 5, 100);
     testSerial(3, true, 2, 5, 10);
   }
 
   public void testSerial(int handlerCount, boolean handlerSleep, 
                          int clientCount, int callerCount, int callCount)
-    throws Exception {
+    throws IOException, InterruptedException {
     Server server = new TestServer(handlerCount, handlerSleep);
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
     server.start();
@@ -186,7 +250,7 @@ public class TestIPC {
   }
 
   @Test
-  public void testStandAloneClient() throws Exception {
+  public void testStandAloneClient() throws IOException {
     Client client = new Client(LongWritable.class, conf);
     InetSocketAddress address = new InetSocketAddress("127.0.0.1", 10);
     try {
@@ -286,7 +350,8 @@ public class TestIPC {
       Class<? extends LongWritable> clientParamClass,
       Class<? extends LongWritable> serverParamClass,
       Class<? extends LongWritable> serverResponseClass,
-      Class<? extends LongWritable> clientResponseClass) throws Exception {
+      Class<? extends LongWritable> clientResponseClass) 
+      throws IOException, InstantiationException, IllegalAccessException {
     
     // start server
     Server server = new TestServer(1, false,
@@ -417,7 +482,7 @@ public class TestIPC {
    * to the client.
    */
   @Test
-  public void testSocketFactoryException() throws Exception {
+  public void testSocketFactoryException() throws IOException {
     SocketFactory mockFactory = mock(SocketFactory.class);
     doThrow(new IOException("Injected fault")).when(mockFactory).createSocket();
     Client client = new Client(LongWritable.class, conf, mockFactory);
@@ -439,7 +504,7 @@ public class TestIPC {
    * HADOOP-7428.
    */
   @Test
-  public void testRTEDuringConnectionSetup() throws Exception {
+  public void testRTEDuringConnectionSetup() throws IOException {
     // Set up a socket factory which returns sockets which
     // throw an RTE when setSoTimeout is called.
     SocketFactory spyFactory = spy(NetUtils.getDefaultSocketFactory(conf));
@@ -480,7 +545,7 @@ public class TestIPC {
   }
   
   @Test
-  public void testIpcTimeout() throws Exception {
+  public void testIpcTimeout() throws IOException {
     // start server
     Server server = new TestServer(1, true);
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
@@ -502,7 +567,7 @@ public class TestIPC {
   }
 
   @Test
-  public void testIpcConnectTimeout() throws Exception {
+  public void testIpcConnectTimeout() throws IOException {
     // start server
     Server server = new TestServer(1, true);
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
@@ -525,7 +590,7 @@ public class TestIPC {
    * Check service class byte in IPC header is correct on wire.
    */
   @Test(timeout=60000)
-  public void testIpcWithServiceClass() throws Exception {
+  public void testIpcWithServiceClass() throws IOException {
     // start server
     Server server = new TestServer(5, false);
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
@@ -552,7 +617,7 @@ public class TestIPC {
    * Make a call from a client and verify if header info is changed in server side
    */
   private void callAndVerify(Server server, InetSocketAddress addr,
-      int serviceClass, boolean noChanged) throws Exception{
+      int serviceClass, boolean noChanged) throws IOException{
     Client client = new Client(LongWritable.class, conf);
 
     client.call(new LongWritable(RANDOM.nextLong()),
@@ -568,7 +633,7 @@ public class TestIPC {
    * and stopping IPC servers.
    */
   @Test(timeout=60000)
-  public void testSocketLeak() throws Exception {
+  public void testSocketLeak() throws IOException {
     Assume.assumeTrue(FD_DIR.exists()); // only run on Linux
 
     long startFds = countOpenFileDescriptors();
@@ -588,31 +653,31 @@ public class TestIPC {
   }
 
   @Test
-  public void testIpcFromHadoop_0_18_13() throws Exception {
+  public void testIpcFromHadoop_0_18_13() throws IOException {
     doIpcVersionTest(NetworkTraces.HADOOP_0_18_3_RPC_DUMP,
         NetworkTraces.RESPONSE_TO_HADOOP_0_18_3_RPC);
   }
   
   @Test
-  public void testIpcFromHadoop0_20_3() throws Exception {
+  public void testIpcFromHadoop0_20_3() throws IOException {
     doIpcVersionTest(NetworkTraces.HADOOP_0_20_3_RPC_DUMP,
         NetworkTraces.RESPONSE_TO_HADOOP_0_20_3_RPC);
   }
   
   @Test
-  public void testIpcFromHadoop0_21_0() throws Exception {
+  public void testIpcFromHadoop0_21_0() throws IOException {
     doIpcVersionTest(NetworkTraces.HADOOP_0_21_0_RPC_DUMP,
         NetworkTraces.RESPONSE_TO_HADOOP_0_21_0_RPC);
   }
   
   @Test
-  public void testHttpGetResponse() throws Exception {
+  public void testHttpGetResponse() throws IOException {
     doIpcVersionTest("GET / HTTP/1.0\r\n\r\n".getBytes(),
         Server.RECEIVED_HTTP_REQ_RESPONSE.getBytes());
   }
   
   @Test
-  public void testConnectionRetriesOnSocketTimeoutExceptions() throws Exception {
+  public void testConnectionRetriesOnSocketTimeoutExceptions() throws IOException {
     Configuration conf = new Configuration();
     // set max retries to 0
     conf.setInt(
@@ -627,8 +692,220 @@ public class TestIPC {
     assertRetriesOnSocketTimeouts(conf, 4);
   }
 
+  private static class CallInfo {
+    int id = RpcConstants.INVALID_CALL_ID;
+    int retry = RpcConstants.INVALID_RETRY_COUNT;
+  }
+
+  /**
+   * Test if
+   * (1) the rpc server uses the call id/retry provided by the rpc client, and
+   * (2) the rpc client receives the same call id/retry from the rpc server.
+   */
+  @Test
+  public void testCallIdAndRetry() throws IOException {
+    final CallInfo info = new CallInfo();
+
+    // Override client to store the call info and check response
+    final Client client = new Client(LongWritable.class, conf) {
+      @Override
+      Call createCall(RpcKind rpcKind, Writable rpcRequest) {
+        final Call call = super.createCall(rpcKind, rpcRequest);
+        info.id = call.id;
+        info.retry = call.retry;
+        return call;
+      }
+      
+      @Override
+      void checkResponse(RpcResponseHeaderProto header) throws IOException {
+        super.checkResponse(header);
+        Assert.assertEquals(info.id, header.getCallId());
+        Assert.assertEquals(info.retry, header.getRetryCount());
+      }
+    };
+
+    // Attach a listener that tracks every call received by the server.
+    final TestServer server = new TestServer(1, false);
+    server.callListener = new Runnable() {
+      @Override
+      public void run() {
+        Assert.assertEquals(info.id, Server.getCallId());
+        Assert.assertEquals(info.retry, Server.getCallRetryCount());
+      }
+    };
+
+    try {
+      InetSocketAddress addr = NetUtils.getConnectAddress(server);
+      server.start();
+      final SerialCaller caller = new SerialCaller(client, addr, 10);
+      caller.run();
+      assertFalse(caller.failed);
+    } finally {
+      client.stop();
+      server.stop();
+    }
+  }
+  
+  /** A dummy protocol */
+  private interface DummyProtocol {
+    public void dummyRun();
+  }
+  
+  /**
+   * Test the retry count when the call is made through a retry proxy.
+   */
+  @Test
+  public void testRetryProxy() throws IOException {
+    final Client client = new Client(LongWritable.class, conf);
+    
+    final TestServer server = new TestServer(1, false);
+    server.callListener = new Runnable() {
+      private int retryCount = 0;
+      @Override
+      public void run() {
+        Assert.assertEquals(retryCount++, Server.getCallRetryCount());
+      }
+    };
+
+    final int totalRetry = 256;
+    DummyProtocol proxy = (DummyProtocol) Proxy.newProxyInstance(
+        DummyProtocol.class.getClassLoader(),
+        new Class[] { DummyProtocol.class }, new TestInvocationHandler(client,
+            server, totalRetry));
+    DummyProtocol retryProxy = (DummyProtocol) RetryProxy.create(
+        DummyProtocol.class, proxy, RetryPolicies.RETRY_FOREVER);
+    
+    try {
+      server.start();
+      retryProxy.dummyRun();
+      Assert.assertEquals(TestInvocationHandler.retry, totalRetry + 1);
+    } finally {
+      Client.setCallIdAndRetryCount(0, 0);
+      client.stop();
+      server.stop();
+    }
+  }
+  
+  /**
+   * Test if the rpc server gets the default retry count (0) from client.
+   */
+  @Test
+  public void testInitialCallRetryCount() throws IOException {
+    // Use a client with the default call id and retry count
+    final Client client = new Client(LongWritable.class, conf);
+
+    // Attach a listener that tracks every call ID received by the server.
+    final TestServer server = new TestServer(1, false);
+    server.callListener = new Runnable() {
+      @Override
+      public void run() {
+        // we have not set the retry count for the client, thus on the server
+        // side we should see retry count as 0
+        Assert.assertEquals(0, Server.getCallRetryCount());
+      }
+    };
+
+    try {
+      InetSocketAddress addr = NetUtils.getConnectAddress(server);
+      server.start();
+      final SerialCaller caller = new SerialCaller(client, addr, 10);
+      caller.run();
+      assertFalse(caller.failed);
+    } finally {
+      client.stop();
+      server.stop();
+    }
+  }
+  
+  /**
+   * Test if the rpc server gets the retry count from client.
+   */
+  @Test
+  public void testCallRetryCount() throws IOException {
+    final int retryCount = 255;
+    // Set a non-default retry count on the client
+    final Client client = new Client(LongWritable.class, conf);
+    Client.setCallIdAndRetryCount(Client.nextCallId(), 255);
+
+    // Attach a listener that tracks every call ID received by the server.
+    final TestServer server = new TestServer(1, false);
+    server.callListener = new Runnable() {
+      @Override
+      public void run() {
+        // the client has set the retry count to 255, so on the server side
+        // we should see the same retry count
+        Assert.assertEquals(retryCount, Server.getCallRetryCount());
+      }
+    };
+
+    try {
+      InetSocketAddress addr = NetUtils.getConnectAddress(server);
+      server.start();
+      final SerialCaller caller = new SerialCaller(client, addr, 10);
+      caller.run();
+      assertFalse(caller.failed);
+    } finally {
+      client.stop();
+      server.stop();
+    }
+  }
+
+  /**
+   * Tests that client generates a unique sequential call ID for each RPC call,
+   * even if multiple threads are using the same client.
+   * @throws InterruptedException
+   */
+  @Test
+  public void testUniqueSequentialCallIds() 
+      throws IOException, InterruptedException {
+    int serverThreads = 10, callerCount = 100, perCallerCallCount = 100;
+    TestServer server = new TestServer(serverThreads, false);
+
+    // Attach a listener that tracks every call ID received by the server.  This
+    // list must be synchronized, because multiple server threads will add to it.
+    final List<Integer> callIds = Collections.synchronizedList(
+      new ArrayList<Integer>());
+    server.callListener = new Runnable() {
+      @Override
+      public void run() {
+        callIds.add(Server.getCallId());
+      }
+    };
+
+    Client client = new Client(LongWritable.class, conf);
+
+    try {
+      InetSocketAddress addr = NetUtils.getConnectAddress(server);
+      server.start();
+      SerialCaller[] callers = new SerialCaller[callerCount];
+      for (int i = 0; i < callerCount; ++i) {
+        callers[i] = new SerialCaller(client, addr, perCallerCallCount);
+        callers[i].start();
+      }
+      for (int i = 0; i < callerCount; ++i) {
+        callers[i].join();
+        assertFalse(callers[i].failed);
+      }
+    } finally {
+      client.stop();
+      server.stop();
+    }
+
+    int expectedCallCount = callerCount * perCallerCallCount;
+    assertEquals(expectedCallCount, callIds.size());
+
+    // It is not guaranteed that the server executes requests in sequential order
+    // of client call ID, so we must sort the call IDs before checking that it
+    // contains every expected value.
+    Collections.sort(callIds);
+    final int startID = callIds.get(0).intValue();
+    for (int i = 0; i < expectedCallCount; ++i) {
+      assertEquals(startID + i, callIds.get(i).intValue());
+    }
+  }
+
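
The retry-count plumbing exercised by the tests above reduces to the client stamping each outgoing call and the server handler reading that stamp back. A minimal sketch of the round trip, reusing the TestServer/callListener helpers of this test class and the Client.call(Writable, InetSocketAddress) overload that appears elsewhere in this patch; the method name and the retry count of 3 are illustrative only:

    @Test
    public void exampleRetryCountPropagation() throws IOException {
      final Client client = new Client(LongWritable.class, conf);
      // Stamp the next calls from this thread with a fresh call id and a retry count of 3.
      Client.setCallIdAndRetryCount(Client.nextCallId(), 3);
      final TestServer server = new TestServer(1, false);
      server.callListener = new Runnable() {
        @Override
        public void run() {
          // The handler thread sees exactly what the client stamped.
          Assert.assertEquals(3, Server.getCallRetryCount());
        }
      };
      try {
        server.start();
        client.call(new LongWritable(42), NetUtils.getConnectAddress(server));
      } finally {
        Client.setCallIdAndRetryCount(0, 0);
        client.stop();
        server.stop();
      }
    }
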
   private void assertRetriesOnSocketTimeouts(Configuration conf,
   private void assertRetriesOnSocketTimeouts(Configuration conf,
-      int maxTimeoutRetries) throws IOException, InterruptedException {
+      int maxTimeoutRetries) throws IOException {
     SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
     SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
     doThrow(new ConnectTimeoutException("fake")).when(mockFactory).createSocket();
     doThrow(new ConnectTimeoutException("fake")).when(mockFactory).createSocket();
     Client client = new Client(IntWritable.class, conf, mockFactory);
     Client client = new Client(IntWritable.class, conf, mockFactory);
@@ -645,7 +922,7 @@ public class TestIPC {
   
   
   private void doIpcVersionTest(
   private void doIpcVersionTest(
       byte[] requestData,
       byte[] requestData,
-      byte[] expectedResponse) throws Exception {
+      byte[] expectedResponse) throws IOException {
     Server server = new TestServer(1, true);
     Server server = new TestServer(1, true);
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
     server.start();
     server.start();
@@ -718,7 +995,7 @@ public class TestIPC {
       "6f 6e 67 00 00 00 00 00  00 00 0a                ong..... ...     \n");
       "6f 6e 67 00 00 00 00 00  00 00 0a                ong..... ...     \n");
 
 
     final static String HADOOP0_18_ERROR_MSG =
     final static String HADOOP0_18_ERROR_MSG =
-      "Server IPC version " + Server.CURRENT_VERSION +
+      "Server IPC version " + RpcConstants.CURRENT_VERSION +
       " cannot communicate with client version 2";
       " cannot communicate with client version 2";
     
     
     /**
     /**
@@ -757,7 +1034,7 @@ public class TestIPC {
       "00 14                                            ..               \n");
       "00 14                                            ..               \n");
 
 
     final static String HADOOP0_20_ERROR_MSG =
     final static String HADOOP0_20_ERROR_MSG =
-      "Server IPC version " + Server.CURRENT_VERSION +
+      "Server IPC version " + RpcConstants.CURRENT_VERSION +
       " cannot communicate with client version 3";
       " cannot communicate with client version 3";
     
     
 
 
@@ -772,7 +1049,7 @@ public class TestIPC {
     
     
     
     
     final static String HADOOP0_21_ERROR_MSG =
     final static String HADOOP0_21_ERROR_MSG =
-      "Server IPC version " + Server.CURRENT_VERSION +
+      "Server IPC version " + RpcConstants.CURRENT_VERSION +
       " cannot communicate with client version 4";
       " cannot communicate with client version 4";
 
 
     final static byte[] HADOOP_0_21_0_RPC_DUMP =
     final static byte[] HADOOP_0_21_0_RPC_DUMP =

+ 8 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java

@@ -105,17 +105,18 @@ public class TestIPCServerResponder extends TestCase {
           byte[] bytes = new byte[byteSize];
           byte[] bytes = new byte[byteSize];
           System.arraycopy(BYTES, 0, bytes, 0, byteSize);
           System.arraycopy(BYTES, 0, bytes, 0, byteSize);
           Writable param = new BytesWritable(bytes);
           Writable param = new BytesWritable(bytes);
-          Writable value = client.call(param, address);
+          client.call(param, address);
           Thread.sleep(RANDOM.nextInt(20));
           Thread.sleep(RANDOM.nextInt(20));
         } catch (Exception e) {
         } catch (Exception e) {
-          LOG.fatal("Caught: " + e);
+          LOG.fatal("Caught Exception", e);
           failed = true;
           failed = true;
         }
         }
       }
       }
     }
     }
   }
   }
 
 
-  public void testResponseBuffer() throws Exception {
+  public void testResponseBuffer() 
+      throws IOException, InterruptedException {
     Server.INITIAL_RESP_BUF_SIZE = 1;
     Server.INITIAL_RESP_BUF_SIZE = 1;
     conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_MAX_RESPONSE_SIZE_KEY,
     conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_MAX_RESPONSE_SIZE_KEY,
                 1);
                 1);
@@ -123,7 +124,8 @@ public class TestIPCServerResponder extends TestCase {
     conf = new Configuration(); // reset configuration
     conf = new Configuration(); // reset configuration
   }
   }
 
 
-  public void testServerResponder() throws Exception {
+  public void testServerResponder()
+      throws IOException, InterruptedException {
     testServerResponder(10, true, 1, 10, 200);
     testServerResponder(10, true, 1, 10, 200);
   }
   }
 
 
@@ -131,7 +133,8 @@ public class TestIPCServerResponder extends TestCase {
                                   final boolean handlerSleep, 
                                   final boolean handlerSleep, 
                                   final int clientCount,
                                   final int clientCount,
                                   final int callerCount,
                                   final int callerCount,
-                                  final int callCount) throws Exception {
+                                  final int callCount) throws IOException,
+                                  InterruptedException {
     Server server = new TestServer(handlerCount, handlerSleep);
     Server server = new TestServer(handlerCount, handlerSleep);
     server.start();
     server.start();
 
 

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestMultipleProtocolServer.java

@@ -64,7 +64,7 @@ public class TestMultipleProtocolServer {
     public static final long versionID = 0L;
     public static final long versionID = 0L;
     void hello() throws IOException;
     void hello() throws IOException;
   }
   }
-  interface Bar extends Mixin, VersionedProtocol {
+  interface Bar extends Mixin {
     public static final long versionID = 0L;
     public static final long versionID = 0L;
     int echo(int i) throws IOException;
     int echo(int i) throws IOException;
   }
   }

+ 32 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestProtoBufRpc.java

@@ -24,7 +24,9 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
 import java.net.URISyntaxException;
 import java.net.URISyntaxException;
 
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
 import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
@@ -70,6 +72,10 @@ public class TestProtoBufRpc {
     @Override
     @Override
     public EmptyResponseProto ping(RpcController unused,
     public EmptyResponseProto ping(RpcController unused,
         EmptyRequestProto request) throws ServiceException {
         EmptyRequestProto request) throws ServiceException {
+      // Ensure clientId is received
+      byte[] clientId = Server.getClientId();
+      Assert.assertNotNull(Server.getClientId());
+      Assert.assertEquals(16, clientId.length);
       return EmptyResponseProto.newBuilder().build();
       return EmptyResponseProto.newBuilder().build();
     }
     }
 
 
@@ -113,6 +119,7 @@ public class TestProtoBufRpc {
   @Before
   @Before
   public  void setUp() throws IOException { // Setup server for both protocols
   public  void setUp() throws IOException { // Setup server for both protocols
     conf = new Configuration();
     conf = new Configuration();
+    conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024);
     // Set RPC engine to protobuf RPC engine
     // Set RPC engine to protobuf RPC engine
     RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
     RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
 
 
@@ -144,10 +151,8 @@ public class TestProtoBufRpc {
 
 
   private static TestRpcService getClient() throws IOException {
   private static TestRpcService getClient() throws IOException {
     // Set RPC engine to protobuf RPC engine
     // Set RPC engine to protobuf RPC engine
-    RPC.setProtocolEngine(conf, TestRpcService.class,
-        ProtobufRpcEngine.class);
-        return RPC.getProxy(TestRpcService.class, 0, addr,
-        conf);
+    RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
+    return RPC.getProxy(TestRpcService.class, 0, addr, conf);
   }
   }
   
   
   private static TestRpcService2 getClient2() throws IOException {
   private static TestRpcService2 getClient2() throws IOException {
@@ -184,6 +189,7 @@ public class TestProtoBufRpc {
       RemoteException re = (RemoteException)e.getCause();
       RemoteException re = (RemoteException)e.getCause();
       RpcServerException rse = (RpcServerException) re
       RpcServerException rse = (RpcServerException) re
           .unwrapRemoteException(RpcServerException.class);
           .unwrapRemoteException(RpcServerException.class);
+      Assert.assertNotNull(rse);
       Assert.assertTrue(re.getErrorCode().equals(
       Assert.assertTrue(re.getErrorCode().equals(
           RpcErrorCodeProto.ERROR_RPC_SERVER));
           RpcErrorCodeProto.ERROR_RPC_SERVER));
     }
     }
@@ -230,4 +236,25 @@ public class TestProtoBufRpc {
           re.getErrorCode().equals(RpcErrorCodeProto.ERROR_APPLICATION));
           re.getErrorCode().equals(RpcErrorCodeProto.ERROR_APPLICATION));
     }
     }
   }
   }
-}
+  
+  @Test(timeout=6000)
+  public void testExtraLongRpc() throws Exception {
+    TestRpcService2 client = getClient2();
+    final String shortString = StringUtils.repeat("X", 4);
+    EchoRequestProto echoRequest = EchoRequestProto.newBuilder()
+        .setMessage(shortString).build();
+    // short message goes through
+    EchoResponseProto echoResponse = client.echo2(null, echoRequest);
+    Assert.assertEquals(shortString, echoResponse.getMessage());
+    
+    final String longString = StringUtils.repeat("X", 4096);
+    echoRequest = EchoRequestProto.newBuilder()
+        .setMessage(longString).build();
+    try {
+      echoResponse = client.echo2(null, echoRequest);
+      Assert.fail("expected extra-long RPC to fail");
+    } catch (ServiceException se) {
+      // expected
+    }
+  }
+}
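
The new testExtraLongRpc case depends on the request-size limit configured in setUp(). A minimal sketch of that knob in isolation; the 1024-byte cap mirrors the test, and IPC_MAXIMUM_DATA_LENGTH is the CommonConfigurationKeys constant this patch starts using:

    // Cap the size of serialized RPC requests the server will accept.
    Configuration conf = new Configuration();
    conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024);
    // A server built from this conf rejects larger requests; on the client the
    // failure surfaces as a ServiceException (wrapping a RemoteException),
    // which is exactly what the 4096-character echo above asserts.
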

+ 36 - 38
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java

@@ -94,7 +94,7 @@ public class TestRPC {
 
 
   int datasize = 1024*100;
   int datasize = 1024*100;
   int numThreads = 50;
   int numThreads = 50;
-	
+
   public interface TestProtocol extends VersionedProtocol {
   public interface TestProtocol extends VersionedProtocol {
     public static final long versionID = 1L;
     public static final long versionID = 1L;
     
     
@@ -323,7 +323,7 @@ public class TestRPC {
   }
   }
   
   
   @Test
   @Test
-  public void testConfRpc() throws Exception {
+  public void testConfRpc() throws IOException {
     Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
     Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
         .setNumHandlers(1).setVerbose(false).build();
         .setNumHandlers(1).setVerbose(false).build();
@@ -350,7 +350,7 @@ public class TestRPC {
   }
   }
 
 
   @Test
   @Test
-  public void testProxyAddress() throws Exception {
+  public void testProxyAddress() throws IOException {
     Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
     Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).build();
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).build();
     TestProtocol proxy = null;
     TestProtocol proxy = null;
@@ -360,8 +360,7 @@ public class TestRPC {
       InetSocketAddress addr = NetUtils.getConnectAddress(server);
       InetSocketAddress addr = NetUtils.getConnectAddress(server);
 
 
       // create a client
       // create a client
-      proxy = (TestProtocol)RPC.getProxy(
-          TestProtocol.class, TestProtocol.versionID, addr, conf);
+      proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);
       
       
       assertEquals(addr, RPC.getServerAddress(proxy));
       assertEquals(addr, RPC.getServerAddress(proxy));
     } finally {
     } finally {
@@ -373,7 +372,7 @@ public class TestRPC {
   }
   }
 
 
   @Test
   @Test
-  public void testSlowRpc() throws Exception {
+  public void testSlowRpc() throws IOException {
     System.out.println("Testing Slow RPC");
     System.out.println("Testing Slow RPC");
     // create a server with two handlers
     // create a server with two handlers
     Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
     Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
@@ -388,8 +387,7 @@ public class TestRPC {
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
 
 
     // create a client
     // create a client
-    proxy = (TestProtocol)RPC.getProxy(
-        TestProtocol.class, TestProtocol.versionID, addr, conf);
+    proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);
 
 
     SlowRPC slowrpc = new SlowRPC(proxy);
     SlowRPC slowrpc = new SlowRPC(proxy);
     Thread thread = new Thread(slowrpc, "SlowRPC");
     Thread thread = new Thread(slowrpc, "SlowRPC");
@@ -420,11 +418,11 @@ public class TestRPC {
   }
   }
   
   
   @Test
   @Test
-  public void testCalls() throws Exception {
+  public void testCalls() throws IOException {
     testCallsInternal(conf);
     testCallsInternal(conf);
   }
   }
   
   
-  private void testCallsInternal(Configuration conf) throws Exception {
+  private void testCallsInternal(Configuration conf) throws IOException {
     Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
     Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).build();
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).build();
     TestProtocol proxy = null;
     TestProtocol proxy = null;
@@ -432,8 +430,7 @@ public class TestRPC {
     server.start();
     server.start();
 
 
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
-    proxy = (TestProtocol)RPC.getProxy(
-        TestProtocol.class, TestProtocol.versionID, addr, conf);
+    proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);
       
       
     proxy.ping();
     proxy.ping();
 
 
@@ -543,7 +540,7 @@ public class TestRPC {
     
     
   }
   }
   
   
-  private void doRPCs(Configuration conf, boolean expectFailure) throws Exception {
+  private void doRPCs(Configuration conf, boolean expectFailure) throws IOException {
     Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
     Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
         .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
         .setNumHandlers(5).setVerbose(true).build();
         .setNumHandlers(5).setVerbose(true).build();
@@ -557,8 +554,7 @@ public class TestRPC {
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
     
     
     try {
     try {
-      proxy = (TestProtocol)RPC.getProxy(
-          TestProtocol.class, TestProtocol.versionID, addr, conf);
+      proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);
       proxy.ping();
       proxy.ping();
 
 
       if (expectFailure) {
       if (expectFailure) {
@@ -603,7 +599,7 @@ public class TestRPC {
   }
   }
   
   
   @Test
   @Test
-  public void testAuthorization() throws Exception {
+  public void testAuthorization() throws IOException {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
     conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
         true);
         true);
@@ -630,7 +626,7 @@ public class TestRPC {
    * Switch off setting socketTimeout values on RPC sockets.
    * Switch off setting socketTimeout values on RPC sockets.
    * Verify that RPC calls still work ok.
    * Verify that RPC calls still work ok.
    */
    */
-  public void testNoPings() throws Exception {
+  public void testNoPings() throws IOException {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     
     
     conf.setBoolean("ipc.client.ping", false);
     conf.setBoolean("ipc.client.ping", false);
@@ -642,10 +638,10 @@ public class TestRPC {
 
 
   /**
   /**
    * Test stopping a non-registered proxy
    * Test stopping a non-registered proxy
-   * @throws Exception
+   * @throws IOException
    */
    */
   @Test(expected=HadoopIllegalArgumentException.class)
   @Test(expected=HadoopIllegalArgumentException.class)
-  public void testStopNonRegisteredProxy() throws Exception {
+  public void testStopNonRegisteredProxy() throws IOException {
     RPC.stopProxy(null);
     RPC.stopProxy(null);
   }
   }
 
 
@@ -654,13 +650,13 @@ public class TestRPC {
    * be stopped without error.
    * be stopped without error.
    */
    */
   @Test
   @Test
-  public void testStopMockObject() throws Exception {
+  public void testStopMockObject() throws IOException {
     RPC.stopProxy(MockitoUtil.mockProtocol(TestProtocol.class)); 
     RPC.stopProxy(MockitoUtil.mockProtocol(TestProtocol.class)); 
   }
   }
   
   
   @Test
   @Test
   public void testStopProxy() throws IOException {
   public void testStopProxy() throws IOException {
-    StoppedProtocol proxy = (StoppedProtocol) RPC.getProxy(StoppedProtocol.class,
+    StoppedProtocol proxy = RPC.getProxy(StoppedProtocol.class,
         StoppedProtocol.versionID, null, conf);
         StoppedProtocol.versionID, null, conf);
     StoppedInvocationHandler invocationHandler = (StoppedInvocationHandler)
     StoppedInvocationHandler invocationHandler = (StoppedInvocationHandler)
         Proxy.getInvocationHandler(proxy);
         Proxy.getInvocationHandler(proxy);
@@ -671,7 +667,7 @@ public class TestRPC {
   
   
   @Test
   @Test
   public void testWrappedStopProxy() throws IOException {
   public void testWrappedStopProxy() throws IOException {
-    StoppedProtocol wrappedProxy = (StoppedProtocol) RPC.getProxy(StoppedProtocol.class,
+    StoppedProtocol wrappedProxy = RPC.getProxy(StoppedProtocol.class,
         StoppedProtocol.versionID, null, conf);
         StoppedProtocol.versionID, null, conf);
     StoppedInvocationHandler invocationHandler = (StoppedInvocationHandler)
     StoppedInvocationHandler invocationHandler = (StoppedInvocationHandler)
         Proxy.getInvocationHandler(wrappedProxy);
         Proxy.getInvocationHandler(wrappedProxy);
@@ -685,7 +681,7 @@ public class TestRPC {
   }
   }
   
   
   @Test
   @Test
-  public void testErrorMsgForInsecureClient() throws Exception {
+  public void testErrorMsgForInsecureClient() throws IOException {
     Configuration serverConf = new Configuration(conf);
     Configuration serverConf = new Configuration(conf);
     SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,
     SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,
                                          serverConf);
                                          serverConf);
@@ -701,8 +697,7 @@ public class TestRPC {
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
     TestProtocol proxy = null;
     TestProtocol proxy = null;
     try {
     try {
-      proxy = (TestProtocol) RPC.getProxy(TestProtocol.class,
-          TestProtocol.versionID, addr, conf);
+      proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);
       proxy.echo("");
       proxy.echo("");
     } catch (RemoteException e) {
     } catch (RemoteException e) {
       LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
       LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
@@ -730,7 +725,7 @@ public class TestRPC {
     proxy = null;
     proxy = null;
     try {
     try {
       UserGroupInformation.setConfiguration(conf);
       UserGroupInformation.setConfiguration(conf);
-      proxy = (TestProtocol) RPC.getProxy(TestProtocol.class,
+      proxy = RPC.getProxy(TestProtocol.class,
           TestProtocol.versionID, mulitServerAddr, conf);
           TestProtocol.versionID, mulitServerAddr, conf);
       proxy.echo("");
       proxy.echo("");
     } catch (RemoteException e) {
     } catch (RemoteException e) {
@@ -771,7 +766,7 @@ public class TestRPC {
    * Test that server.stop() properly stops all threads
    * Test that server.stop() properly stops all threads
    */
    */
   @Test
   @Test
-  public void testStopsAllThreads() throws Exception {
+  public void testStopsAllThreads() throws IOException, InterruptedException {
     int threadsBefore = countThreads("Server$Listener$Reader");
     int threadsBefore = countThreads("Server$Listener$Reader");
     assertEquals("Expect no Reader threads running before test",
     assertEquals("Expect no Reader threads running before test",
       0, threadsBefore);
       0, threadsBefore);
@@ -802,7 +797,7 @@ public class TestRPC {
   }
   }
   
   
   @Test
   @Test
-  public void testRPCBuilder() throws Exception {
+  public void testRPCBuilder() throws IOException {
     // Test mandatory field conf
     // Test mandatory field conf
     try {
     try {
       new RPC.Builder(null).setProtocol(TestProtocol.class)
       new RPC.Builder(null).setProtocol(TestProtocol.class)
@@ -838,15 +833,17 @@ public class TestRPC {
   }
   }
   
   
   @Test(timeout=90000)
   @Test(timeout=90000)
-  public void testRPCInterruptedSimple() throws Exception {
+  public void testRPCInterruptedSimple() throws IOException {
     final Configuration conf = new Configuration();
     final Configuration conf = new Configuration();
-    Server server = RPC.getServer(
-      TestProtocol.class, new TestImpl(), ADDRESS, 0, 5, true, conf, null
-    );
+    Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
+        .setInstance(new TestImpl()).setBindAddress(ADDRESS)
+        .setPort(0).setNumHandlers(5).setVerbose(true)
+        .setSecretManager(null).build();
+    
     server.start();
     server.start();
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
 
 
-    final TestProtocol proxy = (TestProtocol) RPC.getProxy(
+    final TestProtocol proxy = RPC.getProxy(
         TestProtocol.class, TestProtocol.versionID, addr, conf);
         TestProtocol.class, TestProtocol.versionID, addr, conf);
     // Connect to the server
     // Connect to the server
     proxy.ping();
     proxy.ping();
@@ -867,9 +864,10 @@ public class TestRPC {
   @Test(timeout=30000)
   @Test(timeout=30000)
   public void testRPCInterrupted() throws IOException, InterruptedException {
   public void testRPCInterrupted() throws IOException, InterruptedException {
     final Configuration conf = new Configuration();
     final Configuration conf = new Configuration();
-    Server server = RPC.getServer(
-      TestProtocol.class, new TestImpl(), ADDRESS, 0, 5, true, conf, null
-    );
+    Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
+        .setInstance(new TestImpl()).setBindAddress(ADDRESS)
+        .setPort(0).setNumHandlers(5).setVerbose(true)
+        .setSecretManager(null).build();
 
 
     server.start();
     server.start();
 
 
@@ -883,7 +881,7 @@ public class TestRPC {
     
     
     for (int i = 0; i < numConcurrentRPC; i++) {
     for (int i = 0; i < numConcurrentRPC; i++) {
       final int num = i;
       final int num = i;
-      final TestProtocol proxy = (TestProtocol) RPC.getProxy(
+      final TestProtocol proxy = RPC.getProxy(
       TestProtocol.class, TestProtocol.versionID, addr, conf);
       TestProtocol.class, TestProtocol.versionID, addr, conf);
       Thread rpcThread = new Thread(new Runnable() {
       Thread rpcThread = new Thread(new Runnable() {
         @Override
         @Override
@@ -927,7 +925,7 @@ public class TestRPC {
     assertTrue("rpc got exception " + error.get(), error.get() == null);
     assertTrue("rpc got exception " + error.get(), error.get() == null);
   }
   }
 
 
-  public static void main(String[] args) throws Exception {
+  public static void main(String[] args) throws IOException {
     new TestRPC().testCallsInternal(conf);
     new TestRPC().testCallsInternal(conf);
 
 
   }
   }
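
Several of the edits above replace the removed RPC.getServer(...) overloads with the builder introduced in this release, and drop the cast now that RPC.getProxy is generic. A minimal sketch of the replacement pattern, using the TestProtocol/TestImpl fixtures of this class (ADDRESS, the handler count and verbosity are the same illustrative values the tests use):

    // Old style (replaced throughout this patch):
    //   Server server = RPC.getServer(TestProtocol.class, new TestImpl(),
    //       ADDRESS, 0, 5, true, conf, null);
    // New style:
    Server server = new RPC.Builder(conf)
        .setProtocol(TestProtocol.class)
        .setInstance(new TestImpl())
        .setBindAddress(ADDRESS)
        .setPort(0)
        .setNumHandlers(5)
        .setVerbose(true)
        .setSecretManager(null)
        .build();
    server.start();
    // RPC.getProxy is generic, so no cast is needed on the client side:
    TestProtocol proxy = RPC.getProxy(TestProtocol.class,
        TestProtocol.versionID, NetUtils.getConnectAddress(server), conf);
    proxy.ping();
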

+ 3 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPCCompatibility.java

@@ -57,7 +57,7 @@ public class TestRPCCompatibility {
     void ping() throws IOException;    
     void ping() throws IOException;    
   }
   }
   
   
-  public interface TestProtocol1 extends VersionedProtocol, TestProtocol0 {
+  public interface TestProtocol1 extends TestProtocol0 {
     String echo(String value) throws IOException;
     String echo(String value) throws IOException;
   }
   }
 
 
@@ -123,7 +123,7 @@ public class TestRPCCompatibility {
   }
   }
   
   
   @After
   @After
-  public void tearDown() throws IOException {
+  public void tearDown() {
     if (proxy != null) {
     if (proxy != null) {
       RPC.stopProxy(proxy.getProxy());
       RPC.stopProxy(proxy.getProxy());
       proxy = null;
       proxy = null;
@@ -278,7 +278,7 @@ System.out.println("echo int is NOT supported");
         TestProtocol3.class.getMethod("echo_alias", int.class));
         TestProtocol3.class.getMethod("echo_alias", int.class));
     assertFalse(intEchoHash == intEchoHashAlias);
     assertFalse(intEchoHash == intEchoHashAlias);
     
     
-    // Make sure that methods with the same returninig type and method name but
+    // Make sure that methods with the same returning type and method name but
     // larger number of parameter types have different hash code
     // larger number of parameter types have different hash code
     int intEchoHash2 = ProtocolSignature.getFingerprint(
     int intEchoHash2 = ProtocolSignature.getFingerprint(
         TestProtocol3.class.getMethod("echo", int.class, int.class));
         TestProtocol3.class.getMethod("echo", int.class, int.class));
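
The fingerprint assertions around the comment fix above hinge on ProtocolSignature.getFingerprint hashing the full method signature, so overloads and renamed aliases get distinct values. A small sketch, assuming the TestProtocol3 interface declared earlier in this test class exposes echo(int), echo(int, int) and echo_alias(int) as the surrounding assertions imply:

    void fingerprintExample() throws NoSuchMethodException {
      int echoInt = ProtocolSignature.getFingerprint(
          TestProtocol3.class.getMethod("echo", int.class));
      int echoIntInt = ProtocolSignature.getFingerprint(
          TestProtocol3.class.getMethod("echo", int.class, int.class));
      int echoAlias = ProtocolSignature.getFingerprint(
          TestProtocol3.class.getMethod("echo_alias", int.class));
      // Same name, more parameters -> different fingerprint.
      assertFalse(echoInt == echoIntInt);
      // Different name, same parameters -> different fingerprint.
      assertFalse(echoInt == echoAlias);
    }
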

+ 215 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRetryCache.java

@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.ipc.RPC.RpcKind;
+import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests for {@link RetryCache}
+ */
+public class TestRetryCache {
+  private static final byte[] CLIENT_ID = ClientId.getClientId();
+  private static int callId = 100;
+  private static final Random r = new Random();
+  private static final TestServer testServer = new TestServer();
+
+  @Before
+  public void setup() {
+    testServer.resetCounters();
+  }
+
+  static class TestServer {
+    AtomicInteger retryCount = new AtomicInteger();
+    AtomicInteger operationCount = new AtomicInteger();
+    private RetryCache retryCache = new RetryCache("TestRetryCache", 1,
+        100 * 1000 * 1000 * 1000L);
+
+    /**
+     * A server method implemented using {@link RetryCache}.
+     * 
+     * @param input returned (echoed back) if {@code success} is true.
+     * @param failureOutput returned on failure, if {@code success} is false.
+     * @param methodTime time taken by the operation. By passing smaller/larger
+     *          value one can simulate an operation that takes short/long time.
+     * @param success whether this operation completes successfully or not
+     * @return the input parameter {@code input} if {@code success} is true,
+     *         otherwise {@code failureOutput}.
+     */
+    int echo(int input, int failureOutput, long methodTime, boolean success)
+        throws InterruptedException {
+      CacheEntryWithPayload entry = RetryCache.waitForCompletion(retryCache,
+          null);
+      if (entry != null && entry.isSuccess()) {
+        System.out.println("retryCount incremented " + retryCount.get());
+        retryCount.incrementAndGet();
+        return (Integer) entry.getPayload();
+      }
+      try {
+        operationCount.incrementAndGet();
+        if (methodTime > 0) {
+          Thread.sleep(methodTime);
+        }
+      } finally {
+        RetryCache.setState(entry, success, input);
+      }
+      return success ? input : failureOutput;
+    }
+
+    void resetCounters() {
+      retryCount.set(0);
+      operationCount.set(0);
+    }
+  }
+
+  public static Server.Call newCall() {
+    return new Server.Call(++callId, 1, null, null,
+        RpcKind.RPC_PROTOCOL_BUFFER, CLIENT_ID);
+  }
+
+  /**
+   * This simulates a long-running server operation that is retried. Multiple
+   * threads start an operation that takes a long time and finally succeeds. The retries in this
+   * case end up waiting for the current operation to complete. All the retries
+   * then complete based on the entry in the retry cache.
+   */
+  @Test
+  public void testLongOperationsSuccessful() throws Exception {
+    // Test long successful operations
+    // There is no entry in cache expected when the first operation starts
+    testOperations(r.nextInt(), 100, 20, true, false, newCall());
+  }
+
+  /**
+   * This simulates a long-running server operation. Multiple threads start an
+   * operation that takes a long time and finally fails. The retries in this case
+   * end up waiting for the current operation to complete. All the retries end
+   * up performing the operation again.
+   */
+  @Test
+  public void testLongOperationsFailure() throws Exception {
+    // Test long failed operations
+    // There is no entry in cache expected when the first operation starts
+    testOperations(r.nextInt(), 100, 20, false, false, newCall());
+  }
+
+  /**
+   * This simulates a short server operation. Multiple threads start an
+   * operation that takes a very short time and finally succeeds. The retries in
+   * this case do not wait long for the current operation to complete. All the
+   * retries then complete based on the entry in the retry cache.
+   */
+  @Test
+  public void testShortOperationsSuccess() throws Exception {
+    // Test short operations
+    // There is no entry in cache expected when the first operation starts
+    testOperations(r.nextInt(), 25, 0, false, false, newCall());
+  }
+
+  /**
+   * This simulates a short server operation. Multiple threads start an
+   * operation that takes a short time and finally fails. The retries in this case
+   * do not wait for the current operation to complete. All the retries end up
+   * performing the operation again.
+   */
+  @Test
+  public void testShortOperationsFailure() throws Exception {
+    // Test short failed operations
+    // There is no entry in cache expected when the first operation starts
+    testOperations(r.nextInt(), 25, 0, false, false, newCall());
+  }
+
+  @Test
+  public void testRetryAfterSuccess() throws Exception {
+    // Previous operation successfully completed
+    Server.Call call = newCall();
+    int input = r.nextInt();
+    Server.getCurCall().set(call);
+    testServer.echo(input, input + 1, 5, true);
+    testOperations(input, 25, 0, true, true, call);
+  }
+
+  @Test
+  public void testRetryAfterFailure() throws Exception {
+    // Previous operation failed
+    Server.Call call = newCall();
+    int input = r.nextInt();
+    Server.getCurCall().set(call);
+    testServer.echo(input, input + 1, 5, false);
+    testOperations(input, 25, 0, false, true, call);
+  }
+
+  public void testOperations(final int input, final int numberOfThreads,
+      final int pause, final boolean success, final boolean attemptedBefore,
+      final Server.Call call) throws InterruptedException, ExecutionException {
+    final int failureOutput = input + 1;
+    ExecutorService executorService = Executors
+        .newFixedThreadPool(numberOfThreads);
+    List<Future<Integer>> list = new ArrayList<Future<Integer>>();
+    for (int i = 0; i < numberOfThreads; i++) {
+      Callable<Integer> worker = new Callable<Integer>() {
+        @Override
+        public Integer call() throws Exception {
+          Server.getCurCall().set(call);
+          Assert.assertEquals(Server.getCurCall().get(), call);
+          int randomPause = pause == 0 ? pause : r.nextInt(pause);
+          return testServer.echo(input, failureOutput, randomPause, success);
+        }
+      };
+      Future<Integer> submit = executorService.submit(worker);
+      list.add(submit);
+    }
+
+    Assert.assertEquals(numberOfThreads, list.size());
+    for (Future<Integer> future : list) {
+      if (success) {
+        Assert.assertEquals(input, future.get().intValue());
+      } else {
+        Assert.assertEquals(failureOutput, future.get().intValue());
+      }
+    }
+
+    if (success) {
+      // If the operation was successful, all the subsequent operations
+      // by other threads should be retries. Operation count should be 1.
+      int retries = numberOfThreads + (attemptedBefore ? 0 : -1);
+      Assert.assertEquals(1, testServer.operationCount.get());
+      Assert.assertEquals(retries, testServer.retryCount.get());
+    } else {
+      // If the operation failed, all the subsequent operations
+      // should execute once more, hence the retry count should be 0 and
+      // operation count should be the number of tries
+      int opCount = numberOfThreads + (attemptedBefore ? 1 : 0);
+      Assert.assertEquals(opCount, testServer.operationCount.get());
+      Assert.assertEquals(0, testServer.retryCount.get());
+    }
+  }
+}
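
The TestServer.echo method above is a compact template for how a non-idempotent server operation is expected to use the new RetryCache. A generalized sketch of that idiom; doCreate, the cache name and the expiration value are hypothetical, everything else is the API introduced by this patch:

    private final RetryCache retryCache =
        new RetryCache("Example", 1, 10L * 60 * 1000 * 1000 * 1000L); // expiration, same units as the test above

    long createSomething(String name) throws IOException {
      CacheEntryWithPayload entry = RetryCache.waitForCompletion(retryCache, null);
      if (entry != null && entry.isSuccess()) {
        // A retried call that already completed: replay the recorded result
        // instead of re-executing the operation.
        return (Long) entry.getPayload();
      }
      boolean success = false;
      long id = -1;
      try {
        id = doCreate(name);   // hypothetical non-idempotent work
        success = true;
        return id;
      } finally {
        // Record the outcome and payload so retries with the same call id
        // and client id are served from the cache.
        RetryCache.setState(entry, success, id);
      }
    }
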

+ 170 - 124
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java

@@ -18,14 +18,9 @@
 
 
 package org.apache.hadoop.ipc;
 package org.apache.hadoop.ipc;
 
 
-import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS;
-import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.SIMPLE;
-import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.TOKEN;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.*;
+import static org.junit.Assert.*;
 
 
 import java.io.DataInput;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.DataOutput;
@@ -51,6 +46,7 @@ import javax.security.sasl.SaslServer;
 
 
 import junit.framework.Assert;
 import junit.framework.Assert;
 
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -103,6 +99,13 @@ public class TestSaslRPC {
   static Boolean forceSecretManager = null;
   static Boolean forceSecretManager = null;
   static Boolean clientFallBackToSimpleAllowed = true;
   static Boolean clientFallBackToSimpleAllowed = true;
   
   
+  static enum UseToken {
+    NONE(),
+    VALID(),
+    INVALID(),
+    OTHER();
+  }
+  
   @BeforeClass
   @BeforeClass
   public static void setupKerb() {
   public static void setupKerb() {
     System.setProperty("java.security.krb5.kdc", "");
     System.setProperty("java.security.krb5.kdc", "");
@@ -113,9 +116,11 @@ public class TestSaslRPC {
   @Before
   @Before
   public void setup() {
   public void setup() {
     conf = new Configuration();
     conf = new Configuration();
-    SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
+    conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString());
     UserGroupInformation.setConfiguration(conf);
     UserGroupInformation.setConfiguration(conf);
     enableSecretManager = null;
     enableSecretManager = null;
+    forceSecretManager = null;
+    clientFallBackToSimpleAllowed = true;
   }
   }
 
 
   static {
   static {
@@ -312,7 +317,7 @@ public class TestSaslRPC {
       doDigestRpc(server, sm);
       doDigestRpc(server, sm);
     } catch (RemoteException e) {
     } catch (RemoteException e) {
       LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
       LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
-      assertTrue(ERROR_MESSAGE.equals(e.getLocalizedMessage()));
+      assertEquals(ERROR_MESSAGE, e.getLocalizedMessage());
       assertTrue(e.unwrapRemoteException() instanceof InvalidToken);
       assertTrue(e.unwrapRemoteException() instanceof InvalidToken);
       succeeded = true;
       succeeded = true;
     }
     }
@@ -334,7 +339,7 @@ public class TestSaslRPC {
 
 
     TestSaslProtocol proxy = null;
     TestSaslProtocol proxy = null;
     try {
     try {
-      proxy = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
+      proxy = RPC.getProxy(TestSaslProtocol.class,
           TestSaslProtocol.versionID, addr, conf);
           TestSaslProtocol.versionID, addr, conf);
       //QOP must be auth
       //QOP must be auth
       Assert.assertEquals(SaslRpcServer.SASL_PROPS.get(Sasl.QOP), "auth");
       Assert.assertEquals(SaslRpcServer.SASL_PROPS.get(Sasl.QOP), "auth");
@@ -367,28 +372,6 @@ public class TestSaslRPC {
     assertEquals(0, remoteId.getPingInterval());
     assertEquals(0, remoteId.getPingInterval());
   }
   }
   
   
-  @Test
-  public void testGetRemotePrincipal() throws Exception {
-    try {
-      Configuration newConf = new Configuration(conf);
-      newConf.set(SERVER_PRINCIPAL_KEY, SERVER_PRINCIPAL_1);
-      ConnectionId remoteId = ConnectionId.getConnectionId(
-          new InetSocketAddress(0), TestSaslProtocol.class, null, 0, newConf);
-      assertEquals(SERVER_PRINCIPAL_1, remoteId.getServerPrincipal());
-      // this following test needs security to be off
-      SecurityUtil.setAuthenticationMethod(SIMPLE, newConf);
-      UserGroupInformation.setConfiguration(newConf);
-      remoteId = ConnectionId.getConnectionId(new InetSocketAddress(0),
-          TestSaslProtocol.class, null, 0, newConf);
-      assertEquals(
-          "serverPrincipal should be null when security is turned off", null,
-          remoteId.getServerPrincipal());
-    } finally {
-      // revert back to security is on
-      UserGroupInformation.setConfiguration(conf);
-    }
-  }
-  
   @Test
   @Test
   public void testPerConnectionConf() throws Exception {
   public void testPerConnectionConf() throws Exception {
     TestTokenSecretManager sm = new TestTokenSecretManager();
     TestTokenSecretManager sm = new TestTokenSecretManager();
@@ -409,38 +392,40 @@ public class TestSaslRPC {
     Configuration newConf = new Configuration(conf);
     Configuration newConf = new Configuration(conf);
     newConf.set(CommonConfigurationKeysPublic.
     newConf.set(CommonConfigurationKeysPublic.
         HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, "");
         HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, "");
-    newConf.set(SERVER_PRINCIPAL_KEY, SERVER_PRINCIPAL_1);
 
 
     TestSaslProtocol proxy1 = null;
     TestSaslProtocol proxy1 = null;
     TestSaslProtocol proxy2 = null;
     TestSaslProtocol proxy2 = null;
     TestSaslProtocol proxy3 = null;
     TestSaslProtocol proxy3 = null;
+    int timeouts[] = {111222, 3333333};
     try {
     try {
-      proxy1 = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
+      newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[0]);
+      proxy1 = RPC.getProxy(TestSaslProtocol.class,
           TestSaslProtocol.versionID, addr, newConf);
           TestSaslProtocol.versionID, addr, newConf);
       proxy1.getAuthMethod();
       proxy1.getAuthMethod();
       Client client = WritableRpcEngine.getClient(conf);
       Client client = WritableRpcEngine.getClient(conf);
       Set<ConnectionId> conns = client.getConnectionIds();
       Set<ConnectionId> conns = client.getConnectionIds();
       assertEquals("number of connections in cache is wrong", 1, conns.size());
       assertEquals("number of connections in cache is wrong", 1, conns.size());
       // same conf, connection should be re-used
       // same conf, connection should be re-used
-      proxy2 = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
+      proxy2 = RPC.getProxy(TestSaslProtocol.class,
           TestSaslProtocol.versionID, addr, newConf);
           TestSaslProtocol.versionID, addr, newConf);
       proxy2.getAuthMethod();
       proxy2.getAuthMethod();
       assertEquals("number of connections in cache is wrong", 1, conns.size());
       assertEquals("number of connections in cache is wrong", 1, conns.size());
       // different conf, new connection should be set up
       // different conf, new connection should be set up
-      newConf.set(SERVER_PRINCIPAL_KEY, SERVER_PRINCIPAL_2);
-      proxy3 = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
+      newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[1]);
+      proxy3 = RPC.getProxy(TestSaslProtocol.class,
           TestSaslProtocol.versionID, addr, newConf);
           TestSaslProtocol.versionID, addr, newConf);
       proxy3.getAuthMethod();
       proxy3.getAuthMethod();
-      ConnectionId[] connsArray = conns.toArray(new ConnectionId[0]);
-      assertEquals("number of connections in cache is wrong", 2,
-          connsArray.length);
-      String p1 = connsArray[0].getServerPrincipal();
-      String p2 = connsArray[1].getServerPrincipal();
-      assertFalse("should have different principals", p1.equals(p2));
-      assertTrue("principal not as expected", p1.equals(SERVER_PRINCIPAL_1)
-          || p1.equals(SERVER_PRINCIPAL_2));
-      assertTrue("principal not as expected", p2.equals(SERVER_PRINCIPAL_1)
-          || p2.equals(SERVER_PRINCIPAL_2));
+      assertEquals("number of connections in cache is wrong", 2, conns.size());
+      // now verify the proxies have the correct connection ids and timeouts
+      ConnectionId[] connsArray = {
+          RPC.getConnectionIdForProxy(proxy1),
+          RPC.getConnectionIdForProxy(proxy2),
+          RPC.getConnectionIdForProxy(proxy3)
+      };
+      assertEquals(connsArray[0], connsArray[1]);
+      assertEquals(connsArray[0].getMaxIdleTime(), timeouts[0]);
+      assertFalse(connsArray[0].equals(connsArray[2]));
+      assertNotSame(connsArray[2].getMaxIdleTime(), timeouts[1]);
     } finally {
     } finally {
       server.stop();
       server.stop();
       RPC.stopProxy(proxy1);
       RPC.stopProxy(proxy1);
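
The rewritten testPerConnectionConf keys connection caching off the client's idle-timeout setting rather than the server-principal property it previously used. A minimal sketch of the reuse rule, against the same proxies and address the test uses (the timeout value is illustrative):

    Configuration c = new Configuration(conf);
    c.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 111222);
    TestSaslProtocol p1 = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, c);
    TestSaslProtocol p2 = RPC.getProxy(TestSaslProtocol.class,
        TestSaslProtocol.versionID, addr, c);
    // Identical conf values -> identical ConnectionId -> the connection is reused.
    assertEquals(RPC.getConnectionIdForProxy(p1), RPC.getConnectionIdForProxy(p2));
    assertEquals(111222, RPC.getConnectionIdForProxy(p1).getMaxIdleTime());
    // Changing ipc.client.connection.maxidletime produces a distinct
    // ConnectionId, so a new connection is set up instead.
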
@@ -468,7 +453,7 @@ public class TestSaslRPC {
 
 
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
     InetSocketAddress addr = NetUtils.getConnectAddress(server);
     try {
     try {
-      proxy = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
+      proxy = RPC.getProxy(TestSaslProtocol.class,
           TestSaslProtocol.versionID, addr, newConf);
           TestSaslProtocol.versionID, addr, newConf);
       proxy.ping();
       proxy.ping();
     } finally {
     } finally {
@@ -488,7 +473,7 @@ public class TestSaslRPC {
   }
   }
 
 
   @Test
   @Test
-  public void testSaslPlainServerBadPassword() throws IOException {
+  public void testSaslPlainServerBadPassword() {
     SaslException e = null;
     SaslException e = null;
     try {
     try {
       runNegotiation(
       runNegotiation(
@@ -599,75 +584,118 @@ public class TestSaslRPC {
   private static Pattern KrbFailed =
   private static Pattern KrbFailed =
       Pattern.compile(".*Failed on local exception:.* " +
       Pattern.compile(".*Failed on local exception:.* " +
                       "Failed to specify server's Kerberos principal name.*");
                       "Failed to specify server's Kerberos principal name.*");
-  private static Pattern Denied(AuthenticationMethod method) {
+  private static Pattern Denied(AuthMethod method) {
       return Pattern.compile(".*RemoteException.*AccessControlException.*: "
       return Pattern.compile(".*RemoteException.*AccessControlException.*: "
-          +method.getAuthMethod() + " authentication is not enabled.*");
+          + method + " authentication is not enabled.*");
+  }
+  private static Pattern No(AuthMethod ... method) {
+    String methods = StringUtils.join(method, ",\\s*");
+    return Pattern.compile(".*Failed on local exception:.* " +
+        "Client cannot authenticate via:\\[" + methods + "\\].*");
   }
   }
   private static Pattern NoTokenAuth =
   private static Pattern NoTokenAuth =
       Pattern.compile(".*IllegalArgumentException: " +
       Pattern.compile(".*IllegalArgumentException: " +
                       "TOKEN authentication requires a secret manager");
                       "TOKEN authentication requires a secret manager");
-  
+  private static Pattern NoFallback = 
+      Pattern.compile(".*Failed on local exception:.* " +
+          "Server asks us to fall back to SIMPLE auth, " +
+          "but this client is configured to only allow secure connections.*");
+
   /*
   /*
    *  simple server
    *  simple server
    */
    */
   @Test
   @Test
   public void testSimpleServer() throws Exception {
   public void testSimpleServer() throws Exception {
     assertAuthEquals(SIMPLE,    getAuthMethod(SIMPLE,   SIMPLE));
     assertAuthEquals(SIMPLE,    getAuthMethod(SIMPLE,   SIMPLE));
-    // SASL methods are reverted to SIMPLE, but test setup fails
-    assertAuthEquals(KrbFailed, getAuthMethod(KERBEROS, SIMPLE));
+    assertAuthEquals(SIMPLE,    getAuthMethod(SIMPLE,   SIMPLE, UseToken.OTHER));
+    // SASL methods are normally reverted to SIMPLE
+    assertAuthEquals(SIMPLE,    getAuthMethod(KERBEROS, SIMPLE));
+    assertAuthEquals(SIMPLE,    getAuthMethod(KERBEROS, SIMPLE, UseToken.OTHER));
   }
   }
 
 
   @Test
   @Test
-  public void testSimpleServerWithTokensWithNoClientFallbackToSimple()
+  public void testNoClientFallbackToSimple()
       throws Exception {
       throws Exception {
-
     clientFallBackToSimpleAllowed = false;
     clientFallBackToSimpleAllowed = false;
-
-    try{
-      // Client has a token even though its configs says simple auth. Server
-      // is configured for simple auth, but as client sends the token, and
-      // server asks to switch to simple, this should fail.
-      getAuthMethod(SIMPLE,   SIMPLE, true);
-    } catch (IOException ioe) {
-      Assert
-        .assertTrue(ioe.getMessage().contains("Failed on local exception: " +
-        		"java.io.IOException: java.io.IOException: " +
-        		"Server asks us to fall back to SIMPLE auth, " +
-        		"but this client is configured to only allow secure connections"
-          ));
-    }
+    // tokens are irrelevant w/o secret manager enabled
+    assertAuthEquals(SIMPLE,     getAuthMethod(SIMPLE, SIMPLE));
+    assertAuthEquals(SIMPLE,     getAuthMethod(SIMPLE, SIMPLE, UseToken.OTHER));
+    assertAuthEquals(SIMPLE,     getAuthMethod(SIMPLE, SIMPLE, UseToken.VALID));
+    assertAuthEquals(SIMPLE,     getAuthMethod(SIMPLE, SIMPLE, UseToken.INVALID));
+
+    // A secure client must not fallback
+    assertAuthEquals(NoFallback, getAuthMethod(KERBEROS, SIMPLE));
+    assertAuthEquals(NoFallback, getAuthMethod(KERBEROS, SIMPLE, UseToken.OTHER));
+    assertAuthEquals(NoFallback, getAuthMethod(KERBEROS, SIMPLE, UseToken.VALID));
+    assertAuthEquals(NoFallback, getAuthMethod(KERBEROS, SIMPLE, UseToken.INVALID));
 
 
     // Now set server to simple and also force the secret-manager. Now server
     // Now set server to simple and also force the secret-manager. Now server
     // should have both simple and token enabled.
     // should have both simple and token enabled.
     forceSecretManager = true;
     forceSecretManager = true;
-    assertAuthEquals(TOKEN, getAuthMethod(SIMPLE,   SIMPLE, true));
-    forceSecretManager = false;
-    clientFallBackToSimpleAllowed = true;
+    assertAuthEquals(SIMPLE,     getAuthMethod(SIMPLE, SIMPLE));
+    assertAuthEquals(SIMPLE,     getAuthMethod(SIMPLE, SIMPLE, UseToken.OTHER));
+    assertAuthEquals(TOKEN,      getAuthMethod(SIMPLE, SIMPLE, UseToken.VALID));
+    assertAuthEquals(BadToken,   getAuthMethod(SIMPLE, SIMPLE, UseToken.INVALID));
+
+    // A secure client must not fallback
+    assertAuthEquals(NoFallback, getAuthMethod(KERBEROS, SIMPLE));
+    assertAuthEquals(NoFallback, getAuthMethod(KERBEROS, SIMPLE, UseToken.OTHER));
+    assertAuthEquals(TOKEN,      getAuthMethod(KERBEROS, SIMPLE, UseToken.VALID));
+    assertAuthEquals(BadToken,   getAuthMethod(KERBEROS, SIMPLE, UseToken.INVALID));
+    
+    // doesn't try SASL
+    assertAuthEquals(Denied(SIMPLE), getAuthMethod(SIMPLE, TOKEN));
+    // does try SASL
+    assertAuthEquals(No(TOKEN),      getAuthMethod(SIMPLE, TOKEN, UseToken.OTHER));
+    assertAuthEquals(TOKEN,          getAuthMethod(SIMPLE, TOKEN, UseToken.VALID));
+    assertAuthEquals(BadToken,       getAuthMethod(SIMPLE, TOKEN, UseToken.INVALID));
+    
+    assertAuthEquals(No(TOKEN),      getAuthMethod(KERBEROS, TOKEN));
+    assertAuthEquals(No(TOKEN),      getAuthMethod(KERBEROS, TOKEN, UseToken.OTHER));
+    assertAuthEquals(TOKEN,          getAuthMethod(KERBEROS, TOKEN, UseToken.VALID));
+    assertAuthEquals(BadToken,       getAuthMethod(KERBEROS, TOKEN, UseToken.INVALID));
   }
   }
 
 
   @Test
   @Test
   public void testSimpleServerWithTokens() throws Exception {
   public void testSimpleServerWithTokens() throws Exception {
     // Client not using tokens
     // Client not using tokens
     assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE,   SIMPLE));
     assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE,   SIMPLE));
-    // SASL methods are reverted to SIMPLE, but test setup fails
-    assertAuthEquals(KrbFailed, getAuthMethod(KERBEROS, SIMPLE));
+    // SASL methods are reverted to SIMPLE
+    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE));
 
 
     // Use tokens. But tokens are ignored because client is reverted to simple
     // Use tokens. But tokens are ignored because client is reverted to simple
-    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, true));
+    // due to server not using tokens
+    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, UseToken.VALID));
+    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, UseToken.OTHER));
 
 
+    // server isn't really advertising tokens
     enableSecretManager = true;
     enableSecretManager = true;
-    assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE,   SIMPLE, true));
-    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, true));
+    assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE,   SIMPLE, UseToken.VALID));
+    assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE,   SIMPLE, UseToken.OTHER));
+    
+    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, UseToken.VALID));
+    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, UseToken.OTHER));
+    
+    // now the simple server takes tokens
+    forceSecretManager = true;
+    assertAuthEquals(TOKEN,  getAuthMethod(SIMPLE,   SIMPLE, UseToken.VALID));
+    assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE,   SIMPLE, UseToken.OTHER));
+    
+    assertAuthEquals(TOKEN,  getAuthMethod(KERBEROS, SIMPLE, UseToken.VALID));
+    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, UseToken.OTHER));
   }
   }
 
 
   @Test
   @Test
   public void testSimpleServerWithInvalidTokens() throws Exception {
   public void testSimpleServerWithInvalidTokens() throws Exception {
     // Tokens are ignored because client is reverted to simple
     // Tokens are ignored because client is reverted to simple
-    assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE,   SIMPLE, false));
-    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, false));
+    assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE,   SIMPLE, UseToken.INVALID));
+    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, UseToken.INVALID));
     enableSecretManager = true;
     enableSecretManager = true;
-    assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE,   SIMPLE, false));
-    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, false));
+    assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE,   SIMPLE, UseToken.INVALID));
+    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, UseToken.INVALID));
+    forceSecretManager = true;
+    assertAuthEquals(BadToken, getAuthMethod(SIMPLE,   SIMPLE, UseToken.INVALID));
+    assertAuthEquals(BadToken, getAuthMethod(KERBEROS, SIMPLE, UseToken.INVALID));
   }
   }
   
   
   /*
   /*
@@ -675,26 +703,29 @@ public class TestSaslRPC {
    */
    */
   @Test
   @Test
   public void testTokenOnlyServer() throws Exception {
   public void testTokenOnlyServer() throws Exception {
+    // simple client w/o tokens won't try SASL, so server denies
     assertAuthEquals(Denied(SIMPLE), getAuthMethod(SIMPLE,   TOKEN));
     assertAuthEquals(Denied(SIMPLE), getAuthMethod(SIMPLE,   TOKEN));
-    assertAuthEquals(KrbFailed,      getAuthMethod(KERBEROS, TOKEN));
+    assertAuthEquals(No(TOKEN),      getAuthMethod(SIMPLE,   TOKEN, UseToken.OTHER));
+    assertAuthEquals(No(TOKEN),      getAuthMethod(KERBEROS, TOKEN));
+    assertAuthEquals(No(TOKEN),      getAuthMethod(KERBEROS, TOKEN, UseToken.OTHER));
  }

  @Test
  public void testTokenOnlyServerWithTokens() throws Exception {
-    assertAuthEquals(TOKEN, getAuthMethod(SIMPLE,   TOKEN, true));
-    assertAuthEquals(TOKEN, getAuthMethod(KERBEROS, TOKEN, true));
+    assertAuthEquals(TOKEN,       getAuthMethod(SIMPLE,   TOKEN, UseToken.VALID));
+    assertAuthEquals(TOKEN,       getAuthMethod(KERBEROS, TOKEN, UseToken.VALID));
    enableSecretManager = false;
-    assertAuthEquals(NoTokenAuth, getAuthMethod(SIMPLE,   TOKEN, true));
-    assertAuthEquals(NoTokenAuth, getAuthMethod(KERBEROS, TOKEN, true));
+    assertAuthEquals(NoTokenAuth, getAuthMethod(SIMPLE,   TOKEN, UseToken.VALID));
+    assertAuthEquals(NoTokenAuth, getAuthMethod(KERBEROS, TOKEN, UseToken.VALID));
  }

  @Test
  public void testTokenOnlyServerWithInvalidTokens() throws Exception {
-    assertAuthEquals(BadToken, getAuthMethod(SIMPLE,   TOKEN, false));
-    assertAuthEquals(BadToken, getAuthMethod(KERBEROS, TOKEN, false));
+    assertAuthEquals(BadToken,    getAuthMethod(SIMPLE,   TOKEN, UseToken.INVALID));
+    assertAuthEquals(BadToken,    getAuthMethod(KERBEROS, TOKEN, UseToken.INVALID));
    enableSecretManager = false;
-    assertAuthEquals(NoTokenAuth, getAuthMethod(SIMPLE,   TOKEN, false));
-    assertAuthEquals(NoTokenAuth, getAuthMethod(KERBEROS, TOKEN, false));
+    assertAuthEquals(NoTokenAuth, getAuthMethod(SIMPLE,   TOKEN, UseToken.INVALID));
+    assertAuthEquals(NoTokenAuth, getAuthMethod(KERBEROS, TOKEN, UseToken.INVALID));
  }

  /*
@@ -702,38 +733,43 @@ public class TestSaslRPC {
   */
  @Test
  public void testKerberosServer() throws Exception {
-    assertAuthEquals(Denied(SIMPLE), getAuthMethod(SIMPLE,   KERBEROS));
-    assertAuthEquals(KrbFailed,      getAuthMethod(KERBEROS, KERBEROS));    
+    // doesn't try SASL
+    assertAuthEquals(Denied(SIMPLE),     getAuthMethod(SIMPLE,   KERBEROS));
+    // does try SASL
+    assertAuthEquals(No(TOKEN,KERBEROS), getAuthMethod(SIMPLE,   KERBEROS, UseToken.OTHER));
+    // no tgt
+    assertAuthEquals(KrbFailed,          getAuthMethod(KERBEROS, KERBEROS));
+    assertAuthEquals(KrbFailed,          getAuthMethod(KERBEROS, KERBEROS, UseToken.OTHER));
  }

  @Test
  public void testKerberosServerWithTokens() throws Exception {
    // can use tokens regardless of auth
-    assertAuthEquals(TOKEN, getAuthMethod(SIMPLE,   KERBEROS, true));
-    assertAuthEquals(TOKEN, getAuthMethod(KERBEROS, KERBEROS, true));
-    // can't fallback to simple when using kerberos w/o tokens
+    assertAuthEquals(TOKEN,        getAuthMethod(SIMPLE,   KERBEROS, UseToken.VALID));
+    assertAuthEquals(TOKEN,        getAuthMethod(KERBEROS, KERBEROS, UseToken.VALID));
    enableSecretManager = false;
-    assertAuthEquals(Denied(TOKEN), getAuthMethod(SIMPLE,   KERBEROS, true));
-    assertAuthEquals(Denied(TOKEN), getAuthMethod(KERBEROS, KERBEROS, true));
+    // shouldn't even try token because server didn't tell us to
+    assertAuthEquals(No(KERBEROS), getAuthMethod(SIMPLE,   KERBEROS, UseToken.VALID));
+    assertAuthEquals(KrbFailed,    getAuthMethod(KERBEROS, KERBEROS, UseToken.VALID));
  }

  @Test
  public void testKerberosServerWithInvalidTokens() throws Exception {
-    assertAuthEquals(BadToken, getAuthMethod(SIMPLE,   KERBEROS, false));
-    assertAuthEquals(BadToken, getAuthMethod(KERBEROS, KERBEROS, false));
+    assertAuthEquals(BadToken,     getAuthMethod(SIMPLE,   KERBEROS, UseToken.INVALID));
+    assertAuthEquals(BadToken,     getAuthMethod(KERBEROS, KERBEROS, UseToken.INVALID));
    enableSecretManager = false;
-    assertAuthEquals(Denied(TOKEN), getAuthMethod(SIMPLE,   KERBEROS, false));
-    assertAuthEquals(Denied(TOKEN), getAuthMethod(KERBEROS, KERBEROS, false));
+    assertAuthEquals(No(KERBEROS), getAuthMethod(SIMPLE,   KERBEROS, UseToken.INVALID));
+    assertAuthEquals(KrbFailed,    getAuthMethod(KERBEROS, KERBEROS, UseToken.INVALID));
  }


  // test helpers

  private String getAuthMethod(
-      final AuthenticationMethod clientAuth,
-      final AuthenticationMethod serverAuth) throws Exception {
+      final AuthMethod clientAuth,
+      final AuthMethod serverAuth) throws Exception {
    try {
-      return internalGetAuthMethod(clientAuth, serverAuth, false, false);
+      return internalGetAuthMethod(clientAuth, serverAuth, UseToken.NONE);
    } catch (Exception e) {
      LOG.warn("Auth method failure", e);
      return e.toString();
@@ -741,11 +777,11 @@ public class TestSaslRPC {
  }

  private String getAuthMethod(
-      final AuthenticationMethod clientAuth,
-      final AuthenticationMethod serverAuth,
-      final boolean useValidToken) throws Exception {
+      final AuthMethod clientAuth,
+      final AuthMethod serverAuth,
+      final UseToken tokenType) throws Exception {
    try {
-      return internalGetAuthMethod(clientAuth, serverAuth, true, useValidToken);
+      return internalGetAuthMethod(clientAuth, serverAuth, tokenType);
    } catch (Exception e) {
      LOG.warn("Auth method failure", e);
      return e.toString();
@@ -753,15 +789,14 @@ public class TestSaslRPC {
  }

  private String internalGetAuthMethod(
-      final AuthenticationMethod clientAuth,
-      final AuthenticationMethod serverAuth,
-      final boolean useToken,
-      final boolean useValidToken) throws Exception {
+      final AuthMethod clientAuth,
+      final AuthMethod serverAuth,
+      final UseToken tokenType) throws Exception {

    String currentUser = UserGroupInformation.getCurrentUser().getUserName();

    final Configuration serverConf = new Configuration(conf);
-    SecurityUtil.setAuthenticationMethod(serverAuth, serverConf);
+    serverConf.set(HADOOP_SECURITY_AUTHENTICATION, serverAuth.toString());
    UserGroupInformation.setConfiguration(serverConf);

    final UserGroupInformation serverUgi =
@@ -793,7 +828,7 @@ public class TestSaslRPC {
    });

    final Configuration clientConf = new Configuration(conf);
-    SecurityUtil.setAuthenticationMethod(clientAuth, clientConf);
+    clientConf.set(HADOOP_SECURITY_AUTHENTICATION, clientAuth.toString());
    clientConf.setBoolean(
        CommonConfigurationKeys.IPC_CLIENT_FALLBACK_TO_SIMPLE_AUTH_ALLOWED_KEY,
        clientFallBackToSimpleAllowed);
@@ -804,26 +839,37 @@ public class TestSaslRPC {
    clientUgi.setAuthenticationMethod(clientAuth);

    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-    if (useToken) {
+    if (tokenType != UseToken.NONE) {
      TestTokenIdentifier tokenId = new TestTokenIdentifier(
          new Text(clientUgi.getUserName()));
-      Token<TestTokenIdentifier> token = useValidToken
-          ? new Token<TestTokenIdentifier>(tokenId, sm)
-          : new Token<TestTokenIdentifier>(
+      Token<TestTokenIdentifier> token = null;
+      switch (tokenType) {
+        case VALID:
+          token = new Token<TestTokenIdentifier>(tokenId, sm);
+          SecurityUtil.setTokenService(token, addr);
+          break;
+        case INVALID:
+          token = new Token<TestTokenIdentifier>(
               tokenId.getBytes(), "bad-password!".getBytes(),
               tokenId.getBytes(), "bad-password!".getBytes(),
               tokenId.getKind(), null);
               tokenId.getKind(), null);
-      
-      SecurityUtil.setTokenService(token, addr);
+          SecurityUtil.setTokenService(token, addr);
+          break;
+        case OTHER:
+          token = new Token<TestTokenIdentifier>();
+          break;
+        case NONE: // won't get here
+      }
      clientUgi.addToken(token);
    }

    try {
+      LOG.info("trying ugi:"+clientUgi+" tokens:"+clientUgi.getTokens());
      return clientUgi.doAs(new PrivilegedExceptionAction<String>() {
        @Override
        public String run() throws IOException {
          TestSaslProtocol proxy = null;
          try {
-            proxy = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
+            proxy = RPC.getProxy(TestSaslProtocol.class,
                TestSaslProtocol.versionID, addr, clientConf);

            proxy.ping();
@@ -847,7 +893,7 @@ public class TestSaslRPC {
    }
  }

-  private static void assertAuthEquals(AuthenticationMethod expect,
+  private static void assertAuthEquals(AuthMethod expect,
      String actual) {
    assertEquals(expect.toString(), actual);
  }
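Note: the hunks above replace the old boolean token arguments with a UseToken enum whose declaration falls outside the displayed context. Judging only from the constants the new assertions reference, a minimal sketch of that enum (illustrative; the exact declaration lives elsewhere in TestSaslRPC) would be:

// Sketch only, based on the constants used above; not the verbatim declaration.
private static enum UseToken {
  NONE,     // no token is added to the client UGI
  VALID,    // token signed by the test secret manager, service set to the server address
  INVALID,  // token carrying a bad password, service set to the server address
  OTHER     // unrelated token that is not aimed at this server
}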

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestServer.java

@@ -118,7 +118,7 @@ public class TestServer {
  }

  @Test
-  public void testExceptionsHandler() throws IOException {
+  public void testExceptionsHandler() {
    Server.ExceptionsHandler handler = new Server.ExceptionsHandler();
    handler.addTerseExceptions(IOException.class);
    handler.addTerseExceptions(RpcServerException.class, IpcException.class);

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSocketFactory.java

@@ -35,7 +35,7 @@ import org.junit.Test;
public class TestSocketFactory {

  @Test
-  public void testSocketFactoryAsKeyInMap() throws Exception {
+  public void testSocketFactoryAsKeyInMap() {
    Map<SocketFactory, Integer> dummyCache = new HashMap<SocketFactory, Integer>();
    int toBeCached1 = 1;
    int toBeCached2 = 2;

+ 5 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestGSet.java → hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGSet.java

@@ -15,7 +15,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-package org.apache.hadoop.hdfs.util;
+package org.apache.hadoop.util;

 import java.util.ConcurrentModificationException;
 import java.util.Iterator;
@@ -169,9 +169,8 @@ public class TestGSet {
  }

  /**
-   * A long test,
-   * which may take ~5 hours,
-   * with various data sets and parameters.
+   * A long running test with various data sets and parameters.
+   * It may take ~5 hours, 
   * If you are changing the implementation,
   * please un-comment the following line in order to run the test.
   */
@@ -327,8 +326,6 @@ public class TestGSet {
      } else {
        Assert.assertEquals(e.id, gset.remove(key).id);
      }
-
-      check();
      return e;
    }
    @Override
@@ -391,7 +388,9 @@

    @Override
    public void clear() {
+      expected.clear();
      gset.clear();
+      Assert.assertEquals(0, size());
    }
  }


+ 457 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestLightWeightCache.java

@@ -0,0 +1,457 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.util.Date;
+import java.util.Iterator;
+import java.util.Random;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+/** Testing {@link LightWeightCache} */
+public class TestLightWeightCache {
+  private static final long starttime = Time.now();
+  private static final long seed = starttime;
+  private static final Random ran = new Random(seed);
+  static {
+    println("Start time = " + new Date(starttime) + ", seed=" +  seed);
+  }
+
+  private static void print(Object s) {
+    System.out.print(s);
+    System.out.flush();
+  }
+
+  private static void println(Object s) {
+    System.out.println(s);
+  }
+
+  @Test
+  public void testLightWeightCache() {
+    // test randomized creation expiration with zero access expiration 
+    {
+      final long creationExpiration = ran.nextInt(1024) + 1;
+      check(1, creationExpiration, 0L, 1 << 10, 65537);
+      check(17, creationExpiration, 0L, 1 << 16, 17);
+      check(255, creationExpiration, 0L, 1 << 16, 65537);
+    }
+
+    // test randomized creation/access expiration periods
+    for(int i = 0; i < 3; i++) {
+      final long creationExpiration = ran.nextInt(1024) + 1;
+      final long accessExpiration = ran.nextInt(1024) + 1;
+      
+      check(1, creationExpiration, accessExpiration, 1 << 10, 65537);
+      check(17, creationExpiration, accessExpiration, 1 << 16, 17);
+      check(255, creationExpiration, accessExpiration, 1 << 16, 65537);
+    }
+  
+    // test size limit
+    final int dataSize = 1 << 16;
+    for(int i = 0; i < 10; i++) {
+      final int modulus = ran.nextInt(1024) + 1;
+      final int sizeLimit = ran.nextInt(modulus) + 1;
+      checkSizeLimit(sizeLimit, dataSize, modulus);
+    }
+  }
+
+  private static void checkSizeLimit(final int sizeLimit, final int datasize,
+      final int modulus) {
+    final LightWeightCacheTestCase test = new LightWeightCacheTestCase(
+        sizeLimit, sizeLimit, 1L << 32, 1L << 32, datasize, modulus);
+
+    // keep putting entries and check size limit
+    print("  check size ................. ");
+    for(int i = 0; i < test.data.size(); i++) {
+      test.cache.put(test.data.get(i));
+      Assert.assertTrue(test.cache.size() <= sizeLimit);
+    }
+    println("DONE " + test.stat());
+  }
+  
+  /** 
+   * Test various creationExpirationPeriod and accessExpirationPeriod.
+   * It runs ~2 minutes. If you are changing the implementation,
+   * please un-comment the following line in order to run the test.
+   */
+//  @Test
+  public void testExpirationPeriods() {
+    for(int k = -4; k < 10; k += 4) {
+      final long accessExpirationPeriod = k < 0? 0L: (1L << k); 
+      for(int j = 0; j < 10; j += 4) {
+        final long creationExpirationPeriod = 1L << j; 
+        runTests(1, creationExpirationPeriod, accessExpirationPeriod);
+        for(int i = 1; i < Integer.SIZE - 1; i += 8) {
+          runTests((1 << i) + 1, creationExpirationPeriod, accessExpirationPeriod);
+        }
+      }
+    }
+  }
+
+  /** Run tests with various table lengths. */
+  private static void runTests(final int modulus,
+      final long creationExpirationPeriod,
+      final long accessExpirationPeriod) {
+    println("\n\n\n*** runTest: modulus=" + modulus
+        + ", creationExpirationPeriod=" + creationExpirationPeriod
+        + ", accessExpirationPeriod=" + accessExpirationPeriod);
+    for(int i = 0; i <= 16; i += 4) {
+      final int tablelength = (1 << i);
+
+      final int upper = i + 2;
+      final int steps = Math.max(1, upper/3);
+
+      for(int j = upper; j > 0; j -= steps) {
+        final int datasize = 1 << j;
+        check(tablelength, creationExpirationPeriod, accessExpirationPeriod,
+            datasize, modulus);
+      }
+    }
+  }
+
+  private static void check(int tablelength, long creationExpirationPeriod,
+      long accessExpirationPeriod, int datasize, int modulus) {
+    check(new LightWeightCacheTestCase(tablelength, -1,
+        creationExpirationPeriod, accessExpirationPeriod, datasize, modulus));
+  }
+
+  /** 
+   * check the following operations
+   * (1) put
+   * (2) remove & put
+   * (3) remove
+   * (4) remove & put again
+   */
+  private static void check(final LightWeightCacheTestCase test) {
+    //check put
+    print("  check put .................. ");
+    for(int i = 0; i < test.data.size()/2; i++) {
+      test.put(test.data.get(i));
+    }
+    for(int i = 0; i < test.data.size(); i++) {
+      test.put(test.data.get(i));
+    }
+    println("DONE " + test.stat());
+
+    //check remove and put
+    print("  check remove & put ......... ");
+    for(int j = 0; j < 10; j++) {
+      for(int i = 0; i < test.data.size()/2; i++) {
+        final int r = ran.nextInt(test.data.size());
+        test.remove(test.data.get(r));
+      }
+      for(int i = 0; i < test.data.size()/2; i++) {
+        final int r = ran.nextInt(test.data.size());
+        test.put(test.data.get(r));
+      }
+    }
+    println("DONE " + test.stat());
+
+    //check remove
+    print("  check remove ............... ");
+    for(int i = 0; i < test.data.size(); i++) {
+      test.remove(test.data.get(i));
+    }
+    Assert.assertEquals(0, test.cache.size());
+    println("DONE " + test.stat());
+
+    //check remove and put again
+    print("  check remove & put again ... ");
+    for(int j = 0; j < 10; j++) {
+      for(int i = 0; i < test.data.size()/2; i++) {
+        final int r = ran.nextInt(test.data.size());
+        test.remove(test.data.get(r));
+      }
+      for(int i = 0; i < test.data.size()/2; i++) {
+        final int r = ran.nextInt(test.data.size());
+        test.put(test.data.get(r));
+      }
+    }
+    println("DONE " + test.stat());
+
+    final long s = (Time.now() - starttime)/1000L;
+    println("total time elapsed=" + s + "s\n");
+  }
+
+  /**
+   * The test case contains two data structures, a cache and a hashMap.
+   * The hashMap is used to verify the correctness of the cache.  Note that
+   * no automatic eviction is performed in the hashMap.  Thus, we have
+   * (1) If an entry exists in cache, it MUST exist in the hashMap.
+   * (2) If an entry does not exist in the cache, it may or may not exist in the
+   *     hashMap.  If it exists, it must be expired.
+   */
+  private static class LightWeightCacheTestCase implements GSet<IntEntry, IntEntry> {
+    /** hashMap will not evict entries automatically. */
+    final GSet<IntEntry, IntEntry> hashMap
+        = new GSetByHashMap<IntEntry, IntEntry>(1024, 0.75f);
+
+    final LightWeightCache<IntEntry, IntEntry> cache;
+    final IntData data;
+
+    final String info;
+    final long starttime = Time.now();
+    /** Determine the probability in {@link #check()}. */
+    final int denominator;
+    int iterate_count = 0;
+    int contain_count = 0;
+
+    private long currentTestTime = ran.nextInt();
+
+    LightWeightCacheTestCase(int tablelength, int sizeLimit,
+        long creationExpirationPeriod, long accessExpirationPeriod,
+        int datasize, int modulus) {
+      denominator = Math.min((datasize >> 7) + 1, 1 << 16);
+      info = getClass().getSimpleName() + "(" + new Date(starttime)
+          + "): tablelength=" + tablelength
+          + ", creationExpirationPeriod=" + creationExpirationPeriod
+          + ", accessExpirationPeriod=" + accessExpirationPeriod
+          + ", datasize=" + datasize
+          + ", modulus=" + modulus
+          + ", denominator=" + denominator;
+      println(info);
+
+      data = new IntData(datasize, modulus);
+      cache = new LightWeightCache<IntEntry, IntEntry>(tablelength, sizeLimit,
+          creationExpirationPeriod, 0, new LightWeightCache.Clock() {
+        @Override
+        long currentTime() {
+          return currentTestTime;
+        }
+      });
+
+      Assert.assertEquals(0, cache.size());
+    }
+
+    private boolean containsTest(IntEntry key) {
+      final boolean c = cache.contains(key);
+      if (c) {
+        Assert.assertTrue(hashMap.contains(key));
+      } else {
+        final IntEntry h = hashMap.remove(key);
+        if (h != null) {
+          Assert.assertTrue(cache.isExpired(h, currentTestTime));
+        }
+      }
+      return c;
+    }
+    @Override
+    public boolean contains(IntEntry key) {
+      final boolean e = containsTest(key);
+      check();
+      return e;
+    }
+
+    private IntEntry getTest(IntEntry key) {
+      final IntEntry c = cache.get(key);
+      if (c != null) {
+        Assert.assertEquals(hashMap.get(key).id, c.id);
+      } else {
+        final IntEntry h = hashMap.remove(key);
+        if (h != null) {
+          Assert.assertTrue(cache.isExpired(h, currentTestTime));
+        }
+      }
+      return c;
+    }
+    @Override
+    public IntEntry get(IntEntry key) {
+      final IntEntry e = getTest(key);
+      check();
+      return e;
+    }
+
+    private IntEntry putTest(IntEntry entry) {
+      final IntEntry c = cache.put(entry);
+      if (c != null) {
+        Assert.assertEquals(hashMap.put(entry).id, c.id);
+      } else {
+        final IntEntry h = hashMap.put(entry);
+        if (h != null && h != entry) {
+          // if h == entry, its expiration time is already updated
+          Assert.assertTrue(cache.isExpired(h, currentTestTime));
+        }
+      }
+      return c;
+    }
+    @Override
+    public IntEntry put(IntEntry entry) {
+      final IntEntry e = putTest(entry);
+      check();
+      return e;
+    }
+
+    private IntEntry removeTest(IntEntry key) {
+      final IntEntry c = cache.remove(key);
+      if (c != null) {
+        Assert.assertEquals(c.id, hashMap.remove(key).id);
+      } else {
+        final IntEntry h = hashMap.remove(key);
+        if (h != null) {
+          Assert.assertTrue(cache.isExpired(h, currentTestTime));
+        }
+      }
+      return c;
+    }
+    @Override
+    public IntEntry remove(IntEntry key) {
+      final IntEntry e = removeTest(key);
+      check();
+      return e;
+    }
+
+    private int sizeTest() {
+      final int c = cache.size();
+      Assert.assertTrue(hashMap.size() >= c);
+      return c;
+    }
+    @Override
+    public int size() {
+      final int s = sizeTest();
+      check();
+      return s;
+    }
+
+    @Override
+    public Iterator<IntEntry> iterator() {
+      throw new UnsupportedOperationException();
+    }
+
+    boolean tossCoin() {
+      return ran.nextInt(denominator) == 0;
+    }
+
+    void check() {
+      currentTestTime += ran.nextInt() & 0x3;
+
+      //test size
+      sizeTest();
+
+      if (tossCoin()) {
+        //test get(..), check content and test iterator
+        iterate_count++;
+        for(IntEntry i : cache) {
+          getTest(i);
+        }
+      }
+
+      if (tossCoin()) {
+        //test contains(..)
+        contain_count++;
+        final int count = Math.min(data.size(), 1000);
+        if (count == data.size()) {
+          for(IntEntry i : data.integers) {
+            containsTest(i);
+          }
+        } else {
+          for(int j = 0; j < count; j++) {
+            containsTest(data.get(ran.nextInt(data.size())));
+          }
+        }
+      }
+    }
+
+    String stat() {
+      final long t = Time.now() - starttime;
+      return String.format(" iterate=%5d, contain=%5d, time elapsed=%5d.%03ds",
+          iterate_count, contain_count, t/1000, t%1000);
+    }
+
+    @Override
+    public void clear() {
+      hashMap.clear();
+      cache.clear();
+      Assert.assertEquals(0, size());
+    }
+  }
+
+  private static class IntData {
+    final IntEntry[] integers;
+
+    IntData(int size, int modulus) {
+      integers = new IntEntry[size];
+      for(int i = 0; i < integers.length; i++) {
+        integers[i] = new IntEntry(i, ran.nextInt(modulus));
+      }
+    }
+
+    IntEntry get(int i) {
+      return integers[i];
+    }
+
+    int size() {
+      return integers.length;
+    }
+  }
+
+  /** Entries of {@link LightWeightCache} in this test */
+  private static class IntEntry implements LightWeightCache.Entry,
+      Comparable<IntEntry> {
+    private LightWeightGSet.LinkedElement next;
+    final int id;
+    final int value;
+    private long expirationTime = 0;
+
+    IntEntry(int id, int value) {
+      this.id = id;
+      this.value = value;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+      return obj != null && obj instanceof IntEntry
+          && value == ((IntEntry)obj).value;
+    }
+
+    @Override
+    public int hashCode() {
+      return value;
+    }
+
+    @Override
+    public int compareTo(IntEntry that) {
+      return value - that.value;
+    }
+
+    @Override
+    public String toString() {
+      return id + "#" + value + ",expirationTime=" + expirationTime;
+    }
+
+    @Override
+    public LightWeightGSet.LinkedElement getNext() {
+      return next;
+    }
+
+    @Override
+    public void setNext(LightWeightGSet.LinkedElement e) {
+      next = e;
+    }
+
+    @Override
+    public void setExpirationTime(long timeNano) {
+      this.expirationTime = timeNano;
+    }
+
+    @Override
+    public long getExpirationTime() {
+      return expirationTime;
+    }
+  }
+}
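The LightWeightCacheTestCase above implements GSet so that the cache under test and the verifying hashMap can be driven through one interface. For readers without the source at hand, a sketch of the GSet contract these tests code against (a hypothetical rendering; the real interface is org.apache.hadoop.util.GSet and may differ in detail) is:

// Sketch of the GSet contract exercised by TestGSet and TestLightWeightCache.
public interface GSet<K, E extends K> extends Iterable<E> {
  int size();               // number of stored elements
  boolean contains(K key);  // membership test by key
  E get(K key);             // lookup; null if absent
  E put(E element);         // insert or replace; returns the previous element or null
  E remove(K key);          // remove by key; returns the removed element or null
  void clear();             // drop all elements
}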

+ 16 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestProtoUtil.java

@@ -18,12 +18,19 @@
 package org.apache.hadoop.util;

 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;

 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
+import java.util.Arrays;

+import org.apache.hadoop.ipc.ClientId;
+import org.apache.hadoop.ipc.RPC.RpcKind;
+import org.apache.hadoop.ipc.RpcConstants;
+import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto;
+import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcRequestHeaderProto.OperationProto;
 import org.junit.Test;

 import com.google.protobuf.CodedOutputStream;
@@ -69,4 +76,13 @@ public class TestProtoUtil {
        new ByteArrayInputStream(baos.toByteArray()));
    assertEquals(value, ProtoUtil.readRawVarint32(dis));
  }
+  
+  @Test
+  public void testRpcClientId() {
+    byte[] uuid = ClientId.getClientId();
+    RpcRequestHeaderProto header = ProtoUtil.makeRpcRequestHeader(
+        RpcKind.RPC_PROTOCOL_BUFFER, OperationProto.RPC_FINAL_PACKET, 0,
+        RpcConstants.INVALID_RETRY_COUNT, uuid);
+    assertTrue(Arrays.equals(uuid, header.getClientId().toByteArray()));
+  }
 }
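The new testRpcClientId case ties the client id carried in the RPC request header to the retry-cache work listed in the CHANGES.txt entries below (HDFS-4979, HDFS-5025): a server can recognize a retried call by the (clientId, callId) pair in the header. As a purely illustrative sketch of such a lookup key (hypothetical names; not the actual org.apache.hadoop.ipc.RetryCache internals):

// Hypothetical sketch of a retry-cache key built from the header fields above.
import java.util.Arrays;

final class RetryCacheKey {
  private final byte[] clientId;  // e.g. the byte[] UUID from ClientId.getClientId()
  private final int callId;       // per-connection call id from the RPC header

  RetryCacheKey(byte[] clientId, int callId) {
    this.clientId = clientId.clone();
    this.callId = callId;
  }

  @Override
  public boolean equals(Object o) {
    if (!(o instanceof RetryCacheKey)) {
      return false;
    }
    RetryCacheKey that = (RetryCacheKey) o;
    return callId == that.callId && Arrays.equals(clientId, that.clientId);
  }

  @Override
  public int hashCode() {
    return 31 * Arrays.hashCode(clientId) + callId;
  }
}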

+ 48 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReflectionUtils.java

@@ -18,9 +18,12 @@

 package org.apache.hadoop.util;

+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
 import java.net.URL;
 import java.net.URLClassLoader;
 import java.util.HashMap;
+import java.util.List;

 import static org.junit.Assert.*;
 import org.junit.Before;
@@ -109,6 +112,51 @@ public class TestReflectionUtils {
    System.gc();
    assertTrue(cacheSize()+" too big", cacheSize()<iterations);
  }
+  
+  @Test
+  public void testGetDeclaredFieldsIncludingInherited() {
+    Parent child = new Parent() {
+      private int childField;
+      @SuppressWarnings("unused")
+      public int getChildField() { return childField; }
+    };
+    
+    List<Field> fields = ReflectionUtils.getDeclaredFieldsIncludingInherited(
+        child.getClass());
+    boolean containsParentField = false;
+    boolean containsChildField = false;
+    for (Field field : fields) {
+      if (field.getName().equals("parentField")) {
+        containsParentField = true;
+      } else if (field.getName().equals("childField")) {
+        containsChildField = true;
+      }
+    }
+    
+    List<Method> methods = ReflectionUtils.getDeclaredMethodsIncludingInherited(
+        child.getClass());
+    boolean containsParentMethod = false;
+    boolean containsChildMethod = false;
+    for (Method method : methods) {
+      if (method.getName().equals("getParentField")) {
+        containsParentMethod = true;
+      } else if (method.getName().equals("getChildField")) {
+        containsChildMethod = true;
+      }
+    }
+    
+    assertTrue("Missing parent field", containsParentField);
+    assertTrue("Missing child field", containsChildField);
+    assertTrue("Missing parent method", containsParentMethod);
+    assertTrue("Missing child method", containsChildMethod);
+  }
+  
+  // Used for testGetDeclaredFieldsIncludingInherited
+  private class Parent {
+    private int parentField;
+    @SuppressWarnings("unused")
+    public int getParentField() { return parentField; }
+  }
     
     
   private static class LoadedInChild {
   private static class LoadedInChild {
   }
   }
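The test added above exercises ReflectionUtils#getDeclaredFieldsIncludingInherited and #getDeclaredMethodsIncludingInherited, whose implementations are not part of this diff. Assuming the usual approach of walking up the superclass chain, a sketch of the field variant (hypothetical; the real helper in org.apache.hadoop.util.ReflectionUtils may differ) is:

// Hypothetical sketch of collecting declared fields across the class hierarchy.
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;

class ReflectionSketch {
  static List<Field> getDeclaredFieldsIncludingInherited(Class<?> clazz) {
    List<Field> fields = new ArrayList<Field>();
    // Walk the superclass chain so inherited fields, including private ones
    // such as parentField in the test above, are collected as well.
    for (Class<?> c = clazz; c != null; c = c.getSuperclass()) {
      for (Field field : c.getDeclaredFields()) {
        fields.add(field);
      }
    }
    return fields;
  }
}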

+ 2 - 2
hadoop-common-project/hadoop-nfs/pom.xml

@@ -20,12 +20,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>2.1.0-beta</version>
+    <version>2.1.1-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-nfs</artifactId>
-  <version>2.1.0-beta</version>
+  <version>2.1.1-SNAPSHOT</version>
  <packaging>jar</packaging>

  <name>Apache Hadoop NFS</name>

+ 2 - 2
hadoop-common-project/pom.xml

@@ -20,12 +20,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>2.1.0-beta</version>
+    <version>2.1.1-SNAPSHOT</version>
    <relativePath>../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-common-project</artifactId>
-  <version>2.1.0-beta</version>
+  <version>2.1.1-SNAPSHOT</version>
  <description>Apache Hadoop Common Project</description>
  <name>Apache Hadoop Common Project</name>
  <packaging>pom</packaging>

+ 2 - 2
hadoop-dist/pom.xml

@@ -20,12 +20,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>2.1.0-beta</version>
+    <version>2.1.1-SNAPSHOT</version>
    <relativePath>../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-dist</artifactId>
-  <version>2.1.0-beta</version>
+  <version>2.1.1-SNAPSHOT</version>
  <description>Apache Hadoop Distribution</description>
  <name>Apache Hadoop Distribution</name>
  <packaging>jar</packaging>

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml

@@ -22,12 +22,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>2.1.0-beta</version>
+    <version>2.1.1-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-hdfs-httpfs</artifactId>
-  <version>2.1.0-beta</version>
+  <version>2.1.1-SNAPSHOT</version>
  <packaging>war</packaging>

  <name>Apache Hadoop HttpFS</name>
@@ -554,6 +554,7 @@
                     <delete file="${httpfs.tomcat.dist.dir}/conf/server.xml"/>
                     <delete file="${httpfs.tomcat.dist.dir}/conf/server.xml"/>
                     <copy file="${basedir}/src/main/tomcat/server.xml"
                     <copy file="${basedir}/src/main/tomcat/server.xml"
                           toDir="${httpfs.tomcat.dist.dir}/conf"/>
                           toDir="${httpfs.tomcat.dist.dir}/conf"/>
+                    <delete file="${httpfs.tomcat.dist.dir}/conf/logging.properties"/>
                     <copy file="${basedir}/src/main/tomcat/logging.properties"
                     <copy file="${basedir}/src/main/tomcat/logging.properties"
                           toDir="${httpfs.tomcat.dist.dir}/conf"/>
                           toDir="${httpfs.tomcat.dist.dir}/conf"/>
                     <copy toDir="${httpfs.tomcat.dist.dir}/webapps/ROOT">
                     <copy toDir="${httpfs.tomcat.dist.dir}/webapps/ROOT">

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSKerberosAuthenticator.java

@@ -61,7 +61,6 @@ public class HttpFSKerberosAuthenticator extends KerberosAuthenticator {
   public static final String DELEGATION_PARAM = "delegation";
   public static final String DELEGATION_PARAM = "delegation";
   public static final String TOKEN_PARAM = "token";
   public static final String TOKEN_PARAM = "token";
   public static final String RENEWER_PARAM = "renewer";
   public static final String RENEWER_PARAM = "renewer";
-  public static final String TOKEN_KIND = "HTTPFS_DELEGATION_TOKEN";
   public static final String DELEGATION_TOKEN_JSON = "Token";
   public static final String DELEGATION_TOKEN_JSON = "Token";
   public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
   public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
   public static final String RENEW_DELEGATION_TOKEN_JSON = "long";
   public static final String RENEW_DELEGATION_TOKEN_JSON = "long";

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/DelegationTokenIdentifier.java

@@ -18,7 +18,7 @@
 package org.apache.hadoop.lib.service;

 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;

@@ -29,8 +29,7 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdenti
 public class DelegationTokenIdentifier
   extends AbstractDelegationTokenIdentifier {

-  public static final Text KIND_NAME =
-    new Text(HttpFSKerberosAuthenticator.TOKEN_KIND);
+  public static final Text KIND_NAME = WebHdfsFileSystem.TOKEN_KIND;

  public DelegationTokenIdentifier() {
  }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml

@@ -20,12 +20,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-    <version>2.1.0-beta</version>
+    <version>2.1.1-SNAPSHOT</version>
    <relativePath>../../hadoop-project-dist</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-hdfs-nfs</artifactId>
-  <version>2.1.0-beta</version>
+  <version>2.1.1-SNAPSHOT</version>
  <description>Apache Hadoop HDFS-NFS</description>
  <name>Apache Hadoop HDFS-NFS</name>
  <packaging>jar</packaging>

+ 99 - 4
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -72,6 +72,14 @@ Release 2.1.0-beta - 2013-07-02
    HDFS-4373. Add HTTP API for querying NameNode startup progress. (cnauroth)

    HDFS-4374. Display NameNode startup progress in UI. (cnauroth)
+
+    HDFS-4974. Add Idempotent and AtMostOnce annotations to namenode
+    protocol methods. (suresh)
+
+    HDFS-4979. Implement retry cache on Namenode. (suresh)
+
+    HDFS-5025. Record ClientId and CallId in EditLog to enable rebuilding 
+    retry cache in case of HA failover. (Jing Zhao via suresh)

  IMPROVEMENTS

@@ -188,14 +196,43 @@ Release 2.1.0-beta - 2013-07-02
    HDFS-4645. Move from randomly generated block ID to sequentially generated
    block ID.  (Arpit Agarwal via szetszwo)

+    HDFS-4912. Cleanup FSNamesystem#startFileInternal. (suresh)
+
+    HDFS-4903. Print trash configuration and trash emptier state in
+    namenode log. (Arpit Agarwal via suresh)
+
    HDFS-4992. Make balancer's mover thread count and dispatcher thread count
    configurable.  (Max Lapan via szetszwo)

    HDFS-4996. ClientProtocol#metaSave can be made idempotent by overwriting the
    output file instead of appending to it. (cnauroth)

+    HADOOP-9418.  Add symlink support to DistributedFileSystem (Andrew Wang via
+    Colin Patrick McCabe)
+
+    HDFS-5007. Replace hard-coded property keys with DFSConfigKeys fields. 
+    (Kousuke Saruta via jing9)
+
+    HDFS-5008. Make ClientProtocol#abandonBlock() idempotent. (jing9)
+
+    HADOOP-9760. Move GSet and related classes to common from HDFS.
+    (suresh)
+
+    HDFS-5020. Make DatanodeProtocol#blockReceivedAndDeleted idempotent. 
+    (jing9)
+
+    HDFS-5024. Make DatanodeProtocol#commitBlockSynchronization idempotent. 
+    (Arpit Agarwal via jing9)
+
+    HDFS-3880. Use Builder to build RPC server in HDFS.
+    (Brandon Li and Junping Du via szetszwo)
+
  OPTIMIZATIONS

+    HDFS-4465. Optimize datanode ReplicasMap and ReplicaInfo. (atm)
+
+    HDFS-5027. On startup, DN should scan volumes in parallel. (atm)
+
  BUG FIXES

    HDFS-4626. ClientProtocol#getLinkTarget should throw an exception for
@@ -418,20 +455,41 @@ Release 2.1.0-beta - 2013-07-02
    HDFS-4927. CreateEditsLog creates inodes with an invalid inode ID, which then
    cannot be loaded by a namenode. (cnauroth)

+    HDFS-4944. WebHDFS cannot create a file path containing characters that must
+    be URI-encoded, such as space. (cnauroth)
+
+    HDFS-4888. Refactor and fix FSNamesystem.getTurnOffTip. (Ravi Prakash via
+    kihwal)
+
+    HDFS-4943. WebHdfsFileSystem does not work when original file path has
+    encoded chars.  (Jerry He via szetszwo)
+
    HDFS-4948. mvn site for hadoop-hdfs-nfs fails. (brandonli)

    HDFS-4954. In nfs, OpenFileCtx.getFlushedOffset() should handle IOException.
    (Brandon Li via szetszwo)

-    HDFS-4944. WebHDFS cannot create a file path containing characters that must
-    be URI-encoded, such as space. (cnauroth)
+    HDFS-4887. TestNNThroughputBenchmark exits abruptly. (kihwal)

-    HDFS-4943. WebHdfsFileSystem does not work when original file path has
-    encoded chars.  (Jerry He via szetszwo)
+    HDFS-4980. Incorrect logging.properties file for hadoop-httpfs.
+    (Mark Grover via suresh)
+
+    HDFS-4999. Fix TestShortCircuitLocalRead on branch-2. (cmccabe via kihwal)
+
+    HDFS-4687. TestDelegationTokenForProxyUser#testWebHdfsDoAs is flaky with
+    JDK7. (Andrew Wang via atm)

    HDFS-5003. TestNNThroughputBenchmark failed caused by existing directories.
    (Xi Fang via cnauroth)

+    HDFS-5018. Misspelled DFSConfigKeys#DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT
+    in javadoc of DatanodeInfo#isStale(). (Ted Yu via jing9)
+
+    HDFS-4602. TestBookKeeperHACheckpoints fails. (umamahesh)
+
+    HDFS-5016. Deadlock in pipeline recovery causes Datanode to be marked dead.
+    (suresh)
+
  BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS

    HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.
@@ -948,6 +1006,26 @@ Release 2.1.0-beta - 2013-07-02
    HDFS-4875. Add a test for testing snapshot file length. 
    (Arpit Agarwal via jing9)

+    HDFS-4841. FsShell commands using secure webhfds fail ClientFinalizer 
+    shutdown hook. (rkanter via tucu)
+
+    HDFS-4951. FsShell commands using secure httpfs throw exceptions due 
+    to missing TokenRenewer. (rknater via tucu)
+
+    HDFS-4969. WebhdfsFileSystem expects non-standard WEBHDFS Json element. 
+    (rkanter via tucu)
+
+    HDFS-4797. BlockScanInfo does not override equals(..) and hashCode()
+    consistently.  (szetszwo)
+
+    HDFS-4978. Make disallowSnapshot idempotent. (jing9)
+
+    HDFS-5005. Move SnapshotException and SnapshotAccessControlException 
+    to o.a.h.hdfs.protocol. (jing9)
+
+    HDFS-4982. JournalNode should relogin from keytab before fetching logs
+    from other JNs (todd)
+
 Release 2.0.5-alpha - 06/06/2013

  INCOMPATIBLE CHANGES
@@ -3017,6 +3095,23 @@ Release 2.0.0-alpha - 05-23-2012

    HDFS-3039. Address findbugs and javadoc warnings on branch. (todd via atm)

+Release 0.23.10 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    HDFS-5010. Reduce the frequency of getCurrentUser() calls from namenode
+    (kihwal)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HDFS-4998. TestUnderReplicatedBlocks fails intermittently (kihwal)
+
 Release 0.23.9 - UNRELEASED

  INCOMPATIBLE CHANGES

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml

@@ -325,4 +325,9 @@
        <Field name="modification" />
        <Field name="modification" />
        <Bug pattern="VO_VOLATILE_INCREMENT" />
        <Bug pattern="VO_VOLATILE_INCREMENT" />
      </Match>
      </Match>
+     <Match>
+       <Class name="org.apache.hadoop.hdfs.server.datanode.ReplicaInfo" />
+       <Method name="setDirInternal" />
+       <Bug pattern="DM_STRING_CTOR" />
+     </Match>
 </FindBugsFilter>

Some files were not shown because too many files changed in this diff