
Merging r1523878 through r1524586 from trunk to branch HDFS-2832

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1524590 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal 11 years ago
parent
commit
a80b826eef
75 changed files with 2206 additions and 651 deletions
  1. hadoop-common-project/hadoop-common/CHANGES.txt (+23 -4)
  2. hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html (+1144 -0)
  3. hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto (+2 -2)
  4. hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto (+1 -1)
  5. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java (+4 -3)
  6. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/ACCESS3Response.java (+3 -2)
  7. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/COMMIT3Response.java (+3 -2)
  8. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/CREATE3Response.java (+3 -2)
  9. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSINFO3Response.java (+3 -2)
  10. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSSTAT3Response.java (+3 -2)
  11. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/GETATTR3Response.java (+3 -2)
  12. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/LOOKUP3Response.java (+3 -2)
  13. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/MKDIR3Response.java (+3 -2)
  14. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/NFS3Response.java (+11 -4)
  15. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/PATHCONF3Response.java (+3 -2)
  16. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READ3Response.java (+3 -2)
  17. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java (+3 -2)
  18. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java (+3 -2)
  19. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READLINK3Response.java (+3 -2)
  20. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/REMOVE3Response.java (+3 -2)
  21. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/RENAME3Response.java (+3 -2)
  22. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/RMDIR3Response.java (+3 -2)
  23. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/SETATTR3Response.java (+3 -2)
  24. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/SYMLINK3Response.java (+3 -2)
  25. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/VoidResponse.java (+0 -37)
  26. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/WRITE3Response.java (+3 -2)
  27. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAcceptedReply.java (+21 -24)
  28. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java (+29 -19)
  29. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcDeniedReply.java (+14 -14)
  30. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcMessage.java (+2 -0)
  31. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java (+7 -2)
  32. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcReply.java (+20 -8)
  33. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpClient.java (+1 -2)
  34. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServerHandler.java (+5 -4)
  35. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java (+166 -315)
  36. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java (+16 -0)
  37. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Verifier.java (+21 -4)
  38. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java (+4 -11)
  39. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapResponse.java (+5 -4)
  40. hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java (+4 -2)
  41. hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsTime.java (+1 -1)
  42. hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestFileHandle.java (+1 -1)
  43. hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java (+6 -2)
  44. hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAcceptedReply.java (+1 -1)
  45. hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcDeniedReply.java (+3 -4)
  46. hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcMessage.java (+4 -1)
  47. hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcReply.java (+7 -2)
  48. hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestXDR.java (+26 -14)
  49. hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestCredentialsSys.java (+1 -1)
  50. hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java (+9 -5)
  51. hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java (+18 -9)
  52. hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java (+13 -10)
  53. hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java (+9 -4)
  54. hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java (+9 -18)
  55. hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestPortmapRegister.java (+4 -5)
  56. hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java (+4 -1)
  57. hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (+25 -1)
  58. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (+12 -0)
  59. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java (+43 -0)
  60. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java (+11 -5)
  61. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RollingLogs.java (+6 -0)
  62. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RollingLogsImpl.java (+7 -0)
  63. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java (+136 -37)
  64. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (+5 -1)
  65. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java (+62 -5)
  66. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (+39 -0)
  67. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java (+13 -2)
  68. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java (+77 -0)
  69. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java (+1 -1)
  70. hadoop-mapreduce-project/CHANGES.txt (+16 -1)
  71. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java (+3 -0)
  72. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java (+2 -2)
  73. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java (+58 -14)
  74. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java (+4 -4)
  75. hadoop-yarn-project/CHANGES.txt (+13 -1)

+ 23 - 4
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -363,10 +363,29 @@ Release 2.3.0 - UNRELEASED
 
     HADOOP-9908. Fix NPE when versioninfo properties file is missing (todd)
 
-Release 2.1.1-beta - UNRELEASED
+    HADOOP-9350. Hadoop not building against Java7 on OSX
+    (Robert Kanter via stevel)
+
+Release 2.2.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 2.1.1-beta - 2013-09-23
 
   INCOMPATIBLE CHANGES
 
+    HADOOP-9944. Fix RpcRequestHeaderProto.callId to be sint32 rather than
+    uint32 since ipc.Client.CONNECTION_CONTEXT_CALL_ID is signed (i.e. -3) 
+    (acmurthy)
+
   NEW FEATURES
 
   IMPROVEMENTS
@@ -411,6 +430,9 @@ Release 2.1.1-beta - UNRELEASED
     HADOOP-9962. in order to avoid dependency divergence within Hadoop itself 
     lets enable DependencyConvergence. (rvs via tucu)
 
+    HADOOP-9669. Reduce the number of byte array creations and copies in
+    XDR data manipulation. (Haohui Mai via brandonli)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -468,9 +490,6 @@ Release 2.1.1-beta - UNRELEASED
     HADOOP-9557. hadoop-client excludes commons-httpclient. (Lohit Vijayarenu via
     cnauroth)
 
-    HADOOP-9350. Hadoop not building against Java7 on OSX
-    (Robert Kanter via stevel)
-
     HADOOP-9961. versions of a few transitive dependencies diverged between hadoop 
     subprojects. (rvs via tucu)
 
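The HADOOP-9944 entry above turns on protobuf varint semantics: a plain uint32 varint spends five bytes on a negative value such as CONNECTION_CONTEXT_CALL_ID (-3), while sint32 ZigZag-encodes small negative values into a single byte and makes the signedness explicit in the schema (the corresponding RpcHeader.proto change is file 4 in the list above). A minimal sketch with protobuf-java's CodedOutputStream, illustrative only and not code from this patch:

{code}
import com.google.protobuf.CodedOutputStream;

public class CallIdEncoding {
  public static void main(String[] args) {
    int callId = -3; // ipc.Client.CONNECTION_CONTEXT_CALL_ID is signed
    // Encoded as uint32, -3 is treated as 0xFFFFFFFD: a 5-byte varint.
    System.out.println(CodedOutputStream.computeUInt32SizeNoTag(callId)); // 5
    // Encoded as sint32, ZigZag maps -3 to 5: a 1-byte varint.
    System.out.println(CodedOutputStream.computeSInt32SizeNoTag(callId)); // 1
    System.out.println(CodedOutputStream.encodeZigZag32(callId));         // 5
  }
}
{code}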

+ 1144 - 0
hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html

@@ -1,3 +1,1147 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<title>Hadoop  2.1.1-beta Release Notes</title>
+<STYLE type="text/css">
+	H1 {font-family: sans-serif}
+	H2 {font-family: sans-serif; margin-left: 7mm}
+	TABLE {margin-left: 7mm}
+</STYLE>
+</head>
+<body>
+<h1>Hadoop  2.1.1-beta Release Notes</h1>
+These release notes include new developer and user-facing incompatibilities, features, and major improvements. 
+<a name="changes"/>
+<h2>Changes since Hadoop 2.1.0-beta</h2>
+<ul>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1194">YARN-1194</a>.
+     Minor bug reported by Roman Shaposhnik and fixed by Roman Shaposhnik (nodemanager)<br>
+     <b>TestContainerLogsPage fails with native builds</b><br>
+     <blockquote>Running TestContainerLogsPage on trunk while Native IO is enabled makes it fail</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1189">YARN-1189</a>.
+     Blocker bug reported by Jason Lowe and fixed by Omkar Vinit Joshi <br>
+     <b>NMTokenSecretManagerInNM is not being told when applications have finished </b><br>
+     <blockquote>The {{appFinished}} method is not being called when applications have finished.  This causes a couple of leaks as {{oldMasterKeys}} and {{appToAppAttemptMap}} are never being pruned.
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1184">YARN-1184</a>.
+     Major bug reported by J.Andreina and fixed by Chris Douglas (capacityscheduler , resourcemanager)<br>
+     <b>ClassCastException is thrown during preemption When a huge job is submitted to a queue B whose resources is used by a job in queueA</b><br>
+     <blockquote>preemption is enabled.
+Queue = a,b
+a capacity = 30%
+b capacity = 70%
+
+Step 1: Assign a big job to queue a (so that job_a will utilize some resources from queue b)
+Step 2: Assign a big job to queue b.
+
+The following exception is thrown at the Resource Manager:
+{noformat}
+2013-09-12 10:42:32,535 ERROR [SchedulingMonitor (ProportionalCapacityPreemptionPolicy)] yarn.YarnUncaughtExceptionHandler (YarnUncaughtExceptionHandler.java:uncaughtException(68)) - Thread Thread[SchedulingMonitor (ProportionalCapacityPreemptionPolicy),5,main] threw an Exception.
+java.lang.ClassCastException: java.util.Collections$UnmodifiableSet cannot be cast to java.util.NavigableSet
+	at org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.getContainersToPreempt(ProportionalCapacityPreemptionPolicy.java:403)
+	at org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.containerBasedPreemptOrKill(ProportionalCapacityPreemptionPolicy.java:202)
+	at org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.editSchedule(ProportionalCapacityPreemptionPolicy.java:173)
+	at org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingMonitor.invokePolicy(SchedulingMonitor.java:72)
+	at org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingMonitor$PreemptionChecker.run(SchedulingMonitor.java:82)
+	at java.lang.Thread.run(Thread.java:662)
+
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1176">YARN-1176</a>.
+     Critical bug reported by Thomas Graves and fixed by Jonathan Eagles (resourcemanager)<br>
+     <b>RM web services ClusterMetricsInfo total nodes doesn't include unhealthy nodes</b><br>
+     <blockquote>In the web services api for the cluster/metrics, the totalNodes reported doesn't include the unhealthy nodes.
+
+this.totalNodes = activeNodes + lostNodes + decommissionedNodes
+	        + rebootedNodes;</blockquote></li>
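The fix the report implies is a one-line change to the metric; a sketch, assuming an unhealthyNodes count is available alongside the other counters:

{code}
// ClusterMetricsInfo (sketch): count unhealthy nodes in the total as well.
this.totalNodes = activeNodes + lostNodes + decommissionedNodes
    + rebootedNodes + unhealthyNodes;
{code}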
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1170">YARN-1170</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Binglin Chang <br>
+     <b>yarn proto definitions should specify package as 'hadoop.yarn'</b><br>
+     <blockquote>yarn proto definitions should specify package as 'hadoop.yarn' similar to protos with 'hadoop.common' &amp; 'hadoop.hdfs' in Common &amp; HDFS respectively.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1152">YARN-1152</a>.
+     Blocker bug reported by Jason Lowe and fixed by Jason Lowe (resourcemanager)<br>
+     <b>Invalid key to HMAC computation error when getting application report for completed app attempt</b><br>
+     <blockquote>On a secure cluster, an invalid key to HMAC error is thrown when trying to get an application report for an application with an attempt that has unregistered.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1144">YARN-1144</a>.
+     Critical bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (resourcemanager)<br>
+     <b>Unmanaged AMs registering a tracking URI should not be proxy-fied</b><br>
+     <blockquote>Unmanaged AMs do not run in the cluster, their tracking URL should not be proxy-fied.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1137">YARN-1137</a>.
+     Major improvement reported by Alejandro Abdelnur and fixed by Roman Shaposhnik (nodemanager)<br>
+     <b>Add support whitelist for system users to Yarn container-executor.c</b><br>
+     <blockquote>Currently container-executor.c has a banned set of users (mapred, hdfs &amp; bin) and configurable min.user.id (defaulting to 1000).
+
+This presents a problem for systems that run as system users (below 1000) if these systems want to start containers.
+
+Systems like Impala fit in this category. A (local) 'impala' system user is created when installing Impala on the nodes. 
+
+Note that the same thing happens when installing systems like HDFS, YARN, and Oozie from packages (Bigtop): local system users are created.
+
+For Impala to be able to run containers in a secure cluster, the 'impala' system user must be whitelisted.
+
+For this, an 'allowed.system.users' option can be added to container-executor.cfg, along with logic in container-executor.c to allow the usernames in that list.
+
+Because system users are not guaranteed to have the same UID in different machines, the 'allowed.system.users' property should use usernames and not UIDs.
+</blockquote></li>
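A sketch of what the proposed option could look like in container-executor.cfg; the allowed.system.users key is the one proposed above, and the user list is illustrative:

{code}
# container-executor.cfg
banned.users=mapred,hdfs,bin
min.user.id=1000
# Proposed: system users (UID < min.user.id) still allowed to run containers.
allowed.system.users=impala
{code}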
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1124">YARN-1124</a>.
+     Blocker bug reported by Omkar Vinit Joshi and fixed by Xuan Gong <br>
+     <b>By default yarn application -list should display all the applications in a state other than FINISHED / FAILED</b><br>
+     <blockquote>Today we are just listing applications in the RUNNING state by default for "yarn application -list". Instead we should show all the applications that are submitted/accepted/running.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1120">YARN-1120</a>.
+     Minor bug reported by Chuan Liu and fixed by Chuan Liu <br>
+     <b>Make ApplicationConstants.Environment.USER definition OS neutral</b><br>
+     <blockquote>In YARN-557, we added some code to make {{ApplicationConstants.Environment.USER}} has OS-specific definition in order to fix the unit test TestUnmanagedAMLauncher. In YARN-571, the relevant test code was corrected. In YARN-602, we actually will explicitly set the environment variables for the child containers. With these changes, I think we can revert the YARN-557 change to make {{ApplicationConstants.Environment.USER}} OS neutral. The main benefit is that we can use the same method over the Enum constants. This should also fix the TestContainerLaunch#testContainerEnvVariables failure on Windows. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1117">YARN-1117</a>.
+     Major improvement reported by Tassapol Athiapinya and fixed by Xuan Gong (client)<br>
+     <b>Improve help message for $ yarn applications and $yarn node</b><br>
+     <blockquote>The help message was standardized in YARN-1080. It would be nice to have similar changes for $ yarn applications and $ yarn node.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1116">YARN-1116</a>.
+     Major sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+     <b>Populate AMRMTokens back to AMRMTokenSecretManager after RM restarts</b><br>
+     <blockquote>The AMRMTokens are currently only saved in the RMStateStore and not populated back to the AMRMTokenSecretManager after the RM restarts. This is needed all the more now that the AMRMToken is also used in non-secure environments.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1107">YARN-1107</a>.
+     Blocker bug reported by Arpit Gupta and fixed by Omkar Vinit Joshi (resourcemanager)<br>
+     <b>Job submitted with Delegation token in secured environment causes RM to fail during RM restart</b><br>
+     <blockquote>If a secure RM with recovery enabled is restarted while Oozie jobs are running, the RM fails to come up.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1101">YARN-1101</a>.
+     Major bug reported by Robert Parker and fixed by Robert Parker (resourcemanager)<br>
+     <b>Active nodes can be decremented below 0</b><br>
+     <blockquote>The issue is in RMNodeImpl, where both the RUNNING and UNHEALTHY states use the same DeactivateNodeTransition class when transitioning to a deactivated state (LOST, DECOMMISSIONED, REBOOTED). The DeactivateNodeTransition class naturally decrements the active-node count; however, in cases where the node has transitioned to UNHEALTHY, the active count has already been decremented.</blockquote></li>
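A sketch of the guard this description implies in DeactivateNodeTransition; the metrics helper name is illustrative:

{code}
// Only decrement the active-node count if the node was still counted as
// active; a node that went through UNHEALTHY was already decremented.
if (rmNode.getState() != NodeState.UNHEALTHY) {
  ClusterMetrics.getMetrics().decrNumActiveNodes(); // helper name assumed
}
{code}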
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1094">YARN-1094</a>.
+     Blocker bug reported by Yesha Vora and fixed by Vinod Kumar Vavilapalli <br>
+     <b>RM restart throws Null pointer Exception in Secure Env</b><br>
+     <blockquote>Enable the RM restart feature and restart the Resource Manager while a job is running.
+
+The Resource Manager fails to start with the error below:
+
+2013-08-23 17:57:40,705 INFO  resourcemanager.RMAppManager (RMAppManager.java:recover(370)) - Recovering application application_1377280618693_0001
+2013-08-23 17:57:40,763 ERROR resourcemanager.ResourceManager (ResourceManager.java:serviceStart(617)) - Failed to load/recover state
+java.lang.NullPointerException
+        at org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer.setTimerForTokenRenewal(DelegationTokenRenewer.java:371)
+        at org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer.addApplication(DelegationTokenRenewer.java:307)
+        at org.apache.hadoop.yarn.server.resourcemanager.RMAppManager.submitApplication(RMAppManager.java:291)
+        at org.apache.hadoop.yarn.server.resourcemanager.RMAppManager.recover(RMAppManager.java:371)
+        at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.recover(ResourceManager.java:819)
+        at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.serviceStart(ResourceManager.java:613)
+        at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
+        at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.main(ResourceManager.java:832)
+2013-08-23 17:57:40,766 INFO  util.ExitUtil (ExitUtil.java:terminate(124)) - Exiting with status 1
+                                                                                                    
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1093">YARN-1093</a>.
+     Major bug reported by Wing Yew Poon and fixed by  (documentation)<br>
+     <b>Corrections to Fair Scheduler documentation</b><br>
+     <blockquote>The fair scheduler is still evolving, but the current documentation contains some inaccuracies.
+
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1085">YARN-1085</a>.
+     Blocker task reported by Jaimin D Jetly and fixed by Omkar Vinit Joshi (nodemanager , resourcemanager)<br>
+     <b>Yarn and MRv2 should do HTTP client authentication in kerberos setup.</b><br>
+     <blockquote>In a Kerberos setup, an HTTP client is expected to authenticate to Kerberos before the user is allowed to browse any information.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1083">YARN-1083</a>.
+     Major bug reported by Yesha Vora and fixed by Zhijie Shen (resourcemanager)<br>
+     <b>ResourceManager should fail when yarn.nm.liveness-monitor.expiry-interval-ms is set less than heartbeat interval</b><br>
+     <blockquote>If 'yarn.nm.liveness-monitor.expiry-interval-ms' is set to less than the heartbeat interval, all the node managers will be added to 'Lost Nodes'.
+
+Instead, the Resource Manager should validate these properties and fail to start if their combination is invalid.</blockquote></li>
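A sketch of the fail-fast validation the reporter asks for, at RM startup; the expiry key is quoted above, while the heartbeat-interval key name is an assumption:

{code}
// Refuse to start if no heartbeat can arrive within the expiry window.
long expiryMs = conf.getLong(
    "yarn.nm.liveness-monitor.expiry-interval-ms", 10 * 60 * 1000);
long heartbeatMs = conf.getLong(
    "yarn.resourcemanager.nodemanagers.heartbeat-interval-ms", 1000);
if (expiryMs < heartbeatMs) {
  throw new YarnRuntimeException("NM expiry interval (" + expiryMs
      + " ms) is shorter than the heartbeat interval (" + heartbeatMs
      + " ms)");
}
{code}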
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1082">YARN-1082</a>.
+     Blocker bug reported by Arpit Gupta and fixed by Vinod Kumar Vavilapalli (resourcemanager)<br>
+     <b>Secure RM with recovery enabled and rm state store on hdfs fails with gss exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1081">YARN-1081</a>.
+     Minor improvement reported by Tassapol Athiapinya and fixed by Akira AJISAKA (client)<br>
+     <b>Minor improvement to output header for $ yarn node -list</b><br>
+     <blockquote>Output of $ yarn node -list shows the number of running containers at each node. I found a case where a new YARN user thought this was a container ID, used it later in other YARN commands, and got an error due to the misunderstanding.
+
+{code:title=current output}
+2013-07-31 04:00:37,814|beaver.machine|INFO|RUNNING: /usr/bin/yarn node -list
+2013-07-31 04:00:38,746|beaver.machine|INFO|Total Nodes:1
+2013-07-31 04:00:38,747|beaver.machine|INFO|Node-Id	Node-State	Node-Http-Address	Running-Containers
+2013-07-31 04:00:38,747|beaver.machine|INFO|myhost:45454	   RUNNING	myhost:50060	   2
+{code}
+
+{code:title=proposed output}
+2013-07-31 04:00:37,814|beaver.machine|INFO|RUNNING: /usr/bin/yarn node -list
+2013-07-31 04:00:38,746|beaver.machine|INFO|Total Nodes:1
+2013-07-31 04:00:38,747|beaver.machine|INFO|Node-Id	Node-State	Node-Http-Address	Number-of-Running-Containers
+2013-07-31 04:00:38,747|beaver.machine|INFO|myhost:45454	   RUNNING	myhost:50060	   2
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1080">YARN-1080</a>.
+     Major improvement reported by Tassapol Athiapinya and fixed by Xuan Gong (client)<br>
+     <b>Improve help message for $ yarn logs</b><br>
+     <blockquote>There are 2 parts I am proposing in this jira. They can be fixed together in one patch.
+
+1. Standardize help message for required parameter of $ yarn logs
+YARN CLI has a command "logs" ($ yarn logs). The command always requires a parameter of "-applicationId &lt;arg&gt;". However, the help message of the command does not make this clear: it lists -applicationId as an optional parameter, yet if I don't set it, YARN CLI complains that it is missing. It is better to use the standard required-argument notation used in other Linux commands, so any user familiar with such commands can see that this parameter is needed.
+
+{code:title=current help message}
+-bash-4.1$ yarn logs
+usage: general options are:
+ -applicationId &lt;arg&gt;   ApplicationId (required)
+ -appOwner &lt;arg&gt;        AppOwner (assumed to be current user if not
+                        specified)
+ -containerId &lt;arg&gt;     ContainerId (must be specified if node address is
+                        specified)
+ -nodeAddress &lt;arg&gt;     NodeAddress in the format nodename:port (must be
+                        specified if container id is specified)
+{code}
+
+{code:title=proposed help message}
+-bash-4.1$ yarn logs
+usage: yarn logs -applicationId &lt;application ID&gt; [OPTIONS]
+general options are:
+ -appOwner &lt;arg&gt;        AppOwner (assumed to be current user if not
+                        specified)
+ -containerId &lt;arg&gt;     ContainerId (must be specified if node address is
+                        specified)
+ -nodeAddress &lt;arg&gt;     NodeAddress in the format nodename:port (must be
+                        specified if container id is specified)
+{code}
+
+2. Add a description to the help command. As far as I know, a user cannot get logs for a running job. Since I spent some time trying to get logs of running applications, it would be nice to say this in the command description.
+{code:title=proposed help}
+Retrieve logs for completed/killed YARN application
+usage: general options are...
+{code}
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1078">YARN-1078</a>.
+     Minor bug reported by Chuan Liu and fixed by Chuan Liu <br>
+     <b>TestNodeManagerResync, TestNodeManagerShutdown, and TestNodeStatusUpdater fail on Windows</b><br>
+     <blockquote>The three unit tests fail on Windows due to host name resolution differences, i.e. 127.0.0.1 does not resolve to the host name "localhost".
+
+{noformat}
+org.apache.hadoop.security.token.SecretManager$InvalidToken: Given Container container_0_0000_01_000000 identifier is not valid for current Node manager. Expected : 127.0.0.1:12345 Found : localhost:12345
+{noformat}
+
+{noformat}
+testNMConnectionToRM(org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater)  Time elapsed: 8343 sec  &lt;&lt;&lt; FAILURE!
+org.junit.ComparisonFailure: expected:&lt;[localhost]:12345&gt; but was:&lt;[127.0.0.1]:12345&gt;
+	at org.junit.Assert.assertEquals(Assert.java:125)
+	at org.junit.Assert.assertEquals(Assert.java:147)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater$MyResourceTracker6.registerNodeManager(TestNodeStatusUpdater.java:712)
+	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
+	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
+	at java.lang.reflect.Method.invoke(Method.java:597)
+	at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187)
+	at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:101)
+	at $Proxy26.registerNodeManager(Unknown Source)
+	at org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl.registerWithRM(NodeStatusUpdaterImpl.java:212)
+	at org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl.serviceStart(NodeStatusUpdaterImpl.java:149)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater$MyNodeStatusUpdater4.serviceStart(TestNodeStatusUpdater.java:369)
+	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
+	at org.apache.hadoop.service.CompositeService.serviceStart(CompositeService.java:101)
+	at org.apache.hadoop.yarn.server.nodemanager.NodeManager.serviceStart(NodeManager.java:213)
+	at org.apache.hadoop.service.AbstractService.start(AbstractService.java:193)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater.testNMConnectionToRM(TestNodeStatusUpdater.java:985)
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1077">YARN-1077</a>.
+     Minor bug reported by Chuan Liu and fixed by Chuan Liu <br>
+     <b>TestContainerLaunch fails on Windows</b><br>
+     <blockquote>Several cases in this unit tests fail on Windows. (Append error log at the end.)
+
+testInvalidEnvSyntaxDiagnostics fails because of the difference between cmd and bash script error handling. If some command fails in a cmd script, cmd will continue executing the rest of the script's commands; error handling needs to be carried out explicitly in the script file, and the error code of the last command is returned as the error code of the whole script. In this test, an error happens in the middle of the cmd script and the test expects an exception and a non-zero error code, but in the cmd script the intermediate errors are ignored: the last command "call" succeeds and there is no exception.
+
+testContainerLaunchStdoutAndStderrDiagnostics fails due to wrong cmd commands used by the test.
+
+testContainerEnvVariables and testDelayedKill fail due to a regression from YARN-906.
+
+{noformat}
+-------------------------------------------------------------------------------
+Test set: org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch
+-------------------------------------------------------------------------------
+Tests run: 7, Failures: 4, Errors: 0, Skipped: 0, Time elapsed: 11.526 sec &lt;&lt;&lt; FAILURE!
+testInvalidEnvSyntaxDiagnostics(org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch)  Time elapsed: 583 sec  &lt;&lt;&lt; FAILURE!
+junit.framework.AssertionFailedError: Should catch exception
+	at junit.framework.Assert.fail(Assert.java:50)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch.testInvalidEnvSyntaxDiagnostics(TestContainerLaunch.java:269)
+...
+
+testContainerLaunchStdoutAndStderrDiagnostics(org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch)  Time elapsed: 561 sec  &lt;&lt;&lt; FAILURE!
+junit.framework.AssertionFailedError: Should catch exception
+	at junit.framework.Assert.fail(Assert.java:50)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch.testContainerLaunchStdoutAndStderrDiagnostics(TestContainerLaunch.java:314)
+...
+
+testContainerEnvVariables(org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch)  Time elapsed: 4136 sec  &lt;&lt;&lt; FAILURE!
+junit.framework.AssertionFailedError: expected:&lt;137&gt; but was:&lt;143&gt;
+	at junit.framework.Assert.fail(Assert.java:50)
+	at junit.framework.Assert.failNotEquals(Assert.java:287)
+	at junit.framework.Assert.assertEquals(Assert.java:67)
+	at junit.framework.Assert.assertEquals(Assert.java:199)
+	at junit.framework.Assert.assertEquals(Assert.java:205)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch.testContainerEnvVariables(TestContainerLaunch.java:500)
+...
+
+testDelayedKill(org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch)  Time elapsed: 2744 sec  &lt;&lt;&lt; FAILURE!
+junit.framework.AssertionFailedError: expected:&lt;137&gt; but was:&lt;143&gt;
+	at junit.framework.Assert.fail(Assert.java:50)
+	at junit.framework.Assert.failNotEquals(Assert.java:287)
+	at junit.framework.Assert.assertEquals(Assert.java:67)
+	at junit.framework.Assert.assertEquals(Assert.java:199)
+	at junit.framework.Assert.assertEquals(Assert.java:205)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.TestContainerLaunch.testDelayedKill(TestContainerLaunch.java:601)
+...
+{noformat}
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1074">YARN-1074</a>.
+     Major improvement reported by Tassapol Athiapinya and fixed by Xuan Gong (client)<br>
+     <b>Clean up YARN CLI app list to show only running apps.</b><br>
+     <blockquote>Once a user brings up the YARN daemon and runs jobs, the jobs stay in the output returned by $ yarn application -list even after they have completed. We want the YARN command line to clean up this list. Specifically, we want to remove applications in the FINISHED state (not Final-State) or KILLED state from the result.
+
+{code}
+[user1@host1 ~]$ yarn application -list
+Total Applications:150
+                Application-Id	    Application-Name	    Application-Type	      User	     Queue	             State       Final-State	       Progress	                       Tracking-URL
+application_1374638600275_0109	           Sleep job	           MAPREDUCE	    user1	   default	            KILLED            KILLED	           100%	   host1:54059
+application_1374638600275_0121	           Sleep job	           MAPREDUCE	    user1	   default	          FINISHED         SUCCEEDED	           100%	host1:19888/jobhistory/job/job_1374638600275_0121
+application_1374638600275_0020	           Sleep job	           MAPREDUCE	    user1	   default	          FINISHED         SUCCEEDED	           100%	host1:19888/jobhistory/job/job_1374638600275_0020
+application_1374638600275_0038	           Sleep job	           MAPREDUCE	    user1	   default	
+....
+{code}
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1049">YARN-1049</a>.
+     Blocker bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (api)<br>
+     <b>ContainerExistStatus should define a status for preempted containers</b><br>
+     <blockquote>With the current behavior it is impossible to determine whether a container has been preempted or lost due to an NM crash.
+
+Adding a PREEMPTED exit status (-102) will help an AM determine that a container has been preempted.
+
+Note the change of scope from the original summary/description. The original scope proposed API/behavior changes. Because we are past 2.1.0-beta I'm reducing the scope of this JIRA.</blockquote></li>
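With that constant in place, an AM can tell preemption apart from failure in its completion callback; a sketch against the AMRMClientAsync API, where the replacement-request helper is application-specific:

{code}
// In an AMRMClientAsync.CallbackHandler implementation:
@Override
public void onContainersCompleted(List<ContainerStatus> statuses) {
  for (ContainerStatus status : statuses) {
    if (status.getExitStatus() == ContainerExitStatus.PREEMPTED) {
      // Preempted by the RM: ask for a replacement instead of counting
      // this as an application failure.
      requestReplacementContainer(status.getContainerId()); // app-specific
    }
  }
}
{code}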
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1034">YARN-1034</a>.
+     Trivial task reported by Sandy Ryza and fixed by Karthik Kambatla (documentation , scheduler)<br>
+     <b>Remove "experimental" in the Fair Scheduler documentation</b><br>
+     <blockquote>The YARN Fair Scheduler is largely stable now, and should no longer be declared experimental.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1025">YARN-1025</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (nodemanager , resourcemanager)<br>
+     <b>ResourceManager and NodeManager do not load native libraries on Windows.</b><br>
+     <blockquote>ResourceManager and NodeManager do not have the correct setting for java.library.path when launched on Windows.  This prevents the processes from loading native code from hadoop.dll.  The native code is required for correct functioning on Windows (not optional), so this ultimately can cause failures.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1008">YARN-1008</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Alejandro Abdelnur (nodemanager)<br>
+     <b>MiniYARNCluster with multiple nodemanagers, all nodes have same key for allocations</b><br>
+     <blockquote>While the NMs are keyed using the NodeId, the allocation is done based on the hostname. 
+
+This makes the different nodes indistinguishable to the scheduler.
+
+There should be an option to enable allocations keyed by host:port instead of just the host. The nodes reported to the AM should report the 'key' (host or host:port).
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1006">YARN-1006</a>.
+     Major bug reported by Jian He and fixed by Xuan Gong <br>
+     <b>Nodes list web page on the RM web UI is broken</b><br>
+     <blockquote>The nodes web page, which lists all the connected nodes of the cluster, is broken.
+
+1. The page is not shown in the correct format/style.
+2. If we restart the NM, the node list is not refreshed; the newly started NM is just added to the list, and the old NMs' information still remains.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1001">YARN-1001</a>.
+     Blocker task reported by Srimanth Gunturi and fixed by Zhijie Shen (api)<br>
+     <b>YARN should provide per application-type and state statistics</b><br>
+     <blockquote>In Ambari we plan to show, for MR2, the number of applications finished, running, waiting, etc. It would be efficient if YARN could provide aggregated counts per application type and state.
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-994">YARN-994</a>.
+     Major bug reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>HeartBeat thread in AMRMClientAsync does not handle runtime exception correctly</b><br>
+     <blockquote>YARN-654 performs sanity checks for parameters of public methods in AMRMClient. Those may throw runtime exceptions.
+Currently, the heartbeat thread in AMRMClientAsync only catches IOException and YarnException, and will not handle a RuntimeException properly.
+A possible solution: the heartbeat thread catches Throwable and notifies the callback-handler thread via the existing savedException.</blockquote></li>
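A sketch of that hardening in the heartbeat loop; field and thread names are illustrative:

{code}
try {
  AllocateResponse response = client.allocate(progress);
  responseQueue.put(response);
} catch (Throwable t) { // previously only YarnException/IOException
  // Surface the error to the callback-handler thread instead of letting
  // the heartbeat thread die silently.
  savedException = new YarnException("Heartbeat failed", t);
  handlerThread.interrupt();
}
{code}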
+<li> <a href="https://issues.apache.org/jira/browse/YARN-981">YARN-981</a>.
+     Major bug reported by Xuan Gong and fixed by Jian He <br>
+     <b>YARN/MR2/Job-history /logs link does not have correct content</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-966">YARN-966</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>The thread of ContainerLaunch#call will fail without any signal if getLocalizedResources() is called when the container is not at LOCALIZED</b><br>
+     <blockquote>In ContainerImpl.getLocalizedResources(), there's:
+{code}
+assert ContainerState.LOCALIZED == getContainerState(); // TODO: FIXME!!
+{code}
+
+ContainerImpl.getLocalizedResources() is called in ContainerLaunch.call(), which is scheduled on a separate thread. If the container is not at LOCALIZED (e.g. it is at KILLING, see YARN-906), an AssertionError will be thrown, failing the thread without notifying the NM. Therefore, the container cannot receive the further events that ContainerLaunch.call() is supposed to send, and cannot move towards completion.</blockquote></li>
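A sketch of a defensive alternative to the assert; the exact behavior chosen here is an assumption, and the point is only to fail in a way the caller can handle:

{code}
// ContainerImpl.getLocalizedResources() (sketch): no bare assert, which
// would kill ContainerLaunch's thread without emitting any event.
if (ContainerState.LOCALIZED != getContainerState()) {
  return null; // ContainerLaunch.call() must check for null and clean up
}
{code}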
+<li> <a href="https://issues.apache.org/jira/browse/YARN-957">YARN-957</a>.
+     Blocker bug reported by Omkar Vinit Joshi and fixed by Omkar Vinit Joshi <br>
+     <b>Capacity Scheduler tries to reserve the memory more than what node manager reports.</b><br>
+     <blockquote>I have 2 node managers.
+* one with 1024 MB memory.(nm1)
+* second with 2048 MB memory.(nm2)
+I am submitting a simple map reduce application with 1 mapper and 1 reducer of 1024 MB each. The steps to reproduce this are:
+* stop nm2 with 2048 MB memory. (This is done to make sure that this node's heartbeat doesn't reach the RM first.)
+* now submit the application. As soon as the RM receives the first node's (nm1) heartbeat, it will try to reserve memory for the AM container (2048 MB). However, nm1 has only 1024 MB of memory.
+* now start nm2 with 2048 MB memory.
+
+It hangs forever... This has two potential issues.
+* It should not try to reserve memory on a node manager that is never going to be able to give the requested memory; i.e., the node manager's max capability is 1024 MB but 2048 MB is reserved on it. Yet it still does that.
+* Say 2048 MB is reserved on nm1 but nm2 comes back with 2048 MB of available memory. In this case, if the original request was made without any locality, the scheduler should unreserve the memory on nm1 and allocate the requested 2048 MB container on nm2.
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-948">YARN-948</a>.
+     Major bug reported by Omkar Vinit Joshi and fixed by Omkar Vinit Joshi <br>
+     <b>RM should validate the release container list before actually releasing them</b><br>
+     <blockquote>At present we are blindly passing the allocate request, containing the containers to be released, to the scheduler. This may result in one application releasing another application's container.
+
+{code}
+  @Override
+  @Lock(Lock.NoLock.class)
+  public Allocation allocate(ApplicationAttemptId applicationAttemptId,
+      List&lt;ResourceRequest&gt; ask, List&lt;ContainerId&gt; release, 
+      List&lt;String&gt; blacklistAdditions, List&lt;String&gt; blacklistRemovals) {
+
+    FiCaSchedulerApp application = getApplication(applicationAttemptId);
+....
+....
+    // Release containers
+    for (ContainerId releasedContainerId : release) {
+      RMContainer rmContainer = getRMContainer(releasedContainerId);
+      if (rmContainer == null) {
+         RMAuditLogger.logFailure(application.getUser(),
+             AuditConstants.RELEASE_CONTAINER, 
+             "Unauthorized access or invalid container", "CapacityScheduler",
+             "Trying to release container not owned by app or with invalid id",
+             application.getApplicationId(), releasedContainerId);
+      }
+      completedContainer(rmContainer,
+          SchedulerUtils.createAbnormalContainerStatus(
+              releasedContainerId, 
+              SchedulerUtils.RELEASED_CONTAINER),
+          RMContainerEventType.RELEASED);
+    }
+{code}
+
+Current checks are not sufficient and we should prevent this..... thoughts?</blockquote></li>
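A sketch of the missing ownership check, spliced into the loop quoted above (RMContainer exposes its owning attempt):

{code}
if (rmContainer == null || !rmContainer.getApplicationAttemptId()
    .equals(applicationAttemptId)) {
  RMAuditLogger.logFailure(application.getUser(),
      AuditConstants.RELEASE_CONTAINER,
      "Unauthorized access or invalid container", "CapacityScheduler",
      "Trying to release container not owned by app or with invalid id",
      application.getApplicationId(), releasedContainerId);
  continue; // skip completedContainer() for this id
}
{code}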
+<li> <a href="https://issues.apache.org/jira/browse/YARN-942">YARN-942</a>.
+     Major bug reported by Sandy Ryza and fixed by Akira AJISAKA (scheduler)<br>
+     <b>In Fair Scheduler documentation, inconsistency on which properties have prefix</b><br>
+     <blockquote>locality.threshold.node and locality.threshold.rack should have the yarn.scheduler.fair prefix like the items before them
+
+http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/FairScheduler.html</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-910">YARN-910</a>.
+     Major improvement reported by Sandy Ryza and fixed by Alejandro Abdelnur (nodemanager)<br>
+     <b>Allow auxiliary services to listen for container starts and completions</b><br>
+     <blockquote>Making container start and completion events available to auxiliary services would allow them to be resource-aware.  The auxiliary service would be able to notify a co-located service that is opportunistically using free capacity of allocation changes.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-906">YARN-906</a>.
+     Major sub-task reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Cancelling ContainerLaunch#call at KILLING causes that the container cannot be completed</b><br>
+     <blockquote>See https://builds.apache.org/job/PreCommit-YARN-Build/1435//testReport/org.apache.hadoop.yarn.client.api.impl/TestNMClient/testNMClientNoCleanupOnStop/</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-903">YARN-903</a>.
+     Major bug reported by Abhishek Kapoor and fixed by Omkar Vinit Joshi (applications/distributed-shell)<br>
     <b>DistributedShell throwing Errors in logs after successful completion</b><br>
+     <blockquote>I have tried running DistributedShell and also used its ApplicationMaster for my test.
+The application runs successfully, though it logs some errors that would be useful to fix.
+Below are the logs from the NodeManager and ApplicationMaster node.
+
+Log Snippet for NodeManager
+=============================
+2013-07-07 13:39:18,787 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Connecting to ResourceManager at localhost/127.0.0.1:9990. current no. of attempts is 1
+2013-07-07 13:39:19,050 INFO org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager: Rolling master-key for container-tokens, got key with id -325382586
+2013-07-07 13:39:19,052 INFO org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM: Rolling master-key for nm-tokens, got key with id :1005046570
+2013-07-07 13:39:19,053 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Registered with ResourceManager as sunny-Inspiron:9993 with total resource of &lt;memory:10240, vCores:8&gt;
+2013-07-07 13:39:19,053 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Notifying ContainerManager to unblock new container-requests
+2013-07-07 13:39:35,256 INFO SecurityLogger.org.apache.hadoop.ipc.Server: Auth successful for appattempt_1373184544832_0001_000001 (auth:SIMPLE)
+2013-07-07 13:39:35,492 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Start request for container_1373184544832_0001_01_000001 by user sunny
+2013-07-07 13:39:35,507 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Creating a new application reference for app application_1373184544832_0001
+2013-07-07 13:39:35,511 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=sunny	IP=127.0.0.1	OPERATION=Start Container Request	TARGET=ContainerManageImpl	RESULT=SUCCESS	APPID=application_1373184544832_0001	CONTAINERID=container_1373184544832_0001_01_000001
+2013-07-07 13:39:35,511 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Application application_1373184544832_0001 transitioned from NEW to INITING
+2013-07-07 13:39:35,512 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Adding container_1373184544832_0001_01_000001 to application application_1373184544832_0001
+2013-07-07 13:39:35,518 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Application application_1373184544832_0001 transitioned from INITING to RUNNING
+2013-07-07 13:39:35,528 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000001 transitioned from NEW to LOCALIZING
+2013-07-07 13:39:35,540 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource: Resource hdfs://localhost:9000/application/test.jar transitioned from INIT to DOWNLOADING
+2013-07-07 13:39:35,540 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService: Created localizer for container_1373184544832_0001_01_000001
+2013-07-07 13:39:35,675 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService: Writing credentials to the nmPrivate file /home/sunny/Hadoop2/hadoopdata/nodemanagerdata/nmPrivate/container_1373184544832_0001_01_000001.tokens. Credentials list: 
+2013-07-07 13:39:35,694 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: Initializing user sunny
+2013-07-07 13:39:35,803 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: Copying from /home/sunny/Hadoop2/hadoopdata/nodemanagerdata/nmPrivate/container_1373184544832_0001_01_000001.tokens to /home/sunny/Hadoop2/hadoopdata/nodemanagerdata/usercache/sunny/appcache/application_1373184544832_0001/container_1373184544832_0001_01_000001.tokens
+2013-07-07 13:39:35,803 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: CWD set to /home/sunny/Hadoop2/hadoopdata/nodemanagerdata/usercache/sunny/appcache/application_1373184544832_0001 = file:/home/sunny/Hadoop2/hadoopdata/nodemanagerdata/usercache/sunny/appcache/application_1373184544832_0001
+2013-07-07 13:39:36,136 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000, 
+2013-07-07 13:39:36,406 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource: Resource hdfs://localhost:9000/application/test.jar transitioned from DOWNLOADING to LOCALIZED
+2013-07-07 13:39:36,409 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000001 transitioned from LOCALIZING to LOCALIZED
+2013-07-07 13:39:36,524 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000001 transitioned from LOCALIZED to RUNNING
+2013-07-07 13:39:36,692 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: launchContainer: [bash, -c, /home/sunny/Hadoop2/hadoopdata/nodemanagerdata/usercache/sunny/appcache/application_1373184544832_0001/container_1373184544832_0001_01_000001/default_container_executor.sh]
+2013-07-07 13:39:37,144 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000, 
+2013-07-07 13:39:38,147 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000, 
+2013-07-07 13:39:39,151 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000, 
+2013-07-07 13:39:39,209 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Starting resource-monitoring for container_1373184544832_0001_01_000001
+2013-07-07 13:39:39,259 WARN org.apache.hadoop.yarn.util.ProcfsBasedProcessTree: Unexpected: procfs stat file is not in the expected format for process with pid 11552
+2013-07-07 13:39:39,264 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Memory usage of ProcessTree 29524 for container-id container_1373184544832_0001_01_000001: 79.9 MB of 1 GB physical memory used; 2.2 GB of 2.1 GB virtual memory used
+2013-07-07 13:39:39,645 INFO SecurityLogger.org.apache.hadoop.ipc.Server: Auth successful for appattempt_1373184544832_0001_000001 (auth:SIMPLE)
+2013-07-07 13:39:39,651 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Start request for container_1373184544832_0001_01_000002 by user sunny
+2013-07-07 13:39:39,651 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=sunny	IP=127.0.0.1	OPERATION=Start Container Request	TARGET=ContainerManageImpl	RESULT=SUCCESS	APPID=application_1373184544832_0001	CONTAINERID=container_1373184544832_0001_01_000002
+2013-07-07 13:39:39,651 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Adding container_1373184544832_0001_01_000002 to application application_1373184544832_0001
+2013-07-07 13:39:39,652 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000002 transitioned from NEW to LOCALIZED
+2013-07-07 13:39:39,660 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Getting container-status for container_1373184544832_0001_01_000002
+2013-07-07 13:39:39,661 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Returning container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 2, }, state: C_RUNNING, diagnostics: "", exit_status: -1000, 
+2013-07-07 13:39:39,728 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000002 transitioned from LOCALIZED to RUNNING
+2013-07-07 13:39:39,873 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: launchContainer: [bash, -c, /home/sunny/Hadoop2/hadoopdata/nodemanagerdata/usercache/sunny/appcache/application_1373184544832_0001/container_1373184544832_0001_01_000002/default_container_executor.sh]
+2013-07-07 13:39:39,898 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch: Container container_1373184544832_0001_01_000002 succeeded 
+2013-07-07 13:39:39,899 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000002 transitioned from RUNNING to EXITED_WITH_SUCCESS
+2013-07-07 13:39:39,900 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch: Cleaning up container container_1373184544832_0001_01_000002
+2013-07-07 13:39:39,942 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=sunny	OPERATION=Container Finished - Succeeded	TARGET=ContainerImpl	RESULT=SUCCESS	APPID=application_1373184544832_0001	CONTAINERID=container_1373184544832_0001_01_000002
+2013-07-07 13:39:39,943 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000002 transitioned from EXITED_WITH_SUCCESS to DONE
+2013-07-07 13:39:39,944 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Removing container_1373184544832_0001_01_000002 from application application_1373184544832_0001
+2013-07-07 13:39:40,155 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000, 
+2013-07-07 13:39:40,157 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 2, }, state: C_COMPLETE, diagnostics: "", exit_status: 0, 
+2013-07-07 13:39:40,158 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Removed completed container container_1373184544832_0001_01_000002
+2013-07-07 13:39:40,683 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Getting container-status for container_1373184544832_0001_01_000002
+2013-07-07 13:39:40,686 ERROR org.apache.hadoop.security.UserGroupInformation: PriviledgedActionException as:appattempt_1373184544832_0001_000001 (auth:TOKEN) cause:org.apache.hadoop.yarn.exceptions.YarnException: Container container_1373184544832_0001_01_000002 is not handled by this NodeManager
+2013-07-07 13:39:40,687 INFO org.apache.hadoop.ipc.Server: IPC Server handler 4 on 9993, call org.apache.hadoop.yarn.api.ContainerManagementProtocolPB.stopContainer from 127.0.0.1:51085: error: org.apache.hadoop.yarn.exceptions.YarnException: Container container_1373184544832_0001_01_000002 is not handled by this NodeManager
+org.apache.hadoop.yarn.exceptions.YarnException: Container container_1373184544832_0001_01_000002 is not handled by this NodeManager
+	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:45)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.authorizeGetAndStopContainerRequest(ContainerManagerImpl.java:614)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.stopContainer(ContainerManagerImpl.java:538)
+	at org.apache.hadoop.yarn.api.impl.pb.service.ContainerManagementProtocolPBServiceImpl.stopContainer(ContainerManagementProtocolPBServiceImpl.java:88)
+	at org.apache.hadoop.yarn.proto.ContainerManagementProtocol$ContainerManagementProtocolService$2.callBlockingMethod(ContainerManagementProtocol.java:85)
+	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:605)
+	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1033)
+	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1868)
+	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1864)
+	at java.security.AccessController.doPrivileged(Native Method)
+	at javax.security.auth.Subject.doAs(Subject.java:396)
+	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1489)
+	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1862)
+2013-07-07 13:39:41,162 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000, 
+2013-07-07 13:39:41,691 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch: Container container_1373184544832_0001_01_000001 succeeded 
+2013-07-07 13:39:41,692 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000001 transitioned from RUNNING to EXITED_WITH_SUCCESS
+2013-07-07 13:39:41,692 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch: Cleaning up container container_1373184544832_0001_01_000001
+2013-07-07 13:39:41,714 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=sunny	OPERATION=Container Finished - Succeeded	TARGET=ContainerImpl	RESULT=SUCCESS	APPID=application_1373184544832_0001	CONTAINERID=container_1373184544832_0001_01_000001
+2013-07-07 13:39:41,714 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1373184544832_0001_01_000001 transitioned from EXITED_WITH_SUCCESS to DONE
+2013-07-07 13:39:41,714 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Removing container_1373184544832_0001_01_000001 from application application_1373184544832_0001
+2013-07-07 13:39:42,166 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1373184544832, }, attemptId: 1, }, id: 1, }, state: C_COMPLETE, diagnostics: "", exit_status: 0, 
+2013-07-07 13:39:42,166 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Removed completed container container_1373184544832_0001_01_000001
+2013-07-07 13:39:42,191 INFO SecurityLogger.org.apache.hadoop.ipc.Server: Auth successful for appattempt_1373184544832_0001_000001 (auth:SIMPLE)
+2013-07-07 13:39:42,195 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Getting container-status for container_1373184544832_0001_01_000001
+2013-07-07 13:39:42,196 ERROR org.apache.hadoop.security.UserGroupInformation: PriviledgedActionException as:appattempt_1373184544832_0001_000001 (auth:TOKEN) cause:org.apache.hadoop.yarn.exceptions.YarnException: Container container_1373184544832_0001_01_000001 is not handled by this NodeManager
+2013-07-07 13:39:42,196 INFO org.apache.hadoop.ipc.Server: IPC Server handler 5 on 9993, call org.apache.hadoop.yarn.api.ContainerManagementProtocolPB.stopContainer from 127.0.0.1:51086: error: org.apache.hadoop.yarn.exceptions.YarnException: Container container_1373184544832_0001_01_000001 is not handled by this NodeManager
+org.apache.hadoop.yarn.exceptions.YarnException: Container container_1373184544832_0001_01_000001 is not handled by this NodeManager
+	at org.apache.hadoop.yarn.ipc.RPCUtil.getRemoteException(RPCUtil.java:45)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.authorizeGetAndStopContainerRequest(ContainerManagerImpl.java:614)
+	at org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl.stopContainer(ContainerManagerImpl.java:538)
+	at org.apache.hadoop.yarn.api.impl.pb.service.ContainerManagementProtocolPBServiceImpl.stopContainer(ContainerManagementProtocolPBServiceImpl.java:88)
+	at org.apache.hadoop.yarn.proto.ContainerManagementProtocol$ContainerManagementProtocolService$2.callBlockingMethod(ContainerManagementProtocol.java:85)
+	at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:605)
+	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1033)
+	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1868)
+	at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1864)
+	at java.security.AccessController.doPrivileged(Native Method)
+	at javax.security.auth.Subject.doAs(Subject.java:396)
+	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1489)
+	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1862)
+2013-07-07 13:39:42,264 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Starting resource-monitoring for container_1373184544832_0001_01_000002
+2013-07-07 13:39:42,265 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Stopping resource-monitoring for container_1373184544832_0001_01_000002
+2013-07-07 13:39:42,265 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Stopping resource-monitoring for container_1373184544832_0001_01_000001
+2013-07-07 13:39:43,173 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Application application_1373184544832_0001 transitioned from RUNNING to APPLICATION_RESOURCES_CLEANINGUP
+2013-07-07 13:39:43,174 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices: Got event APPLICATION_STOP for appId application_1373184544832_0001
+2013-07-07 13:39:43,180 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Application application_1373184544832_0001 transitioned from APPLICATION_RESOURCES_CLEANINGUP to FINISHED
+2013-07-07 13:39:43,180 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler: Scheduling Log Deletion for application: application_1373184544832_0001, with delay of 10800 seconds
+
+
+Log Snippet for Application Manager
+==================================
+13/07/07 13:39:36 INFO client.SimpleApplicationMaster: Initializing ApplicationMaster
+13/07/07 13:39:37 INFO client.SimpleApplicationMaster: Application master for app, appId=1, clustertimestamp=1373184544832, attemptId=1
+13/07/07 13:39:37 INFO client.SimpleApplicationMaster: Starting ApplicationMaster
+13/07/07 13:39:37 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
+13/07/07 13:39:37 INFO impl.NMClientAsyncImpl: Upper bound of the thread pool size is 500
+13/07/07 13:39:37 INFO impl.ContainerManagementProtocolProxy: yarn.client.max-nodemanagers-proxies : 500
+13/07/07 13:39:37 INFO client.SimpleApplicationMaster: Max mem capabililty of resources in this cluster 8192
+13/07/07 13:39:37 INFO client.SimpleApplicationMaster: Requested container ask: Capability[&lt;memory:100, vCores:0&gt;]Priority[0]ContainerCount[1]
+13/07/07 13:39:39 INFO client.SimpleApplicationMaster: Got response from RM for container ask, allocatedCnt=1
+13/07/07 13:39:39 INFO client.SimpleApplicationMaster: Launching shell command on a new container., containerId=container_1373184544832_0001_01_000002, containerNode=sunny-Inspiron:9993, containerNodeURI=sunny-Inspiron:8042, containerResourceMemory1024
+13/07/07 13:39:39 INFO client.SimpleApplicationMaster: Setting up container launch container for containerid=container_1373184544832_0001_01_000002
+13/07/07 13:39:39 INFO impl.NMClientAsyncImpl: Processing Event EventType: START_CONTAINER for Container container_1373184544832_0001_01_000002
+13/07/07 13:39:39 INFO impl.ContainerManagementProtocolProxy: Opening proxy : sunny-Inspiron:9993
+13/07/07 13:39:39 INFO client.SimpleApplicationMaster: Succeeded to start Container container_1373184544832_0001_01_000002
+13/07/07 13:39:39 INFO impl.NMClientAsyncImpl: Processing Event EventType: QUERY_CONTAINER for Container container_1373184544832_0001_01_000002
+13/07/07 13:39:40 INFO client.SimpleApplicationMaster: Got response from RM for container ask, completedCnt=1
+13/07/07 13:39:40 INFO client.SimpleApplicationMaster: Got container status for containerID=container_1373184544832_0001_01_000002, state=COMPLETE, exitStatus=0, diagnostics=
+13/07/07 13:39:40 INFO client.SimpleApplicationMaster: Container completed successfully., containerId=container_1373184544832_0001_01_000002
+13/07/07 13:39:40 INFO client.SimpleApplicationMaster: Application completed. Stopping running containers
+13/07/07 13:39:40 ERROR impl.NMClientImpl: Failed to stop Container container_1373184544832_0001_01_000002when stopping NMClientImpl
+13/07/07 13:39:40 INFO impl.ContainerManagementProtocolProxy: Closing proxy : sunny-Inspiron:9993
+13/07/07 13:39:40 INFO client.SimpleApplicationMaster: Application completed. Signalling finish to RM
+13/07/07 13:39:41 INFO impl.AMRMClientAsyncImpl: Interrupted while waiting for queue
+java.lang.InterruptedException
+	at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.reportInterruptAfterWait(AbstractQueuedSynchronizer.java:1899)
+	at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1934)
+	at java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:399)
+	at org.apache.hadoop.yarn.client.api.async.impl.AMRMClientAsyncImpl$CallbackHandlerThread.run(AMRMClientAsyncImpl.java:281)
+13/07/07 13:39:41 INFO client.SimpleApplicationMaster: Application Master completed successfully. exiting
+
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-881">YARN-881</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>Priority#compareTo method seems to be wrong.</b><br>
+     <blockquote>If a lower int value means higher priority, shouldn't we "return other.getPriority() - this.getPriority()"? (A sketch of the inverted comparison follows this item.)</blockquote></li>
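
If a lower value means higher priority, the comparator has to invert natural int ordering. A minimal sketch of that inversion under the lower-is-higher convention — illustrative only, not the patch committed for YARN-881; Integer.compare is used to avoid the overflow that raw subtraction can hit at int extremes:

{code}
// Illustrative sketch only -- not the committed YARN-881 patch.
// Assumes a lower int value means a higher scheduling priority.
public class Priority implements Comparable<Priority> {
  private final int priority;

  public Priority(int priority) { this.priority = priority; }

  public int getPriority() { return priority; }

  @Override
  public int compareTo(Priority other) {
    // Reverse natural ordering: priority 0 sorts before priority 10.
    // Integer.compare avoids the overflow that plain subtraction
    // (other.getPriority() - this.getPriority()) can hit at int extremes.
    return Integer.compare(other.getPriority(), this.getPriority());
  }
}
{code}
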
+<li> <a href="https://issues.apache.org/jira/browse/YARN-771">YARN-771</a>.
+     Major sub-task reported by Bikas Saha and fixed by Junping Du <br>
+     <b>AMRMClient  support for resource blacklisting</b><br>
+     <blockquote>After YARN-750, AMRMClient should support blacklisting via the new YARN APIs.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-758">YARN-758</a>.
+     Minor improvement reported by Bikas Saha and fixed by Karthik Kambatla <br>
+     <b>Augment MockNM to use multiple cores</b><br>
+     <blockquote>YARN-757 was fixed by changing the scheduler from Fair to the default (which is Capacity).</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-707">YARN-707</a>.
+     Blocker improvement reported by Bikas Saha and fixed by Jason Lowe <br>
+     <b>Add user info in the YARN ClientToken</b><br>
+     <blockquote>If user info is present in the client token then it can be used to do limited authz in the AM.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-696">YARN-696</a>.
+     Major improvement reported by Trevor Lorimer and fixed by Trevor Lorimer (resourcemanager)<br>
+     <b>Enable multiple states to be specified in Resource Manager apps REST call</b><br>
+     <blockquote>Within the YARN Resource Manager REST API, the GET call that returns all Applications can be filtered by only a single State query parameter (http://&lt;rm http address:port&gt;/ws/v1/cluster/apps). 
+
+There are 8 possible states (New, Submitted, Accepted, Running, Finishing, Finished, Failed, Killed). If no state parameter is specified, all states are returned; however, if a sub-set of states is required, multiple REST calls must be made (a maximum of 7).
+
+The proposal is to allow multiple states to be specified in a single REST call (a hedged client-side sketch follows this item).</blockquote></li>
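
A hedged client-side sketch of the proposed multi-state query. The host, port, and the comma-separated "states" parameter name are assumptions drawn from the proposal, not a documented API guarantee:

{code}
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class MultiStateAppsQuery {
  public static void main(String[] args) throws Exception {
    // Hypothetical RM address; a comma-separated "states" filter is
    // what this JIRA proposes, not something guaranteed here.
    URL url = new URL(
        "http://rm-host:8088/ws/v1/cluster/apps?states=RUNNING,FINISHED");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line); // JSON list of apps in either state
      }
    }
  }
}
{code}
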
+<li> <a href="https://issues.apache.org/jira/browse/YARN-643">YARN-643</a>.
+     Major bug reported by Jian He and fixed by Xuan Gong <br>
+     <b>WHY appToken is removed both in BaseFinalTransition and AMUnregisteredTransition AND clientToken is removed in FinalTransition and not BaseFinalTransition</b><br>
+     <blockquote>This jira tracks why appToken and clientToAMToken are removed separately, and why the removals are spread across different transitions; ideally there would be a common place where both tokens could be removed at the same time.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-602">YARN-602</a>.
+     Major bug reported by Xuan Gong and fixed by Kenji Kikushima <br>
+     <b>NodeManager should mandatorily set some Environment variables into every container that it launches</b><br>
+     <blockquote>The NodeManager should mandatorily set some environment variables, such as Environment.user and Environment.pwd, into every container that it launches. If both the user and the NodeManager set one of those variables, the value set by the NM should win (a minimal sketch follows this item).</blockquote></li>
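
The intended behavior — NM-mandated variables overriding user-supplied ones — reduces to applying the mandated values last. A minimal sketch under that assumption; all names here are illustrative stand-ins, not the actual NodeManager code:

{code}
import java.util.HashMap;
import java.util.Map;

public class ContainerEnvSetup {
  /** Illustrative only: apply NM-mandated variables after user ones. */
  static Map<String, String> buildEnv(Map<String, String> userEnv,
                                      String user, String containerWorkDir) {
    Map<String, String> env = new HashMap<>(userEnv);
    // Mandatory values are set last so they override user-supplied ones.
    env.put("USER", user);
    env.put("PWD", containerWorkDir);
    return env;
  }

  public static void main(String[] args) {
    Map<String, String> userEnv = new HashMap<>();
    userEnv.put("USER", "spoofed"); // will be overridden by the NM value
    System.out.println(buildEnv(userEnv, "sunny", "/tmp/container_01"));
  }
}
{code}
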
+<li> <a href="https://issues.apache.org/jira/browse/YARN-589">YARN-589</a>.
+     Major improvement reported by Sandy Ryza and fixed by Sandy Ryza (scheduler)<br>
+     <b>Expose a REST API for monitoring the fair scheduler</b><br>
+     <blockquote>The fair scheduler should have an HTTP interface that exposes information such as applications per queue, fair shares, demands, current allocations.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-573">YARN-573</a>.
+     Critical sub-task reported by Omkar Vinit Joshi and fixed by Omkar Vinit Joshi <br>
+     <b>Shared data structures in Public Localizer and Private Localizer are not Thread safe.</b><br>
+     <blockquote>PublicLocalizer:
+1) pending is accessed by addResource (as part of event handling) and by the run method (as part of PublicLocalizer.run()).
+
+PrivateLocalizer:
+1) pending is accessed by addResource (as part of event handling) and by findNextResource (i.remove()). The update method should also be fixed, since it too shares the pending list. (A generic sketch of the missing lock follows this item.)
+</blockquote></li>
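
The hazard described above is two threads — the event handler and the localizer's run loop — mutating one pending list without a shared lock. A generic sketch of the missing guard, not the actual localizer patch:

{code}
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class PendingResources<T> {
  // Shared between the event-handling thread (addResource) and the
  // localizer thread (pollNext); every access goes through one lock.
  private final List<T> pending = new ArrayList<>();

  public synchronized void addResource(T resource) {
    pending.add(resource);
  }

  /** Remove and return the next pending resource, or null if none. */
  public synchronized T pollNext() {
    Iterator<T> i = pending.iterator();
    if (i.hasNext()) {
      T next = i.next();
      i.remove(); // safe: same lock as addResource
      return next;
    }
    return null;
  }

  public static void main(String[] args) {
    PendingResources<String> p = new PendingResources<>();
    p.addResource("resource-1");
    System.out.println(p.pollNext()); // resource-1
    System.out.println(p.pollNext()); // null
  }
}
{code}
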
+<li> <a href="https://issues.apache.org/jira/browse/YARN-540">YARN-540</a>.
+     Major sub-task reported by Jian He and fixed by Jian He (resourcemanager)<br>
+     <b>Race condition causing RM to potentially relaunch already unregistered AMs on RM restart</b><br>
+     <blockquote>When a job succeeds and successfully calls finishApplicationMaster, the RM shuts down and the restart dispatcher is stopped before it can process the REMOVE_APP event. The next time the RM comes back, it reloads the existing state files even though the job already succeeded.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-502">YARN-502</a>.
+     Major sub-task reported by Lohit Vijayarenu and fixed by Mayank Bansal <br>
+     <b>RM crash with NPE on NODE_REMOVED event with FairScheduler</b><br>
+     <blockquote>While running some tests and adding/removing nodes, we saw the RM crash with the exception below (a defensive-lookup sketch follows this item). We are testing with the fair scheduler and running hadoop-2.0.3-alpha.
+
+{noformat}
+2013-03-22 18:54:27,015 INFO org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl: Deactivating Node YYYY:55680 as it is now LOST
+2013-03-22 18:54:27,015 INFO org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl: YYYY:55680 Node Transitioned from UNHEALTHY to LOST
+2013-03-22 18:54:27,015 FATAL org.apache.hadoop.yarn.server.resourcemanager.ResourceManager: Error in handling event type NODE_REMOVED to the scheduler
+java.lang.NullPointerException
+        at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.removeNode(FairScheduler.java:619)
+        at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.handle(FairScheduler.java:856)
+        at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler.handle(FairScheduler.java:98)
+        at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessor.run(ResourceManager.java:375)
+        at java.lang.Thread.run(Thread.java:662)
+2013-03-22 18:54:27,016 INFO org.apache.hadoop.yarn.server.resourcemanager.ResourceManager: Exiting, bbye..
+2013-03-22 18:54:27,020 INFO org.mortbay.log: Stopped SelectChannelConnector@XXXX:50030
+{noformat}</blockquote></li>
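
The NPE pattern — a NODE_REMOVED event for a node the scheduler no longer tracks — calls for a defensive lookup before dereferencing. A hedged sketch; the method and field names are illustrative stand-ins, not the FairScheduler fix:

{code}
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class NodeTracker {
  private final Map<String, Object> nodes = new ConcurrentHashMap<>();

  /** Illustrative guard: ignore removal of an already-unknown node. */
  public void removeNode(String nodeId) {
    Object node = nodes.remove(nodeId);
    if (node == null) {
      // Node was never added or was already removed (e.g. an UNHEALTHY
      // -> LOST transition raced with removal); nothing to clean up.
      return;
    }
    // ... release the node's resources here ...
  }

  public static void main(String[] args) {
    NodeTracker t = new NodeTracker();
    t.removeNode("host:55680"); // unknown node: returns quietly, no NPE
  }
}
{code}
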
+<li> <a href="https://issues.apache.org/jira/browse/YARN-337">YARN-337</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (resourcemanager)<br>
+     <b>RM handles killed application tracking URL poorly</b><br>
+     <blockquote>When the ResourceManager kills an application, it leaves the proxy URL redirecting to the original tracking URL for the application even though the ApplicationMaster is no longer there to service it.  It should redirect it somewhere more useful, like the RM's web page for the application, where the user can find that the application was killed and links to the AM logs.
+
+In addition, sometimes the AM during teardown from the kill can attempt to unregister and provide an updated tracking URL, but unfortunately the RM has "forgotten" the AM due to the kill and refuses to process the unregistration.  Instead it logs:
+
+{noformat}
+2013-01-09 17:37:49,671 [IPC Server handler 2 on 8030] ERROR
+org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService: AppAttemptId doesnt exist in cache appattempt_1357575694478_28614_000001
+{noformat}
+
+It should go ahead and process the unregistration to update the tracking URL since the application offered it.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-292">YARN-292</a>.
+     Major sub-task reported by Devaraj K and fixed by Zhijie Shen (resourcemanager)<br>
+     <b>ResourceManager throws ArrayIndexOutOfBoundsException while handling CONTAINER_ALLOCATED for application attempt</b><br>
+     <blockquote>{code}
+2012-12-26 08:41:15,030 ERROR org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler: Calling allocate on removed or non existant application appattempt_1356385141279_49525_000001
+2012-12-26 08:41:15,031 ERROR org.apache.hadoop.yarn.server.resourcemanager.ResourceManager: Error in handling event type CONTAINER_ALLOCATED for applicationAttempt application_1356385141279_49525
+java.lang.ArrayIndexOutOfBoundsException: 0
+	at java.util.Arrays$ArrayList.get(Arrays.java:3381)
+	at org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl$AMContainerAllocatedTransition.transition(RMAppAttemptImpl.java:655)
+	at org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl$AMContainerAllocatedTransition.transition(RMAppAttemptImpl.java:644)
+	at org.apache.hadoop.yarn.state.StateMachineFactory$SingleInternalArc.doTransition(StateMachineFactory.java:357)
+	at org.apache.hadoop.yarn.state.StateMachineFactory.doTransition(StateMachineFactory.java:298)
+	at org.apache.hadoop.yarn.state.StateMachineFactory.access$300(StateMachineFactory.java:43)
+	at org.apache.hadoop.yarn.state.StateMachineFactory$InternalStateMachine.doTransition(StateMachineFactory.java:443)
+	at org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl.handle(RMAppAttemptImpl.java:490)
+	at org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl.handle(RMAppAttemptImpl.java:80)
+	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$ApplicationAttemptEventDispatcher.handle(ResourceManager.java:433)
+	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$ApplicationAttemptEventDispatcher.handle(ResourceManager.java:414)
+	at org.apache.hadoop.yarn.event.AsyncDispatcher.dispatch(AsyncDispatcher.java:126)
+	at org.apache.hadoop.yarn.event.AsyncDispatcher$1.run(AsyncDispatcher.java:75)
+	at java.lang.Thread.run(Thread.java:662)
+ {code} (An empty-list guard sketch follows this item.)</blockquote></li>
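
The ArrayIndexOutOfBoundsException comes from unconditionally taking element 0 of an allocation list that can legitimately be empty once the attempt has been removed. A minimal illustrative guard, not the RMAppAttemptImpl patch:

{code}
import java.util.Collections;
import java.util.List;

public class AllocationHandler {
  /** Illustrative: tolerate an empty allocation for a removed attempt. */
  public String handleContainerAllocated(List<String> allocatedContainers) {
    if (allocatedContainers == null || allocatedContainers.isEmpty()) {
      // Allocate was called on a removed/nonexistent attempt; the
      // scheduler returned nothing, so there is no container to take.
      return null;
    }
    return allocatedContainers.get(0); // safe: list is non-empty
  }

  public static void main(String[] args) {
    System.out.println(
        new AllocationHandler().handleContainerAllocated(
            Collections.<String>emptyList())); // prints null, no AIOOBE
  }
}
{code}
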
+<li> <a href="https://issues.apache.org/jira/browse/YARN-107">YARN-107</a>.
+     Major bug reported by Devaraj K and fixed by Xuan Gong (resourcemanager)<br>
+     <b>ClientRMService.forceKillApplication() should handle the non-RUNNING applications properly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5497">MAPREDUCE-5497</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>The '5s sleep' in MRAppMaster.shutDownJob is only needed before stopping ClientService</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5493">MAPREDUCE-5493</a>.
+     Blocker bug reported by Jason Lowe and fixed by Jason Lowe (mrv2)<br>
+     <b>In-memory map outputs can be leaked after shuffle completes</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5483">MAPREDUCE-5483</a>.
+     Major bug reported by Alejandro Abdelnur and fixed by Robert Kanter (distcp)<br>
+     <b>revert MAPREDUCE-5357</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5478">MAPREDUCE-5478</a>.
+     Minor improvement reported by Sandy Ryza and fixed by Sandy Ryza (examples)<br>
+     <b>TeraInputFormat unnecessarily defines its own FileSplit subclass</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5476">MAPREDUCE-5476</a>.
+     Blocker bug reported by Jian He and fixed by Jian He <br>
+     <b>Job can fail when RM restarts after staging dir is cleaned but before MR successfully unregister with RM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5475">MAPREDUCE-5475</a>.
+     Blocker bug reported by Jason Lowe and fixed by Jason Lowe (mr-am , mrv2)<br>
+     <b>MRClientService does not verify ACLs properly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5470">MAPREDUCE-5470</a>.
+     Major bug reported by Chris Nauroth and fixed by Sandy Ryza <br>
+     <b>LocalJobRunner does not work on Windows.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5468">MAPREDUCE-5468</a>.
+     Blocker bug reported by Yesha Vora and fixed by Vinod Kumar Vavilapalli <br>
+     <b>AM recovery does not work for map only jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5466">MAPREDUCE-5466</a>.
+     Blocker bug reported by Yesha Vora and fixed by Jian He <br>
+     <b>Historyserver does not refresh the result of restarted jobs after RM restart</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5462">MAPREDUCE-5462</a>.
+     Major sub-task reported by Sandy Ryza and fixed by Sandy Ryza (performance , task)<br>
+     <b>In map-side sort, swap entire meta entries instead of indexes for better cache performance </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5454">MAPREDUCE-5454</a>.
+     Major bug reported by Karthik Kambatla and fixed by Karthik Kambatla (test)<br>
+     <b>TestDFSIO fails intermittently on JDK7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5446">MAPREDUCE-5446</a>.
+     Major bug reported by Jason Lowe and fixed by Jason Lowe (mrv2 , test)<br>
+     <b>TestJobHistoryEvents and TestJobHistoryParsing have race conditions</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5441">MAPREDUCE-5441</a>.
+     Major bug reported by Rohith Sharma K S and fixed by Jian He (applicationmaster , client)<br>
+     <b>JobClient exits whenever the RM issues a Reboot command to the 1st attempt App Master.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5440">MAPREDUCE-5440</a>.
+     Major bug reported by Robert Parker and fixed by Robert Parker (mrv2)<br>
+     <b>TestCopyCommitter Fails on JDK7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5428">MAPREDUCE-5428</a>.
+     Major bug reported by Jason Lowe and fixed by Karthik Kambatla (jobhistoryserver , mrv2)<br>
+     <b>HistoryFileManager doesn't stop threads when service is stopped</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5425">MAPREDUCE-5425</a>.
+     Major bug reported by Ashwin Shankar and fixed by Robert Parker (jobhistoryserver)<br>
+     <b>Junit in TestJobHistoryServer failing in jdk 7</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5414">MAPREDUCE-5414</a>.
+     Major bug reported by Nemon Lou and fixed by Nemon Lou (test)<br>
+     <b>TestTaskAttempt fails jdk7 with NullPointerException</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5385">MAPREDUCE-5385</a>.
+     Blocker bug reported by Omkar Vinit Joshi and fixed by Omkar Vinit Joshi <br>
+     <b>JobContext cache files api are broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5379">MAPREDUCE-5379</a>.
+     Major improvement reported by Sandy Ryza and fixed by Karthik Kambatla (job submission , security)<br>
+     <b>Include token tracking ids in jobconf</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5367">MAPREDUCE-5367</a>.
+     Major improvement reported by Sandy Ryza and fixed by Sandy Ryza <br>
+     <b>Local jobs all use same local working directory</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5358">MAPREDUCE-5358</a>.
+     Major bug reported by Devaraj K and fixed by Devaraj K (mr-am)<br>
+     <b>MRAppMaster throws invalid transitions for JobImpl</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5317">MAPREDUCE-5317</a>.
+     Major bug reported by Ravi Prakash and fixed by Ravi Prakash (mrv2)<br>
+     <b>Stale files left behind for failed jobs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5251">MAPREDUCE-5251</a>.
+     Major bug reported by Jason Lowe and fixed by Ashwin Shankar (mrv2)<br>
+     <b>Reducer should not implicate map attempt if it has insufficient space to fetch map output</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5164">MAPREDUCE-5164</a>.
+     Major bug reported by Nemon Lou and fixed by Nemon Lou <br>
+     <b>commands "mapred job" and "mapred queue" omit HADOOP_CLIENT_OPTS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5020">MAPREDUCE-5020</a>.
+     Major bug reported by Trevor Robinson and fixed by Trevor Robinson (client)<br>
+     <b>Compile failure with JDK8</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5001">MAPREDUCE-5001</a>.
+     Major bug reported by Brock Noland and fixed by Sandy Ryza <br>
+     <b>LocalJobRunner has race condition resulting in job failures </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3193">MAPREDUCE-3193</a>.
+     Major bug reported by Ramgopal N and fixed by Devaraj K (mrv1 , mrv2)<br>
+     <b>FileInputFormat doesn't read files recursively in the input path dir</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-1981">MAPREDUCE-1981</a>.
+     Major improvement reported by Hairong Kuang and fixed by Hairong Kuang (job submission)<br>
+     <b>Improve getSplits performance by using listLocatedStatus</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5199">HDFS-5199</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Add more debug trace for NFS READ and WRITE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5192">HDFS-5192</a>.
+     Minor bug reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>NameNode may fail to start when dfs.client.test.drop.namenode.response.number is set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5159">HDFS-5159</a>.
+     Major bug reported by Aaron T. Myers and fixed by Aaron T. Myers (namenode)<br>
+     <b>Secondary NameNode fails to checkpoint if error occurs downloading edits on first checkpoint</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5150">HDFS-5150</a>.
+     Blocker bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Allow per NN SPN for internal SPNEGO.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5140">HDFS-5140</a>.
+     Blocker bug reported by Arpit Gupta and fixed by Jing Zhao (ha)<br>
+     <b>Too many safemode monitor threads being created in the standby namenode causing it to fail with out of memory error</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5136">HDFS-5136</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>MNT EXPORT should give the full group list which can mount the exports</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5132">HDFS-5132</a>.
+     Blocker bug reported by Arpit Gupta and fixed by Kihwal Lee (namenode)<br>
+     <b>Deadlock in NameNode between SafeModeMonitor#run and DatanodeManager#handleHeartbeat</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5128">HDFS-5128</a>.
+     Critical improvement reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Allow multiple net interfaces to be used with HA namenode RPC server</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5124">HDFS-5124</a>.
+     Blocker bug reported by Deepesh Khandelwal and fixed by Daryn Sharp (namenode)<br>
+     <b>DelegationTokenSecretManager#retrievePassword can cause deadlock in NameNode</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5118">HDFS-5118</a>.
+     Major new feature reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>Provide testing support for DFSClient to drop RPC responses</b><br>
+     <blockquote>Used for testing when NameNode HA is enabled. Users can use a new configuration property "dfs.client.test.drop.namenode.response.number" to specify the number of responses that DFSClient will drop in each RPC call. This feature can help testing functionalities such as NameNode retry cache.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5111">HDFS-5111</a>.
+     Minor bug reported by Jing Zhao and fixed by Jing Zhao (snapshots)<br>
+     <b>Remove duplicated error message for snapshot commands when processing invalid arguments</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5110">HDFS-5110</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Change FSDataOutputStream to HdfsDataOutputStream for opened streams to fix type cast error</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5107">HDFS-5107</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Fix array copy error in Readdir and Readdirplus responses</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5106">HDFS-5106</a>.
+     Minor bug reported by Chuan Liu and fixed by Chuan Liu (test)<br>
+     <b>TestDatanodeBlockScanner fails on Windows due to incorrect path format</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5105">HDFS-5105</a>.
+     Minor bug reported by Chuan Liu and fixed by Chuan Liu <br>
+     <b>TestFsck fails on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5104">HDFS-5104</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Support dotdot name in NFS LOOKUP operation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5103">HDFS-5103</a>.
+     Minor bug reported by Chuan Liu and fixed by Chuan Liu (test)<br>
+     <b>TestDirectoryScanner fails on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5102">HDFS-5102</a>.
+     Major bug reported by Aaron T. Myers and fixed by Jing Zhao (snapshots)<br>
+     <b>Snapshot names should not be allowed to contain slash characters</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5100">HDFS-5100</a>.
+     Minor bug reported by Chuan Liu and fixed by Chuan Liu (test)<br>
+     <b>TestNamenodeRetryCache fails on Windows due to incorrect cleanup</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5099">HDFS-5099</a>.
+     Major bug reported by Chuan Liu and fixed by Chuan Liu (namenode)<br>
+     <b>Namenode#copyEditLogSegmentsToSharedDir should close EditLogInputStreams upon finishing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5091">HDFS-5091</a>.
+     Minor bug reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>Support for spnego keytab separate from the JournalNode keytab for secure HA</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5085">HDFS-5085</a>.
+     Major sub-task reported by Brandon Li and fixed by Jing Zhao (nfs)<br>
+     <b>Refactor o.a.h.nfs to support different types of authentications</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5080">HDFS-5080</a>.
+     Major bug reported by Jing Zhao and fixed by Jing Zhao (ha , qjm)<br>
+     <b>BootstrapStandby not working with QJM when the existing NN is active</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5078">HDFS-5078</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Support file append in NFSv3 gateway to enable data streaming to HDFS</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5076">HDFS-5076</a>.
+     Minor new feature reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>Add MXBean methods to query NN's transaction information and JournalNode's journal status</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5071">HDFS-5071</a>.
+     Major sub-task reported by Kihwal Lee and fixed by Brandon Li (nfs)<br>
+     <b>Change hdfs-nfs parent project to hadoop-project</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5069">HDFS-5069</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Include hadoop-nfs and hadoop-hdfs-nfs into hadoop dist for NFS deployment</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5067">HDFS-5067</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Support symlink operations</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5061">HDFS-5061</a>.
+     Major improvement reported by Arpit Agarwal and fixed by Arpit Agarwal (namenode)<br>
+     <b>Make FSNameSystem#auditLoggers an unmodifiable list</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5055">HDFS-5055</a>.
+     Blocker bug reported by Allen Wittenauer and fixed by Vinay (namenode)<br>
+     <b>nn fails to download checkpointed image from snn in some setups</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5047">HDFS-5047</a>.
+     Major bug reported by Kihwal Lee and fixed by Robert Parker (namenode)<br>
+     <b>Suppress logging of full stack trace of quota and lease exceptions</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5045">HDFS-5045</a>.
+     Minor improvement reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>Add more unit tests for retry cache to cover all AtMostOnce methods</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5043">HDFS-5043</a>.
+     Major bug reported by Brandon Li and fixed by Brandon Li <br>
+     <b>For HdfsFileStatus, set default value of childrenNum to -1 instead of 0 to avoid confusing applications</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-5028">HDFS-5028</a>.
+     Major bug reported by zhaoyunjiong and fixed by zhaoyunjiong <br>
+     <b>LeaseRenewer throws java.util.ConcurrentModificationException on timeout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4993">HDFS-4993</a>.
+     Major bug reported by Kihwal Lee and fixed by Robert Parker <br>
+     <b>fsck can fail if a file is renamed or deleted</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4962">HDFS-4962</a>.
+     Minor sub-task reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (nfs)<br>
+     <b>Use enum for nfs constants</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4947">HDFS-4947</a>.
+     Major sub-task reported by Brandon Li and fixed by Jing Zhao (nfs)<br>
+     <b>Add NFS server export table to control export by hostname or IP range</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4926">HDFS-4926</a>.
+     Trivial improvement reported by Joseph Lorenzini and fixed by Vivek Ganesan (namenode)<br>
+     <b>namenode webserver's page has a tooltip that is inconsistent with the datanode HTML link</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4905">HDFS-4905</a>.
+     Minor improvement reported by Arpit Agarwal and fixed by Arpit Agarwal (tools)<br>
+     <b>Add appendToFile command to "hdfs dfs"</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4898">HDFS-4898</a>.
+     Minor bug reported by Eric Sirianni and fixed by Tsz Wo (Nicholas), SZE (namenode)<br>
+     <b>BlockPlacementPolicyWithNodeGroup.chooseRemoteRack() fails to properly fallback to local rack</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4763">HDFS-4763</a>.
+     Major sub-task reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>Add script changes/utility for starting NFS gateway</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4680">HDFS-4680</a>.
+     Major bug reported by Andrew Wang and fixed by Andrew Wang (namenode , security)<br>
+     <b>Audit logging of delegation tokens for MR tracing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4632">HDFS-4632</a>.
+     Major bug reported by Chris Nauroth and fixed by Chuan Liu (test)<br>
+     <b>globStatus using backslash for escaping does not work on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4594">HDFS-4594</a>.
+     Minor bug reported by Arpit Gupta and fixed by Chris Nauroth (webhdfs)<br>
+     <b>WebHDFS open sets Content-Length header to what is specified by length parameter rather than how much data is actually returned. </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4329">HDFS-4329</a>.
+     Major bug reported by Andy Isaacson and fixed by Cristina L. Abad (hdfs-client)<br>
+     <b>DFSShell issues with directories with spaces in name</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-3245">HDFS-3245</a>.
+     Major improvement reported by Todd Lipcon and fixed by Ravi Prakash (namenode)<br>
+     <b>Add metrics and web UI for cluster version summary</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2933">HDFS-2933</a>.
+     Major improvement reported by Philip Zeyliger and fixed by Vivek Ganesan (datanode)<br>
+     <b>Improve DataNode Web UI Index Page</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9962">HADOOP-9962</a>.
+     Major improvement reported by Roman Shaposhnik and fixed by Roman Shaposhnik (build)<br>
+     <b>In order to avoid dependency divergence within Hadoop itself, let's enable DependencyConvergence</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9961">HADOOP-9961</a>.
+     Minor bug reported by Roman Shaposhnik and fixed by Roman Shaposhnik (build)<br>
+     <b>versions of a few transitive dependencies diverged between hadoop subprojects</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9960">HADOOP-9960</a>.
+     Blocker bug reported by Brock Noland and fixed by Karthik Kambatla <br>
+     <b>Upgrade Jersey version to 1.9</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9958">HADOOP-9958</a>.
+     Major bug reported by Andrew Wang and fixed by Andrew Wang <br>
+     <b>Add old constructor back to DelegationTokenInformation to unbreak downstream builds</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9945">HADOOP-9945</a>.
+     Minor improvement reported by Karthik Kambatla and fixed by Karthik Kambatla (ha)<br>
+     <b>HAServiceState should have a state for stopped services</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9944">HADOOP-9944</a>.
+     Blocker bug reported by Arun C Murthy and fixed by Arun C Murthy <br>
+     <b>RpcRequestHeaderProto defines callId as uint32 while ipc.Client.CONNECTION_CONTEXT_CALL_ID is signed (-3)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9932">HADOOP-9932</a>.
+     Blocker bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Improper synchronization in RetryCache</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9924">HADOOP-9924</a>.
+     Major bug reported by shanyu zhao and fixed by shanyu zhao (fs)<br>
+     <b>FileUtil.createJarWithClassPath() does not generate relative classpath correctly</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9918">HADOOP-9918</a>.
+     Minor improvement reported by Karthik Kambatla and fixed by Karthik Kambatla <br>
+     <b>Add addIfService() to CompositeService</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9916">HADOOP-9916</a>.
+     Minor bug reported by Binglin Chang and fixed by Binglin Chang <br>
+     <b>Race condition in ipc.Client causes TestIPC timeout</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9910">HADOOP-9910</a>.
+     Minor bug reported by Andr&#233; Kelpe and fixed by  <br>
+     <b>proxy server start and stop documentation wrong</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9906">HADOOP-9906</a>.
+     Minor bug reported by Karthik Kambatla and fixed by Karthik Kambatla (ha)<br>
+     <b>Move HAZKUtil to o.a.h.util.ZKUtil and make inner-classes public</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9899">HADOOP-9899</a>.
+     Minor bug reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (security)<br>
+     <b>Remove the debug message added by HADOOP-8855</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9886">HADOOP-9886</a>.
+     Minor improvement reported by Arpit Gupta and fixed by Arpit Gupta <br>
+     <b>Turn warning message in RetryInvocationHandler to debug</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9880">HADOOP-9880</a>.
+     Blocker bug reported by Kihwal Lee and fixed by Daryn Sharp <br>
+     <b>SASL changes from HADOOP-9421 breaks Secure HA NN </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9879">HADOOP-9879</a>.
+     Minor improvement reported by Karthik Kambatla and fixed by Karthik Kambatla (build)<br>
+     <b>Move the version info of zookeeper dependencies to hadoop-project/pom</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9868">HADOOP-9868</a>.
+     Blocker bug reported by Daryn Sharp and fixed by Daryn Sharp (ipc)<br>
+     <b>Server must not advertise kerberos realm</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9858">HADOOP-9858</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (fs)<br>
+     <b>Remove unused private RawLocalFileSystem#execCommand method from branch-2.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9857">HADOOP-9857</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (build , test)<br>
+     <b>Tests block and sometimes timeout on Windows due to invalid entropy source.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9833">HADOOP-9833</a>.
+     Minor improvement reported by Steve Loughran and fixed by Kousuke Saruta (build)<br>
+     <b>move slf4j to version 1.7.5</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9831">HADOOP-9831</a>.
+     Minor improvement reported by Chris Nauroth and fixed by Chris Nauroth (bin)<br>
+     <b>Make checknative shell command accessible on Windows.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9821">HADOOP-9821</a>.
+     Minor improvement reported by Tsuyoshi OZAWA and fixed by Tsuyoshi OZAWA <br>
+     <b>ClientId should have getMsb/getLsb methods</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9820">HADOOP-9820</a>.
+     Blocker bug reported by Daryn Sharp and fixed by Daryn Sharp (ipc , security)<br>
+     <b>RPCv9 wire protocol is insufficient to support multiplexing</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9806">HADOOP-9806</a>.
+     Major bug reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>PortmapInterface should check if the procedure is out-of-range</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9803">HADOOP-9803</a>.
+     Minor improvement reported by Tsz Wo (Nicholas), SZE and fixed by Tsz Wo (Nicholas), SZE (ipc)<br>
+     <b>Add generic type parameter to RetryInvocationHandler</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9802">HADOOP-9802</a>.
+     Major improvement reported by Chris Nauroth and fixed by Chris Nauroth (io)<br>
+     <b>Support Snappy codec on Windows.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9801">HADOOP-9801</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (conf)<br>
+     <b>Configuration#writeXml uses platform defaulting encoding, which may mishandle multi-byte characters.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9789">HADOOP-9789</a>.
+     Critical new feature reported by Daryn Sharp and fixed by Daryn Sharp (ipc , security)<br>
+     <b>Support server advertised kerberos principals</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9774">HADOOP-9774</a>.
+     Major bug reported by shanyu zhao and fixed by shanyu zhao (fs)<br>
+     <b>RawLocalFileSystem.listStatus() return absolute paths when input path is relative on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9768">HADOOP-9768</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (fs)<br>
+     <b>chown and chgrp reject users and groups with spaces on platforms where spaces are otherwise acceptable</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9757">HADOOP-9757</a>.
+     Major bug reported by Jason Lowe and fixed by Cristina L. Abad (fs)<br>
+     <b>Har metadata cache can grow without limit</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9686">HADOOP-9686</a>.
+     Major improvement reported by Jason Lowe and fixed by Jason Lowe (conf)<br>
+     <b>Easy access to final parameters in Configuration</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9672">HADOOP-9672</a>.
+     Major improvement reported by Sandy Ryza and fixed by Sandy Ryza <br>
+     <b>Upgrade Avro dependency to 1.7.4</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9557">HADOOP-9557</a>.
+     Major bug reported by Lohit Vijayarenu and fixed by Lohit Vijayarenu (build)<br>
+     <b>hadoop-client excludes commons-httpclient</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9446">HADOOP-9446</a>.
+     Major improvement reported by Yu Gao and fixed by Yu Gao (security)<br>
+     <b>Support Kerberos HTTP SPNEGO authentication for non-SUN JDK</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9435">HADOOP-9435</a>.
+     Major bug reported by Tian Hong Wang and fixed by Tian Hong Wang (build)<br>
+     <b>Support building the JNI code against the IBM JVM</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9381">HADOOP-9381</a>.
+     Trivial bug reported by Keegan Witt and fixed by Keegan Witt <br>
+     <b>Document dfs cp -f option</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-9315">HADOOP-9315</a>.
+     Major bug reported by Dennis Y and fixed by Chris Nauroth (build)<br>
+     <b>Port HADOOP-9249 hadoop-maven-plugins Clover fix to branch-2 to fix build failures</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8814">HADOOP-8814</a>.
+     Minor improvement reported by Brandon Li and fixed by Brandon Li (conf , fs , fs/s3 , ha , io , metrics , performance , record , security , util)<br>
+     <b>Inefficient comparison with the empty string. Use isEmpty() instead</b><br>
+     <blockquote></blockquote></li>
+</ul>
+</body></html>
 <META http-equiv="Content-Type" content="text/html; charset=UTF-8">
 <title>Hadoop  2.1.0-beta Release Notes</title>
 <STYLE type="text/css">

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/proto/ProtobufRpcEngine.proto

@@ -60,8 +60,8 @@ message RequestHeaderProto {
    * ProtocolInfoProto) since they reuse the connection; in this case
    * the declaringClassProtocolName field is set to the ProtocolInfoProto
    */
-  required string declaringClassProtocolName = 3;
+  required string declaringClassProtocolName = 2;
   
   /** protocol version of class declaring the called method */
-  required uint64 clientProtocolVersion = 4;
+  required uint64 clientProtocolVersion = 3;
 }
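
Renumbering required fields changes the bytes on the wire, which is safe here only because both ends of this RPC ship in the same release. A standalone sketch of how protobuf derives the key byte from the field number (general protobuf encoding, not Hadoop code):

{code}
public class WireTag {
  // Protobuf encodes each field as a varint key:
  // (fieldNumber << 3) | wireType, where wire type 2 = length-delimited
  // (strings) and 0 = varint (uint64).
  static int key(int fieldNumber, int wireType) {
    return (fieldNumber << 3) | wireType;
  }

  public static void main(String[] args) {
    // declaringClassProtocolName moving from field 3 to field 2
    // changes its key byte from 0x1A to 0x12 on the wire:
    System.out.printf("field 3, string: 0x%02X%n", key(3, 2)); // 0x1A
    System.out.printf("field 2, string: 0x%02X%n", key(2, 2)); // 0x12
  }
}
{code}
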

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto

@@ -62,7 +62,7 @@ message RpcRequestHeaderProto { // the header for the RpcRequest
 
   optional RpcKindProto rpcKind = 1;
   optional OperationProto rpcOp = 2;
-  required uint32 callId = 3; // a sequence number that is sent back in response
+  required sint32 callId = 3; // a sequence number that is sent back in response
   required bytes clientId = 4; // Globally unique client ID
   // clientId + callId uniquely identifies a request
   // retry count, 1 means this is the first retry
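
The uint32-to-sint32 switch lets callId carry negative values such as the reserved connection-context call id (-3, see HADOOP-9944 above): sint32 zigzag-encodes, so small negative numbers stay small on the wire. A standalone sketch of the standard zigzag transform, not Hadoop code:

{code}
public class ZigZag {
  // Protobuf sint32 zigzag: maps -1, 1, -2, 2, ... to 1, 2, 3, 4, ...
  static int encode(int n) { return (n << 1) ^ (n >> 31); }
  static int decode(int z) { return (z >>> 1) ^ -(z & 1); }

  public static void main(String[] args) {
    int callId = -3; // e.g. the reserved connection-context call id
    int wire = encode(callId);
    // -3 becomes 5, a one-byte varint; a plain int encoding of a
    // negative value would be ten bytes, and uint32 cannot hold it.
    System.out.println(wire);                   // 5
    System.out.println(decode(wire) == callId); // true
  }
}
{code}
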

+ 4 - 3
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java

@@ -22,6 +22,7 @@ import java.util.List;
 import org.apache.hadoop.nfs.NfsExports;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
 
 /**
@@ -37,7 +38,7 @@ public class MountResponse {
   /** Response for RPC call {@link MountInterface.MNTPROC#MNT} */
   public static XDR writeMNTResponse(int status, XDR xdr, int xid,
       byte[] handle) {
-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     xdr.writeInt(status);
     if (status == MNT_OK) {
       xdr.writeVariableOpaque(handle);
@@ -50,7 +51,7 @@ public class MountResponse {
 
   /** Response for RPC call {@link MountInterface.MNTPROC#DUMP} */
   public static XDR writeMountList(XDR xdr, int xid, List<MountEntry> mounts) {
-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     for (MountEntry mountEntry : mounts) {
       xdr.writeBoolean(true); // Value follows yes
       xdr.writeString(mountEntry.host());
@@ -65,7 +66,7 @@ public class MountResponse {
       List<NfsExports> hostMatcher) {
     assert (exports.size() == hostMatcher.size());
 
-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     for (int i = 0; i < exports.size(); i++) {
       xdr.writeBoolean(true); // Value follows - yes
       xdr.writeString(exports.get(i));
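
The hunk above replaces a static voidReply() helper with a reply object that carries its verifier and serializes itself, making room for verifier flavors beyond AUTH_NONE. A self-contained sketch of that pattern; every name below is a stand-in modeled on the hunk, not the hadoop-nfs API:

{code}
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

interface Verifier {
  void write(DataOutputStream out) throws IOException;
}

class VerifierNone implements Verifier {
  public void write(DataOutputStream out) throws IOException {
    out.writeInt(0); // AUTH_NONE flavor
    out.writeInt(0); // zero-length verifier body
  }
}

class AcceptedReply {
  private final int xid;
  private final Verifier verifier;
  AcceptedReply(int xid, Verifier verifier) {
    this.xid = xid;
    this.verifier = verifier;
  }
  void write(DataOutputStream out) throws IOException {
    out.writeInt(xid);   // echo the request's transaction id
    verifier.write(out); // flavor-specific verifier, supplied by caller
  }
}

public class ReplyDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    new AcceptedReply(42, new VerifierNone()).write(new DataOutputStream(buf));
    System.out.println(buf.size() + " bytes written"); // 12 bytes
  }
}
{code}
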

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/ACCESS3Response.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * ACCESS3 Response 
@@ -43,8 +44,8 @@ public class ACCESS3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     out.writeBoolean(true);
     postOpAttr.serialize(out);
     if (this.getStatus() == Nfs3Status.NFS3_OK) {

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/COMMIT3Response.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * COMMIT3 Response
@@ -47,8 +48,8 @@ public class COMMIT3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     fileWcc.serialize(out);
     if (getStatus() == Nfs3Status.NFS3_OK) {
       out.writeLongAsHyper(verf);

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/CREATE3Response.java

@@ -21,6 +21,7 @@ import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * CREATE3 Response
@@ -55,8 +56,8 @@ public class CREATE3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     if (getStatus() == Nfs3Status.NFS3_OK) {
       out.writeBoolean(true); // Handle follows
       objHandle.serialize(out);

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSINFO3Response.java

@@ -21,6 +21,7 @@ import org.apache.hadoop.nfs.NfsTime;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * FSINFO3 Response
@@ -109,8 +110,8 @@ public class FSINFO3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     out.writeBoolean(true);
     postOpAttr.serialize(out);
 

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/FSSTAT3Response.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * FSSTAT3 Response
@@ -90,8 +91,8 @@ public class FSSTAT3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     out.writeBoolean(true);
     if (postOpAttr == null) {
       postOpAttr = new Nfs3FileAttributes();

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/GETATTR3Response.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * GETATTR3 Response
@@ -40,8 +41,8 @@ public class GETATTR3Response extends NFS3Response {
   }
   
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     if (getStatus() == Nfs3Status.NFS3_OK) {
       postOpAttr.serialize(out);
     }

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/LOOKUP3Response.java

@@ -23,6 +23,7 @@ import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * LOOKUP3 Response
@@ -61,8 +62,8 @@ public class LOOKUP3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     if (this.status == Nfs3Status.NFS3_OK) {
       fileHandle.serialize(out);
       out.writeBoolean(true); // Attribute follows

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/MKDIR3Response.java

@@ -21,6 +21,7 @@ import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * MKDIR3 Response
@@ -55,8 +56,8 @@ public class MKDIR3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     if (getStatus() == Nfs3Status.NFS3_OK) {
       out.writeBoolean(true); // Handle follows
       objFileHandle.serialize(out);

+ 11 - 4
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/NFS3Response.java

@@ -19,11 +19,13 @@ package org.apache.hadoop.nfs.nfs3.response;
 
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
- * Abstract class for a NFSv3 response
+ * Base class for an NFSv3 response. This class and its subclasses contain
+ * the responses returned by the NFSv3 handlers.
  */
-abstract public class NFS3Response {
+public class NFS3Response {
   protected int status;
 
   public NFS3Response(int status) {
@@ -38,8 +40,13 @@ abstract public class NFS3Response {
     this.status = status;
   }
   
-  public XDR send(XDR out, int xid) {
-    RpcAcceptedReply.voidReply(out, xid);
+  /**
+   * Write the response, along with the rpc header (including verifier), to the
+   * XDR.
+   */
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    RpcAcceptedReply reply = RpcAcceptedReply.getAcceptInstance(xid, verifier);
+    reply.write(out);
     out.writeInt(this.getStatus());
     return out;
   }
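
On the caller side, a handler now threads a verifier into the response serialization instead of relying on a hard-coded AUTH_NONE reply. A hedged sketch of that call (the xid is made up):

    import org.apache.hadoop.nfs.nfs3.Nfs3Status;
    import org.apache.hadoop.nfs.nfs3.response.NFS3Response;
    import org.apache.hadoop.oncrpc.XDR;
    import org.apache.hadoop.oncrpc.security.VerifierNone;

    public class ResponseWriteSketch {
      public static void main(String[] args) {
        NFS3Response response = new NFS3Response(Nfs3Status.NFS3_OK);
        XDR out = new XDR();
        // Writes the accepted-reply header plus verifier, then the status.
        response.writeHeaderAndResponse(out, 42, new VerifierNone());
      }
    }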

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/PATHCONF3Response.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * PATHCONF3 Response
@@ -77,8 +78,8 @@ public class PATHCONF3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     out.writeBoolean(true);
     postOpAttr.serialize(out);
 

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READ3Response.java

@@ -22,6 +22,7 @@ import java.nio.ByteBuffer;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * READ3 Response
@@ -62,8 +63,8 @@ public class READ3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     out.writeBoolean(true); // Attribute follows
     postOpAttr.serialize(out);
 

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java

@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * READDIR3 Response
@@ -96,8 +97,8 @@ public class READDIR3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR xdr, int xid) {
-    super.send(xdr, xid);
+  public XDR writeHeaderAndResponse(XDR xdr, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(xdr, xid, verifier);
     xdr.writeBoolean(true); // Attributes follow
     postOpDirAttr.serialize(xdr);
 

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * READDIRPLUS3 Response
@@ -92,8 +93,8 @@ public class READDIRPLUS3Response  extends NFS3Response {
   }
   
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     out.writeBoolean(true); // attributes follow
     if (postOpDirAttr == null) {
       postOpDirAttr = new Nfs3FileAttributes();

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READLINK3Response.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.response;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * READLINK3 Response
@@ -41,8 +42,8 @@ public class READLINK3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     out.writeBoolean(true); // Attribute follows
     postOpSymlinkAttr.serialize(out);
     if (getStatus() == Nfs3Status.NFS3_OK) {

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/REMOVE3Response.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.nfs.nfs3.response;
 
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * REMOVE3 Response
@@ -35,8 +36,8 @@ public class REMOVE3Response extends NFS3Response {
   }
   
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     if (dirWcc == null) {
       dirWcc = new WccData(null, null);
     }

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/RENAME3Response.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.nfs.nfs3.response;
 
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * RENAME3 Response
@@ -45,8 +46,8 @@ public class RENAME3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     fromDirWcc.serialize(out);
     toDirWcc.serialize(out);
     return out;

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/RMDIR3Response.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.nfs.nfs3.response;
 
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * RMDIR3 Response
@@ -39,8 +40,8 @@ public class RMDIR3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     dirWcc.serialize(out);
     return out;
   }

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/SETATTR3Response.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.nfs.nfs3.response;
 
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * SETATTR3 Response
@@ -39,8 +40,8 @@ public class SETATTR3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     wccData.serialize(out);
     return out;
   }

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/SYMLINK3Response.java

@@ -21,6 +21,7 @@ import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * SYMLINK3 Response
@@ -55,8 +56,8 @@ public class SYMLINK3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     if (this.getStatus() == Nfs3Status.NFS3_OK) {
       out.writeBoolean(true);
       objFileHandle.serialize(out);

+ 0 - 37
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/VoidResponse.java

@@ -1,37 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.nfs.nfs3.response;
-
-import org.apache.hadoop.oncrpc.RpcAcceptedReply;
-import org.apache.hadoop.oncrpc.XDR;
-
-/**
- * A void NFSv3 response
- */
-public class VoidResponse extends NFS3Response {
-
-  public VoidResponse(int status) {
-    super(status);
-  }
-
-  @Override
-  public XDR send(XDR out, int xid) {
-    RpcAcceptedReply.voidReply(out, xid);
-    return out;
-  }
-}

+ 3 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/WRITE3Response.java

@@ -21,6 +21,7 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /**
  * WRITE3 Response
@@ -58,8 +59,8 @@ public class WRITE3Response extends NFS3Response {
   }
 
   @Override
-  public XDR send(XDR out, int xid) {
-    super.send(out, xid);
+  public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
+    super.writeHeaderAndResponse(out, xid, verifier);
     fileWcc.serialize(out);
     if (getStatus() == Nfs3Status.NFS3_OK) {
       out.writeInt(count);

+ 21 - 24
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAcceptedReply.java

@@ -18,8 +18,6 @@
 package org.apache.hadoop.oncrpc;
 
 import org.apache.hadoop.oncrpc.security.Verifier;
-import org.apache.hadoop.oncrpc.security.RpcAuthInfo;
-import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
 
 /** 
  * Represents RPC message MSG_ACCEPTED reply body. See RFC 1831 for details.
@@ -43,43 +41,42 @@ public class RpcAcceptedReply extends RpcReply {
       return ordinal();
     }
   };
+  
+  public static RpcAcceptedReply getAcceptInstance(int xid, 
+      Verifier verifier) {
+    return getInstance(xid, AcceptState.SUCCESS, verifier);
+  }
+  
+  public static RpcAcceptedReply getInstance(int xid, AcceptState state,
+      Verifier verifier) {
+    return new RpcAcceptedReply(xid, ReplyState.MSG_ACCEPTED, verifier,
+        state);
+  }
 
-  private final RpcAuthInfo verifier;
   private final AcceptState acceptState;
 
-  RpcAcceptedReply(int xid, RpcMessage.Type messageType, ReplyState state,
-      RpcAuthInfo verifier, AcceptState acceptState) {
-    super(xid, messageType, state);
-    this.verifier = verifier;
+  RpcAcceptedReply(int xid, ReplyState state, Verifier verifier,
+      AcceptState acceptState) {
+    super(xid, state, verifier);
     this.acceptState = acceptState;
   }
 
-  public static RpcAcceptedReply read(int xid, RpcMessage.Type messageType,
-      ReplyState replyState, XDR xdr) {
+  public static RpcAcceptedReply read(int xid, ReplyState replyState, XDR xdr) {
     Verifier verifier = Verifier.readFlavorAndVerifier(xdr);
     AcceptState acceptState = AcceptState.fromValue(xdr.readInt());
-    return new RpcAcceptedReply(xid, messageType, replyState, verifier,
-        acceptState);
-  }
-
-  public RpcAuthInfo getVerifier() {
-    return verifier;
+    return new RpcAcceptedReply(xid, replyState, verifier, acceptState);
   }
 
   public AcceptState getAcceptState() {
     return acceptState;
   }
   
-  public static XDR voidReply(XDR xdr, int xid) {
-    return voidReply(xdr, xid, AcceptState.SUCCESS);
-  }
-  
-  public static XDR voidReply(XDR xdr, int xid, AcceptState acceptState) {
+  @Override
+  public XDR write(XDR xdr) {
     xdr.writeInt(xid);
-    xdr.writeInt(RpcMessage.Type.RPC_REPLY.getValue());
-    xdr.writeInt(ReplyState.MSG_ACCEPTED.getValue());
-    xdr.writeInt(AuthFlavor.AUTH_NONE.getValue());
-    xdr.writeVariableOpaque(new byte[0]);
+    xdr.writeInt(messageType.getValue());
+    xdr.writeInt(replyState.getValue());
+    Verifier.writeFlavorAndVerifier(verifier, xdr);
     xdr.writeInt(acceptState.getValue());
     return xdr;
   }

+ 29 - 19
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java

@@ -28,11 +28,25 @@ import org.apache.hadoop.oncrpc.security.Verifier;
 public class RpcCall extends RpcMessage {
   public static final int RPC_VERSION = 2;
   private static final Log LOG = LogFactory.getLog(RpcCall.class);
+  
+  public static RpcCall read(XDR xdr) {
+    return new RpcCall(xdr.readInt(), RpcMessage.Type.fromValue(xdr.readInt()),
+        xdr.readInt(), xdr.readInt(), xdr.readInt(), xdr.readInt(), 
+        Credentials.readFlavorAndCredentials(xdr),
+        Verifier.readFlavorAndVerifier(xdr));
+  }
+  
+  public static RpcCall getInstance(int xid, int program, int version,
+      int procedure, Credentials cred, Verifier verifier) {
+    return new RpcCall(xid, RpcMessage.Type.RPC_CALL, RPC_VERSION, program,
+        version, procedure, cred, verifier);
+  }
+  
   private final int rpcVersion;
   private final int program;
   private final int version;
   private final int procedure;
-  private final Credentials credential;
+  private final Credentials credentials;
   private final Verifier verifier;
 
   protected RpcCall(int xid, RpcMessage.Type messageType, int rpcVersion,
@@ -43,7 +57,7 @@ public class RpcCall extends RpcMessage {
     this.program = program;
     this.version = version;
     this.procedure = procedure;
-    this.credential = credential;
+    this.credentials = credential;
     this.verifier = verifier;
     if (LOG.isTraceEnabled()) {
       LOG.trace(this);
@@ -83,28 +97,24 @@ public class RpcCall extends RpcMessage {
   }
   
   public Credentials getCredential() {
-    return credential;
+    return credentials;
   }
 
   public Verifier getVerifier() {
     return verifier;
   }
   
-  public static RpcCall read(XDR xdr) {
-    return new RpcCall(xdr.readInt(), RpcMessage.Type.fromValue(xdr.readInt()),
-        xdr.readInt(), xdr.readInt(), xdr.readInt(), xdr.readInt(), 
-        Credentials.readFlavorAndCredentials(xdr),
-        Verifier.readFlavorAndVerifier(xdr));
-  }
-  
-  public static void write(XDR out, int xid, int program, int progVersion,
-      int procedure) {
-    out.writeInt(xid);
-    out.writeInt(RpcMessage.Type.RPC_CALL.getValue());
-    out.writeInt(2);
-    out.writeInt(program);
-    out.writeInt(progVersion);
-    out.writeInt(procedure);
+  @Override
+  public XDR write(XDR xdr) {
+    xdr.writeInt(xid);
+    xdr.writeInt(RpcMessage.Type.RPC_CALL.getValue());
+    xdr.writeInt(RPC_VERSION);
+    xdr.writeInt(program);
+    xdr.writeInt(version);
+    xdr.writeInt(procedure);
+    Credentials.writeFlavorAndCredentials(credentials, xdr);
+    Verifier.writeFlavorAndVerifier(verifier, xdr);
+    return xdr;
   }
   
   @Override
@@ -112,6 +122,6 @@ public class RpcCall extends RpcMessage {
     return String.format("Xid:%d, messageType:%s, rpcVersion:%d, program:%d,"
         + " version:%d, procedure:%d, credential:%s, verifier:%s", xid,
         messageType, rpcVersion, program, version, procedure,
-        credential.toString(), verifier.toString());
+        credentials.toString(), verifier.toString());
   }
 }
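
Building a call now goes through the factory plus the instance write() method, which also serializes the credentials and verifier. A minimal sketch (program, version, and procedure numbers are illustrative):

    import org.apache.hadoop.oncrpc.RpcCall;
    import org.apache.hadoop.oncrpc.XDR;
    import org.apache.hadoop.oncrpc.security.CredentialsNone;
    import org.apache.hadoop.oncrpc.security.VerifierNone;

    public class CallWriteSketch {
      public static void main(String[] args) {
        RpcCall call = RpcCall.getInstance(1 /* xid */, 100000 /* program */,
            2 /* version */, 3 /* procedure */, new CredentialsNone(),
            new VerifierNone());
        XDR out = new XDR();
        call.write(out); // header, credentials and verifier in one call
      }
    }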

+ 14 - 14
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcDeniedReply.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.oncrpc;
 
-import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
+import org.apache.hadoop.oncrpc.security.Verifier;
 
 /** 
  * Represents RPC message MSG_DENIED reply body. See RFC 1831 for details.
@@ -40,16 +40,16 @@ public class RpcDeniedReply extends RpcReply {
 
   private final RejectState rejectState;
 
-  RpcDeniedReply(int xid, RpcMessage.Type messageType, ReplyState replyState,
-      RejectState rejectState) {
-    super(xid, messageType, replyState);
+  public RpcDeniedReply(int xid, ReplyState replyState,
+      RejectState rejectState, Verifier verifier) {
+    super(xid, replyState, verifier);
     this.rejectState = rejectState;
   }
 
-  public static RpcDeniedReply read(int xid, RpcMessage.Type messageType,
-      ReplyState replyState, XDR xdr) {
+  public static RpcDeniedReply read(int xid, ReplyState replyState, XDR xdr) {
+    Verifier verifier = Verifier.readFlavorAndVerifier(xdr);
     RejectState rejectState = RejectState.fromValue(xdr.readInt());
-    return new RpcDeniedReply(xid, messageType, replyState, rejectState);
+    return new RpcDeniedReply(xid, replyState, rejectState, verifier);
   }
 
   public RejectState getRejectState() {
@@ -59,17 +59,17 @@ public class RpcDeniedReply extends RpcReply {
   @Override
   public String toString() {
     return new StringBuffer().append("xid:").append(xid)
-        .append(",messageType:").append(messageType).append("rejectState:")
+        .append(",messageType:").append(messageType).append("verifier_flavor:")
+        .append(verifier.getFlavor()).append("rejectState:")
         .append(rejectState).toString();
   }
   
-  public static XDR voidReply(XDR xdr, int xid, ReplyState msgAccepted,
-      RejectState rejectState) {
+  @Override
+  public XDR write(XDR xdr) {
     xdr.writeInt(xid);
-    xdr.writeInt(RpcMessage.Type.RPC_REPLY.getValue());
-    xdr.writeInt(msgAccepted.getValue());
-    xdr.writeInt(AuthFlavor.AUTH_NONE.getValue());
-    xdr.writeVariableOpaque(new byte[0]);
+    xdr.writeInt(messageType.getValue());
+    xdr.writeInt(replyState.getValue());
+    Verifier.writeFlavorAndVerifier(verifier, xdr);
     xdr.writeInt(rejectState.getValue());
     return xdr;
   }
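
With the constructor now public and verifier-aware, a denied reply is written the same way as an accepted one. A short sketch (the xid and states are illustrative):

    import org.apache.hadoop.oncrpc.RpcDeniedReply;
    import org.apache.hadoop.oncrpc.RpcDeniedReply.RejectState;
    import org.apache.hadoop.oncrpc.RpcReply.ReplyState;
    import org.apache.hadoop.oncrpc.XDR;
    import org.apache.hadoop.oncrpc.security.VerifierNone;

    public class DeniedReplySketch {
      public static void main(String[] args) {
        XDR out = new XDR();
        new RpcDeniedReply(42, ReplyState.MSG_DENIED, RejectState.AUTH_ERROR,
            new VerifierNone()).write(out);
      }
    }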

+ 2 - 0
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcMessage.java

@@ -50,6 +50,8 @@ public abstract class RpcMessage {
     this.messageType = messageType;
   }
   
+  public abstract XDR write(XDR xdr);
+  
   public int getXid() {
     return xid;
   }

+ 7 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcProgram.java

@@ -24,6 +24,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
 import org.apache.hadoop.oncrpc.RpcCallCache.CacheEntry;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.apache.hadoop.portmap.PortmapMapping;
 import org.apache.hadoop.portmap.PortmapRequest;
 import org.jboss.netty.channel.Channel;
@@ -163,13 +164,17 @@ public abstract class RpcProgram {
   
   private XDR programMismatch(XDR out, RpcCall call) {
     LOG.warn("Invalid RPC call program " + call.getProgram());
-    RpcAcceptedReply.voidReply(out, call.getXid(), AcceptState.PROG_UNAVAIL);
+    RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
+        AcceptState.PROG_UNAVAIL, new VerifierNone());
+    reply.write(out);
     return out;
   }
   
   private XDR programVersionMismatch(XDR out, RpcCall call) {
     LOG.warn("Invalid RPC call version " + call.getVersion());
-    RpcAcceptedReply.voidReply(out, call.getXid(), AcceptState.PROG_MISMATCH);
+    RpcAcceptedReply reply = RpcAcceptedReply.getInstance(call.getXid(),
+        AcceptState.PROG_MISMATCH, new VerifierNone());
+    reply.write(out);
     out.writeInt(lowProgVersion);
     out.writeInt(highProgVersion);
     return out;

+ 20 - 8
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcReply.java

@@ -17,6 +17,11 @@
  */
 package org.apache.hadoop.oncrpc;
 
+import org.apache.hadoop.oncrpc.security.RpcAuthInfo;
+import org.apache.hadoop.oncrpc.security.Verifier;
+
+import com.google.common.base.Preconditions;
+
 /**
  * Represents an RPC message of type RPC reply as defined in RFC 1831
  */
@@ -36,28 +41,35 @@ public abstract class RpcReply extends RpcMessage {
     }
   }
   
-  private final ReplyState state;
+  protected final ReplyState replyState;
+  protected final Verifier verifier;
   
-  RpcReply(int xid, RpcMessage.Type messageType, ReplyState state) {
-    super(xid, messageType);
-    this.state = state;
-    validateMessageType(RpcMessage.Type.RPC_REPLY);
+  RpcReply(int xid, ReplyState state, Verifier verifier) {
+    super(xid, RpcMessage.Type.RPC_REPLY);
+    this.replyState = state;
+    this.verifier = verifier;
+  }
+  
+  public RpcAuthInfo getVerifier() {
+    return verifier;
   }
 
   public static RpcReply read(XDR xdr) {
     int xid = xdr.readInt();
     final Type messageType = Type.fromValue(xdr.readInt());
+    Preconditions.checkState(messageType == RpcMessage.Type.RPC_REPLY);
+    
     ReplyState stat = ReplyState.fromValue(xdr.readInt());
     switch (stat) {
     case MSG_ACCEPTED:
-      return RpcAcceptedReply.read(xid, messageType, stat, xdr);
+      return RpcAcceptedReply.read(xid, stat, xdr);
     case MSG_DENIED:
-      return RpcDeniedReply.read(xid, messageType, stat, xdr);
+      return RpcDeniedReply.read(xid, stat, xdr);
     }
     return null;
   }
 
   public ReplyState getState() {
-    return state;
+    return replyState;
   }
 }
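
Since both reply subclasses now implement write(), a reply serialized by the server can be parsed back with RpcReply.read(), which validates the RPC_REPLY type and dispatches on the reply state. A round-trip sketch under those assumptions:

    import org.apache.hadoop.oncrpc.RpcAcceptedReply;
    import org.apache.hadoop.oncrpc.RpcReply;
    import org.apache.hadoop.oncrpc.XDR;
    import org.apache.hadoop.oncrpc.security.VerifierNone;

    public class ReplyRoundTripSketch {
      public static void main(String[] args) {
        XDR out = new XDR();
        RpcAcceptedReply.getAcceptInstance(7, new VerifierNone()).write(out);
        RpcReply reply = RpcReply.read(out.asReadOnlyWrap());
        assert reply.getState() == RpcReply.ReplyState.MSG_ACCEPTED;
      }
    }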

+ 1 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpClient.java

@@ -57,8 +57,7 @@ public class SimpleUdpClient {
     clientSocket.receive(receivePacket);
 
     // Check reply status
-    XDR xdr = new XDR();
-    xdr.writeFixedOpaque(Arrays.copyOfRange(receiveData, 0,
+    XDR xdr = new XDR(Arrays.copyOfRange(receiveData, 0,
         receivePacket.getLength()));
     RpcReply reply = RpcReply.read(xdr);
     if (reply.getState() != RpcReply.ReplyState.MSG_ACCEPTED) {

+ 5 - 4
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleUdpServerHandler.java

@@ -43,13 +43,14 @@ public class SimpleUdpServerHandler extends SimpleChannelHandler {
   public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) {
     ChannelBuffer buf = (ChannelBuffer) e.getMessage();
 
-    XDR request = new XDR();
-
-    request.writeFixedOpaque(buf.array());
+    XDR request = new XDR(buf.array());
+    
     InetAddress remoteInetAddr = ((InetSocketAddress) e.getRemoteAddress())
         .getAddress();
     XDR response = rpcProgram.handle(request, remoteInetAddr, null);
-    e.getChannel().write(XDR.writeMessageUdp(response), e.getRemoteAddress());
+    
+    e.getChannel().write(XDR.writeMessageUdp(response.asReadOnlyWrap()),
+        e.getRemoteAddress());
   }
 
   @Override

+ 166 - 315
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java

@@ -17,402 +17,253 @@
  */
 package org.apache.hadoop.oncrpc;
 
-import java.io.PrintStream;
-import java.util.Arrays;
+import java.nio.ByteBuffer;
 
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffers;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 
 /**
  * Utility class for building XDR messages based on RFC 4506.
- * <p>
- * This class maintains a buffer into which java types are written as
- * XDR types for building XDR messages. Similarly this class can
- * be used to get java types from an XDR request or response.
- * <p>
- * Currently only a subset of XDR types defined in RFC 4506 are supported.
+ *
+ * Key points of the format:
+ *
+ * <ul>
+ * <li>Primitives are stored in big-endian order (i.e., the default byte order
+ * of ByteBuffer).</li>
+ * <li>Booleans are stored as an integer.</li>
+ * <li>Each field in the message is always aligned by 4.</li>
+ * </ul>
+ *
  */
-public class XDR {
-  private final static  String HEXES = "0123456789abcdef";
-  
-  /** Internal buffer for reading or writing to */
-  private byte[] bytearr;
-  
-  /** Place to read from or write to */
-  private int cursor;
+public final class XDR {
+  private static final int DEFAULT_INITIAL_CAPACITY = 256;
+  private static final int SIZEOF_INT = 4;
+  private static final int SIZEOF_LONG = 8;
+  private static final byte[] PADDING_BYTES = new byte[] { 0, 0, 0, 0 };
 
-  public XDR() {
-    this(new byte[0]);
-  }
+  private ByteBuffer buf;
 
-  public XDR(byte[] data) {
-    bytearr = Arrays.copyOf(data, data.length);
-    cursor = 0;
+  private enum State {
+    READING, WRITING,
   }
 
+  private final State state;
+
   /**
-   * @param bytes bytes to be appended to internal buffer
+   * Construct a new XDR message buffer.
+   *
+   * @param initialCapacity
+   *          the initial capacity of the buffer.
    */
-  private void append(byte[] bytesToAdd) {
-    bytearr = append(bytearr, bytesToAdd);
+  public XDR(int initialCapacity) {
+    this(ByteBuffer.allocate(initialCapacity), State.WRITING);
   }
 
-  public int size() {
-    return bytearr.length;
+  public XDR() {
+    this(DEFAULT_INITIAL_CAPACITY);
   }
 
-  /** Skip some bytes by moving the cursor */
-  public void skip(int size) {
-    cursor += size;
+  private XDR(ByteBuffer buf, State state) {
+    this.buf = buf;
+    this.state = state;
   }
 
   /**
-   * Write Java primitive integer as XDR signed integer.
-   * 
-   * Definition of XDR signed integer from RFC 4506:
-   * <pre>
-   * An XDR signed integer is a 32-bit datum that encodes an integer in
-   * the range [-2147483648,2147483647].  The integer is represented in
-   * two's complement notation.  The most and least significant bytes are
-   * 0 and 3, respectively.  Integers are declared as follows:
+   * Wraps a byte array as a read-only XDR message. There's no copy involved,
+   * thus it is the client's responsibility to ensure that the byte array
+   * remains unmodified when using the XDR object.
    * 
-   *       int identifier;
-   * 
-   *            (MSB)                   (LSB)
-   *          +-------+-------+-------+-------+
-   *          |byte 0 |byte 1 |byte 2 |byte 3 |                      INTEGER
-   *          +-------+-------+-------+-------+
-   *          <------------32 bits------------>
-   * </pre>
+   * @param src
+   *          the byte array to be wrapped.
    */
-  public void writeInt(int data) {
-    append(toBytes(data));
+  public XDR(byte[] src) {
+    this(ByteBuffer.wrap(src).asReadOnlyBuffer(), State.READING);
+  }
+
+  public XDR asReadOnlyWrap() {
+    ByteBuffer b = buf.asReadOnlyBuffer();
+    if (state == State.WRITING) {
+      b.flip();
+    }
+
+    XDR n = new XDR(b, State.READING);
+    return n;
+  }
+
+  public int size() {
+    // TODO: This overloading is intended to be compatible with the semantics of
+    // the previous version of the class. This function should be separated into
+    // two with clear semantics.
+    return state == State.READING ? buf.limit() : buf.position();
   }
 
-  /**
-   * Read an XDR signed integer and return as Java primitive integer.
-   */
   public int readInt() {
-    byte byte0 = bytearr[cursor++];
-    byte byte1 = bytearr[cursor++];
-    byte byte2 = bytearr[cursor++];
-    byte byte3 = bytearr[cursor++];
-    return (XDR.toShort(byte0) << 24) + (XDR.toShort(byte1) << 16)
-        + (XDR.toShort(byte2) << 8) + XDR.toShort(byte3);
+    Preconditions.checkState(state == State.READING);
+    return buf.getInt();
   }
 
-  /**
-   * Write Java primitive boolean as an XDR boolean.
-   * 
-   * Definition of XDR boolean from RFC 4506:
-   * <pre>
-   *    Booleans are important enough and occur frequently enough to warrant
-   *    their own explicit type in the standard.  Booleans are declared as
-   *    follows:
-   * 
-   *          bool identifier;
-   * 
-   *    This is equivalent to:
-   * 
-   *          enum { FALSE = 0, TRUE = 1 } identifier;
-   * </pre>
-   */
-  public void writeBoolean(boolean data) {
-    this.writeInt(data ? 1 : 0);
+  public void writeInt(int v) {
+    ensureFreeSpace(SIZEOF_INT);
+    buf.putInt(v);
   }
 
-  /**
-   * Read an XDR boolean and return as Java primitive boolean.
-   */
   public boolean readBoolean() {
-    return readInt() == 0 ? false : true;
+    Preconditions.checkState(state == State.READING);
+    return buf.getInt() != 0;
   }
 
-  /**
-   * Write Java primitive long to an XDR signed long.
-   * 
-   * Definition of XDR signed long from RFC 4506:
-   * <pre>
-   *    The standard also defines 64-bit (8-byte) numbers called hyper
-   *    integers and unsigned hyper integers.  Their representations are the
-   *    obvious extensions of integer and unsigned integer defined above.
-   *    They are represented in two's complement notation.The most and
-   *    least significant bytes are 0 and 7, respectively. Their
-   *    declarations:
-   * 
-   *    hyper identifier; unsigned hyper identifier;
-   * 
-   *         (MSB)                                                   (LSB)
-   *       +-------+-------+-------+-------+-------+-------+-------+-------+
-   *       |byte 0 |byte 1 |byte 2 |byte 3 |byte 4 |byte 5 |byte 6 |byte 7 |
-   *       +-------+-------+-------+-------+-------+-------+-------+-------+
-   *       <----------------------------64 bits---------------------------->
-   *                                                  HYPER INTEGER
-   *                                                  UNSIGNED HYPER INTEGER
-   * </pre>
-   */
-  public void writeLongAsHyper(long data) {
-       byte byte0 = (byte) ((data & 0xff00000000000000l) >> 56);
-    byte byte1 = (byte) ((data & 0x00ff000000000000l) >> 48);
-    byte byte2 = (byte) ((data & 0x0000ff0000000000l) >> 40);
-    byte byte3 = (byte) ((data & 0x000000ff00000000l) >> 32);
-    byte byte4 = (byte) ((data & 0x00000000ff000000l) >> 24);
-    byte byte5 = (byte) ((data & 0x0000000000ff0000l) >> 16);
-    byte byte6 = (byte) ((data & 0x000000000000ff00l) >> 8);
-    byte byte7 = (byte) ((data & 0x00000000000000ffl));
-    this.append(new byte[] { byte0, byte1, byte2, byte3, byte4, byte5, byte6, byte7 });
+  public void writeBoolean(boolean v) {
+    ensureFreeSpace(SIZEOF_INT);
+    buf.putInt(v ? 1 : 0);
   }
 
-  /**
-   * Read XDR signed hyper and return as java primitive long.
-   */
   public long readHyper() {
-    byte byte0 = bytearr[cursor++];
-    byte byte1 = bytearr[cursor++];
-    byte byte2 = bytearr[cursor++];
-    byte byte3 = bytearr[cursor++];
-    byte byte4 = bytearr[cursor++];
-    byte byte5 = bytearr[cursor++];
-    byte byte6 = bytearr[cursor++];
-    byte byte7 = bytearr[cursor++];
-    return ((long) XDR.toShort(byte0) << 56)
-        + ((long) XDR.toShort(byte1) << 48) + ((long) XDR.toShort(byte2) << 40)
-        + ((long) XDR.toShort(byte3) << 32) + ((long) XDR.toShort(byte4) << 24)
-        + ((long) XDR.toShort(byte5) << 16) + ((long) XDR.toShort(byte6) << 8)
-        + XDR.toShort(byte7);
+    Preconditions.checkState(state == State.READING);
+    return buf.getLong();
   }
 
-  /**
-   * Write a Java primitive byte array to XDR fixed-length opaque data.
-   * 
-   * Defintion of fixed-length opaque data from RFC 4506:
-   * <pre>
-   *    At times, fixed-length uninterpreted data needs to be passed among
-   *    machines.  This data is called "opaque" and is declared as follows:
-   * 
-   *          opaque identifier[n];
-   * 
-   *    where the constant n is the (static) number of bytes necessary to
-   *    contain the opaque data.  If n is not a multiple of four, then the n
-   *    bytes are followed by enough (0 to 3) residual zero bytes, r, to make
-   *    the total byte count of the opaque object a multiple of four.
-   * 
-   *           0        1     ...
-   *       +--------+--------+...+--------+--------+...+--------+
-   *       | byte 0 | byte 1 |...|byte n-1|    0   |...|    0   |
-   *       +--------+--------+...+--------+--------+...+--------+
-   *       |<-----------n bytes---------->|<------r bytes------>|
-   *       |<-----------n+r (where (n+r) mod 4 = 0)------------>|
-   *                                                    FIXED-LENGTH OPAQUE
-   * </pre>
-   */
-  public void writeFixedOpaque(byte[] data) {
-    writeFixedOpaque(data, data.length);
-  }
-
-  public void writeFixedOpaque(byte[] data, int length) {
-    append(Arrays.copyOf(data, length + XDR.pad(length, 4)));
+  public void writeLongAsHyper(long v) {
+    ensureFreeSpace(SIZEOF_LONG);
+    buf.putLong(v);
   }
 
   public byte[] readFixedOpaque(int size) {
-    byte[] ret = new byte[size];
-    for(int i = 0; i < size; i++) {
-      ret[i] = bytearr[cursor];
-      cursor++;
-    }
+    Preconditions.checkState(state == State.READING);
+    byte[] r = new byte[size];
+    buf.get(r);
+    alignPosition();
+    return r;
+  }
 
-    for(int i = 0; i < XDR.pad(size, 4); i++) {
-      cursor++;
-    }
-    return ret;
+  public void writeFixedOpaque(byte[] src, int length) {
+    ensureFreeSpace(alignUp(length));
+    buf.put(src, 0, length);
+    writePadding();
   }
 
-  /**
-   * Write a Java primitive byte array as XDR variable-length opque data.
-   * 
-   * Definition of XDR variable-length opaque data RFC 4506:
-   * 
-   * <pre>
-   *    The standard also provides for variable-length (counted) opaque data,
-   *    defined as a sequence of n (numbered 0 through n-1) arbitrary bytes
-   *    to be the number n encoded as an unsigned integer (as described
-   *    below), and followed by the n bytes of the sequence.
-   * 
-   *    Byte m of the sequence always precedes byte m+1 of the sequence, and
-   *    byte 0 of the sequence always follows the sequence's length (count).
-   *    If n is not a multiple of four, then the n bytes are followed by
-   *    enough (0 to 3) residual zero bytes, r, to make the total byte count
-   *    a multiple of four.  Variable-length opaque data is declared in the
-   *    following way:
-   * 
-   *          opaque identifier<m>;
-   *       or
-   *          opaque identifier<>;
-   * 
-   *    The constant m denotes an upper bound of the number of bytes that the
-   *    sequence may contain.  If m is not specified, as in the second
-   *    declaration, it is assumed to be (2**32) - 1, the maximum length.
-   * 
-   *    The constant m would normally be found in a protocol specification.
-   *    For example, a filing protocol may state that the maximum data
-   *    transfer size is 8192 bytes, as follows:
-   * 
-   *          opaque filedata<8192>;
-   * 
-   *             0     1     2     3     4     5   ...
-   *          +-----+-----+-----+-----+-----+-----+...+-----+-----+...+-----+
-   *          |        length n       |byte0|byte1|...| n-1 |  0  |...|  0  |
-   *          +-----+-----+-----+-----+-----+-----+...+-----+-----+...+-----+
-   *          |<-------4 bytes------->|<------n bytes------>|<---r bytes--->|
-   *                                  |<----n+r (where (n+r) mod 4 = 0)---->|
-   *                                                   VARIABLE-LENGTH OPAQUE
-   * 
-   *    It is an error to encode a length greater than the maximum described
-   *    in the specification.
-   * </pre>
-   */
-  public void writeVariableOpaque(byte[] data) {
-    this.writeInt(data.length);
-    this.writeFixedOpaque(data);
+  public void writeFixedOpaque(byte[] src) {
+    writeFixedOpaque(src, src.length);
   }
 
   public byte[] readVariableOpaque() {
-    int size = this.readInt();
-    return size != 0 ? this.readFixedOpaque(size) : new byte[0];
+    Preconditions.checkState(state == State.READING);
+    int size = readInt();
+    return readFixedOpaque(size);
   }
 
-  public void skipVariableOpaque() {
-    int length= this.readInt();
-    this.skip(length+XDR.pad(length, 4));
-  }
-  
-  /**
-   * Write Java String as XDR string.
-   * 
-   * Definition of XDR string from RFC 4506:
-   * 
-   * <pre>
-   *    The standard defines a string of n (numbered 0 through n-1) ASCII
-   *    bytes to be the number n encoded as an unsigned integer (as described
-   *    above), and followed by the n bytes of the string.  Byte m of the
-   *    string always precedes byte m+1 of the string, and byte 0 of the
-   *    string always follows the string's length.  If n is not a multiple of
-   *    four, then the n bytes are followed by enough (0 to 3) residual zero
-   *    bytes, r, to make the total byte count a multiple of four.  Counted
-   *    byte strings are declared as follows:
-   * 
-   *          string object<m>;
-   *       or
-   *          string object<>;
-   * 
-   *    The constant m denotes an upper bound of the number of bytes that a
-   *    string may contain.  If m is not specified, as in the second
-   *    declaration, it is assumed to be (2**32) - 1, the maximum length.
-   *    The constant m would normally be found in a protocol specification.
-   *    For example, a filing protocol may state that a file name can be no
-   *    longer than 255 bytes, as follows:
-   * 
-   *          string filename<255>;
-   * 
-   *             0     1     2     3     4     5   ...
-   *          +-----+-----+-----+-----+-----+-----+...+-----+-----+...+-----+
-   *          |        length n       |byte0|byte1|...| n-1 |  0  |...|  0  |
-   *          +-----+-----+-----+-----+-----+-----+...+-----+-----+...+-----+
-   *          |<-------4 bytes------->|<------n bytes------>|<---r bytes--->|
-   *                                  |<----n+r (where (n+r) mod 4 = 0)---->|
-   *                                                                   STRING
-   *    It is an error to encode a length greater than the maximum described
-   *    in the specification.
-   * </pre>
-   */
-  public void writeString(String data) {
-    this.writeVariableOpaque(data.getBytes());
+  public void writeVariableOpaque(byte[] src) {
+    ensureFreeSpace(SIZEOF_INT + alignUp(src.length));
+    buf.putInt(src.length);
+    writeFixedOpaque(src);
   }
 
   public String readString() {
-    return new String(this.readVariableOpaque());
+    return new String(readVariableOpaque());
   }
 
-  public void dump(PrintStream out) {
-    for(int i = 0; i < bytearr.length; i += 4) {
-      out.println(hex(bytearr[i]) + " " + hex(bytearr[i + 1]) + " "
-          + hex(bytearr[i + 2]) + " " + hex(bytearr[i + 3]));
-    }
+  public void writeString(String s) {
+    writeVariableOpaque(s.getBytes());
   }
 
-  @VisibleForTesting
-  public byte[] getBytes() {
-    return Arrays.copyOf(bytearr, bytearr.length);
+  private void writePadding() {
+    Preconditions.checkState(state == State.WRITING);
+    int p = pad(buf.position());
+    ensureFreeSpace(p);
+    buf.put(PADDING_BYTES, 0, p);
   }
 
-  public static byte[] append(byte[] bytes, byte[] bytesToAdd) {
-    byte[] newByteArray = new byte[bytes.length + bytesToAdd.length];
-    System.arraycopy(bytes, 0, newByteArray, 0, bytes.length);
-    System.arraycopy(bytesToAdd, 0, newByteArray, bytes.length, bytesToAdd.length);
-    return newByteArray;
+  private int alignUp(int length) {
+    return length + pad(length);
   }
 
-  private static int pad(int x, int y) {
-    return x % y == 0 ? 0 : y - (x % y);
+  private int pad(int length) {
+    switch (length % 4) {
+    case 1:
+      return 3;
+    case 2:
+      return 2;
+    case 3:
+      return 1;
+    default:
+      return 0;
+    }
   }
 
-  static byte[] toBytes(int n) {
-    byte[] ret = { (byte) ((n & 0xff000000) >> 24),
-        (byte) ((n & 0x00ff0000) >> 16), (byte) ((n & 0x0000ff00) >> 8),
-        (byte) (n & 0x000000ff) };
-    return ret;
+  private void alignPosition() {
+    buf.position(alignUp(buf.position()));
   }
 
-  private static short toShort(byte b) {
-    return b < 0 ? (short) (b + 256): (short) b;
+  private void ensureFreeSpace(int size) {
+    Preconditions.checkState(state == State.WRITING);
+    if (buf.remaining() < size) {
+      int newCapacity = buf.capacity() * 2;
+      int newRemaining = buf.capacity() + buf.remaining();
+
+      while (newRemaining < size) {
+        newRemaining += newCapacity;
+        newCapacity *= 2;
+      }
+
+      ByteBuffer newbuf = ByteBuffer.allocate(newCapacity);
+      buf.flip();
+      newbuf.put(buf);
+      buf = newbuf;
+    }
   }
 
-  private static String hex(byte b) {
-    return "" + HEXES.charAt((b & 0xF0) >> 4) + HEXES.charAt((b & 0x0F));
+  /** Check whether the rest of the data has at least len bytes */
+  public static boolean verifyLength(XDR xdr, int len) {
+    return xdr.buf.remaining() >= len;
   }
 
   private static byte[] recordMark(int size, boolean last) {
-    return toBytes(!last ? size : size | 0x80000000);
+    byte[] b = new byte[SIZEOF_INT];
+    ByteBuffer buf = ByteBuffer.wrap(b);
+    buf.putInt(!last ? size : size | 0x80000000);
+    return b;
   }
 
-  public static byte[] getVariableOpque(byte[] data) {
-    byte[] bytes = toBytes(data.length);
-    return append(bytes, Arrays.copyOf(data, data.length + XDR.pad(data.length, 4)));
+  /** Write an XDR message to a TCP ChannelBuffer */
+  public static ChannelBuffer writeMessageTcp(XDR request, boolean last) {
+    Preconditions.checkState(request.state == XDR.State.WRITING);
+    ByteBuffer b = request.buf.duplicate();
+    b.flip();
+    byte[] fragmentHeader = XDR.recordMark(b.limit(), last);
+    ByteBuffer headerBuf = ByteBuffer.wrap(fragmentHeader);
+
+    // TODO: Investigate whether making a copy of the buffer is necessary.
+    return ChannelBuffers.copiedBuffer(headerBuf, b);
+  }
+
+  /** Write an XDR message to a UDP ChannelBuffer */
+  public static ChannelBuffer writeMessageUdp(XDR response) {
+    Preconditions.checkState(response.state == XDR.State.READING);
+    // TODO: Investigate whether making a copy of the buffer is necessary.
+    return ChannelBuffers.copiedBuffer(response.buf);
   }
 
   public static int fragmentSize(byte[] mark) {
-    int n = (XDR.toShort(mark[0]) << 24) + (XDR.toShort(mark[1]) << 16)
-        + (XDR.toShort(mark[2]) << 8) + XDR.toShort(mark[3]);
+    ByteBuffer b = ByteBuffer.wrap(mark);
+    int n = b.getInt();
     return n & 0x7fffffff;
   }
 
   public static boolean isLastFragment(byte[] mark) {
-    int n = (XDR.toShort(mark[0]) << 24) + (XDR.toShort(mark[1]) << 16)
-        + (XDR.toShort(mark[2]) << 8) + XDR.toShort(mark[3]);
+    ByteBuffer b = ByteBuffer.wrap(mark);
+    int n = b.getInt();
     return (n & 0x80000000) != 0;
   }
 
-  /** check if the rest of data has more than <len> bytes */
-  public static boolean verifyLength(XDR xdr, int len) {
-    return (xdr.bytearr.length - xdr.cursor) >= len;
-  }
-
-  /** Write an XDR message to a TCP ChannelBuffer */
-  public static ChannelBuffer writeMessageTcp(XDR request, boolean last) {
-    byte[] fragmentHeader = XDR.recordMark(request.bytearr.length, last);
-    ChannelBuffer outBuf = ChannelBuffers.buffer(fragmentHeader.length
-        + request.bytearr.length);
-    outBuf.writeBytes(fragmentHeader);
-    outBuf.writeBytes(request.bytearr);
-    return outBuf;
-  }
+  @VisibleForTesting
+  public byte[] getBytes() {
+    ByteBuffer d = buf.duplicate();
+    byte[] b = new byte[d.position()];
+    d.flip();
+    d.get(b);
 
-  /** Write an XDR message to a UDP ChannelBuffer */
-  public static ChannelBuffer writeMessageUdp(XDR response) {
-    ChannelBuffer outBuf = ChannelBuffers.buffer(response.bytearr.length);
-    outBuf.writeBytes(response.bytearr);
-    return outBuf;
+    return b;
   }
-}
+}
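
The rewritten XDR is stateful: a buffer is created either WRITING (growable via ensureFreeSpace) or READING (a read-only wrap), and asReadOnlyWrap() flips a writer into a reader without copying. A minimal round-trip sketch:

    import org.apache.hadoop.oncrpc.XDR;

    public class XdrRoundTripSketch {
      public static void main(String[] args) {
        XDR w = new XDR(); // starts in WRITING state
        w.writeInt(7);
        w.writeBoolean(true);                          // stored as an int
        w.writeLongAsHyper(1L << 40);
        w.writeVariableOpaque(new byte[] { 1, 2, 3 }); // padded to 4 bytes

        XDR r = w.asReadOnlyWrap(); // flips into READING state
        assert r.readInt() == 7;
        assert r.readBoolean();
        assert r.readHyper() == (1L << 40);
        assert r.readVariableOpaque().length == 3;
      }
    }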

+ 16 - 0
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java

@@ -45,6 +45,22 @@ public abstract class Credentials extends RpcAuthInfo {
     return credentials;
   }
   
+  /**
+   * Write AuthFlavor and the credentials to the XDR
+   */
+  public static void writeFlavorAndCredentials(Credentials cred, XDR xdr) {
+    if (cred instanceof CredentialsNone) {
+      xdr.writeInt(AuthFlavor.AUTH_NONE.getValue());
+    } else if (cred instanceof CredentialsSys) {
+      xdr.writeInt(AuthFlavor.AUTH_SYS.getValue());
+    } else if (cred instanceof CredentialsGSS) {
+      xdr.writeInt(AuthFlavor.RPCSEC_GSS.getValue());
+    } else {
+      throw new UnsupportedOperationException("Cannot recognize the credentials");
+    }
+    cred.write(xdr);
+  }
+  
   protected int mCredentialsLength;
   
   protected Credentials(AuthFlavor flavor) {
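
The new writer is the mirror image of the existing readFlavorAndCredentials: the flavor integer first, then the credentials body. A round-trip sketch:

    import org.apache.hadoop.oncrpc.XDR;
    import org.apache.hadoop.oncrpc.security.Credentials;
    import org.apache.hadoop.oncrpc.security.CredentialsNone;

    public class CredentialsRoundTripSketch {
      public static void main(String[] args) {
        XDR xdr = new XDR();
        Credentials.writeFlavorAndCredentials(new CredentialsNone(), xdr);
        Credentials c =
            Credentials.readFlavorAndCredentials(xdr.asReadOnlyWrap());
        assert c instanceof CredentialsNone; // AUTH_NONE round-trips
      }
    }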

+ 21 - 4
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Verifier.java

@@ -20,10 +20,11 @@ package org.apache.hadoop.oncrpc.security;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
 
-/** 
- * Base class for verifier. Currently we only support 3 types of auth flavors: 
- * {@link AuthFlavor#AUTH_NONE}, {@link AuthFlavor#AUTH_SYS}, 
- * and {@link AuthFlavor#RPCSEC_GSS}.
+/**
+ * Base class for verifiers. Authentication currently supports only three
+ * auth flavors: {@link AuthFlavor#AUTH_NONE}, {@link AuthFlavor#AUTH_SYS},
+ * and {@link AuthFlavor#RPCSEC_GSS}, so the verifier only needs to handle
+ * AUTH_NONE and RPCSEC_GSS.
  */
 public abstract class Verifier extends RpcAuthInfo {
 
@@ -31,6 +32,7 @@ public abstract class Verifier extends RpcAuthInfo {
     super(flavor);
   }
 
+  /** Read both AuthFlavor and the verifier from the XDR */
   public static Verifier readFlavorAndVerifier(XDR xdr) {
     AuthFlavor flavor = AuthFlavor.fromValue(xdr.readInt());
     final Verifier verifer;
@@ -46,4 +48,19 @@ public abstract class Verifier extends RpcAuthInfo {
     return verifer;
   }
   
+  /**
+   * Write AuthFlavor and the verifier to the XDR
+   */
+  public static void writeFlavorAndVerifier(Verifier verifier, XDR xdr) {
+    if (verifier instanceof VerifierNone) {
+      xdr.writeInt(AuthFlavor.AUTH_NONE.getValue());
+    } else if (verifier instanceof VerifierGSS) {
+      xdr.writeInt(AuthFlavor.RPCSEC_GSS.getValue());
+    } else {
+      throw new UnsupportedOperationException("Cannot recognize the verifier");
+    }
+    verifier.write(xdr);
+  }
 }
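
The verifier side mirrors the credentials helper. A round-trip sketch:

    import org.apache.hadoop.oncrpc.XDR;
    import org.apache.hadoop.oncrpc.security.Verifier;
    import org.apache.hadoop.oncrpc.security.VerifierNone;

    public class VerifierRoundTripSketch {
      public static void main(String[] args) {
        XDR xdr = new XDR();
        Verifier.writeFlavorAndVerifier(new VerifierNone(), xdr);
        Verifier v = Verifier.readFlavorAndVerifier(xdr.asReadOnlyWrap());
        assert v instanceof VerifierNone; // AUTH_NONE round-trips
      }
    }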

+ 4 - 11
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java

@@ -21,10 +21,7 @@ import org.apache.hadoop.oncrpc.RpcCall;
 import org.apache.hadoop.oncrpc.RpcUtil;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.CredentialsNone;
-import org.apache.hadoop.oncrpc.security.Credentials;
-import org.apache.hadoop.oncrpc.security.Verifier;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
-import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
 import org.apache.hadoop.portmap.PortmapInterface.Procedure;
 
 /**
@@ -37,16 +34,12 @@ public class PortmapRequest {
 
   public static XDR create(PortmapMapping mapping) {
     XDR request = new XDR();
-    RpcCall.write(request,
+    RpcCall call = RpcCall.getInstance(
         RpcUtil.getNewXid(String.valueOf(RpcProgramPortmap.PROGRAM)),
         RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION,
-        Procedure.PMAPPROC_SET.getValue());
-    request.writeInt(AuthFlavor.AUTH_NONE.getValue());
-    Credentials credential = new CredentialsNone();
-    credential.write(request);
-    request.writeInt(AuthFlavor.AUTH_NONE.getValue());
-    Verifier verifier = new VerifierNone();
-    verifier.write(request);
+        Procedure.PMAPPROC_SET.getValue(), new CredentialsNone(),
+        new VerifierNone());
+    call.write(request);
     return mapping.serialize(request);
   }
 }

+ 5 - 4
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapResponse.java

@@ -22,30 +22,31 @@ import java.util.Collection;
 
 import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 
 /**
  * Helper utility for sending portmap response.
  */
 public class PortmapResponse {
   public static XDR voidReply(XDR xdr, int xid) {
-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     return xdr;
   }
 
   public static XDR intReply(XDR xdr, int xid, int value) {
-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     xdr.writeInt(value);
     return xdr;
   }
 
   public static XDR booleanReply(XDR xdr, int xid, boolean value) {
-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     xdr.writeBoolean(value);
     return xdr;
   }
 
   public static XDR pmapList(XDR xdr, int xid, Collection<PortmapMapping> list) {
-    RpcAcceptedReply.voidReply(xdr, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(xdr);
     for (PortmapMapping mapping : list) {
       System.out.println(mapping);
       xdr.writeBoolean(true); // Value follows

+ 4 - 2
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/RpcProgramPortmap.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.RpcCall;
 import org.apache.hadoop.oncrpc.RpcProgram;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.jboss.netty.channel.Channel;
 
 /**
@@ -147,8 +148,9 @@ public class RpcProgramPortmap extends RpcProgram implements PortmapInterface {
       out = getport(xid, in, out);
     } else {
       LOG.info("PortmapHandler unknown rpc procedure=" + portmapProc);
-      RpcAcceptedReply.voidReply(out, xid,
-          RpcAcceptedReply.AcceptState.PROC_UNAVAIL);
+      RpcAcceptedReply.getInstance(xid,
+          RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
+          out);
     }
     return out;
   }

+ 1 - 1
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsTime.java

@@ -39,7 +39,7 @@ public class TestNfsTime {
     t1.serialize(xdr);
     
     // Deserialize it back
-    NfsTime t2 = NfsTime.deserialize(xdr);
+    NfsTime t2 = NfsTime.deserialize(xdr.asReadOnlyWrap());
     
     // Ensure the NfsTimes are equal
     Assert.assertEquals(t1, t2);

+ 1 - 1
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestFileHandle.java

@@ -33,7 +33,7 @@ public class TestFileHandle {
 
     // Deserialize it back 
     FileHandle handle2 = new FileHandle();
-    handle2.deserialize(xdr);
+    handle2.deserialize(xdr.asReadOnlyWrap());
     Assert.assertEquals(handle.getFileId(), 1024);
   }
 }

+ 6 - 2
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java

@@ -24,6 +24,8 @@ import static org.junit.Assert.assertTrue;
 import java.net.InetAddress;
 import java.nio.ByteBuffer;
 
+import org.apache.hadoop.oncrpc.security.CredentialsNone;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.channel.Channel;
@@ -55,7 +57,8 @@ public class TestFrameDecoder {
         InetAddress client, Channel channel) {
       // Get the final complete request and return a void response.
       result = in;
-      return RpcAcceptedReply.voidReply(out, 1234);
+      RpcAcceptedReply.getAcceptInstance(1234, new VerifierNone()).write(out);
+      return out;
     }
 
     @Override
@@ -161,7 +164,8 @@ public class TestFrameDecoder {
 
   static void createPortmapXDRheader(XDR xdr_out, int procedure) {
     // Make this a method
-    RpcCall.write(xdr_out, 0, 100000, 2, procedure);
+    RpcCall.getInstance(0, 100000, 2, procedure, new CredentialsNone(),
+        new VerifierNone()).write(xdr_out);
   }
 
   static XDR createGetportMount() {

+ 1 - 1
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAcceptedReply.java

@@ -47,7 +47,7 @@ public class TestRpcAcceptedReply {
   @Test
   public void testConstructor() {
     Verifier verifier = new VerifierNone();
-    RpcAcceptedReply reply = new RpcAcceptedReply(0, RpcMessage.Type.RPC_REPLY,
+    RpcAcceptedReply reply = new RpcAcceptedReply(0, 
         ReplyState.MSG_ACCEPTED, verifier, AcceptState.SUCCESS);
     assertEquals(0, reply.getXid());
     assertEquals(RpcMessage.Type.RPC_REPLY, reply.getMessageType());

+ 3 - 4
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcDeniedReply.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.oncrpc;
 
 import org.apache.hadoop.oncrpc.RpcDeniedReply.RejectState;
 import org.apache.hadoop.oncrpc.RpcReply.ReplyState;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -39,10 +40,8 @@ public class TestRpcDeniedReply {
   
   @Test
   public void testConstructor() {
-    RpcDeniedReply reply = new RpcDeniedReply(0, RpcMessage.Type.RPC_REPLY,
-        ReplyState.MSG_ACCEPTED, RejectState.AUTH_ERROR) {
-      // Anonymous class
-    };
+    RpcDeniedReply reply = new RpcDeniedReply(0, ReplyState.MSG_ACCEPTED,
+        RejectState.AUTH_ERROR, new VerifierNone());
     Assert.assertEquals(0, reply.getXid());
     Assert.assertEquals(RpcMessage.Type.RPC_REPLY, reply.getMessageType());
     Assert.assertEquals(ReplyState.MSG_ACCEPTED, reply.getState());

+ 4 - 1
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcMessage.java

@@ -26,7 +26,10 @@ import org.junit.Test;
 public class TestRpcMessage {
   private RpcMessage getRpcMessage(int xid, RpcMessage.Type msgType) {
     return new RpcMessage(xid, msgType) {
-      // Anonymous class
+      @Override
+      public XDR write(XDR xdr) {
+        return null;
+      }
     };
   }
   

+ 7 - 2
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcReply.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.oncrpc;
 
 
 import org.apache.hadoop.oncrpc.RpcReply.ReplyState;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -39,8 +40,12 @@ public class TestRpcReply {
   
   @Test
   public void testRpcReply() {
-    RpcReply reply = new RpcReply(0, RpcMessage.Type.RPC_REPLY, ReplyState.MSG_ACCEPTED) {
-      // Anonymous class
+    RpcReply reply = new RpcReply(0, ReplyState.MSG_ACCEPTED,
+        new VerifierNone()) {
+          @Override
+          public XDR write(XDR xdr) {
+            return null;
+          }
     };
     Assert.assertEquals(0, reply.getXid());
     Assert.assertEquals(RpcMessage.Type.RPC_REPLY, reply.getMessageType());

+ 26 - 14
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestXDR.java

@@ -17,23 +17,35 @@
  */
 package org.apache.hadoop.oncrpc;
 
-import static org.junit.Assert.assertTrue;
-
-import java.util.Arrays;
-
 import org.junit.Test;
 
-/**
- * Tests for {@link XDR}
- */
+import junit.framework.Assert;
+
 public class TestXDR {
-  /**
-   * Test {@link XDR#append(byte[], byte[])}
-   */
+  private void serializeInt(int times) {
+    XDR w = new XDR();
+    for (int i = 0; i < times; ++i)
+      w.writeInt(23);
+
+    XDR r = w.asReadOnlyWrap();
+    for (int i = 0; i < times; ++i)
+      Assert.assertEquals(r.readInt(), 23);
+  }
+
+  private void serializeLong(int times) {
+    XDR w = new XDR();
+    for (int i = 0; i < times; ++i)
+      w.writeLongAsHyper(23);
+
+    XDR r = w.asReadOnlyWrap();
+    for (int i = 0; i < times; ++i)
+      Assert.assertEquals(r.readHyper(), 23);
+  }
+
   @Test
-  public void testAppendBytes() {
-    byte[] arr1 = new byte[] {0, 1};
-    byte[] arr2 = new byte[] {2, 3};
-    assertTrue(Arrays.equals(new byte[]{0, 1, 2, 3}, XDR.append(arr1, arr2)));
+  public void testPerformance() {
+    final int TEST_TIMES = 8 << 20;
+    serializeInt(TEST_TIMES);
+    serializeLong(TEST_TIMES);
   }
 }
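
These tests lean on the new read/write split in XDR: writes go into one buffer and asReadOnlyWrap() returns a view with its own read cursor. The idiom, in a small round-trip sketch:

    XDR w = new XDR();
    w.writeInt(42);
    w.writeLongAsHyper(7L);
    // The wrap shares the underlying bytes but reads from position zero,
    // so serialization and deserialization no longer share a cursor.
    XDR r = w.asReadOnlyWrap();
    assert r.readInt() == 42;
    assert r.readHyper() == 7L;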

+ 1 - 1
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestCredentialsSys.java

@@ -38,7 +38,7 @@ public class TestCredentialsSys {
     credential.write(xdr);
     
     CredentialsSys newCredential = new CredentialsSys();
-    newCredential.read(xdr);
+    newCredential.read(xdr.asReadOnlyWrap());
     
     assertEquals(0, newCredential.getUID());
     assertEquals(1, newCredential.getGID());

+ 9 - 5
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.oncrpc.RpcAcceptedReply;
 import org.apache.hadoop.oncrpc.RpcCall;
 import org.apache.hadoop.oncrpc.RpcProgram;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.jboss.netty.channel.Channel;
 
 /**
@@ -88,7 +89,8 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
     if (LOG.isDebugEnabled()) {
       LOG.debug("MOUNT NULLOP : " + " client: " + client);
     }
-    return RpcAcceptedReply.voidReply(out, xid);
+    return RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(
+        out);
   }
 
   @Override
@@ -155,7 +157,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
     
     String host = client.getHostName();
     mounts.remove(new MountEntry(host, path));
-    RpcAcceptedReply.voidReply(out, xid);
+    RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(out);
     return out;
   }
 
@@ -165,7 +167,8 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
       LOG.debug("MOUNT UMNTALL : " + " client: " + client);
     }
     mounts.clear();
-    return RpcAcceptedReply.voidReply(out, xid);
+    return RpcAcceptedReply.getAcceptInstance(xid, new VerifierNone()).write(
+        out);
   }
 
   @Override
@@ -190,8 +193,9 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
       out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
     } else {
       // Invalid procedure
-      RpcAcceptedReply.voidReply(out, xid,
-          RpcAcceptedReply.AcceptState.PROC_UNAVAIL);
+      RpcAcceptedReply.getInstance(xid,
+          RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
+          out);
     }  
     return out;
   }

+ 18 - 9
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java

@@ -49,6 +49,7 @@ import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
 import org.apache.hadoop.nfs.nfs3.response.WccAttr;
 import org.apache.hadoop.nfs.nfs3.response.WccData;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.jboss.netty.channel.Channel;
 
 /**
@@ -291,7 +292,8 @@ class OpenFileCtx {
         WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
             fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
-        Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
+        Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
+            new XDR(), xid, new VerifierNone()), xid);
       } else {
        // Handle repeated write requests (same xid or not).
         // If already replied, send reply again. If not replied, drop the
@@ -313,7 +315,8 @@ class OpenFileCtx {
             WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
                 fileWcc, request.getCount(), request.getStableHow(),
                 Nfs3Constant.WRITE_COMMIT_VERF);
-            Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
+            Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
+                new XDR(), xid, new VerifierNone()), xid);
           }
           updateLastAccessTime();
           
@@ -367,7 +370,8 @@ class OpenFileCtx {
         WccData fileWcc = new WccData(preOpAttr, postOpAttr);
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
             fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
-        Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
+        Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
+            new XDR(), xid, new VerifierNone()), xid);
         writeCtx.setReplied(true);
       }
 
@@ -392,7 +396,8 @@ class OpenFileCtx {
         WccData fileWcc = new WccData(preOpAttr, postOpAttr);
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
             fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
-        Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
+        Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
+            new XDR(), xid, new VerifierNone()), xid);
         writeCtx.setReplied(true);
       }
 
@@ -418,7 +423,8 @@ class OpenFileCtx {
       }
       
       updateLastAccessTime();
-      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
+      Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
+          new XDR(), xid, new VerifierNone()), xid);
     }
   }
   
@@ -707,7 +713,8 @@ class OpenFileCtx {
         WccData fileWcc = new WccData(preOpAttr, latestAttr);
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
             fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
-        Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
+        Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
+            new XDR(), xid, new VerifierNone()), xid);
       }
 
     } catch (IOException e) {
@@ -715,7 +722,8 @@ class OpenFileCtx {
           + offset + " and length " + data.length, e);
       if (!writeCtx.getReplied()) {
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
-        Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
+        Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
+            new XDR(), xid, new VerifierNone()), xid);
        // Keep stream open. Either client retries or StreamMonitor closes it.
       }
 
@@ -752,8 +760,9 @@ class OpenFileCtx {
         WccData fileWcc = new WccData(preOpAttr, latestAttr);
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
             fileWcc, 0, writeCtx.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
-        Nfs3Utils.writeChannel(writeCtx.getChannel(),
-            response.send(new XDR(), writeCtx.getXid()), writeCtx.getXid());
+        Nfs3Utils.writeChannel(writeCtx.getChannel(), response
+            .writeHeaderAndResponse(new XDR(), writeCtx.getXid(),
+                new VerifierNone()), writeCtx.getXid());
       }
     }
     

+ 13 - 10
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java

@@ -98,7 +98,6 @@ import org.apache.hadoop.nfs.nfs3.response.RENAME3Response;
 import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response;
 import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response;
 import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response;
-import org.apache.hadoop.nfs.nfs3.response.VoidResponse;
 import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
 import org.apache.hadoop.nfs.nfs3.response.WccAttr;
 import org.apache.hadoop.nfs.nfs3.response.WccData;
@@ -108,12 +107,13 @@ import org.apache.hadoop.oncrpc.RpcDeniedReply;
 import org.apache.hadoop.oncrpc.RpcProgram;
 import org.apache.hadoop.oncrpc.RpcReply;
 import org.apache.hadoop.oncrpc.XDR;
-import org.apache.hadoop.oncrpc.security.CredentialsSys;
 import org.apache.hadoop.oncrpc.security.Credentials;
-import org.apache.hadoop.oncrpc.security.Verifier;
+import org.apache.hadoop.oncrpc.security.CredentialsSys;
+import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
 import org.apache.hadoop.oncrpc.security.SecurityHandler;
 import org.apache.hadoop.oncrpc.security.SysSecurityHandler;
-import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor;
+import org.apache.hadoop.oncrpc.security.Verifier;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.apache.hadoop.security.AccessControlException;
 import org.jboss.netty.channel.Channel;
 
@@ -209,7 +209,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     if (LOG.isDebugEnabled()) {
       LOG.debug("NFS NULL");
     }
-    return new VoidResponse(Nfs3Status.NFS3_OK);
+    return new NFS3Response(Nfs3Status.NFS3_OK);
   }
 
   @Override
@@ -1790,9 +1790,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
             + rpcCall.getCredential().getFlavor()
             + " is not AUTH_SYS or RPCSEC_GSS.");
         XDR reply = new XDR();
-        reply = RpcDeniedReply.voidReply(reply, xid,
+        RpcDeniedReply rdr = new RpcDeniedReply(xid,
             RpcReply.ReplyState.MSG_ACCEPTED,
-            RpcDeniedReply.RejectState.AUTH_ERROR);
+            RpcDeniedReply.RejectState.AUTH_ERROR, new VerifierNone());
+        rdr.write(reply);
         return reply;
       }
     }
@@ -1857,11 +1858,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       response = commit(xdr, securityHandler, client);
     } else {
       // Invalid procedure
-      RpcAcceptedReply.voidReply(out, xid,
-          RpcAcceptedReply.AcceptState.PROC_UNAVAIL);
+      RpcAcceptedReply.getInstance(xid,
+          RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
+          out);
     }
     if (response != null) {
-      out = response.send(out, xid);
+      // TODO: currently we just return VerifierNone
+      out = response.writeHeaderAndResponse(out, xid, new VerifierNone());
     }
 
     return out;
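
The writeHeaderAndResponse calls in this file replace response.send(out, xid); the caller now passes both the xid and the verifier rather than the response hard-coding AUTH_NONE. Roughly, the method can be pictured as this sketch (the status field stands in for whatever NFS3Response actually stores):

    public XDR writeHeaderAndResponse(XDR out, int xid, Verifier verifier) {
      // Accepted-reply header with the caller-chosen verifier.
      RpcAcceptedReply.getAcceptInstance(xid, verifier).write(out);
      // The NFS3 status code opens the body; subclasses append their own fields.
      out.writeInt(this.status);
      return out;
    }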

+ 9 - 4
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
 import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
 import org.apache.hadoop.nfs.nfs3.response.WccData;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.apache.hadoop.util.Daemon;
 import org.jboss.netty.channel.Channel;
 
@@ -118,7 +119,8 @@ public class WriteManager {
     byte[] data = request.getData().array();
     if (data.length < count) {
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
-      Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
+      Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
+          new XDR(), xid, new VerifierNone()), xid);
       return;
     }
 
@@ -155,7 +157,8 @@ public class WriteManager {
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
             fileWcc, count, request.getStableHow(),
             Nfs3Constant.WRITE_COMMIT_VERF);
-        Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
+        Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
+            new XDR(), xid, new VerifierNone()), xid);
         return;
       }
 
@@ -182,10 +185,12 @@ public class WriteManager {
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
             fileWcc, count, request.getStableHow(),
             Nfs3Constant.WRITE_COMMIT_VERF);
-        Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
+        Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
+            new XDR(), xid, new VerifierNone()), xid);
       } else {
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
-        Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid), xid);
+        Nfs3Utils.writeChannel(channel, response.writeHeaderAndResponse(
+            new XDR(), xid, new VerifierNone()), xid);
       }
     }
 

+ 9 - 18
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestOutOfOrderWrite.java

@@ -38,6 +38,8 @@ import org.apache.hadoop.oncrpc.RpcReply;
 import org.apache.hadoop.oncrpc.SimpleTcpClient;
 import org.apache.hadoop.oncrpc.SimpleTcpClientHandler;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.CredentialsNone;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelHandlerContext;
@@ -58,15 +60,9 @@ public class TestOutOfOrderWrite {
 
   static XDR create() {
     XDR request = new XDR();
-    RpcCall.write(request, 0x8000004c, Nfs3Constant.PROGRAM,
-        Nfs3Constant.VERSION, Nfs3Constant.NFSPROC3.CREATE.getValue());
-
-    // credentials
-    request.writeInt(0); // auth null
-    request.writeInt(0); // length zero
-    // verifier
-    request.writeInt(0); // auth null
-    request.writeInt(0); // length zero
+    RpcCall.getInstance(0x8000004c, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
+        Nfs3Constant.NFSPROC3.CREATE.getValue(), new CredentialsNone(),
+        new VerifierNone()).write(request);
 
     SetAttr3 objAttr = new SetAttr3();
     CREATE3Request createReq = new CREATE3Request(new FileHandle("/"),
@@ -78,15 +74,10 @@ public class TestOutOfOrderWrite {
   static XDR write(FileHandle handle, int xid, long offset, int count,
       byte[] data) {
     XDR request = new XDR();
-    RpcCall.write(request, xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
-        Nfs3Constant.NFSPROC3.WRITE.getValue());
-
-    // credentials
-    request.writeInt(0); // auth null
-    request.writeInt(0); // length zero
-    // verifier
-    request.writeInt(0); // auth null
-    request.writeInt(0); // length zero
+    RpcCall.getInstance(xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
+        Nfs3Constant.NFSPROC3.WRITE.getValue(), new CredentialsNone(),
+        new VerifierNone()).write(request);
+
     WRITE3Request write1 = new WRITE3Request(handle, offset, count,
         WriteStableHow.UNSTABLE, ByteBuffer.wrap(data));
     write1.serialize(request);

+ 4 - 5
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestPortmapRegister.java

@@ -26,6 +26,8 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.oncrpc.RegistrationClient;
 import org.apache.hadoop.oncrpc.RpcCall;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.CredentialsNone;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 import org.apache.hadoop.portmap.PortmapMapping;
 import org.apache.hadoop.portmap.PortmapRequest;
 
@@ -78,11 +80,8 @@ public class TestPortmapRegister {
   
   static void createPortmapXDRheader(XDR xdr_out, int procedure) {
     // TODO: Move this to RpcRequest
-    RpcCall.write(xdr_out, 0, 100000, 2, procedure);
-    xdr_out.writeInt(0); //no auth
-    xdr_out.writeInt(0);
-    xdr_out.writeInt(0);
-    xdr_out.writeInt(0);
+    RpcCall.getInstance(0, 100000, 2, procedure, new CredentialsNone(),
+        new VerifierNone()).write(xdr_out);
     
     /*
     xdr_out.putInt(1); //unix auth

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestUdpServer.java

@@ -27,6 +27,8 @@ import java.net.UnknownHostException;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.oncrpc.RpcCall;
 import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.CredentialsNone;
+import org.apache.hadoop.oncrpc.security.VerifierNone;
 
 // TODO: convert this to Junit
 public class TestUdpServer {
@@ -82,7 +84,8 @@ public class TestUdpServer {
   
   static void createPortmapXDRheader(XDR xdr_out, int procedure) {
     // Make this a method
-    RpcCall.write(xdr_out, 0, 100000, 2, procedure);
+    RpcCall.getInstance(0, 100000, 2, procedure, new CredentialsNone(),
+        new VerifierNone()).write(xdr_out);
   }
  
   static void testGetportMount() {

+ 25 - 1
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -244,6 +244,9 @@ Release 2.3.0 - UNRELEASED
 
   NEW FEATURES
 
+    HDFS-5122. Support failover and retry in WebHdfsFileSystem for NN HA.
+    (Haohui Mai via jing9)
+
   IMPROVEMENTS
 
     HDFS-4657.  Limit the number of blocks logged by the NN after a block
@@ -303,7 +306,22 @@ Release 2.3.0 - UNRELEASED
     HDFS-5170. BlockPlacementPolicyDefault uses the wrong classname when
     alerting to enable debug logging. (Andrew Wang)
 
-Release 2.1.1-beta - UNRELEASED
+    HDFS-5031. BlockScanner scans the block multiple times. (Vinay via Arpit
+    Agarwal)
+
+Release 2.2.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 2.1.1-beta - 2013-09-23
 
   INCOMPATIBLE CHANGES
 
@@ -384,6 +402,9 @@ Release 2.1.1-beta - UNRELEASED
 
     HDFS-4680. Audit logging of delegation tokens for MR tracing. (Andrew Wang)
 
+    HDFS-5212. Refactor RpcMessage and NFS3Response to support different 
+    types of authentication information. (jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -455,6 +476,9 @@ Release 2.1.1-beta - UNRELEASED
     HDFS-5192. NameNode may fail to start when 
     dfs.client.test.drop.namenode.response.number is set. (jing9)
 
+    HDFS-5219. Add configuration keys for retry policy in WebHDFSFileSystem.
+    (Haohui Mai via jing9)
+
 Release 2.1.0-beta - 2013-08-22
 
   INCOMPATIBLE CHANGES

+ 12 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -513,4 +513,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // Timeout to wait for block receiver and responder thread to stop
   public static final String DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY = "dfs.datanode.xceiver.stop.timeout.millis";
   public static final long   DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT = 60000;
+
+  // WebHDFS retry policy
+  public static final String  DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY = "dfs.http.client.retry.policy.enabled";
+  public static final boolean DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT = false;
+  public static final String  DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY = "dfs.http.client.retry.policy.spec";
+  public static final String  DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,...
+  public static final String  DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY = "dfs.http.client.failover.max.attempts";
+  public static final int     DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT = 15;
+  public static final String  DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY = "dfs.http.client.failover.sleep.base.millis";
+  public static final int     DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT = 500;
+  public static final String  DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY = "dfs.http.client.failover.sleep.max.millis";
+  public static final int     DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT = 15000;
 }
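
Per the t1,n1,t2,n2 comment, the spec string is a flat list of (sleep-millis, retry-count) pairs. Enabling the policy from client code might look like:

    Configuration conf = new Configuration();
    conf.setBoolean("dfs.http.client.retry.policy.enabled", true);
    // Pairs read left to right: retry up to 6 times with 10s pauses, then up
    // to 10 more times with 60s pauses, before giving up.
    conf.set("dfs.http.client.retry.policy.spec", "10000,6,60000,10");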

+ 43 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -38,6 +38,7 @@ import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.SecureRandom;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
@@ -610,6 +611,48 @@ public class DFSUtil {
       Configuration conf) {
     return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
   }
+
+  /**
+   * Returns the HTTP addresses of all HA namenodes from the configuration,
+   * keyed by nameservice ID and then by namenode ID.
+   *
+   * @param conf configuration
+   * @return map of nameservice ID to namenode ID to InetSocketAddress
+   */
+  public static Map<String, Map<String, InetSocketAddress>> getHaNnHttpAddresses(
+      Configuration conf) {
+    return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+  }
+
+  /**
+   * Resolve an HDFS URL into real InetSocketAddresses. It works like a DNS
+   * resolver when the URL points to a non-HA cluster. When the URL points to an HA
+   * cluster, the resolver further resolves the logical name (i.e., the authority
+   * in the URL) into real namenode addresses.
+   */
+  public static InetSocketAddress[] resolve(URI uri, int schemeDefaultPort,
+      Configuration conf) throws IOException {
+    ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>();
+
+    if (!HAUtil.isLogicalUri(conf, uri)) {
+      InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(),
+          schemeDefaultPort);
+      ret.add(addr);
+
+    } else {
+      Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil
+          .getHaNnHttpAddresses(conf);
+
+      for (Map<String, InetSocketAddress> addrs : addresses.values()) {
+        for (InetSocketAddress addr : addrs.values()) {
+          ret.add(addr);
+        }
+      }
+    }
+
+    InetSocketAddress[] r = new InetSocketAddress[ret.size()];
+    return ret.toArray(r);
+  }
   
   /**
    * Returns list of InetSocketAddress corresponding to  backup node rpc 
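
Callers use resolve the same way for both cluster shapes; only the number of returned addresses differs. A sketch with hypothetical host names (exception handling elided):

    // Non-HA: the authority is a real host, resolved directly.
    InetSocketAddress[] one =
        DFSUtil.resolve(new URI("webhdfs://nn.example.com:50070"), 50070, conf);

    // HA: "ns1" is a logical name; all configured namenode HTTP addresses
    // come back so the client can fail over between them.
    InetSocketAddress[] all =
        DFSUtil.resolve(new URI("webhdfs://ns1"), 50070, conf);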

+ 11 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java

@@ -100,6 +100,7 @@ class BlockPoolSliceScanner {
   private long currentPeriodStart = Time.now();
   private long bytesLeft = 0; // Bytes to scan in this period
   private long totalBytesToScan = 0;
+  private boolean isNewPeriod = true;
   
   private final LogFileHandler verificationLog;
   
@@ -126,7 +127,10 @@ class BlockPoolSliceScanner {
       public int compare(BlockScanInfo left, BlockScanInfo right) {
         final long l = left.lastScanTime;
         final long r = right.lastScanTime;
-        return l < r? -1: l > r? 1: 0; 
+        // Compare the blocks themselves when scan times are equal, because
+        // TreeMap uses the comparator (if available) to decide whether an
+        // object already exists in the map.
+        return l < r? -1: l > r? 1: left.compareTo(right); 
       }
     };
 
@@ -148,8 +152,6 @@ class BlockPoolSliceScanner {
     public boolean equals(Object that) {
       if (this == that) {
         return true;
-      } else if (that == null || !(that instanceof BlockScanInfo)) {
-        return false;
       }
       return super.equals(that);
     }
@@ -539,10 +541,12 @@ class BlockPoolSliceScanner {
                   entry.genStamp));
               if (info != null) {
                 if (processedBlocks.get(entry.blockId) == null) {
-                  updateBytesLeft(-info.getNumBytes());
+                  if (isNewPeriod) {
+                    updateBytesLeft(-info.getNumBytes());
+                  }
                   processedBlocks.put(entry.blockId, 1);
                 }
-                if (logIterator.isPrevious()) {
+                if (logIterator.isLastReadFromPrevious()) {
                   // write the log entry to current file
                   // so that the entry is preserved for later runs.
                   verificationLog.append(entry.verificationTime, entry.genStamp,
@@ -557,6 +561,7 @@ class BlockPoolSliceScanner {
       } finally {
         IOUtils.closeStream(logIterator);
       }
+      isNewPeriod = false;
     }
     
     
@@ -597,6 +602,7 @@ class BlockPoolSliceScanner {
     // reset the byte counts :
     bytesLeft = totalBytesToScan;
     currentPeriodStart = Time.now();
+    isNewPeriod = true;
   }
   
   private synchronized boolean workRemainingInCurrentPeriod() {
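
The tie-break matters because TreeMap consults the comparator, not equals(), to decide whether a key already exists, so two distinct blocks with the same lastScanTime used to collapse into one entry. A self-contained illustration using a hypothetical (time, id) pair:

    // compare() == 0 on a tie makes TreeSet treat distinct entries as equal.
    TreeSet<long[]> bad = new TreeSet<>((a, b) -> Long.compare(a[0], b[0]));
    bad.add(new long[] {5, 1});
    bad.add(new long[] {5, 2});   // silently dropped as a "duplicate"
    assert bad.size() == 1;

    // Tie-breaking on the second field, like left.compareTo(right) above,
    // keeps the ordering consistent with identity.
    TreeSet<long[]> good = new TreeSet<>((a, b) -> a[0] != b[0]
        ? Long.compare(a[0], b[0]) : Long.compare(a[1], b[1]));
    good.add(new long[] {5, 1});
    good.add(new long[] {5, 2});
    assert good.size() == 2;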

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/RollingLogs.java

@@ -33,6 +33,12 @@ public interface RollingLogs {
   public interface LineIterator extends Iterator<String>, Closeable {
     /** Is the iterator iterating the previous? */
     public boolean isPrevious();
+
+    /**
+     * Was the last entry read from the previous file? This should be called
+     * after reading.
+     */
+    public boolean isLastReadFromPrevious();
   }
 
   /**

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RollingLogsImpl.java

@@ -134,6 +134,7 @@ class RollingLogsImpl implements RollingLogs {
    */
   private class Reader implements RollingLogs.LineIterator {
     private File file;
+    private File lastReadFile;
     private BufferedReader reader;
     private String line;
     private boolean closed = false;
@@ -149,6 +150,11 @@ class RollingLogsImpl implements RollingLogs {
       return file == prev;
     }
 
+    @Override
+    public boolean isLastReadFromPrevious() {
+      return lastReadFile == prev;
+    }
+
     private boolean openFile() throws IOException {
 
       for(int i=0; i<2; i++) {
@@ -203,6 +209,7 @@ class RollingLogsImpl implements RollingLogs {
     public String next() {
       String curLine = line;
       try {
+        lastReadFile = file;
         readNext();
       } catch (IOException e) {
         DataBlockScanner.LOG.warn("Failed to read next line.", e);
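
The distinction being fixed: next() records which file the line came from before advancing, whereas isPrevious() reports where the iterator is now, which may already be the current file. The reading pattern the scanner relies on looks like:

    while (logIterator.hasNext()) {
      String entry = logIterator.next();          // lastReadFile captured first
      if (logIterator.isLastReadFromPrevious()) {
        // The entry came from the previous log file, even though the
        // iterator itself may have rolled over to the current one.
      }
    }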

+ 136 - 37
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.ByteRangeInputStream;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
@@ -86,6 +87,7 @@ import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryUtils;
 import org.apache.hadoop.ipc.RemoteException;
@@ -119,7 +121,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   /** SPNEGO authenticator */
   private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
-  /** Default connection factory may be overriden in tests to use smaller timeout values */
+  /** Default connection factory may be overridden in tests to use smaller timeout values */
   URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
   /** Configures connections for AuthenticatedURL */
   private final ConnectionConfigurator CONN_CONFIGURATOR =
@@ -159,12 +161,13 @@ public class WebHdfsFileSystem extends FileSystem
   }
 
   private UserGroupInformation ugi;
-  private InetSocketAddress nnAddr;
   private URI uri;
   private boolean hasInitedToken;
   private Token<?> delegationToken;
   private RetryPolicy retryPolicy = null;
   private Path workingDir;
+  private InetSocketAddress nnAddrs[];
+  private int currentNNAddrIndex;
 
   /**
    * Return the protocol scheme for the FileSystem.
@@ -174,7 +177,7 @@ public class WebHdfsFileSystem extends FileSystem
    */
   @Override
   public String getScheme() {
-    return "webhdfs";
+    return SCHEME;
   }
 
   @Override
@@ -183,20 +186,42 @@ public class WebHdfsFileSystem extends FileSystem
     super.initialize(uri, conf);
     setConf(conf);
     ugi = UserGroupInformation.getCurrentUser();
+
     try {
-      this.uri = new URI(uri.getScheme(), uri.getAuthority(), null, null, null);
+      this.uri = new URI(uri.getScheme(), uri.getAuthority(), null,
+          null, null);
+      this.nnAddrs = DFSUtil.resolve(this.uri, getDefaultPort(), conf);
     } catch (URISyntaxException e) {
       throw new IllegalArgumentException(e);
     }
-    this.nnAddr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());
-    this.retryPolicy = 
-        RetryUtils.getDefaultRetryPolicy(
-            conf, 
-            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, 
-            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT, 
-            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
-            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
-            SafeModeException.class);
+
+    if (!HAUtil.isLogicalUri(conf, this.uri)) {
+      this.retryPolicy =
+          RetryUtils.getDefaultRetryPolicy(
+              conf,
+              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY,
+              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
+              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
+              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
+              SafeModeException.class);
+    } else {
+
+      int maxFailoverAttempts = conf.getInt(
+          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
+          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
+      int failoverSleepBaseMillis = conf.getInt(
+          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
+          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
+      int failoverSleepMaxMillis = conf.getInt(
+          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
+          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
+
+      this.retryPolicy = RetryPolicies
+          .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
+              maxFailoverAttempts, failoverSleepBaseMillis,
+              failoverSleepMaxMillis);
+    }
+
     this.workingDir = getHomeDirectory();
 
     if (UserGroupInformation.isSecurityEnabled()) {
@@ -348,6 +373,19 @@ public class WebHdfsFileSystem extends FileSystem
     return ((RemoteException)ioe).unwrapRemoteException();
   }
 
+  private synchronized InetSocketAddress getCurrentNNAddr() {
+    return nnAddrs[currentNNAddrIndex];
+  }
+
+  /**
+   * Reset the appropriate state to gracefully fail over to another name node
+   */
+  private synchronized void resetStateToFailOver() {
+    currentNNAddrIndex = (currentNNAddrIndex + 1) % nnAddrs.length;
+    delegationToken = null;
+    hasInitedToken = false;
+  }
+
   /**
    * Return a URL pointing to given path on the namenode.
    *
@@ -357,6 +395,7 @@ public class WebHdfsFileSystem extends FileSystem
    * @throws IOException on error constructing the URL
    */
   private URL getNamenodeURL(String path, String query) throws IOException {
+    InetSocketAddress nnAddr = getCurrentNNAddr();
     final URL url = new URL("http", nnAddr.getHostName(),
           nnAddr.getPort(), path + '?' + query);
     if (LOG.isTraceEnabled()) {
@@ -414,38 +453,28 @@ public class WebHdfsFileSystem extends FileSystem
    */
   private Map<?, ?> run(final HttpOpParam.Op op, final Path fspath,
       final Param<?,?>... parameters) throws IOException {
-    return new Runner(op, fspath, parameters).run().json;
+    return new FsPathRunner(op, fspath, parameters).run().json;
   }
 
   /**
    * This class is for initialing a HTTP connection, connecting to server,
    * obtaining a response, and also handling retry on failures.
    */
-  class Runner {
-    private final HttpOpParam.Op op;
-    private final URL url;
+  abstract class AbstractRunner {
+    abstract protected URL getUrl() throws IOException;
+
+    protected final HttpOpParam.Op op;
     private final boolean redirected;
 
     private boolean checkRetry;
-    private HttpURLConnection conn = null;
+    protected HttpURLConnection conn = null;
     private Map<?, ?> json = null;
 
-    Runner(final HttpOpParam.Op op, final URL url, final boolean redirected) {
+    protected AbstractRunner(final HttpOpParam.Op op, boolean redirected) {
       this.op = op;
-      this.url = url;
       this.redirected = redirected;
     }
 
-    Runner(final HttpOpParam.Op op, final Path fspath,
-        final Param<?,?>... parameters) throws IOException {
-      this(op, toUrl(op, fspath, parameters), false);
-    }
-
-    Runner(final HttpOpParam.Op op, final HttpURLConnection conn) {
-      this(op, null, false);
-      this.conn = conn;
-    }
-
     private HttpURLConnection getHttpUrlConnection(final URL url)
         throws IOException, AuthenticationException {
       UserGroupInformation connectUgi = ugi.getRealUser();
@@ -493,6 +522,7 @@ public class WebHdfsFileSystem extends FileSystem
   
     private void init() throws IOException {
       checkRetry = !redirected;
+      URL url = getUrl();
       try {
         conn = getHttpUrlConnection(url);
       } catch(AuthenticationException ae) {
@@ -519,7 +549,23 @@ public class WebHdfsFileSystem extends FileSystem
       }
     }
 
-    Runner run() throws IOException {
+    AbstractRunner run() throws IOException {
+      /**
+       * Do the real work.
+       *
+       * There are three cases that the code inside the loop can throw an
+       * IOException:
+       *
+       * <ul>
+       * <li>The connection has failed (e.g., ConnectException,
+       * @see FailoverOnNetworkExceptionRetry for more details)</li>
+       * <li>The namenode enters the standby state (i.e., StandbyException).</li>
+       * <li>The server returns errors for the command (i.e., RemoteException)</li>
+       * </ul>
+       *
+       * The call to shouldRetry() consults the retry policy. The policy
+       * examines the exception and swallows it if it decides to rerun the work.
+       */
       for(int retry = 0; ; retry++) {
         try {
           init();
@@ -537,14 +583,25 @@ public class WebHdfsFileSystem extends FileSystem
 
     private void shouldRetry(final IOException ioe, final int retry
         ) throws IOException {
+      InetSocketAddress nnAddr = getCurrentNNAddr();
       if (checkRetry) {
         try {
           final RetryPolicy.RetryAction a = retryPolicy.shouldRetry(
               ioe, retry, 0, true);
-          if (a.action == RetryPolicy.RetryAction.RetryDecision.RETRY) {
+
+          boolean isRetry = a.action == RetryPolicy.RetryAction.RetryDecision.RETRY;
+          boolean isFailoverAndRetry =
+              a.action == RetryPolicy.RetryAction.RetryDecision.FAILOVER_AND_RETRY;
+
+          if (isRetry || isFailoverAndRetry) {
             LOG.info("Retrying connect to namenode: " + nnAddr
                 + ". Already tried " + retry + " time(s); retry policy is "
-                + retryPolicy + ", delay " + a.delayMillis + "ms.");      
+                + retryPolicy + ", delay " + a.delayMillis + "ms.");
+
+            if (isFailoverAndRetry) {
+              resetStateToFailOver();
+            }
+
             Thread.sleep(a.delayMillis);
             return;
           }
@@ -617,6 +674,48 @@ public class WebHdfsFileSystem extends FileSystem
     }
   }
 
+  final class FsPathRunner extends AbstractRunner {
+    private final Path fspath;
+    private final Param<?, ?>[] parameters;
+
+    FsPathRunner(final HttpOpParam.Op op, final Path fspath, final Param<?,?>... parameters) {
+      super(op, false);
+      this.fspath = fspath;
+      this.parameters = parameters;
+    }
+
+    @Override
+    protected URL getUrl() throws IOException {
+      return toUrl(op, fspath, parameters);
+    }
+  }
+
+  final class URLRunner extends AbstractRunner {
+    private final URL url;
+    @Override
+    protected URL getUrl() {
+      return url;
+    }
+
+    protected URLRunner(final HttpOpParam.Op op, final URL url, boolean redirected) {
+      super(op, redirected);
+      this.url = url;
+    }
+  }
+
+  @VisibleForTesting
+  final class ConnRunner extends AbstractRunner {
+    protected ConnRunner(final HttpOpParam.Op op, HttpURLConnection conn) {
+      super(op, false);
+      this.conn = conn;
+    }
+
+    @Override
+    protected URL getUrl() {
+      return null;
+    }
+  }
+
   private FsPermission applyUMask(FsPermission permission) {
     if (permission == null) {
       permission = FsPermission.getDefault();
@@ -772,7 +871,7 @@ public class WebHdfsFileSystem extends FileSystem
     statistics.incrementWriteOps(1);
 
     final HttpOpParam.Op op = PutOpParam.Op.CREATE;
-    return new Runner(op, f, 
+    return new FsPathRunner(op, f,
         new PermissionParam(applyUMask(permission)),
         new OverwriteParam(overwrite),
         new BufferSizeParam(bufferSize),
@@ -788,7 +887,7 @@ public class WebHdfsFileSystem extends FileSystem
     statistics.incrementWriteOps(1);
 
     final HttpOpParam.Op op = PostOpParam.Op.APPEND;
-    return new Runner(op, f, new BufferSizeParam(bufferSize))
+    return new FsPathRunner(op, f, new BufferSizeParam(bufferSize))
       .run()
       .write(bufferSize);
   }
@@ -835,7 +934,7 @@ public class WebHdfsFileSystem extends FileSystem
         final boolean resolved) throws IOException {
       final URL offsetUrl = offset == 0L? url
           : new URL(url + "&" + new OffsetParam(offset));
-      return new Runner(GetOpParam.Op.OPEN, offsetUrl, resolved).run().conn;
+      return new URLRunner(GetOpParam.Op.OPEN, offsetUrl, resolved).run().conn;
     }  
   }
 
@@ -909,7 +1008,7 @@ public class WebHdfsFileSystem extends FileSystem
     final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
     final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
     final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m); 
-    SecurityUtil.setTokenService(token, nnAddr);
+    SecurityUtil.setTokenService(token, getCurrentNNAddr());
     return token;
   }
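
Tying the pieces together, the failover behaviour of AbstractRunner.run() and shouldRetry() condenses to roughly this sketch (connection setup and JSON parsing elided):

    for (int retry = 0; ; retry++) {
      try {
        URL url = getUrl();              // built against the current namenode
        // ... connect, validate the response ...
        return this;
      } catch (IOException ioe) {
        RetryPolicy.RetryAction a = retryPolicy.shouldRetry(ioe, retry, 0, true);
        boolean failover =
            a.action == RetryPolicy.RetryAction.RetryDecision.FAILOVER_AND_RETRY;
        if (!failover && a.action != RetryPolicy.RetryAction.RetryDecision.RETRY) {
          throw ioe;                     // the policy gave up
        }
        if (failover) {
          resetStateToFailOver();        // rotate nnAddrs index, drop the token
        }
        try {
          Thread.sleep(a.delayMillis);
        } catch (InterruptedException e) {
          throw new IOException(e);
        }
      }
    }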
 

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -871,7 +871,11 @@ public class TestDFSClientRetries {
 
     final Path dir = new Path("/testNamenodeRestart");
 
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
+    if (isWebHDFS) {
+      conf.setBoolean(DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
+    } else {
+      conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
+    }
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
     conf.setInt(MiniDFSCluster.DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 5000);
 

+ 62 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java

@@ -20,20 +20,25 @@ package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
-import org.apache.hadoop.util.Shell;
-
-import static org.junit.Assert.*;
-import org.junit.Assume;
-import static org.hamcrest.CoreMatchers.*;
+import static org.hamcrest.CoreMatchers.not;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -54,8 +59,11 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Shell;
+import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -539,6 +547,55 @@ public class TestDFSUtil {
     assertEquals("ns1", DFSUtil.getSecondaryNameServiceId(conf));
   }
 
+  @Test
+  public void testGetHaNnHttpAddresses() throws IOException {
+    final String LOGICAL_HOST_NAME = "ns1";
+    final String NS1_NN1_ADDR      = "ns1-nn1.example.com:8020";
+    final String NS1_NN2_ADDR      = "ns1-nn2.example.com:8020";
+
+    Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
+
+    Map<String, Map<String, InetSocketAddress>> map =
+        DFSUtil.getHaNnHttpAddresses(conf);
+
+    assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
+    assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
+  }
+
+  @Test
+  public void testResolve() throws IOException, URISyntaxException {
+    final String LOGICAL_HOST_NAME = "ns1";
+    final String NS1_NN1_HOST      = "ns1-nn1.example.com";
+    final String NS1_NN2_HOST      = "ns1-nn2.example.com";
+    final String NS1_NN1_ADDR      = "ns1-nn1.example.com:8020";
+    final String NS1_NN2_ADDR      = "ns1-nn2.example.com:8020";
+    final int DEFAULT_PORT         = NameNode.DEFAULT_PORT;
+
+    Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR);
+    URI uri = new URI("webhdfs://ns1");
+    assertTrue(HAUtil.isLogicalUri(conf, uri));
+    InetSocketAddress[] addrs = DFSUtil.resolve(uri, DEFAULT_PORT, conf);
+    assertArrayEquals(new InetSocketAddress[] {
+      new InetSocketAddress(NS1_NN1_HOST, DEFAULT_PORT),
+      new InetSocketAddress(NS1_NN2_HOST, DEFAULT_PORT),
+    }, addrs);
+  }
+
+  private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
+    HdfsConfiguration conf = new HdfsConfiguration();
+
+    conf.set(DFS_NAMESERVICES, "ns1");
+    conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, "ns1"),"nn1,nn2");
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"), nnaddr1);
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn2"), nnaddr2);
+
+    conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalHostName,
+        ConfiguredFailoverProxyProvider.class.getName());
+    return conf;
+  }
+
   @Test
   public void testSubstituteForWildcardAddress() throws IOException {
     assertEquals("foo:12345",

+ 39 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

@@ -459,4 +459,43 @@ public class TestDatanodeBlockScanner {
     assertArrayEquals(expectedSubDirs, ReplicaInfo.parseSubDirs(testFile).subDirs);
     assertEquals(BASE_PATH, ReplicaInfo.parseSubDirs(testFile).baseDirPath);
   }
+
+  @Test
+  public void testDuplicateScans() throws Exception {
+    long startTime = Time.now();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
+        .numDataNodes(1).build();
+    FileSystem fs = null;
+    try {
+      fs = cluster.getFileSystem();
+      DataNode dataNode = cluster.getDataNodes().get(0);
+      int infoPort = dataNode.getInfoPort();
+      long scanTimeBefore = 0, scanTimeAfter = 0;
+      for (int i = 1; i < 10; i++) {
+        Path fileName = new Path("/test" + i);
+        DFSTestUtil.createFile(fs, fileName, 1024, (short) 1, 1000L);
+        waitForVerification(infoPort, fs, fileName, i, startTime, TIMEOUT);
+        if (i > 1) {
+          scanTimeAfter = DataNodeTestUtils.getLatestScanTime(dataNode,
+              DFSTestUtil.getFirstBlock(fs, new Path("/test" + (i - 1))));
+          assertFalse("scan time shoud not be 0", scanTimeAfter == 0);
+          assertEquals("There should not be duplicate scan", scanTimeBefore,
+              scanTimeAfter);
+        }
+
+        scanTimeBefore = DataNodeTestUtils.getLatestScanTime(dataNode,
+            DFSTestUtil.getFirstBlock(fs, new Path("/test" + i)));
+      }
+      cluster.restartDataNode(0);
+      Thread.sleep(10000);
+      dataNode = cluster.getDataNodes().get(0);
+      scanTimeAfter = DataNodeTestUtils.getLatestScanTime(dataNode,
+          DFSTestUtil.getFirstBlock(fs, new Path("/test" + (9))));
+      assertEquals("There should not be duplicate scan", scanTimeBefore,
+          scanTimeAfter);
+    } finally {
+      IOUtils.closeStream(fs);
+      cluster.shutdown();
+    }
+  }
 }

+ 13 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java

@@ -115,11 +115,22 @@ public class DataNodeTestUtils {
   }
   
   public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
+    BlockPoolSliceScanner bpScanner = getBlockPoolScanner(dn, b);
+    bpScanner.verifyBlock(b);
+  }
+
+  private static BlockPoolSliceScanner getBlockPoolScanner(DataNode dn,
+      ExtendedBlock b) {
     DataBlockScanner scanner = dn.getBlockScanner();
     BlockPoolSliceScanner bpScanner = scanner.getBPScanner(b.getBlockPoolId());
-    bpScanner.verifyBlock(b);
+    return bpScanner;
   }
-  
+
+  public static long getLatestScanTime(DataNode dn, ExtendedBlock b) {
+    BlockPoolSliceScanner scanner = getBlockPoolScanner(dn, b);
+    return scanner.getLastScanTime(b.getLocalBlock());
+  }
+
   public static void shutdownBlockScanner(DataNode dn) {
     if (dn.blockScanner != null) {
       dn.blockScanner.shutdown();

+ 77 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java

@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.web;
+
+import java.net.URI;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.junit.Assert;
+import org.junit.Test;
+
+/** Test whether WebHDFS can connect to an HA cluster */
+public class TestWebHDFSForHA {
+
+  private static final String LOGICAL_NAME = "minidfs";
+
+  @Test
+  public void test() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
+
+    MiniDFSNNTopology topo = new MiniDFSNNTopology()
+        .addNameservice(new MiniDFSNNTopology.NSConf(LOGICAL_NAME).addNN(
+            new MiniDFSNNTopology.NNConf("nn1")).addNN(
+            new MiniDFSNNTopology.NNConf("nn2")));
+
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
+        .numDataNodes(3).build();
+
+    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
+
+    FileSystem fs = null;
+    try {
+      cluster.waitActive();
+
+      final String uri = WebHdfsFileSystem.SCHEME + "://" + LOGICAL_NAME;
+      fs = (WebHdfsFileSystem) FileSystem.get(new URI(uri), conf);
+      cluster.transitionToActive(0);
+
+      final Path dir = new Path("/test");
+      Assert.assertTrue(fs.mkdirs(dir));
+
+      cluster.shutdownNameNode(0);
+      cluster.transitionToActive(1);
+
+      final Path dir2 = new Path("/test2");
+      Assert.assertTrue(fs.mkdirs(dir2));
+
+    } finally {
+      if (fs != null) {
+        fs.close();
+      }
+      cluster.shutdown();
+    }
+  }
+}

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java

@@ -81,7 +81,7 @@ public class WebHdfsTestUtil {
   
   public static HttpURLConnection twoStepWrite(final WebHdfsFileSystem webhdfs,
       final HttpOpParam.Op op, HttpURLConnection conn) throws IOException {
-    return webhdfs.new Runner(op, conn).twoStepWrite();
+    return webhdfs.new ConnRunner(op, conn).twoStepWrite();
   }
 
   public static FSDataOutputStream write(final WebHdfsFileSystem webhdfs,

+ 16 - 1
hadoop-mapreduce-project/CHANGES.txt

@@ -164,6 +164,9 @@ Release 2.3.0 - UNRELEASED
 
     MAPREDUCE-5484. YarnChild unnecessarily loads job conf twice (Sandy Ryza)
 
+    MAPREDUCE-5487. In task processes, JobConf is unnecessarily loaded again
+    in Limits (Sandy Ryza)
+
   BUG FIXES
 
     MAPREDUCE-5316. job -list-attempt-ids command does not handle illegal
@@ -175,7 +178,19 @@ Release 2.3.0 - UNRELEASED
     MAPREDUCE-5404. HSAdminServer does not use ephemeral ports in minicluster
     mode (Ted Yu via jlowe)
 
-Release 2.1.1-beta - UNRELEASED
+Release 2.2.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 2.1.1-beta - 2013-09-23
 
   INCOMPATIBLE CHANGES
 

+ 3 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.counters.Limits;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
@@ -76,6 +77,8 @@ class YarnChild {
     LOG.debug("Child starting");
 
     final JobConf job = new JobConf();
+    // Initializing Limits with our JobConf avoids loading the conf twice
+    Limits.init(job);
     job.addResource(MRJobConfig.JOB_CONF_FILE);
     UserGroupInformation.setConfiguration(job);
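
The ordering is the point: Limits.init(job) runs before anything reads the counter limits, so the statics are populated from the JobConf the task already built instead of triggering a second configuration load. As a usage sketch:

    JobConf job = new JobConf();
    Limits.init(job);                     // first call wins; later calls no-op
    job.addResource(MRJobConfig.JOB_CONF_FILE);
    // Subsequent accesses are served from the cached values.
    int maxCounters = Limits.getCountersMax();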
 

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java

@@ -62,8 +62,8 @@ import com.google.common.collect.Iterators;
 public class Counters
     extends AbstractCounters<Counters.Counter, Counters.Group> {
   
-  public static int MAX_COUNTER_LIMIT = Limits.COUNTERS_MAX;
-  public static int MAX_GROUP_LIMIT = Limits.GROUPS_MAX;
+  public static int MAX_COUNTER_LIMIT = Limits.getCountersMax();
+  public static int MAX_GROUP_LIMIT = Limits.getGroupsMax();
   private static HashMap<String, String> depricatedCounterMap =
       new HashMap<String, String>();
   

+ 58 - 14
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/Limits.java

@@ -28,37 +28,80 @@ import static org.apache.hadoop.mapreduce.MRJobConfig.*;
 public class Limits {
 
   static final Configuration conf = new JobConf();
-  public static final int GROUP_NAME_MAX =
-      conf.getInt(COUNTER_GROUP_NAME_MAX_KEY, COUNTER_GROUP_NAME_MAX_DEFAULT);
-  public static final int COUNTER_NAME_MAX =
-      conf.getInt(COUNTER_NAME_MAX_KEY, COUNTER_NAME_MAX_DEFAULT);
-  public static final int GROUPS_MAX =
-      conf.getInt(COUNTER_GROUPS_MAX_KEY, COUNTER_GROUPS_MAX_DEFAULT);
-  public static final int COUNTERS_MAX =
-      conf.getInt(COUNTERS_MAX_KEY, COUNTERS_MAX_DEFAULT);
 
   private int totalCounters;
   private LimitExceededException firstViolation;
 
+  private static boolean isInited;
+  
+  private static int GROUP_NAME_MAX;
+  private static int COUNTER_NAME_MAX;
+  private static int GROUPS_MAX;
+  private static int COUNTERS_MAX;
+  
+  public synchronized static void init(Configuration conf) {
+    if (!isInited) {
+      if (conf == null) {
+        conf = new JobConf();
+      }
+      GROUP_NAME_MAX = conf.getInt(COUNTER_GROUP_NAME_MAX_KEY,
+          COUNTER_GROUP_NAME_MAX_DEFAULT);
+      COUNTER_NAME_MAX = conf.getInt(COUNTER_NAME_MAX_KEY,
+          COUNTER_NAME_MAX_DEFAULT);
+      GROUPS_MAX = conf.getInt(COUNTER_GROUPS_MAX_KEY, COUNTER_GROUPS_MAX_DEFAULT);
+      COUNTERS_MAX = conf.getInt(COUNTERS_MAX_KEY, COUNTERS_MAX_DEFAULT);
+    }
+    isInited = true;
+  }
+  
+  public static int getGroupNameMax() {
+    if (!isInited) {
+      init(null);
+    }
+    return GROUP_NAME_MAX;
+  }
+  
+  public static int getCounterNameMax() {
+    if (!isInited) {
+      init(null);
+    }
+    return COUNTER_NAME_MAX;
+  }
+  
+  public static int getGroupsMax() {
+    if (!isInited) {
+      init(null);
+    }
+    return GROUPS_MAX;
+  }
+  
+  public static int getCountersMax() {
+    if (!isInited) {
+      init(null);
+    }
+    return COUNTERS_MAX;
+  }
+  
   public static String filterName(String name, int maxLen) {
     return name.length() > maxLen ? name.substring(0, maxLen - 1) : name;
   }
 
   public static String filterCounterName(String name) {
-    return filterName(name, COUNTER_NAME_MAX);
+    return filterName(name, getCounterNameMax());
   }
 
   public static String filterGroupName(String name) {
-    return filterName(name, GROUP_NAME_MAX);
+    return filterName(name, getGroupNameMax());
   }
 
   public synchronized void checkCounters(int size) {
     if (firstViolation != null) {
       throw new LimitExceededException(firstViolation);
     }
-    if (size > COUNTERS_MAX) {
+    int countersMax = getCountersMax();
+    if (size > countersMax) {
       firstViolation = new LimitExceededException("Too many counters: "+ size +
-                                                  " max="+ COUNTERS_MAX);
+                                                  " max="+ countersMax);
       throw firstViolation;
     }
   }
@@ -72,9 +115,10 @@ public class Limits {
     if (firstViolation != null) {
       throw new LimitExceededException(firstViolation);
     }
-    if (size > GROUPS_MAX) {
+    int groupsMax = getGroupsMax();
+    if (size > groupsMax) {
       firstViolation = new LimitExceededException("Too many counter groups: "+
-                                                  size +" max="+ GROUPS_MAX);
+                                                  size +" max="+ groupsMax);
     }
   }
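
The rewrite replaces eager static-field initialization, which forced a JobConf parse at class-load time, with an idempotent init() that the first getter call falls back to. A self-contained sketch of the same pattern, with illustrative names; note the getter below takes the lock, whereas the change above reads the isInited flag unsynchronized and relies on init() running early in process startup:

    class LazySettings {
      private static boolean inited;
      private static int countersMax;

      static synchronized void init(java.util.Properties p) {
        if (!inited) {
          if (p == null) {
            p = new java.util.Properties();   // fall back to defaults
          }
          countersMax = Integer.parseInt(p.getProperty("counters.max", "120"));
          inited = true;
        }
      }

      static synchronized int getCountersMax() {
        if (!inited) {
          init(null);                         // first caller triggers default init
        }
        return countersMax;
      }
    }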
 

+ 4 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestCounters.java

@@ -101,8 +101,8 @@ public class TestCounters {
   static final long FS_COUNTER_VALUE = 10;
 
   private void testMaxCounters(final Counters counters) {
-    LOG.info("counters max="+ Limits.COUNTERS_MAX);
-    for (int i = 0; i < Limits.COUNTERS_MAX; ++i) {
+    LOG.info("counters max="+ Limits.getCountersMax());
+    for (int i = 0; i < Limits.getCountersMax(); ++i) {
       counters.findCounter("test", "test"+ i);
     }
     setExpected(counters);
@@ -115,8 +115,8 @@ public class TestCounters {
   }
 
   private void testMaxGroups(final Counters counters) {
-    LOG.info("counter groups max="+ Limits.GROUPS_MAX);
-    for (int i = 0; i < Limits.GROUPS_MAX; ++i) {
+    LOG.info("counter groups max="+ Limits.getGroupsMax());
+    for (int i = 0; i < Limits.getGroupsMax(); ++i) {
       // assuming COUNTERS_MAX > GROUPS_MAX
       counters.findCounter("test"+ i, "test");
     }
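
Both test loops stop exactly at the configured maximum; the enforcement in checkCounters/checkGroups fires only once the size exceeds it. A hedged sketch of what crossing the counter limit looks like (whether the throw surfaces from findCounter itself depends on AbstractCounters internals, so treat this as illustrative):

    Counters counters = new Counters();
    int max = Limits.getCountersMax();
    for (int i = 0; i <= max; ++i) {          // one past the limit
      counters.findCounter("test", "test" + i);
    }
    // expected on the final iteration:
    // org.apache.hadoop.mapreduce.counters.LimitExceededException: Too many counters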

+ 13 - 1
hadoop-yarn-project/CHANGES.txt

@@ -38,7 +38,19 @@ Release 2.3.0 - UNRELEASED
     YARN-1060. Two tests in TestFairScheduler are missing @Test annotation
     (Niranjan Singh via Sandy Ryza)
 
-Release 2.1.1-beta - UNRELEASED
+Release 2.2.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 2.1.1-beta - 2013-09-23
 
   INCOMPATIBLE CHANGES