
Merge trunk into HA branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1230248 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers 13 years ago
Parent
Commit
ede7f21278
100 files changed, 3180 insertions and 946 deletions
  1. +6 -3  hadoop-common-project/hadoop-common/CHANGES.txt
  2. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  3. +5 -0  hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  4. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
  5. +4 -37  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
  6. +3 -6  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
  7. +2 -6  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  8. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto
  9. +35 -1  hadoop-mapreduce-project/CHANGES.txt
  10. +9 -6  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
  11. +25 -11  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
  12. +33 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java
  13. +40 -30  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java
  14. +25 -8  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
  15. +10 -2  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
  16. +1 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Task.java
  17. +1 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java
  18. +1 -2  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptStatusUpdateEvent.java
  19. +29 -76  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
  20. +15 -15  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
  21. +8 -4  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
  22. +229 -167  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
  23. +5 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerRemoteLaunchEvent.java
  24. +1 -3  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java
  25. +18 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java
  26. +27 -21  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
  27. +12 -10  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java
  28. +46 -2  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java
  29. +15 -11  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterBlock.java
  30. +95 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptInfo.java
  31. +45 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptsInfo.java
  32. +6 -6  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterGroupInfo.java
  33. +4 -4  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterInfo.java
  34. +14 -18  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobCounterInfo.java
  35. +1 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobInfo.java
  36. +3 -3  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskAttemptCounterInfo.java
  37. +3 -3  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskCounterInfo.java
  38. +4 -4  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskCounterGroupInfo.java
  39. +1 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java
  40. +17 -12  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
  41. +0 -140  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestContainerLauncher.java
  42. +31 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
  43. +7 -8  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
  44. +321 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
  45. +223 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
  46. +1 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
  47. +136 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobs.java
  48. +1 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesTasks.java
  49. +2 -2  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
  50. +3 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java
  51. +64 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRDelegationTokenIdentifier.java
  52. +25 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java
  53. +20 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java
  54. +29 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDelegationTokenRequest.java
  55. +25 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDelegationTokenResponse.java
  56. +97 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java
  57. +109 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java
  58. +19 -2  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSSecurityInfo.java
  59. +56 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSTokenSelector.java
  60. +22 -13  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
  61. +1 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto
  62. +8 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto
  63. +8 -12  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java
  64. +22 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
  65. +16 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
  66. +61 -28  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java
  67. +95 -47  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java
  68. +98 -48  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/ReduceAttemptFinishedEvent.java
  69. +56 -28  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptFinishedEvent.java
  70. +38 -18  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFinishedEvent.java
  71. +8 -3  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
  72. +1 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
  73. +3 -3  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
  74. +3 -3  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java
  75. +5 -5  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java
  76. +49 -5  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
  77. +58 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JHSDelegationTokenSecretManager.java
  78. +38 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java
  79. +2 -2  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
  80. +1 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java
  81. +5 -3  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/AMAttemptInfo.java
  82. +3 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/AMAttemptsInfo.java
  83. +1 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAttempts.java
  84. +14 -13  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java
  85. +1 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesTasks.java
  86. +9 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
  87. +13 -54  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
  88. +8 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
  89. +17 -4  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
  90. +54 -6  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
  91. +15 -0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
  92. +61 -3  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
  93. +36 -3  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java
  94. +1 -1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
  95. +18 -0  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java
  96. +36 -0  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetDelegationTokenRequest.java
  97. +37 -0  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetDelegationTokenResponse.java
  98. +95 -0  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java
  99. +109 -0  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java
  100. +84 -0  hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/DelegationToken.java

+ 6 - 3
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -71,9 +71,6 @@ Trunk (unreleased changes)
     HADOOP-7574. Improve FSShell -stat, add user/group elements.
     (XieXianshan via harsh)
 
-    HADOOP-7348. Change 'addnl' in getmerge util to be a flag '-nl' instead.
-    (XieXianshan via harsh)
-
     HADOOP-7919. Remove the unused hadoop.logfile.* properties from the 
     core-default.xml file. (harsh)
 
@@ -203,6 +200,9 @@ Release 0.23.1 - Unreleased
 
     HADOOP-7934. Normalize dependencies versions across all modules. (tucu)
 
+    HADOOP-7348. Change 'addnl' in getmerge util to be a flag '-nl' instead.
+    (XieXianshan via harsh)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -263,6 +263,9 @@ Release 0.23.1 - Unreleased
 
    HADOOP-7936. There's a Hoop README in the root dir of the tarball. (tucu)
 
+   HADOOP-7963. Fix ViewFS to catch a null canonical service-name and pass
+   tests TestViewFileSystem* (Siddharth Seth via vinodkv)
+
 Release 0.23.0 - 2011-11-01 
 
   INCOMPATIBLE CHANGES

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -514,7 +514,7 @@ public class ViewFileSystem extends FileSystem {
     for (int i = 0; i < mountPoints.size(); ++i) {
       String serviceName =
           mountPoints.get(i).target.targetFileSystem.getCanonicalServiceName();
-      if (seenServiceNames.contains(serviceName)) {
+      if (serviceName == null || seenServiceNames.contains(serviceName)) {
         continue;
       }
       seenServiceNames.add(serviceName);
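
The one-line change above is the HADOOP-7963 fix named in the CHANGES.txt hunk: FileSystem#getCanonicalServiceName() may return null for a file system that issues no delegation tokens, so a ViewFileSystem mount table can legitimately contain such targets, and the loop must skip them instead of passing null along to token collection. A hedged, pure-JDK sketch of the skip-null-and-duplicates pattern (illustrative names, not the Hadoop source):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    class ServiceNameDedup {
      // Keep one entry per distinct service name; null means "no
      // delegation-token support" and is skipped like a duplicate.
      static List<String> distinctServices(List<String> names) {
        Set<String> seen = new HashSet<String>();
        List<String> out = new ArrayList<String>();
        for (String name : names) {
          if (name == null || seen.contains(name)) {
            continue;
          }
          seen.add(name);
          out.add(name);
        }
        return out;
      }

      public static void main(String[] args) {
        // prints [hdfs://nn1:8020]
        System.out.println(distinctServices(
            Arrays.asList("hdfs://nn1:8020", null, "hdfs://nn1:8020")));
      }
    }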

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -146,6 +146,9 @@ Trunk (unreleased changes)
 
     HDFS-2497 Fix TestBackupNode failure. (suresh)
 
+    HDFS-2499. RPC client is created incorrectly introduced in HDFS-2459.
+    (suresh)
+
     HDFS-2526. (Client)NamenodeProtocolTranslatorR23 do not need to keep a
     reference to rpcProxyWithoutRetry (atm)
 
@@ -165,6 +168,8 @@ Trunk (unreleased changes)
 
     HDFS-2765. TestNameEditsConfigs is incorrectly swallowing IOE. (atm)
 
+    HDFS-2739. SecondaryNameNode doesn't start up. (jitendra)
+
 Release 0.23.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java

@@ -97,7 +97,7 @@ public class NamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
-  public GetTransactionIdResponseProto getTransationId(RpcController unused,
+  public GetTransactionIdResponseProto getTransactionId(RpcController unused,
       GetTransactionIdRequestProto request) throws ServiceException {
     long txid;
     try {

+ 4 - 37
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java

@@ -92,47 +92,14 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
 
   final private NamenodeProtocolPB rpcProxy;
 
-  private static NamenodeProtocolPB createNamenode(
-      InetSocketAddress nameNodeAddr, Configuration conf,
-      UserGroupInformation ugi) throws IOException {
+  public NamenodeProtocolTranslatorPB(InetSocketAddress nameNodeAddr,
+      Configuration conf, UserGroupInformation ugi) throws IOException {
     RPC.setProtocolEngine(conf, NamenodeProtocolPB.class,
         ProtobufRpcEngine.class);
-    return RPC.getProxy(NamenodeProtocolPB.class,
+    rpcProxy = RPC.getProxy(NamenodeProtocolPB.class,
         RPC.getProtocolVersion(NamenodeProtocolPB.class), nameNodeAddr, ugi,
         conf, NetUtils.getSocketFactory(conf, NamenodeProtocolPB.class));
   }
-
-  /** Create a {@link NameNode} proxy */
-  static NamenodeProtocolPB createNamenodeWithRetry(
-      NamenodeProtocolPB rpcNamenode) {
-    RetryPolicy createPolicy = RetryPolicies
-        .retryUpToMaximumCountWithFixedSleep(5,
-            HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
-    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap = 
-        new HashMap<Class<? extends Exception>, RetryPolicy>();
-    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
-        createPolicy);
-
-    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = 
-        new HashMap<Class<? extends Exception>, RetryPolicy>();
-    exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
-        .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
-            remoteExceptionToPolicyMap));
-    RetryPolicy methodPolicy = RetryPolicies.retryByException(
-        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
-    Map<String, RetryPolicy> methodNameToPolicyMap = 
-        new HashMap<String, RetryPolicy>();
-
-    methodNameToPolicyMap.put("create", methodPolicy);
-
-    return (NamenodeProtocolPB) RetryProxy.create(NamenodeProtocolPB.class,
-        rpcNamenode, methodNameToPolicyMap);
-  }
-
-  public NamenodeProtocolTranslatorPB(InetSocketAddress nameNodeAddr,
-      Configuration conf, UserGroupInformation ugi) throws IOException {
-    rpcProxy = createNamenodeWithRetry(createNamenode(nameNodeAddr, conf, ugi));
-  }
   
   public NamenodeProtocolTranslatorPB(NamenodeProtocolPB rpcProxy) {
     this.rpcProxy = rpcProxy;
@@ -182,7 +149,7 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
   @Override
   public long getTransactionID() throws IOException {
     try {
-      return rpcProxy.getTransationId(NULL_CONTROLLER, GET_TRANSACTIONID)
+      return rpcProxy.getTransactionId(NULL_CONTROLLER, GET_TRANSACTIONID)
           .getTxId();
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
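
The deleted createNamenodeWithRetry is what HDFS-2499 ("RPC client is created incorrectly") refers to: the removed retry map keys on the method name "create" and on AlreadyBeingCreatedException, which belong to ClientProtocol's file-create path, not to NamenodeProtocol, so the wrapper never retried anything and the constructor can now build the plain proxy directly. A hedged, pure-JDK sketch of the method-name-keyed retry pattern the removed code used (illustrative, not the Hadoop io.retry API):

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.Method;
    import java.lang.reflect.Proxy;
    import java.util.Map;

    class RetryByName implements InvocationHandler {
      private final Object target;
      private final Map<String, Integer> extraTries; // method name -> retries

      RetryByName(Object target, Map<String, Integer> extraTries) {
        this.target = target;
        this.extraTries = extraTries;
      }

      // Retries only methods named in the map; an entry for a method the
      // interface does not have (like "create" here) simply never matches.
      @Override
      public Object invoke(Object proxy, Method m, Object[] args)
          throws Throwable {
        int tries = 1 + extraTries.getOrDefault(m.getName(), 0);
        Throwable last = null;
        for (int i = 0; i < tries; i++) {
          try {
            return m.invoke(target, args);
          } catch (Throwable t) {
            last = t;
          }
        }
        throw last;
      }

      @SuppressWarnings("unchecked")
      static <T> T wrap(Class<T> iface, T target, Map<String, Integer> map) {
        return (T) Proxy.newProxyInstance(iface.getClassLoader(),
            new Class<?>[] { iface }, new RetryByName(target, map));
      }
    }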

+ 3 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java

@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalProtocolService;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolServerSideTranslatorPB;
-import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage;
@@ -45,6 +44,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 
 import com.google.protobuf.BlockingService;
 
@@ -284,11 +284,8 @@ public class BackupNode extends NameNode {
   private NamespaceInfo handshake(Configuration conf) throws IOException {
     // connect to name node
     InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
-    NamenodeProtocolPB proxy = 
-      RPC.waitForProxy(NamenodeProtocolPB.class,
-          RPC.getProtocolVersion(NamenodeProtocolPB.class),
-          nnAddress, conf);
-    this.namenode = new NamenodeProtocolTranslatorPB(proxy);
+    this.namenode = new NamenodeProtocolTranslatorPB(nnAddress, conf,
+        UserGroupInformation.getCurrentUser());
     this.nnRpcAddress = getHostPortString(nnAddress);
     this.nnHttpAddress = getHostPortString(super.getHttpServerAddress(conf));
     // get version and id info from the name-node

+ 2 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -50,7 +50,6 @@ import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -62,7 +61,6 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.MD5Hash;
-import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
@@ -219,10 +217,8 @@ public class SecondaryNameNode implements Runnable {
     nameNodeAddr = NameNode.getServiceAddress(conf, true);
 
     this.conf = conf;
-    NamenodeProtocolPB proxy = 
-        RPC.waitForProxy(NamenodeProtocolPB.class,
-            RPC.getProtocolVersion(NamenodeProtocolPB.class), nameNodeAddr, conf);
-    this.namenode = new NamenodeProtocolTranslatorPB(proxy);
+    this.namenode = new NamenodeProtocolTranslatorPB(nameNodeAddr, conf,
+        UserGroupInformation.getCurrentUser());
 
     // initialize checkpoint directories
     fsName = getInfoServer();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto

@@ -185,7 +185,7 @@ service NamenodeProtocolService {
   /**
    * Get the transaction ID of the most recently persisted editlog record
    */
-  rpc getTransationId(GetTransactionIdRequestProto) 
+  rpc getTransactionId(GetTransactionIdRequestProto) 
       returns(GetTransactionIdResponseProto);
 
   /**

+ 35 - 1
hadoop-mapreduce-project/CHANGES.txt

@@ -156,10 +156,28 @@ Release 0.23.1 - Unreleased
     MAPREDUCE-3547. Added a bunch of unit tests for the RM/NM webservices.
     (Thomas Graves via acmurthy)
 
-    MAPREDUCE-3610. Remove use of the 'dfs.block.size' config for default block size fetching. Use FS#getDefaultBlocksize instead. (Sho Shimauchi via harsh)
+    MAPREDUCE-3610. Remove use of the 'dfs.block.size' config for default block
+    size fetching. Use FS#getDefaultBlocksize instead. (Sho Shimauchi via harsh)
 
     MAPREDUCE-3478. Cannot build against ZooKeeper 3.4.0. (Tom White via mahadev)
 
+    MAPREDUCE-3528. Fixed TaskHeartBeatHandler to use a new configuration
+    for the thread loop interval separate from task-timeout configuration
+    property. (Siddharth Seth via vinodkv)
+
+    MAPREDUCE-3312. Modified MR AM to not send a stop-container request for
+    a container that isn't launched at all. (Robert Joseph Evans via vinodkv)
+
+    MAPREDUCE-3382. Enhanced MR AM to use a proxy to ping the job-end
+    notification URL. (Ravi Prakash via vinodkv)
+
+    MAPREDUCE-3299. Added AMInfo table to the MR AM job pages to list all the
+    job-attempts when AM restarts and recovers. (Jonathan Eagles via vinodkv)
+
+    MAPREDUCE-3251. Network ACLs can prevent some clients from talking to the
+    MR AM. Improved the earlier patch to not talk to the JobHistoryServer
+    repeatedly. (Anupam Seth via vinodkv)
+
   OPTIMIZATIONS
 
     MAPREDUCE-3567. Extraneous JobConf objects in AM heap. (Vinod Kumar
@@ -174,6 +192,12 @@ Release 0.23.1 - Unreleased
     MAPREDUCE-3569. TaskAttemptListener holds a global lock for all
     task-updates. (Vinod Kumar Vavilapalli via sseth)
 
+    MAPREDUCE-3511. Removed a multitude of cloned/duplicate counters in the AM
+    thereby reducing the AM heap size and preventing full GCs. (vinodkv)
+
+    MAPREDUCE-3618. Fixed TaskHeartbeatHandler to not hold a global lock for all
+    task-updates. (Siddharth Seth via vinodkv)
+
   BUG FIXES
 
     MAPREDUCE-3221. Reenabled the previously ignored test in TestSubmitJob
@@ -428,6 +452,16 @@ Release 0.23.1 - Unreleased
     MAPREDUCE-3624. Remove unnecessary dependency on JDK's tools.jar. (mahadev
     via acmurthy)
 
+    MAPREDUCE-3616. Thread pool for launching containers in MR AM not
+    expanding as expected. (vinodkv via sseth)
+
+    MAPREDUCE-3639. Fixed TokenCache to work with absent FileSystem canonical
+    service-names. (Siddharth Seth via vinodkv)
+
+    MAPREDUCE-3380. Token infrastructure for running clients which are not
+    Kerberos authenticated. (mahadev)
+
+    MAPREDUCE-3648. TestJobConf failing. (Thomas Graves via mahadev)
 Release 0.23.0 - 2011-11-01 
 
   INCOMPATIBLE CHANGES

+ 9 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java

@@ -88,7 +88,7 @@ public class TaskAttemptListenerImpl extends CompositeService
 
   @Override
   public void init(Configuration conf) {
-   registerHeartbeatHandler();
+   registerHeartbeatHandler(conf);
    super.init(conf);
   }
 
@@ -98,9 +98,10 @@ public class TaskAttemptListenerImpl extends CompositeService
     super.start();
   }
 
-  protected void registerHeartbeatHandler() {
+  protected void registerHeartbeatHandler(Configuration conf) {
     taskHeartbeatHandler = new TaskHeartbeatHandler(context.getEventHandler(), 
-        context.getClock());
+        context.getClock(), conf.getInt(MRJobConfig.MR_AM_TASK_LISTENER_THREAD_COUNT, 
+            MRJobConfig.DEFAULT_MR_AM_TASK_LISTENER_THREAD_COUNT));
     addService(taskHeartbeatHandler);
   }
 
@@ -325,9 +326,11 @@ public class TaskAttemptListenerImpl extends CompositeService
     taskAttemptStatus.outputSize = taskStatus.getOutputSize();
     // Task sends the updated phase to the TT.
     taskAttemptStatus.phase = TypeConverter.toYarn(taskStatus.getPhase());
-    // Counters are updated by the task.
-    taskAttemptStatus.counters =
-        TypeConverter.toYarn(taskStatus.getCounters());
+    // Counters are updated by the task. Convert counters into new format as
+    // that is the primary storage format inside the AM to avoid multiple
+    // conversions and unnecessary heap usage.
+    taskAttemptStatus.counters = new org.apache.hadoop.mapreduce.Counters(
+      taskStatus.getCounters());
 
     // Map Finish time set by the task (map only)
     if (taskStatus.getIsMap() && taskStatus.getMapFinishTime() != 0) {
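
Two changes land in this hunk: registerHeartbeatHandler now receives the Configuration so the heartbeat handler can be sized from MR_AM_TASK_LISTENER_THREAD_COUNT, and the task's counters are converted into org.apache.hadoop.mapreduce.Counters exactly once, at the RPC boundary, so the AM keeps a single canonical in-memory format instead of reconverting on every read. A hedged, pure-JDK sketch of the convert-once-at-the-edge idea (the wire format here is made up):

    import java.util.HashMap;
    import java.util.Map;

    class CounterBoundary {
      // Parse the wire representation once; everything downstream gets
      // the canonical map and never touches the wire format again.
      static Map<String, Long> toCanonical(String wire) {
        Map<String, Long> canonical = new HashMap<String, Long>();
        for (String pair : wire.split(",")) { // e.g. "MAP_INPUT_RECORDS=42"
          int eq = pair.indexOf('=');
          canonical.put(pair.substring(0, eq),
              Long.parseLong(pair.substring(eq + 1)));
        }
        return canonical;
      }

      public static void main(String[] args) {
        System.out.println(
            toCanonical("MAP_INPUT_RECORDS=42,SPILLED_RECORDS=7"));
      }
    }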

+ 25 - 11
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java

@@ -36,10 +36,11 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobCounter;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
@@ -63,6 +64,8 @@ public class JobHistoryEventHandler extends AbstractService
   private final AppContext context;
   private final int startCount;
 
+  private int eventCounter;
+
   //TODO Does the FS object need to be different ? 
   private FileSystem stagingDirFS; // log Dir FileSystem
   private FileSystem doneDirFS; // done Dir FileSystem
@@ -210,6 +213,16 @@ public class JobHistoryEventHandler extends AbstractService
       public void run() {
         JobHistoryEvent event = null;
         while (!stopped && !Thread.currentThread().isInterrupted()) {
+
+          // Log the size of the history-event-queue every so often.
+          if (eventCounter % 1000 == 0) {
+            eventCounter = 0;
+            LOG.info("Size of the JobHistory event queue is "
+                + eventQueue.size());
+          } else {
+            eventCounter++;
+          }
+
           try {
             event = eventQueue.take();
           } catch (InterruptedException e) {
@@ -238,7 +251,8 @@ public class JobHistoryEventHandler extends AbstractService
 
   @Override
   public void stop() {
-    LOG.info("Stopping JobHistoryEventHandler");
+    LOG.info("Stopping JobHistoryEventHandler. "
+        + "Size of the outstanding queue size is " + eventQueue.size());
     stopped = true;
     //do not interrupt while event handling is in progress
     synchronized(lock) {
@@ -483,7 +497,7 @@ public class JobHistoryEventHandler extends AbstractService
                 .toString());
       // TODO JOB_FINISHED does not have state. Effectively job history does not
       // have state about the finished job.
-      setSummarySlotSeconds(summary, jobId);
+      setSummarySlotSeconds(summary, jfe.getTotalCounters());
       break;
     case JOB_FAILED:
     case JOB_KILLED:
@@ -492,21 +506,21 @@ public class JobHistoryEventHandler extends AbstractService
       summary.setNumFinishedMaps(context.getJob(jobId).getTotalMaps());
       summary.setNumFinishedReduces(context.getJob(jobId).getTotalReduces());
       summary.setJobFinishTime(juce.getFinishTime());
-      setSummarySlotSeconds(summary, jobId);
+      setSummarySlotSeconds(summary, context.getJob(jobId).getAllCounters());
       break;
     }
   }
 
-  private void setSummarySlotSeconds(JobSummary summary, JobId jobId) {
-    Counter slotMillisMapCounter =
-        context.getJob(jobId).getCounters()
-            .getCounter(JobCounter.SLOTS_MILLIS_MAPS);
+  private void setSummarySlotSeconds(JobSummary summary, Counters allCounters) {
+
+    Counter slotMillisMapCounter = allCounters
+      .findCounter(JobCounter.SLOTS_MILLIS_MAPS);
     if (slotMillisMapCounter != null) {
       summary.setMapSlotSeconds(slotMillisMapCounter.getValue());
     }
-    Counter slotMillisReduceCounter =
-        context.getJob(jobId).getCounters()
-            .getCounter(JobCounter.SLOTS_MILLIS_REDUCES);
+
+    Counter slotMillisReduceCounter = allCounters
+      .findCounter(JobCounter.SLOTS_MILLIS_REDUCES);
     if (slotMillisReduceCounter != null) {
      summary.setReduceSlotSeconds(slotMillisReduceCounter.getValue());
     }
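
setSummarySlotSeconds now takes the aggregated Counters rather than a JobId: Job#getAllCounters() clones and sums every task's counters on each call, so the caller builds that snapshot once per event (straight off the JobFinishedEvent for JOB_FINISHED, one getAllCounters() call otherwise) instead of once per counter lookup. A short sketch of the resulting read path, assuming a Hadoop 0.23-era classpath:

    import org.apache.hadoop.mapreduce.Counter;
    import org.apache.hadoop.mapreduce.Counters;
    import org.apache.hadoop.mapreduce.JobCounter;

    class SlotSecondsSketch {
      // Hedged sketch: the snapshot is passed in, built once by the caller.
      static void fillSummary(Counters allCounters) {
        Counter maps = allCounters.findCounter(JobCounter.SLOTS_MILLIS_MAPS);
        Counter reduces =
            allCounters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES);
        System.out.println(maps.getValue() + " " + reduces.getValue());
      }
    }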

+ 33 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java

@@ -20,9 +20,11 @@ package org.apache.hadoop.mapreduce.v2.app;
 
 import java.io.IOException;
 import java.io.InputStream;
+import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
+import java.net.Proxy;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
@@ -49,9 +51,11 @@ public class JobEndNotifier implements Configurable {
 
   private Configuration conf;
   protected String userUrl;
+  protected String proxyConf;
   protected int numTries; //Number of tries to attempt notification
   protected int waitInterval; //Time to wait between retrying notification
   protected URL urlToNotify; //URL to notify read from the config
+  protected Proxy proxyToUse = Proxy.NO_PROXY; //Proxy to use for notification
 
   /**
    * Parse the URL that needs to be notified of the end of the job, along
@@ -73,6 +77,34 @@ public class JobEndNotifier implements Configurable {
     waitInterval = (waitInterval < 0) ? 5 : waitInterval;
 
     userUrl = conf.get(MRJobConfig.MR_JOB_END_NOTIFICATION_URL);
+
+    proxyConf = conf.get(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY);
+
+    //Configure the proxy to use if its set. It should be set like
+    //proxyType@proxyHostname:port
+    if(proxyConf != null && !proxyConf.equals("") &&
+         proxyConf.lastIndexOf(":") != -1) {
+      int typeIndex = proxyConf.indexOf("@");
+      Proxy.Type proxyType = Proxy.Type.HTTP;
+      if(typeIndex != -1 &&
+        proxyConf.substring(0, typeIndex).compareToIgnoreCase("socks") == 0) {
+        proxyType = Proxy.Type.SOCKS;
+      }
+      String hostname = proxyConf.substring(typeIndex + 1,
+        proxyConf.lastIndexOf(":"));
+      String portConf = proxyConf.substring(proxyConf.lastIndexOf(":") + 1);
+      try {
+        int port = Integer.parseInt(portConf);
+        proxyToUse = new Proxy(proxyType,
+          new InetSocketAddress(hostname, port));
+        Log.info("Job end notification using proxy type \"" + proxyType + 
+        "\" hostname \"" + hostname + "\" and port \"" + port + "\"");
+      } catch(NumberFormatException nfe) {
+        Log.warn("Job end notification couldn't parse configured proxy's port "
+          + portConf + ". Not going to use a proxy");
+      }
+    }
+
   }
 
   public Configuration getConf() {
@@ -87,7 +119,7 @@ public class JobEndNotifier implements Configurable {
     boolean success = false;
     try {
       Log.info("Job end notification trying " + urlToNotify);
-      URLConnection conn = urlToNotify.openConnection();
+      URLConnection conn = urlToNotify.openConnection(proxyToUse);
       conn.setConnectTimeout(5*1000);
       conn.setReadTimeout(5*1000);
       conn.setAllowUserInteraction(false);
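
The proxy string is expected as proxyType@proxyHostname:port, with HTTP assumed unless the prefix is "socks", and an unparseable port falling back to a direct connection. A hedged, standalone restatement of the parsing above (names and the example value are illustrative):

    import java.net.InetSocketAddress;
    import java.net.Proxy;

    class ProxyConfParser {
      static Proxy parse(String proxyConf) {
        if (proxyConf == null || proxyConf.isEmpty()
            || proxyConf.lastIndexOf(":") == -1) {
          return Proxy.NO_PROXY; // unset or malformed: connect directly
        }
        int typeIndex = proxyConf.indexOf("@");
        Proxy.Type type = Proxy.Type.HTTP; // default when no "socks@" prefix
        if (typeIndex != -1
            && "socks".equalsIgnoreCase(proxyConf.substring(0, typeIndex))) {
          type = Proxy.Type.SOCKS;
        }
        String host =
            proxyConf.substring(typeIndex + 1, proxyConf.lastIndexOf(":"));
        String port = proxyConf.substring(proxyConf.lastIndexOf(":") + 1);
        try {
          return new Proxy(type,
              new InetSocketAddress(host, Integer.parseInt(port)));
        } catch (NumberFormatException nfe) {
          return Proxy.NO_PROXY; // unparseable port: no proxy
        }
      }

      public static void main(String[] args) {
        System.out.println(parse("socks@gateway.example.com:1080"));
      }
    }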

+ 40 - 30
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java

@@ -18,13 +18,15 @@
 
 package org.apache.hadoop.mapreduce.v2.app;
 
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
@@ -40,6 +42,7 @@ import org.apache.hadoop.yarn.service.AbstractService;
  * not hear from it for a long time.
  * 
  */
+@SuppressWarnings({"unchecked", "rawtypes"})
 public class TaskHeartbeatHandler extends AbstractService {
 
   private static final Log LOG = LogFactory.getLog(TaskHeartbeatHandler.class);
@@ -48,24 +51,29 @@ public class TaskHeartbeatHandler extends AbstractService {
   //received from a task.
   private Thread lostTaskCheckerThread;
   private volatile boolean stopped;
-  private int taskTimeOut = 5*60*1000;//5 mins
+  private int taskTimeOut = 5 * 60 * 1000;// 5 mins
+  private int taskTimeOutCheckInterval = 30 * 1000; // 30 seconds.
 
   private final EventHandler eventHandler;
   private final Clock clock;
 
-  private Map<TaskAttemptId, Long> runningAttempts 
-    = new HashMap<TaskAttemptId, Long>();
+  private ConcurrentMap<TaskAttemptId, Long> runningAttempts;
 
-  public TaskHeartbeatHandler(EventHandler eventHandler, Clock clock) {
+  public TaskHeartbeatHandler(EventHandler eventHandler, Clock clock,
+      int numThreads) {
     super("TaskHeartbeatHandler");
     this.eventHandler = eventHandler;
     this.clock = clock;
+    runningAttempts =
+      new ConcurrentHashMap<TaskAttemptId, Long>(16, 0.75f, numThreads);
   }
 
   @Override
   public void init(Configuration conf) {
-   super.init(conf);
-   taskTimeOut = conf.getInt("mapreduce.task.timeout", 5*60*1000);
+    super.init(conf);
+    taskTimeOut = conf.getInt(MRJobConfig.TASK_TIMEOUT, 5 * 60 * 1000);
+    taskTimeOutCheckInterval =
+        conf.getInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 30 * 1000);
   }
 
   @Override
@@ -83,18 +91,16 @@ public class TaskHeartbeatHandler extends AbstractService {
     super.stop();
   }
 
-  public synchronized void receivedPing(TaskAttemptId attemptID) {
-    //only put for the registered attempts
-    if (runningAttempts.containsKey(attemptID)) {
-      runningAttempts.put(attemptID, clock.getTime());
-    }
+  public void receivedPing(TaskAttemptId attemptID) {
+  //only put for the registered attempts
+    runningAttempts.replace(attemptID, clock.getTime());
   }
 
-  public synchronized void register(TaskAttemptId attemptID) {
+  public void register(TaskAttemptId attemptID) {
     runningAttempts.put(attemptID, clock.getTime());
   }
 
-  public synchronized void unregister(TaskAttemptId attemptID) {
+  public void unregister(TaskAttemptId attemptID) {
     runningAttempts.remove(attemptID);
   }
 
@@ -103,36 +109,40 @@ public class TaskHeartbeatHandler extends AbstractService {
     @Override
     public void run() {
       while (!stopped && !Thread.currentThread().isInterrupted()) {
-        synchronized (TaskHeartbeatHandler.this) {
-          Iterator<Map.Entry<TaskAttemptId, Long>> iterator = 
+        Iterator<Map.Entry<TaskAttemptId, Long>> iterator =
             runningAttempts.entrySet().iterator();
 
-          //avoid calculating current time everytime in loop
-          long currentTime = clock.getTime();
+        // avoid calculating current time everytime in loop
+        long currentTime = clock.getTime();
+
+        while (iterator.hasNext()) {
+          Map.Entry<TaskAttemptId, Long> entry = iterator.next();
+          if (currentTime > entry.getValue() + taskTimeOut) {
 
-          while (iterator.hasNext()) {
-            Map.Entry<TaskAttemptId, Long> entry = iterator.next();
-            if (currentTime > entry.getValue() + taskTimeOut) {
-              //task is lost, remove from the list and raise lost event
+            //In case the iterator isn't picking up the latest.
+            // Extra lookup outside of the iterator - but only if the task
+            // is considered to be timed out.
+            Long taskTime = runningAttempts.get(entry.getKey());
+            if (taskTime != null && currentTime > taskTime + taskTimeOut) {
+              // task is lost, remove from the list and raise lost event
               iterator.remove();
-              eventHandler.handle(
-                  new TaskAttemptDiagnosticsUpdateEvent(entry.getKey(),
-                      "AttemptID:" + entry.getKey().toString() + 
-                      " Timed out after " + taskTimeOut/1000 + " secs"));
-              eventHandler.handle(new TaskAttemptEvent(entry
-                  .getKey(), TaskAttemptEventType.TA_TIMED_OUT));
+              eventHandler.handle(new TaskAttemptDiagnosticsUpdateEvent(entry
+                  .getKey(), "AttemptID:" + entry.getKey().toString()
+                  + " Timed out after " + taskTimeOut / 1000 + " secs"));
+              eventHandler.handle(new TaskAttemptEvent(entry.getKey(),
+                  TaskAttemptEventType.TA_TIMED_OUT));
             }
+
           }
         }
         try {
-          Thread.sleep(taskTimeOut);
+          Thread.sleep(taskTimeOutCheckInterval);
         } catch (InterruptedException e) {
           LOG.info("TaskHeartbeatHandler thread interrupted");
           break;
         }
       }
     }
-    
   }
 
 }
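
The synchronized methods become lock-free by moving to ConcurrentHashMap: register/unregister map to put/remove, and receivedPing uses replace(), which updates a key only if it is already mapped, so the old "only put for the registered attempts" guard survives without a lock; the checker thread then re-reads the timestamp before declaring a timeout in case its iterator saw a stale value. A runnable sketch of the replace() semantics:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    class PingSemantics {
      public static void main(String[] args) {
        ConcurrentMap<String, Long> running =
            new ConcurrentHashMap<String, Long>();
        running.put("attempt_1", 100L);     // register
        running.replace("attempt_1", 200L); // ping: key present, updated
        running.replace("attempt_2", 300L); // never registered: no-op
        System.out.println(running);        // {attempt_1=200}
      }
    }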

+ 25 - 8
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java

@@ -31,11 +31,14 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
@@ -223,7 +226,7 @@ public class MRClientService extends AbstractService
       Job job = verifyAndGetJob(jobId, false);
       GetCountersResponse response =
         recordFactory.newRecordInstance(GetCountersResponse.class);
-      response.setCounters(job.getCounters());
+      response.setCounters(TypeConverter.toYarn(job.getAllCounters()));
       return response;
     }
     
@@ -237,8 +240,7 @@ public class MRClientService extends AbstractService
       response.setJobReport(job.getReport());
       return response;
     }
-    
-    
+
     @Override
     public GetTaskAttemptReportResponse getTaskAttemptReport(
         GetTaskAttemptReportRequest request) throws YarnRemoteException {
@@ -356,6 +358,8 @@ public class MRClientService extends AbstractService
       return response;
     }
 
+    private final Object getTaskReportsLock = new Object();
+
     @Override
     public GetTaskReportsResponse getTaskReports(
         GetTaskReportsRequest request) throws YarnRemoteException {
@@ -366,13 +370,26 @@ public class MRClientService extends AbstractService
         recordFactory.newRecordInstance(GetTaskReportsResponse.class);
       
       Job job = verifyAndGetJob(jobId, false);
-      LOG.info("Getting task report for " + taskType + "   " + jobId);
       Collection<Task> tasks = job.getTasks(taskType).values();
-      LOG.info("Getting task report size " + tasks.size());
-      for (Task task : tasks) {
-        response.addTaskReport(task.getReport());
-	  }
+      LOG.info("Getting task report for " + taskType + "   " + jobId
+          + ". Report-size will be " + tasks.size());
+
+      // Take lock to allow only one call, otherwise heap will blow up because
+      // of counters in the report when there are multiple callers.
+      synchronized (getTaskReportsLock) {
+        for (Task task : tasks) {
+          response.addTaskReport(task.getReport());
+        }
+      }
+
       return response;
     }
+
+    @Override
+    public GetDelegationTokenResponse getDelegationToken(
+        GetDelegationTokenRequest request) throws YarnRemoteException {
+      throw RPCUtil.getRemoteException("MR AM not authorized to issue delegation" +
+      		" token");
+    }
   }
 }
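
The new getTaskReportsLock serializes report construction: every TaskReport now carries a full counters snapshot, so N concurrent getTaskReports calls would hold N copies of every task's counters in the heap at once; with the lock, at most one such batch is being built at a time. A hedged sketch of the throttling shape (illustrative names, not the Hadoop API):

    import java.util.ArrayList;
    import java.util.List;

    class ReportThrottle {
      private final Object reportLock = new Object();

      // Only one allocation-heavy build at a time, bounding peak heap.
      List<int[]> buildReports(int tasks) {
        synchronized (reportLock) {
          List<int[]> reports = new ArrayList<int[]>();
          for (int i = 0; i < tasks; i++) {
            reports.add(new int[1024]); // stands in for a counters snapshot
          }
          return reports;
        }
      }
    }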

+ 10 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java

@@ -22,9 +22,9 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@@ -44,7 +44,15 @@ public interface Job {
   String getName();
   JobState getState();
   JobReport getReport();
-  Counters getCounters();
+
+  /**
+   * Get all the counters of this job. This includes job-counters aggregated
+   * together with the counters of each task. This creates a clone of the
+   * Counters, so use this judiciously.  
+   * @return job-counters and aggregate task-counters
+   */
+  Counters getAllCounters();
+
   Map<TaskId,Task> getTasks();
   Map<TaskId,Task> getTasks(TaskType taskType);
   Task getTask(TaskId taskID);

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Task.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.mapreduce.v2.app.job;
 
 import java.util.Map;
 
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.mapreduce.v2.app.job;
 
 import java.util.List;
 
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;

+ 1 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptStatusUpdateEvent.java

@@ -20,12 +20,11 @@ package org.apache.hadoop.mapreduce.v2.app.job.event;
 
 import java.util.List;
 
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.Phase;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
 
-
 public class TaskAttemptStatusUpdateEvent extends TaskAttemptEvent {
 
   private TaskAttemptStatus reportedTaskAttemptStatus;

+ 29 - 76
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobACLsManager;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -61,9 +62,6 @@ import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
 import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
 import org.apache.hadoop.mapreduce.task.JobContextImpl;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@@ -99,7 +97,6 @@ import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.event.EventHandler;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
@@ -109,10 +106,13 @@ import org.apache.hadoop.yarn.state.StateMachineFactory;
 /** Implementation of Job interface. Maintains the state machines of Job.
  * The read and write calls use ReadWriteLock for concurrency.
  */
-@SuppressWarnings({ "rawtypes", "deprecation" })
+@SuppressWarnings({ "rawtypes", "deprecation", "unchecked" })
 public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, 
   EventHandler<JobEvent> {
 
+  private static final TaskAttemptCompletionEvent[]
+    EMPTY_TASK_ATTEMPT_COMPLETION_EVENTS = new TaskAttemptCompletionEvent[0];
+
   private static final Log LOG = LogFactory.getLog(JobImpl.class);
 
   //The maximum fraction of fetch failures allowed for a map
@@ -152,7 +152,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
 
   private boolean lazyTasksCopyNeeded = false;
   volatile Map<TaskId, Task> tasks = new LinkedHashMap<TaskId, Task>();
-  private Counters jobCounters = newCounters();
+  private Counters jobCounters = new Counters();
     // FIXME:  
     //
     // Can then replace task-level uber counters (MR-2424) with job-level ones
@@ -475,88 +475,29 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
   }
 
   @Override
-  public Counters getCounters() {
-    Counters counters = newCounters();
+  public Counters getAllCounters() {
+    Counters counters = new Counters();
     readLock.lock();
     try {
-      incrAllCounters(counters, jobCounters);
+      counters.incrAllCounters(jobCounters);
       return incrTaskCounters(counters, tasks.values());
     } finally {
       readLock.unlock();
     }
   }
 
-  private Counters getTypeCounters(Set<TaskId> taskIds) {
-    Counters counters = newCounters();
-    for (TaskId taskId : taskIds) {
-      Task task = tasks.get(taskId);
-      incrAllCounters(counters, task.getCounters());
-    }
-    return counters;
-  }
-
-  private Counters getMapCounters() {
-    readLock.lock();
-    try {
-      return getTypeCounters(mapTasks);
-    } finally {
-      readLock.unlock();
-    }
-  }
-  
-  private Counters getReduceCounters() {
-    readLock.lock();
-    try {
-      return getTypeCounters(reduceTasks);
-    } finally {
-      readLock.unlock();
-    }
-  }
-  
-  public static Counters newCounters() {
-    Counters counters = RecordFactoryProvider.getRecordFactory(null)
-        .newRecordInstance(Counters.class);
-    return counters;
-  }
-
-  public static Counters incrTaskCounters(Counters counters,
-                                          Collection<Task> tasks) {
+  public static Counters incrTaskCounters(
+      Counters counters, Collection<Task> tasks) {
     for (Task task : tasks) {
-      incrAllCounters(counters, task.getCounters());
+      counters.incrAllCounters(task.getCounters());
     }
     return counters;
   }
 
-  public static void incrAllCounters(Counters counters, Counters other) {
-    if (other != null) {
-      for (CounterGroup otherGroup: other.getAllCounterGroups().values()) {
-        CounterGroup group = counters.getCounterGroup(otherGroup.getName());
-        if (group == null) {
-          group = RecordFactoryProvider.getRecordFactory(null)
-              .newRecordInstance(CounterGroup.class);
-          group.setName(otherGroup.getName());
-          counters.setCounterGroup(group.getName(), group);
-        }
-        group.setDisplayName(otherGroup.getDisplayName());
-        for (Counter otherCounter : otherGroup.getAllCounters().values()) {
-          Counter counter = group.getCounter(otherCounter.getName());
-          if (counter == null) {
-            counter = RecordFactoryProvider.getRecordFactory(null)
-                .newRecordInstance(Counter.class);
-            counter.setName(otherCounter.getName());
-            group.setCounter(counter.getName(), counter);
-          }
-          counter.setDisplayName(otherCounter.getDisplayName());
-          counter.setValue(counter.getValue() + otherCounter.getValue());
-        }
-      }
-    }
-  }
-
   @Override
   public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
       int fromEventId, int maxEvents) {
-    TaskAttemptCompletionEvent[] events = new TaskAttemptCompletionEvent[0];
+    TaskAttemptCompletionEvent[] events = EMPTY_TASK_ATTEMPT_COMPLETION_EVENTS;
     readLock.lock();
     try {
       if (taskAttemptCompletionEvents.size() > fromEventId) {
@@ -1204,13 +1145,24 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
   // area. May need to create a new event type for this if JobFinished should 
   // not be generated for KilledJobs, etc.
   private static JobFinishedEvent createJobFinishedEvent(JobImpl job) {
+
+    Counters mapCounters = new Counters();
+    Counters reduceCounters = new Counters();
+    for (Task t : job.tasks.values()) {
+      Counters counters = t.getCounters();
+      switch (t.getType()) {
+        case MAP:     mapCounters.incrAllCounters(counters);     break;
+        case REDUCE:  reduceCounters.incrAllCounters(counters);  break;
+      }
+    }
+
     JobFinishedEvent jfe = new JobFinishedEvent(
         job.oldJobId, job.finishTime,
         job.succeededMapTaskCount, job.succeededReduceTaskCount,
         job.failedMapTaskCount, job.failedReduceTaskCount,
-        TypeConverter.fromYarn(job.getMapCounters()),
-        TypeConverter.fromYarn(job.getReduceCounters()),
-        TypeConverter.fromYarn(job.getCounters()));
+        mapCounters,
+        reduceCounters,
+        job.getAllCounters());
     return jfe;
   }
 
@@ -1450,7 +1402,8 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
       JobCounterUpdateEvent jce = (JobCounterUpdateEvent) event;
       for (JobCounterUpdateEvent.CounterIncrementalUpdate ci : jce
           .getCounterUpdates()) {
-        job.jobCounters.incrCounter(ci.getCounterKey(), ci.getIncrementValue());
+        job.jobCounters.findCounter(ci.getCounterKey()).increment(
+          ci.getIncrementValue());
       }
     }
   }

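The JobImpl hunks above retire the hand-rolled counter helpers (newCounters, incrAllCounters, getMapCounters/getReduceCounters) in favor of the stock org.apache.hadoop.mapreduce.Counters API, which already provides merging via incrAllCounters and lookup-with-create via findCounter. A minimal sketch of the two idioms the patch switches to, assuming only the public Counters API:

    import org.apache.hadoop.mapreduce.Counters;
    import org.apache.hadoop.mapreduce.JobCounter;

    public class CounterMergeSketch {
      public static void main(String[] args) {
        // Merge idiom used by createJobFinishedEvent: fold each task's
        // counters into a per-type aggregate.
        Counters mapCounters = new Counters();
        Counters fromOneTask = new Counters();
        fromOneTask.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).increment(1);
        mapCounters.incrAllCounters(fromOneTask);

        // Lookup idiom used by the JobCounterUpdateEvent transition:
        // findCounter creates the counter on first access, then increment.
        mapCounters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).increment(2);
        System.out.println(
            mapCounters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue()); // 3
      }
    }
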
+ 15 - 15
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java

@@ -47,6 +47,8 @@ import org.apache.hadoop.mapred.Task;
 import org.apache.hadoop.mapred.TaskAttemptContextImpl;
 import org.apache.hadoop.mapred.WrappedJvmID;
 import org.apache.hadoop.mapred.WrappedProgressSplitsBlock;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobCounter;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.OutputCommitter;
@@ -60,8 +62,6 @@ import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptStartedEvent;
 import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletionEvent;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.Phase;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
@@ -132,6 +132,7 @@ public abstract class TaskAttemptImpl implements
     org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt,
       EventHandler<TaskAttemptEvent> {
 
+  static final Counters EMPTY_COUNTERS = new Counters();
   private static final Log LOG = LogFactory.getLog(TaskAttemptImpl.class);
   private static final long MEMORY_SPLITS_RESOLUTION = 1024; //TODO Make configurable?
   private static final int MAP_MEMORY_MB_DEFAULT = 1024;
@@ -846,7 +847,7 @@ public abstract class TaskAttemptImpl implements
       result.setDiagnosticInfo(StringUtils.join(LINE_SEPARATOR, getDiagnostics()));
       result.setPhase(reportedStatus.phase);
       result.setStateString(reportedStatus.stateString);
-      result.setCounters(getCounters());
+      result.setCounters(TypeConverter.toYarn(getCounters()));
       result.setContainerId(this.getAssignedContainerID());
       result.setNodeManagerHost(trackerName);
       result.setNodeManagerHttpPort(httpPort);
@@ -877,7 +878,7 @@ public abstract class TaskAttemptImpl implements
     try {
       Counters counters = reportedStatus.counters;
       if (counters == null) {
-        counters = recordFactory.newRecordInstance(Counters.class);
+        counters = EMPTY_COUNTERS;
 //        counters.groups = new HashMap<String, CounterGroup>();
       }
       return counters;
@@ -1031,22 +1032,21 @@ public abstract class TaskAttemptImpl implements
             (int) (now - start));
       }
 
-      Counter cpuCounter = counters.getCounter(
-          TaskCounter.CPU_MILLISECONDS);
+      Counter cpuCounter = counters.findCounter(TaskCounter.CPU_MILLISECONDS);
       if (cpuCounter != null && cpuCounter.getValue() <= Integer.MAX_VALUE) {
         splitsBlock.getProgressCPUTime().extend(newProgress,
-            (int) cpuCounter.getValue());
+            (int) cpuCounter.getValue()); // narrowing long to int; TODO: fix. Same for the memory counters below
       }
 
-      Counter virtualBytes = counters.getCounter(
-          TaskCounter.VIRTUAL_MEMORY_BYTES);
+      Counter virtualBytes = counters
+        .findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES);
       if (virtualBytes != null) {
         splitsBlock.getProgressVirtualMemoryKbytes().extend(newProgress,
             (int) (virtualBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
       }
 
-      Counter physicalBytes = counters.getCounter(
-          TaskCounter.PHYSICAL_MEMORY_BYTES);
+      Counter physicalBytes = counters
+        .findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES);
       if (physicalBytes != null) {
         splitsBlock.getProgressPhysicalMemoryKbytes().extend(newProgress,
             (int) (physicalBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
@@ -1343,7 +1343,7 @@ public abstract class TaskAttemptImpl implements
          this.containerNodeId == null ? -1 : this.containerNodeId.getPort(),
          this.nodeRackName == null ? "UNKNOWN" : this.nodeRackName,
          this.reportedStatus.stateString,
-         TypeConverter.fromYarn(getCounters()),
+         getCounters(),
          getProgressSplitBlock().burst());
          eventHandler.handle(
            new JobHistoryEvent(attemptId.getTaskId().getJobId(), mfe));
@@ -1360,7 +1360,7 @@ public abstract class TaskAttemptImpl implements
          this.containerNodeId == null ? -1 : this.containerNodeId.getPort(),
          this.nodeRackName == null ? "UNKNOWN" : this.nodeRackName,
          this.reportedStatus.stateString,
-         TypeConverter.fromYarn(getCounters()),
+         getCounters(),
          getProgressSplitBlock().burst());
          eventHandler.handle(
            new JobHistoryEvent(attemptId.getTaskId().getJobId(), rfe));
@@ -1498,8 +1498,8 @@ public abstract class TaskAttemptImpl implements
     result.phase = Phase.STARTING;
     result.stateString = "NEW";
     result.taskState = TaskAttemptState.NEW;
-    Counters counters = recordFactory.newRecordInstance(Counters.class);
-//    counters.groups = new HashMap<String, CounterGroup>();
+    Counters counters = EMPTY_COUNTERS;
+    //    counters.groups = new HashMap<String, CounterGroup>();
     result.counters = counters;
   }
 

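Two small points in the TaskAttemptImpl hunks deserve a note: attempts with no reported counters now share the single static EMPTY_COUNTERS instance rather than allocating a fresh record per call (which relies on callers treating it as read-only), and the memory counters are down-sampled from bytes to kilobytes by MEMORY_SPLITS_RESOLUTION before the narrowing cast to int. A worked example of that arithmetic, using the constant value 1024 from the hunk above:

    public class SplitResolutionSketch {
      // bytes per kilobyte, mirroring MEMORY_SPLITS_RESOLUTION above
      static final long MEMORY_SPLITS_RESOLUTION = 1024;

      public static void main(String[] args) {
        long physicalBytes = 3L * 1024 * 1024 * 1024; // a task reporting 3 GB
        int kb = (int) (physicalBytes / MEMORY_SPLITS_RESOLUTION);
        System.out.println(kb); // 3145728 KB, comfortably inside int range
      }
    }

The CPU counter, by contrast, is cast directly, which is exactly what the narrowing TODO in the hunk flags.
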
+ 8 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java

@@ -33,6 +33,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
@@ -40,7 +41,6 @@ import org.apache.hadoop.mapreduce.jobhistory.TaskFailedEvent;
 import org.apache.hadoop.mapreduce.jobhistory.TaskFinishedEvent;
 import org.apache.hadoop.mapreduce.jobhistory.TaskStartedEvent;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
@@ -329,7 +329,6 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
       report.setFinishTime(getFinishTime());
       report.setTaskState(getState());
       report.setProgress(getProgress());
-      report.setCounters(getCounters());
 
       for (TaskAttempt attempt : attempts.values()) {
         if (TaskAttemptState.RUNNING.equals(attempt.getState())) {
@@ -346,6 +345,11 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
           
         }
       }
+
+      // Add a copy of counters as the last step so that their lifetime on heap
+      // is as small as possible.
+      report.setCounters(TypeConverter.toYarn(getCounters()));
+
       return report;
     } finally {
       readLock.unlock();
@@ -361,7 +365,7 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
       if (bestAttempt != null) {
         counters = bestAttempt.getCounters();
       } else {
-        counters = recordFactory.newRecordInstance(Counters.class);
+        counters = TaskAttemptImpl.EMPTY_COUNTERS;
 //        counters.groups = new HashMap<CharSequence, CounterGroup>();
       }
       return counters;
@@ -595,7 +599,7 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
         task.getFinishTime(task.successfulAttempt),
         TypeConverter.fromYarn(task.taskId.getTaskType()),
         taskState.toString(),
-        TypeConverter.fromYarn(task.getCounters()));
+        task.getCounters());
     return tfe;
   }
   

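The getReport() change is driven by the new in-line comment: the yarn-records copy produced by TypeConverter.toYarn(getCounters()) is comparatively heavy, so it is now materialized as the last step instead of the first, shrinking the window in which the temporary copy is reachable. A generic illustration of the ordering principle (all names below are invented for the sketch):

    class ReportOrderingSketch {
      static final class Report {
        float progress;
        String state;
        long[] countersSnapshot; // stand-in for a heavy converted structure
      }

      // Populate cheap scalar fields first; allocate the expensive copy last,
      // so the large temporary becomes garbage as soon as the report is consumed.
      static Report buildReport(float progress, String state, long[] rawCounters) {
        Report r = new Report();
        r.progress = progress;
        r.state = state;
        r.countersSnapshot = rawCounters.clone();
        return r;
      }
    }
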
+ 229 - 167
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java

@@ -26,6 +26,7 @@ import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
@@ -44,8 +45,6 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunched
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
-import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
-import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -75,16 +74,217 @@ public class ContainerLauncherImpl extends AbstractService implements
 
   int nmTimeOut;
 
+  private ConcurrentHashMap<ContainerId, Container> containers = 
+    new ConcurrentHashMap<ContainerId, Container>(); 
   private AppContext context;
-  private ThreadPoolExecutor launcherPool;
-  private static final int INITIAL_POOL_SIZE = 10;
+  protected ThreadPoolExecutor launcherPool;
+  protected static final int INITIAL_POOL_SIZE = 10;
   private int limitOnPoolSize;
   private Thread eventHandlingThread;
-  private BlockingQueue<ContainerLauncherEvent> eventQueue =
+  protected BlockingQueue<ContainerLauncherEvent> eventQueue =
       new LinkedBlockingQueue<ContainerLauncherEvent>();
   final Timer commandTimer = new Timer(true);
   YarnRPC rpc;
 
+  private Container getContainer(ContainerId id) {
+    Container c = containers.get(id);
+    if(c == null) {
+      c = new Container();
+      Container old = containers.putIfAbsent(id, c);
+      if(old != null) {
+        c = old;
+      }
+    }
+    return c;
+  }
+  
+  private void removeContainerIfDone(ContainerId id) {
+    Container c = containers.get(id);
+    if(c != null && c.isCompletelyDone()) {
+      containers.remove(id);
+    }
+  }
+  
+  private static enum ContainerState {
+    PREP, FAILED, RUNNING, DONE, KILLED_BEFORE_LAUNCH
+  }
+
+  private class Container {
+    private ContainerState state;
+    
+    public Container() {
+      this.state = ContainerState.PREP;
+    }
+    
+    public synchronized boolean isCompletelyDone() {
+      return state == ContainerState.DONE || state == ContainerState.FAILED;
+    }
+    
+    @SuppressWarnings("unchecked")
+    public synchronized void launch(ContainerRemoteLaunchEvent event) {
+      TaskAttemptId taskAttemptID = event.getTaskAttemptID();
+      LOG.info("Launching " + taskAttemptID);
+      if(this.state == ContainerState.KILLED_BEFORE_LAUNCH) {
+        state = ContainerState.DONE;
+        sendContainerLaunchFailedMsg(taskAttemptID, 
+            "Container was killed before it was launched");
+        return;
+      }
+      CommandTimerTask timerTask = new CommandTimerTask(Thread
+          .currentThread(), event);
+      
+      final String containerManagerBindAddr = event.getContainerMgrAddress();
+      ContainerId containerID = event.getContainerID();
+      ContainerToken containerToken = event.getContainerToken();
+
+      ContainerManager proxy = null;
+      try {
+        commandTimer.schedule(timerTask, nmTimeOut);
+
+        proxy = getCMProxy(containerID, containerManagerBindAddr,
+            containerToken);
+
+        // Interrupted during getProxy, but that didn't throw an exception
+        if (Thread.interrupted()) {
+          // The timer canceled the command in the meantime.
+          String message = "Container launch failed for " + containerID
+              + " : Start-container for " + event.getContainerID()
+              + " got interrupted. Returning.";
+          this.state = ContainerState.FAILED;
+          sendContainerLaunchFailedMsg(taskAttemptID, message);
+          return;
+        }
+        // Construct the actual Container
+        ContainerLaunchContext containerLaunchContext =
+          event.getContainer();
+
+        // Now launch the actual container
+        StartContainerRequest startRequest = Records
+          .newRecord(StartContainerRequest.class);
+        startRequest.setContainerLaunchContext(containerLaunchContext);
+        StartContainerResponse response = proxy.startContainer(startRequest);
+
+        // container started properly. Stop the timer
+        timerTask.cancel();
+        if (Thread.interrupted()) {
+          // The timer canceled the command in the meantime, but
+          // startContainer didn't throw an exception
+          String message = "Container launch failed for " + containerID
+              + " : Start-container for " + event.getContainerID()
+              + " got interrupted. Returning.";
+          this.state = ContainerState.FAILED;
+          sendContainerLaunchFailedMsg(taskAttemptID, message);
+          return;
+        }
+
+        ByteBuffer portInfo = response
+          .getServiceResponse(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID);
+        int port = -1;
+        if(portInfo != null) {
+          port = ShuffleHandler.deserializeMetaData(portInfo);
+        }
+        LOG.info("Shuffle port returned by ContainerManager for "
+            + taskAttemptID + " : " + port);
+
+        if(port < 0) {
+          this.state = ContainerState.FAILED;
+          throw new IllegalStateException("Invalid shuffle port number "
+              + port + " returned for " + taskAttemptID);
+        }
+
+        // after launching, send launched event to task attempt to move
+        // it from ASSIGNED to RUNNING state
+        context.getEventHandler().handle(
+            new TaskAttemptContainerLaunchedEvent(taskAttemptID, port));
+        this.state = ContainerState.RUNNING;
+      } catch (Throwable t) {
+        if (Thread.interrupted()) {
+          // The timer canceled the command in the meantime.
+          LOG.info("Start-container for " + event.getContainerID()
+              + " got interrupted.");
+        }
+        String message = "Container launch failed for " + containerID + " : "
+            + StringUtils.stringifyException(t);
+        this.state = ContainerState.FAILED;
+        sendContainerLaunchFailedMsg(taskAttemptID, message);
+      } finally {
+        timerTask.cancel();
+        if (proxy != null) {
+          ContainerLauncherImpl.this.rpc.stopProxy(proxy, getConfig());
+        }
+      }
+    }
+    
+    @SuppressWarnings("unchecked")
+    public synchronized void kill(ContainerLauncherEvent event) {
+      if(this.state == ContainerState.PREP) {
+        this.state = ContainerState.KILLED_BEFORE_LAUNCH;
+      } else {
+        CommandTimerTask timerTask = new CommandTimerTask(Thread
+            .currentThread(), event);
+
+        final String containerManagerBindAddr = event.getContainerMgrAddress();
+        ContainerId containerID = event.getContainerID();
+        ContainerToken containerToken = event.getContainerToken();
+        TaskAttemptId taskAttemptID = event.getTaskAttemptID();
+        LOG.info("KILLING " + taskAttemptID);
+        commandTimer.schedule(timerTask, nmTimeOut);
+
+        ContainerManager proxy = null;
+        try {
+          proxy = getCMProxy(containerID, containerManagerBindAddr,
+              containerToken);
+
+          if (Thread.interrupted()) {
+            // The timer canceled the command in the meantime. No need to
+            // return; send the cleaned-up event anyway.
+            LOG.info("Stop-container for " + event.getContainerID()
+                + " got interrupted.");
+          } else {
+            // kill the remote container if already launched
+            StopContainerRequest stopRequest = Records
+              .newRecord(StopContainerRequest.class);
+            stopRequest.setContainerId(event.getContainerID());
+            proxy.stopContainer(stopRequest);
+          }
+        } catch (Throwable t) {
+
+          if (Thread.interrupted()) {
+            // The timer canceled the command in the meantime; clear the
+            // interrupt flag.
+            LOG.info("Stop-container for " + event.getContainerID()
+                + " got interrupted.");
+          }
+
+          // ignore the cleanup failure
+          String message = "cleanup failed for container "
+            + event.getContainerID() + " : "
+            + StringUtils.stringifyException(t);
+          context.getEventHandler().handle(
+            new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID, message));
+          LOG.warn(message);
+        } finally {
+          timerTask.cancel();
+          if (Thread.interrupted()) {
+            LOG.info("Stop-container for " + event.getContainerID()
+                + " got interrupted.");
+            // ignore the cleanup failure
+            context.getEventHandler().handle(
+              new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID,
+                "cleanup failed for container " + event.getContainerID()));
+          }
+          if (proxy != null) {
+            ContainerLauncherImpl.this.rpc.stopProxy(proxy, getConfig());
+          }
+        }
+        this.state = ContainerState.DONE;
+      }
+      // after killing, send killed event to task attempt
+      context.getEventHandler().handle(
+          new TaskAttemptEvent(event.getTaskAttemptID(),
+              TaskAttemptEventType.TA_CONTAINER_CLEANED));
+    }
+  }
   // To track numNodes.
   Set<String> allNodes = new HashSet<String>();
 
@@ -102,11 +302,16 @@ public class ContainerLauncherImpl extends AbstractService implements
     this.limitOnPoolSize = conf.getInt(
         MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT,
         MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT);
+    LOG.info("Upper limit on the thread pool size is " + this.limitOnPoolSize);
     this.nmTimeOut = conf.getInt(ContainerLauncher.MR_AM_NM_COMMAND_TIMEOUT,
         ContainerLauncher.DEFAULT_NM_COMMAND_TIMEOUT);
-    this.rpc = YarnRPC.create(conf);
+    this.rpc = createYarnRPC(conf);
     super.init(conf);
   }
+  
+  protected YarnRPC createYarnRPC(Configuration conf) {
+    return YarnRPC.create(conf);
+  }
 
   public void start() {
 
@@ -118,7 +323,7 @@ public class ContainerLauncherImpl extends AbstractService implements
         Integer.MAX_VALUE, 1, TimeUnit.HOURS,
         new LinkedBlockingQueue<Runnable>(),
         tf);
-    eventHandlingThread = new Thread(new Runnable() {
+    eventHandlingThread = new Thread() {
       @Override
       public void run() {
         ContainerLauncherEvent event = null;
@@ -141,26 +346,27 @@ public class ContainerLauncherImpl extends AbstractService implements
             int numNodes = allNodes.size();
             int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
 
-            if (poolSize <= idealPoolSize) {
+            if (poolSize < idealPoolSize) {
              // Bump up the pool size to idealPoolSize + INITIAL_POOL_SIZE
              // (capped at limitOnPoolSize); the latter is just a buffer so
              // we are not always increasing the pool-size
-              int newPoolSize = idealPoolSize + INITIAL_POOL_SIZE;
-              LOG.info("Setting ContainerLauncher pool size to "
-                  + newPoolSize);
+              int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize
+                  + INITIAL_POOL_SIZE);
+              LOG.info("Setting ContainerLauncher pool size to " + newPoolSize
+                  + " as number-of-nodes to talk to is " + numNodes);
               launcherPool.setCorePoolSize(newPoolSize);
             }
           }
 
           // the events from the queue are handled in parallel
           // using a thread pool
-          launcherPool.execute(new EventProcessor(event));
+          launcherPool.execute(createEventProcessor(event));
 
           // TODO: Group launching of multiple containers to a single
           // NodeManager into a single connection
         }
       }
-    });
+    };
     eventHandlingThread.setName("ContainerLauncher Event Handler");
     eventHandlingThread.start();
     super.start();
@@ -172,14 +378,16 @@ public class ContainerLauncherImpl extends AbstractService implements
     super.stop();
   }
 
+  protected EventProcessor createEventProcessor(ContainerLauncherEvent event) {
+    return new EventProcessor(event);
+  }
+
   protected ContainerManager getCMProxy(ContainerId containerID,
       final String containerManagerBindAddr, ContainerToken containerToken)
       throws IOException {
 
     UserGroupInformation user = UserGroupInformation.getCurrentUser();
 
-    this.allNodes.add(containerManagerBindAddr);
-
     if (UserGroupInformation.isSecurityEnabled()) {
       Token<ContainerTokenIdentifier> token = new Token<ContainerTokenIdentifier>(
           containerToken.getIdentifier().array(), containerToken
@@ -244,182 +452,35 @@ public class ContainerLauncherImpl extends AbstractService implements
   /**
    * Setup and start the container on remote nodemanager.
    */
-  private class EventProcessor implements Runnable {
+  class EventProcessor implements Runnable {
     private ContainerLauncherEvent event;
 
     EventProcessor(ContainerLauncherEvent event) {
       this.event = event;
     }
 
-    @SuppressWarnings("unchecked")
     @Override
     public void run() {
       LOG.info("Processing the event " + event.toString());
 
       // Load ContainerManager tokens before creating a connection.
       // TODO: Do it only once per NodeManager.
-      final String containerManagerBindAddr = event.getContainerMgrAddress();
       ContainerId containerID = event.getContainerID();
-      ContainerToken containerToken = event.getContainerToken();
-      TaskAttemptId taskAttemptID = event.getTaskAttemptID();
-
-      ContainerManager proxy = null;
-
-      CommandTimerTask timerTask = new CommandTimerTask(Thread
-          .currentThread(), event);
 
+      Container c = getContainer(containerID);
       switch(event.getType()) {
 
       case CONTAINER_REMOTE_LAUNCH:
         ContainerRemoteLaunchEvent launchEvent
             = (ContainerRemoteLaunchEvent) event;
-
-        try {
-          commandTimer.schedule(timerTask, nmTimeOut);
-
-          proxy = getCMProxy(containerID, containerManagerBindAddr,
-              containerToken);
-
-          // Interruped during getProxy, but that didn't throw exception
-          if (Thread.interrupted()) {
-            // The timer cancelled the command in the mean while.
-            String message = "Container launch failed for " + containerID
-                + " : Start-container for " + event.getContainerID()
-                + " got interrupted. Returning.";
-            sendContainerLaunchFailedMsg(taskAttemptID, message);
-            return;
-          }
-
-          // Construct the actual Container
-          ContainerLaunchContext containerLaunchContext =
-              launchEvent.getContainer();
-
-          // Now launch the actual container
-          StartContainerRequest startRequest = Records
-              .newRecord(StartContainerRequest.class);
-          startRequest.setContainerLaunchContext(containerLaunchContext);
-          StartContainerResponse response = proxy.startContainer(startRequest);
-
-          // container started properly. Stop the timer
-          timerTask.cancel();
-          if (Thread.interrupted()) {
-            // The timer cancelled the command in the mean while, but
-            // startContainer didn't throw exception
-            String message = "Container launch failed for " + containerID
-                + " : Start-container for " + event.getContainerID()
-                + " got interrupted. Returning.";
-            sendContainerLaunchFailedMsg(taskAttemptID, message);
-            return;
-          }
-
-          ByteBuffer portInfo = response
-              .getServiceResponse(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID);
-          int port = -1;
-          if(portInfo != null) {
-            port = ShuffleHandler.deserializeMetaData(portInfo);
-          }
-          LOG.info("Shuffle port returned by ContainerManager for "
-              + taskAttemptID + " : " + port);
-          
-          if(port < 0) {
-            throw new IllegalStateException("Invalid shuffle port number "
-                + port + " returned for " + taskAttemptID);
-          }
-
-          // after launching, send launched event to task attempt to move
-          // it from ASSIGNED to RUNNING state
-          context.getEventHandler().handle(
-              new TaskAttemptContainerLaunchedEvent(taskAttemptID, port));
-        } catch (Throwable t) {
-          if (Thread.interrupted()) {
-            // The timer cancelled the command in the mean while.
-            LOG.info("Start-container for " + event.getContainerID()
-                + " got interrupted.");
-          }
-          String message = "Container launch failed for " + containerID
-              + " : " + StringUtils.stringifyException(t);
-          sendContainerLaunchFailedMsg(taskAttemptID, message);
-        } finally {
-          timerTask.cancel();
-          if (proxy != null) {
-            ContainerLauncherImpl.this.rpc.stopProxy(proxy, getConfig());
-          }
-        }
-
+        c.launch(launchEvent);
         break;
 
       case CONTAINER_REMOTE_CLEANUP:
-        // We will have to remove the launch event if it is still in eventQueue
-        // and not yet processed
-        if (eventQueue.contains(event)) {
-          eventQueue.remove(event); // TODO: Any synchro needed?
-          //deallocate the container
-          context.getEventHandler().handle(
-              new ContainerAllocatorEvent(taskAttemptID,
-                  ContainerAllocator.EventType.CONTAINER_DEALLOCATE));
-        } else {
-
-          try {
-            commandTimer.schedule(timerTask, nmTimeOut);
-
-            proxy = getCMProxy(containerID, containerManagerBindAddr,
-                containerToken);
-
-            if (Thread.interrupted()) {
-              // The timer cancelled the command in the mean while. No need to
-              // return, send cleanedup event anyways.
-              LOG.info("Stop-container for " + event.getContainerID()
-                  + " got interrupted.");
-            } else {
-
-              // TODO:check whether container is launched
-
-              // kill the remote container if already launched
-              StopContainerRequest stopRequest = Records
-                  .newRecord(StopContainerRequest.class);
-              stopRequest.setContainerId(event.getContainerID());
-              proxy.stopContainer(stopRequest);
-            }
-          } catch (Throwable t) {
-
-            if (Thread.interrupted()) {
-              // The timer cancelled the command in the mean while, clear the
-              // interrupt flag
-              LOG.info("Stop-container for " + event.getContainerID()
-                  + " got interrupted.");
-            }
-
-            // ignore the cleanup failure
-            String message = "cleanup failed for container "
-                + event.getContainerID() + " : "
-                + StringUtils.stringifyException(t);
-            context.getEventHandler()
-                .handle(
-                    new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID,
-                        message));
-            LOG.warn(message);
-          } finally {
-            timerTask.cancel();
-            if (Thread.interrupted()) {
-              LOG.info("Stop-container for " + event.getContainerID()
-                  + " got interrupted.");
-              // ignore the cleanup failure
-              context.getEventHandler()
-                  .handle(new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID,
-                    "cleanup failed for container " + event.getContainerID()));
-            }
-            if (proxy != null) {
-              ContainerLauncherImpl.this.rpc.stopProxy(proxy, getConfig());
-            }
-          }
-
-          // after killing, send killed event to taskattempt
-          context.getEventHandler().handle(
-              new TaskAttemptEvent(event.getTaskAttemptID(),
-                  TaskAttemptEventType.TA_CONTAINER_CLEANED));
-        }
+        c.kill(event);
         break;
       }
+      removeContainerIfDone(containerID);
     }
   }
 
@@ -438,6 +499,7 @@ public class ContainerLauncherImpl extends AbstractService implements
   public void handle(ContainerLauncherEvent event) {
     try {
       eventQueue.put(event);
+      this.allNodes.add(event.getContainerMgrAddress());
     } catch (InterruptedException e) {
       throw new YarnException(e);
     }

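The launcher rewrite replaces the two long inline try/catch blocks with a per-container state machine (PREP, RUNNING, DONE, plus FAILED and KILLED_BEFORE_LAUNCH), so a kill that arrives before the launch has run simply flips the state under the Container lock instead of racing the launch thread. The containers map uses the standard ConcurrentHashMap get-or-create idiom; a self-contained sketch of that pattern (the String/StringBuilder types here are placeholders, not from the patch):

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class GetOrCreateSketch {
      private final ConcurrentMap<String, StringBuilder> map =
          new ConcurrentHashMap<String, StringBuilder>();

      // Returns the existing value for the key, or atomically installs a new
      // one. Mirrors ContainerLauncherImpl.getContainer: the loser of the
      // putIfAbsent race discards its candidate and adopts the winner's.
      StringBuilder getOrCreate(String key) {
        StringBuilder candidate = map.get(key);
        if (candidate == null) {
          candidate = new StringBuilder();
          StringBuilder winner = map.putIfAbsent(key, candidate);
          if (winner != null) {
            candidate = winner;
          }
        }
        return candidate;
      }
    }

The pool-sizing tweak in start() also reads more easily with numbers: with limitOnPoolSize of, say, 500 and 30 distinct node addresses seen so far, idealPoolSize = min(500, 30) = 30, and once the live pool reaches 30 the core size is bumped to min(500, 30 + INITIAL_POOL_SIZE) = 40, so the extra 10 threads act as a buffer against resizing on every new node.
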
+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerRemoteLaunchEvent.java

@@ -46,6 +46,11 @@ public class ContainerRemoteLaunchEvent extends ContainerLauncherEvent {
   public Task getRemoteTask() {
     return this.task;
   }
+  
+  @Override
+  public int hashCode() {
+    return super.hashCode();
+  }
 
   @Override
   public boolean equals(Object obj) {

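The hashCode() that merely delegates to super looks redundant, but it makes the Java contract explicit (and quiets static-analysis warnings): a class that overrides equals, as ContainerRemoteLaunchEvent does just below, must keep hashCode consistent with it. A minimal, patch-independent illustration of the contract:

    class Point {
      final int x, y;
      Point(int x, int y) { this.x = x; this.y = y; }

      @Override
      public boolean equals(Object o) {
        if (!(o instanceof Point)) return false;
        Point p = (Point) o;
        return x == p.x && y == p.y;
      }

      // Equal objects must report equal hash codes, or HashMap/HashSet
      // lookups on them silently break.
      @Override
      public int hashCode() {
        return 31 * x + y;
      }
    }
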
+ 1 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java

@@ -92,7 +92,6 @@ import org.apache.hadoop.yarn.util.ConverterUtils;
 
 //TODO:
 //task cleanup for all non completed tasks
-
 public class RecoveryService extends CompositeService implements Recovery {
 
   private static final Log LOG = LogFactory.getLog(RecoveryService.class);
@@ -411,8 +410,7 @@ public class RecoveryService extends CompositeService implements Recovery {
       if (cntrs == null) {
         taskAttemptStatus.counters = null;
       } else {
-        taskAttemptStatus.counters = TypeConverter.toYarn(attemptInfo
-            .getCounters());
+        taskAttemptStatus.counters = cntrs;
       }
       actualHandler.handle(new TaskAttemptStatusUpdateEvent(
           taskAttemptStatus.id, taskAttemptStatus));

+ 18 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java

@@ -33,6 +33,7 @@ import javax.ws.rs.core.Response.Status;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
@@ -42,6 +43,8 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AppInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptsInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobCounterInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo;
@@ -210,6 +213,21 @@ public class AMWebServices {
     return new JobInfo(job, hasAccess(job, hsr));
   }
 
+  @GET
+  @Path("/jobs/{jobid}/jobattempts")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public AMAttemptsInfo getJobAttempts(@PathParam("jobid") String jid) {
+
+    Job job = getJobFromJobIdString(jid, appCtx);
+    AMAttemptsInfo amAttempts = new AMAttemptsInfo();
+    for (AMInfo amInfo : job.getAMInfos()) {
+      AMAttemptInfo attempt = new AMAttemptInfo(amInfo, MRApps.toString(
+            job.getID()), job.getUserName());
+      amAttempts.add(attempt);
+    }
+    return amAttempts;
+  }
+
   @GET
   @Path("/jobs/{jobid}/counters")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })

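The new GET jobs/{jobid}/jobattempts resource serves the AM attempt list as JSON or XML, with the payload shape dictated by the AMAttemptsInfo/AMAttemptInfo JAXB annotations later in this change. A hedged fetch sketch; the host, port, the ws/v1/mapreduce root, and the job id are all placeholder assumptions about the deployed AM web service, not values from the patch:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class JobAttemptsFetchSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder address and job id; substitute the real AM web address.
        URL url = new URL("http://am-host:8080/ws/v1/mapreduce"
            + "/jobs/job_1234567890123_0001/jobattempts");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json");
        BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), "UTF-8"));
        // Expected rough shape, per the JAXB annotations:
        // {"jobAttempts":{"jobAttempt":[{"id":1,"startTime":...,
        //   "nodeHttpAddress":"...","nodeId":"...","containerId":"...",
        //   "logsLink":"..."}]}}
        for (String line; (line = in.readLine()) != null; ) {
          System.out.println(line);
        }
        in.close();
        conn.disconnect();
      }
    }
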
+ 27 - 21
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java

@@ -18,25 +18,32 @@
 
 package org.apache.hadoop.mapreduce.v2.app.webapp;
 
-import com.google.inject.Inject;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_TABLE;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
+
 import java.util.Map;
 
-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.CounterGroup;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
-import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TD;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.THEAD;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
-import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+import com.google.inject.Inject;
 
 public class CountersBlock extends HtmlBlock {
   Job job;
@@ -62,8 +69,7 @@ public class CountersBlock extends HtmlBlock {
       return;
     }
     
-    if(total == null || total.getAllCounterGroups() == null || 
-        total.getAllCounterGroups().size() <= 0) {
+    if(total == null || total.getGroupNames() == null) {
       String type = $(TASK_ID);
       if(type == null || type.isEmpty()) {
         type = $(JOB_ID, "the job");
@@ -93,9 +99,9 @@ public class CountersBlock extends HtmlBlock {
             th(".group.ui-state-default", "Counter Group").
             th(".ui-state-default", "Counters")._()._().
         tbody();
-    for (CounterGroup g : total.getAllCounterGroups().values()) {
-      CounterGroup mg = map == null ? null : map.getCounterGroup(g.getName());
-      CounterGroup rg = reduce == null ? null : reduce.getCounterGroup(g.getName());
+    for (CounterGroup g : total) {
+      CounterGroup mg = map == null ? null : map.getGroup(g.getName());
+      CounterGroup rg = reduce == null ? null : reduce.getGroup(g.getName());
       ++numGroups;
       // This is mostly for demonstration :) Typically we'd introduce
       // a CounterGroup block to reduce the verbosity. OTOH, this
@@ -116,7 +122,7 @@ public class CountersBlock extends HtmlBlock {
       TBODY<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>> group = groupHeadRow.
             th(map == null ? "Value" : "Total")._()._().
         tbody();
-      for (Counter counter : g.getAllCounters().values()) {
+      for (Counter counter : g) {
         // Ditto
         TR<TBODY<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>>> groupRow = group.
           tr();
@@ -130,8 +136,8 @@ public class CountersBlock extends HtmlBlock {
             _();
           }
         if (map != null) {
-          Counter mc = mg == null ? null : mg.getCounter(counter.getName());
-          Counter rc = rg == null ? null : rg.getCounter(counter.getName());
+          Counter mc = mg == null ? null : mg.findCounter(counter.getName());
+          Counter rc = rg == null ? null : rg.findCounter(counter.getName());
           groupRow.
             td(mc == null ? "0" : String.valueOf(mc.getValue())).
             td(rc == null ? "0" : String.valueOf(rc.getValue()));
@@ -173,14 +179,14 @@ public class CountersBlock extends HtmlBlock {
     }
     // Get all types of counters
     Map<TaskId, Task> tasks = job.getTasks();
-    total = job.getCounters();
-    map = JobImpl.newCounters();
-    reduce = JobImpl.newCounters();
+    total = job.getAllCounters();
+    map = new Counters();
+    reduce = new Counters();
     for (Task t : tasks.values()) {
       Counters counters = t.getCounters();
       switch (t.getType()) {
-        case MAP:     JobImpl.incrAllCounters(map, counters);     break;
-        case REDUCE:  JobImpl.incrAllCounters(reduce, counters);  break;
+        case MAP:     map.incrAllCounters(counters);     break;
+        case REDUCE:  reduce.incrAllCounters(counters);  break;
       }
     }
   }

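The web-block rewrites lean on the fact that the org.apache.hadoop.mapreduce Counters and CounterGroup types are Iterable, so the old getAllCounterGroups().values() plumbing collapses into for-each loops with getGroup/findCounter lookups. A compact sketch of the iteration idiom, assuming only the stock API:

    import org.apache.hadoop.mapreduce.Counter;
    import org.apache.hadoop.mapreduce.CounterGroup;
    import org.apache.hadoop.mapreduce.Counters;
    import org.apache.hadoop.mapreduce.TaskCounter;

    public class CounterIterationSketch {
      public static void main(String[] args) {
        Counters counters = new Counters();
        counters.findCounter(TaskCounter.MAP_INPUT_RECORDS).increment(42);

        for (CounterGroup group : counters) {   // Counters is Iterable<CounterGroup>
          System.out.println(group.getName());
          for (Counter c : group) {             // CounterGroup is Iterable<Counter>
            System.out.println("  " + c.getName() + " = " + c.getValue());
          }
        }
      }
    }
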
+ 12 - 10
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java

@@ -30,6 +30,8 @@ import javax.ws.rs.ext.ContextResolver;
 import javax.ws.rs.ext.Provider;
 import javax.xml.bind.JAXBContext;
 
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptsInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AppInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo;
@@ -54,22 +56,22 @@ public class JAXBContextResolver implements ContextResolver<JAXBContext> {
 
   private JAXBContext context;
   private final Set<Class> types;
-    
+
   // you have to specify all the dao classes here
-  private final Class[] cTypes = {AppInfo.class, CounterInfo.class,
-      JobTaskAttemptCounterInfo.class, JobTaskCounterInfo.class,
-      TaskCounterGroupInfo.class, ConfInfo.class, JobCounterInfo.class,
-      TaskCounterInfo.class, CounterGroupInfo.class, JobInfo.class, 
-      JobsInfo.class, ReduceTaskAttemptInfo.class, TaskAttemptInfo.class,
-      TaskInfo.class, TasksInfo.class, TaskAttemptsInfo.class,
-      ConfEntryInfo.class};
-    
+  private final Class[] cTypes = {AMAttemptInfo.class, AMAttemptsInfo.class,
+    AppInfo.class, CounterInfo.class, JobTaskAttemptCounterInfo.class,
+    JobTaskCounterInfo.class, TaskCounterGroupInfo.class, ConfInfo.class,
+    JobCounterInfo.class, TaskCounterInfo.class, CounterGroupInfo.class,
+    JobInfo.class, JobsInfo.class, ReduceTaskAttemptInfo.class,
+    TaskAttemptInfo.class, TaskInfo.class, TasksInfo.class,
+    TaskAttemptsInfo.class, ConfEntryInfo.class};
+
   public JAXBContextResolver() throws Exception {
     this.types = new HashSet<Class>(Arrays.asList(cTypes));
     this.context = new JSONJAXBContext(JSONConfiguration.natural().
         rootUnwrapping(false).build(), cTypes);
   }
-    
+
   @Override
   public JAXBContext getContext(Class<?> objectType) {
     return (types.contains(objectType)) ? context : null;

+ 46 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.v2.app.webapp;
 
 import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID;
 import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD;
@@ -28,14 +29,22 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
 
 import java.util.Date;
+import java.util.List;
 
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -62,6 +71,11 @@ public class JobBlock extends HtmlBlock {
         p()._("Sorry, ", jid, " not found.")._();
       return;
     }
+
+    List<AMInfo> amInfos = job.getAMInfos();
+    String amString =
+        amInfos.size() == 1 ? "ApplicationMaster" : "ApplicationMasters"; 
+
     JobInfo jinfo = new JobInfo(job, true);
     info("Job Overview").
         _("Job Name:", jinfo.getName()).
@@ -69,10 +83,40 @@ public class JobBlock extends HtmlBlock {
         _("Uberized:", jinfo.isUberized()).
         _("Started:", new Date(jinfo.getStartTime())).
         _("Elapsed:", StringUtils.formatTime(jinfo.getElapsedTime()));
-    html.
+    DIV<Hamlet> div = html.
       _(InfoBlock.class).
-      div(_INFO_WRAP).
+      div(_INFO_WRAP);
+
+    // MRAppMasters Table
+    TABLE<DIV<Hamlet>> table = div.table("#job");
+    table.
+      tr().
+      th(amString).
+      _().
+      tr().
+      th(_TH, "Attempt Number").
+      th(_TH, "Start Time").
+      th(_TH, "Node").
+      th(_TH, "Logs").
+      _();
+    for (AMInfo amInfo : amInfos) {
+      AMAttemptInfo attempt = new AMAttemptInfo(amInfo,
+          jinfo.getId(), jinfo.getUserName());
+
+      table.tr().
+        td(String.valueOf(attempt.getAttemptId())).
+        td(new Date(attempt.getStartTime()).toString()).
+        td().a(".nodelink", url("http://", attempt.getNodeHttpAddress()), 
+            attempt.getNodeHttpAddress())._().
+        td().a(".logslink", url(attempt.getLogsLink()), 
+            "logs")._().
+        _();
+    }
+
+    table._();
+    div._();
 
+    html.div(_INFO_WRAP).        
       // Tasks table
         table("#job").
           tr().

+ 15 - 11
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterBlock.java

@@ -18,13 +18,18 @@
 
 package org.apache.hadoop.mapreduce.v2.app.webapp;
 
-import com.google.inject.Inject;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.COUNTER_GROUP;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.COUNTER_NAME;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
+
 import java.util.Map;
 import java.util.TreeMap;
 
-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.CounterGroup;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
@@ -40,8 +45,7 @@ import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
-import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+import com.google.inject.Inject;
 
 public class SingleCounterBlock extends HtmlBlock {
   protected TreeMap<String, Long> values = new TreeMap<String, Long>(); 
@@ -122,10 +126,10 @@ public class SingleCounterBlock extends HtmlBlock {
         task.getAttempts().entrySet()) {
         long value = 0;
         Counters counters = entry.getValue().getCounters();
-        CounterGroup group = (counters != null)
-        		? counters.getCounterGroup($(COUNTER_GROUP)) : null;
+        CounterGroup group = (counters != null) ? counters
+          .getGroup($(COUNTER_GROUP)) : null;
         if(group != null)  {
-          Counter c = group.getCounter($(COUNTER_NAME));
+          Counter c = group.findCounter($(COUNTER_NAME));
           if(c != null) {
             value = c.getValue();
           }
@@ -140,9 +144,9 @@ public class SingleCounterBlock extends HtmlBlock {
     for(Map.Entry<TaskId, Task> entry : tasks.entrySet()) {
       long value = 0;
       CounterGroup group = entry.getValue().getCounters()
-      .getCounterGroup($(COUNTER_GROUP));
+        .getGroup($(COUNTER_GROUP));
       if(group != null)  {
-        Counter c = group.getCounter($(COUNTER_NAME));
+        Counter c = group.findCounter($(COUNTER_NAME));
         if(c != null) {
           value = c.getValue();
         }

+ 95 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptInfo.java

@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.app.webapp.dao;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+
+@XmlRootElement(name = "jobAttempt")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class AMAttemptInfo {
+
+  protected String nodeHttpAddress;
+  protected String nodeId;
+  protected int id;
+  protected long startTime;
+  protected String containerId;
+  protected String logsLink;
+
+  public AMAttemptInfo() {
+  }
+
+  public AMAttemptInfo(AMInfo amInfo, String jobId, String user) {
+
+    this.nodeHttpAddress = "";
+    this.nodeId = "";
+    String nmHost = amInfo.getNodeManagerHost();
+    int nmHttpPort = amInfo.getNodeManagerHttpPort();
+    int nmPort = amInfo.getNodeManagerPort();
+    if (nmHost != null) {
+      this.nodeHttpAddress = nmHost + ":" + nmHttpPort;
+      NodeId nodeId = BuilderUtils.newNodeId(nmHost, nmPort);
+      this.nodeId = nodeId.toString();
+    }
+
+    this.id = amInfo.getAppAttemptId().getAttemptId();
+    this.startTime = amInfo.getStartTime();
+    this.containerId = "";
+    this.logsLink = "";
+    ContainerId containerId = amInfo.getContainerId();
+    if (containerId != null) {
+      this.containerId = containerId.toString();
+      this.logsLink = join("http://" + nodeHttpAddress,
+          ujoin("node", "containerlogs", this.containerId));
+    }
+  }
+
+  public String getNodeHttpAddress() {
+    return this.nodeHttpAddress;
+  }
+
+  public String getNodeId() {
+    return this.nodeId;
+  }
+
+  public int getAttemptId() {
+    return this.id;
+  }
+
+  public long getStartTime() {
+    return this.startTime;
+  }
+
+  public String getContainerId() {
+    return this.containerId;
+  }
+
+  public String getLogsLink() {
+    return this.logsLink;
+  }
+
+}

+ 45 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptsInfo.java

@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.app.webapp.dao;
+
+import java.util.ArrayList;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement(name = "jobAttempts")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class AMAttemptsInfo {
+
+  @XmlElement(name = "jobAttempt")
+  protected ArrayList<AMAttemptInfo> attempt = new ArrayList<AMAttemptInfo>();
+
+  public AMAttemptsInfo() {
+  } // JAXB needs this
+
+  public void add(AMAttemptInfo info) {
+    this.attempt.add(info);
+  }
+
+  public ArrayList<AMAttemptInfo> getAttempts() {
+    return this.attempt;
+  }
+
+}

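AMAttemptsInfo exists purely so JAXB can emit a named list: @XmlRootElement supplies the jobAttempts envelope and the @XmlElement on the list names each entry jobAttempt. A marshalling sketch, assuming both DAO classes are on the classpath (the output in the comment is the expected rough shape, not captured output):

    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.Marshaller;

    import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptInfo;
    import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptsInfo;

    public class AMAttemptsMarshalSketch {
      public static void main(String[] args) throws Exception {
        AMAttemptsInfo attempts = new AMAttemptsInfo();
        attempts.add(new AMAttemptInfo()); // default-constructed placeholder entry

        Marshaller m = JAXBContext
            .newInstance(AMAttemptsInfo.class, AMAttemptInfo.class)
            .createMarshaller();
        m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
        // Roughly: <jobAttempts><jobAttempt>...</jobAttempt></jobAttempts>
        m.marshal(attempts, System.out);
      }
    }
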
+ 6 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterGroupInfo.java

@@ -24,8 +24,8 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
 
-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.CounterGroup;
 
 @XmlRootElement(name = "counterGroup")
 @XmlAccessorType(XmlAccessType.FIELD)
@@ -38,14 +38,14 @@ public class CounterGroupInfo {
   public CounterGroupInfo() {
   }
 
-  public CounterGroupInfo(String name, CounterGroup g, CounterGroup mg,
+  public CounterGroupInfo(String name, CounterGroup group, CounterGroup mg,
       CounterGroup rg) {
     this.counterGroupName = name;
     this.counter = new ArrayList<CounterInfo>();
 
-    for (Counter c : g.getAllCounters().values()) {
-      Counter mc = mg == null ? null : mg.getCounter(c.getName());
-      Counter rc = rg == null ? null : rg.getCounter(c.getName());
+    for (Counter c : group) {
+      Counter mc = mg == null ? null : mg.findCounter(c.getName());
+      Counter rc = rg == null ? null : rg.findCounter(c.getName());
       CounterInfo cinfo = new CounterInfo(c, mc, rc);
       this.counter.add(cinfo);
     }

+ 4 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterInfo.java

@@ -21,7 +21,7 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
 
-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
+import org.apache.hadoop.mapreduce.Counter;
 
 @XmlRootElement
 @XmlAccessorType(XmlAccessType.FIELD)
@@ -35,9 +35,9 @@ public class CounterInfo {
   public CounterInfo() {
   }
 
-  public CounterInfo(Counter counter, Counter mc, Counter rc) {
-    this.name = counter.getName();
-    this.totalCounterValue = counter.getValue();
+  public CounterInfo(Counter c, Counter mc, Counter rc) {
+    this.name = c.getName();
+    this.totalCounterValue = c.getValue();
     this.mapCounterValue = mc == null ? 0 : mc.getValue();
     this.reduceCounterValue = rc == null ? 0 : rc.getValue();
   }

+ 14 - 18
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobCounterInfo.java

@@ -25,13 +25,12 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.XmlTransient;
 
-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.CounterGroup;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
-import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 
 @XmlRootElement(name = "jobCounters")
@@ -56,18 +55,15 @@ public class JobCounterInfo {
     counterGroup = new ArrayList<CounterGroupInfo>();
     this.id = MRApps.toString(job.getID());
 
-    int numGroups = 0;
-
     if (total != null) {
-      for (CounterGroup g : total.getAllCounterGroups().values()) {
+      for (CounterGroup g : total) {
         if (g != null) {
-          CounterGroup mg = map == null ? null : map.getCounterGroup(g
-              .getName());
-          CounterGroup rg = reduce == null ? null : reduce.getCounterGroup(g
-              .getName());
-          ++numGroups;
+          CounterGroup mg = map == null ? null : map.getGroup(g.getName());
+          CounterGroup rg = reduce == null ? null : reduce
+            .getGroup(g.getName());
 
-          CounterGroupInfo cginfo = new CounterGroupInfo(g.getName(), g, mg, rg);
+          CounterGroupInfo cginfo = new CounterGroupInfo(g.getName(), g,
+            mg, rg);
           counterGroup.add(cginfo);
         }
       }
@@ -75,23 +71,23 @@ public class JobCounterInfo {
   }
 
   private void getCounters(AppContext ctx, Job job) {
-    total = JobImpl.newCounters();
+    total = new Counters();
     if (job == null) {
       return;
     }
-    map = JobImpl.newCounters();
-    reduce = JobImpl.newCounters();
+    map = new Counters();
+    reduce = new Counters();
     // Get all types of counters
     Map<TaskId, Task> tasks = job.getTasks();
     for (Task t : tasks.values()) {
       Counters counters = t.getCounters();
-      JobImpl.incrAllCounters(total, counters);
+      total.incrAllCounters(counters);
       switch (t.getType()) {
       case MAP:
-        JobImpl.incrAllCounters(map, counters);
+        map.incrAllCounters(counters);
         break;
       case REDUCE:
-        JobImpl.incrAllCounters(reduce, counters);
+        reduce.incrAllCounters(counters);
         break;
       }
     }

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobInfo.java

@@ -214,7 +214,7 @@ public class JobInfo {
     return this.state.toString();
   }
 
-  public String getUser() {
+  public String getUserName() {
     return this.user;
   }
 

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskAttemptCounterInfo.java

@@ -25,8 +25,8 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.XmlTransient;
 
-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.CounterGroup;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 
@@ -49,7 +49,7 @@ public class JobTaskAttemptCounterInfo {
     total = taskattempt.getCounters();
     taskAttemptCounterGroup = new ArrayList<TaskCounterGroupInfo>();
     if (total != null) {
-      for (CounterGroup g : total.getAllCounterGroups().values()) {
+      for (CounterGroup g : total) {
         if (g != null) {
           TaskCounterGroupInfo cginfo = new TaskCounterGroupInfo(g.getName(), g);
           if (cginfo != null) {

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskCounterInfo.java

@@ -25,8 +25,8 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.XmlTransient;
 
-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.CounterGroup;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 
@@ -48,7 +48,7 @@ public class JobTaskCounterInfo {
     this.id = MRApps.toString(task.getID());
     taskCounterGroup = new ArrayList<TaskCounterGroupInfo>();
     if (total != null) {
-      for (CounterGroup g : total.getAllCounterGroups().values()) {
+      for (CounterGroup g : total) {
         if (g != null) {
           TaskCounterGroupInfo cginfo = new TaskCounterGroupInfo(g.getName(), g);
           taskCounterGroup.add(cginfo);

+ 4 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskCounterGroupInfo.java

@@ -24,8 +24,8 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
 
-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.CounterGroup;
 
 @XmlRootElement
 @XmlAccessorType(XmlAccessType.FIELD)
@@ -37,11 +37,11 @@ public class TaskCounterGroupInfo {
   public TaskCounterGroupInfo() {
   }
 
-  public TaskCounterGroupInfo(String name, CounterGroup g) {
+  public TaskCounterGroupInfo(String name, CounterGroup group) {
     this.counterGroupName = name;
     this.counter = new ArrayList<TaskCounterInfo>();
 
-    for (Counter c : g.getAllCounters().values()) {
+    for (Counter c : group) {
       TaskCounterInfo cinfo = new TaskCounterInfo(c.getName(), c.getValue());
       this.counter.add(cinfo);
     }

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java

@@ -43,7 +43,7 @@ public class TestTaskAttemptListenerImpl {
     }
     
     @Override
-    protected void registerHeartbeatHandler() {
+    protected void registerHeartbeatHandler(Configuration conf) {
       //Empty
     }
 

+ 17 - 12
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.JobACLsManager;
 import org.apache.hadoop.mapred.ShuffleHandler;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.FileSystemCounter;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.JobCounter;
@@ -37,7 +38,6 @@ import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.TaskCounter;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@@ -144,7 +144,7 @@ public class MockJobs extends MockApps {
     report.setFinishTime(System.currentTimeMillis()
         + (int) (Math.random() * DT) + 1);
     report.setProgress((float) Math.random());
-    report.setCounters(newCounters());
+    report.setCounters(TypeConverter.toYarn(newCounters()));
     report.setTaskState(TASK_STATES.next());
     return report;
   }
@@ -159,13 +159,12 @@ public class MockJobs extends MockApps {
     report.setPhase(PHASES.next());
     report.setTaskAttemptState(TASK_ATTEMPT_STATES.next());
     report.setProgress((float) Math.random());
-    report.setCounters(newCounters());
+    report.setCounters(TypeConverter.toYarn(newCounters()));
     return report;
   }
 
-  @SuppressWarnings("deprecation")
   public static Counters newCounters() {
-    org.apache.hadoop.mapred.Counters hc = new org.apache.hadoop.mapred.Counters();
+    Counters hc = new Counters();
     for (JobCounter c : JobCounter.values()) {
       hc.findCounter(c).setValue((long) (Math.random() * 1000));
     }
@@ -183,7 +182,7 @@ public class MockJobs extends MockApps {
       hc.findCounter(USER_COUNTER_GROUPS.next(), USER_COUNTERS.next())
           .setValue((long) (Math.random() * 100000));
     }
-    return TypeConverter.toYarn(hc);
+    return hc;
   }
 
   public static Map<TaskAttemptId, TaskAttempt> newTaskAttempts(TaskId tid,
@@ -231,7 +230,10 @@ public class MockJobs extends MockApps {
 
       @Override
       public Counters getCounters() {
-        return report.getCounters();
+        if (report != null && report.getCounters() != null) {
+          return new Counters(TypeConverter.fromYarn(report.getCounters()));
+        }
+        return null;
       }
 
       @Override
@@ -327,7 +329,8 @@ public class MockJobs extends MockApps {
 
       @Override
       public Counters getCounters() {
-        return report.getCounters();
+        return new Counters(
+          TypeConverter.fromYarn(report.getCounters()));
       }
 
       @Override
@@ -373,8 +376,9 @@ public class MockJobs extends MockApps {
     };
   }
 
-  public static Counters getCounters(Collection<Task> tasks) {
-    Counters counters = JobImpl.newCounters();
+  public static Counters getCounters(
+      Collection<Task> tasks) {
+    Counters counters = new Counters();
     return JobImpl.incrTaskCounters(counters, tasks);
   }
 
@@ -419,7 +423,8 @@ public class MockJobs extends MockApps {
     final JobReport report = newJobReport(id);
     final Map<TaskId, Task> tasks = newTasks(id, n, m);
     final TaskCount taskCount = getTaskCount(tasks.values());
-    final Counters counters = getCounters(tasks.values());
+    final Counters counters = getCounters(tasks
+      .values());
     final Path configFile = confFile;
 
     Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
@@ -457,7 +462,7 @@ public class MockJobs extends MockApps {
       }
 
       @Override
-      public Counters getCounters() {
+      public Counters getAllCounters() {
         return counters;
       }
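
MockJobs now keeps the yarn-protocol counter records inside the reports while the Job/Task API hands out org.apache.hadoop.mapreduce.Counters, so values cross the boundary through TypeConverter. A minimal sketch of that round trip (illustrative class, not part of the patch):

```java
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TypeConverter;

public class CounterRoundTripSketch {
  public static Counters roundTrip(Counters in) {
    // API type -> protocol record, as stored in the mocked reports
    org.apache.hadoop.mapreduce.v2.api.records.Counters yarn =
        TypeConverter.toYarn(in);
    // protocol record -> mapred.Counters -> back to the new API type,
    // mirroring "new Counters(TypeConverter.fromYarn(report.getCounters()))"
    return new Counters(TypeConverter.fromYarn(yarn));
  }
}
```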
 

+ 0 - 140
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestContainerLauncher.java

@@ -1,140 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.hadoop.mapreduce.v2.app;
-
-import java.io.IOException;
-import java.util.Map;
-
-import junit.framework.Assert;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.v2.api.records.JobState;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
-import org.apache.hadoop.mapreduce.v2.app.job.Job;
-import org.apache.hadoop.mapreduce.v2.app.job.Task;
-import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
-import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
-import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl;
-import org.apache.hadoop.yarn.api.ContainerManager;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerToken;
-import org.junit.Test;
-
-public class TestContainerLauncher {
-
-  static final Log LOG = LogFactory
-      .getLog(TestContainerLauncher.class);
-
-  @Test
-  public void testSlowNM() throws Exception {
-    test(false);
-  }
-
-  @Test
-  public void testSlowNMWithInterruptsSwallowed() throws Exception {
-    test(true);
-  }
-
-  private void test(boolean swallowInterrupts) throws Exception {
-
-    MRApp app = new MRAppWithSlowNM(swallowInterrupts);
-
-    Configuration conf = new Configuration();
-    int maxAttempts = 1;
-    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
-    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
-
-    // Set low timeout for NM commands
-    conf.setInt(ContainerLauncher.MR_AM_NM_COMMAND_TIMEOUT, 3000);
-
-    Job job = app.submit(conf);
-    app.waitForState(job, JobState.RUNNING);
-
-    Map<TaskId, Task> tasks = job.getTasks();
-    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
-
-    Task task = tasks.values().iterator().next();
-    app.waitForState(task, TaskState.SCHEDULED);
-
-    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator()
-        .next().getAttempts();
-    Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts
-        .size());
-
-    TaskAttempt attempt = attempts.values().iterator().next();
-    app.waitForState(attempt, TaskAttemptState.ASSIGNED);
-
-    app.waitForState(job, JobState.FAILED);
-
-    String diagnostics = attempt.getDiagnostics().toString();
-    LOG.info("attempt.getDiagnostics: " + diagnostics);
-    if (swallowInterrupts) {
-      Assert.assertEquals("[Container launch failed for "
-          + "container_0_0000_01_000000 : Start-container for "
-          + "container_0_0000_01_000000 got interrupted. Returning.]",
-          diagnostics);
-    } else {
-      Assert.assertTrue(diagnostics.contains("Container launch failed for "
-          + "container_0_0000_01_000000 : "));
-      Assert.assertTrue(diagnostics
-          .contains(": java.lang.InterruptedException"));
-    }
-
-    app.stop();
-  }
-
-  private static class MRAppWithSlowNM extends MRApp {
-
-    final boolean swallowInterrupts;
-
-    public MRAppWithSlowNM(boolean swallowInterrupts) {
-      super(1, 0, false, "TestContainerLauncher", true);
-      this.swallowInterrupts = swallowInterrupts;
-    }
-
-    @Override
-    protected ContainerLauncher createContainerLauncher(AppContext context) {
-      return new ContainerLauncherImpl(context) {
-        @Override
-        protected ContainerManager getCMProxy(ContainerId containerID,
-            String containerManagerBindAddr, ContainerToken containerToken)
-            throws IOException {
-          try {
-            synchronized (this) {
-              wait(); // Just hang the thread simulating a very slow NM.
-            }
-          } catch (InterruptedException e) {
-            LOG.info(e);
-            if (!MRAppWithSlowNM.this.swallowInterrupts) {
-              throw new IOException(e);
-            }
-            Thread.currentThread().interrupt();
-          }
-          return null;
-        }
-      };
-    };
-  }
-}

+ 31 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.mapreduce.v2.app;
 
+import java.net.Proxy;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
@@ -71,6 +73,34 @@ public class TestJobEndNotifier extends JobEndNotifier {
       waitInterval == 5);
   }
 
+  private void testProxyConfiguration(Configuration conf) {
+    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "somehost");
+    setConf(conf);
+    Assert.assertTrue("Proxy shouldn't be set because port wasn't specified",
+      proxyToUse.type() == Proxy.Type.DIRECT);
+    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "somehost:someport");
+    setConf(conf);
+    Assert.assertTrue("Proxy shouldn't be set because port wasn't numeric",
+      proxyToUse.type() == Proxy.Type.DIRECT);
+    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "somehost:1000");
+    setConf(conf);
+    Assert.assertTrue("Proxy should have been set but wasn't ",
+      proxyToUse.toString().equals("HTTP @ somehost:1000"));
+    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "socks@somehost:1000");
+    setConf(conf);
+    Assert.assertTrue("Proxy should have been socks but wasn't ",
+      proxyToUse.toString().equals("SOCKS @ somehost:1000"));
+    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "SOCKS@somehost:1000");
+    setConf(conf);
+    Assert.assertTrue("Proxy should have been socks but wasn't ",
+      proxyToUse.toString().equals("SOCKS @ somehost:1000"));
+    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "sfafn@somehost:1000");
+    setConf(conf);
+    Assert.assertTrue("Proxy should have been http but wasn't ",
+      proxyToUse.toString().equals("HTTP @ somehost:1000"));
+    
+  }
+
   /**
    * Test that setting parameters has the desired effect
    */
@@ -79,6 +109,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
     Configuration conf = new Configuration();
     testNumRetries(conf);
     testWaitInterval(conf);
+    testProxyConfiguration(conf);
   }
 
   protected int notificationCount = 0;
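
The assertions above pin down how the new MR_JOB_END_NOTIFICATION_PROXY value is parsed: a missing or non-numeric port leaves the proxy DIRECT, a socks@ prefix (case-insensitive) selects SOCKS, and any other prefix falls back to HTTP. A hedged sketch of parsing logic consistent with those assertions; parseProxy is a hypothetical stand-in for what JobEndNotifier.setConf() does internally:

```java
import java.net.InetSocketAddress;
import java.net.Proxy;

public class ProxyParseSketch {
  public static Proxy parseProxy(String spec) {
    if (spec == null || spec.isEmpty()) {
      return Proxy.NO_PROXY;                 // Proxy.Type.DIRECT
    }
    Proxy.Type type = Proxy.Type.HTTP;       // unknown schemes fall back to HTTP
    String hostPort = spec;
    int at = spec.indexOf('@');
    if (at >= 0) {
      if ("socks".equalsIgnoreCase(spec.substring(0, at))) {
        type = Proxy.Type.SOCKS;
      }
      hostPort = spec.substring(at + 1);
    }
    int colon = hostPort.lastIndexOf(':');
    if (colon < 0) {
      return Proxy.NO_PROXY;                 // no port specified: stay DIRECT
    }
    try {
      int port = Integer.parseInt(hostPort.substring(colon + 1));
      return new Proxy(type,
          new InetSocketAddress(hostPort.substring(0, colon), port));
    } catch (NumberFormatException e) {
      return Proxy.NO_PROXY;                 // non-numeric port: stay DIRECT
    }
  }
}
```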

+ 7 - 8
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java

@@ -18,9 +18,6 @@
 
 package org.apache.hadoop.mapreduce.v2.app;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -30,11 +27,14 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@@ -46,13 +46,12 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
-import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
-import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
 import org.apache.hadoop.mapreduce.v2.app.speculate.DefaultSpeculator;
 import org.apache.hadoop.mapreduce.v2.app.speculate.ExponentiallySmoothedTaskRuntimeEstimator;
 import org.apache.hadoop.mapreduce.v2.app.speculate.LegacyTaskRuntimeEstimator;
@@ -74,7 +73,7 @@ import org.apache.hadoop.yarn.service.CompositeService;
 import org.junit.Assert;
 import org.junit.Test;
 
-
+@SuppressWarnings({"unchecked", "rawtypes"})
 public class TestRuntimeEstimators {
 
   private static int INITIAL_NUMBER_FREE_SLOTS = 600;
@@ -399,7 +398,7 @@ public class TestRuntimeEstimators {
     }
 
     @Override
-    public Counters getCounters() {
+    public Counters getAllCounters() {
       throw new UnsupportedOperationException("Not supported yet.");
     }
 

+ 321 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java

@@ -0,0 +1,321 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.launcher;
+
+import static org.mockito.Mockito.mock;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.MRApp;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerToken;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.junit.Test;
+
+public class TestContainerLauncher {
+
+  static final Log LOG = LogFactory
+      .getLog(TestContainerLauncher.class);
+
+  @Test
+  public void testPoolSize() throws InterruptedException {
+
+    ApplicationId appId = BuilderUtils.newApplicationId(12345, 67);
+    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
+      appId, 3);
+    JobId jobId = MRBuilderUtils.newJobId(appId, 8);
+    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
+    TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
+    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 10);
+
+    AppContext context = mock(AppContext.class);
+    CustomContainerLauncher containerLauncher = new CustomContainerLauncher(
+      context);
+    containerLauncher.init(new Configuration());
+    containerLauncher.start();
+
+    ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();
+
+    // No events yet
+    Assert.assertEquals(0, threadPool.getPoolSize());
+    Assert.assertEquals(ContainerLauncherImpl.INITIAL_POOL_SIZE,
+      threadPool.getCorePoolSize());
+    Assert.assertNull(containerLauncher.foundErrors);
+
+    containerLauncher.expectedCorePoolSize = ContainerLauncherImpl.INITIAL_POOL_SIZE;
+    for (int i = 0; i < 10; i++) {
+      containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
+        containerId, "host" + i + ":1234", null,
+        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
+    }
+    waitForEvents(containerLauncher, 10);
+    Assert.assertEquals(10, threadPool.getPoolSize());
+    Assert.assertNull(containerLauncher.foundErrors);
+
+    // Same set of hosts, so no change
+    containerLauncher.expectedCorePoolSize = ContainerLauncherImpl.INITIAL_POOL_SIZE;
+    containerLauncher.finishEventHandling = true;
+    for (int i = 0; i < 10; i++) {
+      containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
+        containerId, "host" + i + ":1234", null,
+        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
+    }
+    waitForEvents(containerLauncher, 20);
+    Assert.assertEquals(10, threadPool.getPoolSize());
+    Assert.assertNull(containerLauncher.foundErrors);
+
+    // Different hosts, so there should be an increase in the core-thread-pool
+    // size to 22 (12 hosts + 10 buffer).
+    // Core pool size should be 22 but the live pool size should be only 12.
+    containerLauncher.expectedCorePoolSize = 12 + ContainerLauncherImpl.INITIAL_POOL_SIZE;
+    for (int i = 1; i <= 2; i++) {
+      containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
+        containerId, "host1" + i + ":1234", null,
+        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
+    }
+    waitForEvents(containerLauncher, 22);
+    Assert.assertEquals(12, threadPool.getPoolSize());
+    Assert.assertNull(containerLauncher.foundErrors);
+
+    containerLauncher.stop();
+  }
+
+  @Test
+  public void testPoolLimits() throws InterruptedException {
+    ApplicationId appId = BuilderUtils.newApplicationId(12345, 67);
+    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
+      appId, 3);
+    JobId jobId = MRBuilderUtils.newJobId(appId, 8);
+    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
+    TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
+    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 10);
+
+    AppContext context = mock(AppContext.class);
+    CustomContainerLauncher containerLauncher = new CustomContainerLauncher(
+      context);
+    Configuration conf = new Configuration();
+    conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, 12);
+    containerLauncher.init(conf);
+    containerLauncher.start();
+
+    ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();
+
+    // 10 different hosts
+    containerLauncher.expectedCorePoolSize = ContainerLauncherImpl.INITIAL_POOL_SIZE;
+    for (int i = 0; i < 10; i++) {
+      containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
+        containerId, "host" + i + ":1234", null,
+        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
+    }
+    waitForEvents(containerLauncher, 10);
+    Assert.assertEquals(10, threadPool.getPoolSize());
+    Assert.assertNull(containerLauncher.foundErrors);
+
+    // 4 more different hosts, but thread pool size should be capped at 12
+    containerLauncher.expectedCorePoolSize = 12;
+    for (int i = 1; i <= 4; i++) {
+      containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
+        containerId, "host1" + i + ":1234", null,
+        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
+    }
+    waitForEvents(containerLauncher, 12);
+    Assert.assertEquals(12, threadPool.getPoolSize());
+    Assert.assertNull(containerLauncher.foundErrors);
+
+    // Make some threads idle so that the remaining events also get processed.
+    containerLauncher.finishEventHandling = true;
+    waitForEvents(containerLauncher, 14);
+    Assert.assertEquals(12, threadPool.getPoolSize());
+    Assert.assertNull(containerLauncher.foundErrors);
+
+    containerLauncher.stop();
+  }
+
+  private void waitForEvents(CustomContainerLauncher containerLauncher,
+      int expectedNumEvents) throws InterruptedException {
+    int timeOut = 0;
+    while (expectedNumEvents != containerLauncher.numEventsProcessed
+        && timeOut++ < 20) {
+      LOG.info("Waiting for number of events to become " + expectedNumEvents
+          + ". It is now " + containerLauncher.numEventsProcessed);
+      Thread.sleep(1000);
+    }
+    Assert
+      .assertEquals(expectedNumEvents, containerLauncher.numEventsProcessed);
+  }
+
+  @Test
+  public void testSlowNM() throws Exception {
+    test(false);
+  }
+
+  @Test
+  public void testSlowNMWithInterruptsSwallowed() throws Exception {
+    test(true);
+  }
+
+  private void test(boolean swallowInterrupts) throws Exception {
+
+    MRApp app = new MRAppWithSlowNM(swallowInterrupts);
+
+    Configuration conf = new Configuration();
+    int maxAttempts = 1;
+    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
+    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+
+    // Set low timeout for NM commands
+    conf.setInt(ContainerLauncher.MR_AM_NM_COMMAND_TIMEOUT, 3000);
+
+    Job job = app.submit(conf);
+    app.waitForState(job, JobState.RUNNING);
+
+    Map<TaskId, Task> tasks = job.getTasks();
+    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
+
+    Task task = tasks.values().iterator().next();
+    app.waitForState(task, TaskState.SCHEDULED);
+
+    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator()
+        .next().getAttempts();
+    Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts
+        .size());
+
+    TaskAttempt attempt = attempts.values().iterator().next();
+    app.waitForState(attempt, TaskAttemptState.ASSIGNED);
+
+    app.waitForState(job, JobState.FAILED);
+
+    String diagnostics = attempt.getDiagnostics().toString();
+    LOG.info("attempt.getDiagnostics: " + diagnostics);
+    if (swallowInterrupts) {
+      Assert.assertEquals("[Container launch failed for "
+          + "container_0_0000_01_000000 : Start-container for "
+          + "container_0_0000_01_000000 got interrupted. Returning.]",
+          diagnostics);
+    } else {
+      Assert.assertTrue(diagnostics.contains("Container launch failed for "
+          + "container_0_0000_01_000000 : "));
+      Assert.assertTrue(diagnostics
+          .contains(": java.lang.InterruptedException"));
+    }
+
+    app.stop();
+  }
+
+  private final class CustomContainerLauncher extends ContainerLauncherImpl {
+
+    private volatile int expectedCorePoolSize = 0;
+    private volatile int numEventsProcessed = 0;
+    private volatile String foundErrors = null;
+    private volatile boolean finishEventHandling;
+    private CustomContainerLauncher(AppContext context) {
+      super(context);
+    }
+
+    public ThreadPoolExecutor getThreadPool() {
+      return super.launcherPool;
+    }
+
+    protected ContainerLauncherImpl.EventProcessor createEventProcessor(
+        ContainerLauncherEvent event) {
+      // At this point in time, the EventProcessor is only being created, so no
+      // additional threads should have been created yet.
+
+      // Core-pool-size should have increased by now.
+      if (expectedCorePoolSize != launcherPool.getCorePoolSize()) {
+        foundErrors = "Expected " + expectedCorePoolSize + " but found "
+            + launcherPool.getCorePoolSize();
+      }
+
+      return new ContainerLauncherImpl.EventProcessor(event) {
+        @Override
+        public void run() {
+          // do nothing substantial
+          numEventsProcessed++;
+          // Stall
+          synchronized(this) {
+            try {
+              while(!finishEventHandling) {
+                wait(1000);
+              }
+            } catch (InterruptedException e) {
+              // ignored; the loop re-checks finishEventHandling
+            }
+          }
+        }
+      };
+    }
+  }
+
+  private static class MRAppWithSlowNM extends MRApp {
+
+    final boolean swallowInterrupts;
+
+    public MRAppWithSlowNM(boolean swallowInterrupts) {
+      super(1, 0, false, "TestContainerLauncher", true);
+      this.swallowInterrupts = swallowInterrupts;
+    }
+
+    @Override
+    protected ContainerLauncher createContainerLauncher(AppContext context) {
+      return new ContainerLauncherImpl(context) {
+        @Override
+        protected ContainerManager getCMProxy(ContainerId containerID,
+            String containerManagerBindAddr, ContainerToken containerToken)
+            throws IOException {
+          try {
+            synchronized (this) {
+              wait(); // Just hang the thread simulating a very slow NM.
+            }
+          } catch (InterruptedException e) {
+            LOG.info(e);
+            if (!MRAppWithSlowNM.this.swallowInterrupts) {
+              throw new IOException(e);
+            }
+            Thread.currentThread().interrupt();
+          }
+          return null;
+        }
+      };
+    };
+  }
+}
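
testPoolSize and testPoolLimits assert that the launcher's core pool starts at INITIAL_POOL_SIZE, grows with the number of distinct NM hosts plus a buffer of 10, and never exceeds MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, while live threads are only created on demand. One plausible sizing policy consistent with those assertions, as a sketch; this is not the actual ContainerLauncherImpl internals:

```java
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PoolSizingSketch {
  static final int INITIAL_POOL_SIZE = 10; // mirrors ContainerLauncherImpl
  private final Set<String> allHosts =
      Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
  private final ThreadPoolExecutor pool = new ThreadPoolExecutor(
      INITIAL_POOL_SIZE, Integer.MAX_VALUE, 1, TimeUnit.HOURS,
      new LinkedBlockingQueue<Runnable>());

  void onLaunchEvent(String host, int limit) {
    // Grow the core pool only when a previously unseen NM host shows up.
    if (allHosts.add(host) && allHosts.size() > INITIAL_POOL_SIZE) {
      int newCore = Math.min(limit, allHosts.size() + INITIAL_POOL_SIZE);
      if (newCore > pool.getCorePoolSize()) {
        // setCorePoolSize raises the ceiling; threads start lazily, which is
        // why the tests see a core size of 22 but a live pool size of 12.
        pool.setCorePoolSize(newCore);
      }
    }
  }
}
```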

+ 223 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java

@@ -0,0 +1,223 @@
+package org.apache.hadoop.mapreduce.v2.app.launcher;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.net.InetSocketAddress;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.ShuffleHandler;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher.EventType;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.junit.Test;
+
+public class TestContainerLauncherImpl {
+  static final Log LOG = LogFactory.getLog(TestContainerLauncherImpl.class);
+  private static final RecordFactory recordFactory =
+    RecordFactoryProvider.getRecordFactory(null);
+
+  
+  private static class ContainerLauncherImplUnderTest extends 
+    ContainerLauncherImpl {
+
+    private YarnRPC rpc;
+    
+    public ContainerLauncherImplUnderTest(AppContext context, YarnRPC rpc) {
+      super(context);
+      this.rpc = rpc;
+    }
+    
+    @Override
+    protected YarnRPC createYarnRPC(Configuration conf) {
+      return rpc;
+    }
+    
+    public void waitForPoolToIdle() throws InterruptedException {
+      // I wish we did not need the sleep, but it is here so that we are sure
+      // the other thread has had time to insert the event into the queue and
+      // start processing it.  For some reason we were getting
+      // InterruptedExceptions within eventQueue without this sleep.
+      Thread.sleep(100l);
+      LOG.debug("POOL SIZE 1: "+this.eventQueue.size()+
+          " POOL SIZE 2: "+this.launcherPool.getQueue().size()+
+          " ACTIVE COUNT: "+ this.launcherPool.getActiveCount());
+      while(!this.eventQueue.isEmpty() || 
+          !this.launcherPool.getQueue().isEmpty() || 
+          this.launcherPool.getActiveCount() > 0) {
+        Thread.sleep(100l);
+        LOG.debug("POOL SIZE 1: "+this.eventQueue.size()+
+            " POOL SIZE 2: "+this.launcherPool.getQueue().size()+
+            " ACTIVE COUNT: "+ this.launcherPool.getActiveCount());
+      }
+      LOG.debug("POOL SIZE 1: "+this.eventQueue.size()+
+          " POOL SIZE 2: "+this.launcherPool.getQueue().size()+
+          " ACTIVE COUNT: "+ this.launcherPool.getActiveCount());
+    }
+  }
+  
+  public static ContainerId makeContainerId(long ts, int appId, int attemptId,
+      int id) {
+    return BuilderUtils.newContainerId(
+      BuilderUtils.newApplicationAttemptId(
+        BuilderUtils.newApplicationId(ts, appId), attemptId), id);
+  }
+
+  public static TaskAttemptId makeTaskAttemptId(long ts, int appId, int taskId, 
+      TaskType taskType, int id) {
+    ApplicationId aID = BuilderUtils.newApplicationId(ts, appId);
+    JobId jID = MRBuilderUtils.newJobId(aID, id);
+    TaskId tID = MRBuilderUtils.newTaskId(jID, taskId, taskType);
+    return MRBuilderUtils.newTaskAttemptId(tID, id);
+  }
+  
+  @Test
+  public void testHandle() throws Exception {
+    LOG.info("STARTING testHandle");
+    YarnRPC mockRpc = mock(YarnRPC.class);
+    AppContext mockContext = mock(AppContext.class);
+    @SuppressWarnings("rawtypes")
+    EventHandler mockEventHandler = mock(EventHandler.class);
+    when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
+
+    ContainerManager mockCM = mock(ContainerManager.class);
+    when(mockRpc.getProxy(eq(ContainerManager.class), 
+        any(InetSocketAddress.class), any(Configuration.class)))
+        .thenReturn(mockCM);
+    
+    ContainerLauncherImplUnderTest ut = 
+      new ContainerLauncherImplUnderTest(mockContext, mockRpc);
+    
+    Configuration conf = new Configuration();
+    ut.init(conf);
+    ut.start();
+    try {
+      ContainerId contId = makeContainerId(0l, 0, 0, 1);
+      TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
+      String cmAddress = "127.0.0.1:8000";
+      StartContainerResponse startResp = 
+        recordFactory.newRecordInstance(StartContainerResponse.class);
+      startResp.setServiceResponse(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID, 
+          ShuffleHandler.serializeMetaData(80));
+      
+
+      LOG.info("inserting launch event");
+      ContainerRemoteLaunchEvent mockLaunchEvent = 
+        mock(ContainerRemoteLaunchEvent.class);
+      when(mockLaunchEvent.getType())
+        .thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
+      when(mockLaunchEvent.getContainerID())
+        .thenReturn(contId);
+      when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
+      when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
+      when(mockCM.startContainer(any(StartContainerRequest.class))).thenReturn(startResp);
+      ut.handle(mockLaunchEvent);
+      
+      ut.waitForPoolToIdle();
+      
+      verify(mockCM).startContainer(any(StartContainerRequest.class));
+      
+      LOG.info("inserting cleanup event");
+      ContainerLauncherEvent mockCleanupEvent = 
+        mock(ContainerLauncherEvent.class);
+      when(mockCleanupEvent.getType())
+        .thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
+      when(mockCleanupEvent.getContainerID())
+        .thenReturn(contId);
+      when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
+      when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
+      ut.handle(mockCleanupEvent);
+      
+      ut.waitForPoolToIdle();
+      
+      verify(mockCM).stopContainer(any(StopContainerRequest.class));
+    } finally {
+      ut.stop();
+    }
+  }
+  
+  @Test
+  public void testOutOfOrder() throws Exception {
+    LOG.info("STARTING testOutOfOrder");
+    YarnRPC mockRpc = mock(YarnRPC.class);
+    AppContext mockContext = mock(AppContext.class);
+    @SuppressWarnings("rawtypes")
+    EventHandler mockEventHandler = mock(EventHandler.class);
+    when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
+
+    ContainerManager mockCM = mock(ContainerManager.class);
+    when(mockRpc.getProxy(eq(ContainerManager.class), 
+        any(InetSocketAddress.class), any(Configuration.class)))
+        .thenReturn(mockCM);
+    
+    ContainerLauncherImplUnderTest ut = 
+      new ContainerLauncherImplUnderTest(mockContext, mockRpc);
+    
+    Configuration conf = new Configuration();
+    ut.init(conf);
+    ut.start();
+    try {
+      ContainerId contId = makeContainerId(0l, 0, 0, 1);
+      TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
+      String cmAddress = "127.0.0.1:8000";
+      StartContainerResponse startResp = 
+        recordFactory.newRecordInstance(StartContainerResponse.class);
+      startResp.setServiceResponse(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID, 
+          ShuffleHandler.serializeMetaData(80));
+
+      LOG.info("inserting cleanup event");
+      ContainerLauncherEvent mockCleanupEvent = 
+        mock(ContainerLauncherEvent.class);
+      when(mockCleanupEvent.getType())
+        .thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
+      when(mockCleanupEvent.getContainerID())
+        .thenReturn(contId);
+      when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
+      when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
+      ut.handle(mockCleanupEvent);
+      
+      ut.waitForPoolToIdle();
+      
+      verify(mockCM, never()).stopContainer(any(StopContainerRequest.class));
+
+      LOG.info("inserting launch event");
+      ContainerRemoteLaunchEvent mockLaunchEvent = 
+        mock(ContainerRemoteLaunchEvent.class);
+      when(mockLaunchEvent.getType())
+        .thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
+      when(mockLaunchEvent.getContainerID())
+        .thenReturn(contId);
+      when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
+      when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
+      when(mockCM.startContainer(any(StartContainerRequest.class))).thenReturn(startResp);
+      ut.handle(mockLaunchEvent);
+      
+      ut.waitForPoolToIdle();
+      
+      verify(mockCM, never()).startContainer(any(StartContainerRequest.class));
+    } finally {
+      ut.stop();
+    }
+  }
+}
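
testOutOfOrder verifies that a CONTAINER_REMOTE_CLEANUP arriving before the launch suppresses both RPCs: stopContainer is skipped because nothing was started, and the late startContainer is dropped. A hypothetical sketch of such an ordering guard:

```java
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class CleanupBeforeLaunchSketch {
  private final Set<String> cleanedUp =
      Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());

  // A cleanup event marks the container done whether or not it ever launched;
  // stopContainer would only be issued for containers that actually started.
  public void onCleanup(String containerId) {
    cleanedUp.add(containerId);
  }

  // A launch that arrives after cleanup is dropped, so startContainer is
  // never issued for an already-cleaned container.
  public boolean shouldLaunch(String containerId) {
    return !cleanedUp.contains(containerId);
  }
}
```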

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java

@@ -686,7 +686,7 @@ public class TestAMWebServicesAttempts extends JerseyTest {
       assertTrue("name not set", (name != null && !name.isEmpty()));
       JSONArray counters = counterGroup.getJSONArray("counter");
       for (int j = 0; j < counters.length(); j++) {
-        JSONObject counter = counters.getJSONObject(i);
+        JSONObject counter = counters.getJSONObject(j);
         String counterName = counter.getString("name");
         assertTrue("name not set",
             (counterName != null && !counterName.isEmpty()));

+ 136 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobs.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.mapreduce.v2.app.webapp;
 
+import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -33,6 +34,7 @@ import javax.xml.parsers.DocumentBuilderFactory;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
@@ -44,6 +46,7 @@ import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
@@ -76,6 +79,7 @@ import com.sun.jersey.test.framework.WebAppDescriptor;
  * /ws/v1/mapreduce/jobs
  * /ws/v1/mapreduce/jobs/{jobid}
  * /ws/v1/mapreduce/jobs/{jobid}/counters
+ * /ws/v1/mapreduce/jobs/{jobid}/jobattempts
  */
 public class TestAMWebServicesJobs extends JerseyTest {
 
@@ -777,4 +781,136 @@ public class TestAMWebServicesJobs extends JerseyTest {
     }
   }
 
+  @Test
+  public void testJobAttempts() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1")
+          .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("jobAttempts");
+      verifyJobAttempts(info, jobsMap.get(id));
+    }
+  }
+
+  @Test
+  public void testJobAttemptsSlash() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1")
+          .path("mapreduce").path("jobs").path(jobId).path("jobattempts/")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("jobAttempts");
+      verifyJobAttempts(info, jobsMap.get(id));
+    }
+  }
+
+  @Test
+  public void testJobAttemptsDefault() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1")
+          .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
+          .get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("jobAttempts");
+      verifyJobAttempts(info, jobsMap.get(id));
+    }
+  }
+
+  @Test
+  public void testJobAttemptsXML() throws Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1")
+          .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
+          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+      String xml = response.getEntity(String.class);
+      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+      DocumentBuilder db = dbf.newDocumentBuilder();
+      InputSource is = new InputSource();
+      is.setCharacterStream(new StringReader(xml));
+      Document dom = db.parse(is);
+      NodeList attempts = dom.getElementsByTagName("jobAttempts");
+      assertEquals("incorrect number of elements", 1, attempts.getLength());
+      NodeList info = dom.getElementsByTagName("jobAttempt");
+      verifyJobAttemptsXML(info, jobsMap.get(id));
+    }
+  }
+
+  public void verifyJobAttempts(JSONObject info, Job job)
+      throws JSONException {
+
+    JSONArray attempts = info.getJSONArray("jobAttempt");
+    assertEquals("incorrect number of elements", 2, attempts.length());
+    for (int i = 0; i < attempts.length(); i++) {
+      JSONObject attempt = attempts.getJSONObject(i);
+      verifyJobAttemptsGeneric(job, attempt.getString("nodeHttpAddress"),
+          attempt.getString("nodeId"), attempt.getInt("id"),
+          attempt.getLong("startTime"), attempt.getString("containerId"),
+          attempt.getString("logsLink"));
+    }
+  }
+
+  public void verifyJobAttemptsXML(NodeList nodes, Job job) {
+
+    assertEquals("incorrect number of elements", 2, nodes.getLength());
+    for (int i = 0; i < nodes.getLength(); i++) {
+      Element element = (Element) nodes.item(i);
+      verifyJobAttemptsGeneric(job,
+          WebServicesTestUtils.getXmlString(element, "nodeHttpAddress"),
+          WebServicesTestUtils.getXmlString(element, "nodeId"),
+          WebServicesTestUtils.getXmlInt(element, "id"),
+          WebServicesTestUtils.getXmlLong(element, "startTime"),
+          WebServicesTestUtils.getXmlString(element, "containerId"),
+          WebServicesTestUtils.getXmlString(element, "logsLink"));
+    }
+  }
+
+  public void verifyJobAttemptsGeneric(Job job, String nodeHttpAddress,
+      String nodeId, int id, long startTime, String containerId, String logsLink) {
+    boolean attemptFound = false;
+    for (AMInfo amInfo : job.getAMInfos()) {
+      if (amInfo.getAppAttemptId().getAttemptId() == id) {
+        attemptFound = true;
+        String nmHost = amInfo.getNodeManagerHost();
+        int nmHttpPort = amInfo.getNodeManagerHttpPort();
+        int nmPort = amInfo.getNodeManagerPort();
+        WebServicesTestUtils.checkStringMatch("nodeHttpAddress", nmHost + ":"
+            + nmHttpPort, nodeHttpAddress);
+        WebServicesTestUtils.checkStringMatch("nodeId",
+            BuilderUtils.newNodeId(nmHost, nmPort).toString(), nodeId);
+        assertTrue("startime not greater than 0", startTime > 0);
+        WebServicesTestUtils.checkStringMatch("containerId", amInfo
+            .getContainerId().toString(), containerId);
+
+        String localLogsLink = ujoin("node", "containerlogs", containerId);
+
+        assertTrue("logsLink", logsLink.contains(localLogsLink));
+      }
+    }
+    assertTrue("attempt: " + id + " was not found", attemptFound);
+  }
+
 }

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesTasks.java

@@ -774,7 +774,7 @@ public class TestAMWebServicesTasks extends JerseyTest {
       assertTrue("name not set", (name != null && !name.isEmpty()));
       JSONArray counters = counterGroup.getJSONArray("counter");
       for (int j = 0; j < counters.length(); j++) {
-        JSONObject counter = counters.getJSONObject(i);
+        JSONObject counter = counters.getJSONObject(j);
         String counterName = counter.getString("name");
         assertTrue("name not set",
             (counterName != null && !counterName.isEmpty()));

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java

@@ -22,7 +22,6 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.JobPriority;
 import org.apache.hadoop.mapred.TaskCompletionEvent;
@@ -45,14 +44,15 @@ import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 
+@SuppressWarnings("deprecation")
 public class TypeConverter {
 
   private static RecordFactory recordFactory;

+ 3 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java

@@ -22,6 +22,8 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
@@ -54,4 +56,5 @@ public interface MRClientProtocol {
   public KillTaskResponse killTask(KillTaskRequest request) throws YarnRemoteException;
   public KillTaskAttemptResponse killTaskAttempt(KillTaskAttemptRequest request) throws YarnRemoteException;
   public FailTaskAttemptResponse failTaskAttempt(FailTaskAttemptRequest request) throws YarnRemoteException;
+  public GetDelegationTokenResponse getDelegationToken(GetDelegationTokenRequest request) throws YarnRemoteException;
 }
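
With the new interface method in place, a client fetches a JobHistoryServer delegation token roughly as below. This is a sketch: the setRenewer accessor on the request record is assumed from the surrounding record conventions, not shown in this patch.

```java
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

public class GetTokenSketch {
  public static GetDelegationTokenResponse fetch(MRClientProtocol client,
      String renewer) throws Exception {
    GetDelegationTokenRequest req = RecordFactoryProvider.getRecordFactory(null)
        .newRecordInstance(GetDelegationTokenRequest.class);
    req.setRenewer(renewer); // assumed setter on the new request record
    return client.getDelegationToken(req);
  }
}
```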

+ 64 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRDelegationTokenIdentifier.java

@@ -0,0 +1,64 @@
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.api;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+
+/**
+ * {@link TokenIdentifier} that identifies delegation tokens 
+ * issued by JobHistoryServer to delegate
+ * MR tasks talking to the JobHistoryServer.
+ */
+public class MRDelegationTokenIdentifier extends AbstractDelegationTokenIdentifier {
+
+  public static final Text KIND_NAME = new Text("MR_DELEGATION_TOKEN");
+
+ 
+  public MRDelegationTokenIdentifier() {
+  }
+  
+  /**
+   * Create a new delegation token identifier
+   * @param owner the effective username of the token owner
+   * @param renewer the username of the renewer
+   * @param realUser the real username of the token owner
+   */
+  public MRDelegationTokenIdentifier(Text owner, Text renewer, Text realUser) {
+    super(owner, renewer, realUser);
+  }
+
+ 
+  @Override
+  public Text getKind() {
+    return KIND_NAME;
+  }
+
+  @InterfaceAudience.Private
+  public static class Renewer extends Token.TrivialRenewer {
+    @Override
+    protected Text getKind() {
+      return KIND_NAME;
+    }
+  }
+}

+ 25 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java

@@ -29,6 +29,8 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
@@ -51,6 +53,8 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemp
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemptResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersRequestPBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDelegationTokenResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsRequestPBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetJobReportRequestPBImpl;
@@ -71,6 +75,7 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskReques
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto;
@@ -214,7 +219,26 @@ public class MRClientProtocolPBClientImpl implements MRClientProtocol {
       }
     }
   }
-
+  
+  @Override
+  public GetDelegationTokenResponse getDelegationToken(
+      GetDelegationTokenRequest request) throws YarnRemoteException {
+    GetDelegationTokenRequestProto requestProto = ((GetDelegationTokenRequestPBImpl)
+        request).getProto();
+    try {
+      return new GetDelegationTokenResponsePBImpl(proxy.getDelegationToken(
+          null, requestProto));
+    } catch (ServiceException e) {
+      if (e.getCause() instanceof YarnRemoteException) {
+        throw (YarnRemoteException)e.getCause();
+      } else if (e.getCause() instanceof UndeclaredThrowableException) {
+        throw (UndeclaredThrowableException)e.getCause();
+      } else {
+        throw new UndeclaredThrowableException(e);
+      }
+    }
+  }
+  
   @Override
   public KillJobResponse killJob(KillJobRequest request)
       throws YarnRemoteException {

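A hedged sketch of the client-side call path this adds; the helper below assumes an MRClientProtocol proxy (such as the PB client above) has already been created elsewhere, and the YarnRemoteException import location is assumed.

import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl;
import org.apache.hadoop.yarn.api.records.DelegationToken;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;

public class GetTokenCallSketch {
  // 'client' stands in for a connected MRClientProtocolPBClientImpl.
  static DelegationToken fetch(MRClientProtocol client, String renewer)
      throws YarnRemoteException {
    GetDelegationTokenRequest request = new GetDelegationTokenRequestPBImpl();
    request.setRenewer(renewer);
    GetDelegationTokenResponse response = client.getDelegationToken(request);
    return response.getDelegationToken();
  }
}
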
+ 20 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java

@@ -23,6 +23,8 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
@@ -44,6 +46,8 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemp
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemptResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersRequestPBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDelegationTokenResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsRequestPBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetJobReportRequestPBImpl;
@@ -66,6 +70,8 @@ import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptReque
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenResponseProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto;
@@ -184,7 +190,20 @@ public class MRClientProtocolPBServiceImpl implements BlockingInterface {
       throw new ServiceException(e);
     }
   }
-
+  
+  @Override
+  public GetDelegationTokenResponseProto getDelegationToken(
+      RpcController controller, GetDelegationTokenRequestProto proto)
+      throws ServiceException {
+    GetDelegationTokenRequest request = new GetDelegationTokenRequestPBImpl(proto);
+    try {
+      GetDelegationTokenResponse response = real.getDelegationToken(request);
+      return ((GetDelegationTokenResponsePBImpl)response).getProto();
+    } catch (YarnRemoteException e) {
+      throw new ServiceException(e);
+    }
+  }
+  
   @Override
   public KillJobResponseProto killJob(RpcController controller,
       KillJobRequestProto proto) throws ServiceException {

+ 29 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDelegationTokenRequest.java

@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+
+@Public
+@Evolving
+public interface GetDelegationTokenRequest {
+  String getRenewer();
+  void setRenewer(String renewer);
+}

+ 25 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDelegationTokenResponse.java

@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.DelegationToken;
+
+public interface GetDelegationTokenResponse {
+  void setDelegationToken(DelegationToken clientDToken);
+  DelegationToken getDelegationToken();
+}

+ 97 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java

@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+public class GetDelegationTokenRequestPBImpl extends
+      ProtoBase<GetDelegationTokenRequestProto> implements GetDelegationTokenRequest {
+  
+  String renewer;
+
+  GetDelegationTokenRequestProto proto = 
+      GetDelegationTokenRequestProto.getDefaultInstance();
+  GetDelegationTokenRequestProto.Builder builder = null;
+  boolean viaProto = false;
+  
+  public GetDelegationTokenRequestPBImpl() {
+    builder = GetDelegationTokenRequestProto.newBuilder();
+  }
+  
+  public GetDelegationTokenRequestPBImpl (
+      GetDelegationTokenRequestProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+  
+  @Override
+  public String getRenewer() {
+    GetDelegationTokenRequestProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.renewer != null) {
+      return this.renewer;
+    }
+    if (!p.hasRenewer()) {
+      return null;
+    }
+    this.renewer = p.getRenewer();
+    return this.renewer;
+  }
+  
+  @Override
+  public void setRenewer(String renewer) {
+    maybeInitBuilder();
+    if (renewer == null) {
+      builder.clearRenewer();
+    }
+    this.renewer = renewer;
+  }
+
+  @Override
+  public GetDelegationTokenRequestProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  private void mergeLocalToBuilder() {
+    if (renewer != null) {
+      builder.setRenewer(this.renewer);
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = GetDelegationTokenRequestProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }   
+}

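A small sketch (assuming only the classes in this patch) of the viaProto pattern these records follow: setters write local fields, getProto() merges them into the builder, and a proto-backed copy serves reads straight from the proto. The response record below follows the same pattern.

import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto;

public class ViaProtoSketch {
  public static void main(String[] args) {
    GetDelegationTokenRequestPBImpl request = new GetDelegationTokenRequestPBImpl();
    request.setRenewer("oozie");               // stored in the local field
    GetDelegationTokenRequestProto proto = request.getProto(); // merge + build
    GetDelegationTokenRequestPBImpl copy =
        new GetDelegationTokenRequestPBImpl(proto);
    System.out.println(copy.getRenewer());     // oozie, read from the proto
  }
}
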
+ 109 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java

@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.DelegationToken;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.DelegationTokenPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.DelegationTokenProto;
+
+
+public class GetDelegationTokenResponsePBImpl extends
+      ProtoBase<GetDelegationTokenResponseProto> implements GetDelegationTokenResponse {
+  
+  DelegationToken mrToken;
+
+  GetDelegationTokenResponseProto proto = 
+      GetDelegationTokenResponseProto.getDefaultInstance();
+  GetDelegationTokenResponseProto.Builder builder = null;
+  boolean viaProto = false;
+  
+  public GetDelegationTokenResponsePBImpl() {
+    builder = GetDelegationTokenResponseProto.newBuilder();
+  }
+  
+  public GetDelegationTokenResponsePBImpl (
+      GetDelegationTokenResponseProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+  
+  @Override
+  public DelegationToken getDelegationToken() {
+    GetDelegationTokenResponseProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.mrToken != null) {
+      return this.mrToken;
+    }
+    if (!p.hasMRDelegationToken()) {
+      return null;
+    }
+    this.mrToken = convertFromProtoFormat(p.getMRDelegationToken());
+    return this.mrToken;  
+  }
+  
+  @Override
+  public void setDelegationToken(DelegationToken mrToken) {
+    maybeInitBuilder();
+    if (mrToken == null) {
+      builder.clearMRDelegationToken();
+    }
+    this.mrToken = mrToken;
+  }
+
+  @Override
+  public GetDelegationTokenResponseProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  private void mergeLocalToBuilder() {
+    if (mrToken != null) {
+      builder.setMRDelegationToken(convertToProtoFormat(this.mrToken));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) {
+      maybeInitBuilder();
+    }
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = GetDelegationTokenResponseProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+  private DelegationTokenPBImpl convertFromProtoFormat(DelegationTokenProto p) {
+    return new DelegationTokenPBImpl(p);
+  }
+
+  private DelegationTokenProto convertToProtoFormat(DelegationToken t) {
+    return ((DelegationTokenPBImpl)t).getProto();
+  }
+}

+ 19 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSSecurityInfo.java

@@ -24,7 +24,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.yarn.proto.MRClientProtocol;
 
 public class ClientHSSecurityInfo extends SecurityInfo {
@@ -56,7 +58,22 @@ public class ClientHSSecurityInfo extends SecurityInfo {
 
   @Override
   public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
-    return null;
-  }
+    if (!protocol
+        .equals(MRClientProtocol.MRClientProtocolService.BlockingInterface.class)) {
+      return null;
+    }
+    return new TokenInfo() {
+
+      @Override
+      public Class<? extends Annotation> annotationType() {
+        return null;
+      }
+
+      @Override
+      public Class<? extends TokenSelector<? extends TokenIdentifier>>
+          value() {
+        return ClientHSTokenSelector.class;
+      }
+    };
+  }
 
 }

+ 56 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/ClientHSTokenSelector.java

@@ -0,0 +1,56 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.security.client;
+
+import java.util.Collection;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenSelector;
+
+public class ClientHSTokenSelector implements
+    TokenSelector<MRDelegationTokenIdentifier> {
+
+  private static final Log LOG = LogFactory
+      .getLog(ClientHSTokenSelector.class);
+
+  @SuppressWarnings("unchecked")
+  public Token<MRDelegationTokenIdentifier> selectToken(Text service,
+      Collection<Token<? extends TokenIdentifier>> tokens) {
+    if (service == null) {
+      return null;
+    }
+    LOG.debug("Looking for a token with service " + service.toString());
+    for (Token<? extends TokenIdentifier> token : tokens) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Token kind is " + token.getKind().toString()
+            + " and the token's service name is " + token.getService());
+      }
+      if (MRDelegationTokenIdentifier.KIND_NAME.equals(token.getKind())
+          && service.equals(token.getService())) {
+        return (Token<MRDelegationTokenIdentifier>) token;
+      }
+    }
+    return null;
+  }
+}

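A hedged usage sketch for the selector: given the tokens already placed in the caller's UGI, pick the one whose kind is MR_DELEGATION_TOKEN and whose service matches the history server address (the address below is hypothetical).

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.security.client.ClientHSTokenSelector;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class SelectorSketch {
  public static void main(String[] args) throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    Token<MRDelegationTokenIdentifier> token = new ClientHSTokenSelector()
        .selectToken(new Text("jhs.example.com:10020"), ugi.getTokens());
    System.out.println(token == null ? "no HS token" : token.getService());
  }
}
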
+ 22 - 13
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java

@@ -27,6 +27,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.net.URI;
+import java.net.URL;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -181,23 +182,31 @@ public class MRApps extends Apps {
       String mrAppGeneratedClasspathFile = "mrapp-generated-classpath";
       classpathFileStream =
           thisClassLoader.getResourceAsStream(mrAppGeneratedClasspathFile);
+
       // Put the file itself on classpath for tasks.
-      String classpathElement = thisClassLoader.getResource(mrAppGeneratedClasspathFile).getFile();
-      if (classpathElement.contains("!")) {
-        classpathElement = classpathElement.substring(0, classpathElement.indexOf("!"));
+      URL classpathResource = thisClassLoader
+        .getResource(mrAppGeneratedClasspathFile);
+      if (classpathResource != null) {
+        String classpathElement = classpathResource.getFile();
+        if (classpathElement.contains("!")) {
+          classpathElement = classpathElement.substring(0,
+            classpathElement.indexOf("!"));
+        } else {
+          classpathElement = new File(classpathElement).getParent();
+        }
+        Apps.addToEnvironment(environment, Environment.CLASSPATH.name(),
+          classpathElement);
       }
-      else {
-        classpathElement = new File(classpathElement).getParent();
+
+      if (classpathFileStream != null) {
+        reader = new BufferedReader(new InputStreamReader(classpathFileStream));
+        String cp = reader.readLine();
+        if (cp != null) {
+          Apps.addToEnvironment(environment, Environment.CLASSPATH.name(),
+            cp.trim());
+        }
       }
-      Apps.addToEnvironment(
-          environment,
-          Environment.CLASSPATH.name(), classpathElement);
 
-      reader = new BufferedReader(new InputStreamReader(classpathFileStream));
-      String cp = reader.readLine();
-      if (cp != null) {
-        Apps.addToEnvironment(environment, Environment.CLASSPATH.name(), cp.trim());
-      }      
       // Add standard Hadoop classes
       for (String c : ApplicationConstants.APPLICATION_CLASSPATH) {
         Apps.addToEnvironment(environment, Environment.CLASSPATH.name(), c);

+ 1 - 1
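For context, a tiny standalone sketch (paths are made up) of the '!' handling that the null-safe rewrite above preserves: a resource found inside a jar has the jar-internal path appended after '!', and only the jar itself belongs on the classpath; for an exploded directory, the parent directory is used instead.

import java.io.File;

public class ClasspathElementSketch {
  public static void main(String[] args) {
    System.out.println(elementOf("/path/app.jar!/mrapp-generated-classpath"));
    // -> /path/app.jar
    System.out.println(elementOf("/path/classes/mrapp-generated-classpath"));
    // -> /path/classes
  }

  // Mirrors the branch in the MRApps hunk above.
  static String elementOf(String resourceFile) {
    return resourceFile.contains("!")
        ? resourceFile.substring(0, resourceFile.indexOf('!'))
        : new File(resourceFile).getParent();
  }
}
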
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto

@@ -30,7 +30,7 @@ service MRClientProtocolService {
   rpc getTaskAttemptCompletionEvents (GetTaskAttemptCompletionEventsRequestProto) returns (GetTaskAttemptCompletionEventsResponseProto);
   rpc getTaskReports (GetTaskReportsRequestProto) returns (GetTaskReportsResponseProto);
   rpc getDiagnostics (GetDiagnosticsRequestProto) returns (GetDiagnosticsResponseProto);
-
+  rpc getDelegationToken (GetDelegationTokenRequestProto) returns (GetDelegationTokenResponseProto);
   rpc killJob (KillJobRequestProto) returns (KillJobResponseProto);
   rpc killTask (KillTaskRequestProto) returns (KillTaskResponseProto);
   rpc killTaskAttempt (KillTaskAttemptRequestProto) returns (KillTaskAttemptResponseProto);

+ 8 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto

@@ -22,6 +22,7 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 
 import "mr_protos.proto";
+import "yarn_protos.proto";
 
 message GetJobReportRequestProto {
   optional JobIdProto job_id = 1;
@@ -75,6 +76,13 @@ message GetDiagnosticsResponseProto {
   repeated string diagnostics = 1;
 }
 
+message GetDelegationTokenRequestProto {
+  optional string renewer = 1;
+}
+
+message GetDelegationTokenResponseProto {
+  optional DelegationTokenProto m_r_delegation_token = 1;
+}
 
 message KillJobRequestProto {
   optional JobIdProto job_id = 1;

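Protobuf generates builders for the two new messages; a minimal sketch of constructing the request directly (the renewer value is illustrative). The snake_case field m_r_delegation_token surfaces in Java as the MRDelegationToken accessors used by the PB impl above.

import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto;

public class ProtoBuilderSketch {
  public static void main(String[] args) {
    GetDelegationTokenRequestProto request = GetDelegationTokenRequestProto
        .newBuilder()
        .setRenewer("oozie")   // the optional 'renewer' field declared above
        .build();
    System.out.println(request.hasRenewer()); // true
  }
}
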
+ 8 - 12
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java

@@ -30,6 +30,8 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
@@ -123,28 +125,24 @@ public class TestRPCFactories {
     @Override
     public GetJobReportResponse getJobReport(GetJobReportRequest request)
         throws YarnRemoteException {
-      // TODO Auto-generated method stub
       return null;
     }
 
     @Override
     public GetTaskReportResponse getTaskReport(GetTaskReportRequest request)
         throws YarnRemoteException {
-      // TODO Auto-generated method stub
       return null;
     }
 
     @Override
     public GetTaskAttemptReportResponse getTaskAttemptReport(
         GetTaskAttemptReportRequest request) throws YarnRemoteException {
-      // TODO Auto-generated method stub
       return null;
     }
 
     @Override
     public GetCountersResponse getCounters(GetCountersRequest request)
         throws YarnRemoteException {
-      // TODO Auto-generated method stub
       return null;
     }
 
@@ -152,51 +150,49 @@ public class TestRPCFactories {
     public GetTaskAttemptCompletionEventsResponse getTaskAttemptCompletionEvents(
         GetTaskAttemptCompletionEventsRequest request)
         throws YarnRemoteException {
-      // TODO Auto-generated method stub
       return null;
     }
 
     @Override
     public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request)
         throws YarnRemoteException {
-      // TODO Auto-generated method stub
       return null;
     }
 
     @Override
     public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request)
         throws YarnRemoteException {
-      // TODO Auto-generated method stub
       return null;
     }
 
     @Override
     public KillJobResponse killJob(KillJobRequest request)
         throws YarnRemoteException {
-      // TODO Auto-generated method stub
       return null;
     }
 
     @Override
     public KillTaskResponse killTask(KillTaskRequest request)
         throws YarnRemoteException {
-      // TODO Auto-generated method stub
       return null;
     }
 
     @Override
     public KillTaskAttemptResponse killTaskAttempt(
         KillTaskAttemptRequest request) throws YarnRemoteException {
-      // TODO Auto-generated method stub
       return null;
     }
 
     @Override
     public FailTaskAttemptResponse failTaskAttempt(
         FailTaskAttemptRequest request) throws YarnRemoteException {
-      // TODO Auto-generated method stub
       return null;
     }
-    
+
+    @Override
+    public GetDelegationTokenResponse getDelegationToken(
+        GetDelegationTokenRequest request) throws YarnRemoteException {
+      return null;
+    }   
   }
 }

+ 22 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java

@@ -140,7 +140,20 @@ import org.apache.hadoop.util.ToolRunner;
 public class JobClient extends CLI {
   public static enum TaskStatusFilter { NONE, KILLED, FAILED, SUCCEEDED, ALL }
   private TaskStatusFilter taskOutputFilter = TaskStatusFilter.FAILED; 
-
+  /* Notes that getDelegationToken() was called. This is a hack for Oozie:
+   * it makes sure we add history server delegation tokens to the job's
+   * credentials. Since the API only allows one delegation token to be
+   * returned, we have to add this hack.
+   */
+  private boolean getDelegationTokenCalled = false;
+  /* The renewer that will renew the delegation token. */
+  private Text dtRenewer = null;
+  /* do we need a HS delegation token for this client */
+  static final String HS_DELEGATION_TOKEN_REQUIRED 
+      = "mapreduce.history.server.delegationtoken.required";
+  static final String HS_DELEGATION_TOKEN_RENEWER 
+      = "mapreduce.history.server.delegationtoken.renewer";
+  
   static{
     ConfigUtil.loadResources();
   }
@@ -584,6 +597,12 @@ public class JobClient extends CLI {
     try {
       conf.setBooleanIfUnset("mapred.mapper.new-api", false);
       conf.setBooleanIfUnset("mapred.reducer.new-api", false);
+      if (getDelegationTokenCalled) {
+        conf.setBoolean(HS_DELEGATION_TOKEN_REQUIRED, getDelegationTokenCalled);
+        getDelegationTokenCalled = false;
+        conf.set(HS_DELEGATION_TOKEN_RENEWER, dtRenewer.toString());
+        dtRenewer = null;
+      }
       Job job = clientUgi.doAs(new PrivilegedExceptionAction<Job> () {
         @Override
         public Job run() throws IOException, ClassNotFoundException, 
@@ -1170,6 +1189,8 @@ public class JobClient extends CLI {
    */
   public Token<DelegationTokenIdentifier> 
     getDelegationToken(final Text renewer) throws IOException, InterruptedException {
+    getDelegationTokenCalled = true;
+    dtRenewer = renewer;
     return clientUgi.doAs(new 
         PrivilegedExceptionAction<Token<DelegationTokenIdentifier>>() {
       public Token<DelegationTokenIdentifier> run() throws IOException, 

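A hedged sketch of the Oozie-style sequence this change targets: calling getDelegationToken before submission flips the flag, so the next submit records both new properties in the job conf for the submission path to pick up.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class OozieFlowSketch {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf();
    JobClient client = new JobClient(conf);
    // Sets getDelegationTokenCalled and remembers the renewer...
    client.getDelegationToken(new Text("oozie"));
    // ...so a following submitJob(conf) sets
    // mapreduce.history.server.delegationtoken.required=true and
    // mapreduce.history.server.delegationtoken.renewer=oozie.
  }
}
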
+ 16 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java

@@ -156,6 +156,8 @@ public interface MRJobConfig {
   
   public static final String TASK_TIMEOUT = "mapreduce.task.timeout";
 
+  public static final String TASK_TIMEOUT_CHECK_INTERVAL_MS = "mapreduce.task.timeout.check-interval-ms";
+  
   public static final String TASK_ID = "mapreduce.task.id";
 
   public static final String TASK_OUTPUT_DIR = "mapreduce.task.output.dir";
@@ -277,6 +279,12 @@ public interface MRJobConfig {
   public static final String JOB_ACL_MODIFY_JOB = "mapreduce.job.acl-modify-job";
 
   public static final String DEFAULT_JOB_ACL_MODIFY_JOB = " ";
+  
+  /* Config key pointing to the local file that holds all of the job's
+   * credentials.
+   */
+  public static final String MAPREDUCE_JOB_CREDENTIALS_BINARY = 
+      "mapreduce.job.credentials.binary";
 
   public static final String JOB_SUBMITHOST =
     "mapreduce.job.submithostname";
@@ -367,6 +375,11 @@ public interface MRJobConfig {
   public static final String MR_AM_JOB_REDUCE_PREEMPTION_LIMIT = 
     MR_AM_PREFIX  + "job.reduce.preemption.limit";
   public static final float DEFAULT_MR_AM_JOB_REDUCE_PREEMPTION_LIMIT = 0.5f;
+  
+  /** Whether direct access to the AM is disabled. */
+  public static final String JOB_AM_ACCESS_DISABLED = 
+    "mapreduce.job.am-access-disabled";
+  public static final boolean DEFAULT_JOB_AM_ACCESS_DISABLED = false;
 
   /**
    * Limit reduces starting until a certain percentage of maps have finished.
@@ -499,6 +512,9 @@ public interface MRJobConfig {
   public static final String MR_JOB_END_NOTIFICATION_URL =
     "mapreduce.job.end-notification.url";
 
+  public static final String MR_JOB_END_NOTIFICATION_PROXY =
+    "mapreduce.job.end-notification.proxy";
+
   public static final String MR_JOB_END_RETRY_ATTEMPTS =
     "mapreduce.job.end-notification.retry.attempts";
 

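A short sketch of reading the new keys; the 30-second fallback for the check interval is an assumption of this example, not a default defined by the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class NewKeysSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Assumed fallback of 30s; the patch itself defines no default here.
    long checkIntervalMs =
        conf.getLong(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 30 * 1000L);
    boolean amAccessDisabled = conf.getBoolean(
        MRJobConfig.JOB_AM_ACCESS_DISABLED,
        MRJobConfig.DEFAULT_JOB_AM_ACCESS_DISABLED);
    System.out.println(checkIntervalMs + " " + amAccessDisabled);
  }
}
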
+ 61 - 28
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobFinishedEvent.java

@@ -18,15 +18,12 @@
 
 package org.apache.hadoop.mapreduce.jobhistory;
 
-import java.io.IOException;
-
+import org.apache.avro.util.Utf8;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobID;
 
-import org.apache.avro.util.Utf8;
-
 /**
  * Event to record successful completion of job
  *
@@ -34,7 +31,18 @@ import org.apache.avro.util.Utf8;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class JobFinishedEvent  implements HistoryEvent {
-  private JobFinished datum = new JobFinished();
+
+  private JobFinished datum = null;
+
+  private JobID jobId;
+  private long finishTime;
+  private int finishedMaps;
+  private int finishedReduces;
+  private int failedMaps;
+  private int failedReduces;
+  private Counters mapCounters;
+  private Counters reduceCounters;
+  private Counters totalCounters;
 
   /** 
    * Create an event to record successful job completion
@@ -53,50 +61,75 @@ public class JobFinishedEvent  implements HistoryEvent {
       int failedMaps, int failedReduces,
       Counters mapCounters, Counters reduceCounters,
       Counters totalCounters) {
-    datum.jobid = new Utf8(id.toString());
-    datum.finishTime = finishTime;
-    datum.finishedMaps = finishedMaps;
-    datum.finishedReduces = finishedReduces;
-    datum.failedMaps = failedMaps;
-    datum.failedReduces = failedReduces;
-    datum.mapCounters =
-      EventWriter.toAvro(mapCounters, "MAP_COUNTERS");
-    datum.reduceCounters =
-      EventWriter.toAvro(reduceCounters, "REDUCE_COUNTERS");
-    datum.totalCounters =
-      EventWriter.toAvro(totalCounters, "TOTAL_COUNTERS");
+    this.jobId = id;
+    this.finishTime = finishTime;
+    this.finishedMaps = finishedMaps;
+    this.finishedReduces = finishedReduces;
+    this.failedMaps = failedMaps;
+    this.failedReduces = failedReduces;
+    this.mapCounters = mapCounters;
+    this.reduceCounters = reduceCounters;
+    this.totalCounters = totalCounters;
   }
 
   JobFinishedEvent() {}
 
-  public Object getDatum() { return datum; }
-  public void setDatum(Object datum) { this.datum = (JobFinished)datum; }
+  public Object getDatum() {
+    if (datum == null) {
+      datum = new JobFinished();
+      datum.jobid = new Utf8(jobId.toString());
+      datum.finishTime = finishTime;
+      datum.finishedMaps = finishedMaps;
+      datum.finishedReduces = finishedReduces;
+      datum.failedMaps = failedMaps;
+      datum.failedReduces = failedReduces;
+      datum.mapCounters = EventWriter.toAvro(mapCounters, "MAP_COUNTERS");
+      datum.reduceCounters = EventWriter.toAvro(reduceCounters,
+        "REDUCE_COUNTERS");
+      datum.totalCounters = EventWriter.toAvro(totalCounters, "TOTAL_COUNTERS");
+    }
+    return datum;
+  }
+
+  public void setDatum(Object oDatum) {
+    this.datum = (JobFinished) oDatum;
+    this.jobId = JobID.forName(datum.jobid.toString());
+    this.finishTime = datum.finishTime;
+    this.finishedMaps = datum.finishedMaps;
+    this.finishedReduces = datum.finishedReduces;
+    this.failedMaps = datum.failedMaps;
+    this.failedReduces = datum.failedReduces;
+    this.mapCounters = EventReader.fromAvro(datum.mapCounters);
+    this.reduceCounters = EventReader.fromAvro(datum.reduceCounters);
+    this.totalCounters = EventReader.fromAvro(datum.totalCounters);
+  }
+
   public EventType getEventType() {
     return EventType.JOB_FINISHED;
   }
 
   /** Get the Job ID */
-  public JobID getJobid() { return JobID.forName(datum.jobid.toString()); }
+  public JobID getJobid() { return jobId; }
   /** Get the job finish time */
-  public long getFinishTime() { return datum.finishTime; }
+  public long getFinishTime() { return finishTime; }
   /** Get the number of finished maps for the job */
-  public int getFinishedMaps() { return datum.finishedMaps; }
+  public int getFinishedMaps() { return finishedMaps; }
   /** Get the number of finished reducers for the job */
-  public int getFinishedReduces() { return datum.finishedReduces; }
+  public int getFinishedReduces() { return finishedReduces; }
   /** Get the number of failed maps for the job */
-  public int getFailedMaps() { return datum.failedMaps; }
+  public int getFailedMaps() { return failedMaps; }
   /** Get the number of failed reducers for the job */
-  public int getFailedReduces() { return datum.failedReduces; }
+  public int getFailedReduces() { return failedReduces; }
   /** Get the counters for the job */
   public Counters getTotalCounters() {
-    return EventReader.fromAvro(datum.totalCounters);
+    return totalCounters;
   }
   /** Get the Map counters for the job */
   public Counters getMapCounters() {
-    return EventReader.fromAvro(datum.mapCounters);
+    return mapCounters;
   }
   /** Get the reduce counters for the job */
   public Counters getReduceCounters() {
-    return EventReader.fromAvro(datum.reduceCounters);
+    return reduceCounters;
   }
 }

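The same lazy-datum refactor repeats in the attempt and task events that follow: each event now keeps plain Java fields and builds its Avro record only when getDatum() is first called, so events that are never written to the history file skip Avro conversion entirely. A hedged sketch (the ID and counts are placeholders):

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.jobhistory.JobFinishedEvent;

public class LazyDatumSketch {
  public static void main(String[] args) {
    JobFinishedEvent event = new JobFinishedEvent(
        JobID.forName("job_201201100000_0001"), System.currentTimeMillis(),
        10, 2, 0, 0, new Counters(), new Counters(), new Counters());
    // The constructor only copies fields; the Avro work happens here:
    Object avroDatum = event.getDatum();
    System.out.println(avroDatum != null); // true
  }
}
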
+ 95 - 47
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java

@@ -34,8 +34,25 @@ import org.apache.hadoop.mapreduce.TaskType;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class MapAttemptFinishedEvent  implements HistoryEvent {
-  private MapAttemptFinished datum = new MapAttemptFinished();
-  
+
+  private MapAttemptFinished datum = null;
+
+  private TaskAttemptID attemptId;
+  private TaskType taskType;
+  private String taskStatus;
+  private long finishTime;
+  private String hostname;
+  private String rackName;
+  private int port;
+  private long mapFinishTime;
+  private String state;
+  private Counters counters;
+  int[][] allSplits;
+  int[] clockSplits;
+  int[] cpuUsages;
+  int[] vMemKbytes;
+  int[] physMemKbytes;
+
   /** 
    * Create an event for successful completion of map attempts
    * @param id Task Attempt ID
@@ -60,33 +77,21 @@ public class MapAttemptFinishedEvent  implements HistoryEvent {
       (TaskAttemptID id, TaskType taskType, String taskStatus, 
        long mapFinishTime, long finishTime, String hostname, int port, 
        String rackName, String state, Counters counters, int[][] allSplits) {
-    datum.taskid = new Utf8(id.getTaskID().toString());
-    datum.attemptId = new Utf8(id.toString());
-    datum.taskType = new Utf8(taskType.name());
-    datum.taskStatus = new Utf8(taskStatus);
-    datum.mapFinishTime = mapFinishTime;
-    datum.finishTime = finishTime;
-    datum.hostname = new Utf8(hostname);
-    datum.port = port;
-    // This is needed for reading old jh files
-    if (rackName != null) {
-      datum.rackname = new Utf8(rackName);
-    }
-    datum.state = new Utf8(state);
-    datum.counters = EventWriter.toAvro(counters);
-
-    datum.clockSplits
-      = AvroArrayUtils.toAvro
-           (ProgressSplitsBlock.arrayGetWallclockTime(allSplits));
-    datum.cpuUsages 
-      = AvroArrayUtils.toAvro
-           (ProgressSplitsBlock.arrayGetCPUTime(allSplits));
-    datum.vMemKbytes 
-      = AvroArrayUtils.toAvro
-           (ProgressSplitsBlock.arrayGetVMemKbytes(allSplits));
-    datum.physMemKbytes 
-      = AvroArrayUtils.toAvro
-           (ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits));
+    this.attemptId = id;
+    this.taskType = taskType;
+    this.taskStatus = taskStatus;
+    this.mapFinishTime = mapFinishTime;
+    this.finishTime = finishTime;
+    this.hostname = hostname;
+    this.rackName = rackName;
+    this.port = port;
+    this.state = state;
+    this.counters = counters;
+    this.allSplits = allSplits;
+    this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
+    this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
+    this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
+    this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
   }
 
   /** 
@@ -117,57 +122,100 @@ public class MapAttemptFinishedEvent  implements HistoryEvent {
   
   MapAttemptFinishedEvent() {}
 
-  public Object getDatum() { return datum; }
-  public void setDatum(Object datum) {
-    this.datum = (MapAttemptFinished)datum;
+  public Object getDatum() {
+    if (datum == null) {
+      datum = new MapAttemptFinished();
+      datum.taskid = new Utf8(attemptId.getTaskID().toString());
+      datum.attemptId = new Utf8(attemptId.toString());
+      datum.taskType = new Utf8(taskType.name());
+      datum.taskStatus = new Utf8(taskStatus);
+      datum.mapFinishTime = mapFinishTime;
+      datum.finishTime = finishTime;
+      datum.hostname = new Utf8(hostname);
+      datum.port = port;
+      if (rackName != null) {
+        datum.rackname = new Utf8(rackName);
+      }
+      datum.state = new Utf8(state);
+      datum.counters = EventWriter.toAvro(counters);
+
+      datum.clockSplits = AvroArrayUtils.toAvro(ProgressSplitsBlock
+        .arrayGetWallclockTime(allSplits));
+      datum.cpuUsages = AvroArrayUtils.toAvro(ProgressSplitsBlock
+        .arrayGetCPUTime(allSplits));
+      datum.vMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
+        .arrayGetVMemKbytes(allSplits));
+      datum.physMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
+        .arrayGetPhysMemKbytes(allSplits));
+    }
+    return datum;
+  }
+
+  public void setDatum(Object oDatum) {
+    this.datum = (MapAttemptFinished)oDatum;
+    this.attemptId = TaskAttemptID.forName(datum.attemptId.toString());
+    this.taskType = TaskType.valueOf(datum.taskType.toString());
+    this.taskStatus = datum.taskStatus.toString();
+    this.mapFinishTime = datum.mapFinishTime;
+    this.finishTime = datum.finishTime;
+    this.hostname = datum.hostname.toString();
+    // rackname may be missing (e.g. when reading old jh files)
+    this.rackName = datum.rackname == null ? null : datum.rackname.toString();
+    this.port = datum.port;
+    this.state = datum.state.toString();
+    this.counters = EventReader.fromAvro(datum.counters);
+    this.clockSplits = AvroArrayUtils.fromAvro(datum.clockSplits);
+    this.cpuUsages = AvroArrayUtils.fromAvro(datum.cpuUsages);
+    this.vMemKbytes = AvroArrayUtils.fromAvro(datum.vMemKbytes);
+    this.physMemKbytes = AvroArrayUtils.fromAvro(datum.physMemKbytes);
   }
 
   /** Get the task ID */
-  public TaskID getTaskId() { return TaskID.forName(datum.taskid.toString()); }
+  public TaskID getTaskId() { return attemptId.getTaskID(); }
   /** Get the attempt id */
   public TaskAttemptID getAttemptId() {
-    return TaskAttemptID.forName(datum.attemptId.toString());
+    return attemptId;
   }
+
   /** Get the task type */
   public TaskType getTaskType() {
-    return TaskType.valueOf(datum.taskType.toString());
+    return TaskType.valueOf(taskType.toString());
   }
   /** Get the task status */
-  public String getTaskStatus() { return datum.taskStatus.toString(); }
+  public String getTaskStatus() { return taskStatus.toString(); }
   /** Get the map phase finish time */
-  public long getMapFinishTime() { return datum.mapFinishTime; }
+  public long getMapFinishTime() { return mapFinishTime; }
   /** Get the attempt finish time */
-  public long getFinishTime() { return datum.finishTime; }
+  public long getFinishTime() { return finishTime; }
   /** Get the host name */
-  public String getHostname() { return datum.hostname.toString(); }
+  public String getHostname() { return hostname.toString(); }
   /** Get the tracker rpc port */
-  public int getPort() { return datum.port; }
+  public int getPort() { return port; }
   
   /** Get the rack name */
   public String getRackName() {
-    return datum.rackname == null ? null : datum.rackname.toString();
+    return rackName == null ? null : rackName.toString();
   }
   
   /** Get the state string */
-  public String getState() { return datum.state.toString(); }
+  public String getState() { return state.toString(); }
   /** Get the counters */
-  Counters getCounters() { return EventReader.fromAvro(datum.counters); }
+  Counters getCounters() { return counters; }
   /** Get the event type */
    public EventType getEventType() {
     return EventType.MAP_ATTEMPT_FINISHED;
   }
 
   public int[] getClockSplits() {
-    return AvroArrayUtils.fromAvro(datum.clockSplits);
+    return clockSplits;
   }
   public int[] getCpuUsages() {
-    return AvroArrayUtils.fromAvro(datum.cpuUsages);
+    return cpuUsages;
   }
   public int[] getVMemKbytes() {
-    return AvroArrayUtils.fromAvro(datum.vMemKbytes);
+    return vMemKbytes;
   }
   public int[] getPhysMemKbytes() {
-    return AvroArrayUtils.fromAvro(datum.physMemKbytes);
+    return physMemKbytes;
   }
   
 }

+ 98 - 48
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/ReduceAttemptFinishedEvent.java

@@ -34,8 +34,25 @@ import org.apache.hadoop.mapreduce.TaskType;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class ReduceAttemptFinishedEvent  implements HistoryEvent {
-  private ReduceAttemptFinished datum =
-    new ReduceAttemptFinished();
+
+  private ReduceAttemptFinished datum = null;
+
+  private TaskAttemptID attemptId;
+  private TaskType taskType;
+  private String taskStatus;
+  private long shuffleFinishTime;
+  private long sortFinishTime;
+  private long finishTime;
+  private String hostname;
+  private String rackName;
+  private int port;
+  private String state;
+  private Counters counters;
+  int[][] allSplits;
+  int[] clockSplits;
+  int[] cpuUsages;
+  int[] vMemKbytes;
+  int[] physMemKbytes;
 
   /**
    * Create an event to record completion of a reduce attempt
@@ -60,33 +77,22 @@ public class ReduceAttemptFinishedEvent  implements HistoryEvent {
      long shuffleFinishTime, long sortFinishTime, long finishTime,
      String hostname, int port,  String rackName, String state, 
      Counters counters, int[][] allSplits) {
-    datum.taskid = new Utf8(id.getTaskID().toString());
-    datum.attemptId = new Utf8(id.toString());
-    datum.taskType = new Utf8(taskType.name());
-    datum.taskStatus = new Utf8(taskStatus);
-    datum.shuffleFinishTime = shuffleFinishTime;
-    datum.sortFinishTime = sortFinishTime;
-    datum.finishTime = finishTime;
-    datum.hostname = new Utf8(hostname);
-    datum.port = port;
-    if (rackName != null) {
-      datum.rackname = new Utf8(rackName);
-    }
-    datum.state = new Utf8(state);
-    datum.counters = EventWriter.toAvro(counters);
-
-    datum.clockSplits 
-      = AvroArrayUtils.toAvro
-           (ProgressSplitsBlock.arrayGetWallclockTime(allSplits));
-    datum.cpuUsages 
-      = AvroArrayUtils.toAvro
-           (ProgressSplitsBlock.arrayGetCPUTime(allSplits));
-    datum.vMemKbytes 
-      = AvroArrayUtils.toAvro
-           (ProgressSplitsBlock.arrayGetVMemKbytes(allSplits));
-    datum.physMemKbytes 
-      = AvroArrayUtils.toAvro
-           (ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits));
+    this.attemptId = id;
+    this.taskType = taskType;
+    this.taskStatus = taskStatus;
+    this.shuffleFinishTime = shuffleFinishTime;
+    this.sortFinishTime = sortFinishTime;
+    this.finishTime = finishTime;
+    this.hostname = hostname;
+    this.rackName = rackName;
+    this.port = port;
+    this.state = state;
+    this.counters = counters;
+    this.allSplits = allSplits;
+    this.clockSplits = ProgressSplitsBlock.arrayGetWallclockTime(allSplits);
+    this.cpuUsages = ProgressSplitsBlock.arrayGetCPUTime(allSplits);
+    this.vMemKbytes = ProgressSplitsBlock.arrayGetVMemKbytes(allSplits);
+    this.physMemKbytes = ProgressSplitsBlock.arrayGetPhysMemKbytes(allSplits);
   }
 
   /**
@@ -117,43 +123,87 @@ public class ReduceAttemptFinishedEvent  implements HistoryEvent {
 
   ReduceAttemptFinishedEvent() {}
 
-  public Object getDatum() { return datum; }
-  public void setDatum(Object datum) {
-    this.datum = (ReduceAttemptFinished)datum;
+  public Object getDatum() {
+    if (datum == null) {
+      datum = new ReduceAttemptFinished();
+      datum.taskid = new Utf8(attemptId.getTaskID().toString());
+      datum.attemptId = new Utf8(attemptId.toString());
+      datum.taskType = new Utf8(taskType.name());
+      datum.taskStatus = new Utf8(taskStatus);
+      datum.shuffleFinishTime = shuffleFinishTime;
+      datum.sortFinishTime = sortFinishTime;
+      datum.finishTime = finishTime;
+      datum.hostname = new Utf8(hostname);
+      datum.port = port;
+      if (rackName != null) {
+        datum.rackname = new Utf8(rackName);
+      }
+      datum.state = new Utf8(state);
+      datum.counters = EventWriter.toAvro(counters);
+
+      datum.clockSplits = AvroArrayUtils.toAvro(ProgressSplitsBlock
+        .arrayGetWallclockTime(allSplits));
+      datum.cpuUsages = AvroArrayUtils.toAvro(ProgressSplitsBlock
+        .arrayGetCPUTime(allSplits));
+      datum.vMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
+        .arrayGetVMemKbytes(allSplits));
+      datum.physMemKbytes = AvroArrayUtils.toAvro(ProgressSplitsBlock
+        .arrayGetPhysMemKbytes(allSplits));
+    }
+    return datum;
+  }
+
+  public void setDatum(Object oDatum) {
+    this.datum = (ReduceAttemptFinished)oDatum;
+    this.attemptId = TaskAttemptID.forName(datum.attemptId.toString());
+    this.taskType = TaskType.valueOf(datum.taskType.toString());
+    this.taskStatus = datum.taskStatus.toString();
+    this.shuffleFinishTime = datum.shuffleFinishTime;
+    this.sortFinishTime = datum.sortFinishTime;
+    this.finishTime = datum.finishTime;
+    this.hostname = datum.hostname.toString();
+    // rackname may be missing (e.g. when reading old jh files)
+    this.rackName = datum.rackname == null ? null : datum.rackname.toString();
+    this.port = datum.port;
+    this.state = datum.state.toString();
+    this.counters = EventReader.fromAvro(datum.counters);
+    this.clockSplits = AvroArrayUtils.fromAvro(datum.clockSplits);
+    this.cpuUsages = AvroArrayUtils.fromAvro(datum.cpuUsages);
+    this.vMemKbytes = AvroArrayUtils.fromAvro(datum.vMemKbytes);
+    this.physMemKbytes = AvroArrayUtils.fromAvro(datum.physMemKbytes);
   }
 
   /** Get the Task ID */
-  public TaskID getTaskId() { return TaskID.forName(datum.taskid.toString()); }
+  public TaskID getTaskId() { return attemptId.getTaskID(); }
   /** Get the attempt id */
   public TaskAttemptID getAttemptId() {
-    return TaskAttemptID.forName(datum.attemptId.toString());
+    return TaskAttemptID.forName(attemptId.toString());
   }
   /** Get the task type */
   public TaskType getTaskType() {
-    return TaskType.valueOf(datum.taskType.toString());
+    return TaskType.valueOf(taskType.toString());
   }
   /** Get the task status */
-  public String getTaskStatus() { return datum.taskStatus.toString(); }
+  public String getTaskStatus() { return taskStatus.toString(); }
   /** Get the finish time of the sort phase */
-  public long getSortFinishTime() { return datum.sortFinishTime; }
+  public long getSortFinishTime() { return sortFinishTime; }
   /** Get the finish time of the shuffle phase */
-  public long getShuffleFinishTime() { return datum.shuffleFinishTime; }
+  public long getShuffleFinishTime() { return shuffleFinishTime; }
   /** Get the finish time of the attempt */
-  public long getFinishTime() { return datum.finishTime; }
+  public long getFinishTime() { return finishTime; }
   /** Get the name of the host where the attempt ran */
-  public String getHostname() { return datum.hostname.toString(); }
+  public String getHostname() { return hostname.toString(); }
   /** Get the tracker rpc port */
-  public int getPort() { return datum.port; }
+  public int getPort() { return port; }
   
   /** Get the rack name of the node where the attempt ran */
   public String getRackName() {
-    return datum.rackname == null ? null : datum.rackname.toString();
+    return rackName == null ? null : rackName.toString();
   }
   
   /** Get the state string */
-  public String getState() { return datum.state.toString(); }
+  public String getState() { return state.toString(); }
   /** Get the counters for the attempt */
-  Counters getCounters() { return EventReader.fromAvro(datum.counters); }
+  Counters getCounters() { return counters; }
   /** Get the event type */
   public EventType getEventType() {
     return EventType.REDUCE_ATTEMPT_FINISHED;
@@ -161,16 +211,16 @@ public class ReduceAttemptFinishedEvent  implements HistoryEvent {
 
 
   public int[] getClockSplits() {
-    return AvroArrayUtils.fromAvro(datum.clockSplits);
+    return clockSplits;
   }
   public int[] getCpuUsages() {
-    return AvroArrayUtils.fromAvro(datum.cpuUsages);
+    return cpuUsages;
   }
   public int[] getVMemKbytes() {
-    return AvroArrayUtils.fromAvro(datum.vMemKbytes);
+    return vMemKbytes;
   }
   public int[] getPhysMemKbytes() {
-    return AvroArrayUtils.fromAvro(datum.physMemKbytes);
+    return physMemKbytes;
   }
 
 }

+ 56 - 28
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptFinishedEvent.java

@@ -18,8 +18,7 @@
 
 package org.apache.hadoop.mapreduce.jobhistory;
 
-import java.io.IOException;
-
+import org.apache.avro.util.Utf8;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.mapreduce.Counters;
@@ -27,8 +26,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.TaskID;
 import org.apache.hadoop.mapreduce.TaskType;
 
-import org.apache.avro.util.Utf8;
-
 /**
  * Event to record successful task completion
  *
@@ -36,7 +33,17 @@ import org.apache.avro.util.Utf8;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class TaskAttemptFinishedEvent  implements HistoryEvent {
-  private TaskAttemptFinished datum = new TaskAttemptFinished();
+
+  private TaskAttemptFinished datum = null;
+
+  private TaskAttemptID attemptId;
+  private TaskType taskType;
+  private String taskStatus;
+  private long finishTime;
+  private String rackName;
+  private String hostname;
+  private String state;
+  private Counters counters;
 
   /**
    * Create an event to record successful finishes for setup and cleanup 
@@ -53,52 +60,73 @@ public class TaskAttemptFinishedEvent  implements HistoryEvent {
       TaskType taskType, String taskStatus, 
       long finishTime, String rackName,
       String hostname, String state, Counters counters) {
-    datum.taskid = new Utf8(id.getTaskID().toString());
-    datum.attemptId = new Utf8(id.toString());
-    datum.taskType = new Utf8(taskType.name());
-    datum.taskStatus = new Utf8(taskStatus);
-    datum.finishTime = finishTime;
-    if (rackName != null) {
-      datum.rackname = new Utf8(rackName);
-    }
-    datum.hostname = new Utf8(hostname);
-    datum.state = new Utf8(state);
-    datum.counters = EventWriter.toAvro(counters);
+    this.attemptId = id;
+    this.taskType = taskType;
+    this.taskStatus = taskStatus;
+    this.finishTime = finishTime;
+    this.rackName = rackName;
+    this.hostname = hostname;
+    this.state = state;
+    this.counters = counters;
   }
 
   TaskAttemptFinishedEvent() {}
 
-  public Object getDatum() { return datum; }
-  public void setDatum(Object datum) {
-    this.datum = (TaskAttemptFinished)datum;
+  public Object getDatum() {
+    if (datum == null) {
+      datum = new TaskAttemptFinished();
+      datum.taskid = new Utf8(attemptId.getTaskID().toString());
+      datum.attemptId = new Utf8(attemptId.toString());
+      datum.taskType = new Utf8(taskType.name());
+      datum.taskStatus = new Utf8(taskStatus);
+      datum.finishTime = finishTime;
+      if (rackName != null) {
+        datum.rackname = new Utf8(rackName);
+      }
+      datum.hostname = new Utf8(hostname);
+      datum.state = new Utf8(state);
+      datum.counters = EventWriter.toAvro(counters);
+    }
+    return datum;
+  }
+  public void setDatum(Object oDatum) {
+    this.datum = (TaskAttemptFinished)oDatum;
+    this.attemptId = TaskAttemptID.forName(datum.attemptId.toString());
+    this.taskType = TaskType.valueOf(datum.taskType.toString());
+    this.taskStatus = datum.taskStatus.toString();
+    this.finishTime = datum.finishTime;
+    // rackname may be missing (e.g. when reading old jh files)
+    this.rackName = datum.rackname == null ? null : datum.rackname.toString();
+    this.hostname = datum.hostname.toString();
+    this.state = datum.state.toString();
+    this.counters = EventReader.fromAvro(datum.counters);
   }
 
   /** Get the task ID */
-  public TaskID getTaskId() { return TaskID.forName(datum.taskid.toString()); }
+  public TaskID getTaskId() { return attemptId.getTaskID(); }
   /** Get the task attempt id */
   public TaskAttemptID getAttemptId() {
-    return TaskAttemptID.forName(datum.attemptId.toString());
+    return TaskAttemptID.forName(attemptId.toString());
   }
   /** Get the task type */
   public TaskType getTaskType() {
-    return TaskType.valueOf(datum.taskType.toString());
+    return TaskType.valueOf(taskType.toString());
   }
   /** Get the task status */
-  public String getTaskStatus() { return datum.taskStatus.toString(); }
+  public String getTaskStatus() { return taskStatus.toString(); }
   /** Get the attempt finish time */
-  public long getFinishTime() { return datum.finishTime; }
+  public long getFinishTime() { return finishTime; }
   /** Get the host where the attempt executed */
-  public String getHostname() { return datum.hostname.toString(); }
+  public String getHostname() { return hostname.toString(); }
   
   /** Get the rackname where the attempt executed */
   public String getRackName() {
-    return datum.rackname == null ? null : datum.rackname.toString();
+    return rackName == null ? null : rackName.toString();
   }
   
   /** Get the state string */
-  public String getState() { return datum.state.toString(); }
+  public String getState() { return state.toString(); }
   /** Get the counters for the attempt */
-  Counters getCounters() { return EventReader.fromAvro(datum.counters); }
+  Counters getCounters() { return counters; }
   /** Get the event type */
   public EventType getEventType() {
     // Note that the task type can be setup/map/reduce/cleanup but the 

+ 38 - 18
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskFinishedEvent.java

@@ -18,16 +18,13 @@
 
 package org.apache.hadoop.mapreduce.jobhistory;
 
-import java.io.IOException;
-
+import org.apache.avro.util.Utf8;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.TaskID;
 import org.apache.hadoop.mapreduce.TaskType;
 
-import org.apache.avro.util.Utf8;
-
 /**
  * Event to record the successful completion of a task
  *
@@ -35,7 +32,14 @@ import org.apache.avro.util.Utf8;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class TaskFinishedEvent implements HistoryEvent {
-  private TaskFinished datum = new TaskFinished();
+
+  private TaskFinished datum = null;
+
+  private TaskID taskid;
+  private long finishTime;
+  private TaskType taskType;
+  private String status;
+  private Counters counters;
   
   /**
    * Create an event to record the successful completion of a task
@@ -48,32 +52,48 @@ public class TaskFinishedEvent implements HistoryEvent {
   public TaskFinishedEvent(TaskID id, long finishTime,
                            TaskType taskType,
                            String status, Counters counters) {
-    datum.taskid = new Utf8(id.toString());
-    datum.finishTime = finishTime;
-    datum.counters = EventWriter.toAvro(counters);
-    datum.taskType = new Utf8(taskType.name());
-    datum.status = new Utf8(status);
+    this.taskid = id;
+    this.finishTime = finishTime;
+    this.taskType = taskType;
+    this.status = status;
+    this.counters = counters;
   }
   
   TaskFinishedEvent() {}
 
-  public Object getDatum() { return datum; }
-  public void setDatum(Object datum) {
-    this.datum = (TaskFinished)datum;
+  public Object getDatum() {
+    if (datum == null) {
+      datum = new TaskFinished();
+      datum.taskid = new Utf8(taskid.toString());
+      datum.finishTime = finishTime;
+      datum.counters = EventWriter.toAvro(counters);
+      datum.taskType = new Utf8(taskType.name());
+      datum.status = new Utf8(status);
+    }
+    return datum;
+  }
+
+  public void setDatum(Object oDatum) {
+    this.datum = (TaskFinished)oDatum;
+    this.taskid = TaskID.forName(datum.taskid.toString());
+    this.finishTime = datum.finishTime;
+    this.taskType = TaskType.valueOf(datum.taskType.toString());
+    this.status = datum.status.toString();
+    this.counters = EventReader.fromAvro(datum.counters);
   }
 
   /** Get task id */
-  public TaskID getTaskId() { return TaskID.forName(datum.taskid.toString()); }
+  public TaskID getTaskId() { return TaskID.forName(taskid.toString()); }
   /** Get the task finish time */
-  public long getFinishTime() { return datum.finishTime; }
+  public long getFinishTime() { return finishTime; }
   /** Get task counters */
-  public Counters getCounters() { return EventReader.fromAvro(datum.counters); }
+  public Counters getCounters() { return counters; }
   /** Get task type */
   public TaskType getTaskType() {
-    return TaskType.valueOf(datum.taskType.toString());
+    return TaskType.valueOf(taskType.toString());
   }
   /** Get task status */
-  public String getTaskStatus() { return datum.status.toString(); }
+  public String getTaskStatus() { return status.toString(); }
   /** Get event type */
   public EventType getEventType() {
     return EventType.TASK_FINISHED;
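
Both rewritten history events (TaskAttemptFinishedEvent above and TaskFinishedEvent here) follow the same pattern: plain Java fields hold the event data, the Avro record is built lazily in getDatum() only when the event is actually serialized, and setDatum() rehydrates the fields when a history file is parsed back in. A minimal sketch of the pattern, with a hypothetical ExampleFinished class standing in for the generated Avro record:

import org.apache.avro.util.Utf8;

public class ExampleFinishedEvent {
  // Stand-in for a generated Avro record; fields mirror the event.
  static class ExampleFinished {
    CharSequence name;
    long finishTime;
  }

  private ExampleFinished datum = null; // Avro record, built on demand
  private String name;                  // authoritative plain fields
  private long finishTime;

  public ExampleFinishedEvent(String name, long finishTime) {
    this.name = name;                   // cheap: no Avro allocation here
    this.finishTime = finishTime;
  }

  public Object getDatum() {            // called only at write time
    if (datum == null) {
      datum = new ExampleFinished();
      datum.name = new Utf8(name);
      datum.finishTime = finishTime;
    }
    return datum;
  }

  public void setDatum(Object oDatum) { // called when parsing history
    this.datum = (ExampleFinished) oDatum;
    this.name = datum.name.toString();
    this.finishTime = datum.finishTime;
  }
}

The getters then read the plain fields directly, so events created on the application master's hot path no longer allocate Avro structures unless they are actually written out.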

+ 8 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Master;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -101,7 +102,7 @@ public class TokenCache {
     String delegTokenRenewer = Master.getMasterPrincipal(conf);
     if (delegTokenRenewer == null || delegTokenRenewer.length() == 0) {
       throw new IOException(
-          "Can't get JobTracker Kerberos principal for use as renewer");
+          "Can't get Master Kerberos principal for use as renewer");
     }
     boolean readFile = true;
 
@@ -112,7 +113,7 @@ public class TokenCache {
       if (readFile) {
         readFile = false;
         String binaryTokenFilename =
-          conf.get("mapreduce.job.credentials.binary");
+          conf.get(MRJobConfig.MAPREDUCE_JOB_CREDENTIALS_BINARY);
         if (binaryTokenFilename != null) {
           Credentials binary;
           try {
@@ -172,10 +173,14 @@ public class TokenCache {
   @InterfaceAudience.Private
   public static Token<DelegationTokenIdentifier> getDelegationToken(
       Credentials credentials, String namenode) {
+    // No fs-specific tokens are issued by this fs. It may, however, issue
+    // tokens for other filesystems, which would be keyed by that
+    // filesystem's name.
+    if (namenode == null)  
+      return null;
     return (Token<DelegationTokenIdentifier>) credentials.getToken(new Text(
         namenode));
   }
-  
+
   /**
    * load job token from a file
    * @param conf
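
The null guard added above matters for wrapper filesystems whose getCanonicalServiceName() is null: they issue no token under their own name, although they may have added child-filesystem tokens to the Credentials under those filesystems' service names. A hedged sketch of the resulting behavior, mock-based in the style of the TestTokenCache change below:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.security.Credentials;

public class NullServiceNameSketch {
  public static void main(String[] args) throws Exception {
    FileSystem mockFs = mock(FileSystem.class);
    when(mockFs.getCanonicalServiceName()).thenReturn(null);
    Credentials creds = new Credentials();
    // Previously the lookup would fail constructing new Text(null);
    // now it short-circuits and returns null.
    assert TokenCache.getDelegationToken(
        creds, mockFs.getCanonicalServiceName()) == null;
  }
}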

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java

@@ -130,7 +130,7 @@ public class TestTokenCache {
   private FileSystem setupMultiFs(final FileSystem singleFs,
       final String renewer, final Credentials credentials) throws Exception {
     FileSystem mockFs = mock(FileSystem.class);
-    when(mockFs.getCanonicalServiceName()).thenReturn("multifs");
+    when(mockFs.getCanonicalServiceName()).thenReturn(null);
     when(mockFs.getUri()).thenReturn(new URI("multifs:///"));
 
     when(mockFs.getDelegationTokens(any(String.class))).thenThrow(

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java

@@ -32,13 +32,13 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.JobACLsManager;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@@ -89,7 +89,7 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
     
     loadFullHistoryData(loadTasks, historyFile);
     user = userName;
-    counters = TypeConverter.toYarn(jobInfo.getTotalCounters());
+    counters = jobInfo.getTotalCounters();
     diagnostics.add(jobInfo.getErrorInfo());
     report =
         RecordFactoryProvider.getRecordFactory(null).newRecordInstance(
@@ -121,7 +121,7 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
   }
 
   @Override
-  public Counters getCounters() {
+  public Counters getAllCounters() {
     return counters;
   }
 

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java

@@ -24,10 +24,10 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
@@ -60,7 +60,7 @@ public class CompletedTask implements Task {
     this.finishTime = taskInfo.getFinishTime();
     this.type = TypeConverter.toYarn(taskInfo.getTaskType());
     if (taskInfo.getCounters() != null)
-      this.counters = TypeConverter.toYarn(taskInfo.getCounters());
+      this.counters = taskInfo.getCounters();
     if (taskInfo.getTaskStatus() != null) {
       this.state = TaskState.valueOf(taskInfo.getTaskStatus());
     } else {
@@ -86,7 +86,7 @@ public class CompletedTask implements Task {
     report.setFinishTime(finishTime);
     report.setTaskState(state);
     report.setProgress(getProgress());
-    report.setCounters(getCounters());
+    report.setCounters(TypeConverter.toYarn(getCounters()));
     report.addAllRunningAttempts(new ArrayList<TaskAttemptId>(attempts.keySet()));
   }
 

+ 5 - 5
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java

@@ -21,9 +21,9 @@ package org.apache.hadoop.mapreduce.v2.hs;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
@@ -46,8 +46,9 @@ public class CompletedTaskAttempt implements TaskAttempt {
   CompletedTaskAttempt(TaskId taskId, TaskAttemptInfo attemptInfo) {
     this.attemptInfo = attemptInfo;
     this.attemptId = TypeConverter.toYarn(attemptInfo.getAttemptId());
-    if (attemptInfo.getCounters() != null)
-      this.counters = TypeConverter.toYarn(attemptInfo.getCounters());
+    if (attemptInfo.getCounters() != null) {
+      this.counters = attemptInfo.getCounters();
+    }
     if (attemptInfo.getTaskStatus() != null) {
       this.state = TaskAttemptState.valueOf(attemptInfo.getTaskStatus());
     } else {
@@ -61,7 +62,6 @@ public class CompletedTaskAttempt implements TaskAttempt {
     }
     
     report = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskAttemptReport.class);
-    report.setCounters(counters);
     
     report.setTaskAttemptId(attemptId);
     report.setTaskAttemptState(state);
@@ -78,7 +78,7 @@ public class CompletedTaskAttempt implements TaskAttempt {
     }
 //    report.setPhase(attemptInfo.get); //TODO
     report.setStateString(attemptInfo.getState());
-    report.setCounters(getCounters());
+    report.setCounters(TypeConverter.toYarn(getCounters()));
     report.setContainerId(attemptInfo.getContainerId());
     if (attemptInfo.getHostname() == null) {
       report.setNodeManagerHost("UNKNOWN");

+ 49 - 5
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java

@@ -33,11 +33,15 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
@@ -67,13 +71,17 @@ import org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebApp;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.DelegationToken;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.RPCUtil;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.webapp.WebApp;
 import org.apache.hadoop.yarn.webapp.WebApps;
 
@@ -91,11 +99,14 @@ public class HistoryClientService extends AbstractService {
   private WebApp webApp;
   private InetSocketAddress bindAddress;
   private HistoryContext history;
-
-  public HistoryClientService(HistoryContext history) {
+  private JHSDelegationTokenSecretManager jhsDTSecretManager;
+  
+  public HistoryClientService(HistoryContext history,
+      JHSDelegationTokenSecretManager jhsDTSecretManager) {
     super("HistoryClientService");
     this.history = history;
     this.protocolHandler = new MRClientProtocolHandler();
+    this.jhsDTSecretManager = jhsDTSecretManager;
   }
 
   public void start() {
@@ -109,14 +120,15 @@ public class HistoryClientService extends AbstractService {
       JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS);
     InetAddress hostNameResolved = null;
     try {
-      hostNameResolved = InetAddress.getLocalHost(); //address.getAddress().getLocalHost();
+      hostNameResolved = InetAddress.getLocalHost(); 
+      //address.getAddress().getLocalHost();
     } catch (UnknownHostException e) {
       throw new YarnException(e);
     }
 
     server =
         rpc.getServer(MRClientProtocol.class, protocolHandler, address,
-            conf, null,
+            conf, jhsDTSecretManager,
             conf.getInt(JHAdminConfig.MR_HISTORY_CLIENT_THREAD_COUNT,
                 JHAdminConfig.DEFAULT_MR_HISTORY_CLIENT_THREAD_COUNT));
 
@@ -190,7 +202,7 @@ public class HistoryClientService extends AbstractService {
       JobId jobId = request.getJobId();
       Job job = verifyAndGetJob(jobId);
       GetCountersResponse response = recordFactory.newRecordInstance(GetCountersResponse.class);
-      response.setCounters(job.getCounters());
+      response.setCounters(TypeConverter.toYarn(job.getAllCounters()));
       return response;
     }
 
@@ -277,6 +289,38 @@ public class HistoryClientService extends AbstractService {
       }
       return response;
     }
+    
+    @Override
+    public GetDelegationTokenResponse getDelegationToken(
+        GetDelegationTokenRequest request) throws YarnRemoteException {
+
+      try {
+        // Verify that the connection is kerberos authenticated
+        AuthenticationMethod authMethod = UserGroupInformation
+            .getRealAuthenticationMethod(UserGroupInformation.getCurrentUser());
+        if (UserGroupInformation.isSecurityEnabled()
+            && (authMethod != AuthenticationMethod.KERBEROS)) {
+          throw new IOException(
+              "Delegation Token can be issued only with kerberos authentication");
+        }
+
+        GetDelegationTokenResponse response = recordFactory.newRecordInstance(
+            GetDelegationTokenResponse.class);
+        MRDelegationTokenIdentifier tokenIdentifier =
+            new MRDelegationTokenIdentifier();
+        Token<MRDelegationTokenIdentifier> realJHSToken =
+            new Token<MRDelegationTokenIdentifier>(tokenIdentifier,
+                jhsDTSecretManager);
+        DelegationToken mrDToken = BuilderUtils.newDelegationToken(
+            realJHSToken.getIdentifier(), realJHSToken.getKind().toString(),
+            realJHSToken.getPassword(), bindAddress.getAddress().getHostAddress()
+                + ":" + bindAddress.getPort());
+        response.setDelegationToken(mrDToken);
+        return response;
+      } catch (IOException i) {
+        throw RPCUtil.getRemoteException(i);
+      }
+    }
 
     private void checkAccess(Job job, JobACL jobOperation)
         throws YarnRemoteException {

+ 58 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JHSDelegationTokenSecretManager.java

@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.hs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+
+/**
+ * A MapReduce specific delegation token secret manager.
+ * The secret manager is responsible for generating and accepting the password
+ * for each token.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class JHSDelegationTokenSecretManager
+    extends AbstractDelegationTokenSecretManager<MRDelegationTokenIdentifier> {
+
+  /**
+   * Create a secret manager
+   * @param delegationKeyUpdateInterval the interval, in milliseconds, for
+   *        rolling new secret keys.
+   * @param delegationTokenMaxLifetime the maximum lifetime of the delegation
+   *        tokens
+   * @param delegationTokenRenewInterval how often the tokens must be renewed
+   * @param delegationTokenRemoverScanInterval how often the tokens are scanned
+   *        for expired tokens
+   */
+  public JHSDelegationTokenSecretManager(long delegationKeyUpdateInterval,
+                                      long delegationTokenMaxLifetime, 
+                                      long delegationTokenRenewInterval,
+                                      long delegationTokenRemoverScanInterval) {
+    super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
+          delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
+  }
+
+  @Override
+  public MRDelegationTokenIdentifier createIdentifier() {
+    return new MRDelegationTokenIdentifier();
+  }
+}
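
Minting a token against the new secret manager is what HistoryClientService.getDelegationToken above relies on. A hedged, standalone sketch of that step; the interval values here are illustrative, not the MRConfig defaults used by JobHistoryServer below:

import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.hs.JHSDelegationTokenSecretManager;
import org.apache.hadoop.security.token.Token;

public class MintJHSTokenSketch {
  public static void main(String[] args) throws Exception {
    JHSDelegationTokenSecretManager mgr =
        new JHSDelegationTokenSecretManager(
            24 * 60 * 60 * 1000L,      // key roll interval (illustrative)
            7 * 24 * 60 * 60 * 1000L,  // max token lifetime (illustrative)
            24 * 60 * 60 * 1000L,      // renew interval (illustrative)
            3600000L);                 // expired-token scan interval
    mgr.startThreads();                // as JobHistoryServer.start() does
    try {
      // The secret manager generates the password for the identifier.
      Token<MRDelegationTokenIdentifier> token =
          new Token<MRDelegationTokenIdentifier>(
              new MRDelegationTokenIdentifier(), mgr);
      System.out.println("kind=" + token.getKind());
    } finally {
      mgr.stopThreads();               // as JobHistoryServer.stop() does
    }
  }
}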

+ 38 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java

@@ -24,6 +24,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.StringUtils;
@@ -41,6 +42,7 @@ public class JobHistoryServer extends CompositeService {
   private HistoryContext historyContext;
   private HistoryClientService clientService;
   private JobHistory jobHistoryService;
+  private JHSDelegationTokenSecretManager jhsDTSecretManager;
 
   public JobHistoryServer() {
     super(JobHistoryServer.class.getName());
@@ -56,17 +58,52 @@ public class JobHistoryServer extends CompositeService {
     }
     jobHistoryService = new JobHistory();
     historyContext = (HistoryContext)jobHistoryService;
-    clientService = new HistoryClientService(historyContext);
+    this.jhsDTSecretManager = createJHSSecretManager(conf);
+    clientService = new HistoryClientService(historyContext, 
+        this.jhsDTSecretManager);
     addService(jobHistoryService);
     addService(clientService);
     super.init(config);
   }
 
+  protected JHSDelegationTokenSecretManager createJHSSecretManager(
+      Configuration conf) {
+    long secretKeyInterval =
+        conf.getLong(MRConfig.DELEGATION_KEY_UPDATE_INTERVAL_KEY,
+                     MRConfig.DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
+    long tokenMaxLifetime =
+        conf.getLong(MRConfig.DELEGATION_TOKEN_MAX_LIFETIME_KEY,
+                     MRConfig.DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
+    long tokenRenewInterval =
+        conf.getLong(MRConfig.DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
+                     MRConfig.DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
+
+    return new JHSDelegationTokenSecretManager(secretKeyInterval, 
+        tokenMaxLifetime, tokenRenewInterval, 3600000);
+  }
+  
   protected void doSecureLogin(Configuration conf) throws IOException {
     SecurityUtil.login(conf, JHAdminConfig.MR_HISTORY_KEYTAB,
         JHAdminConfig.MR_HISTORY_PRINCIPAL);
   }
 
+  @Override
+  public void start() {
+    try {
+      jhsDTSecretManager.startThreads();
+    } catch(IOException io) {
+      LOG.error("Error while starting the Secret Manager threads", io);
+      throw new RuntimeException(io);
+    }
+    super.start();
+  }
+  
+  @Override
+  public void stop() {
+    jhsDTSecretManager.stopThreads();
+    super.stop();
+  }
+  
   public static void main(String[] args) {
     StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
     try {

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java

@@ -22,9 +22,9 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@@ -95,7 +95,7 @@ public class PartialJob implements org.apache.hadoop.mapreduce.v2.app.job.Job {
   }
 
   @Override
-  public Counters getCounters() {
+  public Counters getAllCounters() {
     return null;
   }
 

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsWebServices.java

@@ -229,7 +229,7 @@ public class HsWebServices {
   }
 
   @GET
-  @Path("/mapreduce/jobs/{jobid}/attempts")
+  @Path("/mapreduce/jobs/{jobid}/jobattempts")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public AMAttemptsInfo getJobAttempts(@PathParam("jobid") String jid) {
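
After the rename, the endpoint and its JSON shape line up with the updated tests further down: a "jobAttempts" object wrapping a "jobAttempt" array. An illustrative exchange, with made-up values (field set follows AMAttemptInfo below):

GET /ws/v1/history/mapreduce/jobs/{jobid}/jobattempts

{"jobAttempts": {"jobAttempt": [
  {"id": 1, "startTime": 1326230000000, "containerId": "...",
   "nodeId": "nmhost:port", "nodeHttpAddress": "nmhost:httpPort"}
]}}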
 

+ 5 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/AMAttemptInfo.java

@@ -30,7 +30,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.util.BuilderUtils;
 
-@XmlRootElement(name = "amAttempt")
+@XmlRootElement(name = "jobAttempt")
 @XmlAccessorType(XmlAccessType.FIELD)
 public class AMAttemptInfo {
 
@@ -52,12 +52,14 @@ public class AMAttemptInfo {
     this.nodeHttpAddress = "";
     this.nodeId = "";
     String nmHost = amInfo.getNodeManagerHost();
-    int nmPort = amInfo.getNodeManagerHttpPort();
+    int nmHttpPort = amInfo.getNodeManagerHttpPort();
+    int nmPort = amInfo.getNodeManagerPort();
     if (nmHost != null) {
-      this.nodeHttpAddress = nmHost + ":" + nmPort;
+      this.nodeHttpAddress = nmHost + ":" + nmHttpPort;
       NodeId nodeId = BuilderUtils.newNodeId(nmHost, nmPort);
       this.nodeId = nodeId.toString();
     }
+
     this.id = amInfo.getAppAttemptId().getAttemptId();
     this.startTime = amInfo.getStartTime();
     this.containerId = "";

+ 3 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/dao/AMAttemptsInfo.java

@@ -21,12 +21,14 @@ import java.util.ArrayList;
 
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
 
-@XmlRootElement(name = "attempts")
+@XmlRootElement(name = "jobAttempts")
 @XmlAccessorType(XmlAccessType.FIELD)
 public class AMAttemptsInfo {
 
+  @XmlElement(name = "jobAttempt")
   protected ArrayList<AMAttemptInfo> attempt = new ArrayList<AMAttemptInfo>();
 
   public AMAttemptsInfo() {
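
Together the renamed @XmlRootElement and the new @XmlElement produce the XML shape that verifyHsJobAttemptsXML below parses: a jobAttempts root wrapping repeated jobAttempt elements. Illustrative, with made-up values:

<jobAttempts>
  <jobAttempt>
    <nodeHttpAddress>nmhost:httpPort</nodeHttpAddress>
    <nodeId>nmhost:port</nodeId>
    <id>1</id>
    <startTime>1326230000000</startTime>
    <containerId>...</containerId>
  </jobAttempt>
</jobAttempts>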

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAttempts.java

@@ -699,7 +699,7 @@ public class TestHsWebServicesAttempts extends JerseyTest {
       assertTrue("name not set", (name != null && !name.isEmpty()));
       JSONArray counters = counterGroup.getJSONArray("counter");
       for (int j = 0; j < counters.length(); j++) {
-        JSONObject counter = counters.getJSONObject(i);
+        JSONObject counter = counters.getJSONObject(j);
         String counterName = counter.getString("name");
         assertTrue("name not set",
             (counterName != null && !counterName.isEmpty()));

+ 14 - 13
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobs.java

@@ -77,7 +77,7 @@ import com.sun.jersey.test.framework.WebAppDescriptor;
  *
  * /ws/v1/history/mapreduce/jobs /ws/v1/history/mapreduce/jobs/{jobid}
  * /ws/v1/history/mapreduce/jobs/{jobid}/counters
- * /ws/v1/history/mapreduce/jobs/{jobid}/attempts
+ * /ws/v1/history/mapreduce/jobs/{jobid}/jobattempts
  */
 public class TestHsWebServicesJobs extends JerseyTest {
 
@@ -626,12 +626,12 @@ public class TestHsWebServicesJobs extends JerseyTest {
       String jobId = MRApps.toString(id);
 
       ClientResponse response = r.path("ws").path("v1").path("history")
-          .path("mapreduce").path("jobs").path(jobId).path("attempts")
+          .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
           .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
       assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
       JSONObject json = response.getEntity(JSONObject.class);
       assertEquals("incorrect number of elements", 1, json.length());
-      JSONObject info = json.getJSONObject("attempts");
+      JSONObject info = json.getJSONObject("jobAttempts");
       verifyHsJobAttempts(info, jobsMap.get(id));
     }
   }
@@ -644,12 +644,12 @@ public class TestHsWebServicesJobs extends JerseyTest {
       String jobId = MRApps.toString(id);
 
       ClientResponse response = r.path("ws").path("v1").path("history")
-          .path("mapreduce").path("jobs").path(jobId).path("attempts/")
+          .path("mapreduce").path("jobs").path(jobId).path("jobattempts/")
           .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
       assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
       JSONObject json = response.getEntity(JSONObject.class);
       assertEquals("incorrect number of elements", 1, json.length());
-      JSONObject info = json.getJSONObject("attempts");
+      JSONObject info = json.getJSONObject("jobAttempts");
       verifyHsJobAttempts(info, jobsMap.get(id));
     }
   }
@@ -662,12 +662,12 @@ public class TestHsWebServicesJobs extends JerseyTest {
       String jobId = MRApps.toString(id);
 
       ClientResponse response = r.path("ws").path("v1").path("history")
-          .path("mapreduce").path("jobs").path(jobId).path("attempts")
+          .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
           .get(ClientResponse.class);
       assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
       JSONObject json = response.getEntity(JSONObject.class);
       assertEquals("incorrect number of elements", 1, json.length());
-      JSONObject info = json.getJSONObject("attempts");
+      JSONObject info = json.getJSONObject("jobAttempts");
       verifyHsJobAttempts(info, jobsMap.get(id));
     }
   }
@@ -680,7 +680,7 @@ public class TestHsWebServicesJobs extends JerseyTest {
       String jobId = MRApps.toString(id);
 
       ClientResponse response = r.path("ws").path("v1").path("history")
-          .path("mapreduce").path("jobs").path(jobId).path("attempts")
+          .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
           .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
       assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
       String xml = response.getEntity(String.class);
@@ -689,9 +689,9 @@ public class TestHsWebServicesJobs extends JerseyTest {
       InputSource is = new InputSource();
       is.setCharacterStream(new StringReader(xml));
       Document dom = db.parse(is);
-      NodeList attempts = dom.getElementsByTagName("attempts");
+      NodeList attempts = dom.getElementsByTagName("jobAttempts");
       assertEquals("incorrect number of elements", 1, attempts.getLength());
-      NodeList info = dom.getElementsByTagName("attempt");
+      NodeList info = dom.getElementsByTagName("jobAttempt");
       verifyHsJobAttemptsXML(info, jobsMap.get(id));
     }
   }
@@ -699,7 +699,7 @@ public class TestHsWebServicesJobs extends JerseyTest {
   public void verifyHsJobAttempts(JSONObject info, Job job)
       throws JSONException {
 
-    JSONArray attempts = info.getJSONArray("attempt");
+    JSONArray attempts = info.getJSONArray("jobAttempt");
     assertEquals("incorrect number of elements", 2, attempts.length());
     for (int i = 0; i < attempts.length(); i++) {
       JSONObject attempt = attempts.getJSONObject(i);
@@ -732,9 +732,10 @@ public class TestHsWebServicesJobs extends JerseyTest {
       if (amInfo.getAppAttemptId().getAttemptId() == id) {
         attemptFound = true;
         String nmHost = amInfo.getNodeManagerHost();
-        int nmPort = amInfo.getNodeManagerHttpPort();
+        int nmHttpPort = amInfo.getNodeManagerHttpPort();
+        int nmPort = amInfo.getNodeManagerPort();
         WebServicesTestUtils.checkStringMatch("nodeHttpAddress", nmHost + ":"
-            + nmPort, nodeHttpAddress);
+            + nmHttpPort, nodeHttpAddress);
         WebServicesTestUtils.checkStringMatch("nodeId",
             BuilderUtils.newNodeId(nmHost, nmPort).toString(), nodeId);
         assertTrue("startime not greater than 0", startTime > 0);

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesTasks.java

@@ -788,7 +788,7 @@ public class TestHsWebServicesTasks extends JerseyTest {
       assertTrue("name not set", (name != null && !name.isEmpty()));
       JSONArray counters = counterGroup.getJSONArray("counter");
       for (int j = 0; j < counters.length(); j++) {
-        JSONObject counter = counters.getJSONObject(i);
+        JSONObject counter = counters.getJSONObject(j);
         String counterName = counter.getString("name");
         assertTrue("name not set",
             (counterName != null && !counterName.isEmpty()));

+ 9 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java

@@ -70,7 +70,15 @@ public class ClientCache {
     return client;
   }
 
-  private MRClientProtocol instantiateHistoryProxy()
+  protected synchronized MRClientProtocol getInitializedHSProxy()
+      throws IOException {
+    if (this.hsProxy == null) {
+      hsProxy = instantiateHistoryProxy();
+    }
+    return this.hsProxy;
+  }
+  
+  protected MRClientProtocol instantiateHistoryProxy()
       throws IOException {
     final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS);
     if (StringUtils.isEmpty(serviceAddr)) {
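
getInitializedHSProxy memoizes the history-server proxy behind a synchronized method, so the relatively expensive RPC setup happens at most once and only when a caller (such as YARNRunner.submitJob further down) actually needs it. The idiom in isolation, with the connection step stubbed out:

import java.io.IOException;

import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;

public class LazyProxySketch {
  private MRClientProtocol hsProxy; // created at most once

  protected synchronized MRClientProtocol getInitializedHSProxy()
      throws IOException {
    if (hsProxy == null) {
      hsProxy = instantiateHistoryProxy(); // deferred until first use
    }
    return hsProxy;
  }

  protected MRClientProtocol instantiateHistoryProxy() throws IOException {
    return null; // stand-in for the real RPC setup
  }
}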

+ 13 - 54
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.JobStatus;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.TypeConverter;
@@ -68,7 +69,6 @@ import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -94,6 +94,8 @@ public class ClientServiceDelegate {
   private static String UNKNOWN_USER = "Unknown User";
   private String trackingUrl;
 
+  private boolean amAclDisabledStatusLogged = false;
+
   public ClientServiceDelegate(Configuration conf, ResourceMgrDelegate rm,
       JobID jobId, MRClientProtocol historyServerProxy) {
     this.conf = new Configuration(conf); // Cloning for modifying.
@@ -157,7 +159,7 @@ public class ClientServiceDelegate {
           application = rm.getApplicationReport(appId);
           continue;
         }
-        if(!conf.getBoolean(YarnConfiguration.RM_AM_NETWORK_ACL_CLOSED, false)) {
+        if(!conf.getBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED, false)) {
           UserGroupInformation newUgi = UserGroupInformation.createRemoteUser(
               UserGroupInformation.getCurrentUser().getUserName());
           serviceAddr = application.getHost() + ":" + application.getRpcPort();
@@ -182,12 +184,14 @@ public class ClientServiceDelegate {
               return instantiateAMProxy(tempStr);
             }
           });
-	} else {
-           logApplicationReportInfo(application); 
-           LOG.info("Network ACL closed to AM for job " + jobId
-             + ". Redirecting to job history server.");
-           return checkAndGetHSProxy(null, JobState.RUNNING);
-        }  
+        } else {
+          if (!amAclDisabledStatusLogged) {
+            LOG.info("Network ACL closed to AM for job " + jobId
+                + ". Not going to try to reach the AM.");
+            amAclDisabledStatusLogged = true;
+          }
+          return getNotRunningJob(null, JobState.RUNNING);
+        }
         return realProxy;
       } catch (IOException e) {
         //possibly the AM has crashed
@@ -248,55 +252,10 @@ public class ClientServiceDelegate {
     return realProxy;
   }
 
-  private void logApplicationReportInfo(ApplicationReport application) {
-    if(application == null) {
-      return;
-    }
-    LOG.info("AppId: " + application.getApplicationId()
-      + " # reserved containers: " 
-      + application.getApplicationResourceUsageReport().getNumReservedContainers()
-      + " # used containers: " 
-      + application.getApplicationResourceUsageReport().getNumUsedContainers()
-      + " Needed resources (memory): "
-      + application.getApplicationResourceUsageReport().getNeededResources().getMemory()
-      + " Reserved resources (memory): "
-      + application.getApplicationResourceUsageReport().getReservedResources().getMemory()
-      + " Used resources (memory): "
-      + application.getApplicationResourceUsageReport().getUsedResources().getMemory()
-      + " Diagnostics: " 
-      + application.getDiagnostics()
-      + " Start time: "
-      + application.getStartTime()
-      + " Finish time: "
-      + application.getFinishTime()
-      + " Host: "
-      + application.getHost()
-      + " Name: "
-      + application.getName()
-      + " Orig. tracking url: "
-      + application.getOriginalTrackingUrl()
-      + " Queue: "
-      + application.getQueue()
-      + " RPC port: "
-      + application.getRpcPort()
-      + " Tracking url: "
-      + application.getTrackingUrl()
-      + " User: "
-      + application.getUser()
-      + " Client token: "
-      + application.getClientToken()
-      + " Final appl. status: "
-      + application.getFinalApplicationStatus()
-      + " Yarn appl. state: "
-      + application.getYarnApplicationState()
-    );
-  }
-
   private MRClientProtocol checkAndGetHSProxy(
       ApplicationReport applicationReport, JobState state) {
     if (null == historyServerProxy) {
-      LOG.warn("Job History Server is not configured or " +
-      		"job information not yet available on History Server.");
+      LOG.warn("Job History Server is not configured.");
       return getNotRunningJob(applicationReport, state);
     }
     return historyServerProxy;
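
The rewritten branch replaces the verbose per-call report logging with a one-time log line and a NotRunningJob placeholder whenever MRJobConfig.JOB_AM_ACCESS_DISABLED is set, as testAMAccessDisabled below verifies. A hedged configuration sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class AMAccessDisabledSketch {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // Never dial the AM directly; the fact is logged once per delegate.
    conf.setBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED, true);
    // While the app is RUNNING, getJobStatus now returns a placeholder
    // (job name "N/A") without instantiating an AM proxy; once the app
    // finishes, the delegate consults the JobHistoryServer instead.
  }
}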

+ 8 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java

@@ -27,6 +27,8 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
@@ -201,4 +203,10 @@ public class NotRunningJob implements MRClientProtocol {
     return resp;
   }
 
+  @Override
+  public GetDelegationTokenResponse getDelegationToken(
+      GetDelegationTokenRequest request) throws YarnRemoteException {
+    /* Should not be invoked by anyone. */
+    throw new NotImplementedException();
+  }
 }

+ 17 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java

@@ -37,6 +37,8 @@ import org.apache.hadoop.mapreduce.QueueInfo;
 import org.apache.hadoop.mapreduce.TaskTrackerInfo;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -59,12 +61,14 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.DelegationToken;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
 
 
 // TODO: This should be part of something like yarn-client.
@@ -156,11 +160,20 @@ public class ResourceMgrDelegate {
   }
 
 
-  public Token<DelegationTokenIdentifier> getDelegationToken(Text arg0)
+  @SuppressWarnings("rawtypes")
+  public Token getDelegationToken(Text renewer)
       throws IOException, InterruptedException {
-    // TODO: Implement getDelegationToken
-    LOG.warn("getDelegationToken - Not Implemented");
-    return null;
+    /* get the token from RM */
+    org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest
+        rmDTRequest = recordFactory.newRecordInstance(
+            org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest.class);
+    rmDTRequest.setRenewer(renewer.toString());
+    org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse
+        response = applicationsManager.getDelegationToken(rmDTRequest);
+    DelegationToken yarnToken = response.getRMDelegationToken();
+    return new Token<RMDelegationTokenIdentifier>(
+        yarnToken.getIdentifier().array(), yarnToken.getPassword().array(),
+        new Text(yarnToken.getKind()), new Text(yarnToken.getService()));
   }
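
The delegate now forwards the renewer to the RM and repackages the yarn DelegationToken record as a common Token so it can be stored in Credentials. A hedged usage sketch; the renewer principal is illustrative:

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.ResourceMgrDelegate;
import org.apache.hadoop.security.token.Token;

public class RMTokenFetchSketch {
  @SuppressWarnings("rawtypes")
  static Token fetch(ResourceMgrDelegate rm) throws Exception {
    // Kind and service come from the RM's DelegationToken record.
    return rm.getDelegationToken(new Text("renewer/_HOST@REALM"));
  }
}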
 
 

+ 54 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java

@@ -28,6 +28,7 @@ import java.util.Vector;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
@@ -54,6 +55,9 @@ import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
 import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.mapreduce.v2.LogParams;
+import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.security.Credentials;
@@ -68,6 +72,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.DelegationToken;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
 import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
@@ -84,6 +89,7 @@ import org.apache.hadoop.yarn.util.ConverterUtils;
 /**
  * This class enables the current JobClient (0.22 hadoop) to run on YARN.
  */
+@SuppressWarnings({ "rawtypes", "unchecked", "deprecation" })
 public class YARNRunner implements ClientProtocol {
 
   private static final Log LOG = LogFactory.getLog(YARNRunner.class);
@@ -93,7 +99,15 @@ public class YARNRunner implements ClientProtocol {
   private ClientCache clientCache;
   private Configuration conf;
   private final FileContext defaultFileContext;
-
+  
+  /* Usually false, unless JobClient.getDelegationToken has been called.
+   * This is a workaround: getDelegationToken does return a token from the
+   * RM, but because of the restricted JobClient API we also add a job
+   * history delegation token when submitting a job.
+   */
+  private static final boolean DEFAULT_HS_DELEGATION_TOKEN_REQUIRED = 
+      false;
+  
   /**
   * Yarn runner encapsulates the client interface of
    * yarn
@@ -131,7 +145,16 @@ public class YARNRunner implements ClientProtocol {
       throw new RuntimeException("Error in instantiating YarnClient", ufe);
     }
   }
-
+  
+  @Private
+  /**
+   * Used for testing mostly.
+   * @param resMgrDelegate the resource manager delegate to set to.
+   */
+  public void setResourceMgrDelegate(ResourceMgrDelegate resMgrDelegate) {
+    this.resMgrDelegate = resMgrDelegate;
+  }
+  
   @Override
   public void cancelDelegationToken(Token<DelegationTokenIdentifier> arg0)
       throws IOException, InterruptedException {
@@ -161,10 +184,26 @@ public class YARNRunner implements ClientProtocol {
     return resMgrDelegate.getClusterMetrics();
   }
 
-  @Override
-  public Token<DelegationTokenIdentifier> getDelegationToken(Text arg0)
+  private Token<MRDelegationTokenIdentifier> getDelegationTokenFromHS(
+      MRClientProtocol hsProxy, Text renewer) throws IOException,
+      InterruptedException {
+    GetDelegationTokenRequest request = recordFactory
+      .newRecordInstance(GetDelegationTokenRequest.class);
+    request.setRenewer(renewer.toString());
+    DelegationToken mrDelegationToken = hsProxy.getDelegationToken(request)
+      .getDelegationToken();
+    return new Token<MRDelegationTokenIdentifier>(mrDelegationToken
+      .getIdentifier().array(), mrDelegationToken.getPassword().array(),
+      new Text(mrDelegationToken.getKind()), new Text(
+        mrDelegationToken.getService()));
+  }
+  
+  @Override
+  public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
       throws IOException, InterruptedException {
-    return resMgrDelegate.getDelegationToken(arg0);
+    // The token is only used for serialization. So the type information
+    // mismatch should be fine.
+    return resMgrDelegate.getDelegationToken(renewer);
   }
 
   @Override
@@ -224,7 +263,16 @@ public class YARNRunner implements ClientProtocol {
   @Override
   public JobStatus submitJob(JobID jobId, String jobSubmitDir, Credentials ts)
   throws IOException, InterruptedException {
-
+    // JobClient will set this flag if getDelegationToken is called; if so,
+    // also get a delegation token for the HistoryServer.
+    if (conf.getBoolean(JobClient.HS_DELEGATION_TOKEN_REQUIRED, 
+        DEFAULT_HS_DELEGATION_TOKEN_REQUIRED)) {
+      Token hsDT = getDelegationTokenFromHS(
+          clientCache.getInitializedHSProxy(),
+          new Text(conf.get(JobClient.HS_DELEGATION_TOKEN_RENEWER)));
+      ts.addToken(hsDT.getService(), hsDT);
+    }
+    
     // Upload only in security mode: TODO
     Path applicationTokensFile =
         new Path(jobSubmitDir, MRJobConfig.APPLICATION_TOKENS_FILE);
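
Tying the pieces together: when the HS_DELEGATION_TOKEN_REQUIRED flag is set, submitJob asks ClientCache for the lazily created history proxy, fetches an MR delegation token via getDelegationTokenFromHS, and drops it into the job Credentials alongside the RM token. A hedged sketch of the client-side opt-in; it assumes the JobClient.HS_DELEGATION_TOKEN_* constants referenced in this patch, and the renewer principal is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobClient;

public class HSTokenOptInSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // JobClient sets this when its getDelegationToken API was used.
    conf.setBoolean(JobClient.HS_DELEGATION_TOKEN_REQUIRED, true);
    conf.set(JobClient.HS_DELEGATION_TOKEN_RENEWER, "jt/_HOST@REALM");
    // With these set, YARNRunner.submitJob adds a JobHistoryServer
    // delegation token to the job Credentials before submission.
  }
}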

+ 15 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java

@@ -76,6 +76,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest;
@@ -330,6 +332,12 @@ public class TestClientRedirect {
         GetQueueUserAclsInfoRequest request) throws YarnRemoteException {
       return null;
     }
+
+    @Override
+    public GetDelegationTokenResponse getDelegationToken(
+        GetDelegationTokenRequest request) throws YarnRemoteException {
+      return null;
+    }
   }
 
   class HistoryService extends AMService {
@@ -482,6 +490,13 @@ public class TestClientRedirect {
         FailTaskAttemptRequest request) throws YarnRemoteException {
       return null;
     }
+
+    @Override
+    public org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse getDelegationToken(
+        org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest request)
+        throws YarnRemoteException {
+      return null;
+    }
   }
 
   static Counters getMyCounters() {

+ 61 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java

@@ -19,7 +19,12 @@
 package org.apache.hadoop.mapred;
 
 import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -31,6 +36,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.JobStatus;
 import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
@@ -165,7 +171,7 @@ public class TestClientServiceDelegate {
     ClientServiceDelegate clientServiceDelegate = getClientServiceDelegate(                       
         historyServerProxy, rm);
 
-    JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);                           
+    JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
     Assert.assertNotNull(jobStatus);
     Assert.assertEquals("TestJobFilePath", jobStatus.getJobFile());                               
     Assert.assertEquals("http://TestTrackingUrl", jobStatus.getTrackingUrl());                    
@@ -254,6 +260,58 @@ public class TestClientServiceDelegate {
         any(String.class));
   }
   
+  @Test
+  public void testAMAccessDisabled() throws IOException {
+    //test only applicable when AM not reachable
+    if(isAMReachableFromClient) {
+      return;
+    }
+
+    MRClientProtocol historyServerProxy = mock(MRClientProtocol.class);
+    when(historyServerProxy.getJobReport(getJobReportRequest())).thenReturn(                      
+        getJobReportResponseFromHistoryServer());                                                 
+
+    ResourceMgrDelegate rmDelegate = mock(ResourceMgrDelegate.class);
+    when(rmDelegate.getApplicationReport(jobId.getAppId())).thenReturn(
+        getRunningApplicationReport("am1", 78)).thenReturn(
+          getRunningApplicationReport("am1", 78)).thenReturn(
+            getRunningApplicationReport("am1", 78)).thenReturn(
+        getFinishedApplicationReport());
+
+    ClientServiceDelegate clientServiceDelegate = spy(getClientServiceDelegate(
+        historyServerProxy, rmDelegate));
+
+    JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
+    Assert.assertNotNull(jobStatus);
+    Assert.assertEquals("N/A", jobStatus.getJobName());
+    
+    verify(clientServiceDelegate, times(0)).instantiateAMProxy(
+        any(String.class));
+
+    // Should not reach AM even for second and third times too.
+    jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
+    Assert.assertNotNull(jobStatus);
+    Assert.assertEquals("N/A", jobStatus.getJobName());    
+    verify(clientServiceDelegate, times(0)).instantiateAMProxy(
+        any(String.class));
+    jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
+    Assert.assertNotNull(jobStatus);
+    Assert.assertEquals("N/A", jobStatus.getJobName());    
+    verify(clientServiceDelegate, times(0)).instantiateAMProxy(
+        any(String.class));
+
+    // The third time around, app is completed, so should go to JHS
+    JobStatus jobStatus1 = clientServiceDelegate.getJobStatus(oldJobId);
+    Assert.assertNotNull(jobStatus1);
+    Assert.assertEquals("TestJobFilePath", jobStatus1.getJobFile());                               
+    Assert.assertEquals("http://TestTrackingUrl", jobStatus1.getTrackingUrl());                    
+    Assert.assertEquals(1.0f, jobStatus1.getMapProgress());                                        
+    Assert.assertEquals(1.0f, jobStatus1.getReduceProgress());
+    
+    verify(clientServiceDelegate, times(0)).instantiateAMProxy(
+        any(String.class));
+  }
+  
   private GetJobReportRequest getJobReportRequest() {
     GetJobReportRequest request = Records.newRecord(GetJobReportRequest.class);
     request.setJobId(jobId);
@@ -300,7 +358,7 @@ public class TestClientServiceDelegate {
       MRClientProtocol historyServerProxy, ResourceMgrDelegate rm) {
     Configuration conf = new YarnConfiguration();
     conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
-    conf.setBoolean(YarnConfiguration.RM_AM_NETWORK_ACL_CLOSED, !isAMReachableFromClient);
+    conf.setBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED, !isAMReachableFromClient);
     ClientServiceDelegate clientServiceDelegate = new ClientServiceDelegate(
         conf, rm, oldJobId, historyServerProxy);
     return clientServiceDelegate;

+ 36 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestYarnClientProtocolProvider.java

@@ -18,18 +18,34 @@
 
 package org.apache.hadoop.mapreduce;
 
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.when;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.ResourceMgrDelegate;
 import org.apache.hadoop.mapred.YARNRunner;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
-import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.ClientRMProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
+import org.apache.hadoop.yarn.api.records.DelegationToken;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.junit.Test;
 
 public class TestYarnClientProtocolProvider extends TestCase {
-
+  
+  private static final RecordFactory recordFactory = RecordFactoryProvider.
+      getRecordFactory(null);
+  
   @Test
   public void testClusterWithYarnClientProtocolProvider() throws Exception {
 
@@ -68,7 +84,24 @@ public class TestYarnClientProtocolProvider extends TestCase {
       conf = new Configuration();
       conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
       cluster = new Cluster(conf);
-      cluster.getDelegationToken(new Text(" "));
+      YARNRunner yrunner = (YARNRunner) cluster.getClient();
+      GetDelegationTokenResponse getDTResponse = 
+          recordFactory.newRecordInstance(GetDelegationTokenResponse.class);
+      DelegationToken rmDTToken = recordFactory.newRecordInstance(
+          DelegationToken.class);
+      rmDTToken.setIdentifier(ByteBuffer.wrap(new byte[2]));
+      rmDTToken.setKind("Testclusterkind");
+      rmDTToken.setPassword(ByteBuffer.wrap("testcluster".getBytes()));
+      rmDTToken.setService("0.0.0.0:8040");
+      getDTResponse.setRMDelegationToken(rmDTToken);
+      ClientRMProtocol cRMProtocol = mock(ClientRMProtocol.class);
+      when(cRMProtocol.getDelegationToken(any(
+          GetDelegationTokenRequest.class))).thenReturn(getDTResponse);
+      ResourceMgrDelegate rmgrDelegate = new ResourceMgrDelegate(
+          new YarnConfiguration(conf), cRMProtocol);
+      yrunner.setResourceMgrDelegate(rmgrDelegate);
+      Token t = cluster.getDelegationToken(new Text(" "));
+      assertTrue("Testclusterkind".equals(t.getKind().toString()));
     } finally {
       if (cluster != null) {
         cluster.close();

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java

@@ -164,7 +164,7 @@ public class ShuffleHandler extends AbstractService
    * @param port the port to be sent to the ApplciationMaster
    * @return the serialized form of the port.
    */
-  static ByteBuffer serializeMetaData(int port) throws IOException {
+  public static ByteBuffer serializeMetaData(int port) throws IOException {
     //TODO these bytes should be versioned
     DataOutputBuffer port_dob = new DataOutputBuffer();
     port_dob.writeInt(port);
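
With serializeMetaData now public, AM-side code and tests can build the shuffle service metadata directly. A small hedged sketch; the port value is illustrative:

import java.nio.ByteBuffer;

import org.apache.hadoop.mapred.ShuffleHandler;

public class ShuffleMetaSketch {
  public static void main(String[] args) throws Exception {
    // The buffer wraps a single, currently unversioned int (see TODO above).
    ByteBuffer meta = ShuffleHandler.serializeMetaData(8080);
    System.out.println(meta.remaining() + " bytes"); // typically 4
  }
}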

+ 18 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ClientRMProtocol.java

@@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.api;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Stable;
 
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetAllApplicationsRequest;
@@ -43,6 +45,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.NodeReport;
+import org.apache.hadoop.yarn.api.records.DelegationToken;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
@@ -230,4 +233,19 @@ public interface ClientRMProtocol {
   public GetQueueUserAclsInfoResponse getQueueUserAcls(
       GetQueueUserAclsInfoRequest request) 
   throws YarnRemoteException;
+  
+  /**
+   * <p>The interface used by clients to obtain a delegation token, which
+   * enables containers to talk to the service using that token.
+   *
+   * <p>The <code>ResourceManager</code> responds with a
+   * {@link DelegationToken} that the client can use to authenticate
+   * to this service.
+   * @param request request to get a delegation token for the client.
+   * @return delegation token that can be used to talk to this service
+   * @throws YarnRemoteException
+   */
+  public GetDelegationTokenResponse getDelegationToken(
+      GetDelegationTokenRequest request) 
+  throws YarnRemoteException;
 }

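With the new protocol records below, fetching a token is a plain request/response exchange. A hedged usage sketch (the fetchToken helper is illustrative and assumes an already-connected ClientRMProtocol proxy, or a mock as in TestYarnClientProtocolProvider above):

```java
import org.apache.hadoop.yarn.api.ClientRMProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.yarn.api.records.DelegationToken;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

public class DelegationTokenFetchSketch {
  // rm is an already-connected ClientRMProtocol proxy (or a mock).
  public static DelegationToken fetchToken(ClientRMProtocol rm, String renewer)
      throws Exception {
    RecordFactory rf = RecordFactoryProvider.getRecordFactory(null);
    GetDelegationTokenRequest req =
        rf.newRecordInstance(GetDelegationTokenRequest.class);
    req.setRenewer(renewer); // principal allowed to renew the token
    return rm.getDelegationToken(req).getRMDelegationToken();
  }
}
```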
+ 33 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetDelegationTokenRequest.java

@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+
+/**
+ * The request issued by the client to get a delegation token from
+ * the <code>ResourceManager</code>.
+ */
+@Public
+@Evolving
+public interface GetDelegationTokenRequest {
+  String getRenewer();
+  void setRenewer(String renewer);
+}

+ 37 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetDelegationTokenResponse.java

@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.yarn.api.records.DelegationToken;
+
+
+/**
+ * Response to a {@link GetDelegationTokenRequest} from the client.
+ * The response contains a token that the containers can use to talk
+ * to <code>ClientRMService</code>.
+ *
+ */
+@Public
+@Evolving
+public interface GetDelegationTokenResponse {
+  DelegationToken getRMDelegationToken();
+  void setRMDelegationToken(DelegationToken rmDTToken);
+}

+ 95 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java

@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetDelegationTokenRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetDelegationTokenRequestProtoOrBuilder;
+
+public class GetDelegationTokenRequestPBImpl extends
+      ProtoBase<GetDelegationTokenRequestProto> implements GetDelegationTokenRequest {
+  
+  String renewer;
+  
+  GetDelegationTokenRequestProto proto = 
+      GetDelegationTokenRequestProto.getDefaultInstance();
+  GetDelegationTokenRequestProto.Builder builder = null;
+  boolean viaProto = false;
+  
+  public GetDelegationTokenRequestPBImpl() {
+    builder = GetDelegationTokenRequestProto.newBuilder();
+  }
+  
+  public GetDelegationTokenRequestPBImpl (
+      GetDelegationTokenRequestProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+  
+  @Override
+  public String getRenewer(){
+    GetDelegationTokenRequestProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.renewer != null) {
+      return this.renewer;
+    }
+    if (!p.hasRenewer()) {
+      return null;
+    }
+    this.renewer = p.getRenewer();
+    return this.renewer;  
+  }
+  
+  @Override
+  public void setRenewer(String renewer) {
+    maybeInitBuilder();
+    if (renewer == null) 
+      builder.clearRenewer();
+    this.renewer = renewer;
+  }
+
+  @Override
+  public GetDelegationTokenRequestProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+  
+
+  private void mergeLocalToBuilder() {
+    if (renewer != null) {
+      builder.setRenewer(this.renewer);
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) 
+      maybeInitBuilder();
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = GetDelegationTokenRequestProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+}

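The class follows the usual lazy PBImpl pattern: setters cache values in local fields, and getProto() merges them into the builder on demand. A small round-trip sketch (the class name and renewer value are illustrative):

```java
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl;
import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetDelegationTokenRequestProto;

public class PBImplRoundTripSketch {
  public static void main(String[] args) {
    GetDelegationTokenRequestPBImpl req = new GetDelegationTokenRequestPBImpl();
    req.setRenewer("yarn/rm@EXAMPLE.COM"); // cached in the local field only

    // getProto() merges the local field into the builder and freezes it.
    GetDelegationTokenRequestProto proto = req.getProto();

    // Rebuilding from the proto starts in viaProto mode; the getter
    // lazily reads the value back out of the proto.
    GetDelegationTokenRequestPBImpl copy =
        new GetDelegationTokenRequestPBImpl(proto);
    System.out.println(copy.getRenewer()); // prints: yarn/rm@EXAMPLE.COM
  }
}
```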
+ 109 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java

@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse;
+import org.apache.hadoop.yarn.api.records.DelegationToken;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.DelegationTokenPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.DelegationTokenProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetDelegationTokenResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetDelegationTokenResponseProtoOrBuilder;
+
+public class GetDelegationTokenResponsePBImpl extends
+    ProtoBase<GetDelegationTokenResponseProto> implements GetDelegationTokenResponse {
+
+  DelegationToken appToken;
+
+
+  GetDelegationTokenResponseProto proto = 
+      GetDelegationTokenResponseProto.getDefaultInstance();
+  GetDelegationTokenResponseProto.Builder builder = null;
+  boolean viaProto = false;
+
+  public GetDelegationTokenResponsePBImpl() {
+    builder = GetDelegationTokenResponseProto.newBuilder();
+  }
+
+  public GetDelegationTokenResponsePBImpl (
+      GetDelegationTokenResponseProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  @Override
+  public DelegationToken getRMDelegationToken() {
+    GetDelegationTokenResponseProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.appToken != null) {
+      return this.appToken;
+    }
+    if (!p.hasApplicationToken()) {
+      return null;
+    }
+    this.appToken = convertFromProtoFormat(p.getApplicationToken());
+    return this.appToken;  
+  }
+
+  @Override
+  public void setRMDelegationToken(DelegationToken appToken) {
+    maybeInitBuilder();
+    if (appToken == null) 
+      builder.clearApplicationToken();
+    this.appToken = appToken;
+  }
+
+  @Override
+  public GetDelegationTokenResponseProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+
+  private void mergeLocalToBuilder() {
+    if (appToken != null) {
+      builder.setApplicationToken(convertToProtoFormat(this.appToken));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) 
+      maybeInitBuilder();
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = GetDelegationTokenResponseProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+
+
+  private DelegationTokenPBImpl convertFromProtoFormat(DelegationTokenProto p) {
+    return new DelegationTokenPBImpl(p);
+  }
+
+  private DelegationTokenProto convertToProtoFormat(DelegationToken t) {
+    return ((DelegationTokenPBImpl)t).getProto();
+  }
+}

+ 84 - 0
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/DelegationToken.java

@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.records;
+
+import java.nio.ByteBuffer;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.classification.InterfaceStability.Stable;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+
+/**
+ * Delegation tokens carry an identifier that maps to
+ * {@link AbstractDelegationTokenIdentifier}.
+ * 
+ */
+@Public
+@Evolving
+public interface DelegationToken {
+  /**
+   * Get the token identifier.
+   * @return token identifier
+   */
+  @Public
+  @Stable
+  ByteBuffer getIdentifier();
+  
+  @Private
+  @Stable
+  void setIdentifier(ByteBuffer identifier);
+
+  /**
+   * Get the token password.
+   * @return token password
+   */
+  @Public
+  @Stable
+  ByteBuffer getPassword();
+  
+  @Private
+  @Stable
+  void setPassword(ByteBuffer password);
+
+  /**
+   * Get the token kind.
+   * @return token kind
+   */
+  @Public
+  @Stable
+  String getKind();
+  
+  @Private
+  @Stable
+  void setKind(String kind);
+
+  /**
+   * Get the service to which the token is allocated.
+   * @return service to which the token is allocated
+   */
+  @Public
+  @Stable
+  String getService();
+
+  @Private
+  @Stable
+  void setService(String service);
+}

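Client-side, these four fields map directly onto the common security Token that Cluster.getDelegationToken() returns in the test above. A hedged conversion sketch (assumes the identifier and password buffers are array-backed, as they are when built with ByteBuffer.wrap):

```java
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.api.records.DelegationToken;

public class TokenConversionSketch {
  public static Token<? extends TokenIdentifier> toCommonToken(
      DelegationToken dt) {
    return new Token<TokenIdentifier>(
        dt.getIdentifier().array(),  // raw identifier bytes
        dt.getPassword().array(),    // raw password bytes
        new Text(dt.getKind()),      // e.g. "Testclusterkind" in the test
        new Text(dt.getService()));  // e.g. "0.0.0.0:8040"
  }
}
```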
Some files were not shown because too many files changed in this diff