
Merge r1227776 through r1231827 from 0.23.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23-PB@1231834 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze
commit e4e47a7519
100 changed files with 6285 additions and 1206 deletions
  1. 0 20
      hadoop-assemblies/pom.xml
  2. 3 0
      hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml
  3. 0 8
      hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml
  4. 25 0
      hadoop-common-project/hadoop-common/CHANGES.txt
  5. 5 2
      hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/cluster_setup.xml
  6. 2 2
      hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/file_system_shell.xml
  7. 21 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  8. 6 10
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
  9. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  10. 7 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
  11. 2 173
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
  12. 171 17
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
  13. 8 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
  14. 1 1
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  15. 24 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
  16. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java
  17. 9 10
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java
  18. 1 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
  19. 17 6
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/NetUtilsTestResolver.java
  20. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java
  21. 56 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
  22. 5 1
      hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
  23. 1 0
      hadoop-dist/pom.xml
  24. 5 5
      hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
  25. 20 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  26. 36 42
      hadoop-hdfs-project/hadoop-hdfs/pom.xml
  27. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  28. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  29. 1 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
  30. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  31. 0 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
  32. 2 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  33. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
  34. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  35. 4 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
  36. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
  37. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
  38. 6 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
  39. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
  40. 80 1
      hadoop-mapreduce-project/CHANGES.txt
  41. 0 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
  42. 42 24
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
  43. 222 37
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
  44. 33 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java
  45. 26 4
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
  46. 2 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptListener.java
  47. 41 30
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java
  48. 25 8
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
  49. 10 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
  50. 1 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Task.java
  51. 1 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java
  52. 1 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptStatusUpdateEvent.java
  53. 29 76
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
  54. 16 16
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
  55. 8 4
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
  56. 229 167
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
  57. 5 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerRemoteLaunchEvent.java
  58. 1 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java
  59. 133 131
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java
  60. 27 21
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
  61. 13 10
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java
  62. 46 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java
  63. 15 11
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterBlock.java
  64. 95 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptInfo.java
  65. 45 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptsInfo.java
  66. 1 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AppInfo.java
  67. 1 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ConfInfo.java
  68. 6 6
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterGroupInfo.java
  69. 5 5
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterInfo.java
  70. 17 21
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobCounterInfo.java
  71. 10 14
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobInfo.java
  72. 6 7
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskAttemptCounterInfo.java
  73. 6 6
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskCounterInfo.java
  74. 4 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptInfo.java
  75. 4 4
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java
  76. 4 4
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskCounterGroupInfo.java
  77. 4 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskInfo.java
  78. 6 5
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java
  79. 310 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
  80. 4 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
  81. 121 91
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
  82. 0 140
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestContainerLauncher.java
  83. 31 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
  84. 7 8
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
  85. 321 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
  86. 223 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java
  87. 359 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServices.java
  88. 732 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java
  89. 336 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobConf.java
  90. 973 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobs.java
  91. 821 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesTasks.java
  92. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
  93. 3 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java
  94. 64 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRDelegationTokenIdentifier.java
  95. 25 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java
  96. 20 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java
  97. 29 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDelegationTokenRequest.java
  98. 25 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDelegationTokenResponse.java
  99. 97 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java
  100. 109 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java

+ 0 - 20
hadoop-assemblies/pom.xml

@@ -34,26 +34,6 @@
   </properties>
 
   <build>
-    <pluginManagement>
-      <plugins>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-enforcer-plugin</artifactId>
-          <version>1.0</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.maven.plugins</groupId>
-          <artifactId>maven-assembly-plugin</artifactId>
-          <version>2.2-beta-3</version>
-        </plugin>
-        <plugin>
-          <groupId>org.apache.rat</groupId>
-          <artifactId>apache-rat-plugin</artifactId>
-          <version>0.7</version>
-        </plugin>
-      </plugins>
-    </pluginManagement>
-
     <plugins>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>

+ 3 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-dist.xml

@@ -94,6 +94,9 @@
         <include>${project.artifactId}-${project.version}-sources.jar</include>
         <include>${project.artifactId}-${project.version}-test-sources.jar</include>
       </includes>
+      <excludes>
+        <exclude>hadoop-tools-dist-*.jar</exclude>
+      </excludes>
     </fileSet>
     <fileSet>
       <directory>${basedir}/dev-support/jdiff</directory>

+ 0 - 8
hadoop-assemblies/src/main/resources/assemblies/hadoop-httpfs-dist.xml

@@ -27,14 +27,6 @@
         <include>*</include>
       </includes>
     </fileSet>
-    <!-- Readme, licenses, etc. -->
-    <fileSet>
-      <directory>${basedir}</directory>
-      <outputDirectory>/</outputDirectory>
-      <includes>
-        <include>*.txt</include>
-      </includes>
-    </fileSet>
     <fileSet>
       <directory>${basedir}/src/main/sbin</directory>
       <outputDirectory>/sbin</outputDirectory>

+ 25 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -64,6 +64,8 @@ Release 0.23.1 - Unreleased
 
     HADOOP-7657. Add support for LZ4 compression. (Binglin Chang via todd)
 
+    HADOOP-7910. Add Configuration.getLongBytes to handle human readable byte size values. (Sho Shimauchi via harsh)
+
   IMPROVEMENTS
 
     HADOOP-7801. HADOOP_PREFIX cannot be overriden. (Bruno Mahé via tomwhite)
@@ -108,6 +110,13 @@ Release 0.23.1 - Unreleased
     hostname in token instead of IP to allow server IP change.
     (Daryn Sharp via suresh)
 
+    HADOOP-7934. Normalize dependencies versions across all modules. (tucu)
+
+    HADOOP-7348. Change 'addnl' in getmerge util to be a flag '-nl' instead.
+    (XieXianshan via harsh)
+
+    HADOOP-7975. Add LZ4 as an entry in the default codec list, missed by HADOOP-7657 (harsh)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -168,6 +177,19 @@ Release 0.23.1 - Unreleased
    HADOOP-7949. Updated maxIdleTime default in the code to match
    core-default.xml (eli)
 
+   HADOOP-7907. hadoop-tools JARs are not part of the distro. (tucu)
+
+   HADOOP-7936. There's a Hoop README in the root dir of the tarball. (tucu)
+
+   HADOOP-7963. Fix ViewFS to catch a null canonical service-name and pass
+   tests TestViewFileSystem* (Siddharth Seth via vinodkv)
+
+   HADOOP-7964. Deadlock in NetUtils and SecurityUtil class initialization.
+   (Daryn Sharp via suresh)
+
+   HADOOP-7974. TestViewFsTrash incorrectly determines the user's home
+   directory. (harsh via eli)
+
 Release 0.23.0 - 2011-11-01
 
   INCOMPATIBLE CHANGES
@@ -914,6 +936,9 @@ Release 0.22.1 - Unreleased
 
   BUG FIXES
 
+    HADOOP-7937. Forward port SequenceFile#syncFs and friends from Hadoop 1.x.
+    (tomwhite)
+
 Release 0.22.0 - 2011-11-29
 
   INCOMPATIBLE CHANGES

+ 5 - 2
hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/cluster_setup.xml

@@ -628,8 +628,11 @@
                   <tr>
                     <td>conf/hdfs-site.xml</td>
                     <td>dfs.blocksize</td>
-                    <td>134217728</td>
-                    <td>HDFS blocksize of 128MB for large file-systems.</td>
+                    <td>128m</td>
+                    <td>
+                        HDFS blocksize of 128 MB for large file-systems. Sizes can be provided
+                        in size-prefixed values (10k, 128m, 1g, etc.) or simply in bytes (134217728 for 128 MB, etc.).
+                    </td>
                   </tr>
                   <tr>
                     <td>conf/hdfs-site.xml</td>

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/file_system_shell.xml

@@ -260,11 +260,11 @@
 		<section>
 			<title> getmerge </title>
 			<p>
-				<code>Usage: hdfs dfs -getmerge &lt;src&gt; &lt;localdst&gt; [addnl]</code>
+				<code>Usage: hdfs dfs -getmerge [-nl] &lt;src&gt; &lt;localdst&gt;</code>
 			</p>
 			<p>
 	  Takes a source directory and a destination file as input and concatenates files in src into the destination local file.
-	  Optionally <code>addnl</code> can be set to enable adding a newline character at the end of each file.  
+	  Optionally <code>-nl</code> flag can be set to enable adding a newline character at the end of each file during merge.
 	  </p>
 		</section>

+ 21 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -737,6 +737,27 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     return Long.parseLong(valueString);
   }
 
+  /**
+   * Get the value of the <code>name</code> property as a <code>long</code> or
+   * human readable format. If no such property exists, the provided default
+   * value is returned, or if the specified value is not a valid
+   * <code>long</code> or human readable format, then an error is thrown. You
+   * can use the following suffix (case insensitive): k(kilo), m(mega), g(giga),
+   * t(tera), p(peta), e(exa)
+   *
+   * @param name property name.
+   * @param defaultValue default value.
+   * @throws NumberFormatException when the value is invalid
+   * @return property value as a <code>long</code>,
+   *         or <code>defaultValue</code>.
+   */
+  public long getLongBytes(String name, long defaultValue) {
+    String valueString = getTrimmed(name);
+    if (valueString == null)
+      return defaultValue;
+    return StringUtils.TraditionalBinaryPrefix.string2long(valueString);
+  }
+
   private String getHexDigits(String value) {
     boolean negative = false;
     String str = value;
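
For context, a minimal usage sketch of the new accessor (the property name and values below are illustrative, not part of this patch):

    import org.apache.hadoop.conf.Configuration;

    public class GetLongBytesExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // suffixes k, m, g, t, p, e are accepted, case insensitive
        conf.set("dfs.blocksize", "128m");
        // "128m" is a traditional binary prefix: 128 * 2^20 bytes
        System.out.println(conf.getLongBytes("dfs.blocksize", 0)); // prints 134217728
      }
    }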

+ 6 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java

@@ -45,26 +45,22 @@ class CopyCommands {
   /** merge multiple files together */
   public static class Merge extends FsCommand {
     public static final String NAME = "getmerge";
-    public static final String USAGE = "<src> <localdst> [addnl]";
+    public static final String USAGE = "[-nl] <src> <localdst>";
     public static final String DESCRIPTION =
       "Get all the files in the directories that\n" +
       "match the source file pattern and merge and sort them to only\n" +
-      "one file on local fs. <src> is kept.";
+      "one file on local fs. <src> is kept.\n" +
+      "  -nl   Add a newline character at the end of each file.";
 
     protected PathData dst = null;
     protected String delimiter = null;
 
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
-      CommandFormat cf = new CommandFormat(2, 3);
+      CommandFormat cf = new CommandFormat(2, 3, "nl");
       cf.parse(args);
 
-      // TODO: this really should be a -nl option
-      if ((args.size() > 2) && Boolean.parseBoolean(args.removeLast())) {
-        delimiter = "\n";
-      } else {
-        delimiter = null;
-      }
+      delimiter = cf.getOpt("nl") ? "\n" : null;
 
       dst = new PathData(new File(args.removeLast()), getConf());
     }
@@ -197,4 +193,4 @@ class CopyCommands {
     public static final String USAGE = Get.USAGE;
     public static final String DESCRIPTION = "Identical to the -get command.";
   }
-}
+}
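
A hedged sketch of the flag handling this hunk switches to; CommandFormat is the same org.apache.hadoop.fs.shell helper the patch itself calls, and the argument list stands in for what the shell would pass for "hdfs dfs -getmerge -nl /logs merged.txt":

    import java.util.Arrays;
    import java.util.LinkedList;
    import org.apache.hadoop.fs.shell.CommandFormat;

    public class GetmergeOptionSketch {
      public static void main(String[] args) {
        LinkedList<String> argv =
            new LinkedList<String>(Arrays.asList("-nl", "/logs", "merged.txt"));
        CommandFormat cf = new CommandFormat(2, 3, "nl"); // 2-3 positional args, one flag
        cf.parse(argv);                                   // consumes "-nl", keeps positionals
        String delimiter = cf.getOpt("nl") ? "\n" : null; // newline appended per merged file
        System.out.println("delimiter? " + (delimiter != null) + ", remaining: " + argv);
      }
    }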

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -514,7 +514,7 @@ public class ViewFileSystem extends FileSystem {
     for (int i = 0; i < mountPoints.size(); ++i) {
       String serviceName =
           mountPoints.get(i).target.targetFileSystem.getCanonicalServiceName();
-      if (seenServiceNames.contains(serviceName)) {
+      if (serviceName == null || seenServiceNames.contains(serviceName)) {
         continue;
       }
       seenServiceNames.add(serviceName);

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java

@@ -1193,6 +1193,13 @@ public class SequenceFile {
       }
     }
 
+    /** flush all currently written data to the file system */
+    public void syncFs() throws IOException {
+      if (out != null) {
+        out.sync();                               // flush contents to file system
+      }
+    }
+
     /** Returns the configuration of this file. */
     Configuration getConf() { return conf; }
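
A minimal usage sketch for the forward-ported method (the path and key/value types are hypothetical); syncFs lets a writer push buffered records to the file system without closing:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public class SyncFsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        SequenceFile.Writer writer = SequenceFile.createWriter(
            fs, conf, new Path("/tmp/events.seq"), IntWritable.class, Text.class);
        writer.append(new IntWritable(1), new Text("first record"));
        writer.syncFs(); // flush what has been written so far to the file system
        writer.close();
      }
    }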
     
     

+ 2 - 173
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java

@@ -51,12 +51,6 @@ import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 
-import com.google.common.annotations.VisibleForTesting;
-
-//this will need to be replaced someday when there is a suitable replacement
-import sun.net.dns.ResolverConfiguration;
-import sun.net.util.IPAddressUtil;
-
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 public class NetUtils {
@@ -72,26 +66,6 @@ public class NetUtils {
   /** Base URL of the Hadoop Wiki: {@value} */
   public static final String HADOOP_WIKI = "http://wiki.apache.org/hadoop/";
 
-  private static HostResolver hostResolver;
-  
-  static {
-    // SecurityUtils requires a more secure host resolver if tokens are
-    // using hostnames
-    setUseQualifiedHostResolver(!SecurityUtil.getTokenServiceUseIp());
-  }
-
-  /**
-   * This method is intended for use only by SecurityUtils!
-   * @param flag where the qualified or standard host resolver is used
-   *             to create socket addresses
-   */
-  @InterfaceAudience.Private
-  public static void setUseQualifiedHostResolver(boolean flag) {
-      hostResolver = flag
-          ? new QualifiedHostResolver()
-          : new StandardHostResolver();
-  }
-  
   /**
    * Get the socket factory for the given class according to its
    * configuration parameter
@@ -249,7 +223,7 @@ public class NetUtils {
 
     InetSocketAddress addr;
     try {
-      InetAddress iaddr = hostResolver.getByName(resolveHost);
+      InetAddress iaddr = SecurityUtil.getByName(resolveHost);
       // if there is a static entry for the host, make the returned
       // address look like the original given host
       if (staticHost != null) {
@@ -261,151 +235,6 @@ public class NetUtils {
     }
     return addr;
   }
-
-  interface HostResolver {
-    InetAddress getByName(String host) throws UnknownHostException;    
-  }
-  
-  /**
-   * Uses standard java host resolution
-   */
-  static class StandardHostResolver implements HostResolver {
-    public InetAddress getByName(String host) throws UnknownHostException {
-      return InetAddress.getByName(host);
-    }
-  }
-  
-  /**
-   * This an alternate resolver with important properties that the standard
-   * java resolver lacks:
-   * 1) The hostname is fully qualified.  This avoids security issues if not
-   *    all hosts in the cluster do not share the same search domains.  It
-   *    also prevents other hosts from performing unnecessary dns searches.
-   *    In contrast, InetAddress simply returns the host as given.
-   * 2) The InetAddress is instantiated with an exact host and IP to prevent
-   *    further unnecessary lookups.  InetAddress may perform an unnecessary
-   *    reverse lookup for an IP.
-   * 3) A call to getHostName() will always return the qualified hostname, or
-   *    more importantly, the IP if instantiated with an IP.  This avoids
-   *    unnecessary dns timeouts if the host is not resolvable.
-   * 4) Point 3 also ensures that if the host is re-resolved, ex. during a
-   *    connection re-attempt, that a reverse lookup to host and forward
-   *    lookup to IP is not performed since the reverse/forward mappings may
-   *    not always return the same IP.  If the client initiated a connection
-   *    with an IP, then that IP is all that should ever be contacted.
-   *    
-   * NOTE: this resolver is only used if:
-   *       hadoop.security.token.service.use_ip=false 
-   */
-  protected static class QualifiedHostResolver implements HostResolver {
-    @SuppressWarnings("unchecked")
-    private List<String> searchDomains =
-        ResolverConfiguration.open().searchlist();
-    
-    /**
-     * Create an InetAddress with a fully qualified hostname of the given
-     * hostname.  InetAddress does not qualify an incomplete hostname that
-     * is resolved via the domain search list.
-     * {@link InetAddress#getCanonicalHostName()} will fully qualify the
-     * hostname, but it always return the A record whereas the given hostname
-     * may be a CNAME.
-     * 
-     * @param host a hostname or ip address
-     * @return InetAddress with the fully qualified hostname or ip
-     * @throws UnknownHostException if host does not exist
-     */
-    public InetAddress getByName(String host) throws UnknownHostException {
-      InetAddress addr = null;
-
-      if (IPAddressUtil.isIPv4LiteralAddress(host)) {
-        // use ipv4 address as-is
-        byte[] ip = IPAddressUtil.textToNumericFormatV4(host);
-        addr = InetAddress.getByAddress(host, ip);
-      } else if (IPAddressUtil.isIPv6LiteralAddress(host)) {
-        // use ipv6 address as-is
-        byte[] ip = IPAddressUtil.textToNumericFormatV6(host);
-        addr = InetAddress.getByAddress(host, ip);
-      } else if (host.endsWith(".")) {
-        // a rooted host ends with a dot, ex. "host."
-        // rooted hosts never use the search path, so only try an exact lookup
-        addr = getByExactName(host);
-      } else if (host.contains(".")) {
-        // the host contains a dot (domain), ex. "host.domain"
-        // try an exact host lookup, then fallback to search list
-        addr = getByExactName(host);
-        if (addr == null) {
-          addr = getByNameWithSearch(host);
-        }
-      } else {
-        // it's a simple host with no dots, ex. "host"
-        // try the search list, then fallback to exact host
-        InetAddress loopback = InetAddress.getByName(null);
-        if (host.equalsIgnoreCase(loopback.getHostName())) {
-          addr = InetAddress.getByAddress(host, loopback.getAddress());
-        } else {
-          addr = getByNameWithSearch(host);
-          if (addr == null) {
-            addr = getByExactName(host);
-          }
-        }
-      }
-      // unresolvable!
-      if (addr == null) {
-        throw new UnknownHostException(host);
-      }
-      return addr;
-    }
-
-    InetAddress getByExactName(String host) {
-      InetAddress addr = null;
-      // InetAddress will use the search list unless the host is rooted
-      // with a trailing dot.  The trailing dot will disable any use of the
-      // search path in a lower level resolver.  See RFC 1535.
-      String fqHost = host;
-      if (!fqHost.endsWith(".")) fqHost += ".";
-      try {
-        addr = getInetAddressByName(fqHost);
-        // can't leave the hostname as rooted or other parts of the system
-        // malfunction, ex. kerberos principals are lacking proper host
-        // equivalence for rooted/non-rooted hostnames
-        addr = InetAddress.getByAddress(host, addr.getAddress());
-      } catch (UnknownHostException e) {
-        // ignore, caller will throw if necessary
-      }
-      return addr;
-    }
-
-    InetAddress getByNameWithSearch(String host) {
-      InetAddress addr = null;
-      if (host.endsWith(".")) { // already qualified?
-        addr = getByExactName(host); 
-      } else {
-        for (String domain : searchDomains) {
-          String dot = !domain.startsWith(".") ? "." : "";
-          addr = getByExactName(host + dot + domain);
-          if (addr != null) break;
-        }
-      }
-      return addr;
-    }
-
-    // implemented as a separate method to facilitate unit testing
-    InetAddress getInetAddressByName(String host) throws UnknownHostException {
-      return InetAddress.getByName(host);
-    }
-
-    void setSearchDomains(String ... domains) {
-      searchDomains = Arrays.asList(domains);
-    }
-  }
-  
-  /**
-   * This is for testing only!
-   */
-  @VisibleForTesting
-  static void setHostResolver(HostResolver newResolver) {
-    hostResolver = newResolver;
-  }
 
   /**
    * Resolve the uri's hostname and add the default port if not in the uri
@@ -447,7 +276,7 @@ public class NetUtils {
     String fqHost = canonicalizedHostCache.get(host);
     if (fqHost == null) {
       try {
-        fqHost = hostResolver.getByName(host).getHostName();
+        fqHost = SecurityUtil.getByName(host).getHostName();
         // slight race condition, but won't hurt
         canonicalizedHostCache.put(host, fqHost);
       } catch (UnknownHostException e) {

+ 171 - 17
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java

@@ -23,6 +23,8 @@ import java.net.URI;
 import java.net.URL;
 import java.net.UnknownHostException;
 import java.security.AccessController;
+import java.util.Arrays;
+import java.util.List;
 import java.util.ServiceLoader;
 import java.util.Set;
 
@@ -41,6 +43,11 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenInfo;
 
+import com.google.common.annotations.VisibleForTesting;
+
+//this will need to be replaced someday when there is a suitable replacement
+import sun.net.dns.ResolverConfiguration;
+import sun.net.util.IPAddressUtil;
 import sun.security.jgss.krb5.Krb5Util;
 import sun.security.krb5.Credentials;
 import sun.security.krb5.PrincipalName;
@@ -53,7 +60,10 @@ public class SecurityUtil {
 
   // controls whether buildTokenService will use an ip or host/ip as given
   // by the user
-  private static boolean useIpForTokenService;
+  @VisibleForTesting
+  static boolean useIpForTokenService;
+  @VisibleForTesting
+  static HostResolver hostResolver;
 
   static {
     boolean useIp = new Configuration().getBoolean(
@@ -68,16 +78,9 @@ public class SecurityUtil {
   @InterfaceAudience.Private
   static void setTokenServiceUseIp(boolean flag) {
     useIpForTokenService = flag;
-    NetUtils.setUseQualifiedHostResolver(!flag);
-  }
-  
-  /**
-   * Intended only for temporary use by NetUtils.  Do not use.
-   * @return whether tokens use an IP address
-   */
-  @InterfaceAudience.Private
-  public static boolean getTokenServiceUseIp() {
-    return useIpForTokenService;
+    hostResolver = !useIpForTokenService
+        ? new QualifiedHostResolver()
+        : new StandardHostResolver();
   }
 
   /**
@@ -142,7 +145,7 @@ public class SecurityUtil {
    * it will be removed when the Java behavior is changed.
    *
    * @param remoteHost Target URL the krb-https client will access
-   * @throws IOException
+   * @throws IOException if the service ticket cannot be retrieved
    */
   public static void fetchServiceTicket(URL remoteHost) throws IOException {
     if(!UserGroupInformation.isSecurityEnabled())
@@ -179,7 +182,7 @@ public class SecurityUtil {
    * @param hostname
    *          the fully-qualified domain name used for substitution
    * @return converted Kerberos principal name
-   * @throws IOException
+   * @throws IOException if the client address cannot be determined
    */
   public static String getServerPrincipal(String principalConfig,
       String hostname) throws IOException {
@@ -204,7 +207,7 @@ public class SecurityUtil {
    * @param addr
    *          InetAddress of the host used for substitution
    * @return converted Kerberos principal name
-   * @throws IOException
+   * @throws IOException if the client address cannot be determined
    */
   public static String getServerPrincipal(String principalConfig,
       InetAddress addr) throws IOException {
@@ -251,7 +254,7 @@ public class SecurityUtil {
    *          the key to look for keytab file in conf
    * @param userNameKey
    *          the key to look for user's Kerberos principal name in conf
-   * @throws IOException
+   * @throws IOException if login fails
    */
   public static void login(final Configuration conf,
       final String keytabFileKey, final String userNameKey) throws IOException {
@@ -271,7 +274,7 @@ public class SecurityUtil {
    *          the key to look for user's Kerberos principal name in conf
    * @param hostname
    *          hostname to use for substitution
-   * @throws IOException
+   * @throws IOException if the config doesn't specify a keytab
    */
   public static void login(final Configuration conf,
       final String keytabFileKey, final String userNameKey, String hostname)
@@ -363,7 +366,7 @@ public class SecurityUtil {
    * Look up the TokenInfo for a given protocol. It searches all known
    * SecurityInfo providers.
    * @param protocol The protocol class to get the information for.
-   * @conf conf Configuration object
+   * @param conf Configuration object
    * @return the TokenInfo or null if it has no KerberosInfo defined
    */
   public static TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
@@ -442,4 +445,155 @@ public class SecurityUtil {
   public static Text buildTokenService(URI uri) {
     return buildTokenService(NetUtils.createSocketAddr(uri.getAuthority()));
   }
+  
+  /**
+   * Resolves a host subject to the security requirements determined by
+   * hadoop.security.token.service.use_ip.
+   * 
+   * @param hostname host or ip to resolve
+   * @return a resolved host
+   * @throws UnknownHostException if the host doesn't exist
+   */
+  @InterfaceAudience.Private
+  public static
+  InetAddress getByName(String hostname) throws UnknownHostException {
+    return hostResolver.getByName(hostname);
+  }
+  
+  interface HostResolver {
+    InetAddress getByName(String host) throws UnknownHostException;    
+  }
+  
+  /**
+   * Uses standard java host resolution
+   */
+  static class StandardHostResolver implements HostResolver {
+    public InetAddress getByName(String host) throws UnknownHostException {
+      return InetAddress.getByName(host);
+    }
+  }
+  
+  /**
+   * This an alternate resolver with important properties that the standard
+   * java resolver lacks:
+   * 1) The hostname is fully qualified.  This avoids security issues if not
+   *    all hosts in the cluster do not share the same search domains.  It
+   *    also prevents other hosts from performing unnecessary dns searches.
+   *    In contrast, InetAddress simply returns the host as given.
+   * 2) The InetAddress is instantiated with an exact host and IP to prevent
+   *    further unnecessary lookups.  InetAddress may perform an unnecessary
+   *    reverse lookup for an IP.
+   * 3) A call to getHostName() will always return the qualified hostname, or
+   *    more importantly, the IP if instantiated with an IP.  This avoids
+   *    unnecessary dns timeouts if the host is not resolvable.
+   * 4) Point 3 also ensures that if the host is re-resolved, ex. during a
+   *    connection re-attempt, that a reverse lookup to host and forward
+   *    lookup to IP is not performed since the reverse/forward mappings may
+   *    not always return the same IP.  If the client initiated a connection
+   *    with an IP, then that IP is all that should ever be contacted.
+   *    
+   * NOTE: this resolver is only used if:
+   *       hadoop.security.token.service.use_ip=false 
+   */
+  protected static class QualifiedHostResolver implements HostResolver {
+    @SuppressWarnings("unchecked")
+    private List<String> searchDomains =
+        ResolverConfiguration.open().searchlist();
+    
+    /**
+     * Create an InetAddress with a fully qualified hostname of the given
+     * hostname.  InetAddress does not qualify an incomplete hostname that
+     * is resolved via the domain search list.
+     * {@link InetAddress#getCanonicalHostName()} will fully qualify the
+     * hostname, but it always return the A record whereas the given hostname
+     * may be a CNAME.
+     * 
+     * @param host a hostname or ip address
+     * @return InetAddress with the fully qualified hostname or ip
+     * @throws UnknownHostException if host does not exist
+     */
+    public InetAddress getByName(String host) throws UnknownHostException {
+      InetAddress addr = null;
+
+      if (IPAddressUtil.isIPv4LiteralAddress(host)) {
+        // use ipv4 address as-is
+        byte[] ip = IPAddressUtil.textToNumericFormatV4(host);
+        addr = InetAddress.getByAddress(host, ip);
+      } else if (IPAddressUtil.isIPv6LiteralAddress(host)) {
+        // use ipv6 address as-is
+        byte[] ip = IPAddressUtil.textToNumericFormatV6(host);
+        addr = InetAddress.getByAddress(host, ip);
+      } else if (host.endsWith(".")) {
+        // a rooted host ends with a dot, ex. "host."
+        // rooted hosts never use the search path, so only try an exact lookup
+        addr = getByExactName(host);
+      } else if (host.contains(".")) {
+        // the host contains a dot (domain), ex. "host.domain"
+        // try an exact host lookup, then fallback to search list
+        addr = getByExactName(host);
+        if (addr == null) {
+          addr = getByNameWithSearch(host);
+        }
+      } else {
+        // it's a simple host with no dots, ex. "host"
+        // try the search list, then fallback to exact host
+        InetAddress loopback = InetAddress.getByName(null);
+        if (host.equalsIgnoreCase(loopback.getHostName())) {
+          addr = InetAddress.getByAddress(host, loopback.getAddress());
+        } else {
+          addr = getByNameWithSearch(host);
+          if (addr == null) {
+            addr = getByExactName(host);
+          }
+        }
+      }
+      // unresolvable!
+      if (addr == null) {
+        throw new UnknownHostException(host);
+      }
+      return addr;
+    }
+
+    InetAddress getByExactName(String host) {
+      InetAddress addr = null;
+      // InetAddress will use the search list unless the host is rooted
+      // with a trailing dot.  The trailing dot will disable any use of the
+      // search path in a lower level resolver.  See RFC 1535.
+      String fqHost = host;
+      if (!fqHost.endsWith(".")) fqHost += ".";
+      try {
+        addr = getInetAddressByName(fqHost);
+        // can't leave the hostname as rooted or other parts of the system
+        // malfunction, ex. kerberos principals are lacking proper host
+        // equivalence for rooted/non-rooted hostnames
+        addr = InetAddress.getByAddress(host, addr.getAddress());
+      } catch (UnknownHostException e) {
+        // ignore, caller will throw if necessary
+      }
+      return addr;
+    }
+
+    InetAddress getByNameWithSearch(String host) {
+      InetAddress addr = null;
+      if (host.endsWith(".")) { // already qualified?
+        addr = getByExactName(host); 
+      } else {
+        for (String domain : searchDomains) {
+          String dot = !domain.startsWith(".") ? "." : "";
+          addr = getByExactName(host + dot + domain);
+          if (addr != null) break;
+        }
+      }
+      return addr;
+    }
+
+    // implemented as a separate method to facilitate unit testing
+    InetAddress getInetAddressByName(String host) throws UnknownHostException {
+      return InetAddress.getByName(host);
+    }
+
+    void setSearchDomains(String ... domains) {
+      searchDomains = Arrays.asList(domains);
+    }
+  }  
 }
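
A short sketch of the new public entry point (the hostname is illustrative); callers such as NetUtils now resolve through SecurityUtil so the resolver choice tracks hadoop.security.token.service.use_ip:

    import java.net.InetAddress;
    import org.apache.hadoop.security.SecurityUtil;

    public class ResolveSketch {
      public static void main(String[] args) throws Exception {
        // uses QualifiedHostResolver when hadoop.security.token.service.use_ip=false,
        // StandardHostResolver otherwise
        InetAddress addr = SecurityUtil.getByName("namenode.example.com");
        System.out.println(addr.getHostName() + " -> " + addr.getHostAddress());
      }
    }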

+ 8 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java

@@ -661,7 +661,14 @@ public class StringUtils {
       if (Character.isDigit(lastchar))
         return Long.parseLong(s);
       else {
-        long prefix = TraditionalBinaryPrefix.valueOf(lastchar).value;
+        long prefix;
+        try {
+          prefix = TraditionalBinaryPrefix.valueOf(lastchar).value;
+        } catch (IllegalArgumentException e) {
+          throw new IllegalArgumentException("Invalid size prefix '" + lastchar
+              + "' in '" + s
+              + "'. Allowed prefixes are k, m, g, t, p, e(case insensitive)");
+        }
         long num = Long.parseLong(s.substring(0, lastpos));
         if (num > (Long.MAX_VALUE/prefix) || num < (Long.MIN_VALUE/prefix)) {
           throw new IllegalArgumentException(s + " does not fit in a Long");
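
To make the new error path concrete, a small sketch of string2long behavior (inputs chosen for illustration):

    import org.apache.hadoop.util.StringUtils;

    public class String2LongSketch {
      public static void main(String[] args) {
        // binary prefixes, case insensitive: k, m, g, t, p, e
        System.out.println(
            StringUtils.TraditionalBinaryPrefix.string2long("128m")); // 134217728
        try {
          StringUtils.TraditionalBinaryPrefix.string2long("1x");      // invalid prefix
        } catch (IllegalArgumentException e) {
          System.out.println(e.getMessage()); // names the bad prefix 'x'
        }
      }
    }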

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -175,7 +175,7 @@
 
 <property>
   <name>io.compression.codecs</name>
-  <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.DeflateCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
+  <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.DeflateCodec,org.apache.hadoop.io.compress.SnappyCodec,org.apache.hadoop.io.compress.Lz4Codec</value>
   <description>A list of the compression codec classes that can be used
                for compression/decompression.</description>
 </property>

+ 24 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java

@@ -405,12 +405,16 @@ public class TestConfiguration extends TestCase {
     conf.addResource(fileResource);
     assertEquals(20, conf.getInt("test.int1", 0));
     assertEquals(20, conf.getLong("test.int1", 0));
+    assertEquals(20, conf.getLongBytes("test.int1", 0));
     assertEquals(20, conf.getInt("test.int2", 0));
     assertEquals(20, conf.getLong("test.int2", 0));
+    assertEquals(20, conf.getLongBytes("test.int2", 0));
     assertEquals(-20, conf.getInt("test.int3", 0));
     assertEquals(-20, conf.getLong("test.int3", 0));
+    assertEquals(-20, conf.getLongBytes("test.int3", 0));
     assertEquals(-20, conf.getInt("test.int4", 0));
     assertEquals(-20, conf.getLong("test.int4", 0));
+    assertEquals(-20, conf.getLongBytes("test.int4", 0));
     try {
       conf.getInt("test.int5", 0);
       fail("Property had invalid int value, but was read successfully.");
@@ -419,6 +423,26 @@ public class TestConfiguration extends TestCase {
     }
   }
 
+  public void testHumanReadableValues() throws IOException {
+    out = new BufferedWriter(new FileWriter(CONFIG));
+    startConfig();
+    appendProperty("test.humanReadableValue1", "1m");
+    appendProperty("test.humanReadableValue2", "1M");
+    appendProperty("test.humanReadableValue5", "1MBCDE");
+
+    endConfig();
+    Path fileResource = new Path(CONFIG);
+    conf.addResource(fileResource);
+    assertEquals(1048576, conf.getLongBytes("test.humanReadableValue1", 0));
+    assertEquals(1048576, conf.getLongBytes("test.humanReadableValue2", 0));
+    try {
+      conf.getLongBytes("test.humanReadableValue5", 0);
+      fail("Property had invalid human readable value, but was read successfully.");
+    } catch (NumberFormatException e) {
+      // pass
+    }
+  }
+
   public void testBooleanValues() throws IOException {
     out=new BufferedWriter(new FileWriter(CONFIG));
     startConfig();

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileSystemCanonicalization.java

@@ -25,7 +25,7 @@ import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.net.NetUtilsTestResolver;
+import org.apache.hadoop.security.NetUtilsTestResolver;
 import org.apache.hadoop.util.Progressable;
 import org.junit.Test;
 

+ 9 - 10
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsTrash.java

@@ -78,17 +78,16 @@ public class TestViewFsTrash {
     // set up viewfs's home dir root to point to home dir root on target
     // But home dir is different on linux, mac etc.
     // Figure it out by calling home dir on target
-    
-   String homeDir = fsTarget.getHomeDirectory().toUri().getPath();
-   int indexOf2ndSlash = homeDir.indexOf('/', 1);
-   String homeDirRoot = homeDir.substring(0, indexOf2ndSlash);
-   ConfigUtil.addLink(conf, homeDirRoot,
-       fsTarget.makeQualified(new Path(homeDirRoot)).toUri()); 
-   ConfigUtil.setHomeDirConf(conf, homeDirRoot);
-   Log.info("Home dir base " + homeDirRoot);
-    
+
+    String homeDirRoot = fsTarget.getHomeDirectory()
+        .getParent().toUri().getPath();
+    ConfigUtil.addLink(conf, homeDirRoot,
+        fsTarget.makeQualified(new Path(homeDirRoot)).toUri());
+    ConfigUtil.setHomeDirConf(conf, homeDirRoot);
+    Log.info("Home dir base " + homeDirRoot);
+
     fsView = ViewFileSystemTestSetup.setupForViewFs(conf, fsTarget);
-    
+
     // set working dir so that relative paths
     //fsView.setWorkingDirectory(new Path(fsTarget.getWorkingDirectory().toUri().getPath()));
     conf.set("fs.defaultFS", FsConstants.VIEWFS_URI.toString());

+ 1 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java

@@ -37,6 +37,7 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.NetUtilsTestResolver;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;

+ 17 - 6
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/NetUtilsTestResolver.java → hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/NetUtilsTestResolver.java

@@ -16,7 +16,7 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.net;
+package org.apache.hadoop.security;
 
 import java.net.InetAddress;
 import java.net.UnknownHostException;
@@ -25,7 +25,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.net.NetUtils.QualifiedHostResolver;
+import org.apache.hadoop.security.SecurityUtil.QualifiedHostResolver;
 
 /**
  * provides a dummy dns search resolver with a configurable search path
@@ -41,7 +41,7 @@ public class NetUtilsTestResolver extends QualifiedHostResolver {
     resolver.addResolvedHost("host.a.b.", "1.1.1.1");
     resolver.addResolvedHost("b-host.b.", "2.2.2.2");
     resolver.addResolvedHost("simple.", "3.3.3.3");    
-    NetUtils.setHostResolver(resolver);
+    SecurityUtil.hostResolver = resolver;
     return resolver;
   }
 
@@ -56,7 +56,8 @@ public class NetUtilsTestResolver extends QualifiedHostResolver {
     resolvedHosts.put(host, addr);
   }
 
-  InetAddress getInetAddressByName(String host) throws UnknownHostException {
+  @Override
+  public InetAddress getInetAddressByName(String host) throws UnknownHostException {
     hostSearches.add(host);
     if (!resolvedHosts.containsKey(host)) {
       throw new UnknownHostException(host);
@@ -64,11 +65,21 @@ public class NetUtilsTestResolver extends QualifiedHostResolver {
     return resolvedHosts.get(host);
   }
 
-  String[] getHostSearches() {
+  @Override
+  public InetAddress getByExactName(String host) {
+    return super.getByExactName(host);
+  }
+  
+  @Override
+  public InetAddress getByNameWithSearch(String host) {
+    return super.getByNameWithSearch(host);
+  }
+  
+  public String[] getHostSearches() {
     return hostSearches.toArray(new String[0]);
   }
 
-  void reset() {
+  public void reset() {
     hostSearches.clear();
   }
 }

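The relocated resolver above is wired in by assigning it directly to the package-visible SecurityUtil.hostResolver field, as the hunk shows. A minimal sketch of the test pattern (constructor visibility assumed):

  // In a test class in org.apache.hadoop.security:
  NetUtilsTestResolver resolver = new NetUtilsTestResolver();
  resolver.addResolvedHost("host.a.b.", "1.1.1.1");  // canned DNS answer
  SecurityUtil.hostResolver = resolver;              // swap in the dummy resolver
  // ...exercise the code under test, then inspect resolver.getHostSearches()
  // to see which names were looked up; resolver.reset() clears it between cases.
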
+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java

@@ -225,7 +225,7 @@ public class TestSecurityUtil {
     assertTrue(!addr.isUnresolved());
     // don't know what the standard resolver will return for hostname.
     // should be host for host; host or ip for ip is ambiguous
-    if (!SecurityUtil.getTokenServiceUseIp()) {
+    if (!SecurityUtil.useIpForTokenService) {
       assertEquals(host, addr.getHostName());
       assertEquals(host, addr.getAddress().getHostName());
     }

+ 56 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java

@@ -143,8 +143,62 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
     }
     
     assertEquals(0L, StringUtils.TraditionalBinaryPrefix.string2long("0"));
-    assertEquals(-1259520L, StringUtils.TraditionalBinaryPrefix.string2long("-1230k"));
-    assertEquals(956703965184L, StringUtils.TraditionalBinaryPrefix.string2long("891g"));
+    assertEquals(1024L, StringUtils.TraditionalBinaryPrefix.string2long("1k"));
+    assertEquals(-1024L, StringUtils.TraditionalBinaryPrefix.string2long("-1k"));
+    assertEquals(1259520L,
+        StringUtils.TraditionalBinaryPrefix.string2long("1230K"));
+    assertEquals(-1259520L,
+        StringUtils.TraditionalBinaryPrefix.string2long("-1230K"));
+    assertEquals(104857600L,
+        StringUtils.TraditionalBinaryPrefix.string2long("100m"));
+    assertEquals(-104857600L,
+        StringUtils.TraditionalBinaryPrefix.string2long("-100M"));
+    assertEquals(956703965184L,
+        StringUtils.TraditionalBinaryPrefix.string2long("891g"));
+    assertEquals(-956703965184L,
+        StringUtils.TraditionalBinaryPrefix.string2long("-891G"));
+    assertEquals(501377302265856L,
+        StringUtils.TraditionalBinaryPrefix.string2long("456t"));
+    assertEquals(-501377302265856L,
+        StringUtils.TraditionalBinaryPrefix.string2long("-456T"));
+    assertEquals(11258999068426240L,
+        StringUtils.TraditionalBinaryPrefix.string2long("10p"));
+    assertEquals(-11258999068426240L,
+        StringUtils.TraditionalBinaryPrefix.string2long("-10P"));
+    assertEquals(1152921504606846976L,
+        StringUtils.TraditionalBinaryPrefix.string2long("1e"));
+    assertEquals(-1152921504606846976L,
+        StringUtils.TraditionalBinaryPrefix.string2long("-1E"));
+
+    String tooLargeNumStr = "10e";
+    try {
+      StringUtils.TraditionalBinaryPrefix.string2long(tooLargeNumStr);
+      fail("Test passed for a number " + tooLargeNumStr + " too large");
+    } catch (IllegalArgumentException e) {
+      assertEquals(tooLargeNumStr + " does not fit in a Long", e.getMessage());
+    }
+
+    String tooSmallNumStr = "-10e";
+    try {
+      StringUtils.TraditionalBinaryPrefix.string2long(tooSmallNumStr);
+      fail("Test passed for a number " + tooSmallNumStr + " too small");
+    } catch (IllegalArgumentException e) {
+      assertEquals(tooSmallNumStr + " does not fit in a Long", e.getMessage());
+    }
+
+    String invalidFormatNumStr = "10kb";
+    char invalidPrefix = 'b';
+    try {
+      StringUtils.TraditionalBinaryPrefix.string2long(invalidFormatNumStr);
+      fail("Test passed for a number " + invalidFormatNumStr
+          + " has invalid format");
+    } catch (IllegalArgumentException e) {
+      assertEquals("Invalid size prefix '" + invalidPrefix + "' in '"
+          + invalidFormatNumStr
+          + "'. Allowed prefixes are k, m, g, t, p, e(case insensitive)",
+          e.getMessage());
+    }
+
   }
 
   @Test

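The new assertions pin down the arithmetic: each prefix multiplies by a power of 1024, case-insensitively, so "1230K" is 1230 * 1024 = 1,259,520 and "891g" is 891 * 1024^3 = 956,703,965,184, while "10e" (10 * 1024^6) exceeds Long.MAX_VALUE and is now rejected instead of silently overflowing. A standalone sketch of the same calls:

  import org.apache.hadoop.util.StringUtils;

  public class BinaryPrefixDemo {
    public static void main(String[] args) {
      // 1230K = 1230 * 1024
      System.out.println(StringUtils.TraditionalBinaryPrefix.string2long("1230K")); // 1259520
      // 891g = 891 * 1024 * 1024 * 1024
      System.out.println(StringUtils.TraditionalBinaryPrefix.string2long("891g"));  // 956703965184
      // StringUtils.TraditionalBinaryPrefix.string2long("10e") would throw
      // IllegalArgumentException: "10e does not fit in a Long"
    }
  }
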
+ 5 - 1
hadoop-common-project/hadoop-common/src/test/resources/testConf.xml

@@ -449,7 +449,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-getmerge &lt;src&gt; &lt;localdst&gt; \[addnl\]:( |\t)*Get all the files in the directories that( )*</expected-output>
+          <expected-output>^-getmerge \[-nl\] &lt;src&gt; &lt;localdst&gt;:( |\t)*Get all the files in the directories that( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -459,6 +459,10 @@
           <type>RegexpComparator</type>
           <expected-output>^( |\t)*one file on local fs. &lt;src&gt; is kept.( )*</expected-output>
         </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*-nl   Add a newline character at the end of each file.( )*</expected-output>
+        </comparator>
       </comparators>
     </test>
 

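These comparator updates track the revised shell usage: getmerge now takes an optional -nl flag in place of the old trailing [addnl] argument, so an invocation looks like (paths hypothetical):

  hadoop fs -getmerge -nl /user/alice/logs merged.txt

where -nl appends a newline after each concatenated file.
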
+ 1 - 0
hadoop-dist/pom.xml

@@ -118,6 +118,7 @@
                       run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${project.version}/* .
                       run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-httpfs/target/hadoop-hdfs-httpfs-${project.version}/* .
                       run cp -r $ROOT/hadoop-mapreduce-project/target/hadoop-mapreduce-${project.version}/* .
+                      run cp -r $ROOT/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${project.version}/* .
                       echo
                       echo "Hadoop dist layout available at: ${project.build.directory}/hadoop-${project.version}"
                       echo

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml

@@ -270,11 +270,11 @@
 
     <plugins>
       <plugin>
-	<!-- workaround for filtered/unfiltered resources in same directory -->
-	<!-- remove when maven-eclipse-plugin 2.9 is available -->
-	<groupId>org.apache.maven.plugins</groupId>
-	<artifactId>maven-eclipse-plugin</artifactId>
-	<version>2.6</version>
+        <!-- workaround for filtered/unfiltered resources in same directory -->
+        <!-- remove when maven-eclipse-plugin 2.9 is available -->
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-eclipse-plugin</artifactId>
+        <version>2.6</version>
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>

+ 20 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -167,6 +167,23 @@ Release 0.23.1 - UNRELEASED
     HDFS-2710. Add HDFS tests related to HADOOP-7933. (Siddarth Seth via
     suresh)
 
+    HDFS-2349. Corruption detected during block transfers between DNs
+               should log a WARN instead of INFO. (harsh)
+
+    HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
+
+    HDFS-2729. Update BlockManager's comments regarding the invalid block set (harsh)
+ 
+    HDFS-2726. Fix a logging issue under DFSClient's createBlockOutputStream method (harsh)
+
+    HDFS-554. Use System.arraycopy in BlockInfo.ensureCapacity. (harsh)
+ 
+    HDFS-1314. Make dfs.blocksize accept size-indicating prefixes (Sho Shimauchi via harsh)
+
+    HDFS-69. Improve the 'dfsadmin' commandline help. (harsh)
+
+    HDFS-2788. HdfsServerConstants#DN_KEEPALIVE_TIMEOUT is dead code (eli)
+
   OPTIMIZATIONS
 
     HDFS-2130. Switch default checksum to CRC32C. (todd)
@@ -231,6 +248,9 @@ Release 0.23.1 - UNRELEASED
     HDFS-2707. HttpFS should read the hadoop-auth secret from a file instead 
     inline from the configuration. (tucu)
 
+    HDFS-2790. FSNamesystem.setTimes throws exception with wrong
+    configuration name in the message. (Arpit Gupta via eli)
+
 Release 0.23.0 - 2011-11-01 
 
   INCOMPATIBLE CHANGES

+ 36 - 42
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -318,47 +318,6 @@
               </tasks>
             </configuration>
           </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-javadoc-plugin</artifactId>
-        <configuration>
-          <excludePackageNames>org.apache.hadoop.hdfs.protocol.proto</excludePackageNames>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.rat</groupId>
-        <artifactId>apache-rat-plugin</artifactId>
-        <configuration>
-          <excludes>
-            <exclude>CHANGES.txt</exclude>
-            <exclude>.idea/**</exclude>
-            <exclude>src/main/conf/*</exclude>
-            <exclude>src/main/docs/**</exclude>
-            <exclude>dev-support/findbugsExcludeFile.xml</exclude>
-            <exclude>dev-support/checkstyle*</exclude>
-            <exclude>dev-support/jdiff/**</exclude>
-            <exclude>dev-support/*tests</exclude>
-            <exclude>src/main/native/*</exclude>
-            <exclude>src/main/native/config/*</exclude>
-            <exclude>src/main/native/m4/*</exclude>
-            <exclude>src/test/empty-file</exclude>
-            <exclude>src/test/all-tests</exclude>
-            <exclude>src/test/resources/*.tgz</exclude>
-            <exclude>src/test/resources/data*</exclude>
-            <exclude>src/test/resources/editsStored*</exclude>
-            <exclude>src/test/resources/empty-file</exclude>
-            <exclude>src/main/webapps/datanode/robots.txt</exclude>
-            <exclude>src/main/docs/releasenotes.html</exclude>
-            <exclude>src/contrib/**</exclude>
-          </excludes>
-        </configuration>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-antrun-plugin</artifactId>
-        <executions>
           <execution>
             <id>xprepare-package-hadoop-daemon</id>
             <phase>prepare-package</phase>
@@ -385,7 +344,7 @@
                 </condition>
                 <property name="commons.daemon.tar.name"
                           value="commons-daemon-${commons-daemon.version}-bin-${commons.daemon.os.name}-${commons.daemon.os.arch}.tar.gz"/>
-                
+
                 <mkdir dir="downloads"/>
                 <get src="http://archive.apache.org/dist/commons/daemon/binaries/${commons-daemon.version}/${commons.daemon.os.name}/${commons.daemon.tar.name}"
                     dest="downloads/${commons.daemon.tar.name}" verbose="true" skipexisting="true"/>
@@ -404,6 +363,41 @@
           </execution>
         </executions>
       </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-javadoc-plugin</artifactId>
+        <configuration>
+          <excludePackageNames>org.apache.hadoop.hdfs.protocol.proto</excludePackageNames>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>CHANGES.txt</exclude>
+            <exclude>.idea/**</exclude>
+            <exclude>src/main/conf/*</exclude>
+            <exclude>src/main/docs/**</exclude>
+            <exclude>dev-support/findbugsExcludeFile.xml</exclude>
+            <exclude>dev-support/checkstyle*</exclude>
+            <exclude>dev-support/jdiff/**</exclude>
+            <exclude>dev-support/*tests</exclude>
+            <exclude>src/main/native/*</exclude>
+            <exclude>src/main/native/config/*</exclude>
+            <exclude>src/main/native/m4/*</exclude>
+            <exclude>src/test/empty-file</exclude>
+            <exclude>src/test/all-tests</exclude>
+            <exclude>src/test/resources/*.tgz</exclude>
+            <exclude>src/test/resources/data*</exclude>
+            <exclude>src/test/resources/editsStored*</exclude>
+            <exclude>src/test/resources/empty-file</exclude>
+            <exclude>src/main/webapps/datanode/robots.txt</exclude>
+            <exclude>src/main/docs/releasenotes.html</exclude>
+            <exclude>src/contrib/**</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
     </plugins>
   </build>
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -180,7 +180,7 @@ public class DFSClient implements java.io.Closeable {
       /** dfs.write.packet.size is an internal config variable */
       writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
           DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
-      defaultBlockSize = conf.getLong(DFS_BLOCK_SIZE_KEY,
+      defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
           DFS_BLOCK_SIZE_DEFAULT);
       defaultReplication = (short) conf.getInt(
           DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -1056,7 +1056,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
 
       } catch (IOException ie) {
 
-        DFSClient.LOG.info("Exception in createBlockOutputStream " + ie);
+        DFSClient.LOG.info("Exception in createBlockOutputStream", ie);
 
         // find the datanode that matches
         if (firstBadLink.length() != 0) {

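The one-line change above passes the exception as the throwable argument instead of concatenating it into the message, which lets commons-logging print the full stack trace. A minimal sketch of the difference:

  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;

  public class LogThrowableDemo {
    private static final Log LOG = LogFactory.getLog(LogThrowableDemo.class);

    public static void main(String[] args) {
      Exception ie = new java.io.IOException("connection reset");
      LOG.info("Exception in createBlockOutputStream " + ie); // message only
      LOG.info("Exception in createBlockOutputStream", ie);   // message plus stack trace
    }
  }
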
+ 1 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java

@@ -141,9 +141,7 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
      * happen only when replication is manually increased by the user. */
     Object[] old = triplets;
     triplets = new Object[(last+num)*3];
-    for(int i=0; i < last*3; i++) {
-      triplets[i] = old[i];
-    }
+    System.arraycopy(old, 0, triplets, 0, last*3);
     return last;
   }
 

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -1526,7 +1526,7 @@ public class BlockManager {
     // Ignore replicas already scheduled to be removed from the DN
     if(invalidateBlocks.contains(dn.getStorageID(), block)) {
       assert storedBlock.findDatanode(dn) < 0 : "Block " + block
-        + " in recentInvalidatesSet should not appear in DN " + dn;
+        + " in invalidated blocks set should not appear in DN " + dn;
       return storedBlock;
     }
 
@@ -1754,7 +1754,7 @@ public class BlockManager {
    * Invalidate corrupt replicas.
    * <p>
    * This will remove the replicas from the block's location list,
-   * add them to {@link #recentInvalidateSets} so that they could be further
+   * add them to {@link #invalidateBlocks} so that they could be further
    * deleted from the respective data-nodes,
    * and remove the block from corruptReplicasMap.
    * <p>
@@ -1983,7 +1983,7 @@ public class BlockManager {
       //
       addToInvalidates(b, cur);
       NameNode.stateChangeLog.info("BLOCK* chooseExcessReplicates: "
-                +"("+cur.getName()+", "+b+") is added to recentInvalidateSets");
+                +"("+cur.getName()+", "+b+") is added to invalidated blocks set.");
     }
   }
 
@@ -2399,7 +2399,7 @@ public class BlockManager {
 
   /**
    * Get blocks to invalidate for <i>nodeId</i>
-   * in {@link #recentInvalidateSets}.
+   * in {@link #invalidateBlocks}.
    *
    * @return number of blocks scheduled for removal during this iteration.
    */

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java

@@ -86,7 +86,6 @@ public final class HdfsServerConstants {
   public static int READ_TIMEOUT_EXTENSION = 5 * 1000;
   public static int WRITE_TIMEOUT = 8 * 60 * 1000;
   public static int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline
-  public static int DN_KEEPALIVE_TIMEOUT = 5 * 1000;
 
   /**
    * Defines the NameNode role.

+ 2 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -450,11 +450,8 @@ public class DataNode extends Configured
 
   private static String getHostName(Configuration config)
       throws UnknownHostException {
-    String name = null;
     // use configured nameserver & interface to get local hostname
-    if (config.get(DFS_DATANODE_HOST_NAME_KEY) != null) {
-      name = config.get(DFS_DATANODE_HOST_NAME_KEY);
-    }
+    String name = config.get(DFS_DATANODE_HOST_NAME_KEY);
     if (name == null) {
       name = DNS
           .getDefaultHost(config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
@@ -1334,7 +1331,7 @@ public class DataNode extends Configured
       nn.reportBadBlocks(new LocatedBlock[]{
           new LocatedBlock(block, new DatanodeInfo[] {
               new DatanodeInfo(bpReg)})});
-      LOG.info("Can't replicate block " + block
+      LOG.warn("Can't replicate block " + block
           + " because on-disk length " + onDiskLength 
           + " is shorter than NameNode recorded length " + block.getNumBytes());
       return;

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java

@@ -119,7 +119,7 @@ class DataXceiverServer implements Runnable {
       conf.getInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
                   DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT);
     
-    this.estimateBlockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+    this.estimateBlockSize = conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
         DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
     
     //set up parameter for cluster balancing

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -502,7 +502,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         fsOwner.getShortUserName(), supergroup, new FsPermission(filePermission));
     
     this.serverDefaults = new FsServerDefaults(
-        conf.getLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
+        conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
         conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),
         conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT),
         (short) conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT),
@@ -958,7 +958,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     throws IOException, UnresolvedLinkException {
     if (!isAccessTimeSupported() && atime != -1) {
       throw new IOException("Access time for hdfs is not configured. " +
-                            " Please set dfs.support.accessTime configuration parameter.");
+                            " Please set " + DFS_NAMENODE_ACCESSTIME_PRECISION_KEY + " configuration parameter.");
     }
     writeLock();
     try {

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -131,8 +131,9 @@ public class DFSAdmin extends FsShell {
       "\t\ton the number of names in the directory tree\n" +
       "\t\ton the number of names in the directory tree\n" +
       "\t\tFor each directory, attempt to set the quota. An error will be reported if\n" +
       "\t\tFor each directory, attempt to set the quota. An error will be reported if\n" +
       "\t\t1. N is not a positive integer, or\n" +
       "\t\t1. N is not a positive integer, or\n" +
-      "\t\t2. user is not an administrator, or\n" +
-      "\t\t3. the directory does not exist or is a file, or\n";
+      "\t\t2. User is not an administrator, or\n" +
+      "\t\t3. The directory does not exist or is a file.\n" +
+      "\t\tNote: A quota of 1 would force the directory to remain empty.\n";
 
 
     private final long quota; // the quota to be set
     private final long quota; // the quota to be set
     
     
@@ -929,6 +930,7 @@ public class DFSAdmin extends FsShell {
                   + " [-setBalancerBandwidth <bandwidth in bytes per second>]");
                   + " [-setBalancerBandwidth <bandwidth in bytes per second>]");
     } else {
     } else {
       System.err.println("Usage: java DFSAdmin");
       System.err.println("Usage: java DFSAdmin");
+      System.err.println("Note: Administrative commands can only be run as the HDFS superuser.");
       System.err.println("           [-report]");
       System.err.println("           [-report]");
       System.err.println("           [-safemode enter | leave | get | wait]");
       System.err.println("           [-safemode enter | leave | get | wait]");
       System.err.println("           [-saveNamespace]");
       System.err.println("           [-saveNamespace]");

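The new note about a quota of 1 follows from how HDFS counts name quotas: the directory itself consumes one name against its own quota. So after, say, hadoop dfsadmin -setQuota 1 /backups (path hypothetical), creating any file or subdirectory under /backups fails with a quota violation.
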
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -529,7 +529,7 @@ public class WebHdfsFileSystem extends FileSystem
 
   @Override
   public long getDefaultBlockSize() {
-    return getConf().getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+    return getConf().getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
         DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
   }
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java

@@ -55,6 +55,6 @@ public class BlockSizeParam extends LongParam {
   /** @return the value or, if it is null, return the default from conf. */
   public long getValue(final Configuration conf) {
     return getValue() != null? getValue()
-        : conf.getLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
+        : conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
   }
 }

+ 6 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -329,7 +329,12 @@ creations/deletions), or "all".</description>
 <property>
   <name>dfs.blocksize</name>
   <value>67108864</value>
-  <description>The default block size for new files.</description>
+  <description>
+      The default block size for new files, in bytes.
+      You can use the following suffixes (case insensitive):
+      k (kilo), m (mega), g (giga), t (tera), p (peta), e (exa) to specify the size (such as 128k, 512m, 1g, etc.),
+      or provide the complete size in bytes (such as 134217728 for 128 MB).
+  </description>
 </property>
 
 <property>

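With this description, the suffix forms are interchangeable with raw byte counts wherever dfs.blocksize is read through the new getLongBytes accessor (as the DFSClient, DataXceiverServer, FSNamesystem and WebHdfs changes in this merge do). A small sketch:

  import org.apache.hadoop.conf.Configuration;

  public class BlockSizeDemo {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      conf.set("dfs.blocksize", "128m");  // equivalent to 134217728
      // 67108864L mirrors the 64 MB default shown above
      System.out.println(conf.getLongBytes("dfs.blocksize", 67108864L)); // 134217728
    }
  }
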
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java

@@ -51,7 +51,7 @@ public class TestParam {
     final BlockSizeParam p = new BlockSizeParam(BlockSizeParam.DEFAULT);
     Assert.assertEquals(null, p.getValue());
     Assert.assertEquals(
-        conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+        conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
             DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT),
         p.getValue(conf));
 

+ 80 - 1
hadoop-mapreduce-project/CHANGES.txt

@@ -106,10 +106,31 @@ Release 0.23.1 - Unreleased
     MAPREDUCE-3547. Added a bunch of unit tests for the RM/NM webservices.
     (Thomas Graves via acmurthy)
 
-    MAPREDUCE-3610. Remove use of the 'dfs.block.size' config for default block size fetching. Use FS#getDefaultBlocksize instead. (Sho Shimauchi via harsh)
+    MAPREDUCE-3610. Remove use of the 'dfs.block.size' config for default block
+    size fetching. Use FS#getDefaultBlocksize instead. (Sho Shimauchi via harsh)
 
     MAPREDUCE-3478. Cannot build against ZooKeeper 3.4.0. (Tom White via mahadev)
 
+    MAPREDUCE-3528. Fixed TaskHeartBeatHandler to use a new configuration
+    for the thread loop interval separate from task-timeout configuration
+    property. (Siddharth Seth via vinodkv)
+
+    MAPREDUCE-3312. Modified MR AM to not send a stop-container request for
+    a container that isn't launched at all. (Robert Joseph Evans via vinodkv)
+
+    MAPREDUCE-3382. Enhanced MR AM to use a proxy to ping the job-end
+    notification URL. (Ravi Prakash via vinodkv)
+
+    MAPREDUCE-3299. Added AMInfo table to the MR AM job pages to list all the
+    job-attempts when AM restarts and recovers. (Jonathan Eagles via vinodkv)
+
+    MAPREDUCE-3251. Network ACLs can prevent some clients from talking to the
+    MR AM. Improved the earlier patch to not talk to the JobHistoryServer
+    repeatedly. (Anupam Seth via vinodkv)
+
+    MAPREDUCE-3553. Add support for data returned when exceptions thrown from web 
+    service apis to be in either xml or in JSON. (Thomas Graves via mahadev)
+
   OPTIMIZATIONS
 
     MAPREDUCE-3567. Extraneous JobConf objects in AM heap. (Vinod Kumar
@@ -124,6 +145,15 @@ Release 0.23.1 - Unreleased
     MAPREDUCE-3569. TaskAttemptListener holds a global lock for all
     task-updates. (Vinod Kumar Vavilapalli via sseth)
 
+    MAPREDUCE-3511. Removed a multitude of cloned/duplicate counters in the AM
+    thereby reducing the AM heap size and preventing full GCs. (vinodkv)
+
+    MAPREDUCE-3618. Fixed TaskHeartbeatHandler to not hold a global lock for all
+    task-updates. (Siddarth Seth via vinodkv)
+
+    MAPREDUCE-3512. Batching JobHistory flushing to DFS so that we don't flush
+    for every event slowing down AM. (Siddarth Seth via vinodkv)
+
   BUG FIXES
     MAPREDUCE-3462. Fix Gridmix JUnit testcase failures. 
                     (Ravi Prakash and Ravi Gummadi via amarrk)
@@ -373,6 +403,55 @@ Release 0.23.1 - Unreleased
 
     MAPREDUCE-3615. Fix some ant test failures. (Thomas Graves via sseth)
 
+    MAPREDUCE-1744. DistributedCache creates its own FileSytem instance when 
+    adding a file/archive to the path. (Dick King via tucu)
+
+    MAPREDUCE-3326. Added detailed information about queue's to the
+    CapacityScheduler web-ui. (Jason Lowe via acmurthy) 
+
+    MAPREDUCE-3548. Added more unit tests for MR AM & JHS web-services.
+    (Thomas Graves via acmurthy) 
+
+    MAPREDUCE-3617. Removed wrong default value for
+    yarn.resourcemanager.principal and yarn.nodemanager.principal. (Jonathan
+    Eagles via acmurthy) 
+
+    MAPREDUCE-3624. Remove unnecessary dependency on JDK's tools.jar. (mahadev
+    via acmurthy)
+
+    MAPREDUCE-3616. Thread pool for launching containers in MR AM not
+    expanding as expected. (vinodkv via sseth)
+
+    MAPREDUCE-3639. Fixed TokenCache to work with absent FileSystem canonical
+    service-names. (Siddharth Seth via vinodkv)
+
+    MAPREDUCE-3380. Token infrastructure for running clients which are not kerberos 
+    authenticated. (mahadev)
+
+    MAPREDUCE-3648. TestJobConf failing. (Thomas Graves via mahadev)
+
+    MAPREDUCE-3651. TestQueueManagerRefresh fails. (Thomas Graves via mahadev)
+
+    MAPREDUCE-3645. TestJobHistory fails. (Thomas Graves via mahadev)
+  
+    MAPREDUCE-3652. org.apache.hadoop.mapred.TestWebUIAuthorization.testWebUIAuthorization 
+    fails. (Thomas Graves via mahadev)
+
+    MAPREDUCE-3625. CapacityScheduler web-ui display of queue's used capacity is broken.
+    (Jason Lowe via mahadev)
+
+    MAPREDUCE-3596. Fix scheduler to handle cleaned up containers, which NMs
+    may subsequently report as running. (Vinod Kumar Vavilapalli via sseth)
+
+    MAPREDUCE-3656. Fixed a race condition in MR AM which is failing the sort
+    benchmark consistently. (Siddarth Seth via vinodkv)
+
+    MAPREDUCE-3532. Modified NM to report correct http address when an ephemeral
+    web port is configured. (Bhallamudi Venkata Siva Kamesh via vinodkv)
+
+    MAPREDUCE-3404. Corrected MR AM to honor speculative configuration and enable
+    speculating either maps or reduces. (Eric Payne via vinodkv)
+
 Release 0.23.0 - 2011-11-01 
 
   INCOMPATIBLE CHANGES

+ 0 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml

@@ -136,7 +136,6 @@
           <plugin>
             <groupId>org.codehaus.mojo</groupId>
             <artifactId>exec-maven-plugin</artifactId>
-            <version>1.2</version>
             <executions>
               <execution>
                 <phase>compile</phase>

+ 42 - 24
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java

@@ -22,7 +22,9 @@ import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 
@@ -77,6 +79,9 @@ public class TaskAttemptListenerImpl extends CompositeService
   private ConcurrentMap<WrappedJvmID, org.apache.hadoop.mapred.Task>
     jvmIDToActiveAttemptMap
       = new ConcurrentHashMap<WrappedJvmID, org.apache.hadoop.mapred.Task>();
+  private Set<WrappedJvmID> launchedJVMs = Collections
+      .newSetFromMap(new ConcurrentHashMap<WrappedJvmID, Boolean>()); 
+  
   private JobTokenSecretManager jobTokenSecretManager = null;
   
   public TaskAttemptListenerImpl(AppContext context,
@@ -88,7 +93,7 @@ public class TaskAttemptListenerImpl extends CompositeService
 
   @Override
   public void init(Configuration conf) {
-   registerHeartbeatHandler();
+   registerHeartbeatHandler(conf);
    super.init(conf);
   }
 
@@ -98,9 +103,10 @@ public class TaskAttemptListenerImpl extends CompositeService
     super.start();
   }
 
-  protected void registerHeartbeatHandler() {
+  protected void registerHeartbeatHandler(Configuration conf) {
     taskHeartbeatHandler = new TaskHeartbeatHandler(context.getEventHandler(), 
-        context.getClock());
+        context.getClock(), conf.getInt(MRJobConfig.MR_AM_TASK_LISTENER_THREAD_COUNT, 
+            MRJobConfig.DEFAULT_MR_AM_TASK_LISTENER_THREAD_COUNT));
     addService(taskHeartbeatHandler);
   }
 
@@ -325,9 +331,11 @@ public class TaskAttemptListenerImpl extends CompositeService
     taskAttemptStatus.outputSize = taskStatus.getOutputSize();
     // Task sends the updated phase to the TT.
     taskAttemptStatus.phase = TypeConverter.toYarn(taskStatus.getPhase());
-    // Counters are updated by the task.
-    taskAttemptStatus.counters =
-        TypeConverter.toYarn(taskStatus.getCounters());
+    // Counters are updated by the task. Convert counters into new format as
+    // that is the primary storage format inside the AM to avoid multiple
+    // conversions and unnecessary heap usage.
+    taskAttemptStatus.counters = new org.apache.hadoop.mapreduce.Counters(
+      taskStatus.getCounters());
 
     // Map Finish time set by the task (map only)
     if (taskStatus.getIsMap() && taskStatus.getMapFinishTime() != 0) {
@@ -409,22 +417,28 @@ public class TaskAttemptListenerImpl extends CompositeService
 
     // Try to look up the task. We remove it directly as we don't give
     // multiple tasks to a JVM
-    org.apache.hadoop.mapred.Task task = jvmIDToActiveAttemptMap
-        .remove(wJvmID);
-    if (task != null) {
-      LOG.info("JVM with ID: " + jvmId + " given task: " + task.getTaskID());
-      jvmTask = new JvmTask(task, false);
-
-      // remove the task as it is no more needed and free up the memory
-      // Also we have already told the JVM to process a task, so it is no
-      // longer pending, and further request should ask it to exit.
-    } else {
+    if (!jvmIDToActiveAttemptMap.containsKey(wJvmID)) {
       LOG.info("JVM with ID: " + jvmId + " is invalid and will be killed.");
       jvmTask = TASK_FOR_INVALID_JVM;
+    } else {
+      if (!launchedJVMs.contains(wJvmID)) {
+        jvmTask = null;
+        LOG.info("JVM with ID: " + jvmId
+            + " asking for task before AM launch registered. Given null task");
+      } else {
+        // remove the task as it is no more needed and free up the memory.
+        // Also we have already told the JVM to process a task, so it is no
+        // longer pending, and further request should ask it to exit.
+        org.apache.hadoop.mapred.Task task =
+            jvmIDToActiveAttemptMap.remove(wJvmID);
+        launchedJVMs.remove(wJvmID);
+        LOG.info("JVM with ID: " + jvmId + " given task: " + task.getTaskID());
+        jvmTask = new JvmTask(task, false);
+      }
     }
     return jvmTask;
   }
-  
+
   @Override
   public void registerPendingTask(
       org.apache.hadoop.mapred.Task task, WrappedJvmID jvmID) {
@@ -437,13 +451,12 @@ public class TaskAttemptListenerImpl extends CompositeService
 
   @Override
   public void registerLaunchedTask(
-      org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID) {
-
-    // The task is launched. Register this for expiry-tracking.
+      org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID,
+      WrappedJvmID jvmId) {
+    // The AM considers the task to be launched (Has asked the NM to launch it)
+    // The JVM will only be given a task after this registration.
+    launchedJVMs.add(jvmId);
 
-    // Timing can cause this to happen after the real JVM launches and gets a
-    // task which is still fine as we will only be tracking for expiry a little
-    // late than usual.
     taskHeartbeatHandler.register(attemptID);
   }
 
@@ -456,7 +469,12 @@ public class TaskAttemptListenerImpl extends CompositeService
     // registration. Events are ordered at TaskAttempt, so unregistration will
     // always come after registration.
 
-    // remove the mapping if not already removed
+    // Remove from launchedJVMs before jvmIDToActiveAttemptMap to avoid
+    // synchronization issue with getTask(). getTask should be checking
+    // jvmIDToActiveAttemptMap before it checks launchedJVMs.
+ 
+    // remove the mappings if not already removed
+    launchedJVMs.remove(jvmID);
     jvmIDToActiveAttemptMap.remove(jvmID);
 
     //unregister this attempt

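The launchedJVMs field added above uses the standard JDK idiom for a concurrent set (there is no ConcurrentHashSet class): Collections.newSetFromMap over a ConcurrentHashMap. A self-contained sketch of the idiom:

  import java.util.Collections;
  import java.util.Set;
  import java.util.concurrent.ConcurrentHashMap;

  public class ConcurrentSetDemo {
    public static void main(String[] args) {
      // Set view backed by a ConcurrentHashMap; add/contains/remove are thread-safe.
      Set<String> launched =
          Collections.newSetFromMap(new ConcurrentHashMap<String, Boolean>());
      launched.add("jvm_201201_0001");  // hypothetical JVM id
      System.out.println(launched.contains("jvm_201201_0001")); // true
    }
  }
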
+ 222 - 37
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java

@@ -20,9 +20,12 @@ package org.apache.hadoop.mapreduce.jobhistory;
 
 import java.io.IOException;
 import java.util.Collections;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.Timer;
+import java.util.TimerTask;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 
@@ -36,10 +39,11 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobCounter;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
@@ -63,17 +67,26 @@ public class JobHistoryEventHandler extends AbstractService
   private final AppContext context;
   private final int startCount;
 
+  private int eventCounter;
+
   //TODO Does the FS object need to be different ? 
   private FileSystem stagingDirFS; // log Dir FileSystem
   private FileSystem doneDirFS; // done Dir FileSystem
 
-  private Configuration conf;
 
   private Path stagingDirPath = null;
   private Path doneDirPrefixPath = null; // folder for completed jobs
 
+  private int maxUnflushedCompletionEvents;
+  private int postJobCompletionMultiplier;
+  private long flushTimeout;
+  private int minQueueSizeForBatchingFlushes; // TODO: Rename
+
+  private int numUnflushedCompletionEvents = 0;
+  private boolean isTimerActive;
+
 
-  private BlockingQueue<JobHistoryEvent> eventQueue =
+  protected BlockingQueue<JobHistoryEvent> eventQueue =
     new LinkedBlockingQueue<JobHistoryEvent>();
   protected Thread eventHandlingThread;
   private volatile boolean stopped;
@@ -100,8 +113,6 @@ public class JobHistoryEventHandler extends AbstractService
   @Override
   public void init(Configuration conf) {
 
-    this.conf = conf;
-
     String stagingDirStr = null;
     String doneDirStr = null;
     String userDoneDirStr = null;
@@ -181,6 +192,27 @@ public class JobHistoryEventHandler extends AbstractService
       throw new YarnException(e);
     }
 
+    // Maximum number of unflushed completion-events that can stay in the queue
+    // before flush kicks in.
+    maxUnflushedCompletionEvents =
+        conf.getInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS,
+            MRJobConfig.DEFAULT_MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS);
+    // We want to cut down flushes after job completes so as to write quicker,
+    // so we increase maxUnflushedEvents post Job completion by using the
+    // following multiplier.
+    postJobCompletionMultiplier =
+        conf.getInt(
+            MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER,
+            MRJobConfig.DEFAULT_MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER);
+    // Max time until which flush doesn't take place.
+    flushTimeout =
+        conf.getLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS,
+            MRJobConfig.DEFAULT_MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS);
+    minQueueSizeForBatchingFlushes =
+        conf.getInt(
+            MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD,
+            MRJobConfig.DEFAULT_MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD);
+    
     super.init(conf);
   }
 
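The four knobs read above control the batching: events are flushed once maxUnflushedCompletionEvents accumulate (with the threshold multiplied after job completion), no later than flushTimeout, and batching only engages once the queue reaches minQueueSizeForBatchingFlushes. A sketch of tuning them through the MRJobConfig constants this change introduces (values illustrative, not the defaults):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.MRJobConfig;

  public class HistoryFlushTuning {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // flush once this many completion events are buffered
      conf.setInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS, 400);
      // after job completion, multiply the threshold to flush even less often
      conf.setInt(MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER, 30);
      // upper bound on how long events may sit unflushed
      conf.setLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS, 30000L);
      // batching only engages once the queue is at least this deep
      conf.setInt(MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD, 50);
    }
  }
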
@@ -210,6 +242,16 @@ public class JobHistoryEventHandler extends AbstractService
       public void run() {
         JobHistoryEvent event = null;
         while (!stopped && !Thread.currentThread().isInterrupted()) {
+
+          // Log the size of the history-event-queue every so often.
+          if (eventCounter % 1000 == 0) {
+            eventCounter = 0;
+            LOG.info("Size of the JobHistory event queue is "
+                + eventQueue.size());
+          } else {
+            eventCounter++;
+          }
+
           try {
             event = eventQueue.take();
           } catch (InterruptedException e) {
@@ -238,18 +280,33 @@ public class JobHistoryEventHandler extends AbstractService
 
   @Override
   public void stop() {
-    LOG.info("Stopping JobHistoryEventHandler");
+    LOG.info("Stopping JobHistoryEventHandler. "
+        + "Size of the outstanding queue size is " + eventQueue.size());
     stopped = true;
     //do not interrupt while event handling is in progress
     synchronized(lock) {
-      eventHandlingThread.interrupt();
+      if (eventHandlingThread != null)
+        eventHandlingThread.interrupt();
     }
 
     try {
-      eventHandlingThread.join();
+      if (eventHandlingThread != null)
+        eventHandlingThread.join();
     } catch (InterruptedException ie) {
       LOG.info("Interruped Exception while stopping", ie);
     }
+
+    // Cancel all timers - so that they aren't invoked during or after
+    // the metaInfo object is wrapped up.
+    for (MetaInfo mi : fileMap.values()) {
+      try {
+        mi.shutDownTimer();
+      } catch (IOException e) {
+        LOG.info("Exception while cancelling delayed flush timer. "
+            + "Likely caused by a failed flush " + e.getMessage());
+      }
+    }
+
     //write all the events remaining in queue
     Iterator<JobHistoryEvent> it = eventQueue.iterator();
     while(it.hasNext()) {
@@ -270,6 +327,12 @@ public class JobHistoryEventHandler extends AbstractService
     super.stop();
   }
 
+  protected EventWriter createEventWriter(Path historyFilePath)
+      throws IOException {
+    FSDataOutputStream out = stagingDirFS.create(historyFilePath, true);
+    return new EventWriter(out);
+  }
+  
   /**
    * Create an event writer for the Job represented by the jobID.
    * Writes out the job configuration to the log directory.
@@ -305,8 +368,7 @@ public class JobHistoryEventHandler extends AbstractService
         JobHistoryUtils.getStagingConfFile(stagingDirPath, jobId, startCount);
     if (writer == null) {
       try {
-        FSDataOutputStream out = stagingDirFS.create(historyFile, true);
-        writer = new EventWriter(out);
+        writer = createEventWriter(historyFile);
         LOG.info("Event Writer setup for JobId: " + jobId + ", File: "
             + historyFile);
       } catch (IOException ioe) {
@@ -357,12 +419,26 @@ public class JobHistoryEventHandler extends AbstractService
   @Override
   public void handle(JobHistoryEvent event) {
     try {
+      if (isJobCompletionEvent(event.getHistoryEvent())) {
+        // When the job is complete, flush slower but write faster.
+        maxUnflushedCompletionEvents =
+            maxUnflushedCompletionEvents * postJobCompletionMultiplier;
+      }
+
       eventQueue.put(event);
     } catch (InterruptedException e) {
       throw new YarnException(e);
     }
   }
 
+  private boolean isJobCompletionEvent(HistoryEvent historyEvent) {
+    if (EnumSet.of(EventType.JOB_FINISHED, EventType.JOB_FAILED,
+        EventType.JOB_KILLED).contains(historyEvent.getEventType())) {
+      return true;
+    }
+    return false;
+  }
+
   protected void handleEvent(JobHistoryEvent event) {
     synchronized (lock) {
 
@@ -483,7 +559,7 @@ public class JobHistoryEventHandler extends AbstractService
                 .toString());
       // TODO JOB_FINISHED does not have state. Effectively job history does not
       // have state about the finished job.
-      setSummarySlotSeconds(summary, jobId);
+      setSummarySlotSeconds(summary, jfe.getTotalCounters());
       break;
     case JOB_FAILED:
     case JOB_KILLED:
@@ -492,21 +568,21 @@ public class JobHistoryEventHandler extends AbstractService
       summary.setNumFinishedMaps(context.getJob(jobId).getTotalMaps());
       summary.setNumFinishedReduces(context.getJob(jobId).getTotalReduces());
       summary.setJobFinishTime(juce.getFinishTime());
-      setSummarySlotSeconds(summary, jobId);
+      setSummarySlotSeconds(summary, context.getJob(jobId).getAllCounters());
       break;
     }
   }
 
-  private void setSummarySlotSeconds(JobSummary summary, JobId jobId) {
-    Counter slotMillisMapCounter =
-        context.getJob(jobId).getCounters()
-            .getCounter(JobCounter.SLOTS_MILLIS_MAPS);
+  private void setSummarySlotSeconds(JobSummary summary, Counters allCounters) {
+
+    Counter slotMillisMapCounter = allCounters
+      .findCounter(JobCounter.SLOTS_MILLIS_MAPS);
     if (slotMillisMapCounter != null) {
       summary.setMapSlotSeconds(slotMillisMapCounter.getValue());
     }
-    Counter slotMillisReduceCounter =
-        context.getJob(jobId).getCounters()
-            .getCounter(JobCounter.SLOTS_MILLIS_REDUCES);
+
+    Counter slotMillisReduceCounter = allCounters
+      .findCounter(JobCounter.SLOTS_MILLIS_REDUCES);
    if (slotMillisReduceCounter != null) {
      summary.setReduceSlotSeconds(slotMillisReduceCounter.getValue());
    }
@@ -601,50 +677,159 @@ public class JobHistoryEventHandler extends AbstractService
    }
  }

+  private class FlushTimerTask extends TimerTask {
+    private MetaInfo metaInfo;
+    private IOException ioe = null;
+    private volatile boolean shouldRun = true;
+
+    FlushTimerTask(MetaInfo metaInfo) {
+      this.metaInfo = metaInfo;
+    }
+
+    @Override
+    public void run() {
+      synchronized (lock) {
+        try {
+          if (!metaInfo.isTimerShutDown() && shouldRun)
+            metaInfo.flush();
+        } catch (IOException e) {
+          ioe = e;
+        }
+      }
+    }
+
+    public IOException getException() {
+      return ioe;
+    }
+
+    public void stop() {
+      shouldRun = false;
+      this.cancel();
+    }
+  }
+
  private class MetaInfo {
    private Path historyFile;
    private Path confFile;
    private EventWriter writer;
    JobIndexInfo jobIndexInfo;
    JobSummary jobSummary;
+    Timer flushTimer;
+    FlushTimerTask flushTimerTask;
+    private boolean isTimerShutDown = false;

-    MetaInfo(Path historyFile, Path conf, EventWriter writer, 
-             String user, String jobName, JobId jobId) {
+    MetaInfo(Path historyFile, Path conf, EventWriter writer, String user,
+        String jobName, JobId jobId) {
      this.historyFile = historyFile;
      this.confFile = conf;
      this.writer = writer;
-      this.jobIndexInfo = new JobIndexInfo(-1, -1, user, jobName, jobId, -1, -1,
-          null);
+      this.jobIndexInfo =
+          new JobIndexInfo(-1, -1, user, jobName, jobId, -1, -1, null);
      this.jobSummary = new JobSummary();
+      this.flushTimer = new Timer("FlushTimer", true);
    }

-    Path getHistoryFile() { return historyFile; }
+    Path getHistoryFile() {
+      return historyFile;
+    }

-    Path getConfFile() {return confFile; } 
+    Path getConfFile() {
+      return confFile;
+    }

-    JobIndexInfo getJobIndexInfo() { return jobIndexInfo; }
+    JobIndexInfo getJobIndexInfo() {
+      return jobIndexInfo;
+    }

-    JobSummary getJobSummary() { return jobSummary; }
+    JobSummary getJobSummary() {
+      return jobSummary;
+    }

-    boolean isWriterActive() {return writer != null ; }
+    boolean isWriterActive() {
+      return writer != null;
+    }
+
+    boolean isTimerShutDown() {
+      return isTimerShutDown;
+    }

    void closeWriter() throws IOException {
      synchronized (lock) {
-      if (writer != null) {
-        writer.close();
+        if (writer != null) {
+          writer.close();
+        }
+        writer = null;
      }
-      writer = null;
-    }
    }

    void writeEvent(HistoryEvent event) throws IOException {
      synchronized (lock) {
-      if (writer != null) {
-        writer.write(event);
-        writer.flush();
+        if (writer != null) {
+          writer.write(event);
+          processEventForFlush(event);
+          maybeFlush(event);
+        }
+      }
+    }
+
+    void processEventForFlush(HistoryEvent historyEvent) throws IOException {
+      if (EnumSet.of(EventType.MAP_ATTEMPT_FINISHED,
+          EventType.MAP_ATTEMPT_FAILED, EventType.MAP_ATTEMPT_KILLED,
+          EventType.REDUCE_ATTEMPT_FINISHED, EventType.REDUCE_ATTEMPT_FAILED,
+          EventType.REDUCE_ATTEMPT_KILLED, EventType.TASK_FINISHED,
+          EventType.TASK_FAILED, EventType.JOB_FINISHED, EventType.JOB_FAILED,
+          EventType.JOB_KILLED).contains(historyEvent.getEventType())) {
+        numUnflushedCompletionEvents++;
+        if (!isTimerActive) {
+          resetFlushTimer();
+          if (!isTimerShutDown) {
+            flushTimerTask = new FlushTimerTask(this);
+            flushTimer.schedule(flushTimerTask, flushTimeout);
+          }
+        }
+      }
+    }
+
+    void resetFlushTimer() throws IOException {
+      if (flushTimerTask != null) {
+        IOException exception = flushTimerTask.getException();
+        flushTimerTask.stop();
+        if (exception != null) {
+          throw exception;
+        }
+        flushTimerTask = null;
+      }
+      isTimerActive = false;
+    }
+
+    void maybeFlush(HistoryEvent historyEvent) throws IOException {
+      if ((eventQueue.size() < minQueueSizeForBatchingFlushes 
+          && numUnflushedCompletionEvents > 0)
+          || numUnflushedCompletionEvents >= maxUnflushedCompletionEvents 
+          || isJobCompletionEvent(historyEvent)) {
+        this.flush();
+      }
+    }
+
+    void flush() throws IOException {
+      synchronized (lock) {
+        if (numUnflushedCompletionEvents != 0) { // skipped timer cancel.
+          writer.flush();
+          numUnflushedCompletionEvents = 0;
+          resetFlushTimer();
+        }
+      }
+    }
+
+    void shutDownTimer() throws IOException {
+      synchronized (lock) {
+        isTimerShutDown = true;
+        flushTimer.cancel();
+        if (flushTimerTask != null && flushTimerTask.getException() != null) {
+          throw flushTimerTask.getException();
+        }
      }
    }
-  }
  }

  private void moveTmpToDone(Path tmpPath) throws IOException {
@@ -668,7 +853,7 @@ public class JobHistoryEventHandler extends AbstractService
        doneDirFS.delete(toPath, true);
      }
      boolean copied = FileUtil.copy(stagingDirFS, fromPath, doneDirFS, toPath,
-          false, conf);
+          false, getConfig());

      if (copied)
        LOG.info("Copied to done location: " + toPath);

+ 33 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java

@@ -20,9 +20,11 @@ package org.apache.hadoop.mapreduce.v2.app;

 import java.io.IOException;
 import java.io.InputStream;
+import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.URLConnection;
+import java.net.Proxy;

 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
@@ -49,9 +51,11 @@ public class JobEndNotifier implements Configurable {

   private Configuration conf;
   protected String userUrl;
+  protected String proxyConf;
   protected int numTries; //Number of tries to attempt notification
   protected int waitInterval; //Time to wait between retrying notification
   protected URL urlToNotify; //URL to notify read from the config
+  protected Proxy proxyToUse = Proxy.NO_PROXY; //Proxy to use for notification

   /**
    * Parse the URL that needs to be notified of the end of the job, along
@@ -73,6 +77,34 @@ public class JobEndNotifier implements Configurable {
     waitInterval = (waitInterval < 0) ? 5 : waitInterval;

     userUrl = conf.get(MRJobConfig.MR_JOB_END_NOTIFICATION_URL);
+
+    proxyConf = conf.get(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY);
+
+    //Configure the proxy to use if its set. It should be set like
+    //proxyType@proxyHostname:port
+    if(proxyConf != null && !proxyConf.equals("") &&
+         proxyConf.lastIndexOf(":") != -1) {
+      int typeIndex = proxyConf.indexOf("@");
+      Proxy.Type proxyType = Proxy.Type.HTTP;
+      if(typeIndex != -1 &&
+        proxyConf.substring(0, typeIndex).compareToIgnoreCase("socks") == 0) {
+        proxyType = Proxy.Type.SOCKS;
+      }
+      String hostname = proxyConf.substring(typeIndex + 1,
+        proxyConf.lastIndexOf(":"));
+      String portConf = proxyConf.substring(proxyConf.lastIndexOf(":") + 1);
+      try {
+        int port = Integer.parseInt(portConf);
+        proxyToUse = new Proxy(proxyType,
+          new InetSocketAddress(hostname, port));
+        Log.info("Job end notification using proxy type \"" + proxyType + 
+        "\" hostname \"" + hostname + "\" and port \"" + port + "\"");
+      } catch(NumberFormatException nfe) {
+        Log.warn("Job end notification couldn't parse configured proxy's port "
+          + portConf + ". Not going to use a proxy");
+      }
+    }
+
  }

  public Configuration getConf() {
@@ -87,7 +119,7 @@ public class JobEndNotifier implements Configurable {
    boolean success = false;
    try {
      Log.info("Job end notification trying " + urlToNotify);
-      URLConnection conn = urlToNotify.openConnection();
+      URLConnection conn = urlToNotify.openConnection(proxyToUse);
      conn.setConnectTimeout(5*1000);
      conn.setReadTimeout(5*1000);
      conn.setAllowUserInteraction(false);
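
The proxy wiring above is the standard java.net pattern. A minimal sketch with hypothetical host, port, and URL values (the config value format is proxyType@proxyHostname:port, e.g. socks@gateway.example.com:1080):

import java.net.InetSocketAddress;
import java.net.Proxy;
import java.net.URL;
import java.net.URLConnection;

public class ProxyNotifyDemo {
  public static void main(String[] args) throws Exception {
    // Equivalent of parsing the hypothetical value "socks@gateway.example.com:1080"
    Proxy proxy = new Proxy(Proxy.Type.SOCKS,
        new InetSocketAddress("gateway.example.com", 1080));
    URL url = new URL("http://listener.example.com/jobEnd?jobId=x");
    URLConnection conn = url.openConnection(proxy); // same overload as above
    conn.setConnectTimeout(5 * 1000);
    conn.setReadTimeout(5 * 1000);
  }
}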

+ 26 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java

@@ -258,7 +258,7 @@ public class MRAppMaster extends CompositeService {
     dispatcher.register(TaskAttemptEventType.class, 
         new TaskAttemptEventDispatcher());
     dispatcher.register(TaskCleaner.EventType.class, taskCleaner);
-    
+   
     if (conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false)
         || conf.getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false)) {
       //optional service to speculate on task attempts' progress
@@ -881,9 +881,31 @@ public class MRAppMaster extends CompositeService {
     }
     @Override
     public void handle(SpeculatorEvent event) {
-      if (!disabled && 
-          (conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false)
-          || conf.getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false))) {
+      if (disabled) {
+        return;
+      }
+
+      TaskId tId = event.getTaskID();
+      TaskType tType = null;
+      /* event's TaskId will be null if the event type is JOB_CREATE or
+       * ATTEMPT_STATUS_UPDATE
+       */
+      if (tId != null) {
+        tType = tId.getTaskType(); 
+      }
+      boolean shouldMapSpec =
+              conf.getBoolean(MRJobConfig.MAP_SPECULATIVE, false);
+      boolean shouldReduceSpec =
+              conf.getBoolean(MRJobConfig.REDUCE_SPECULATIVE, false);
+
+      /* The point of the following is to allow the MAP and REDUCE speculative
+       * config values to be independent:
+       * IF spec-exec is turned on for maps AND the task is a map task
+       * OR IF spec-exec is turned on for reduces AND the task is a reduce task
+       * THEN call the speculator to handle the event.
+       */
+      if ( (shouldMapSpec && (tType == null || tType == TaskType.MAP))
+        || (shouldReduceSpec && (tType == null || tType == TaskType.REDUCE))) {
        // Speculator IS enabled, direct the event to there.
        speculator.handle(event);
      }
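
The gate above reduces to a pure predicate; a hypothetical distillation (SpeculatorGate and shouldHandle are illustration-only names, not part of MRAppMaster):

import org.apache.hadoop.mapreduce.v2.api.records.TaskType;

final class SpeculatorGate {
  static boolean shouldHandle(boolean mapSpec, boolean reduceSpec, TaskType t) {
    // t == null marks a job-wide event (JOB_CREATE / ATTEMPT_STATUS_UPDATE)
    return (mapSpec && (t == null || t == TaskType.MAP))
        || (reduceSpec && (t == null || t == TaskType.REDUCE));
  }
}
// e.g. shouldHandle(true, false, TaskType.REDUCE) is false: reduce events
// are ignored when only map speculation is enabled.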

+ 2 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptListener.java

@@ -45,8 +45,9 @@ public interface TaskAttemptListener {
    * 
    * @param attemptID
    *          the id of the attempt for this JVM.
+   * @param jvmID the ID of the JVM.
    */
-  void registerLaunchedTask(TaskAttemptId attemptID);
+  void registerLaunchedTask(TaskAttemptId attemptID, WrappedJvmID jvmID);

   /**
    * Unregister the JVM and the attempt associated with it.  This should be 

+ 41 - 30
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java

@@ -18,13 +18,15 @@

 package org.apache.hadoop.mapreduce.v2.app;

-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
@@ -40,6 +42,7 @@ import org.apache.hadoop.yarn.service.AbstractService;
  * not hear from it for a long time.
  * 
  */
+@SuppressWarnings({"unchecked", "rawtypes"})
 public class TaskHeartbeatHandler extends AbstractService {

   private static final Log LOG = LogFactory.getLog(TaskHeartbeatHandler.class);
@@ -48,24 +51,29 @@ public class TaskHeartbeatHandler extends AbstractService {
   //received from a task.
   private Thread lostTaskCheckerThread;
   private volatile boolean stopped;
-  private int taskTimeOut = 5*60*1000;//5 mins
+  private int taskTimeOut = 5 * 60 * 1000;// 5 mins
+  private int taskTimeOutCheckInterval = 30 * 1000; // 30 seconds.

   private final EventHandler eventHandler;
   private final Clock clock;

-  private Map<TaskAttemptId, Long> runningAttempts 
-    = new HashMap<TaskAttemptId, Long>();
+  private ConcurrentMap<TaskAttemptId, Long> runningAttempts;

-  public TaskHeartbeatHandler(EventHandler eventHandler, Clock clock) {
+  public TaskHeartbeatHandler(EventHandler eventHandler, Clock clock,
+      int numThreads) {
     super("TaskHeartbeatHandler");
     this.eventHandler = eventHandler;
     this.clock = clock;
+    runningAttempts =
+      new ConcurrentHashMap<TaskAttemptId, Long>(16, 0.75f, numThreads);
   }

   @Override
   public void init(Configuration conf) {
-   super.init(conf);
-   taskTimeOut = conf.getInt("mapreduce.task.timeout", 5*60*1000);
+    super.init(conf);
+    taskTimeOut = conf.getInt(MRJobConfig.TASK_TIMEOUT, 5 * 60 * 1000);
+    taskTimeOutCheckInterval =
+        conf.getInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 30 * 1000);
   }

   @Override
@@ -83,18 +91,17 @@ public class TaskHeartbeatHandler extends AbstractService {
     super.stop();
   }

-  public synchronized void receivedPing(TaskAttemptId attemptID) {
-    //only put for the registered attempts
-    if (runningAttempts.containsKey(attemptID)) {
-      runningAttempts.put(attemptID, clock.getTime());
-    }
+  public void receivedPing(TaskAttemptId attemptID) {
+    //only put for the registered attempts
+    //TODO throw an exception if the task isn't registered.
+    runningAttempts.replace(attemptID, clock.getTime());
   }

-  public synchronized void register(TaskAttemptId attemptID) {
+  public void register(TaskAttemptId attemptID) {
     runningAttempts.put(attemptID, clock.getTime());
   }

-  public synchronized void unregister(TaskAttemptId attemptID) {
+  public void unregister(TaskAttemptId attemptID) {
     runningAttempts.remove(attemptID);
   }

@@ -103,36 +110,40 @@ public class TaskHeartbeatHandler extends AbstractService {
     @Override
     public void run() {
       while (!stopped && !Thread.currentThread().isInterrupted()) {
-        synchronized (TaskHeartbeatHandler.this) {
-          Iterator<Map.Entry<TaskAttemptId, Long>> iterator = 
+        Iterator<Map.Entry<TaskAttemptId, Long>> iterator =
            runningAttempts.entrySet().iterator();

-          //avoid calculating current time everytime in loop
-          long currentTime = clock.getTime();
+        // avoid calculating current time everytime in loop
+        long currentTime = clock.getTime();
+
+        while (iterator.hasNext()) {
+          Map.Entry<TaskAttemptId, Long> entry = iterator.next();
+          if (currentTime > entry.getValue() + taskTimeOut) {

-          while (iterator.hasNext()) {
-            Map.Entry<TaskAttemptId, Long> entry = iterator.next();
-            if (currentTime > entry.getValue() + taskTimeOut) {
-              //task is lost, remove from the list and raise lost event
+            //In case the iterator isn't picking up the latest.
+            // Extra lookup outside of the iterator - but only if the task
+            // is considered to be timed out.
+            Long taskTime = runningAttempts.get(entry.getKey());
+            if (taskTime != null && currentTime > taskTime + taskTimeOut) {
+              // task is lost, remove from the list and raise lost event
              iterator.remove();
-              eventHandler.handle(
-                  new TaskAttemptDiagnosticsUpdateEvent(entry.getKey(),
-                      "AttemptID:" + entry.getKey().toString() + 
-                      " Timed out after " + taskTimeOut/1000 + " secs"));
-              eventHandler.handle(new TaskAttemptEvent(entry
-                  .getKey(), TaskAttemptEventType.TA_TIMED_OUT));
+              eventHandler.handle(new TaskAttemptDiagnosticsUpdateEvent(entry
+                  .getKey(), "AttemptID:" + entry.getKey().toString()
+                  + " Timed out after " + taskTimeOut / 1000 + " secs"));
+              eventHandler.handle(new TaskAttemptEvent(entry.getKey(),
+                  TaskAttemptEventType.TA_TIMED_OUT));
            }
+
          }
        }
        try {
-          Thread.sleep(taskTimeOut);
+          Thread.sleep(taskTimeOutCheckInterval);
        } catch (InterruptedException e) {
          LOG.info("TaskHeartbeatHandler thread interrupted");
          break;
        }
      }
    }
-    
  }

 }
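
The lock-free receivedPing above hinges on ConcurrentMap.replace() being a no-op for absent keys. A small standalone demonstration (attempt IDs replaced with plain strings for brevity):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class PingDemo {
  public static void main(String[] args) {
    ConcurrentMap<String, Long> running = new ConcurrentHashMap<String, Long>();
    long now = System.currentTimeMillis();
    running.put("attempt_1", now);             // register
    running.replace("attempt_1", now + 1);     // ping for a registered attempt: updates
    running.replace("attempt_2", now + 1);     // ping for an unknown attempt: silently ignored
    running.remove("attempt_1");               // unregister
    System.out.println(running);               // {} -- attempt_2 was never added
  }
}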

+ 25 - 8
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java

@@ -31,11 +31,14 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
@@ -223,7 +226,7 @@ public class MRClientService extends AbstractService
      Job job = verifyAndGetJob(jobId, false);
      GetCountersResponse response =
        recordFactory.newRecordInstance(GetCountersResponse.class);
-      response.setCounters(job.getCounters());
+      response.setCounters(TypeConverter.toYarn(job.getAllCounters()));
      return response;
    }
    
@@ -237,8 +240,7 @@ public class MRClientService extends AbstractService
      response.setJobReport(job.getReport());
      return response;
    }
-    
-    
+
    @Override
    public GetTaskAttemptReportResponse getTaskAttemptReport(
        GetTaskAttemptReportRequest request) throws YarnRemoteException {
@@ -356,6 +358,8 @@ public class MRClientService extends AbstractService
      return response;
    }

+    private final Object getTaskReportsLock = new Object();
+
    @Override
    public GetTaskReportsResponse getTaskReports(
        GetTaskReportsRequest request) throws YarnRemoteException {
@@ -366,13 +370,26 @@ public class MRClientService extends AbstractService
        recordFactory.newRecordInstance(GetTaskReportsResponse.class);
      
      Job job = verifyAndGetJob(jobId, false);
-      LOG.info("Getting task report for " + taskType + "   " + jobId);
      Collection<Task> tasks = job.getTasks(taskType).values();
-      LOG.info("Getting task report size " + tasks.size());
-      for (Task task : tasks) {
-        response.addTaskReport(task.getReport());
-	  }
+      LOG.info("Getting task report for " + taskType + "   " + jobId
+          + ". Report-size will be " + tasks.size());
+
+      // Take lock to allow only one call, otherwise heap will blow up because
+      // of counters in the report when there are multiple callers.
+      synchronized (getTaskReportsLock) {
+        for (Task task : tasks) {
+          response.addTaskReport(task.getReport());
+        }
+      }
+
      return response;
    }
+
+    @Override
+    public GetDelegationTokenResponse getDelegationToken(
+        GetDelegationTokenRequest request) throws YarnRemoteException {
+      throw RPCUtil.getRemoteException("MR AM not authorized to issue delegation" +
+      		" token");
+    }
  }
}

+ 10 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java

@@ -22,9 +22,9 @@ import java.util.List;
 import java.util.Map;

 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@@ -44,7 +44,15 @@ public interface Job {
   String getName();
   JobState getState();
   JobReport getReport();
-  Counters getCounters();
+
+  /**
+   * Get all the counters of this job. This includes job-counters aggregated
+   * together with the counters of each task. This creates a clone of the
+   * Counters, so use this judiciously.
+   * @return job-counters and aggregate task-counters
+   */
+  Counters getAllCounters();
+
   Map<TaskId,Task> getTasks();
   Map<TaskId,Task> getTasks(TaskType taskType);
   Task getTask(TaskId taskID);
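
Because getAllCounters() aggregates over every task and clones the result, callers should fetch once and reuse. A hypothetical caller fragment, assuming a Job instance named job:

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobCounter;

// Hypothetical usage: one aggregation, several reads.
Counters all = job.getAllCounters();
long mapSlotMillis = all.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue();
long reduceSlotMillis = all.findCounter(JobCounter.SLOTS_MILLIS_REDUCES).getValue();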

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Task.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.mapreduce.v2.app.job;

 import java.util.Map;

-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.mapreduce.v2.app.job;

 import java.util.List;

-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;

+ 1 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptStatusUpdateEvent.java

@@ -20,12 +20,11 @@ package org.apache.hadoop.mapreduce.v2.app.job.event;

 import java.util.List;

-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.Phase;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;

-
 public class TaskAttemptStatusUpdateEvent extends TaskAttemptEvent {

   private TaskAttemptStatus reportedTaskAttemptStatus;

+ 29 - 76
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobACLsManager;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -61,9 +62,6 @@ import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
 import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
 import org.apache.hadoop.mapreduce.task.JobContextImpl;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@@ -99,7 +97,6 @@ import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.event.EventHandler;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
 import org.apache.hadoop.yarn.state.MultipleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
@@ -109,10 +106,13 @@ import org.apache.hadoop.yarn.state.StateMachineFactory;
 /** Implementation of Job interface. Maintains the state machines of Job.
  * The read and write calls use ReadWriteLock for concurrency.
  */
-@SuppressWarnings({ "rawtypes", "deprecation" })
+@SuppressWarnings({ "rawtypes", "deprecation", "unchecked" })
 public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, 
   EventHandler<JobEvent> {

+  private static final TaskAttemptCompletionEvent[]
+    EMPTY_TASK_ATTEMPT_COMPLETION_EVENTS = new TaskAttemptCompletionEvent[0];
+
   private static final Log LOG = LogFactory.getLog(JobImpl.class);

   //The maximum fraction of fetch failures allowed for a map
@@ -152,7 +152,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,

   private boolean lazyTasksCopyNeeded = false;
   volatile Map<TaskId, Task> tasks = new LinkedHashMap<TaskId, Task>();
-  private Counters jobCounters = newCounters();
+  private Counters jobCounters = new Counters();
     // FIXME:  
     //
     // Can then replace task-level uber counters (MR-2424) with job-level ones
@@ -475,88 +475,29 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
   }

   @Override
-  public Counters getCounters() {
-    Counters counters = newCounters();
+  public Counters getAllCounters() {
+    Counters counters = new Counters();
     readLock.lock();
     try {
-      incrAllCounters(counters, jobCounters);
+      counters.incrAllCounters(jobCounters);
       return incrTaskCounters(counters, tasks.values());
     } finally {
       readLock.unlock();
     }
   }

-  private Counters getTypeCounters(Set<TaskId> taskIds) {
-    Counters counters = newCounters();
-    for (TaskId taskId : taskIds) {
-      Task task = tasks.get(taskId);
-      incrAllCounters(counters, task.getCounters());
-    }
-    return counters;
-  }
-
-  private Counters getMapCounters() {
-    readLock.lock();
-    try {
-      return getTypeCounters(mapTasks);
-    } finally {
-      readLock.unlock();
-    }
-  }
-  
-  private Counters getReduceCounters() {
-    readLock.lock();
-    try {
-      return getTypeCounters(reduceTasks);
-    } finally {
-      readLock.unlock();
-    }
-  }
-  
-  public static Counters newCounters() {
-    Counters counters = RecordFactoryProvider.getRecordFactory(null)
-        .newRecordInstance(Counters.class);
-    return counters;
-  }
-
-  public static Counters incrTaskCounters(Counters counters,
-                                          Collection<Task> tasks) {
+  public static Counters incrTaskCounters(
+      Counters counters, Collection<Task> tasks) {
     for (Task task : tasks) {
-      incrAllCounters(counters, task.getCounters());
+      counters.incrAllCounters(task.getCounters());
     }
     return counters;
   }

-  public static void incrAllCounters(Counters counters, Counters other) {
-    if (other != null) {
-      for (CounterGroup otherGroup: other.getAllCounterGroups().values()) {
-        CounterGroup group = counters.getCounterGroup(otherGroup.getName());
-        if (group == null) {
-          group = RecordFactoryProvider.getRecordFactory(null)
-              .newRecordInstance(CounterGroup.class);
-          group.setName(otherGroup.getName());
-          counters.setCounterGroup(group.getName(), group);
-        }
-        group.setDisplayName(otherGroup.getDisplayName());
-        for (Counter otherCounter : otherGroup.getAllCounters().values()) {
-          Counter counter = group.getCounter(otherCounter.getName());
-          if (counter == null) {
-            counter = RecordFactoryProvider.getRecordFactory(null)
-                .newRecordInstance(Counter.class);
-            counter.setName(otherCounter.getName());
-            group.setCounter(counter.getName(), counter);
-          }
-          counter.setDisplayName(otherCounter.getDisplayName());
-          counter.setValue(counter.getValue() + otherCounter.getValue());
-        }
-      }
-    }
-  }
-
   @Override
   public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
       int fromEventId, int maxEvents) {
-    TaskAttemptCompletionEvent[] events = new TaskAttemptCompletionEvent[0];
+    TaskAttemptCompletionEvent[] events = EMPTY_TASK_ATTEMPT_COMPLETION_EVENTS;
     readLock.lock();
     try {
       if (taskAttemptCompletionEvents.size() > fromEventId) {
@@ -1204,13 +1145,24 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
   // area. May need to create a new event type for this if JobFinished should 
   // not be generated for KilledJobs, etc.
   private static JobFinishedEvent createJobFinishedEvent(JobImpl job) {
+
+    Counters mapCounters = new Counters();
+    Counters reduceCounters = new Counters();
+    for (Task t : job.tasks.values()) {
+      Counters counters = t.getCounters();
+      switch (t.getType()) {
+        case MAP:     mapCounters.incrAllCounters(counters);     break;
+        case REDUCE:  reduceCounters.incrAllCounters(counters);  break;
+      }
+    }
+
     JobFinishedEvent jfe = new JobFinishedEvent(
     JobFinishedEvent jfe = new JobFinishedEvent(
         job.oldJobId, job.finishTime,
         job.succeededMapTaskCount, job.succeededReduceTaskCount,
         job.failedMapTaskCount, job.failedReduceTaskCount,
-        TypeConverter.fromYarn(job.getMapCounters()),
-        TypeConverter.fromYarn(job.getReduceCounters()),
-        TypeConverter.fromYarn(job.getCounters()));
+        mapCounters,
+        reduceCounters,
+        job.getAllCounters());
     return jfe;
   }

@@ -1450,7 +1402,8 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
      JobCounterUpdateEvent jce = (JobCounterUpdateEvent) event;
      for (JobCounterUpdateEvent.CounterIncrementalUpdate ci : jce
          .getCounterUpdates()) {
-        job.jobCounters.incrCounter(ci.getCounterKey(), ci.getIncrementValue());
+        job.jobCounters.findCounter(ci.getCounterKey()).increment(
+          ci.getIncrementValue());
      }
    }
  }
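
The hand-rolled group-by-group merging removed above is what Counters.incrAllCounters() does internally. A small self-contained check of that idiom:

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskCounter;

public class CounterMergeDemo {
  public static void main(String[] args) {
    Counters total = new Counters();
    Counters taskA = new Counters();
    taskA.findCounter(TaskCounter.MAP_INPUT_RECORDS).increment(100);
    Counters taskB = new Counters();
    taskB.findCounter(TaskCounter.MAP_INPUT_RECORDS).increment(50);
    total.incrAllCounters(taskA);  // merges group by group, creating missing counters
    total.incrAllCounters(taskB);
    // prints 150
    System.out.println(total.findCounter(TaskCounter.MAP_INPUT_RECORDS).getValue());
  }
}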

+ 16 - 16
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java

@@ -47,6 +47,8 @@ import org.apache.hadoop.mapred.Task;
 import org.apache.hadoop.mapred.TaskAttemptContextImpl;
 import org.apache.hadoop.mapred.WrappedJvmID;
 import org.apache.hadoop.mapred.WrappedProgressSplitsBlock;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobCounter;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.OutputCommitter;
@@ -60,8 +62,6 @@ import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptStartedEvent;
 import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletionEvent;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.Phase;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
@@ -132,6 +132,7 @@ public abstract class TaskAttemptImpl implements
     org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt,
       EventHandler<TaskAttemptEvent> {

+  static final Counters EMPTY_COUNTERS = new Counters();
   private static final Log LOG = LogFactory.getLog(TaskAttemptImpl.class);
   private static final long MEMORY_SPLITS_RESOLUTION = 1024; //TODO Make configurable?
   private static final int MAP_MEMORY_MB_DEFAULT = 1024;
@@ -846,7 +847,7 @@ public abstract class TaskAttemptImpl implements
      result.setDiagnosticInfo(StringUtils.join(LINE_SEPARATOR, getDiagnostics()));
      result.setPhase(reportedStatus.phase);
      result.setStateString(reportedStatus.stateString);
-      result.setCounters(getCounters());
+      result.setCounters(TypeConverter.toYarn(getCounters()));
      result.setContainerId(this.getAssignedContainerID());
      result.setNodeManagerHost(trackerName);
      result.setNodeManagerHttpPort(httpPort);
@@ -877,7 +878,7 @@ public abstract class TaskAttemptImpl implements
     try {
       Counters counters = reportedStatus.counters;
       if (counters == null) {
-        counters = recordFactory.newRecordInstance(Counters.class);
+        counters = EMPTY_COUNTERS;
 //        counters.groups = new HashMap<String, CounterGroup>();
       }
       return counters;
@@ -1031,22 +1032,21 @@ public abstract class TaskAttemptImpl implements
             (int) (now - start));
       }

-      Counter cpuCounter = counters.getCounter(
-          TaskCounter.CPU_MILLISECONDS);
+      Counter cpuCounter = counters.findCounter(TaskCounter.CPU_MILLISECONDS);
       if (cpuCounter != null && cpuCounter.getValue() <= Integer.MAX_VALUE) {
         splitsBlock.getProgressCPUTime().extend(newProgress,
-            (int) cpuCounter.getValue());
+            (int) cpuCounter.getValue()); // long to int? TODO: FIX. Same below
       }

-      Counter virtualBytes = counters.getCounter(
-          TaskCounter.VIRTUAL_MEMORY_BYTES);
+      Counter virtualBytes = counters
+        .findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES);
       if (virtualBytes != null) {
         splitsBlock.getProgressVirtualMemoryKbytes().extend(newProgress,
             (int) (virtualBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
       }

-      Counter physicalBytes = counters.getCounter(
-          TaskCounter.PHYSICAL_MEMORY_BYTES);
+      Counter physicalBytes = counters
+        .findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES);
       if (physicalBytes != null) {
         splitsBlock.getProgressPhysicalMemoryKbytes().extend(newProgress,
             (int) (physicalBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
@@ -1201,7 +1201,7 @@ public abstract class TaskAttemptImpl implements

       // register it to TaskAttemptListener so that it can start monitoring it.
       taskAttempt.taskAttemptListener
-        .registerLaunchedTask(taskAttempt.attemptId);
+        .registerLaunchedTask(taskAttempt.attemptId, taskAttempt.jvmID);
       //TODO Resolve to host / IP in case of a local address.
       InetSocketAddress nodeHttpInetAddr =
           NetUtils.createSocketAddr(taskAttempt.nodeHttpAddress); // TODO:
@@ -1343,7 +1343,7 @@ public abstract class TaskAttemptImpl implements
         this.containerNodeId == null ? -1 : this.containerNodeId.getPort(),
         this.nodeRackName == null ? "UNKNOWN" : this.nodeRackName,
         this.reportedStatus.stateString,
-         TypeConverter.fromYarn(getCounters()),
+         getCounters(),
         getProgressSplitBlock().burst());
         eventHandler.handle(
           new JobHistoryEvent(attemptId.getTaskId().getJobId(), mfe));
@@ -1360,7 +1360,7 @@ public abstract class TaskAttemptImpl implements
         this.containerNodeId == null ? -1 : this.containerNodeId.getPort(),
         this.nodeRackName == null ? "UNKNOWN" : this.nodeRackName,
         this.reportedStatus.stateString,
-         TypeConverter.fromYarn(getCounters()),
+         getCounters(),
         getProgressSplitBlock().burst());
         eventHandler.handle(
           new JobHistoryEvent(attemptId.getTaskId().getJobId(), rfe));
@@ -1498,8 +1498,8 @@ public abstract class TaskAttemptImpl implements
    result.phase = Phase.STARTING;
    result.stateString = "NEW";
    result.taskState = TaskAttemptState.NEW;
-    Counters counters = recordFactory.newRecordInstance(Counters.class);
-//    counters.groups = new HashMap<String, CounterGroup>();
+    Counters counters = EMPTY_COUNTERS;
+    //    counters.groups = new HashMap<String, CounterGroup>();
    result.counters = counters;
  }

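In the mapreduce Counters API adopted throughout this block, findCounter() creates the counter on first access rather than returning null, so the null checks above are defensive rather than required. For illustration:

import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskCounter;

public class FindCounterDemo {
  public static void main(String[] args) {
    Counters counters = new Counters();
    // findCounter() never returns null for a known enum: it creates on demand.
    Counter cpu = counters.findCounter(TaskCounter.CPU_MILLISECONDS);
    cpu.increment(1234);
    System.out.println(cpu.getValue()); // 1234
  }
}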

+ 8 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java

@@ -33,6 +33,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
@@ -40,7 +41,6 @@ import org.apache.hadoop.mapreduce.jobhistory.TaskFailedEvent;
 import org.apache.hadoop.mapreduce.jobhistory.TaskFinishedEvent;
 import org.apache.hadoop.mapreduce.jobhistory.TaskStartedEvent;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
@@ -329,7 +329,6 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
      report.setFinishTime(getFinishTime());
      report.setTaskState(getState());
      report.setProgress(getProgress());
-      report.setCounters(getCounters());

      for (TaskAttempt attempt : attempts.values()) {
        if (TaskAttemptState.RUNNING.equals(attempt.getState())) {
@@ -346,6 +345,11 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
          
        }
      }
+
+      // Add a copy of counters as the last step so that their lifetime on heap
+      // is as small as possible.
+      report.setCounters(TypeConverter.toYarn(getCounters()));
+
      return report;
    } finally {
      readLock.unlock();
@@ -361,7 +365,7 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
      if (bestAttempt != null) {
        counters = bestAttempt.getCounters();
      } else {
-        counters = recordFactory.newRecordInstance(Counters.class);
+        counters = TaskAttemptImpl.EMPTY_COUNTERS;
 //        counters.groups = new HashMap<CharSequence, CounterGroup>();
      }
      return counters;
@@ -595,7 +599,7 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
        task.getFinishTime(task.successfulAttempt),
        TypeConverter.fromYarn(task.taskId.getTaskType()),
        taskState.toString(),
-        TypeConverter.fromYarn(task.getCounters()));
+        task.getCounters());
    return tfe;
  }
  

+ 229 - 167
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java

@@ -26,6 +26,7 @@ import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
 import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
@@ -44,8 +45,6 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunched
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
-import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
-import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -75,16 +74,217 @@ public class ContainerLauncherImpl extends AbstractService implements

   int nmTimeOut;

+  private ConcurrentHashMap<ContainerId, Container> containers = 
+    new ConcurrentHashMap<ContainerId, Container>(); 
   private AppContext context;
-  private ThreadPoolExecutor launcherPool;
-  private static final int INITIAL_POOL_SIZE = 10;
+  protected ThreadPoolExecutor launcherPool;
+  protected static final int INITIAL_POOL_SIZE = 10;
   private int limitOnPoolSize;
   private Thread eventHandlingThread;
-  private BlockingQueue<ContainerLauncherEvent> eventQueue =
+  protected BlockingQueue<ContainerLauncherEvent> eventQueue =
       new LinkedBlockingQueue<ContainerLauncherEvent>();
   final Timer commandTimer = new Timer(true);
   YarnRPC rpc;

+  private Container getContainer(ContainerId id) {
+    Container c = containers.get(id);
+    if(c == null) {
+      c = new Container();
+      Container old = containers.putIfAbsent(id, c);
+      if(old != null) {
+        c = old;
+      }
+    }
+    return c;
+  }
+  
+  private void removeContainerIfDone(ContainerId id) {
+    Container c = containers.get(id);
+    if(c != null && c.isCompletelyDone()) {
+      containers.remove(id);
+    }
+  }
+  
+  private static enum ContainerState {
+    PREP, FAILED, RUNNING, DONE, KILLED_BEFORE_LAUNCH
+  }
+
+  private class Container {
+    private ContainerState state;
+    
+    public Container() {
+      this.state = ContainerState.PREP;
+    }
+    
+    public synchronized boolean isCompletelyDone() {
+      return state == ContainerState.DONE || state == ContainerState.FAILED;
+    }
+    
+    @SuppressWarnings("unchecked")
+    public synchronized void launch(ContainerRemoteLaunchEvent event) {
+      TaskAttemptId taskAttemptID = event.getTaskAttemptID();
+      LOG.info("Launching " + taskAttemptID);
+      if(this.state == ContainerState.KILLED_BEFORE_LAUNCH) {
+        state = ContainerState.DONE;
+        sendContainerLaunchFailedMsg(taskAttemptID, 
+            "Container was killed before it was launched");
+        return;
+      }
+      CommandTimerTask timerTask = new CommandTimerTask(Thread
+          .currentThread(), event);
+      
+      final String containerManagerBindAddr = event.getContainerMgrAddress();
+      ContainerId containerID = event.getContainerID();
+      ContainerToken containerToken = event.getContainerToken();
+
+      ContainerManager proxy = null;
+      try {
+        commandTimer.schedule(timerTask, nmTimeOut);
+
+        proxy = getCMProxy(containerID, containerManagerBindAddr,
+            containerToken);
+
+        // Interrupted during getProxy, but that didn't throw exception
+        if (Thread.interrupted()) {
+          // The timer canceled the command in the mean while.
+          String message = "Container launch failed for " + containerID
+              + " : Start-container for " + event.getContainerID()
+              + " got interrupted. Returning.";
+          this.state = ContainerState.FAILED;
+          sendContainerLaunchFailedMsg(taskAttemptID, message);
+          return;
+        }
+        // Construct the actual Container
+        ContainerLaunchContext containerLaunchContext =
+          event.getContainer();
+
+        // Now launch the actual container
+        StartContainerRequest startRequest = Records
+          .newRecord(StartContainerRequest.class);
+        startRequest.setContainerLaunchContext(containerLaunchContext);
+        StartContainerResponse response = proxy.startContainer(startRequest);
+
+        // container started properly. Stop the timer
+        timerTask.cancel();
+        if (Thread.interrupted()) {
+          // The timer canceled the command in the mean while, but
+          // startContainer didn't throw exception
+          String message = "Container launch failed for " + containerID
+              + " : Start-container for " + event.getContainerID()
+              + " got interrupted. Returning.";
+          this.state = ContainerState.FAILED;
+          sendContainerLaunchFailedMsg(taskAttemptID, message);
+          return;
+        }
+
+        ByteBuffer portInfo = response
+          .getServiceResponse(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID);
+        int port = -1;
+        if(portInfo != null) {
+          port = ShuffleHandler.deserializeMetaData(portInfo);
+        }
+        LOG.info("Shuffle port returned by ContainerManager for "
+            + taskAttemptID + " : " + port);
+
+        if(port < 0) {
+          this.state = ContainerState.FAILED;
+          throw new IllegalStateException("Invalid shuffle port number "
+              + port + " returned for " + taskAttemptID);
+        }
+
+        // after launching, send launched event to task attempt to move
+        // it from ASSIGNED to RUNNING state
+        context.getEventHandler().handle(
+            new TaskAttemptContainerLaunchedEvent(taskAttemptID, port));
+        this.state = ContainerState.RUNNING;
+      } catch (Throwable t) {
+        if (Thread.interrupted()) {
+          // The timer canceled the command in the mean while.
+          LOG.info("Start-container for " + event.getContainerID()
+              + " got interrupted.");
+        }
+        String message = "Container launch failed for " + containerID + " : "
+            + StringUtils.stringifyException(t);
+        this.state = ContainerState.FAILED;
+        sendContainerLaunchFailedMsg(taskAttemptID, message);
+      } finally {
+        timerTask.cancel();
+        if (proxy != null) {
+          ContainerLauncherImpl.this.rpc.stopProxy(proxy, getConfig());
+        }
+      }
+    }
+    
+    @SuppressWarnings("unchecked")
+    public synchronized void kill(ContainerLauncherEvent event) {
+      if(this.state == ContainerState.PREP) {
+        this.state = ContainerState.KILLED_BEFORE_LAUNCH;
+      } else {
+        CommandTimerTask timerTask = new CommandTimerTask(Thread
+            .currentThread(), event);
+
+        final String containerManagerBindAddr = event.getContainerMgrAddress();
+        ContainerId containerID = event.getContainerID();
+        ContainerToken containerToken = event.getContainerToken();
+        TaskAttemptId taskAttemptID = event.getTaskAttemptID();
+        LOG.info("KILLING " + taskAttemptID);
+        commandTimer.schedule(timerTask, nmTimeOut);
+
+        ContainerManager proxy = null;
+        try {
+          proxy = getCMProxy(containerID, containerManagerBindAddr,
+              containerToken);
+
+          if (Thread.interrupted()) {
+            // The timer canceled the command in the mean while. No need to
+            // return, send cleaned up event anyways.
+            LOG.info("Stop-container for " + event.getContainerID()
+                + " got interrupted.");
+          } else {
+            // kill the remote container if already launched
+            StopContainerRequest stopRequest = Records
+              .newRecord(StopContainerRequest.class);
+            stopRequest.setContainerId(event.getContainerID());
+            proxy.stopContainer(stopRequest);
+          }
+        } catch (Throwable t) {
+
+          if (Thread.interrupted()) {
+            // The timer canceled the command in the mean while, clear the
+            // interrupt flag
+            LOG.info("Stop-container for " + event.getContainerID()
+                + " got interrupted.");
+          }
+
+          // ignore the cleanup failure
+          String message = "cleanup failed for container "
+            + event.getContainerID() + " : "
+            + StringUtils.stringifyException(t);
+          context.getEventHandler().handle(
+            new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID, message));
+          LOG.warn(message);
+        } finally {
+          timerTask.cancel();
+          if (Thread.interrupted()) {
+            LOG.info("Stop-container for " + event.getContainerID()
+                + " got interrupted.");
+            // ignore the cleanup failure
+            context.getEventHandler().handle(
+              new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID,
+                "cleanup failed for container " + event.getContainerID()));
+          }
+          if (proxy != null) {
+            ContainerLauncherImpl.this.rpc.stopProxy(proxy, getConfig());
+          }
+        }
+        this.state = ContainerState.DONE;
+      }
+      // after killing, send killed event to task attempt
+      context.getEventHandler().handle(
+          new TaskAttemptEvent(event.getTaskAttemptID(),
+              TaskAttemptEventType.TA_CONTAINER_CLEANED));
+    }
+  }
   // To track numNodes.
   Set<String> allNodes = new HashSet<String>();
 
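The getContainer helper introduced above is the standard lock-free get-or-create idiom for ConcurrentHashMap: optimistically create, then let putIfAbsent decide the winner when two threads race. A self-contained sketch of the same idiom (ContainerRegistry and Entry are illustrative stand-ins, not part of the patch):

    import java.util.concurrent.ConcurrentHashMap;

    class ContainerRegistry {
        static final class Entry {
            volatile String state = "PREP";
        }

        private final ConcurrentHashMap<String, Entry> entries =
            new ConcurrentHashMap<String, Entry>();

        Entry get(String id) {
            Entry e = entries.get(id);
            if (e == null) {
                e = new Entry();
                // putIfAbsent is atomic: if another thread inserted first,
                // it returns that thread's Entry and ours is discarded.
                Entry old = entries.putIfAbsent(id, e);
                if (old != null) {
                    e = old;
                }
            }
            return e;
        }

        public static void main(String[] args) {
            ContainerRegistry r = new ContainerRegistry();
            // Repeated lookups yield the same instance, even under races.
            System.out.println(r.get("c1") == r.get("c1")); // prints true
        }
    }
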
@@ -102,11 +302,16 @@ public class ContainerLauncherImpl extends AbstractService implements
     this.limitOnPoolSize = conf.getInt(
         MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT,
         MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT);
+    LOG.info("Upper limit on the thread pool size is " + this.limitOnPoolSize);
     this.nmTimeOut = conf.getInt(ContainerLauncher.MR_AM_NM_COMMAND_TIMEOUT,
         ContainerLauncher.DEFAULT_NM_COMMAND_TIMEOUT);
-    this.rpc = YarnRPC.create(conf);
+    this.rpc = createYarnRPC(conf);
     super.init(conf);
   }
+
+  protected YarnRPC createYarnRPC(Configuration conf) {
+    return YarnRPC.create(conf);
+  }
 
   public void start() {
 
@@ -118,7 +323,7 @@ public class ContainerLauncherImpl extends AbstractService implements
         Integer.MAX_VALUE, 1, TimeUnit.HOURS,
         new LinkedBlockingQueue<Runnable>(),
         tf);
-    eventHandlingThread = new Thread(new Runnable() {
+    eventHandlingThread = new Thread() {
       @Override
       public void run() {
         ContainerLauncherEvent event = null;
@@ -141,26 +346,27 @@ public class ContainerLauncherImpl extends AbstractService implements
             int numNodes = allNodes.size();
             int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
 
-            if (poolSize <= idealPoolSize) {
+            if (poolSize < idealPoolSize) {
               // Bump up the pool size to idealPoolSize+INITIAL_POOL_SIZE, the
               // later is just a buffer so we are not always increasing the
               // pool-size
-              int newPoolSize = idealPoolSize + INITIAL_POOL_SIZE;
-              LOG.info("Setting ContainerLauncher pool size to "
-                  + newPoolSize);
+              int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize
+                  + INITIAL_POOL_SIZE);
+              LOG.info("Setting ContainerLauncher pool size to " + newPoolSize
+                  + " as number-of-nodes to talk to is " + numNodes);
               launcherPool.setCorePoolSize(newPoolSize);
             }
           }
 
           // the events from the queue are handled in parallel
           // using a thread pool
-          launcherPool.execute(new EventProcessor(event));
+          launcherPool.execute(createEventProcessor(event));
 
           // TODO: Group launching of multiple containers to a single
           // NodeManager into a single connection
         }
       }
-    });
+    };
     eventHandlingThread.setName("ContainerLauncher Event Handler");
     eventHandlingThread.start();
     super.start();
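A detail worth noting in the resize hunk: a ThreadPoolExecutor fed by an unbounded LinkedBlockingQueue never grows past its core size on its own (tasks queue instead of spawning threads), which is presumably why the code raises corePoolSize by hand. A runnable sketch of the clamped growth computation, with made-up limit and node-count values:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    class PoolGrowthSketch {
        static final int INITIAL_POOL_SIZE = 10;

        public static void main(String[] args) {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                INITIAL_POOL_SIZE, Integer.MAX_VALUE, 1, TimeUnit.HOURS,
                new LinkedBlockingQueue<Runnable>());

            int limitOnPoolSize = 500; // configured thread-count limit
            int numNodes = 40;         // distinct NodeManagers seen so far

            int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
            if (pool.getCorePoolSize() < idealPoolSize) {
                // Overshoot the ideal by a fixed buffer so the pool is not
                // resized on every new node, but never exceed the limit.
                int newPoolSize = Math.min(limitOnPoolSize,
                    idealPoolSize + INITIAL_POOL_SIZE);
                pool.setCorePoolSize(newPoolSize);
            }
            System.out.println(pool.getCorePoolSize()); // prints 50
            pool.shutdown();
        }
    }
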
@@ -172,14 +378,16 @@ public class ContainerLauncherImpl extends AbstractService implements
     super.stop();
   }
 
+  protected EventProcessor createEventProcessor(ContainerLauncherEvent event) {
+    return new EventProcessor(event);
+  }
+
   protected ContainerManager getCMProxy(ContainerId containerID,
       final String containerManagerBindAddr, ContainerToken containerToken)
       throws IOException {
 
     UserGroupInformation user = UserGroupInformation.getCurrentUser();
 
-    this.allNodes.add(containerManagerBindAddr);
-
     if (UserGroupInformation.isSecurityEnabled()) {
       Token<ContainerTokenIdentifier> token = new Token<ContainerTokenIdentifier>(
           containerToken.getIdentifier().array(), containerToken
@@ -244,182 +452,35 @@ public class ContainerLauncherImpl extends AbstractService implements
   /**
    * Setup and start the container on remote nodemanager.
    */
-  private class EventProcessor implements Runnable {
+  class EventProcessor implements Runnable {
     private ContainerLauncherEvent event;
 
     EventProcessor(ContainerLauncherEvent event) {
       this.event = event;
     }
 
-    @SuppressWarnings("unchecked")
     @Override
     public void run() {
       LOG.info("Processing the event " + event.toString());
 
       // Load ContainerManager tokens before creating a connection.
       // TODO: Do it only once per NodeManager.
-      final String containerManagerBindAddr = event.getContainerMgrAddress();
       ContainerId containerID = event.getContainerID();
-      ContainerToken containerToken = event.getContainerToken();
-      TaskAttemptId taskAttemptID = event.getTaskAttemptID();
-
-      ContainerManager proxy = null;
-
-      CommandTimerTask timerTask = new CommandTimerTask(Thread
-          .currentThread(), event);
 
+      Container c = getContainer(containerID);
       switch(event.getType()) {
 
       case CONTAINER_REMOTE_LAUNCH:
         ContainerRemoteLaunchEvent launchEvent
             = (ContainerRemoteLaunchEvent) event;
-
-        try {
-          commandTimer.schedule(timerTask, nmTimeOut);
-
-          proxy = getCMProxy(containerID, containerManagerBindAddr,
-              containerToken);
-
-          // Interruped during getProxy, but that didn't throw exception
-          if (Thread.interrupted()) {
-            // The timer cancelled the command in the mean while.
-            String message = "Container launch failed for " + containerID
-                + " : Start-container for " + event.getContainerID()
-                + " got interrupted. Returning.";
-            sendContainerLaunchFailedMsg(taskAttemptID, message);
-            return;
-          }
-
-          // Construct the actual Container
-          ContainerLaunchContext containerLaunchContext =
-              launchEvent.getContainer();
-
-          // Now launch the actual container
-          StartContainerRequest startRequest = Records
-              .newRecord(StartContainerRequest.class);
-          startRequest.setContainerLaunchContext(containerLaunchContext);
-          StartContainerResponse response = proxy.startContainer(startRequest);
-
-          // container started properly. Stop the timer
-          timerTask.cancel();
-          if (Thread.interrupted()) {
-            // The timer cancelled the command in the mean while, but
-            // startContainer didn't throw exception
-            String message = "Container launch failed for " + containerID
-                + " : Start-container for " + event.getContainerID()
-                + " got interrupted. Returning.";
-            sendContainerLaunchFailedMsg(taskAttemptID, message);
-            return;
-          }
-
-          ByteBuffer portInfo = response
-              .getServiceResponse(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID);
-          int port = -1;
-          if(portInfo != null) {
-            port = ShuffleHandler.deserializeMetaData(portInfo);
-          }
-          LOG.info("Shuffle port returned by ContainerManager for "
-              + taskAttemptID + " : " + port);
-          
-          if(port < 0) {
-            throw new IllegalStateException("Invalid shuffle port number "
-                + port + " returned for " + taskAttemptID);
-          }
-
-          // after launching, send launched event to task attempt to move
-          // it from ASSIGNED to RUNNING state
-          context.getEventHandler().handle(
-              new TaskAttemptContainerLaunchedEvent(taskAttemptID, port));
-        } catch (Throwable t) {
-          if (Thread.interrupted()) {
-            // The timer cancelled the command in the mean while.
-            LOG.info("Start-container for " + event.getContainerID()
-                + " got interrupted.");
-          }
-          String message = "Container launch failed for " + containerID
-              + " : " + StringUtils.stringifyException(t);
-          sendContainerLaunchFailedMsg(taskAttemptID, message);
-        } finally {
-          timerTask.cancel();
-          if (proxy != null) {
-            ContainerLauncherImpl.this.rpc.stopProxy(proxy, getConfig());
-          }
-        }
-
+        c.launch(launchEvent);
         break;
 
       case CONTAINER_REMOTE_CLEANUP:
-        // We will have to remove the launch event if it is still in eventQueue
-        // and not yet processed
-        if (eventQueue.contains(event)) {
-          eventQueue.remove(event); // TODO: Any synchro needed?
-          //deallocate the container
-          context.getEventHandler().handle(
-              new ContainerAllocatorEvent(taskAttemptID,
-                  ContainerAllocator.EventType.CONTAINER_DEALLOCATE));
-        } else {
-
-          try {
-            commandTimer.schedule(timerTask, nmTimeOut);
-
-            proxy = getCMProxy(containerID, containerManagerBindAddr,
-                containerToken);
-
-            if (Thread.interrupted()) {
-              // The timer cancelled the command in the mean while. No need to
-              // return, send cleanedup event anyways.
-              LOG.info("Stop-container for " + event.getContainerID()
-                  + " got interrupted.");
-            } else {
-
-              // TODO:check whether container is launched
-
-              // kill the remote container if already launched
-              StopContainerRequest stopRequest = Records
-                  .newRecord(StopContainerRequest.class);
-              stopRequest.setContainerId(event.getContainerID());
-              proxy.stopContainer(stopRequest);
-            }
-          } catch (Throwable t) {
-
-            if (Thread.interrupted()) {
-              // The timer cancelled the command in the mean while, clear the
-              // interrupt flag
-              LOG.info("Stop-container for " + event.getContainerID()
-                  + " got interrupted.");
-            }
-
-            // ignore the cleanup failure
-            String message = "cleanup failed for container "
-                + event.getContainerID() + " : "
-                + StringUtils.stringifyException(t);
-            context.getEventHandler()
-                .handle(
-                    new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID,
-                        message));
-            LOG.warn(message);
-          } finally {
-            timerTask.cancel();
-            if (Thread.interrupted()) {
-              LOG.info("Stop-container for " + event.getContainerID()
-                  + " got interrupted.");
-              // ignore the cleanup failure
-              context.getEventHandler()
-                  .handle(new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID,
-                    "cleanup failed for container " + event.getContainerID()));
-            }
-            if (proxy != null) {
-              ContainerLauncherImpl.this.rpc.stopProxy(proxy, getConfig());
-            }
-          }
-
-          // after killing, send killed event to taskattempt
-          context.getEventHandler().handle(
-              new TaskAttemptEvent(event.getTaskAttemptID(),
-                  TaskAttemptEventType.TA_CONTAINER_CLEANED));
-        }
+        c.kill(event);
         break;
       }
+      removeContainerIfDone(containerID);
     }
   }
 
@@ -438,6 +499,7 @@ public class ContainerLauncherImpl extends AbstractService implements
   public void handle(ContainerLauncherEvent event) {
     try {
       eventQueue.put(event);
+      this.allNodes.add(event.getContainerMgrAddress());
     } catch (InterruptedException e) {
       throw new YarnException(e);
     }

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerRemoteLaunchEvent.java

@@ -46,6 +46,11 @@ public class ContainerRemoteLaunchEvent extends ContainerLauncherEvent {
   public Task getRemoteTask() {
     return this.task;
   }
+  
+  @Override
+  public int hashCode() {
+    return super.hashCode();
+  }
 
   @Override
   public boolean equals(Object obj) {

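The hunk above adds a hashCode override next to the existing equals override; even where it only delegates to super, keeping the pair together makes the equals/hashCode contract explicit. A generic illustration of why the two must move together (LaunchKey is a hypothetical class, not part of the patch):

    import java.util.HashSet;
    import java.util.Set;

    final class LaunchKey {
        private final String containerId;

        LaunchKey(String containerId) {
            this.containerId = containerId;
        }

        @Override
        public boolean equals(Object obj) {
            if (this == obj) {
                return true;
            }
            if (!(obj instanceof LaunchKey)) {
                return false;
            }
            return containerId.equals(((LaunchKey) obj).containerId);
        }

        // Equal objects must report equal hash codes; otherwise they land
        // in different buckets and HashSet/HashMap lookups silently miss.
        @Override
        public int hashCode() {
            return containerId.hashCode();
        }

        public static void main(String[] args) {
            Set<LaunchKey> seen = new HashSet<LaunchKey>();
            seen.add(new LaunchKey("container_1"));
            System.out.println(seen.contains(new LaunchKey("container_1")));
        }
    }
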
+ 1 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java

@@ -92,7 +92,6 @@ import org.apache.hadoop.yarn.util.ConverterUtils;
 
 //TODO:
 //task cleanup for all non completed tasks
-
 public class RecoveryService extends CompositeService implements Recovery {
 
   private static final Log LOG = LogFactory.getLog(RecoveryService.class);
@@ -411,8 +410,7 @@ public class RecoveryService extends CompositeService implements Recovery {
       if (cntrs == null) {
         taskAttemptStatus.counters = null;
       } else {
-        taskAttemptStatus.counters = TypeConverter.toYarn(attemptInfo
-            .getCounters());
+        taskAttemptStatus.counters = cntrs;
       }
       actualHandler.handle(new TaskAttemptStatusUpdateEvent(
           taskAttemptStatus.id, taskAttemptStatus));

+ 133 - 131
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java

@@ -33,6 +33,7 @@ import javax.ws.rs.core.Response.Status;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
@@ -42,6 +43,8 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AppInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptsInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobCounterInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo;
@@ -76,14 +79,90 @@ public class AMWebServices {
   }
 
   Boolean hasAccess(Job job, HttpServletRequest request) {
-    UserGroupInformation callerUgi = UserGroupInformation
-        .createRemoteUser(request.getRemoteUser());
-    if (!job.checkAccess(callerUgi, JobACL.VIEW_JOB)) {
+    String remoteUser = request.getRemoteUser();
+    UserGroupInformation callerUGI = null;
+    if (remoteUser != null) {
+      callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
+    }
+    if (callerUGI != null && !job.checkAccess(callerUGI, JobACL.VIEW_JOB)) {
       return false;
     }
     return true;
   }
 
+  /**
+   * convert a job id string to an actual job and handle all the error checking.
+   */
+  public static Job getJobFromJobIdString(String jid, AppContext appCtx) throws NotFoundException {
+    JobId jobId;
+    Job job;
+    try {
+      jobId = MRApps.toJobID(jid);
+    } catch (YarnException e) {
+      throw new NotFoundException(e.getMessage());
+    }
+    if (jobId == null) {
+      throw new NotFoundException("job, " + jid + ", is not found");
+    }
+    job = appCtx.getJob(jobId);
+    if (job == null) {
+      throw new NotFoundException("job, " + jid + ", is not found");
+    }
+    return job;
+  }
+
+  /**
+   * convert a task id string to an actual task and handle all the error
+   * checking.
+   */
+  public static Task getTaskFromTaskIdString(String tid, Job job) throws NotFoundException {
+    TaskId taskID;
+    Task task;
+    try {
+      taskID = MRApps.toTaskID(tid);
+    } catch (YarnException e) {
+      throw new NotFoundException(e.getMessage());
+    } catch (NumberFormatException ne) {
+      throw new NotFoundException(ne.getMessage());
+    }
+    if (taskID == null) {
+      throw new NotFoundException("taskid " + tid + " not found or invalid");
+    }
+    task = job.getTask(taskID);
+    if (task == null) {
+      throw new NotFoundException("task not found with id " + tid);
+    }
+    return task;
+  }
+
+  /**
+   * convert a task attempt id string to an actual task attempt and handle all
+   * the error checking.
+   */
+  public static TaskAttempt getTaskAttemptFromTaskAttemptString(String attId, Task task)
+      throws NotFoundException {
+    TaskAttemptId attemptId;
+    TaskAttempt ta;
+    try {
+      attemptId = MRApps.toTaskAttemptID(attId);
+    } catch (YarnException e) {
+      throw new NotFoundException(e.getMessage());
+    } catch (NumberFormatException ne) {
+      throw new NotFoundException(ne.getMessage());
+    }
+    if (attemptId == null) {
+      throw new NotFoundException("task attempt id " + attId
+          + " not found or invalid");
+    }
+    ta = task.getAttempt(attemptId);
+    if (ta == null) {
+      throw new NotFoundException("Error getting info on task attempt id "
+          + attId);
+    }
+    return ta;
+  }
+
+
   /**
    * check for job access.
    *
@@ -130,16 +209,23 @@ public class AMWebServices {
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public JobInfo getJob(@Context HttpServletRequest hsr,
       @PathParam("jobid") String jid) {
-    JobId jobId = MRApps.toJobID(jid);
-    if (jobId == null) {
-      throw new NotFoundException("job, " + jid + ", is not found");
-    }
-    Job job = appCtx.getJob(jobId);
-    if (job == null) {
-      throw new NotFoundException("job, " + jid + ", is not found");
-    }
+    Job job = getJobFromJobIdString(jid, appCtx);
     return new JobInfo(job, hasAccess(job, hsr));
+  }
+
+  @GET
+  @Path("/jobs/{jobid}/jobattempts")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public AMAttemptsInfo getJobAttempts(@PathParam("jobid") String jid) {
 
+    Job job = getJobFromJobIdString(jid, appCtx);
+    AMAttemptsInfo amAttempts = new AMAttemptsInfo();
+    for (AMInfo amInfo : job.getAMInfos()) {
+      AMAttemptInfo attempt = new AMAttemptInfo(amInfo, MRApps.toString(
+            job.getID()), job.getUserName());
+      amAttempts.add(attempt);
+    }
+    return amAttempts;
   }
 
   @GET
@@ -147,63 +233,25 @@ public class AMWebServices {
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public JobCounterInfo getJobCounters(@Context HttpServletRequest hsr,
       @PathParam("jobid") String jid) {
-    JobId jobId = MRApps.toJobID(jid);
-    if (jobId == null) {
-      throw new NotFoundException("job, " + jid + ", is not found");
-    }
-    Job job = appCtx.getJob(jobId);
-    if (job == null) {
-      throw new NotFoundException("job, " + jid + ", is not found");
-    }
+    Job job = getJobFromJobIdString(jid, appCtx);
     checkAccess(job, hsr);
     return new JobCounterInfo(this.appCtx, job);
   }
 
-  @GET
-  @Path("/jobs/{jobid}/tasks/{taskid}/counters")
-  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
-  public JobTaskCounterInfo getSingleTaskCounters(
-      @Context HttpServletRequest hsr, @PathParam("jobid") String jid,
-      @PathParam("taskid") String tid) {
-    JobId jobId = MRApps.toJobID(jid);
-    if (jobId == null) {
-      throw new NotFoundException("job, " + jid + ", is not found");
-    }
-    Job job = this.appCtx.getJob(jobId);
-    if (job == null) {
-      throw new NotFoundException("job, " + jid + ", is not found");
-    }
-    checkAccess(job, hsr);
-    TaskId taskID = MRApps.toTaskID(tid);
-    if (taskID == null) {
-      throw new NotFoundException("taskid " + tid + " not found or invalid");
-    }
-    Task task = job.getTask(taskID);
-    if (task == null) {
-      throw new NotFoundException("task not found with id " + tid);
-    }
-    return new JobTaskCounterInfo(task);
-  }
-
   @GET
   @Path("/jobs/{jobid}/conf")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public ConfInfo getJobConf(@Context HttpServletRequest hsr,
       @PathParam("jobid") String jid) {
-    JobId jobId = MRApps.toJobID(jid);
-    if (jobId == null) {
-      throw new NotFoundException("job, " + jid + ", is not found");
-    }
-    Job job = appCtx.getJob(jobId);
-    if (job == null) {
-      throw new NotFoundException("job, " + jid + ", is not found");
-    }
+
+    Job job = getJobFromJobIdString(jid, appCtx);
     checkAccess(job, hsr);
     ConfInfo info;
     try {
       info = new ConfInfo(job, this.conf);
     } catch (IOException e) {
-      throw new NotFoundException("unable to load configuration for job: " + jid);
+      throw new NotFoundException("unable to load configuration for job: "
+          + jid);
     }
     return info;
   }
@@ -213,10 +261,8 @@ public class AMWebServices {
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public TasksInfo getJobTasks(@Context HttpServletRequest hsr,
       @PathParam("jobid") String jid, @QueryParam("type") String type) {
-    Job job = this.appCtx.getJob(MRApps.toJobID(jid));
-    if (job == null) {
-      throw new NotFoundException("job, " + jid + ", is not found");
-    }
+
+    Job job = getJobFromJobIdString(jid, appCtx);
     checkAccess(job, hsr);
     TasksInfo allTasks = new TasksInfo();
     for (Task task : job.getTasks().values()) {
@@ -225,7 +271,8 @@ public class AMWebServices {
         try {
           ttype = MRApps.taskType(type);
         } catch (YarnException e) {
-          throw new BadRequestException("tasktype must be either m or r");        }
+          throw new BadRequestException("tasktype must be either m or r");
+        }
       }
       if (ttype != null && task.getType() != ttype) {
         continue;
@@ -240,21 +287,24 @@ public class AMWebServices {
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
   public TaskInfo getJobTask(@Context HttpServletRequest hsr,
       @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
-    Job job = this.appCtx.getJob(MRApps.toJobID(jid));
-    if (job == null) {
-      throw new NotFoundException("job, " + jid + ", is not found");
-    }
+
+    Job job = getJobFromJobIdString(jid, appCtx);
     checkAccess(job, hsr);
-    TaskId taskID = MRApps.toTaskID(tid);
-    if (taskID == null) {
-      throw new NotFoundException("taskid " + tid + " not found or invalid");
-    }
-    Task task = job.getTask(taskID);
-    if (task == null) {
-      throw new NotFoundException("task not found with id " + tid);
-    }
+    Task task = getTaskFromTaskIdString(tid, job);
     return new TaskInfo(task);
+  }
+
+  @GET
+  @Path("/jobs/{jobid}/tasks/{taskid}/counters")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public JobTaskCounterInfo getSingleTaskCounters(
+      @Context HttpServletRequest hsr, @PathParam("jobid") String jid,
+      @PathParam("taskid") String tid) {
 
 
+    Job job = getJobFromJobIdString(jid, appCtx);
+    checkAccess(job, hsr);
+    Task task = getTaskFromTaskIdString(tid, job);
+    return new JobTaskCounterInfo(task);
   }
   }
 
 
   @GET
   @GET
@@ -263,19 +313,11 @@ public class AMWebServices {
   public TaskAttemptsInfo getJobTaskAttempts(@Context HttpServletRequest hsr,
       @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
     TaskAttemptsInfo attempts = new TaskAttemptsInfo();
-    Job job = this.appCtx.getJob(MRApps.toJobID(jid));
-    if (job == null) {
-      throw new NotFoundException("job, " + jid + ", is not found");
-    }
+
+    Job job = getJobFromJobIdString(jid, appCtx);
     checkAccess(job, hsr);
-    TaskId taskID = MRApps.toTaskID(tid);
-    if (taskID == null) {
-      throw new NotFoundException("taskid " + tid + " not found or invalid");
-    }
-    Task task = job.getTask(taskID);
-    if (task == null) {
-      throw new NotFoundException("task not found with id " + tid);
-    }
+    Task task = getTaskFromTaskIdString(tid, job);
+
     for (TaskAttempt ta : task.getAttempts().values()) {
       if (ta != null) {
         if (task.getType() == TaskType.REDUCE) {
@@ -294,29 +336,11 @@ public class AMWebServices {
   public TaskAttemptInfo getJobTaskAttemptId(@Context HttpServletRequest hsr,
       @PathParam("jobid") String jid, @PathParam("taskid") String tid,
       @PathParam("attemptid") String attId) {
-    Job job = this.appCtx.getJob(MRApps.toJobID(jid));
-    if (job == null) {
-      throw new NotFoundException("job, " + jid + ", is not found");
-    }
+
+    Job job = getJobFromJobIdString(jid, appCtx);
     checkAccess(job, hsr);
-    TaskId taskID = MRApps.toTaskID(tid);
-    if (taskID == null) {
-      throw new NotFoundException("taskid " + tid + " not found or invalid");
-    }
-    Task task = job.getTask(taskID);
-    if (task == null) {
-      throw new NotFoundException("task not found with id " + tid);
-    }
-    TaskAttemptId attemptId = MRApps.toTaskAttemptID(attId);
-    if (attemptId == null) {
-      throw new NotFoundException("task attempt id " + attId
-          + " not found or invalid");
-    }
-    TaskAttempt ta = task.getAttempt(attemptId);
-    if (ta == null) {
-      throw new NotFoundException("Error getting info on task attempt id "
-          + attId);
-    }
+    Task task = getTaskFromTaskIdString(tid, job);
+    TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task);
     if (task.getType() == TaskType.REDUCE) {
       return new ReduceTaskAttemptInfo(ta, task.getType());
     } else {
@@ -330,33 +354,11 @@ public class AMWebServices {
   public JobTaskAttemptCounterInfo getJobTaskAttemptIdCounters(
       @Context HttpServletRequest hsr, @PathParam("jobid") String jid,
       @PathParam("taskid") String tid, @PathParam("attemptid") String attId) {
-    JobId jobId = MRApps.toJobID(jid);
-    if (jobId == null) {
-      throw new NotFoundException("job, " + jid + ", is not found");
-    }
-    Job job = this.appCtx.getJob(jobId);
-    if (job == null) {
-      throw new NotFoundException("job, " + jid + ", is not found");
-    }
+
+    Job job = getJobFromJobIdString(jid, appCtx);
     checkAccess(job, hsr);
-    TaskId taskID = MRApps.toTaskID(tid);
-    if (taskID == null) {
-      throw new NotFoundException("taskid " + tid + " not found or invalid");
-    }
-    Task task = job.getTask(taskID);
-    if (task == null) {
-      throw new NotFoundException("task not found with id " + tid);
-    }
-    TaskAttemptId attemptId = MRApps.toTaskAttemptID(attId);
-    if (attemptId == null) {
-      throw new NotFoundException("task attempt id " + attId
-          + " not found or invalid");
-    }
-    TaskAttempt ta = task.getAttempt(attemptId);
-    if (ta == null) {
-      throw new NotFoundException("Error getting info on task attempt id "
-          + attId);
-    }
+    Task task = getTaskFromTaskIdString(tid, job);
+    TaskAttempt ta = getTaskAttemptFromTaskAttemptString(attId, task);
     return new JobTaskAttemptCounterInfo(ta);
   }
 }

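The AMWebServices refactoring funnels every jobid/taskid/attemptid string through one static helper that throws NotFoundException on any parse or lookup failure, so all endpoints report missing resources uniformly. A compact, standalone sketch of that parse-or-404 shape (this NotFoundException is a stand-in for the webapp's exception type, and the integer parsing is purely illustrative):

    class NotFoundException extends RuntimeException {
        NotFoundException(String msg) {
            super(msg);
        }
    }

    class IdParsingSketch {
        static int getTaskIndexFromString(String tid) {
            int idx;
            try {
                idx = Integer.parseInt(tid);
            } catch (NumberFormatException e) {
                // Translate every parse failure into the same 404-style
                // error so no endpoint invents its own message.
                throw new NotFoundException(
                    "taskid " + tid + " not found or invalid");
            }
            if (idx < 0) {
                throw new NotFoundException(
                    "taskid " + tid + " not found or invalid");
            }
            return idx;
        }

        public static void main(String[] args) {
            System.out.println(getTaskIndexFromString("7"));
            try {
                getTaskIndexFromString("bogus");
            } catch (NotFoundException e) {
                System.out.println(e.getMessage());
            }
        }
    }
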
+ 27 - 21
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java

@@ -18,25 +18,32 @@
 
 package org.apache.hadoop.mapreduce.v2.app.webapp;
 
-import com.google.inject.Inject;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_TABLE;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
+
 import java.util.Map;
 
-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.CounterGroup;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
-import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TD;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.THEAD;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
-import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+import com.google.inject.Inject;
 
 public class CountersBlock extends HtmlBlock {
   Job job;
@@ -62,8 +69,7 @@ public class CountersBlock extends HtmlBlock {
       return;
     }
 
-    if(total == null || total.getAllCounterGroups() == null || 
-        total.getAllCounterGroups().size() <= 0) {
+    if(total == null || total.getGroupNames() == null) {
       String type = $(TASK_ID);
       if(type == null || type.isEmpty()) {
         type = $(JOB_ID, "the job");
@@ -93,9 +99,9 @@ public class CountersBlock extends HtmlBlock {
             th(".group.ui-state-default", "Counter Group").
             th(".group.ui-state-default", "Counter Group").
             th(".ui-state-default", "Counters")._()._().
             th(".ui-state-default", "Counters")._()._().
         tbody();
         tbody();
-    for (CounterGroup g : total.getAllCounterGroups().values()) {
-      CounterGroup mg = map == null ? null : map.getCounterGroup(g.getName());
-      CounterGroup rg = reduce == null ? null : reduce.getCounterGroup(g.getName());
+    for (CounterGroup g : total) {
+      CounterGroup mg = map == null ? null : map.getGroup(g.getName());
+      CounterGroup rg = reduce == null ? null : reduce.getGroup(g.getName());
       ++numGroups;
       // This is mostly for demonstration :) Typically we'd introduced
       // a CounterGroup block to reduce the verbosity. OTOH, this
@@ -116,7 +122,7 @@ public class CountersBlock extends HtmlBlock {
       TBODY<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>> group = groupHeadRow.
             th(map == null ? "Value" : "Total")._()._().
         tbody();
-      for (Counter counter : g.getAllCounters().values()) {
+      for (Counter counter : g) {
         // Ditto
         TR<TBODY<TABLE<TD<TR<TBODY<TABLE<DIV<Hamlet>>>>>>>> groupRow = group.
           tr();
@@ -130,8 +136,8 @@ public class CountersBlock extends HtmlBlock {
             _();
           }
         if (map != null) {
-          Counter mc = mg == null ? null : mg.getCounter(counter.getName());
-          Counter rc = rg == null ? null : rg.getCounter(counter.getName());
+          Counter mc = mg == null ? null : mg.findCounter(counter.getName());
+          Counter rc = rg == null ? null : rg.findCounter(counter.getName());
           groupRow.
             td(mc == null ? "0" : String.valueOf(mc.getValue())).
             td(rc == null ? "0" : String.valueOf(rc.getValue()));
@@ -173,14 +179,14 @@ public class CountersBlock extends HtmlBlock {
     }
     // Get all types of counters
     Map<TaskId, Task> tasks = job.getTasks();
-    total = job.getCounters();
-    map = JobImpl.newCounters();
-    reduce = JobImpl.newCounters();
+    total = job.getAllCounters();
+    map = new Counters();
+    reduce = new Counters();
     for (Task t : tasks.values()) {
       Counters counters = t.getCounters();
       switch (t.getType()) {
-        case MAP:     JobImpl.incrAllCounters(map, counters);     break;
-        case REDUCE:  JobImpl.incrAllCounters(reduce, counters);  break;
+        case MAP:     map.incrAllCounters(counters);     break;
+        case REDUCE:  reduce.incrAllCounters(counters);  break;
       }
     }
   }

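CountersBlock now talks to org.apache.hadoop.mapreduce.Counters directly, which is iterable over its groups (and each group over its counters), so the old getAllCounterGroups()/getAllCounters() map walks disappear. A small tour of the calls the block relies on, assuming hadoop-mapreduce-client-core on the classpath:

    import org.apache.hadoop.mapreduce.Counter;
    import org.apache.hadoop.mapreduce.CounterGroup;
    import org.apache.hadoop.mapreduce.Counters;

    public class CountersTour {
        public static void main(String[] args) {
            Counters total = new Counters();
            total.findCounter("example", "RECORDS").increment(5);

            Counters perTask = new Counters();
            perTask.findCounter("example", "RECORDS").increment(2);

            // Merge per-task counters into a running total, as the block
            // does when accumulating map and reduce counters.
            total.incrAllCounters(perTask);

            // Counters and CounterGroup are Iterable, which is what lets
            // the for-each loops above replace the old map accessors.
            for (CounterGroup g : total) {
                for (Counter c : g) {
                    System.out.println(g.getName() + ":" + c.getName()
                        + " = " + c.getValue()); // example:RECORDS = 7
                }
            }
        }
    }
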
+ 13 - 10
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JAXBContextResolver.java

@@ -30,6 +30,8 @@ import javax.ws.rs.ext.ContextResolver;
 import javax.ws.rs.ext.Provider;
 import javax.xml.bind.JAXBContext;
 
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptsInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AppInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo;
@@ -47,6 +49,7 @@ import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskCounterGroupInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskCounterInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TasksInfo;
+import org.apache.hadoop.yarn.webapp.RemoteExceptionData;
 
 @Singleton
 @Provider
@@ -54,22 +57,22 @@ public class JAXBContextResolver implements ContextResolver<JAXBContext> {
 
   private JAXBContext context;
   private final Set<Class> types;
-    
+
   // you have to specify all the dao classes here
-  private final Class[] cTypes = {AppInfo.class, CounterInfo.class,
-      JobTaskAttemptCounterInfo.class, JobTaskCounterInfo.class,
-      TaskCounterGroupInfo.class, ConfInfo.class, JobCounterInfo.class,
-      TaskCounterInfo.class, CounterGroupInfo.class, JobInfo.class, 
-      JobsInfo.class, ReduceTaskAttemptInfo.class, TaskAttemptInfo.class,
-      TaskInfo.class, TasksInfo.class, TaskAttemptsInfo.class,
-      ConfEntryInfo.class};
-    
+  private final Class[] cTypes = {AMAttemptInfo.class, AMAttemptsInfo.class,
+    AppInfo.class, CounterInfo.class, JobTaskAttemptCounterInfo.class,
+    JobTaskCounterInfo.class, TaskCounterGroupInfo.class, ConfInfo.class,
+    JobCounterInfo.class, TaskCounterInfo.class, CounterGroupInfo.class,
+    JobInfo.class, JobsInfo.class, ReduceTaskAttemptInfo.class,
+    TaskAttemptInfo.class, TaskInfo.class, TasksInfo.class,
+    TaskAttemptsInfo.class, ConfEntryInfo.class, RemoteExceptionData.class};
+
   public JAXBContextResolver() throws Exception {
     this.types = new HashSet<Class>(Arrays.asList(cTypes));
     this.context = new JSONJAXBContext(JSONConfiguration.natural().
         rootUnwrapping(false).build(), cTypes);
   }
-    
+
   @Override
   public JAXBContext getContext(Class<?> objectType) {
     return (types.contains(objectType)) ? context : null;

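The resolver above hands Jersey a single natural-JSON JAXBContext for every registered DAO class and null for everything else, which makes the runtime fall back to its default context. A minimal sketch of that ContextResolver shape using plain JAXB, without the Jersey JSON layer (HypotheticalInfo is an invented DAO; a JAX-RS API jar is assumed for ContextResolver):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;
    import javax.ws.rs.ext.ContextResolver;
    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.annotation.XmlRootElement;

    @XmlRootElement
    class HypotheticalInfo {
        public String name = "example";
    }

    class MiniContextResolver implements ContextResolver<JAXBContext> {
        private final JAXBContext context;
        private final Set<Class<?>> types;
        private final Class<?>[] cTypes = { HypotheticalInfo.class };

        MiniContextResolver() throws Exception {
            this.types = new HashSet<Class<?>>(Arrays.asList(cTypes));
            this.context = JAXBContext.newInstance(cTypes);
        }

        @Override
        public JAXBContext getContext(Class<?> objectType) {
            // Unregistered types get null, so the runtime uses its default.
            return types.contains(objectType) ? context : null;
        }

        public static void main(String[] args) throws Exception {
            MiniContextResolver r = new MiniContextResolver();
            System.out.println(r.getContext(HypotheticalInfo.class) != null);
            System.out.println(r.getContext(String.class) != null);
        }
    }
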
+ 46 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobBlock.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.v2.app.webapp;
 
 import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID;
 import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD;
@@ -28,14 +29,22 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
 
 import java.util.Date;
+import java.util.List;
 
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AMAttemptInfo;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
 
@@ -62,6 +71,11 @@ public class JobBlock extends HtmlBlock {
         p()._("Sorry, ", jid, " not found.")._();
         p()._("Sorry, ", jid, " not found.")._();
       return;
       return;
     }
     }
+
+    List<AMInfo> amInfos = job.getAMInfos();
+    String amString =
+        amInfos.size() == 1 ? "ApplicationMaster" : "ApplicationMasters"; 
+
     JobInfo jinfo = new JobInfo(job, true);
     info("Job Overview").
         _("Job Name:", jinfo.getName()).
@@ -69,10 +83,40 @@ public class JobBlock extends HtmlBlock {
         _("Uberized:", jinfo.isUberized()).
         _("Uberized:", jinfo.isUberized()).
         _("Started:", new Date(jinfo.getStartTime())).
         _("Started:", new Date(jinfo.getStartTime())).
         _("Elapsed:", StringUtils.formatTime(jinfo.getElapsedTime()));
         _("Elapsed:", StringUtils.formatTime(jinfo.getElapsedTime()));
-    html.
+    DIV<Hamlet> div = html.
       _(InfoBlock.class).
       _(InfoBlock.class).
-      div(_INFO_WRAP).
+      div(_INFO_WRAP);
+
+    // MRAppMasters Table
+    TABLE<DIV<Hamlet>> table = div.table("#job");
+    table.
+      tr().
+      th(amString).
+      _().
+      tr().
+      th(_TH, "Attempt Number").
+      th(_TH, "Start Time").
+      th(_TH, "Node").
+      th(_TH, "Logs").
+      _();
+    for (AMInfo amInfo : amInfos) {
+      AMAttemptInfo attempt = new AMAttemptInfo(amInfo,
+          jinfo.getId(), jinfo.getUserName());
+
+      table.tr().
+        td(String.valueOf(attempt.getAttemptId())).
+        td(new Date(attempt.getStartTime()).toString()).
+        td().a(".nodelink", url("http://", attempt.getNodeHttpAddress()), 
+            attempt.getNodeHttpAddress())._().
+        td().a(".logslink", url(attempt.getLogsLink()), 
+            "logs")._().
+        _();
+    }
+
+    table._();
+    div._();
 
+    html.div(_INFO_WRAP).
       // Tasks table
         table("#job").
           tr().

+ 15 - 11
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/SingleCounterBlock.java

@@ -18,13 +18,18 @@
 
 package org.apache.hadoop.mapreduce.v2.app.webapp;
 
-import com.google.inject.Inject;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.COUNTER_GROUP;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.COUNTER_NAME;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
+
 import java.util.Map;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.TreeMap;
 
 
-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.CounterGroup;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
@@ -40,8 +45,7 @@ import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

-import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
+import com.google.inject.Inject;

 public class SingleCounterBlock extends HtmlBlock {
   protected TreeMap<String, Long> values = new TreeMap<String, Long>(); 
@@ -122,10 +126,10 @@ public class SingleCounterBlock extends HtmlBlock {
         task.getAttempts().entrySet()) {
         long value = 0;
         Counters counters = entry.getValue().getCounters();
-        CounterGroup group = (counters != null)
-        		? counters.getCounterGroup($(COUNTER_GROUP)) : null;
+        CounterGroup group = (counters != null) ? counters
+          .getGroup($(COUNTER_GROUP)) : null;
         if(group != null)  {
-          Counter c = group.getCounter($(COUNTER_NAME));
+          Counter c = group.findCounter($(COUNTER_NAME));
           if(c != null) {
             value = c.getValue();
           }
@@ -140,9 +144,9 @@ public class SingleCounterBlock extends HtmlBlock {
     for(Map.Entry<TaskId, Task> entry : tasks.entrySet()) {
       long value = 0;
       CounterGroup group = entry.getValue().getCounters()
-      .getCounterGroup($(COUNTER_GROUP));
+        .getGroup($(COUNTER_GROUP));
       if(group != null)  {
-        Counter c = group.getCounter($(COUNTER_NAME));
+        Counter c = group.findCounter($(COUNTER_NAME));
         if(c != null) {
           value = c.getValue();
         }

+ 95 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptInfo.java

@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.app.webapp.dao;
+
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+
+@XmlRootElement(name = "jobAttempt")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class AMAttemptInfo {
+
+  protected String nodeHttpAddress;
+  protected String nodeId;
+  protected int id;
+  protected long startTime;
+  protected String containerId;
+  protected String logsLink;
+
+  public AMAttemptInfo() {
+  }
+
+  public AMAttemptInfo(AMInfo amInfo, String jobId, String user) {
+
+    this.nodeHttpAddress = "";
+    this.nodeId = "";
+    String nmHost = amInfo.getNodeManagerHost();
+    int nmHttpPort = amInfo.getNodeManagerHttpPort();
+    int nmPort = amInfo.getNodeManagerPort();
+    if (nmHost != null) {
+      this.nodeHttpAddress = nmHost + ":" + nmHttpPort;
+      NodeId nodeId = BuilderUtils.newNodeId(nmHost, nmPort);
+      this.nodeId = nodeId.toString();
+    }
+
+    this.id = amInfo.getAppAttemptId().getAttemptId();
+    this.startTime = amInfo.getStartTime();
+    this.containerId = "";
+    this.logsLink = "";
+    ContainerId containerId = amInfo.getContainerId();
+    if (containerId != null) {
+      this.containerId = containerId.toString();
+      this.logsLink = join("http://" + nodeHttpAddress,
+          ujoin("node", "containerlogs", this.containerId));
+    }
+  }
+
+  public String getNodeHttpAddress() {
+    return this.nodeHttpAddress;
+  }
+
+  public String getNodeId() {
+    return this.nodeId;
+  }
+
+  public int getAttemptId() {
+    return this.id;
+  }
+
+  public long getStartTime() {
+    return this.startTime;
+  }
+
+  public String getContainerId() {
+    return this.containerId;
+  }
+
+  public String getLogsLink() {
+    return this.logsLink;
+  }
+
+}

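To make the logsLink construction in AMAttemptInfo concrete: join and ujoin come from org.apache.hadoop.yarn.util.StringHelper, and the assumption here is that they compose '/'-separated path segments. For a hypothetical NodeManager at nm.example.com with HTTP port 8042 and the illustrative container id below, the link works out as in this self-contained sketch (not the real StringHelper):

    public class LogsLinkSketch {
      // Stand-in for the assumed join/ujoin semantics: '/'-separated segments.
      static String join(String... parts) { return String.join("/", parts); }

      public static void main(String[] args) {
        String nmHost = "nm.example.com"; // hypothetical host
        int nmHttpPort = 8042;            // hypothetical NM web port
        String containerId = "container_1326232085508_0004_01_000001"; // illustrative
        String nodeHttpAddress = nmHost + ":" + nmHttpPort;
        String logsLink = join("http://" + nodeHttpAddress,
            "node", "containerlogs", containerId);
        System.out.println(logsLink);
        // http://nm.example.com:8042/node/containerlogs/container_1326232085508_0004_01_000001
      }
    }
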
+ 45 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AMAttemptsInfo.java

@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.app.webapp.dao;
+
+import java.util.ArrayList;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement(name = "jobAttempts")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class AMAttemptsInfo {
+
+  @XmlElement(name = "jobAttempt")
+  protected ArrayList<AMAttemptInfo> attempt = new ArrayList<AMAttemptInfo>();
+
+  public AMAttemptsInfo() {
+  } // JAXB needs this
+
+  public void add(AMAttemptInfo info) {
+    this.attempt.add(info);
+  }
+
+  public ArrayList<AMAttemptInfo> getAttempts() {
+    return this.attempt;
+  }
+
+}

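The JAXB annotations on AMAttemptsInfo and AMAttemptInfo are what shape the REST representation: the list field is named attempt but is serialized as repeated jobAttempt elements under a jobAttempts root. A sketch of how these DAOs would marshal, using only the standard javax.xml.bind API (output shown is approximate):

    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.Marshaller;

    public class AmAttemptsMarshalSketch {
      public static void main(String[] args) throws Exception {
        AMAttemptsInfo attempts = new AMAttemptsInfo();
        attempts.add(new AMAttemptInfo()); // default instance, just for shape
        Marshaller m = JAXBContext.newInstance(AMAttemptsInfo.class)
            .createMarshaller();
        m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
        m.marshal(attempts, System.out);
        // Roughly:
        // <jobAttempts>
        //   <jobAttempt><id>0</id><startTime>0</startTime></jobAttempt>
        // </jobAttempts>
      }
    }
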
+ 1 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/AppInfo.java

@@ -32,7 +32,6 @@ public class AppInfo {
   protected String appId;
   protected String name;
   protected String user;
-  protected String hostname;
   protected long startedOn;
   protected long elapsedTime;

@@ -44,7 +43,7 @@ public class AppInfo {
     this.name = context.getApplicationName().toString();
     this.user = context.getUser().toString();
     this.startedOn = context.getStartTime();
-    this.elapsedTime = Times.elapsed(context.getStartTime(), 0);
+    this.elapsedTime = Times.elapsed(this.startedOn, 0);
   }

   public String getId() {

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/ConfInfo.java

@@ -30,7 +30,7 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;

-@XmlRootElement
+@XmlRootElement(name = "conf")
 @XmlAccessorType(XmlAccessType.FIELD)
 public class ConfInfo {


+ 6 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterGroupInfo.java

@@ -24,8 +24,8 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;

-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.CounterGroup;

 @XmlRootElement(name = "counterGroup")
 @XmlAccessorType(XmlAccessType.FIELD)
@@ -38,14 +38,14 @@ public class CounterGroupInfo {
   public CounterGroupInfo() {
   }

-  public CounterGroupInfo(String name, CounterGroup g, CounterGroup mg,
+  public CounterGroupInfo(String name, CounterGroup group, CounterGroup mg,
       CounterGroup rg) {
     this.counterGroupName = name;
     this.counter = new ArrayList<CounterInfo>();

-    for (Counter c : g.getAllCounters().values()) {
-      Counter mc = mg == null ? null : mg.getCounter(c.getName());
-      Counter rc = rg == null ? null : rg.getCounter(c.getName());
+    for (Counter c : group) {
+      Counter mc = mg == null ? null : mg.findCounter(c.getName());
+      Counter rc = rg == null ? null : rg.findCounter(c.getName());
       CounterInfo cinfo = new CounterInfo(c, mc, rc);
       this.counter.add(cinfo);
     }

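The recurring import swap in these DAO files, from the org.apache.hadoop.mapreduce.v2.api.records counter types to org.apache.hadoop.mapreduce, also changes the idioms: getGroup/findCounter replace getCounterGroup/getCounter, and CounterGroup is Iterable<Counter>, which is what permits the plain for-each loops above. A small sketch against that API (method names as the diff uses them; treat exact behavior, such as findCounter creating counters on demand, as an assumption):

    import org.apache.hadoop.mapreduce.Counter;
    import org.apache.hadoop.mapreduce.CounterGroup;
    import org.apache.hadoop.mapreduce.Counters;

    public class CountersApiSketch {
      public static void main(String[] args) {
        Counters counters = new Counters();
        counters.findCounter("demo", "RECORDS").increment(3);

        CounterGroup group = counters.getGroup("demo");
        for (Counter c : group) { // CounterGroup is Iterable<Counter>
          System.out.println(c.getName() + " = " + c.getValue());
        }

        // In-place aggregation, as JobCounterInfo now does:
        Counters more = new Counters();
        more.findCounter("demo", "RECORDS").increment(2);
        counters.incrAllCounters(more);
        System.out.println(group.findCounter("RECORDS").getValue()); // 5
      }
    }
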
+ 5 - 5
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/CounterInfo.java

@@ -21,13 +21,13 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;

-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
+import org.apache.hadoop.mapreduce.Counter;

 @XmlRootElement
 @XmlAccessorType(XmlAccessType.FIELD)
 public class CounterInfo {

-  protected String counterName;
+  protected String name;
   protected long totalCounterValue;
   protected long mapCounterValue;
   protected long reduceCounterValue;
@@ -35,9 +35,9 @@ public class CounterInfo {
   public CounterInfo() {
   }

-  public CounterInfo(Counter counter, Counter mc, Counter rc) {
-    this.counterName = counter.getName();
-    this.totalCounterValue = counter.getValue();
+  public CounterInfo(Counter c, Counter mc, Counter rc) {
+    this.name = c.getName();
+    this.totalCounterValue = c.getValue();
     this.mapCounterValue = mc == null ? 0 : mc.getValue();
     this.reduceCounterValue = rc == null ? 0 : rc.getValue();
   }

+ 17 - 21
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobCounterInfo.java

@@ -25,13 +25,12 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.XmlTransient;

-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.CounterGroup;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
-import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;

 @XmlRootElement(name = "jobCounters")
@@ -46,52 +45,49 @@ public class JobCounterInfo {
   protected Counters reduce = null;

   protected String id;
-  protected ArrayList<CounterGroupInfo> counterGroups;
+  protected ArrayList<CounterGroupInfo> counterGroup;

   public JobCounterInfo() {
   }

   public JobCounterInfo(AppContext ctx, Job job) {
     getCounters(ctx, job);
-    counterGroups = new ArrayList<CounterGroupInfo>();
+    counterGroup = new ArrayList<CounterGroupInfo>();
     this.id = MRApps.toString(job.getID());

-    int numGroups = 0;
-
     if (total != null) {
-      for (CounterGroup g : total.getAllCounterGroups().values()) {
+      for (CounterGroup g : total) {
         if (g != null) {
-          CounterGroup mg = map == null ? null : map.getCounterGroup(g
-              .getName());
-          CounterGroup rg = reduce == null ? null : reduce.getCounterGroup(g
-              .getName());
-          ++numGroups;
+          CounterGroup mg = map == null ? null : map.getGroup(g.getName());
+          CounterGroup rg = reduce == null ? null : reduce
+            .getGroup(g.getName());

-          CounterGroupInfo cginfo = new CounterGroupInfo(g.getName(), g, mg, rg);
-          counterGroups.add(cginfo);
+          CounterGroupInfo cginfo = new CounterGroupInfo(g.getName(), g,
+            mg, rg);
+          counterGroup.add(cginfo);
         }
       }
     }
   }

   private void getCounters(AppContext ctx, Job job) {
-    total = JobImpl.newCounters();
+    total = new Counters();
     if (job == null) {
       return;
     }
-    map = JobImpl.newCounters();
-    reduce = JobImpl.newCounters();
+    map = new Counters();
+    reduce = new Counters();
     // Get all types of counters
     Map<TaskId, Task> tasks = job.getTasks();
     for (Task t : tasks.values()) {
       Counters counters = t.getCounters();
-      JobImpl.incrAllCounters(total, counters);
+      total.incrAllCounters(counters);
       switch (t.getType()) {
       case MAP:
-        JobImpl.incrAllCounters(map, counters);
+        map.incrAllCounters(counters);
         break;
       case REDUCE:
-        JobImpl.incrAllCounters(reduce, counters);
+        reduce.incrAllCounters(counters);
         break;
       }
     }

+ 10 - 14
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobInfo.java

@@ -30,6 +30,7 @@ import javax.xml.bind.annotation.XmlTransient;

 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
@@ -51,12 +52,12 @@ public class JobInfo {
   protected String id;
   protected String name;
   protected String user;
-  protected String state;
+  protected JobState state;
   protected int mapsTotal;
   protected int mapsCompleted;
-  protected float mapProgress;
   protected int reducesTotal;
   protected int reducesCompleted;
+  protected float mapProgress;
   protected float reduceProgress;

   @XmlTransient
@@ -83,18 +84,12 @@ public class JobInfo {
   protected int successfulMapAttempts = 0;
   protected ArrayList<ConfEntryInfo> acls;

-  @XmlTransient
-  protected int numMaps;
-  @XmlTransient
-  protected int numReduces;
-
   public JobInfo() {
   }

   public JobInfo(Job job, Boolean hasAccess) {
     this.id = MRApps.toString(job.getID());
     JobReport report = job.getReport();
-    countTasksAndAttempts(job);
     this.startTime = report.getStartTime();
     this.finishTime = report.getFinishTime();
     this.elapsedTime = Times.elapsed(this.startTime, this.finishTime);
@@ -103,7 +98,7 @@ public class JobInfo {
     }
     this.name = job.getName().toString();
     this.user = job.getUserName();
-    this.state = job.getState().toString();
+    this.state = job.getState();
     this.mapsTotal = job.getTotalMaps();
     this.mapsCompleted = job.getCompletedMaps();
     this.mapProgress = report.getMapProgress() * 100;
@@ -115,6 +110,9 @@ public class JobInfo {

     this.acls = new ArrayList<ConfEntryInfo>();
     if (hasAccess) {
+      this.diagnostics = "";
+      countTasksAndAttempts(job);
+
       this.uberized = job.isUber();

       List<String> diagnostics = job.getDiagnostics();
@@ -213,10 +211,10 @@ public class JobInfo {
   }

   public String getState() {
-    return this.state;
+    return this.state.toString();
   }

-  public String getUser() {
+  public String getUserName() {
     return this.user;
   }

@@ -267,13 +265,11 @@ public class JobInfo {
   /**
    * Go through a job and update the member variables with counts for
    * information to output in the page.
-   * 
+   *
    * @param job
    *          the job to get counts for.
    */
   private void countTasksAndAttempts(Job job) {
-    numReduces = 0;
-    numMaps = 0;
     final Map<TaskId, Task> tasks = job.getTasks();
     if (tasks == null) {
       return;

+ 6 - 7
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskAttemptCounterInfo.java

@@ -25,8 +25,8 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.XmlTransient;

-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.CounterGroup;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;

@@ -38,23 +38,22 @@ public class JobTaskAttemptCounterInfo {
   protected Counters total = null;

   protected String id;
-  protected ArrayList<TaskCounterGroupInfo> taskCounterGroups;
+  protected ArrayList<TaskCounterGroupInfo> taskAttemptCounterGroup;

   public JobTaskAttemptCounterInfo() {
   }

   public JobTaskAttemptCounterInfo(TaskAttempt taskattempt) {

-    long value = 0;
     this.id = MRApps.toString(taskattempt.getID());
     total = taskattempt.getCounters();
-    taskCounterGroups = new ArrayList<TaskCounterGroupInfo>();
+    taskAttemptCounterGroup = new ArrayList<TaskCounterGroupInfo>();
     if (total != null) {
-      for (CounterGroup g : total.getAllCounterGroups().values()) {
+      for (CounterGroup g : total) {
         if (g != null) {
           TaskCounterGroupInfo cginfo = new TaskCounterGroupInfo(g.getName(), g);
           if (cginfo != null) {
-            taskCounterGroups.add(cginfo);
+            taskAttemptCounterGroup.add(cginfo);
           }
         }
       }

+ 6 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/JobTaskCounterInfo.java

@@ -25,8 +25,8 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.XmlTransient;

-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
+import org.apache.hadoop.mapreduce.CounterGroup;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;

@@ -38,7 +38,7 @@ public class JobTaskCounterInfo {
   protected Counters total = null;

   protected String id;
-  protected ArrayList<TaskCounterGroupInfo> taskCounterGroups;
+  protected ArrayList<TaskCounterGroupInfo> taskCounterGroup;

   public JobTaskCounterInfo() {
   }
@@ -46,12 +46,12 @@ public class JobTaskCounterInfo {
   public JobTaskCounterInfo(Task task) {
     total = task.getCounters();
     this.id = MRApps.toString(task.getID());
-    taskCounterGroups = new ArrayList<TaskCounterGroupInfo>();
+    taskCounterGroup = new ArrayList<TaskCounterGroupInfo>();
     if (total != null) {
-      for (CounterGroup g : total.getAllCounterGroups().values()) {
+      for (CounterGroup g : total) {
         if (g != null) {
           TaskCounterGroupInfo cginfo = new TaskCounterGroupInfo(g.getName(), g);
-          taskCounterGroups.add(cginfo);
+          taskCounterGroup.add(cginfo);
         }
       }
     }

+ 4 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptInfo.java

@@ -25,6 +25,7 @@ import javax.xml.bind.annotation.XmlRootElement;
 import javax.xml.bind.annotation.XmlSeeAlso;
 import javax.xml.bind.annotation.XmlTransient;

+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
@@ -43,7 +44,7 @@ public class TaskAttemptInfo {
   protected float progress;
   protected String id;
   protected String rack;
-  protected String state;
+  protected TaskAttemptState state;
   protected String nodeHttpAddress;
   protected String diagnostics;
   protected String type;
@@ -69,7 +70,7 @@ public class TaskAttemptInfo {
         .getAssignedContainerID());
     this.assignedContainer = ta.getAssignedContainerID();
     this.progress = ta.getProgress() * 100;
-    this.state = ta.getState().toString();
+    this.state = ta.getState();
     this.elapsedTime = Times
         .elapsed(this.startTime, this.finishTime, isRunning);
     if (this.elapsedTime == -1) {
@@ -95,7 +96,7 @@ public class TaskAttemptInfo {
   }

   public String getState() {
-    return this.state;
+    return this.state.toString();
   }

   public String getId() {

+ 4 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskAttemptsInfo.java

@@ -23,21 +23,21 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;

-@XmlRootElement(name = "taskattempts")
+@XmlRootElement(name = "taskAttempts")
 @XmlAccessorType(XmlAccessType.FIELD)
 public class TaskAttemptsInfo {

-  protected ArrayList<TaskAttemptInfo> taskattempt = new ArrayList<TaskAttemptInfo>();
+  protected ArrayList<TaskAttemptInfo> taskAttempt = new ArrayList<TaskAttemptInfo>();

   public TaskAttemptsInfo() {
   } // JAXB needs this

   public void add(TaskAttemptInfo taskattemptInfo) {
-    taskattempt.add(taskattemptInfo);
+    taskAttempt.add(taskattemptInfo);
   }

   public ArrayList<TaskAttemptInfo> getTaskAttempts() {
-    return taskattempt;
+    return taskAttempt;
   }

 }

+ 4 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskCounterGroupInfo.java

@@ -24,8 +24,8 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlRootElement;

-import org.apache.hadoop.mapreduce.v2.api.records.Counter;
-import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.CounterGroup;

 @XmlRootElement
 @XmlAccessorType(XmlAccessType.FIELD)
@@ -37,11 +37,11 @@ public class TaskCounterGroupInfo {
   public TaskCounterGroupInfo() {
   }

-  public TaskCounterGroupInfo(String name, CounterGroup g) {
+  public TaskCounterGroupInfo(String name, CounterGroup group) {
     this.counterGroupName = name;
     this.counter = new ArrayList<TaskCounterInfo>();

-    for (Counter c : g.getAllCounters().values()) {
+    for (Counter c : group) {
       TaskCounterInfo cinfo = new TaskCounterInfo(c.getName(), c.getValue());
       this.counter.add(cinfo);
     }

+ 4 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/dao/TaskInfo.java

@@ -24,6 +24,7 @@ import javax.xml.bind.annotation.XmlTransient;

 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
@@ -39,7 +40,7 @@ public class TaskInfo {
   protected long elapsedTime;
   protected float progress;
   protected String id;
-  protected String state;
+  protected TaskState state;
   protected String type;
   protected String successfulAttempt;

@@ -62,7 +63,7 @@ public class TaskInfo {
     if (this.elapsedTime == -1) {
       this.elapsedTime = 0;
     }
-    this.state = report.getTaskState().toString();
+    this.state = report.getTaskState();
     this.progress = report.getProgress() * 100;
     this.id = MRApps.toString(task.getID());
     this.taskNum = task.getID().getId();
@@ -79,7 +80,7 @@ public class TaskInfo {
   }

   public String getState() {
-    return this.state;
+    return this.state.toString();
   }

   public String getId() {

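A pattern worth noting across JobInfo, TaskInfo and TaskAttemptInfo above: the state field is now the enum itself rather than a pre-rendered String, with getState() converting at the boundary. Under field-based JAXB access an enum field still marshals as its constant name, so the wire format should be unchanged. A minimal sketch of that behavior (DemoState is a hypothetical stand-in, not a Hadoop type):

    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.Marshaller;
    import javax.xml.bind.annotation.XmlAccessType;
    import javax.xml.bind.annotation.XmlAccessorType;
    import javax.xml.bind.annotation.XmlRootElement;

    public class EnumFieldSketch {
      enum DemoState { RUNNING, SUCCEEDED } // stand-in for TaskState etc.

      @XmlRootElement(name = "task")
      @XmlAccessorType(XmlAccessType.FIELD)
      static class TaskDao {
        protected DemoState state = DemoState.RUNNING;
        public String getState() { return state.toString(); } // boundary conversion
      }

      public static void main(String[] args) throws Exception {
        Marshaller m = JAXBContext.newInstance(TaskDao.class).createMarshaller();
        m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
        m.marshal(new TaskDao(), System.out);
        // <task><state>RUNNING</state></task>
      }
    }
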
+ 6 - 5
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.mapred;

 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
@@ -43,7 +44,7 @@ public class TestTaskAttemptListenerImpl {
     }
     
     @Override
-    protected void registerHeartbeatHandler() {
+    protected void registerHeartbeatHandler(Configuration conf) {
       //Empty
     }

@@ -79,21 +80,21 @@ public class TestTaskAttemptListenerImpl {
     assertNotNull(result);
     assertTrue(result.shouldDie);

-    // Verify ask after registration but before launch
+    // Verify ask after registration but before launch. 
+    // Don't kill, should be null.
     TaskAttemptId attemptID = mock(TaskAttemptId.class);
     Task task = mock(Task.class);
     //Now put a task with the ID
     listener.registerPendingTask(task, wid);
     result = listener.getTask(context);
-    assertNotNull(result);
-    assertFalse(result.shouldDie);
+    assertNull(result);
     // Unregister for more testing.
     listener.unregister(attemptID, wid);

     // Verify ask after registration and launch
     //Now put a task with the ID
     listener.registerPendingTask(task, wid);
-    listener.registerLaunchedTask(attemptID);
+    listener.registerLaunchedTask(attemptID, wid);
     verify(hbHandler).register(attemptID);
     result = listener.getTask(context);
     assertNotNull(result);

+ 310 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java

@@ -0,0 +1,310 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.jobhistory;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.Counters;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TaskID;
+import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.junit.Test;
+
+public class TestJobHistoryEventHandler {
+
+
+  private static final Log LOG = LogFactory
+      .getLog(TestJobHistoryEventHandler.class);
+
+  @Test
+  public void testFirstFlushOnCompletionEvent() throws Exception {
+    TestParams t = new TestParams();
+    Configuration conf = new Configuration();
+    conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.workDir);
+    conf.setLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS,
+        60 * 1000l);
+    conf.setInt(MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER, 10);
+    conf.setInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS, 10);
+    conf.setInt(
+        MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD, 200);
+
+    JHEvenHandlerForTest realJheh =
+        new JHEvenHandlerForTest(t.mockAppContext, 0);
+    JHEvenHandlerForTest jheh = spy(realJheh);
+    jheh.init(conf);
+
+    EventWriter mockWriter = null;
+    try {
+      jheh.start();
+      handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
+          t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000)));
+      mockWriter = jheh.getEventWriter();
+      verify(mockWriter).write(any(HistoryEvent.class));
+
+      for (int i = 0; i < 100; i++) {
+        queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskStartedEvent(
+            t.taskID, 0, TaskType.MAP, "")));
+      }
+      handleNextNEvents(jheh, 100);
+      verify(mockWriter, times(0)).flush();
+
+      // First completion event, but min-queue-size for batching flushes is 10
+      handleEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent(
+          t.taskID, 0, TaskType.MAP, "", null)));
+      verify(mockWriter).flush();
+
+    } finally {
+      jheh.stop();
+      verify(mockWriter).close();
+    }
+  }
+
+  @Test
+  public void testMaxUnflushedCompletionEvents() throws Exception {
+    TestParams t = new TestParams();
+    Configuration conf = new Configuration();
+    conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.workDir);
+    conf.setLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS,
+        60 * 1000l);
+    conf.setInt(MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER, 10);
+    conf.setInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS, 10);
+    conf.setInt(
+        MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD, 5);
+
+    JHEvenHandlerForTest realJheh =
+        new JHEvenHandlerForTest(t.mockAppContext, 0);
+    JHEvenHandlerForTest jheh = spy(realJheh);
+    jheh.init(conf);
+
+    EventWriter mockWriter = null;
+    try {
+      jheh.start();
+      handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
+          t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000)));
+      mockWriter = jheh.getEventWriter();
+      verify(mockWriter).write(any(HistoryEvent.class));
+
+      for (int i = 0 ; i < 100 ; i++) {
+        queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent(
+            t.taskID, 0, TaskType.MAP, "", null)));
+      }
+
+      handleNextNEvents(jheh, 9);
+      verify(mockWriter, times(0)).flush();
+
+      handleNextNEvents(jheh, 1);
+      verify(mockWriter).flush();
+      
+      handleNextNEvents(jheh, 50);
+      verify(mockWriter, times(6)).flush();
+      
+    } finally {
+      jheh.stop();
+      verify(mockWriter).close();
+    }
+  }
+  
+  @Test
+  public void testUnflushedTimer() throws Exception {
+    TestParams t = new TestParams();
+    Configuration conf = new Configuration();
+    conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.workDir);
+    conf.setLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS,
+        2 * 1000l); //2 seconds.
+    conf.setInt(MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER, 10);
+    conf.setInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS, 100);
+    conf.setInt(
+        MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD, 5);
+
+    JHEvenHandlerForTest realJheh =
+        new JHEvenHandlerForTest(t.mockAppContext, 0);
+    JHEvenHandlerForTest jheh = spy(realJheh);
+    jheh.init(conf);
+
+    EventWriter mockWriter = null;
+    try {
+      jheh.start();
+      handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
+          t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000)));
+      mockWriter = jheh.getEventWriter();
+      verify(mockWriter).write(any(HistoryEvent.class));
+
+      for (int i = 0 ; i < 100 ; i++) {
+        queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent(
+            t.taskID, 0, TaskType.MAP, "", null)));
+      }
+
+      handleNextNEvents(jheh, 9);
+      verify(mockWriter, times(0)).flush();
+
+      Thread.sleep(2 * 4 * 1000l); // 4 seconds should be enough. Just be safe.
+      verify(mockWriter).flush();
+    } finally {
+      jheh.stop();
+      verify(mockWriter).close();
+    }
+  }
+  
+  @Test
+  public void testBatchedFlushJobEndMultiplier() throws Exception {
+    TestParams t = new TestParams();
+    Configuration conf = new Configuration();
+    conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.workDir);
+    conf.setLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS,
+        60 * 1000l); //60 seconds.
+    conf.setInt(MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER, 3);
+    conf.setInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS, 10);
+    conf.setInt(
+        MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD, 0);
+
+    JHEvenHandlerForTest realJheh =
+        new JHEvenHandlerForTest(t.mockAppContext, 0);
+    JHEvenHandlerForTest jheh = spy(realJheh);
+    jheh.init(conf);
+
+    EventWriter mockWriter = null;
+    try {
+      jheh.start();
+      handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
+          t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000)));
+      mockWriter = jheh.getEventWriter();
+      verify(mockWriter).write(any(HistoryEvent.class));
+
+      for (int i = 0 ; i < 100 ; i++) {
+        queueEvent(jheh, new JobHistoryEvent(t.jobId, new TaskFinishedEvent(
+            t.taskID, 0, TaskType.MAP, "", null)));
+      }
+      queueEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
+          TypeConverter.fromYarn(t.jobId), 0, 10, 10, 0, 0, null, null, new Counters())));
+
+      handleNextNEvents(jheh, 29);
+      verify(mockWriter, times(0)).flush();
+
+      handleNextNEvents(jheh, 72);
+      verify(mockWriter, times(4)).flush(); //3 * 30 + 1 for JobFinished
+    } finally {
+      jheh.stop();
+      verify(mockWriter).close();
+    }
+  }
+
+  private void queueEvent(JHEvenHandlerForTest jheh, JobHistoryEvent event) {
+    jheh.handle(event);
+  }
+
+  private void handleEvent(JHEvenHandlerForTest jheh, JobHistoryEvent event)
+      throws InterruptedException {
+    jheh.handle(event);
+    jheh.handleEvent(jheh.eventQueue.take());
+  }
+
+  private void handleNextNEvents(JHEvenHandlerForTest jheh, int numEvents)
+      throws InterruptedException {
+    for (int i = 0; i < numEvents; i++) {
+      jheh.handleEvent(jheh.eventQueue.take());
+    }
+  }
+
+  private String setupTestWorkDir() {
+    File testWorkDir = new File("target", this.getClass().getCanonicalName());
+    try {
+      FileContext.getLocalFSFileContext().delete(
+          new Path(testWorkDir.getAbsolutePath()), true);
+      return testWorkDir.getAbsolutePath();
+    } catch (Exception e) {
+      LOG.warn("Could not cleanup", e);
+      throw new YarnException("could not cleanup test dir", e);
+    }
+  }
+
+  private AppContext mockAppContext(JobId jobId) {
+    AppContext mockContext = mock(AppContext.class);
+    Job mockJob = mock(Job.class);
+    when(mockJob.getTotalMaps()).thenReturn(10);
+    when(mockJob.getTotalReduces()).thenReturn(10);
+    when(mockJob.getName()).thenReturn("mockjob");
+    when(mockContext.getJob(jobId)).thenReturn(mockJob);
+    return mockContext;
+  }
+  
+
+  private class TestParams {
+    String workDir = setupTestWorkDir();
+    ApplicationId appId = BuilderUtils.newApplicationId(200, 1);
+    ApplicationAttemptId appAttemptId =
+        BuilderUtils.newApplicationAttemptId(appId, 1);
+    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
+    TaskID taskID = TaskID.forName("task_200707121733_0003_m_000005");
+    JobId jobId = MRBuilderUtils.newJobId(appId, 1);
+    AppContext mockAppContext = mockAppContext(jobId);
+  }
+}
+
+class JHEvenHandlerForTest extends JobHistoryEventHandler {
+
+  private EventWriter eventWriter;
+  volatile int handleEventCompleteCalls = 0;
+  volatile int handleEventStartedCalls = 0;
+
+  public JHEvenHandlerForTest(AppContext context, int startCount) {
+    super(context, startCount);
+  }
+
+  @Override
+  public void start() {
+  }
+  
+  @Override
+  protected EventWriter createEventWriter(Path historyFilePath)
+      throws IOException {
+    this.eventWriter = mock(EventWriter.class);
+    return this.eventWriter;
+  }
+
+  @Override
+  protected void closeEventWriter(JobId jobId) {
+  }
+  
+  public EventWriter getEventWriter() {
+    return this.eventWriter;
+  }
+}

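The times(4) expectation in testBatchedFlushJobEndMultiplier follows from the configuration above, assuming (as the test's own comment implies) that once the job-finished event is queued the handler flushes every maxUnflushed * multiplier completion events, plus once for the JobFinished event itself:

    public class FlushMathSketch {
      public static void main(String[] args) {
        int maxUnflushed = 10;       // MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS
        int multiplier = 3;          // MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER
        int taskFinishedEvents = 100;
        int batch = maxUnflushed * multiplier;        // 30 events per flush
        int flushes = taskFinishedEvents / batch + 1; // 3 batches + JobFinished
        System.out.println(flushes);                  // 4
      }
    }
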
+ 4 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java

@@ -324,7 +324,9 @@ public class MRApp extends MRAppMaster {
         return NetUtils.createSocketAddr("localhost:54321");
       }
       @Override
-      public void registerLaunchedTask(TaskAttemptId attemptID) {}
+      public void registerLaunchedTask(TaskAttemptId attemptID,
+          WrappedJvmID jvmID) {
+      }
       @Override
       public void unregister(TaskAttemptId attemptID, WrappedJvmID jvmID) {
       }
@@ -463,6 +465,7 @@ public class MRApp extends MRAppMaster {
       return localStateMachine;
     }

+    @SuppressWarnings("rawtypes")
     public TestJob(JobId jobId, ApplicationAttemptId applicationAttemptId,
         Configuration conf, EventHandler eventHandler,
         TaskAttemptListener taskAttemptListener, Clock clock,

+ 121 - 91
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java

@@ -1,41 +1,43 @@
 /**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */

 package org.apache.hadoop.mapreduce.v2.app;

-import com.google.common.collect.Iterators;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;

+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.JobACLsManager;
 import org.apache.hadoop.mapred.ShuffleHandler;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.FileSystemCounter;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.JobCounter;
+import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.TaskCounter;
+import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@@ -48,7 +50,6 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
-import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
@@ -63,33 +64,38 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.util.Records;

+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
 public class MockJobs extends MockApps {
-  static final Iterator<JobState> JOB_STATES = Iterators.cycle(
-      JobState.values());
-  static final Iterator<TaskState> TASK_STATES = Iterators.cycle(
-      TaskState.values());
-  static final Iterator<TaskAttemptState> TASK_ATTEMPT_STATES = Iterators.cycle(
-      TaskAttemptState.values());
-  static final Iterator<TaskType> TASK_TYPES = Iterators.cycle(
-      TaskType.values());
-  static final Iterator<JobCounter> JOB_COUNTERS = Iterators.cycle(
-      JobCounter.values());
-  static final Iterator<FileSystemCounter> FS_COUNTERS = Iterators.cycle(
-      FileSystemCounter.values());
-  static final Iterator<TaskCounter> TASK_COUNTERS = Iterators.cycle(
-      TaskCounter.values());
+  static final Iterator<JobState> JOB_STATES = Iterators.cycle(JobState
+      .values());
+  static final Iterator<TaskState> TASK_STATES = Iterators.cycle(TaskState
+      .values());
+  static final Iterator<TaskAttemptState> TASK_ATTEMPT_STATES = Iterators
+      .cycle(TaskAttemptState.values());
+  static final Iterator<TaskType> TASK_TYPES = Iterators.cycle(TaskType
+      .values());
+  static final Iterator<JobCounter> JOB_COUNTERS = Iterators.cycle(JobCounter
+      .values());
+  static final Iterator<FileSystemCounter> FS_COUNTERS = Iterators
+      .cycle(FileSystemCounter.values());
+  static final Iterator<TaskCounter> TASK_COUNTERS = Iterators
+      .cycle(TaskCounter.values());
   static final Iterator<String> FS_SCHEMES = Iterators.cycle("FILE", "HDFS",
       "LAFS", "CEPH");
-  static final Iterator<String> USER_COUNTER_GROUPS = Iterators.cycle(
-      "com.company.project.subproject.component.subcomponent.UserDefinedSpecificSpecialTask$Counters",
-      "PigCounters");
-  static final Iterator<String> USER_COUNTERS = Iterators.cycle(
-      "counter1", "counter2", "counter3");
+  static final Iterator<String> USER_COUNTER_GROUPS = Iterators
+      .cycle(
+          "com.company.project.subproject.component.subcomponent.UserDefinedSpecificSpecialTask$Counters",
+          "PigCounters");
+  static final Iterator<String> USER_COUNTERS = Iterators.cycle("counter1",
+      "counter2", "counter3");
   static final Iterator<Phase> PHASES = Iterators.cycle(Phase.values());
   static final Iterator<String> DIAGS = Iterators.cycle(
       "Error: java.lang.OutOfMemoryError: Java heap space",
       "Lost task tracker: tasktracker.domain/127.0.0.1:40879");
-  
+
   public static final String NM_HOST = "localhost";
   public static final int NM_PORT = 1234;
   public static final int NM_HTTP_PORT = 9999;
@@ -101,8 +107,7 @@ public class MockJobs extends MockApps {
   }
 
   public static Map<JobId, Job> newJobs(ApplicationId appID, int numJobsPerApp,
-                                        int numTasksPerJob,
-                                        int numAttemptsPerTask) {
+      int numTasksPerJob, int numAttemptsPerTask) {
     Map<JobId, Job> map = Maps.newHashMap();
     for (int j = 0; j < numJobsPerApp; ++j) {
       Job job = newJob(appID, j, numTasksPerJob, numAttemptsPerTask);
@@ -121,10 +126,12 @@ public class MockJobs extends MockApps {
   public static JobReport newJobReport(JobId id) {
     JobReport report = Records.newRecord(JobReport.class);
     report.setJobId(id);
-    report.setStartTime(System.currentTimeMillis() - (int)(Math.random() * DT));
-    report.setFinishTime(System.currentTimeMillis() + (int)(Math.random() * DT) + 1);
-    report.setMapProgress((float)Math.random());
-    report.setReduceProgress((float)Math.random());
+    report
+        .setStartTime(System.currentTimeMillis() - (int) (Math.random() * DT));
+    report.setFinishTime(System.currentTimeMillis()
+        + (int) (Math.random() * DT) + 1);
+    report.setMapProgress((float) Math.random());
+    report.setReduceProgress((float) Math.random());
     report.setJobState(JOB_STATES.next());
     return report;
   }
@@ -132,10 +139,12 @@ public class MockJobs extends MockApps {
   public static TaskReport newTaskReport(TaskId id) {
     TaskReport report = Records.newRecord(TaskReport.class);
     report.setTaskId(id);
-    report.setStartTime(System.currentTimeMillis() - (int)(Math.random() * DT));
-    report.setFinishTime(System.currentTimeMillis() + (int)(Math.random() * DT) + 1);
-    report.setProgress((float)Math.random());
-    report.setCounters(newCounters());
+    report
+        .setStartTime(System.currentTimeMillis() - (int) (Math.random() * DT));
+    report.setFinishTime(System.currentTimeMillis()
+        + (int) (Math.random() * DT) + 1);
+    report.setProgress((float) Math.random());
+    report.setCounters(TypeConverter.toYarn(newCounters()));
     report.setTaskState(TASK_STATES.next());
     return report;
   }
@@ -143,41 +152,41 @@ public class MockJobs extends MockApps {
   public static TaskAttemptReport newTaskAttemptReport(TaskAttemptId id) {
     TaskAttemptReport report = Records.newRecord(TaskAttemptReport.class);
     report.setTaskAttemptId(id);
-    report.setStartTime(System.currentTimeMillis() - (int)(Math.random() * DT));
-    report.setFinishTime(System.currentTimeMillis() + (int)(Math.random() * DT) + 1);
+    report
+        .setStartTime(System.currentTimeMillis() - (int) (Math.random() * DT));
+    report.setFinishTime(System.currentTimeMillis()
+        + (int) (Math.random() * DT) + 1);
     report.setPhase(PHASES.next());
     report.setTaskAttemptState(TASK_ATTEMPT_STATES.next());
-    report.setProgress((float)Math.random());
-    report.setCounters(newCounters());
+    report.setProgress((float) Math.random());
+    report.setCounters(TypeConverter.toYarn(newCounters()));
     return report;
   }
 
-  @SuppressWarnings("deprecation")
   public static Counters newCounters() {
-    org.apache.hadoop.mapred.Counters hc =
-        new org.apache.hadoop.mapred.Counters();
+    Counters hc = new Counters();
     for (JobCounter c : JobCounter.values()) {
-      hc.findCounter(c).setValue((long)(Math.random() * 1000));
+      hc.findCounter(c).setValue((long) (Math.random() * 1000));
     }
     for (TaskCounter c : TaskCounter.values()) {
-      hc.findCounter(c).setValue((long)(Math.random() * 1000));
+      hc.findCounter(c).setValue((long) (Math.random() * 1000));
     }
     int nc = FileSystemCounter.values().length * 4;
     for (int i = 0; i < nc; ++i) {
       for (FileSystemCounter c : FileSystemCounter.values()) {
-        hc.findCounter(FS_SCHEMES.next(), c).
-            setValue((long)(Math.random() * DT));
+        hc.findCounter(FS_SCHEMES.next(), c).setValue(
+            (long) (Math.random() * DT));
       }
     }
     for (int i = 0; i < 2 * 3; ++i) {
-      hc.findCounter(USER_COUNTER_GROUPS.next(), USER_COUNTERS.next()).
-          setValue((long)(Math.random() * 100000));
+      hc.findCounter(USER_COUNTER_GROUPS.next(), USER_COUNTERS.next())
+          .setValue((long) (Math.random() * 100000));
     }
-    return TypeConverter.toYarn(hc);
+    return hc;
   }
 
   public static Map<TaskAttemptId, TaskAttempt> newTaskAttempts(TaskId tid,
-                                                                int m) {
+      int m) {
     Map<TaskAttemptId, TaskAttempt> map = Maps.newHashMap();
     for (int i = 0; i < m; ++i) {
       TaskAttempt ta = newTaskAttempt(tid, i);
@@ -221,7 +230,10 @@ public class MockJobs extends MockApps {
 
       @Override
       public Counters getCounters() {
-        return report.getCounters();
+        if (report != null && report.getCounters() != null) {
+          return new Counters(TypeConverter.fromYarn(report.getCounters()));
+        }
+        return null;
       }
       }
 
       @Override
@@ -237,9 +249,10 @@ public class MockJobs extends MockApps {
       @Override
       public boolean isFinished() {
         switch (report.getTaskAttemptState()) {
-          case SUCCEEDED:
-          case FAILED:
-          case KILLED: return true;
+        case SUCCEEDED:
+        case FAILED:
+        case KILLED:
+          return true;
         }
         return false;
       }
@@ -247,8 +260,8 @@ public class MockJobs extends MockApps {
       @Override
       public ContainerId getAssignedContainerID() {
         ContainerId id = Records.newRecord(ContainerId.class);
-        ApplicationAttemptId appAttemptId = 
-            Records.newRecord(ApplicationAttemptId.class);
+        ApplicationAttemptId appAttemptId = Records
+            .newRecord(ApplicationAttemptId.class);
         appAttemptId.setApplicationId(taid.getTaskId().getJobId().getAppId());
         appAttemptId.setAttemptId(0);
         id.setApplicationAttemptId(appAttemptId);
@@ -280,10 +293,10 @@ public class MockJobs extends MockApps {
         return 0;
       }
 
-	@Override
-	public String getNodeRackName() {
-		return "/default-rack";
-	}
+      @Override
+      public String getNodeRackName() {
+        return "/default-rack";
+      }
     };
   }
 
@@ -316,7 +329,8 @@ public class MockJobs extends MockApps {
 
       @Override
       public Counters getCounters() {
-        return report.getCounters();
+        return new Counters(
+          TypeConverter.fromYarn(report.getCounters()));
       }
 
       @Override
@@ -342,9 +356,10 @@ public class MockJobs extends MockApps {
       @Override
       public boolean isFinished() {
         switch (report.getTaskState()) {
-          case SUCCEEDED:
-          case KILLED:
-          case FAILED: return true;
+        case SUCCEEDED:
+        case KILLED:
+        case FAILED:
+          return true;
         }
         return false;
       }
@@ -361,8 +376,9 @@ public class MockJobs extends MockApps {
     };
   }
 
-  public static Counters getCounters(Collection<Task> tasks) {
-    Counters counters = JobImpl.newCounters();
+  public static Counters getCounters(
+      Collection<Task> tasks) {
+    Counters counters = new Counters();
     return JobImpl.incrTaskCounters(counters, tasks);
   }
 
@@ -398,12 +414,27 @@ public class MockJobs extends MockApps {
   }
 
   public static Job newJob(ApplicationId appID, int i, int n, int m) {
+    return newJob(appID, i, n, m, null);
+  }
+
+  public static Job newJob(ApplicationId appID, int i, int n, int m, Path confFile) {
     final JobId id = newJobID(appID, i);
     final String name = newJobName();
     final JobReport report = newJobReport(id);
     final Map<TaskId, Task> tasks = newTasks(id, n, m);
     final TaskCount taskCount = getTaskCount(tasks.values());
-    final Counters counters = getCounters(tasks.values());
+    final Counters counters = getCounters(tasks
+      .values());
+    final Path configFile = confFile;
+
+    Map<JobACL, AccessControlList> tmpJobACLs = new HashMap<JobACL, AccessControlList>();
+    Configuration conf = new Configuration();
+    conf.set(JobACL.VIEW_JOB.getAclName(), "testuser");
+    conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
+
+    JobACLsManager aclsManager = new JobACLsManager(conf);
+    tmpJobACLs = aclsManager.constructJobACLs(conf);
+    final Map<JobACL, AccessControlList> jobACLs = tmpJobACLs;
     return new Job() {
       @Override
       public JobId getID() {
@@ -431,7 +462,7 @@ public class MockJobs extends MockApps {
       }
 
       @Override
-      public Counters getCounters() {
+      public Counters getAllCounters() {
         return counters;
       }
 
@@ -483,7 +514,7 @@ public class MockJobs extends MockApps {
 
       @Override
       public List<String> getDiagnostics() {
-        return Collections.<String>emptyList();
+        return Collections.<String> emptyList();
       }
 
       @Override
@@ -504,12 +535,12 @@ public class MockJobs extends MockApps {
 
       @Override
       public Path getConfFile() {
-        throw new UnsupportedOperationException("Not supported yet.");
+        return configFile;
       }
 
       @Override
       public Map<JobACL, AccessControlList> getJobACLs() {
-        return Collections.<JobACL, AccessControlList>emptyMap();
+        return jobACLs;
       }
 
       @Override
@@ -521,11 +552,10 @@ public class MockJobs extends MockApps {
       }
     };
   }
-  
+
   private static AMInfo createAMInfo(int attempt) {
-    ApplicationAttemptId appAttemptId =
-        BuilderUtils.newApplicationAttemptId(
-            BuilderUtils.newApplicationId(100, 1), attempt);
+    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
+        BuilderUtils.newApplicationId(100, 1), attempt);
     ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
     return MRBuilderUtils.newAMInfo(appAttemptId, System.currentTimeMillis(),
         containerId, NM_HOST, NM_PORT, NM_HTTP_PORT);

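The MockJobs change above switches counter construction from the YARN record type to org.apache.hadoop.mapreduce.Counters, converting only at the report boundary: TypeConverter.toYarn(...) when a report is filled in, and new Counters(TypeConverter.fromYarn(...)) when a framework-side Counters is read back. A minimal sketch of that round trip, built from the calls visible in the diff (the wrapper class and the counter names are illustrative, not part of the patch):

    import org.apache.hadoop.mapreduce.Counters;
    import org.apache.hadoop.mapreduce.TypeConverter;

    class CounterBoundarySketch {
      // Build framework Counters directly, as newCounters() now does, and
      // convert to the record form only when filling a report.
      static org.apache.hadoop.mapreduce.v2.api.records.Counters toReportForm() {
        Counters hc = new Counters();
        hc.findCounter("MyGroup", "myCounter").setValue(42);
        return TypeConverter.toYarn(hc);   // as in newTaskReport()/newTaskAttemptReport()
      }

      // Convert back when a caller needs framework Counters, as the
      // rewritten getCounters() overrides above do.
      static Counters fromReportForm(
          org.apache.hadoop.mapreduce.v2.api.records.Counters yarn) {
        return new Counters(TypeConverter.fromYarn(yarn));
      }
    }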
+ 0 - 140
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestContainerLauncher.java

@@ -1,140 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.hadoop.mapreduce.v2.app;
-
-import java.io.IOException;
-import java.util.Map;
-
-import junit.framework.Assert;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.v2.api.records.JobState;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
-import org.apache.hadoop.mapreduce.v2.app.job.Job;
-import org.apache.hadoop.mapreduce.v2.app.job.Task;
-import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
-import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
-import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl;
-import org.apache.hadoop.yarn.api.ContainerManager;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerToken;
-import org.junit.Test;
-
-public class TestContainerLauncher {
-
-  static final Log LOG = LogFactory
-      .getLog(TestContainerLauncher.class);
-
-  @Test
-  public void testSlowNM() throws Exception {
-    test(false);
-  }
-
-  @Test
-  public void testSlowNMWithInterruptsSwallowed() throws Exception {
-    test(true);
-  }
-
-  private void test(boolean swallowInterrupts) throws Exception {
-
-    MRApp app = new MRAppWithSlowNM(swallowInterrupts);
-
-    Configuration conf = new Configuration();
-    int maxAttempts = 1;
-    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
-    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
-
-    // Set low timeout for NM commands
-    conf.setInt(ContainerLauncher.MR_AM_NM_COMMAND_TIMEOUT, 3000);
-
-    Job job = app.submit(conf);
-    app.waitForState(job, JobState.RUNNING);
-
-    Map<TaskId, Task> tasks = job.getTasks();
-    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
-
-    Task task = tasks.values().iterator().next();
-    app.waitForState(task, TaskState.SCHEDULED);
-
-    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator()
-        .next().getAttempts();
-    Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts
-        .size());
-
-    TaskAttempt attempt = attempts.values().iterator().next();
-    app.waitForState(attempt, TaskAttemptState.ASSIGNED);
-
-    app.waitForState(job, JobState.FAILED);
-
-    String diagnostics = attempt.getDiagnostics().toString();
-    LOG.info("attempt.getDiagnostics: " + diagnostics);
-    if (swallowInterrupts) {
-      Assert.assertEquals("[Container launch failed for "
-          + "container_0_0000_01_000000 : Start-container for "
-          + "container_0_0000_01_000000 got interrupted. Returning.]",
-          diagnostics);
-    } else {
-      Assert.assertTrue(diagnostics.contains("Container launch failed for "
-          + "container_0_0000_01_000000 : "));
-      Assert.assertTrue(diagnostics
-          .contains(": java.lang.InterruptedException"));
-    }
-
-    app.stop();
-  }
-
-  private static class MRAppWithSlowNM extends MRApp {
-
-    final boolean swallowInterrupts;
-
-    public MRAppWithSlowNM(boolean swallowInterrupts) {
-      super(1, 0, false, "TestContainerLauncher", true);
-      this.swallowInterrupts = swallowInterrupts;
-    }
-
-    @Override
-    protected ContainerLauncher createContainerLauncher(AppContext context) {
-      return new ContainerLauncherImpl(context) {
-        @Override
-        protected ContainerManager getCMProxy(ContainerId containerID,
-            String containerManagerBindAddr, ContainerToken containerToken)
-            throws IOException {
-          try {
-            synchronized (this) {
-              wait(); // Just hang the thread simulating a very slow NM.
-            }
-          } catch (InterruptedException e) {
-            LOG.info(e);
-            if (!MRAppWithSlowNM.this.swallowInterrupts) {
-              throw new IOException(e);
-            }
-            Thread.currentThread().interrupt();
-          }
-          return null;
-        }
-      };
-    };
-  }
-}

+ 31 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.mapreduce.v2.app;
 
+import java.net.Proxy;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
@@ -71,6 +73,34 @@ public class TestJobEndNotifier extends JobEndNotifier {
       waitInterval == 5);
   }
 
+  private void testProxyConfiguration(Configuration conf) {
+    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "somehost");
+    setConf(conf);
+    Assert.assertTrue("Proxy shouldn't be set because port wasn't specified",
+      proxyToUse.type() == Proxy.Type.DIRECT);
+    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "somehost:someport");
+    setConf(conf);
+    Assert.assertTrue("Proxy shouldn't be set because port wasn't numeric",
+      proxyToUse.type() == Proxy.Type.DIRECT);
+    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "somehost:1000");
+    setConf(conf);
+    Assert.assertTrue("Proxy should have been set but wasn't ",
+      proxyToUse.toString().equals("HTTP @ somehost:1000"));
+    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "socks@somehost:1000");
+    setConf(conf);
+    Assert.assertTrue("Proxy should have been socks but wasn't ",
+      proxyToUse.toString().equals("SOCKS @ somehost:1000"));
+    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "SOCKS@somehost:1000");
+    setConf(conf);
+    Assert.assertTrue("Proxy should have been socks but wasn't ",
+      proxyToUse.toString().equals("SOCKS @ somehost:1000"));
+    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_PROXY, "sfafn@somehost:1000");
+    setConf(conf);
+    Assert.assertTrue("Proxy should have been http but wasn't ",
+      proxyToUse.toString().equals("HTTP @ somehost:1000"));
+    
+  }
+
   /**
    * Test that setting parameters has the desired effect
    */
@@ -79,6 +109,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
     Configuration conf = new Configuration();
     testNumRetries(conf);
     testWaitInterval(conf);
+    testProxyConfiguration(conf);
   }
 
   protected int notificationCount = 0;

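The new testProxyConfiguration above pins down the expected format of MR_JOB_END_NOTIFICATION_PROXY: an optional scheme prefix ("socks@" in any case selects SOCKS; any other prefix means HTTP) followed by host:port, falling back to a direct connection when the port is missing or non-numeric. A hedged sketch of parsing that would satisfy those assertions (this is not the JobEndNotifier implementation, only an illustration of the contract the test encodes):

    import java.net.InetSocketAddress;
    import java.net.Proxy;

    class ProxyConfSketch {
      static Proxy parse(String s) {
        Proxy.Type type = Proxy.Type.HTTP;
        int at = s.indexOf('@');
        if (at >= 0) {
          // "socks@"/"SOCKS@" select SOCKS; unknown schemes fall back to HTTP
          if (s.substring(0, at).equalsIgnoreCase("socks")) {
            type = Proxy.Type.SOCKS;
          }
          s = s.substring(at + 1);
        }
        int colon = s.lastIndexOf(':');
        if (colon < 0) {
          return Proxy.NO_PROXY;        // no port given => DIRECT
        }
        try {
          int port = Integer.parseInt(s.substring(colon + 1));
          return new Proxy(type,
              InetSocketAddress.createUnresolved(s.substring(0, colon), port));
        } catch (NumberFormatException e) {
          return Proxy.NO_PROXY;        // non-numeric port => DIRECT
        }
      }
    }

On the JDKs of this era, Proxy.toString() renders as "HTTP @ somehost:1000" or "SOCKS @ somehost:1000", which matches the strings the assertions compare against.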
+ 7 - 8
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java

@@ -18,9 +18,6 @@
 
 package org.apache.hadoop.mapreduce.v2.app;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -30,11 +27,14 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
-import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@@ -46,13 +46,12 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
-import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
-import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
 import org.apache.hadoop.mapreduce.v2.app.speculate.DefaultSpeculator;
 import org.apache.hadoop.mapreduce.v2.app.speculate.ExponentiallySmoothedTaskRuntimeEstimator;
 import org.apache.hadoop.mapreduce.v2.app.speculate.LegacyTaskRuntimeEstimator;
@@ -74,7 +73,7 @@ import org.apache.hadoop.yarn.service.CompositeService;
 import org.junit.Assert;
 import org.junit.Test;
 
-
+@SuppressWarnings({"unchecked", "rawtypes"})
 public class TestRuntimeEstimators {
 
   private static int INITIAL_NUMBER_FREE_SLOTS = 600;
@@ -399,7 +398,7 @@ public class TestRuntimeEstimators {
     }
 
     @Override
-    public Counters getCounters() {
+    public Counters getAllCounters() {
       throw new UnsupportedOperationException("Not supported yet.");
     }
 

+ 321 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java

@@ -0,0 +1,321 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app.launcher;
+
+import static org.mockito.Mockito.mock;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.ThreadPoolExecutor;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.MRApp;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerToken;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.junit.Test;
+
+public class TestContainerLauncher {
+
+  static final Log LOG = LogFactory
+      .getLog(TestContainerLauncher.class);
+
+  @Test
+  public void testPoolSize() throws InterruptedException {
+
+    ApplicationId appId = BuilderUtils.newApplicationId(12345, 67);
+    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
+      appId, 3);
+    JobId jobId = MRBuilderUtils.newJobId(appId, 8);
+    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
+    TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
+    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 10);
+
+    AppContext context = mock(AppContext.class);
+    CustomContainerLauncher containerLauncher = new CustomContainerLauncher(
+      context);
+    containerLauncher.init(new Configuration());
+    containerLauncher.start();
+
+    ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();
+
+    // No events yet
+    Assert.assertEquals(0, threadPool.getPoolSize());
+    Assert.assertEquals(ContainerLauncherImpl.INITIAL_POOL_SIZE,
+      threadPool.getCorePoolSize());
+    Assert.assertNull(containerLauncher.foundErrors);
+
+    containerLauncher.expectedCorePoolSize = ContainerLauncherImpl.INITIAL_POOL_SIZE;
+    for (int i = 0; i < 10; i++) {
+      containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
+        containerId, "host" + i + ":1234", null,
+        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
+    }
+    waitForEvents(containerLauncher, 10);
+    Assert.assertEquals(10, threadPool.getPoolSize());
+    Assert.assertNull(containerLauncher.foundErrors);
+
+    // Same set of hosts, so no change
+    containerLauncher.expectedCorePoolSize = ContainerLauncherImpl.INITIAL_POOL_SIZE;
+    containerLauncher.finishEventHandling = true;
+    for (int i = 0; i < 10; i++) {
+      containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
+        containerId, "host" + i + ":1234", null,
+        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
+    }
+    waitForEvents(containerLauncher, 20);
+    Assert.assertEquals(10, threadPool.getPoolSize());
+    Assert.assertNull(containerLauncher.foundErrors);
+
+    // Different hosts, there should be an increase in core-thread-pool size to
+    // 21(11hosts+10buffer)
+    // Core pool size should be 21 but the live pool size should be only 11.
+    containerLauncher.expectedCorePoolSize = 12 + ContainerLauncherImpl.INITIAL_POOL_SIZE;
+    for (int i = 1; i <= 2; i++) {
+      containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
+        containerId, "host1" + i + ":1234", null,
+        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
+    }
+    waitForEvents(containerLauncher, 22);
+    Assert.assertEquals(12, threadPool.getPoolSize());
+    Assert.assertNull(containerLauncher.foundErrors);
+
+    containerLauncher.stop();
+  }
+
+  @Test
+  public void testPoolLimits() throws InterruptedException {
+    ApplicationId appId = BuilderUtils.newApplicationId(12345, 67);
+    ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(
+      appId, 3);
+    JobId jobId = MRBuilderUtils.newJobId(appId, 8);
+    TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
+    TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 0);
+    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 10);
+
+    AppContext context = mock(AppContext.class);
+    CustomContainerLauncher containerLauncher = new CustomContainerLauncher(
+      context);
+    Configuration conf = new Configuration();
+    conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT, 12);
+    containerLauncher.init(conf);
+    containerLauncher.start();
+
+    ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();
+
+    // 10 different hosts
+    containerLauncher.expectedCorePoolSize = ContainerLauncherImpl.INITIAL_POOL_SIZE;
+    for (int i = 0; i < 10; i++) {
+      containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
+        containerId, "host" + i + ":1234", null,
+        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
+    }
+    waitForEvents(containerLauncher, 10);
+    Assert.assertEquals(10, threadPool.getPoolSize());
+    Assert.assertNull(containerLauncher.foundErrors);
+
+    // 4 more different hosts, but thread pool size should be capped at 12
+    containerLauncher.expectedCorePoolSize = 12 ;
+    for (int i = 1; i <= 4; i++) {
+      containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId,
+        containerId, "host1" + i + ":1234", null,
+        ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
+    }
+    waitForEvents(containerLauncher, 12);
+    Assert.assertEquals(12, threadPool.getPoolSize());
+    Assert.assertNull(containerLauncher.foundErrors);
+
+    // Make some threads idle so that remaining events are also done.
+    containerLauncher.finishEventHandling = true;
+    waitForEvents(containerLauncher, 14);
+    Assert.assertEquals(12, threadPool.getPoolSize());
+    Assert.assertNull(containerLauncher.foundErrors);
+
+    containerLauncher.stop();
+  }
+
+  private void waitForEvents(CustomContainerLauncher containerLauncher,
+      int expectedNumEvents) throws InterruptedException {
+    int timeOut = 0;
+    while (expectedNumEvents != containerLauncher.numEventsProcessed
+        && timeOut++ < 20) {
+      LOG.info("Waiting for number of events to become " + expectedNumEvents
+          + ". It is now " + containerLauncher.numEventsProcessed);
+      Thread.sleep(1000);
+    }
+    Assert
+      .assertEquals(expectedNumEvents, containerLauncher.numEventsProcessed);
+  }
+
+  @Test
+  public void testSlowNM() throws Exception {
+    test(false);
+  }
+
+  @Test
+  public void testSlowNMWithInterruptsSwallowed() throws Exception {
+    test(true);
+  }
+
+  private void test(boolean swallowInterrupts) throws Exception {
+
+    MRApp app = new MRAppWithSlowNM(swallowInterrupts);
+
+    Configuration conf = new Configuration();
+    int maxAttempts = 1;
+    conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
+    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+
+    // Set low timeout for NM commands
+    conf.setInt(ContainerLauncher.MR_AM_NM_COMMAND_TIMEOUT, 3000);
+
+    Job job = app.submit(conf);
+    app.waitForState(job, JobState.RUNNING);
+
+    Map<TaskId, Task> tasks = job.getTasks();
+    Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
+
+    Task task = tasks.values().iterator().next();
+    app.waitForState(task, TaskState.SCHEDULED);
+
+    Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator()
+        .next().getAttempts();
+    Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts
+        .size());
+
+    TaskAttempt attempt = attempts.values().iterator().next();
+    app.waitForState(attempt, TaskAttemptState.ASSIGNED);
+
+    app.waitForState(job, JobState.FAILED);
+
+    String diagnostics = attempt.getDiagnostics().toString();
+    LOG.info("attempt.getDiagnostics: " + diagnostics);
+    if (swallowInterrupts) {
+      Assert.assertEquals("[Container launch failed for "
+          + "container_0_0000_01_000000 : Start-container for "
+          + "container_0_0000_01_000000 got interrupted. Returning.]",
+          diagnostics);
+    } else {
+      Assert.assertTrue(diagnostics.contains("Container launch failed for "
+          + "container_0_0000_01_000000 : "));
+      Assert.assertTrue(diagnostics
+          .contains(": java.lang.InterruptedException"));
+    }
+
+    app.stop();
+  }
+
+  private final class CustomContainerLauncher extends ContainerLauncherImpl {
+
+    private volatile int expectedCorePoolSize = 0;
+    private volatile int numEventsProcessed = 0;
+    private volatile String foundErrors = null;
+    private volatile boolean finishEventHandling;
+    private CustomContainerLauncher(AppContext context) {
+      super(context);
+    }
+
+    public ThreadPoolExecutor getThreadPool() {
+      return super.launcherPool;
+    }
+
+    protected ContainerLauncherImpl.EventProcessor createEventProcessor(
+        ContainerLauncherEvent event) {
+      // At this point of time, the EventProcessor is being created and so no
+      // additional threads would have been created.
+
+      // Core-pool-size should have increased by now.
+      if (expectedCorePoolSize != launcherPool.getCorePoolSize()) {
+        foundErrors = "Expected " + expectedCorePoolSize + " but found "
+            + launcherPool.getCorePoolSize();
+      }
+
+      return new ContainerLauncherImpl.EventProcessor(event) {
+        @Override
+        public void run() {
+          // do nothing substantial
+          numEventsProcessed++;
+          // Stall
+          synchronized(this) {
+            try {
+              while(!finishEventHandling) {
+                wait(1000);
+              }
+            } catch (InterruptedException e) {
+              ;
+            }
+          }
+        }
+      };
+    }
+  }
+
+  private static class MRAppWithSlowNM extends MRApp {
+
+    final boolean swallowInterrupts;
+
+    public MRAppWithSlowNM(boolean swallowInterrupts) {
+      super(1, 0, false, "TestContainerLauncher", true);
+      this.swallowInterrupts = swallowInterrupts;
+    }
+
+    @Override
+    protected ContainerLauncher createContainerLauncher(AppContext context) {
+      return new ContainerLauncherImpl(context) {
+        @Override
+        protected ContainerManager getCMProxy(ContainerId containerID,
+            String containerManagerBindAddr, ContainerToken containerToken)
+            throws IOException {
+          try {
+            synchronized (this) {
+              wait(); // Just hang the thread simulating a very slow NM.
+            }
+          } catch (InterruptedException e) {
+            LOG.info(e);
+            if (!MRAppWithSlowNM.this.swallowInterrupts) {
+              throw new IOException(e);
+            }
+            Thread.currentThread().interrupt();
+          }
+          return null;
+        }
+      };
+    };
+  }
+}

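The pool-size assertions in the new TestContainerLauncher above encode the launcher's sizing policy: the core pool starts at ContainerLauncherImpl.INITIAL_POOL_SIZE, grows as events arrive for previously unseen node-manager hosts (with some headroom beyond the host count), and is capped by MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT. A rough sketch of that rule as the test observes it (the method, the initial size of 10, and the buffer of 10 are inferred from the test's comments and assertions, not taken from the implementation source):

    class PoolSizeSketch {
      static final int INITIAL_POOL_SIZE = 10;   // assumed; the test reads the real constant

      // Grow the core pool with the number of distinct hosts seen so far,
      // keep a small buffer of spare threads, and never exceed the limit.
      static int corePoolSize(int distinctHosts, int limit) {
        int buffer = 10;                         // "11 hosts + 10 buffer" per the test comment
        int wanted = Math.max(INITIAL_POOL_SIZE, distinctHosts + buffer);
        return Math.min(wanted, limit);          // MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT
      }
    }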
+ 223 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java

@@ -0,0 +1,223 @@
+package org.apache.hadoop.mapreduce.v2.app.launcher;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.net.InetSocketAddress;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.ShuffleHandler;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher.EventType;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.junit.Test;
+
+public class TestContainerLauncherImpl {
+  static final Log LOG = LogFactory.getLog(TestContainerLauncherImpl.class);
+  private static final RecordFactory recordFactory =
+    RecordFactoryProvider.getRecordFactory(null);
+
+  
+  private static class ContainerLauncherImplUnderTest extends 
+    ContainerLauncherImpl {
+
+    private YarnRPC rpc;
+    
+    public ContainerLauncherImplUnderTest(AppContext context, YarnRPC rpc) {
+      super(context);
+      this.rpc = rpc;
+    }
+    
+    @Override
+    protected YarnRPC createYarnRPC(Configuration conf) {
+      return rpc;
+    }
+    
+    public void waitForPoolToIdle() throws InterruptedException {
+      //I wish that we did not need the sleep, but it is here so that we are sure
+      // That the other thread had time to insert the event into the queue and
+      // start processing it.  For some reason we were getting interrupted
+      // exceptions within eventQueue without this sleep.
+      Thread.sleep(100l);
+      LOG.debug("POOL SIZE 1: "+this.eventQueue.size()+
+          " POOL SIZE 2: "+this.launcherPool.getQueue().size()+
+          " ACTIVE COUNT: "+ this.launcherPool.getActiveCount());
+      while(!this.eventQueue.isEmpty() || 
+          !this.launcherPool.getQueue().isEmpty() || 
+          this.launcherPool.getActiveCount() > 0) {
+        Thread.sleep(100l);
+        LOG.debug("POOL SIZE 1: "+this.eventQueue.size()+
+            " POOL SIZE 2: "+this.launcherPool.getQueue().size()+
+            " ACTIVE COUNT: "+ this.launcherPool.getActiveCount());
+      }
+      LOG.debug("POOL SIZE 1: "+this.eventQueue.size()+
+          " POOL SIZE 2: "+this.launcherPool.getQueue().size()+
+          " ACTIVE COUNT: "+ this.launcherPool.getActiveCount());
+    }
+  }
+  
+  public static ContainerId makeContainerId(long ts, int appId, int attemptId,
+      int id) {
+    return BuilderUtils.newContainerId(
+      BuilderUtils.newApplicationAttemptId(
+        BuilderUtils.newApplicationId(ts, appId), attemptId), id);
+  }
+
+  public static TaskAttemptId makeTaskAttemptId(long ts, int appId, int taskId, 
+      TaskType taskType, int id) {
+    ApplicationId aID = BuilderUtils.newApplicationId(ts, appId);
+    JobId jID = MRBuilderUtils.newJobId(aID, id);
+    TaskId tID = MRBuilderUtils.newTaskId(jID, taskId, taskType);
+    return MRBuilderUtils.newTaskAttemptId(tID, id);
+  }
+  
+  @Test
+  public void testHandle() throws Exception {
+    LOG.info("STARTING testHandle");
+    YarnRPC mockRpc = mock(YarnRPC.class);
+    AppContext mockContext = mock(AppContext.class);
+    @SuppressWarnings("rawtypes")
+    EventHandler mockEventHandler = mock(EventHandler.class);
+    when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
+
+    ContainerManager mockCM = mock(ContainerManager.class);
+    when(mockRpc.getProxy(eq(ContainerManager.class), 
+        any(InetSocketAddress.class), any(Configuration.class)))
+        .thenReturn(mockCM);
+    
+    ContainerLauncherImplUnderTest ut = 
+      new ContainerLauncherImplUnderTest(mockContext, mockRpc);
+    
+    Configuration conf = new Configuration();
+    ut.init(conf);
+    ut.start();
+    try {
+      ContainerId contId = makeContainerId(0l, 0, 0, 1);
+      TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
+      String cmAddress = "127.0.0.1:8000";
+      StartContainerResponse startResp = 
+        recordFactory.newRecordInstance(StartContainerResponse.class);
+      startResp.setServiceResponse(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID, 
+          ShuffleHandler.serializeMetaData(80));
+      
+
+      LOG.info("inserting launch event");
+      ContainerRemoteLaunchEvent mockLaunchEvent = 
+        mock(ContainerRemoteLaunchEvent.class);
+      when(mockLaunchEvent.getType())
+        .thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
+      when(mockLaunchEvent.getContainerID())
+        .thenReturn(contId);
+      when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
+      when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
+      when(mockCM.startContainer(any(StartContainerRequest.class))).thenReturn(startResp);
+      ut.handle(mockLaunchEvent);
+      
+      ut.waitForPoolToIdle();
+      
+      verify(mockCM).startContainer(any(StartContainerRequest.class));
+      
+      LOG.info("inserting cleanup event");
+      ContainerLauncherEvent mockCleanupEvent = 
+        mock(ContainerLauncherEvent.class);
+      when(mockCleanupEvent.getType())
+        .thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
+      when(mockCleanupEvent.getContainerID())
+        .thenReturn(contId);
+      when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
+      when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
+      ut.handle(mockCleanupEvent);
+      
+      ut.waitForPoolToIdle();
+      
+      verify(mockCM).stopContainer(any(StopContainerRequest.class));
+    } finally {
+      ut.stop();
+    }
+  }
+  
+  @Test
+  public void testOutOfOrder() throws Exception {
+    LOG.info("STARTING testOutOfOrder");
+    YarnRPC mockRpc = mock(YarnRPC.class);
+    AppContext mockContext = mock(AppContext.class);
+    @SuppressWarnings("rawtypes")
+    EventHandler mockEventHandler = mock(EventHandler.class);
+    when(mockContext.getEventHandler()).thenReturn(mockEventHandler);
+
+    ContainerManager mockCM = mock(ContainerManager.class);
+    when(mockRpc.getProxy(eq(ContainerManager.class), 
+        any(InetSocketAddress.class), any(Configuration.class)))
+        .thenReturn(mockCM);
+    
+    ContainerLauncherImplUnderTest ut = 
+      new ContainerLauncherImplUnderTest(mockContext, mockRpc);
+    
+    Configuration conf = new Configuration();
+    ut.init(conf);
+    ut.start();
+    try {
+      ContainerId contId = makeContainerId(0l, 0, 0, 1);
+      TaskAttemptId taskAttemptId = makeTaskAttemptId(0l, 0, 0, TaskType.MAP, 0);
+      String cmAddress = "127.0.0.1:8000";
+      StartContainerResponse startResp = 
+        recordFactory.newRecordInstance(StartContainerResponse.class);
+      startResp.setServiceResponse(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID, 
+          ShuffleHandler.serializeMetaData(80));
+
+      LOG.info("inserting cleanup event");
+      ContainerLauncherEvent mockCleanupEvent = 
+        mock(ContainerLauncherEvent.class);
+      when(mockCleanupEvent.getType())
+        .thenReturn(EventType.CONTAINER_REMOTE_CLEANUP);
+      when(mockCleanupEvent.getContainerID())
+        .thenReturn(contId);
+      when(mockCleanupEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
+      when(mockCleanupEvent.getContainerMgrAddress()).thenReturn(cmAddress);
+      ut.handle(mockCleanupEvent);
+      
+      ut.waitForPoolToIdle();
+      
+      verify(mockCM, never()).stopContainer(any(StopContainerRequest.class));
+
+      LOG.info("inserting launch event");
+      ContainerRemoteLaunchEvent mockLaunchEvent = 
+        mock(ContainerRemoteLaunchEvent.class);
+      when(mockLaunchEvent.getType())
+        .thenReturn(EventType.CONTAINER_REMOTE_LAUNCH);
+      when(mockLaunchEvent.getContainerID())
+        .thenReturn(contId);
+      when(mockLaunchEvent.getTaskAttemptID()).thenReturn(taskAttemptId);
+      when(mockLaunchEvent.getContainerMgrAddress()).thenReturn(cmAddress);
+      when(mockCM.startContainer(any(StartContainerRequest.class))).thenReturn(startResp);
+      ut.handle(mockLaunchEvent);
+      
+      ut.waitForPoolToIdle();
+      
+      verify(mockCM, never()).startContainer(any(StartContainerRequest.class));
+    } finally {
+      ut.stop();
+    }
+  }
+}

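testOutOfOrder above checks the launcher's defense against event reordering: a CONTAINER_REMOTE_CLEANUP that arrives before the corresponding launch must record the container as finished without issuing a stopContainer RPC (nothing was started), and the late CONTAINER_REMOTE_LAUNCH must then be dropped rather than starting a container nobody will stop. A compact sketch of that guard (names are illustrative; the real bookkeeping lives inside ContainerLauncherImpl):

    import java.util.HashSet;
    import java.util.Set;

    class OutOfOrderGuardSketch {
      // Container IDs whose cleanup has already been processed.
      private final Set<String> done = new HashSet<String>();

      synchronized boolean shouldStopContainer(String containerId) {
        // First event for this container: remember it, but there is
        // nothing to stop yet, so skip the stopContainer RPC.
        return !done.add(containerId);
      }

      synchronized boolean shouldStartContainer(String containerId) {
        // Cleanup already won the race: drop the launch entirely.
        return !done.contains(containerId);
      }
    }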
+ 359 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServices.java

@@ -0,0 +1,359 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.StringReader;
+import java.util.Map;
+
+import javax.ws.rs.core.MediaType;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.MockJobs;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.Before;
+import org.junit.Test;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+import org.xml.sax.InputSource;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.servlet.GuiceServletContextListener;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.ClientResponse.Status;
+import com.sun.jersey.api.client.UniformInterfaceException;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.JerseyTest;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+
+/**
+ * Test the MapReduce Application master info web services api's. Also test
+ * non-existent urls.
+ *
+ *  /ws/v1/mapreduce
+ *  /ws/v1/mapreduce/info
+ */
+public class TestAMWebServices extends JerseyTest {
+
+  private static Configuration conf = new Configuration();
+  private static TestAppContext appContext;
+
+  static class TestAppContext implements AppContext {
+    final ApplicationAttemptId appAttemptID;
+    final ApplicationId appID;
+    final String user = MockJobs.newUserName();
+    final Map<JobId, Job> jobs;
+    final long startTime = System.currentTimeMillis();
+
+    TestAppContext(int appid, int numJobs, int numTasks, int numAttempts) {
+      appID = MockJobs.newAppID(appid);
+      appAttemptID = MockJobs.newAppAttemptID(appID, 0);
+      jobs = MockJobs.newJobs(appID, numJobs, numTasks, numAttempts);
+    }
+
+    TestAppContext() {
+      this(0, 1, 1, 1);
+    }
+
+    @Override
+    public ApplicationAttemptId getApplicationAttemptId() {
+      return appAttemptID;
+    }
+
+    @Override
+    public ApplicationId getApplicationID() {
+      return appID;
+    }
+
+    @Override
+    public CharSequence getUser() {
+      return user;
+    }
+
+    @Override
+    public Job getJob(JobId jobID) {
+      return jobs.get(jobID);
+    }
+
+    @Override
+    public Map<JobId, Job> getAllJobs() {
+      return jobs; // OK
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Override
+    public EventHandler getEventHandler() {
+      return null;
+    }
+
+    @Override
+    public Clock getClock() {
+      return null;
+    }
+
+    @Override
+    public String getApplicationName() {
+      return "TestApp";
+    }
+
+    @Override
+    public long getStartTime() {
+      return startTime;
+    }
+  }
+
+  private Injector injector = Guice.createInjector(new ServletModule() {
+    @Override
+    protected void configureServlets() {
+
+      appContext = new TestAppContext();
+      bind(JAXBContextResolver.class);
+      bind(AMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      bind(AppContext.class).toInstance(appContext);
+      bind(Configuration.class).toInstance(conf);
+
+      serve("/*").with(GuiceContainer.class);
+    }
+  });
+
+  public class GuiceServletConfig extends GuiceServletContextListener {
+
+    @Override
+    protected Injector getInjector() {
+      return injector;
+    }
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+
+  public TestAMWebServices() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.mapreduce.v2.app.webapp")
+        .contextListenerClass(GuiceServletConfig.class)
+        .filterClass(com.google.inject.servlet.GuiceFilter.class)
+        .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testAM() throws JSONException, Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    verifyAMInfo(json.getJSONObject("info"), appContext);
+  }
+
+  @Test
+  public void testAMSlash() throws JSONException, Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("mapreduce/")
+        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    verifyAMInfo(json.getJSONObject("info"), appContext);
+  }
+
+  @Test
+  public void testAMDefault() throws JSONException, Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("mapreduce/")
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    verifyAMInfo(json.getJSONObject("info"), appContext);
+  }
+
+  @Test
+  public void testAMXML() throws JSONException, Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+    String xml = response.getEntity(String.class);
+    verifyAMInfoXML(xml, appContext);
+  }
+
+  @Test
+  public void testInfo() throws JSONException, Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+        .path("info").accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    verifyAMInfo(json.getJSONObject("info"), appContext);
+  }
+
+  @Test
+  public void testInfoSlash() throws JSONException, Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+        .path("info/").accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    verifyAMInfo(json.getJSONObject("info"), appContext);
+  }
+
+  @Test
+  public void testInfoDefault() throws JSONException, Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+        .path("info/").get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    verifyAMInfo(json.getJSONObject("info"), appContext);
+  }
+
+  @Test
+  public void testInfoXML() throws JSONException, Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+        .path("info/").accept(MediaType.APPLICATION_XML)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+    String xml = response.getEntity(String.class);
+    verifyAMInfoXML(xml, appContext);
+  }
+
+  @Test
+  public void testInvalidUri() throws JSONException, Exception {
+    WebResource r = resource();
+    String responseStr = "";
+    try {
+      responseStr = r.path("ws").path("v1").path("mapreduce").path("bogus")
+          .accept(MediaType.APPLICATION_JSON).get(String.class);
+      fail("should have thrown exception on invalid uri");
+    } catch (UniformInterfaceException ue) {
+      ClientResponse response = ue.getResponse();
+      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
+      WebServicesTestUtils.checkStringMatch(
+          "error string exists and shouldn't", "", responseStr);
+    }
+  }
+
+  @Test
+  public void testInvalidUri2() throws JSONException, Exception {
+    WebResource r = resource();
+    String responseStr = "";
+    try {
+      responseStr = r.path("ws").path("v1").path("invalid")
+          .accept(MediaType.APPLICATION_JSON).get(String.class);
+      fail("should have thrown exception on invalid uri");
+    } catch (UniformInterfaceException ue) {
+      ClientResponse response = ue.getResponse();
+      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
+      WebServicesTestUtils.checkStringMatch(
+          "error string exists and shouldn't", "", responseStr);
+    }
+  }
+
+  @Test
+  public void testInvalidAccept() throws JSONException, Exception {
+    WebResource r = resource();
+    String responseStr = "";
+    try {
+      responseStr = r.path("ws").path("v1").path("mapreduce")
+          .accept(MediaType.TEXT_PLAIN).get(String.class);
+      fail("should have thrown exception on invalid uri");
+    } catch (UniformInterfaceException ue) {
+      ClientResponse response = ue.getResponse();
+      assertEquals(Status.INTERNAL_SERVER_ERROR,
+          response.getClientResponseStatus());
+      WebServicesTestUtils.checkStringMatch(
+          "error string exists and shouldn't", "", responseStr);
+    }
+  }
+
+  public void verifyAMInfo(JSONObject info, TestAppContext ctx)
+      throws JSONException {
+    assertEquals("incorrect number of elements", 5, info.length());
+
+    verifyAMInfoGeneric(ctx, info.getString("appId"), info.getString("user"),
+        info.getString("name"), info.getLong("startedOn"),
+        info.getLong("elapsedTime"));
+  }
+
+  public void verifyAMInfoXML(String xml, TestAppContext ctx)
+      throws JSONException, Exception {
+    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+    DocumentBuilder db = dbf.newDocumentBuilder();
+    InputSource is = new InputSource();
+    is.setCharacterStream(new StringReader(xml));
+    Document dom = db.parse(is);
+    NodeList nodes = dom.getElementsByTagName("info");
+    assertEquals("incorrect number of elements", 1, nodes.getLength());
+
+    for (int i = 0; i < nodes.getLength(); i++) {
+      Element element = (Element) nodes.item(i);
+      verifyAMInfoGeneric(ctx,
+          WebServicesTestUtils.getXmlString(element, "appId"),
+          WebServicesTestUtils.getXmlString(element, "user"),
+          WebServicesTestUtils.getXmlString(element, "name"),
+          WebServicesTestUtils.getXmlLong(element, "startedOn"),
+          WebServicesTestUtils.getXmlLong(element, "elapsedTime"));
+    }
+  }
+
+  public void verifyAMInfoGeneric(TestAppContext ctx, String id, String user,
+      String name, long startedOn, long elapsedTime) {
+
+    WebServicesTestUtils.checkStringMatch("id", ctx.getApplicationID()
+        .toString(), id);
+    WebServicesTestUtils.checkStringMatch("user", ctx.getUser().toString(),
+        user);
+    WebServicesTestUtils.checkStringMatch("name", ctx.getApplicationName(),
+        name);
+
+    assertEquals("startedOn incorrect", ctx.getStartTime(), startedOn);
+    assertTrue("elapsedTime not greater then 0", (elapsedTime > 0));
+
+  }
+}
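For orientation, the info endpoint exercised above can also be queried with a plain Jersey 1.x client outside the JerseyTest harness. A minimal sketch, assuming a hypothetical AM web address (in practice the tracking URL comes from the YARN application report):

import javax.ws.rs.core.MediaType;

import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.WebResource;

public class AMInfoClient {
  public static void main(String[] args) {
    Client client = Client.create();
    // Hypothetical address; the real AM tracking URL comes from the application report.
    WebResource r = client.resource("http://localhost:8088");
    // Same path the tests build: /ws/v1/mapreduce/info, JSON requested explicitly.
    String json = r.path("ws").path("v1").path("mapreduce").path("info")
        .accept(MediaType.APPLICATION_JSON).get(String.class);
    System.out.println(json);
  }
}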

+ 732 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesAttempts.java

@@ -0,0 +1,732 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.StringReader;
+import java.util.List;
+import java.util.Map;
+
+import javax.ws.rs.core.MediaType;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.MockJobs;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.Before;
+import org.junit.Test;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+import org.xml.sax.InputSource;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.servlet.GuiceServletContextListener;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.UniformInterfaceException;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.client.ClientResponse.Status;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.JerseyTest;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+
+/**
+ * Test the app master web service REST API for getting task attempts, a
+ * specific task attempt, and task attempt counters
+ *
+ * /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts
+ * /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}
+ * /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/counters
+ */
+public class TestAMWebServicesAttempts extends JerseyTest {
+
+  private static Configuration conf = new Configuration();
+  private static TestAppContext appContext;
+
+  static class TestAppContext implements AppContext {
+    final ApplicationAttemptId appAttemptID;
+    final ApplicationId appID;
+    final String user = MockJobs.newUserName();
+    final Map<JobId, Job> jobs;
+    final long startTime = System.currentTimeMillis();
+
+    TestAppContext(int appid, int numJobs, int numTasks, int numAttempts) {
+      appID = MockJobs.newAppID(appid);
+      appAttemptID = MockJobs.newAppAttemptID(appID, 0);
+      jobs = MockJobs.newJobs(appID, numJobs, numTasks, numAttempts);
+    }
+
+    TestAppContext() {
+      this(0, 1, 2, 1);
+    }
+
+    @Override
+    public ApplicationAttemptId getApplicationAttemptId() {
+      return appAttemptID;
+    }
+
+    @Override
+    public ApplicationId getApplicationID() {
+      return appID;
+    }
+
+    @Override
+    public CharSequence getUser() {
+      return user;
+    }
+
+    @Override
+    public Job getJob(JobId jobID) {
+      return jobs.get(jobID);
+    }
+
+    @Override
+    public Map<JobId, Job> getAllJobs() {
+      return jobs; // OK
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Override
+    public EventHandler getEventHandler() {
+      return null;
+    }
+
+    @Override
+    public Clock getClock() {
+      return null;
+    }
+
+    @Override
+    public String getApplicationName() {
+      return "TestApp";
+    }
+
+    @Override
+    public long getStartTime() {
+      return startTime;
+    }
+  }
+
+  private Injector injector = Guice.createInjector(new ServletModule() {
+    @Override
+    protected void configureServlets() {
+
+      appContext = new TestAppContext();
+      bind(JAXBContextResolver.class);
+      bind(AMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      bind(AppContext.class).toInstance(appContext);
+      bind(Configuration.class).toInstance(conf);
+
+      serve("/*").with(GuiceContainer.class);
+    }
+  });
+
+  public class GuiceServletConfig extends GuiceServletContextListener {
+
+    @Override
+    protected Injector getInjector() {
+      return injector;
+    }
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+
+  }
+
+  public TestAMWebServicesAttempts() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.mapreduce.v2.app.webapp")
+        .contextListenerClass(GuiceServletConfig.class)
+        .filterClass(com.google.inject.servlet.GuiceFilter.class)
+        .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testTaskAttempts() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+
+        String tid = MRApps.toString(task.getID());
+        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+            .path("jobs").path(jobId).path("tasks").path(tid).path("attempts")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+        JSONObject json = response.getEntity(JSONObject.class);
+        verifyAMTaskAttempts(json, task);
+      }
+    }
+  }
+
+  @Test
+  public void testTaskAttemptsSlash() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+
+        String tid = MRApps.toString(task.getID());
+        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+            .path("jobs").path(jobId).path("tasks").path(tid).path("attempts/")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+        JSONObject json = response.getEntity(JSONObject.class);
+        verifyAMTaskAttempts(json, task);
+      }
+    }
+  }
+
+  @Test
+  public void testTaskAttemptsDefault() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+
+        String tid = MRApps.toString(task.getID());
+        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+            .path("jobs").path(jobId).path("tasks").path(tid).path("attempts")
+            .get(ClientResponse.class);
+        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+        JSONObject json = response.getEntity(JSONObject.class);
+        verifyAMTaskAttempts(json, task);
+      }
+    }
+  }
+
+  @Test
+  public void testTaskAttemptsXML() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+
+        String tid = MRApps.toString(task.getID());
+        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+            .path("jobs").path(jobId).path("tasks").path(tid).path("attempts")
+            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+
+        assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+        String xml = response.getEntity(String.class);
+        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+        DocumentBuilder db = dbf.newDocumentBuilder();
+        InputSource is = new InputSource();
+        is.setCharacterStream(new StringReader(xml));
+        Document dom = db.parse(is);
+        NodeList attempts = dom.getElementsByTagName("taskAttempts");
+        assertEquals("incorrect number of elements", 1, attempts.getLength());
+
+        NodeList nodes = dom.getElementsByTagName("taskAttempt");
+        verifyAMTaskAttemptsXML(nodes, task);
+      }
+    }
+  }
+
+  @Test
+  public void testTaskAttemptId() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+        String tid = MRApps.toString(task.getID());
+
+        for (TaskAttempt att : task.getAttempts().values()) {
+          TaskAttemptId attemptid = att.getID();
+          String attid = MRApps.toString(attemptid);
+
+          ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+              .path("jobs").path(jobId).path("tasks").path(tid)
+              .path("attempts").path(attid).accept(MediaType.APPLICATION_JSON)
+              .get(ClientResponse.class);
+          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+          JSONObject json = response.getEntity(JSONObject.class);
+          assertEquals("incorrect number of elements", 1, json.length());
+          JSONObject info = json.getJSONObject("taskAttempt");
+          verifyAMTaskAttempt(info, att, task.getType());
+        }
+      }
+    }
+  }
+
+  @Test
+  public void testTaskAttemptIdSlash() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+        String tid = MRApps.toString(task.getID());
+
+        for (TaskAttempt att : task.getAttempts().values()) {
+          TaskAttemptId attemptid = att.getID();
+          String attid = MRApps.toString(attemptid);
+
+          ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+              .path("jobs").path(jobId).path("tasks").path(tid)
+              .path("attempts").path(attid + "/")
+              .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+          JSONObject json = response.getEntity(JSONObject.class);
+          assertEquals("incorrect number of elements", 1, json.length());
+          JSONObject info = json.getJSONObject("taskAttempt");
+          verifyAMTaskAttempt(info, att, task.getType());
+        }
+      }
+    }
+  }
+
+  @Test
+  public void testTaskAttemptIdDefault() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+        String tid = MRApps.toString(task.getID());
+
+        for (TaskAttempt att : task.getAttempts().values()) {
+          TaskAttemptId attemptid = att.getID();
+          String attid = MRApps.toString(attemptid);
+
+          ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+              .path("jobs").path(jobId).path("tasks").path(tid)
+              .path("attempts").path(attid).get(ClientResponse.class);
+          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+          JSONObject json = response.getEntity(JSONObject.class);
+          assertEquals("incorrect number of elements", 1, json.length());
+          JSONObject info = json.getJSONObject("taskAttempt");
+          verifyAMTaskAttempt(info, att, task.getType());
+        }
+      }
+    }
+  }
+
+  @Test
+  public void testTaskAttemptIdXML() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+
+        String tid = MRApps.toString(task.getID());
+        for (TaskAttempt att : task.getAttempts().values()) {
+          TaskAttemptId attemptid = att.getID();
+          String attid = MRApps.toString(attemptid);
+
+          ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+              .path("jobs").path(jobId).path("tasks").path(tid)
+              .path("attempts").path(attid).accept(MediaType.APPLICATION_XML)
+              .get(ClientResponse.class);
+
+          assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+          String xml = response.getEntity(String.class);
+          DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+          DocumentBuilder db = dbf.newDocumentBuilder();
+          InputSource is = new InputSource();
+          is.setCharacterStream(new StringReader(xml));
+          Document dom = db.parse(is);
+          NodeList nodes = dom.getElementsByTagName("taskAttempt");
+          for (int i = 0; i < nodes.getLength(); i++) {
+            Element element = (Element) nodes.item(i);
+            verifyAMTaskAttemptXML(element, att, task.getType());
+          }
+        }
+      }
+    }
+  }
+
+  @Test
+  public void testTaskAttemptIdBogus() throws JSONException, Exception {
+
+    testTaskAttemptIdErrorGeneric("bogusid",
+        "java.lang.Exception: Error parsing attempt ID: bogusid");
+  }
+
+  @Test
+  public void testTaskAttemptIdNonExist() throws JSONException, Exception {
+
+    testTaskAttemptIdErrorGeneric(
+        "attempt_12345_0_0_r_1_0",
+        "java.lang.Exception: Error getting info on task attempt id attempt_12345_0_0_r_1_0");
+  }
+
+  @Test
+  public void testTaskAttemptIdInvalid() throws JSONException, Exception {
+
+    testTaskAttemptIdErrorGeneric("attempt_12345_0_0_d_1_0",
+        "java.lang.Exception: Unknown task symbol: d");
+  }
+
+  @Test
+  public void testTaskAttemptIdInvalid2() throws JSONException, Exception {
+
+    testTaskAttemptIdErrorGeneric("attempt_12345_0_r_1_0",
+        "java.lang.Exception: For input string: \"r\"");
+  }
+
+  @Test
+  public void testTaskAttemptIdInvalid3() throws JSONException, Exception {
+
+    testTaskAttemptIdErrorGeneric("attempt_12345_0_0_r_1",
+        "java.lang.Exception: Error parsing attempt ID: attempt_12345_0_0_r_1");
+  }
+
+  private void testTaskAttemptIdErrorGeneric(String attid, String error)
+      throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+        String tid = MRApps.toString(task.getID());
+
+        try {
+          r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
+              .path("tasks").path(tid).path("attempts").path(attid)
+              .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
+          fail("should have thrown exception on invalid uri");
+        } catch (UniformInterfaceException ue) {
+          ClientResponse response = ue.getResponse();
+          assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
+          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+          JSONObject msg = response.getEntity(JSONObject.class);
+          JSONObject exception = msg.getJSONObject("RemoteException");
+          assertEquals("incorrect number of elements", 3, exception.length());
+          String message = exception.getString("message");
+          String type = exception.getString("exception");
+          String classname = exception.getString("javaClassName");
+          WebServicesTestUtils.checkStringMatch("exception message", error,
+              message);
+          WebServicesTestUtils.checkStringMatch("exception type",
+              "NotFoundException", type);
+          WebServicesTestUtils.checkStringMatch("exception classname",
+              "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
+        }
+      }
+    }
+  }
+
+  public void verifyAMTaskAttemptXML(Element element, TaskAttempt att,
+      TaskType ttype) {
+    verifyTaskAttemptGeneric(att, ttype,
+        WebServicesTestUtils.getXmlString(element, "id"),
+        WebServicesTestUtils.getXmlString(element, "state"),
+        WebServicesTestUtils.getXmlString(element, "type"),
+        WebServicesTestUtils.getXmlString(element, "rack"),
+        WebServicesTestUtils.getXmlString(element, "nodeHttpAddress"),
+        WebServicesTestUtils.getXmlString(element, "diagnostics"),
+        WebServicesTestUtils.getXmlString(element, "assignedContainerId"),
+        WebServicesTestUtils.getXmlLong(element, "startTime"),
+        WebServicesTestUtils.getXmlLong(element, "finishTime"),
+        WebServicesTestUtils.getXmlLong(element, "elapsedTime"),
+        WebServicesTestUtils.getXmlFloat(element, "progress"));
+
+    if (ttype == TaskType.REDUCE) {
+      verifyReduceTaskAttemptGeneric(att,
+          WebServicesTestUtils.getXmlLong(element, "shuffleFinishTime"),
+          WebServicesTestUtils.getXmlLong(element, "mergeFinishTime"),
+          WebServicesTestUtils.getXmlLong(element, "elapsedShuffleTime"),
+          WebServicesTestUtils.getXmlLong(element, "elapsedMergeTime"),
+          WebServicesTestUtils.getXmlLong(element, "elapsedReduceTime"));
+    }
+  }
+
+  public void verifyAMTaskAttempt(JSONObject info, TaskAttempt att,
+      TaskType ttype) throws JSONException {
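+    // Reduce attempts report five extra shuffle/merge timing fields
+    // (shuffleFinishTime, mergeFinishTime, elapsedShuffleTime,
+    // elapsedMergeTime, elapsedReduceTime), hence 16 vs 11 elements.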
+    if (ttype == TaskType.REDUCE) {
+      assertEquals("incorrect number of elements", 16, info.length());
+    } else {
+      assertEquals("incorrect number of elements", 11, info.length());
+    }
+
+    verifyTaskAttemptGeneric(att, ttype, info.getString("id"),
+        info.getString("state"), info.getString("type"),
+        info.getString("rack"), info.getString("nodeHttpAddress"),
+        info.getString("diagnostics"), info.getString("assignedContainerId"),
+        info.getLong("startTime"), info.getLong("finishTime"),
+        info.getLong("elapsedTime"), (float) info.getDouble("progress"));
+
+    if (ttype == TaskType.REDUCE) {
+      verifyReduceTaskAttemptGeneric(att, info.getLong("shuffleFinishTime"),
+          info.getLong("mergeFinishTime"), info.getLong("elapsedShuffleTime"),
+          info.getLong("elapsedMergeTime"), info.getLong("elapsedReduceTime"));
+    }
+  }
+
+  public void verifyAMTaskAttempts(JSONObject json, Task task)
+      throws JSONException {
+    assertEquals("incorrect number of elements", 1, json.length());
+    JSONObject attempts = json.getJSONObject("taskAttempts");
+    assertEquals("incorrect number of elements", 1, json.length());
+    JSONArray arr = attempts.getJSONArray("taskAttempt");
+    for (TaskAttempt att : task.getAttempts().values()) {
+      TaskAttemptId id = att.getID();
+      String attid = MRApps.toString(id);
+      boolean found = false;
+
+      for (int i = 0; i < arr.length(); i++) {
+        JSONObject info = arr.getJSONObject(i);
+        if (attid.matches(info.getString("id"))) {
+          found = true;
+          verifyAMTaskAttempt(info, att, task.getType());
+        }
+      }
+      assertTrue("task attempt with id: " + attid
+          + " not in web service output", found);
+    }
+  }
+
+  public void verifyAMTaskAttemptsXML(NodeList nodes, Task task) {
+    assertEquals("incorrect number of elements", 1, nodes.getLength());
+
+    for (TaskAttempt att : task.getAttempts().values()) {
+      TaskAttemptId id = att.getID();
+      String attid = MRApps.toString(id);
+      boolean found = false;
+      for (int i = 0; i < nodes.getLength(); i++) {
+        Element element = (Element) nodes.item(i);
+
+        if (attid.matches(WebServicesTestUtils.getXmlString(element, "id"))) {
+          found = true;
+          verifyAMTaskAttemptXML(element, att, task.getType());
+        }
+      }
+      assertTrue("task with id: " + attid + " not in web service output", found);
+    }
+  }
+
+  public void verifyTaskAttemptGeneric(TaskAttempt ta, TaskType ttype,
+      String id, String state, String type, String rack,
+      String nodeHttpAddress, String diagnostics, String assignedContainerId,
+      long startTime, long finishTime, long elapsedTime, float progress) {
+
+    TaskAttemptId attid = ta.getID();
+    String attemptId = MRApps.toString(attid);
+
+    WebServicesTestUtils.checkStringMatch("id", attemptId, id);
+    WebServicesTestUtils.checkStringMatch("type", ttype.toString(), type);
+    WebServicesTestUtils.checkStringMatch("state", ta.getState().toString(),
+        state);
+    WebServicesTestUtils.checkStringMatch("rack", ta.getNodeRackName(), rack);
+    WebServicesTestUtils.checkStringMatch("nodeHttpAddress",
+        ta.getNodeHttpAddress(), nodeHttpAddress);
+
+    String expectDiag = "";
+    List<String> diagnosticsList = ta.getDiagnostics();
+    if (diagnosticsList != null && !diagnosticsList.isEmpty()) {
+      StringBuffer b = new StringBuffer();
+      for (String diag : diagnosticsList) {
+        b.append(diag);
+      }
+      expectDiag = b.toString();
+    }
+    WebServicesTestUtils.checkStringMatch("diagnostics", expectDiag,
+        diagnostics);
+    WebServicesTestUtils.checkStringMatch("assignedContainerId",
+        ConverterUtils.toString(ta.getAssignedContainerID()),
+        assignedContainerId);
+
+    assertEquals("startTime wrong", ta.getLaunchTime(), startTime);
+    assertEquals("finishTime wrong", ta.getFinishTime(), finishTime);
+    assertEquals("elapsedTime wrong", finishTime - startTime, elapsedTime);
+    assertEquals("progress wrong", ta.getProgress() * 100, progress, 1e-3f);
+  }
+
+  public void verifyReduceTaskAttemptGeneric(TaskAttempt ta,
+      long shuffleFinishTime, long mergeFinishTime, long elapsedShuffleTime,
+      long elapsedMergeTime, long elapsedReduceTime) {
+
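+    // The elapsed values are plain differences of the phase timestamps:
+    // launch -> shuffle finish -> merge (sort) finish -> attempt finish.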
+    assertEquals("shuffleFinishTime wrong", ta.getShuffleFinishTime(),
+        shuffleFinishTime);
+    assertEquals("mergeFinishTime wrong", ta.getSortFinishTime(),
+        mergeFinishTime);
+    assertEquals("elapsedShuffleTime wrong",
+        ta.getLaunchTime() - ta.getShuffleFinishTime(), elapsedShuffleTime);
+    assertEquals("elapsedMergeTime wrong",
+        ta.getShuffleFinishTime() - ta.getSortFinishTime(), elapsedMergeTime);
+    assertEquals("elapsedReduceTime wrong",
+        ta.getSortFinishTime() - ta.getFinishTime(), elapsedReduceTime);
+  }
+
+  @Test
+  public void testTaskAttemptIdCounters() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+        String tid = MRApps.toString(task.getID());
+
+        for (TaskAttempt att : task.getAttempts().values()) {
+          TaskAttemptId attemptid = att.getID();
+          String attid = MRApps.toString(attemptid);
+
+          ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+              .path("jobs").path(jobId).path("tasks").path(tid)
+              .path("attempts").path(attid).path("counters")
+              .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+          assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+          JSONObject json = response.getEntity(JSONObject.class);
+          assertEquals("incorrect number of elements", 1, json.length());
+          JSONObject info = json.getJSONObject("JobTaskAttemptCounters");
+          verifyAMJobTaskAttemptCounters(info, att);
+        }
+      }
+    }
+  }
+
+  @Test
+  public void testTaskAttemptIdXMLCounters() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+
+        String tid = MRApps.toString(task.getID());
+        for (TaskAttempt att : task.getAttempts().values()) {
+          TaskAttemptId attemptid = att.getID();
+          String attid = MRApps.toString(attemptid);
+
+          ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+              .path("jobs").path(jobId).path("tasks").path(tid)
+              .path("attempts").path(attid).path("counters")
+              .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+
+          assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+          String xml = response.getEntity(String.class);
+          DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+          DocumentBuilder db = dbf.newDocumentBuilder();
+          InputSource is = new InputSource();
+          is.setCharacterStream(new StringReader(xml));
+          Document dom = db.parse(is);
+          NodeList nodes = dom.getElementsByTagName("JobTaskAttemptCounters");
+
+          verifyAMTaskCountersXML(nodes, att);
+        }
+      }
+    }
+  }
+
+  public void verifyAMJobTaskAttemptCounters(JSONObject info, TaskAttempt att)
+      throws JSONException {
+
+    assertEquals("incorrect number of elements", 2, info.length());
+
+    WebServicesTestUtils.checkStringMatch("id", MRApps.toString(att.getID()),
+        info.getString("id"));
+
+    // just do simple verification of the fields - not that the data
+    // in the fields is correct
+    JSONArray counterGroups = info.getJSONArray("taskAttemptCounterGroup");
+    for (int i = 0; i < counterGroups.length(); i++) {
+      JSONObject counterGroup = counterGroups.getJSONObject(i);
+      String name = counterGroup.getString("counterGroupName");
+      assertTrue("name not set", (name != null && !name.isEmpty()));
+      JSONArray counters = counterGroup.getJSONArray("counter");
+      for (int j = 0; j < counters.length(); j++) {
+        JSONObject counter = counters.getJSONObject(j);
+        String counterName = counter.getString("name");
+        assertTrue("name not set",
+            (counterName != null && !counterName.isEmpty()));
+        long value = counter.getLong("value");
+        assertTrue("value  >= 0", value >= 0);
+      }
+    }
+  }
+
+  public void verifyAMTaskCountersXML(NodeList nodes, TaskAttempt att) {
+
+    for (int i = 0; i < nodes.getLength(); i++) {
+
+      Element element = (Element) nodes.item(i);
+      WebServicesTestUtils.checkStringMatch("id", MRApps.toString(att.getID()),
+          WebServicesTestUtils.getXmlString(element, "id"));
+      // just do simple verification of the fields - not that the data
+      // in the fields is correct
+      NodeList groups = element.getElementsByTagName("taskAttemptCounterGroup");
+
+      for (int j = 0; j < groups.getLength(); j++) {
+        Element counters = (Element) groups.item(j);
+        assertNotNull("should have counters in the web service info", counters);
+        String name = WebServicesTestUtils.getXmlString(counters,
+            "counterGroupName");
+        assertTrue("name not set", (name != null && !name.isEmpty()));
+        NodeList counterArr = counters.getElementsByTagName("counter");
+        for (int z = 0; z < counterArr.getLength(); z++) {
+          Element counter = (Element) counterArr.item(z);
+          String counterName = WebServicesTestUtils.getXmlString(counter,
+              "name");
+          assertTrue("counter name not set",
+              (counterName != null && !counterName.isEmpty()));
+
+          long value = WebServicesTestUtils.getXmlLong(counter, "value");
+          assertTrue("value not >= 0", value >= 0);
+
+        }
+      }
+    }
+  }
+
+}
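As a companion to verifyAMTaskAttempts above, a minimal sketch of unwrapping a taskAttempts payload with the same Jettison classes the test imports; the response body here is a hypothetical stand-in shaped like the one the assertions expect:

import org.codehaus.jettison.json.JSONArray;
import org.codehaus.jettison.json.JSONObject;

public class TaskAttemptsParseSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical body: {"taskAttempts":{"taskAttempt":[{...}, ...]}}
    String body = "{\"taskAttempts\":{\"taskAttempt\":"
        + "[{\"id\":\"attempt_0_0000_m_000000_0\",\"progress\":100.0}]}}";
    JSONObject json = new JSONObject(body);
    JSONArray arr = json.getJSONObject("taskAttempts").getJSONArray("taskAttempt");
    for (int i = 0; i < arr.length(); i++) {
      JSONObject info = arr.getJSONObject(i);
      System.out.println(info.getString("id") + " " + info.getDouble("progress"));
    }
  }
}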

+ 336 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobConf.java

@@ -0,0 +1,336 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.StringReader;
+import java.util.Map;
+
+import javax.ws.rs.core.MediaType;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.MockJobs;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.Test;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+import org.xml.sax.InputSource;
+
+import com.google.common.collect.Maps;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.servlet.GuiceServletContextListener;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.JerseyTest;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+
+/**
+ * Test the app master web service REST API for getting the job conf. This
+ * requires creating a temporary configuration file.
+ *
+ * /ws/v1/mapreduce/jobs/{jobid}/conf
+ */
+public class TestAMWebServicesJobConf extends JerseyTest {
+
+  private static Configuration conf = new Configuration();
+  private static TestAppContext appContext;
+
+  private static File testConfDir = new File("target",
+      TestAMWebServicesJobConf.class.getSimpleName() + "confDir");
+
+  static class TestAppContext implements AppContext {
+    final ApplicationAttemptId appAttemptID;
+    final ApplicationId appID;
+    final String user = MockJobs.newUserName();
+    final Map<JobId, Job> jobs;
+    final long startTime = System.currentTimeMillis();
+
+    TestAppContext(int appid, int numTasks, int numAttempts, Path confPath) {
+      appID = MockJobs.newAppID(appid);
+      appAttemptID = MockJobs.newAppAttemptID(appID, 0);
+      Map<JobId, Job> map = Maps.newHashMap();
+      Job job = MockJobs.newJob(appID, 0, numTasks, numAttempts, confPath);
+      map.put(job.getID(), job);
+      jobs = map;
+    }
+
+    @Override
+    public ApplicationAttemptId getApplicationAttemptId() {
+      return appAttemptID;
+    }
+
+    @Override
+    public ApplicationId getApplicationID() {
+      return appID;
+    }
+
+    @Override
+    public CharSequence getUser() {
+      return user;
+    }
+
+    @Override
+    public Job getJob(JobId jobID) {
+      return jobs.get(jobID);
+    }
+
+    @Override
+    public Map<JobId, Job> getAllJobs() {
+      return jobs; // OK
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Override
+    public EventHandler getEventHandler() {
+      return null;
+    }
+
+    @Override
+    public Clock getClock() {
+      return null;
+    }
+
+    @Override
+    public String getApplicationName() {
+      return "TestApp";
+    }
+
+    @Override
+    public long getStartTime() {
+      return startTime;
+    }
+  }
+
+  private Injector injector = Guice.createInjector(new ServletModule() {
+    @Override
+    protected void configureServlets() {
+
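+      // Materialize a real job.xml on the local filesystem so the conf
+      // REST endpoint has a file to serve back.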
+      Path confPath = new Path(testConfDir.toString(),
+          MRJobConfig.JOB_CONF_FILE);
+      Configuration config = new Configuration();
+
+      FileSystem localFs;
+      try {
+        localFs = FileSystem.getLocal(config);
+        confPath = localFs.makeQualified(confPath);
+
+        OutputStream out = localFs.create(confPath);
+        try {
+          conf.writeXml(out);
+        } finally {
+          out.close();
+        }
+        if (!localFs.exists(confPath)) {
+          fail("error creating config file: " + confPath);
+        }
+
+      } catch (IOException e) {
+        fail("error creating config file: " + e.getMessage());
+      }
+
+      appContext = new TestAppContext(0, 2, 1, confPath);
+
+      bind(JAXBContextResolver.class);
+      bind(AMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      bind(AppContext.class).toInstance(appContext);
+      bind(Configuration.class).toInstance(conf);
+
+      serve("/*").with(GuiceContainer.class);
+    }
+  });
+
+  public class GuiceServletConfig extends GuiceServletContextListener {
+
+    @Override
+    protected Injector getInjector() {
+      return injector;
+    }
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    testConfDir.mkdir();
+
+  }
+
+  @AfterClass
+  public static void stop() {
+    FileUtil.fullyDelete(testConfDir);
+  }
+
+  public TestAMWebServicesJobConf() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.mapreduce.v2.app.webapp")
+        .contextListenerClass(GuiceServletConfig.class)
+        .filterClass(com.google.inject.servlet.GuiceFilter.class)
+        .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testJobConf() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).path("conf")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("conf");
+      verifyAMJobConf(info, jobsMap.get(id));
+    }
+  }
+
+  @Test
+  public void testJobConfSlash() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).path("conf/")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("conf");
+      verifyAMJobConf(info, jobsMap.get(id));
+    }
+  }
+
+  @Test
+  public void testJobConfDefault() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).path("conf").get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("conf");
+      verifyAMJobConf(info, jobsMap.get(id));
+    }
+  }
+
+  @Test
+  public void testJobConfXML() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).path("conf")
+          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+      String xml = response.getEntity(String.class);
+      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+      DocumentBuilder db = dbf.newDocumentBuilder();
+      InputSource is = new InputSource();
+      is.setCharacterStream(new StringReader(xml));
+      Document dom = db.parse(is);
+      NodeList info = dom.getElementsByTagName("conf");
+      verifyAMJobConfXML(info, jobsMap.get(id));
+    }
+  }
+
+  public void verifyAMJobConf(JSONObject info, Job job) throws JSONException {
+
+    assertEquals("incorrect number of elements", 2, info.length());
+
+    WebServicesTestUtils.checkStringMatch("path", job.getConfFile().toString(),
+        info.getString("path"));
+    // just do simple verification of the fields - not that the data
+    // in the fields is correct
+    JSONArray properties = info.getJSONArray("property");
+    for (int i = 0; i < properties.length(); i++) {
+      JSONObject prop = properties.getJSONObject(i);
+      String name = prop.getString("name");
+      String value = prop.getString("value");
+      assertTrue("name not set", (name != null && !name.isEmpty()));
+      assertTrue("value not set", (value != null && !value.isEmpty()));
+    }
+  }
+
+  public void verifyAMJobConfXML(NodeList nodes, Job job) {
+
+    assertEquals("incorrect number of elements", 1, nodes.getLength());
+
+    for (int i = 0; i < nodes.getLength(); i++) {
+      Element element = (Element) nodes.item(i);
+      WebServicesTestUtils.checkStringMatch("path", job.getConfFile()
+          .toString(), WebServicesTestUtils.getXmlString(element, "path"));
+
+      // just do simple verification of the fields - not that the data
+      // in the fields is correct
+      NodeList properties = element.getElementsByTagName("property");
+
+      for (int j = 0; j < properties.getLength(); j++) {
+        Element property = (Element) properties.item(j);
+        assertNotNull("should have counters in the web service info", property);
+        String name = WebServicesTestUtils.getXmlString(property, "name");
+        String value = WebServicesTestUtils.getXmlString(property, "value");
+        assertTrue("name not set", (name != null && !name.isEmpty()));
+        assertTrue("name not set", (value != null && !value.isEmpty()));
+      }
+    }
+  }
+
+}
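The conf file handling in configureServlets above is stock Hadoop API; a minimal round-trip sketch under the same local-filesystem assumption (the path and property here are hypothetical):

import java.io.OutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ConfRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("example.key", "example.value"); // hypothetical property
    FileSystem localFs = FileSystem.getLocal(conf);
    Path confPath = localFs.makeQualified(new Path("target/job.xml")); // hypothetical path
    OutputStream out = localFs.create(confPath);
    try {
      conf.writeXml(out); // same serialization the test writes
    } finally {
      out.close();
    }
    Configuration readBack = new Configuration(false);
    readBack.addResource(confPath); // reload what the conf endpoint would serve
    System.out.println(readBack.get("example.key"));
  }
}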

+ 973 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesJobs.java

@@ -0,0 +1,973 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.StringReader;
+import java.util.List;
+import java.util.Map;
+
+import javax.ws.rs.core.MediaType;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.MockJobs;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.util.Times;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.Before;
+import org.junit.Test;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+import org.xml.sax.InputSource;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.servlet.GuiceServletContextListener;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.UniformInterfaceException;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.client.ClientResponse.Status;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.JerseyTest;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+
+/**
+ * Test the app master web service REST API for getting jobs, a specific job,
+ * and job counters.
+ *
+ * /ws/v1/mapreduce/jobs
+ * /ws/v1/mapreduce/jobs/{jobid}
+ * /ws/v1/mapreduce/jobs/{jobid}/counters
+ * /ws/v1/mapreduce/jobs/{jobid}/jobattempts
+ */
+public class TestAMWebServicesJobs extends JerseyTest {
+
+  private static Configuration conf = new Configuration();
+  private static TestAppContext appContext;
+
+  static class TestAppContext implements AppContext {
+    final ApplicationAttemptId appAttemptID;
+    final ApplicationId appID;
+    final String user = MockJobs.newUserName();
+    final Map<JobId, Job> jobs;
+    final long startTime = System.currentTimeMillis();
+
+    TestAppContext(int appid, int numJobs, int numTasks, int numAttempts) {
+      appID = MockJobs.newAppID(appid);
+      appAttemptID = MockJobs.newAppAttemptID(appID, 0);
+      jobs = MockJobs.newJobs(appID, numJobs, numTasks, numAttempts);
+    }
+
+    TestAppContext() {
+      this(0, 1, 2, 1);
+    }
+
+    @Override
+    public ApplicationAttemptId getApplicationAttemptId() {
+      return appAttemptID;
+    }
+
+    @Override
+    public ApplicationId getApplicationID() {
+      return appID;
+    }
+
+    @Override
+    public CharSequence getUser() {
+      return user;
+    }
+
+    @Override
+    public Job getJob(JobId jobID) {
+      return jobs.get(jobID);
+    }
+
+    @Override
+    public Map<JobId, Job> getAllJobs() {
+      return jobs; // OK
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Override
+    public EventHandler getEventHandler() {
+      return null;
+    }
+
+    @Override
+    public Clock getClock() {
+      return null;
+    }
+
+    @Override
+    public String getApplicationName() {
+      return "TestApp";
+    }
+
+    @Override
+    public long getStartTime() {
+      return startTime;
+    }
+  }
+
+  private Injector injector = Guice.createInjector(new ServletModule() {
+    @Override
+    protected void configureServlets() {
+
+      appContext = new TestAppContext();
+      bind(JAXBContextResolver.class);
+      bind(AMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      bind(AppContext.class).toInstance(appContext);
+      bind(Configuration.class).toInstance(conf);
+
+      serve("/*").with(GuiceContainer.class);
+    }
+  });
+
+  public class GuiceServletConfig extends GuiceServletContextListener {
+
+    @Override
+    protected Injector getInjector() {
+      return injector;
+    }
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+
+  }
+
+  public TestAMWebServicesJobs() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.mapreduce.v2.app.webapp")
+        .contextListenerClass(GuiceServletConfig.class)
+        .filterClass(com.google.inject.servlet.GuiceFilter.class)
+        .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testJobs() throws JSONException, Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+        .path("jobs").accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    JSONObject jobs = json.getJSONObject("jobs");
+    JSONArray arr = jobs.getJSONArray("job");
+    JSONObject info = arr.getJSONObject(0);
+    Job job = appContext.getJob(MRApps.toJobID(info.getString("id")));
+    verifyAMJob(info, job);
+
+  }
+
+  @Test
+  public void testJobsSlash() throws JSONException, Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+        .path("jobs/").accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    JSONObject jobs = json.getJSONObject("jobs");
+    JSONArray arr = jobs.getJSONArray("job");
+    JSONObject info = arr.getJSONObject(0);
+    Job job = appContext.getJob(MRApps.toJobID(info.getString("id")));
+    verifyAMJob(info, job);
+
+  }
+
+  @Test
+  public void testJobsDefault() throws JSONException, Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+        .path("jobs").get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    JSONObject json = response.getEntity(JSONObject.class);
+    assertEquals("incorrect number of elements", 1, json.length());
+    JSONObject jobs = json.getJSONObject("jobs");
+    JSONArray arr = jobs.getJSONArray("job");
+    JSONObject info = arr.getJSONObject(0);
+    Job job = appContext.getJob(MRApps.toJobID(info.getString("id")));
+    verifyAMJob(info, job);
+
+  }
+
+  @Test
+  public void testJobsXML() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+        .path("jobs").accept(MediaType.APPLICATION_XML)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+    String xml = response.getEntity(String.class);
+    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+    DocumentBuilder db = dbf.newDocumentBuilder();
+    InputSource is = new InputSource();
+    is.setCharacterStream(new StringReader(xml));
+    Document dom = db.parse(is);
+    NodeList jobs = dom.getElementsByTagName("jobs");
+    assertEquals("incorrect number of elements", 1, jobs.getLength());
+    NodeList job = dom.getElementsByTagName("job");
+    assertEquals("incorrect number of elements", 1, job.getLength());
+    verifyAMJobXML(job, appContext);
+
+  }
+
+  @Test
+  public void testJobId() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).accept(MediaType.APPLICATION_JSON)
+          .get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("job");
+      verifyAMJob(info, jobsMap.get(id));
+    }
+
+  }
+
+  @Test
+  public void testJobIdSlash() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId + "/").accept(MediaType.APPLICATION_JSON)
+          .get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("job");
+      verifyAMJob(info, jobsMap.get(id));
+    }
+  }
+
+  @Test
+  public void testJobIdDefault() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("job");
+      verifyAMJob(info, jobsMap.get(id));
+    }
+
+  }
+
+  @Test
+  public void testJobIdNonExist() throws JSONException, Exception {
+    WebResource r = resource();
+
+    try {
+      r.path("ws").path("v1").path("mapreduce").path("jobs")
+          .path("job_1234_1_2").get(JSONObject.class);
+      fail("should have thrown exception on invalid uri");
+    } catch (UniformInterfaceException ue) {
+      ClientResponse response = ue.getResponse();
+      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject msg = response.getEntity(JSONObject.class);
+      JSONObject exception = msg.getJSONObject("RemoteException");
+      assertEquals("incorrect number of elements", 3, exception.length());
+      String message = exception.getString("message");
+      String type = exception.getString("exception");
+      String classname = exception.getString("javaClassName");
+      WebServicesTestUtils.checkStringMatch("exception message",
+          "java.lang.Exception: job, job_1234_1_2, is not found", message);
+      WebServicesTestUtils.checkStringMatch("exception type",
+          "NotFoundException", type);
+      WebServicesTestUtils.checkStringMatch("exception classname",
+          "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
+    }
+  }
+
+  @Test
+  public void testJobIdInvalid() throws JSONException, Exception {
+    WebResource r = resource();
+
+    try {
+      r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_foo")
+          .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
+      fail("should have thrown exception on invalid uri");
+    } catch (UniformInterfaceException ue) {
+      ClientResponse response = ue.getResponse();
+      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject msg = response.getEntity(JSONObject.class);
+      JSONObject exception = msg.getJSONObject("RemoteException");
+      assertEquals("incorrect number of elements", 3, exception.length());
+      String message = exception.getString("message");
+      String type = exception.getString("exception");
+      String classname = exception.getString("javaClassName");
+      verifyJobIdInvalid(message, type, classname);
+    }
+  }
+
+  // verify that the exception output defaults to JSON
+  @Test
+  public void testJobIdInvalidDefault() throws JSONException, Exception {
+    WebResource r = resource();
+
+    try {
+      r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_foo")
+          .get(JSONObject.class);
+      fail("should have thrown exception on invalid uri");
+    } catch (UniformInterfaceException ue) {
+      ClientResponse response = ue.getResponse();
+      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject msg = response.getEntity(JSONObject.class);
+      JSONObject exception = msg.getJSONObject("RemoteException");
+      assertEquals("incorrect number of elements", 3, exception.length());
+      String message = exception.getString("message");
+      String type = exception.getString("exception");
+      String classname = exception.getString("javaClassName");
+      verifyJobIdInvalid(message, type, classname);
+    }
+  }
+
+  // test that the exception output works in XML
+  @Test
+  public void testJobIdInvalidXML() throws JSONException, Exception {
+    WebResource r = resource();
+
+    try {
+      r.path("ws").path("v1").path("mapreduce").path("jobs").path("job_foo")
+          .accept(MediaType.APPLICATION_XML).get(JSONObject.class);
+      fail("should have thrown exception on invalid uri");
+    } catch (UniformInterfaceException ue) {
+      ClientResponse response = ue.getResponse();
+      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
+      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+      String msg = response.getEntity(String.class);
+      System.out.println(msg);
+      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+      DocumentBuilder db = dbf.newDocumentBuilder();
+      InputSource is = new InputSource();
+      is.setCharacterStream(new StringReader(msg));
+      Document dom = db.parse(is);
+      NodeList nodes = dom.getElementsByTagName("RemoteException");
+      Element element = (Element) nodes.item(0);
+      String message = WebServicesTestUtils.getXmlString(element, "message");
+      String type = WebServicesTestUtils.getXmlString(element, "exception");
+      String classname = WebServicesTestUtils.getXmlString(element,
+          "javaClassName");
+      verifyJobIdInvalid(message, type, classname);
+    }
+  }
+
+  private void verifyJobIdInvalid(String message, String type, String classname) {
+    WebServicesTestUtils.checkStringMatch("exception message",
+        "For input string: \"foo\"", message);
+    WebServicesTestUtils.checkStringMatch("exception type",
+        "NumberFormatException", type);
+    WebServicesTestUtils.checkStringMatch("exception classname",
+        "java.lang.NumberFormatException", classname);
+  }
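+
+  // Note (illustrative only): the expected message above is exactly what
+  // Integer/Long.parseInt produce when handed the non-numeric trailing
+  // piece of "job_foo"; assuming the job ID parser splits on '_' and
+  // parses the pieces numerically, a minimal reproduction would be:
+  //   Long.parseLong("job_foo".split("_")[1]);
+  //   // throws java.lang.NumberFormatException: For input string: "foo"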
+
+  @Test
+  public void testJobIdInvalidBogus() throws JSONException, Exception {
+    WebResource r = resource();
+
+    try {
+      r.path("ws").path("v1").path("mapreduce").path("jobs").path("bogusfoo")
+          .get(JSONObject.class);
+      fail("should have thrown exception on invalid uri");
+    } catch (UniformInterfaceException ue) {
+      ClientResponse response = ue.getResponse();
+      assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject msg = response.getEntity(JSONObject.class);
+      JSONObject exception = msg.getJSONObject("RemoteException");
+      assertEquals("incorrect number of elements", 3, exception.length());
+      String message = exception.getString("message");
+      String type = exception.getString("exception");
+      String classname = exception.getString("javaClassName");
+      WebServicesTestUtils.checkStringMatch("exception message",
+          "java.lang.Exception: Error parsing job ID: bogusfoo", message);
+      WebServicesTestUtils.checkStringMatch("exception type",
+          "NotFoundException", type);
+      WebServicesTestUtils.checkStringMatch("exception classname",
+          "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
+    }
+  }
+
+  @Test
+  public void testJobIdXML() throws Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).accept(MediaType.APPLICATION_XML)
+          .get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+      String xml = response.getEntity(String.class);
+      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+      DocumentBuilder db = dbf.newDocumentBuilder();
+      InputSource is = new InputSource();
+      is.setCharacterStream(new StringReader(xml));
+      Document dom = db.parse(is);
+      NodeList job = dom.getElementsByTagName("job");
+      verifyAMJobXML(job, appContext);
+    }
+
+  }
+
+  public void verifyAMJob(JSONObject info, Job job) throws JSONException {
+
+    assertEquals("incorrect number of elements", 30, info.length());
+
+    // fields accessible to everyone
+    verifyAMJobGeneric(job, info.getString("id"), info.getString("user"),
+        info.getString("name"), info.getString("state"),
+        info.getLong("startTime"), info.getLong("finishTime"),
+        info.getLong("elapsedTime"), info.getInt("mapsTotal"),
+        info.getInt("mapsCompleted"), info.getInt("reducesTotal"),
+        info.getInt("reducesCompleted"),
+        (float) info.getDouble("reduceProgress"),
+        (float) info.getDouble("mapProgress"));
+
+    String diagnostics = "";
+    if (info.has("diagnostics")) {
+      diagnostics = info.getString("diagnostics");
+    }
+
+    // restricted access fields - if security and acls set
+    verifyAMJobGenericSecure(job, info.getInt("mapsPending"),
+        info.getInt("mapsRunning"), info.getInt("reducesPending"),
+        info.getInt("reducesRunning"), info.getBoolean("uberized"),
+        diagnostics, info.getInt("newReduceAttempts"),
+        info.getInt("runningReduceAttempts"),
+        info.getInt("failedReduceAttempts"),
+        info.getInt("killedReduceAttempts"),
+        info.getInt("successfulReduceAttempts"), info.getInt("newMapAttempts"),
+        info.getInt("runningMapAttempts"), info.getInt("failedMapAttempts"),
+        info.getInt("killedMapAttempts"), info.getInt("successfulMapAttempts"));
+
+    Map<JobACL, AccessControlList> allacls = job.getJobACLs();
+    if (allacls != null) {
+
+      for (Map.Entry<JobACL, AccessControlList> entry : allacls.entrySet()) {
+        String expectName = entry.getKey().getAclName();
+        String expectValue = entry.getValue().getAclString();
+        boolean found = false;
+        // make sure ws includes it
+        if (info.has("acls")) {
+          JSONArray arr = info.getJSONArray("acls");
+
+          for (int i = 0; i < arr.length(); i++) {
+            JSONObject aclInfo = arr.getJSONObject(i);
+            if (expectName.matches(aclInfo.getString("name"))) {
+              found = true;
+              WebServicesTestUtils.checkStringMatch("value", expectValue,
+                  aclInfo.getString("value"));
+            }
+          }
+        } else {
+          fail("should have acls in the web service info");
+        }
+        assertTrue("acl: " + expectName + " not found in webservice output",
+            found);
+      }
+    }
+
+  }
+
+  public void verifyAMJobXML(NodeList nodes, TestAppContext appContext) {
+
+    assertEquals("incorrect number of elements", 1, nodes.getLength());
+
+    for (int i = 0; i < nodes.getLength(); i++) {
+      Element element = (Element) nodes.item(i);
+
+      Job job = appContext.getJob(MRApps.toJobID(WebServicesTestUtils
+          .getXmlString(element, "id")));
+      assertNotNull("Job not found - output incorrect", job);
+
+      verifyAMJobGeneric(job, WebServicesTestUtils.getXmlString(element, "id"),
+          WebServicesTestUtils.getXmlString(element, "user"),
+          WebServicesTestUtils.getXmlString(element, "name"),
+          WebServicesTestUtils.getXmlString(element, "state"),
+          WebServicesTestUtils.getXmlLong(element, "startTime"),
+          WebServicesTestUtils.getXmlLong(element, "finishTime"),
+          WebServicesTestUtils.getXmlLong(element, "elapsedTime"),
+          WebServicesTestUtils.getXmlInt(element, "mapsTotal"),
+          WebServicesTestUtils.getXmlInt(element, "mapsCompleted"),
+          WebServicesTestUtils.getXmlInt(element, "reducesTotal"),
+          WebServicesTestUtils.getXmlInt(element, "reducesCompleted"),
+          WebServicesTestUtils.getXmlFloat(element, "reduceProgress"),
+          WebServicesTestUtils.getXmlFloat(element, "mapProgress"));
+
+      // restricted access fields - if security and acls set
+      verifyAMJobGenericSecure(job,
+          WebServicesTestUtils.getXmlInt(element, "mapsPending"),
+          WebServicesTestUtils.getXmlInt(element, "mapsRunning"),
+          WebServicesTestUtils.getXmlInt(element, "reducesPending"),
+          WebServicesTestUtils.getXmlInt(element, "reducesRunning"),
+          WebServicesTestUtils.getXmlBoolean(element, "uberized"),
+          WebServicesTestUtils.getXmlString(element, "diagnostics"),
+          WebServicesTestUtils.getXmlInt(element, "newReduceAttempts"),
+          WebServicesTestUtils.getXmlInt(element, "runningReduceAttempts"),
+          WebServicesTestUtils.getXmlInt(element, "failedReduceAttempts"),
+          WebServicesTestUtils.getXmlInt(element, "killedReduceAttempts"),
+          WebServicesTestUtils.getXmlInt(element, "successfulReduceAttempts"),
+          WebServicesTestUtils.getXmlInt(element, "newMapAttempts"),
+          WebServicesTestUtils.getXmlInt(element, "runningMapAttempts"),
+          WebServicesTestUtils.getXmlInt(element, "failedMapAttempts"),
+          WebServicesTestUtils.getXmlInt(element, "killedMapAttempts"),
+          WebServicesTestUtils.getXmlInt(element, "successfulMapAttempts"));
+
+      Map<JobACL, AccessControlList> allacls = job.getJobACLs();
+      if (allacls != null) {
+        for (Map.Entry<JobACL, AccessControlList> entry : allacls.entrySet()) {
+          String expectName = entry.getKey().getAclName();
+          String expectValue = entry.getValue().getAclString();
+          boolean found = false;
+          // make sure ws includes it
+          NodeList id = element.getElementsByTagName("acls");
+          if (id != null) {
+            for (int j = 0; j < id.getLength(); j++) {
+              Element aclElem = (Element) id.item(j);
+              if (aclElem == null) {
+                fail("should have acls in the web service info");
+              }
+              if (expectName.matches(WebServicesTestUtils.getXmlString(aclElem,
+                  "name"))) {
+                found = true;
+                WebServicesTestUtils.checkStringMatch("value", expectValue,
+                    WebServicesTestUtils.getXmlString(aclElem, "value"));
+              }
+            }
+          } else {
+            fail("should have acls in the web service info");
+          }
+          assertTrue("acl: " + expectName + " not found in webservice output",
+              found);
+        }
+      }
+    }
+  }
+
+  public void verifyAMJobGeneric(Job job, String id, String user, String name,
+      String state, long startTime, long finishTime, long elapsedTime,
+      int mapsTotal, int mapsCompleted, int reducesTotal, int reducesCompleted,
+      float reduceProgress, float mapProgress) {
+    JobReport report = job.getReport();
+
+    WebServicesTestUtils.checkStringMatch("id", MRApps.toString(job.getID()),
+        id);
+    WebServicesTestUtils.checkStringMatch("user", job.getUserName().toString(),
+        user);
+    WebServicesTestUtils.checkStringMatch("name", job.getName(), name);
+    WebServicesTestUtils.checkStringMatch("state", job.getState().toString(),
+        state);
+
+    assertEquals("startTime incorrect", report.getStartTime(), startTime);
+    assertEquals("finishTime incorrect", report.getFinishTime(), finishTime);
+    assertEquals("elapsedTime incorrect",
+        Times.elapsed(report.getStartTime(), report.getFinishTime()),
+        elapsedTime);
+    assertEquals("mapsTotal incorrect", job.getTotalMaps(), mapsTotal);
+    assertEquals("mapsCompleted incorrect", job.getCompletedMaps(),
+        mapsCompleted);
+    assertEquals("reducesTotal incorrect", job.getTotalReduces(), reducesTotal);
+    assertEquals("reducesCompleted incorrect", job.getCompletedReduces(),
+        reducesCompleted);
+    assertEquals("mapProgress incorrect", report.getMapProgress() * 100,
+        mapProgress, 0);
+    assertEquals("reduceProgress incorrect", report.getReduceProgress() * 100,
+        reduceProgress, 0);
+  }
+
+  public void verifyAMJobGenericSecure(Job job, int mapsPending,
+      int mapsRunning, int reducesPending, int reducesRunning,
+      Boolean uberized, String diagnostics, int newReduceAttempts,
+      int runningReduceAttempts, int failedReduceAttempts,
+      int killedReduceAttempts, int successfulReduceAttempts,
+      int newMapAttempts, int runningMapAttempts, int failedMapAttempts,
+      int killedMapAttempts, int successfulMapAttempts) {
+
+    String diagString = "";
+    List<String> diagList = job.getDiagnostics();
+    if (diagList != null && !diagList.isEmpty()) {
+      StringBuilder b = new StringBuilder();
+      for (String diag : diagList) {
+        b.append(diag);
+      }
+      diagString = b.toString();
+    }
+    WebServicesTestUtils.checkStringMatch("diagnostics", diagString,
+        diagnostics);
+
+    assertEquals("isUber incorrect", job.isUber(), uberized);
+
+    // unfortunately the following fields are all calculated in JobInfo,
+    // so they are not easily accessible without redoing the calculations.
+    // For now just make sure they are present and non-negative.
+    assertTrue("mapsPending not >= 0", mapsPending >= 0);
+    assertTrue("mapsRunning not >= 0", mapsRunning >= 0);
+    assertTrue("reducesPending not >= 0", reducesPending >= 0);
+    assertTrue("reducesRunning not >= 0", reducesRunning >= 0);
+
+    assertTrue("newReduceAttempts not >= 0", newReduceAttempts >= 0);
+    assertTrue("runningReduceAttempts not >= 0", runningReduceAttempts >= 0);
+    assertTrue("failedReduceAttempts not >= 0", failedReduceAttempts >= 0);
+    assertTrue("killedReduceAttempts not >= 0", killedReduceAttempts >= 0);
+    assertTrue("successfulReduceAttempts not >= 0",
+        successfulReduceAttempts >= 0);
+
+    assertTrue("newMapAttempts not >= 0", newMapAttempts >= 0);
+    assertTrue("runningMapAttempts not >= 0", runningMapAttempts >= 0);
+    assertTrue("failedMapAttempts not >= 0", failedMapAttempts >= 0);
+    assertTrue("killedMapAttempts not >= 0", killedMapAttempts >= 0);
+    assertTrue("successfulMapAttempts not >= 0", successfulMapAttempts >= 0);
+
+  }
+
+  @Test
+  public void testJobCounters() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).path("counters")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("jobCounters");
+      verifyAMJobCounters(info, jobsMap.get(id));
+    }
+  }
+
+  @Test
+  public void testJobCountersSlash() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).path("counters/")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("jobCounters");
+      verifyAMJobCounters(info, jobsMap.get(id));
+    }
+  }
+
+  @Test
+  public void testJobCountersDefault() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).path("counters/").get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("jobCounters");
+      verifyAMJobCounters(info, jobsMap.get(id));
+    }
+  }
+
+  @Test
+  public void testJobCountersXML() throws Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).path("counters")
+          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+      String xml = response.getEntity(String.class);
+      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+      DocumentBuilder db = dbf.newDocumentBuilder();
+      InputSource is = new InputSource();
+      is.setCharacterStream(new StringReader(xml));
+      Document dom = db.parse(is);
+      NodeList info = dom.getElementsByTagName("jobCounters");
+      verifyAMJobCountersXML(info, jobsMap.get(id));
+    }
+  }
+
+  public void verifyAMJobCounters(JSONObject info, Job job)
+      throws JSONException {
+
+    assertEquals("incorrect number of elements", 2, info.length());
+
+    WebServicesTestUtils.checkStringMatch("id", MRApps.toString(job.getID()),
+        info.getString("id"));
+    // just do simple verification that the fields exist - not that the
+    // data in them is correct
+    JSONArray counterGroups = info.getJSONArray("counterGroup");
+    for (int i = 0; i < counterGroups.length(); i++) {
+      JSONObject counterGroup = counterGroups.getJSONObject(i);
+      String name = counterGroup.getString("counterGroupName");
+      assertTrue("name not set", (name != null && !name.isEmpty()));
+      JSONArray counters = counterGroup.getJSONArray("counter");
+      for (int j = 0; j < counters.length(); j++) {
+        JSONObject counter = counters.getJSONObject(j);
+        String counterName = counter.getString("name");
+        assertTrue("counter name not set",
+            (counterName != null && !counterName.isEmpty()));
+
+        long mapValue = counter.getLong("mapCounterValue");
+        assertTrue("mapCounterValue not >= 0", mapValue >= 0);
+
+        long reduceValue = counter.getLong("reduceCounterValue");
+        assertTrue("reduceCounterValue not >= 0", reduceValue >= 0);
+
+        long totalValue = counter.getLong("totalCounterValue");
+        assertTrue("totalCounterValue not >= 0", totalValue >= 0);
+
+      }
+    }
+  }
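+
+  // For reference, the JSON payload walked above has roughly this shape
+  // (field names taken from the assertions; values hypothetical):
+  //   {"jobCounters": {"id": "job_...", "counterGroup": [
+  //     {"counterGroupName": "...", "counter": [
+  //       {"name": "...", "mapCounterValue": 0, "reduceCounterValue": 0,
+  //        "totalCounterValue": 0}]}]}}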
+
+  public void verifyAMJobCountersXML(NodeList nodes, Job job) {
+
+    for (int i = 0; i < nodes.getLength(); i++) {
+      Element element = (Element) nodes.item(i);
+
+      assertNotNull("Job not found - output incorrect", job);
+
+      WebServicesTestUtils.checkStringMatch("id", MRApps.toString(job.getID()),
+          WebServicesTestUtils.getXmlString(element, "id"));
+      // just do simple verification that the fields exist - not that the
+      // data in them is correct
+      NodeList groups = element.getElementsByTagName("counterGroup");
+
+      for (int j = 0; j < groups.getLength(); j++) {
+        Element counters = (Element) groups.item(j);
+        assertNotNull("should have counters in the web service info", counters);
+        String name = WebServicesTestUtils.getXmlString(counters,
+            "counterGroupName");
+        assertTrue("name not set", (name != null && !name.isEmpty()));
+        NodeList counterArr = counters.getElementsByTagName("counter");
+        for (int z = 0; z < counterArr.getLength(); z++) {
+          Element counter = (Element) counterArr.item(z);
+          String counterName = WebServicesTestUtils.getXmlString(counter,
+              "name");
+          assertTrue("counter name not set",
+              (counterName != null && !counterName.isEmpty()));
+
+          long mapValue = WebServicesTestUtils.getXmlLong(counter,
+              "mapCounterValue");
+          assertTrue("mapCounterValue not >= 0", mapValue >= 0);
+
+          long reduceValue = WebServicesTestUtils.getXmlLong(counter,
+              "reduceCounterValue");
+          assertTrue("reduceCounterValue not >= 0", reduceValue >= 0);
+
+          long totalValue = WebServicesTestUtils.getXmlLong(counter,
+              "totalCounterValue");
+          assertTrue("totalCounterValue not >= 0", totalValue >= 0);
+        }
+      }
+    }
+  }
+
+  @Test
+  public void testJobAttempts() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1")
+          .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("jobAttempts");
+      verifyJobAttempts(info, jobsMap.get(id));
+    }
+  }
+
+  @Test
+  public void testJobAttemptsSlash() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1")
+          .path("mapreduce").path("jobs").path(jobId).path("jobattempts/")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("jobAttempts");
+      verifyJobAttempts(info, jobsMap.get(id));
+    }
+  }
+
+  @Test
+  public void testJobAttemptsDefault() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1")
+          .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
+          .get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject info = json.getJSONObject("jobAttempts");
+      verifyJobAttempts(info, jobsMap.get(id));
+    }
+  }
+
+  @Test
+  public void testJobAttemptsXML() throws Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+
+      ClientResponse response = r.path("ws").path("v1")
+          .path("mapreduce").path("jobs").path(jobId).path("jobattempts")
+          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+      String xml = response.getEntity(String.class);
+      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+      DocumentBuilder db = dbf.newDocumentBuilder();
+      InputSource is = new InputSource();
+      is.setCharacterStream(new StringReader(xml));
+      Document dom = db.parse(is);
+      NodeList attempts = dom.getElementsByTagName("jobAttempts");
+      assertEquals("incorrect number of elements", 1, attempts.getLength());
+      NodeList info = dom.getElementsByTagName("jobAttempt");
+      verifyJobAttemptsXML(info, jobsMap.get(id));
+    }
+  }
+
+  public void verifyJobAttempts(JSONObject info, Job job)
+      throws JSONException {
+
+    JSONArray attempts = info.getJSONArray("jobAttempt");
+    assertEquals("incorrect number of elements", 2, attempts.length());
+    for (int i = 0; i < attempts.length(); i++) {
+      JSONObject attempt = attempts.getJSONObject(i);
+      verifyJobAttemptsGeneric(job, attempt.getString("nodeHttpAddress"),
+          attempt.getString("nodeId"), attempt.getInt("id"),
+          attempt.getLong("startTime"), attempt.getString("containerId"),
+          attempt.getString("logsLink"));
+    }
+  }
+
+  public void verifyJobAttemptsXML(NodeList nodes, Job job) {
+
+    assertEquals("incorrect number of elements", 2, nodes.getLength());
+    for (int i = 0; i < nodes.getLength(); i++) {
+      Element element = (Element) nodes.item(i);
+      verifyJobAttemptsGeneric(job,
+          WebServicesTestUtils.getXmlString(element, "nodeHttpAddress"),
+          WebServicesTestUtils.getXmlString(element, "nodeId"),
+          WebServicesTestUtils.getXmlInt(element, "id"),
+          WebServicesTestUtils.getXmlLong(element, "startTime"),
+          WebServicesTestUtils.getXmlString(element, "containerId"),
+          WebServicesTestUtils.getXmlString(element, "logsLink"));
+    }
+  }
+
+  public void verifyJobAttemptsGeneric(Job job, String nodeHttpAddress,
+      String nodeId, int id, long startTime, String containerId, String logsLink) {
+    boolean attemptFound = false;
+    for (AMInfo amInfo : job.getAMInfos()) {
+      if (amInfo.getAppAttemptId().getAttemptId() == id) {
+        attemptFound = true;
+        String nmHost = amInfo.getNodeManagerHost();
+        int nmHttpPort = amInfo.getNodeManagerHttpPort();
+        int nmPort = amInfo.getNodeManagerPort();
+        WebServicesTestUtils.checkStringMatch("nodeHttpAddress", nmHost + ":"
+            + nmHttpPort, nodeHttpAddress);
+        WebServicesTestUtils.checkStringMatch("nodeId",
+            BuilderUtils.newNodeId(nmHost, nmPort).toString(), nodeId);
+        assertTrue("startime not greater than 0", startTime > 0);
+        WebServicesTestUtils.checkStringMatch("containerId", amInfo
+            .getContainerId().toString(), containerId);
+
+        String localLogsLink = ujoin("node", "containerlogs", containerId);
+
+        assertTrue("logsLink", logsLink.contains(localLogsLink));
+      }
+    }
+    assertTrue("attempt: " + id + " was not found", attemptFound);
+  }
+
+}
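
All of the tests above follow the same pattern: issue a GET against the AM
web service with the Jersey test client, then check the JSON or XML payload
against the Job objects held in the mock AppContext. As a rough,
illustrative sketch of hitting the same jobs endpoint outside the test
harness (the host, port, and response shown are hypothetical; real requests
normally go through the RM web proxy), a plain HttpURLConnection client
could look like:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class AMJobsClientSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical address of the application master's web app.
        URL url = new URL("http://localhost:8088/ws/v1/mapreduce/jobs");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        // Ask for JSON explicitly; the *Default tests above verify that
        // JSON is also what comes back when no Accept header is sent.
        conn.setRequestProperty("Accept", "application/json");
        BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream()));
        String line;
        while ((line = in.readLine()) != null) {
          // e.g. {"jobs":{"job":[{"id":"job_...","state":"RUNNING",...}]}}
          System.out.println(line);
        }
        in.close();
      }
    }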

+ 821 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAMWebServicesTasks.java

@@ -0,0 +1,821 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.StringReader;
+import java.util.Map;
+
+import javax.ws.rs.core.MediaType;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.MockJobs;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.WebServicesTestUtils;
+import org.codehaus.jettison.json.JSONArray;
+import org.codehaus.jettison.json.JSONException;
+import org.codehaus.jettison.json.JSONObject;
+import org.junit.Before;
+import org.junit.Test;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+import org.xml.sax.InputSource;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.servlet.GuiceServletContextListener;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.ClientResponse.Status;
+import com.sun.jersey.api.client.UniformInterfaceException;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.JerseyTest;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+
+/**
+ * Test the app master web service REST API for getting tasks, a specific task,
+ * and task counters.
+ *
+ * /ws/v1/mapreduce/jobs/{jobid}/tasks
+ * /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}
+ * /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid}/counters
+ */
+public class TestAMWebServicesTasks extends JerseyTest {
+
+  private static Configuration conf = new Configuration();
+  private static TestAppContext appContext;
+
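+  // Minimal mock of the AM's AppContext: it serves a fixed set of
+  // MockJobs so the web services under test report deterministic data.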
+  static class TestAppContext implements AppContext {
+    final ApplicationAttemptId appAttemptID;
+    final ApplicationId appID;
+    final String user = MockJobs.newUserName();
+    final Map<JobId, Job> jobs;
+    final long startTime = System.currentTimeMillis();
+
+    TestAppContext(int appid, int numJobs, int numTasks, int numAttempts) {
+      appID = MockJobs.newAppID(appid);
+      appAttemptID = MockJobs.newAppAttemptID(appID, 0);
+      jobs = MockJobs.newJobs(appID, numJobs, numTasks, numAttempts);
+    }
+
+    TestAppContext() {
+      this(0, 1, 2, 1);
+    }
+
+    @Override
+    public ApplicationAttemptId getApplicationAttemptId() {
+      return appAttemptID;
+    }
+
+    @Override
+    public ApplicationId getApplicationID() {
+      return appID;
+    }
+
+    @Override
+    public CharSequence getUser() {
+      return user;
+    }
+
+    @Override
+    public Job getJob(JobId jobID) {
+      return jobs.get(jobID);
+    }
+
+    @Override
+    public Map<JobId, Job> getAllJobs() {
+      return jobs; // OK
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Override
+    public EventHandler getEventHandler() {
+      return null;
+    }
+
+    @Override
+    public Clock getClock() {
+      return null;
+    }
+
+    @Override
+    public String getApplicationName() {
+      return "TestApp";
+    }
+
+    @Override
+    public long getStartTime() {
+      return startTime;
+    }
+  }
+
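+  // Bind the real AM web service classes against the mock context and
+  // serve everything through GuiceContainer, so JerseyTest can exercise
+  // the REST endpoints entirely in-process.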
+  private Injector injector = Guice.createInjector(new ServletModule() {
+    @Override
+    protected void configureServlets() {
+
+      appContext = new TestAppContext();
+      bind(JAXBContextResolver.class);
+      bind(AMWebServices.class);
+      bind(GenericExceptionHandler.class);
+      bind(AppContext.class).toInstance(appContext);
+      bind(Configuration.class).toInstance(conf);
+
+      serve("/*").with(GuiceContainer.class);
+    }
+  });
+
+  public class GuiceServletConfig extends GuiceServletContextListener {
+
+    @Override
+    protected Injector getInjector() {
+      return injector;
+    }
+  }
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+  }
+
+  public TestAMWebServicesTasks() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.mapreduce.v2.app.webapp")
+        .contextListenerClass(GuiceServletConfig.class)
+        .filterClass(com.google.inject.servlet.GuiceFilter.class)
+        .contextPath("jersey-guice-filter").servletPath("/").build());
+  }
+
+  @Test
+  public void testTasks() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).path("tasks")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject tasks = json.getJSONObject("tasks");
+      JSONArray arr = tasks.getJSONArray("task");
+      assertEquals("incorrect number of elements", 2, arr.length());
+
+      verifyAMTask(arr, jobsMap.get(id), null);
+    }
+  }
+
+  @Test
+  public void testTasksDefault() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).path("tasks").get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject tasks = json.getJSONObject("tasks");
+      JSONArray arr = tasks.getJSONArray("task");
+      assertEquals("incorrect number of elements", 2, arr.length());
+
+      verifyAMTask(arr, jobsMap.get(id), null);
+    }
+  }
+
+  @Test
+  public void testTasksSlash() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).path("tasks/")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject tasks = json.getJSONObject("tasks");
+      JSONArray arr = tasks.getJSONArray("task");
+      assertEquals("incorrect number of elements", 2, arr.length());
+
+      verifyAMTask(arr, jobsMap.get(id), null);
+    }
+  }
+
+  @Test
+  public void testTasksXML() throws JSONException, Exception {
+
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).path("tasks")
+          .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+      String xml = response.getEntity(String.class);
+      DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+      DocumentBuilder db = dbf.newDocumentBuilder();
+      InputSource is = new InputSource();
+      is.setCharacterStream(new StringReader(xml));
+      Document dom = db.parse(is);
+      NodeList tasks = dom.getElementsByTagName("tasks");
+      assertEquals("incorrect number of elements", 1, tasks.getLength());
+      NodeList task = dom.getElementsByTagName("task");
+      verifyAMTaskXML(task, jobsMap.get(id));
+    }
+  }
+
+  @Test
+  public void testTasksQueryMap() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      String type = "m";
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).path("tasks").queryParam("type", type)
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject tasks = json.getJSONObject("tasks");
+      JSONArray arr = tasks.getJSONArray("task");
+      assertEquals("incorrect number of elements", 1, arr.length());
+      verifyAMTask(arr, jobsMap.get(id), type);
+    }
+  }
+
+  @Test
+  public void testTasksQueryReduce() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      String type = "r";
+      ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+          .path("jobs").path(jobId).path("tasks").queryParam("type", type)
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject tasks = json.getJSONObject("tasks");
+      JSONArray arr = tasks.getJSONArray("task");
+      assertEquals("incorrect number of elements", 1, arr.length());
+      verifyAMTask(arr, jobsMap.get(id), type);
+    }
+  }
+
+  @Test
+  public void testTasksQueryInvalid() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      // tasktype must be exactly either "m" or "r"
+      String tasktype = "reduce";
+
+      try {
+        r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
+            .path("tasks").queryParam("type", tasktype)
+            .accept(MediaType.APPLICATION_JSON).get(JSONObject.class);
+        fail("should have thrown exception on invalid uri");
+      } catch (UniformInterfaceException ue) {
+        ClientResponse response = ue.getResponse();
+        assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
+        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+        JSONObject msg = response.getEntity(JSONObject.class);
+        JSONObject exception = msg.getJSONObject("RemoteException");
+        assertEquals("incorrect number of elements", 3, exception.length());
+        String message = exception.getString("message");
+        String type = exception.getString("exception");
+        String classname = exception.getString("javaClassName");
+        WebServicesTestUtils.checkStringMatch("exception message",
+            "java.lang.Exception: tasktype must be either m or r", message);
+        WebServicesTestUtils.checkStringMatch("exception type",
+            "BadRequestException", type);
+        WebServicesTestUtils.checkStringMatch("exception classname",
+            "org.apache.hadoop.yarn.webapp.BadRequestException", classname);
+      }
+    }
+  }
+
+  @Test
+  public void testTaskId() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+
+        String tid = MRApps.toString(task.getID());
+        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+            .path("jobs").path(jobId).path("tasks").path(tid)
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+        JSONObject json = response.getEntity(JSONObject.class);
+        assertEquals("incorrect number of elements", 1, json.length());
+        JSONObject info = json.getJSONObject("task");
+        verifyAMSingleTask(info, task);
+      }
+    }
+  }
+
+  @Test
+  public void testTaskIdSlash() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+
+        String tid = MRApps.toString(task.getID());
+        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+            .path("jobs").path(jobId).path("tasks").path(tid + "/")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+        JSONObject json = response.getEntity(JSONObject.class);
+        assertEquals("incorrect number of elements", 1, json.length());
+        JSONObject info = json.getJSONObject("task");
+        verifyAMSingleTask(info, task);
+      }
+    }
+  }
+
+  @Test
+  public void testTaskIdDefault() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+
+        String tid = MRApps.toString(task.getID());
+        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+            .path("jobs").path(jobId).path("tasks").path(tid)
+            .get(ClientResponse.class);
+        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+        JSONObject json = response.getEntity(JSONObject.class);
+        assertEquals("incorrect number of elements", 1, json.length());
+        JSONObject info = json.getJSONObject("task");
+        verifyAMSingleTask(info, task);
+      }
+    }
+  }
+
+  @Test
+  public void testTaskIdBogus() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      String tid = "bogustaskid";
+      try {
+        r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
+            .path("tasks").path(tid).get(JSONObject.class);
+        fail("should have thrown exception on invalid uri");
+      } catch (UniformInterfaceException ue) {
+        ClientResponse response = ue.getResponse();
+        assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
+        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+        JSONObject msg = response.getEntity(JSONObject.class);
+        JSONObject exception = msg.getJSONObject("RemoteException");
+        assertEquals("incorrect number of elements", 3, exception.length());
+        String message = exception.getString("message");
+        String type = exception.getString("exception");
+        String classname = exception.getString("javaClassName");
+        WebServicesTestUtils.checkStringMatch("exception message",
+            "java.lang.Exception: Error parsing task ID: bogustaskid", message);
+        WebServicesTestUtils.checkStringMatch("exception type",
+            "NotFoundException", type);
+        WebServicesTestUtils.checkStringMatch("exception classname",
+            "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
+      }
+    }
+  }
+
+  @Test
+  public void testTaskIdNonExist() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      String tid = "task_1234_0_0_m_0";
+      try {
+        r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
+            .path("tasks").path(tid).get(JSONObject.class);
+        fail("should have thrown exception on invalid uri");
+      } catch (UniformInterfaceException ue) {
+        ClientResponse response = ue.getResponse();
+        assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
+        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+        JSONObject msg = response.getEntity(JSONObject.class);
+        JSONObject exception = msg.getJSONObject("RemoteException");
+        assertEquals("incorrect number of elements", 3, exception.length());
+        String message = exception.getString("message");
+        String type = exception.getString("exception");
+        String classname = exception.getString("javaClassName");
+        WebServicesTestUtils.checkStringMatch("exception message",
+            "java.lang.Exception: task not found with id task_1234_0_0_m_0",
+            message);
+        WebServicesTestUtils.checkStringMatch("exception type",
+            "NotFoundException", type);
+        WebServicesTestUtils.checkStringMatch("exception classname",
+            "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
+      }
+    }
+  }
+
+  @Test
+  public void testTaskIdInvalid() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      String tid = "task_1234_0_0_d_0";
+      try {
+        r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
+            .path("tasks").path(tid).get(JSONObject.class);
+        fail("should have thrown exception on invalid uri");
+      } catch (UniformInterfaceException ue) {
+        ClientResponse response = ue.getResponse();
+        assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
+        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+        JSONObject msg = response.getEntity(JSONObject.class);
+        JSONObject exception = msg.getJSONObject("RemoteException");
+        assertEquals("incorrect number of elements", 3, exception.length());
+        String message = exception.getString("message");
+        String type = exception.getString("exception");
+        String classname = exception.getString("javaClassName");
+        WebServicesTestUtils.checkStringMatch("exception message",
+            "java.lang.Exception: Unknown task symbol: d", message);
+        WebServicesTestUtils.checkStringMatch("exception type",
+            "NotFoundException", type);
+        WebServicesTestUtils.checkStringMatch("exception classname",
+            "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
+      }
+    }
+  }
+
+  @Test
+  public void testTaskIdInvalid2() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      String tid = "task_1234_0_m_0";
+      try {
+        r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
+            .path("tasks").path(tid).get(JSONObject.class);
+        fail("should have thrown exception on invalid uri");
+      } catch (UniformInterfaceException ue) {
+        ClientResponse response = ue.getResponse();
+        assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
+        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+        JSONObject msg = response.getEntity(JSONObject.class);
+        JSONObject exception = msg.getJSONObject("RemoteException");
+        assertEquals("incorrect number of elements", 3, exception.length());
+        String message = exception.getString("message");
+        String type = exception.getString("exception");
+        String classname = exception.getString("javaClassName");
+        WebServicesTestUtils.checkStringMatch("exception message",
+            "java.lang.Exception: For input string: \"m\"", message);
+        WebServicesTestUtils.checkStringMatch("exception type",
+            "NotFoundException", type);
+        WebServicesTestUtils.checkStringMatch("exception classname",
+            "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
+      }
+    }
+  }
+
+  @Test
+  public void testTaskIdInvalid3() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      String tid = "task_1234_0_0_m";
+      try {
+        r.path("ws").path("v1").path("mapreduce").path("jobs").path(jobId)
+            .path("tasks").path(tid).get(JSONObject.class);
+        fail("should have thrown exception on invalid uri");
+      } catch (UniformInterfaceException ue) {
+        ClientResponse response = ue.getResponse();
+        assertEquals(Status.NOT_FOUND, response.getClientResponseStatus());
+        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+        JSONObject msg = response.getEntity(JSONObject.class);
+        JSONObject exception = msg.getJSONObject("RemoteException");
+        assertEquals("incorrect number of elements", 3, exception.length());
+        String message = exception.getString("message");
+        String type = exception.getString("exception");
+        String classname = exception.getString("javaClassName");
+        WebServicesTestUtils.checkStringMatch("exception message",
+            "java.lang.Exception: Error parsing task ID: task_1234_0_0_m",
+            message);
+        WebServicesTestUtils.checkStringMatch("exception type",
+            "NotFoundException", type);
+        WebServicesTestUtils.checkStringMatch("exception classname",
+            "org.apache.hadoop.yarn.webapp.NotFoundException", classname);
+      }
+    }
+  }
+
+  @Test
+  public void testTaskIdXML() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+
+        String tid = MRApps.toString(task.getID());
+        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+            .path("jobs").path(jobId).path("tasks").path(tid)
+            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+
+        assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+        String xml = response.getEntity(String.class);
+        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+        DocumentBuilder db = dbf.newDocumentBuilder();
+        InputSource is = new InputSource();
+        is.setCharacterStream(new StringReader(xml));
+        Document dom = db.parse(is);
+        NodeList nodes = dom.getElementsByTagName("task");
+        for (int i = 0; i < nodes.getLength(); i++) {
+          Element element = (Element) nodes.item(i);
+          verifyAMSingleTaskXML(element, task);
+        }
+      }
+    }
+  }
+
+  public void verifyAMSingleTask(JSONObject info, Task task)
+      throws JSONException {
+    assertEquals("incorrect number of elements", 8, info.length());
+
+    verifyTaskGeneric(task, info.getString("id"), info.getString("state"),
+        info.getString("type"), info.getString("successfulAttempt"),
+        info.getLong("startTime"), info.getLong("finishTime"),
+        info.getLong("elapsedTime"), (float) info.getDouble("progress"));
+  }
+
+  public void verifyAMTask(JSONArray arr, Job job, String type)
+      throws JSONException {
+    for (Task task : job.getTasks().values()) {
+      TaskId id = task.getID();
+      String tid = MRApps.toString(id);
+      boolean found = false;
+      if (type != null && task.getType() == MRApps.taskType(type)) {
+
+        for (int i = 0; i < arr.length(); i++) {
+          JSONObject info = arr.getJSONObject(i);
+          if (tid.matches(info.getString("id"))) {
+            found = true;
+            verifyAMSingleTask(info, task);
+          }
+        }
+        assertTrue("task with id: " + tid + " not in web service output", found);
+      }
+    }
+  }
+
+  public void verifyTaskGeneric(Task task, String id, String state,
+      String type, String successfulAttempt, long startTime, long finishTime,
+      long elapsedTime, float progress) {
+
+    TaskId taskid = task.getID();
+    String tid = MRApps.toString(taskid);
+    TaskReport report = task.getReport();
+
+    WebServicesTestUtils.checkStringMatch("id", tid, id);
+    WebServicesTestUtils.checkStringMatch("type", task.getType().toString(),
+        type);
+    WebServicesTestUtils.checkStringMatch("state", report.getTaskState()
+        .toString(), state);
+    // not easily checked without duplicating logic, just make sure it's there
+    assertNotNull("successfulAttempt null", successfulAttempt);
+    assertEquals("startTime wrong", report.getStartTime(), startTime);
+    assertEquals("finishTime wrong", report.getFinishTime(), finishTime);
+    assertEquals("elapsedTime wrong", finishTime - startTime, elapsedTime);
+    assertEquals("progress wrong", report.getProgress() * 100, progress, 1e-3f);
+  }
+
+  public void verifyAMSingleTaskXML(Element element, Task task) {
+    verifyTaskGeneric(task, WebServicesTestUtils.getXmlString(element, "id"),
+        WebServicesTestUtils.getXmlString(element, "state"),
+        WebServicesTestUtils.getXmlString(element, "type"),
+        WebServicesTestUtils.getXmlString(element, "successfulAttempt"),
+        WebServicesTestUtils.getXmlLong(element, "startTime"),
+        WebServicesTestUtils.getXmlLong(element, "finishTime"),
+        WebServicesTestUtils.getXmlLong(element, "elapsedTime"),
+        WebServicesTestUtils.getXmlFloat(element, "progress"));
+  }
+
+  public void verifyAMTaskXML(NodeList nodes, Job job) {
+
+    assertEquals("incorrect number of elements", 2, nodes.getLength());
+
+    for (Task task : job.getTasks().values()) {
+      TaskId id = task.getID();
+      String tid = MRApps.toString(id);
+      boolean found = false;
+      for (int i = 0; i < nodes.getLength(); i++) {
+        Element element = (Element) nodes.item(i);
+
+        if (tid.matches(WebServicesTestUtils.getXmlString(element, "id"))) {
+          found = true;
+          verifyAMSingleTaskXML(element, task);
+        }
+      }
+      assertTrue("task with id: " + tid + " not in web service output", found);
+    }
+  }
+
+  @Test
+  public void testTaskIdCounters() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+
+        String tid = MRApps.toString(task.getID());
+        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+            .path("jobs").path(jobId).path("tasks").path(tid).path("counters")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+        JSONObject json = response.getEntity(JSONObject.class);
+        assertEquals("incorrect number of elements", 1, json.length());
+        JSONObject info = json.getJSONObject("jobTaskCounters");
+        verifyAMJobTaskCounters(info, task);
+      }
+    }
+  }
+
+  @Test
+  public void testTaskIdCountersSlash() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+
+        String tid = MRApps.toString(task.getID());
+        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+            .path("jobs").path(jobId).path("tasks").path(tid).path("counters/")
+            .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+        JSONObject json = response.getEntity(JSONObject.class);
+        assertEquals("incorrect number of elements", 1, json.length());
+        JSONObject info = json.getJSONObject("jobTaskCounters");
+        verifyAMJobTaskCounters(info, task);
+      }
+    }
+  }
+
+  @Test
+  public void testTaskIdCountersDefault() throws JSONException, Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+
+        String tid = MRApps.toString(task.getID());
+        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+            .path("jobs").path(jobId).path("tasks").path(tid).path("counters")
+            .get(ClientResponse.class);
+        assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+        JSONObject json = response.getEntity(JSONObject.class);
+        assertEquals("incorrect number of elements", 1, json.length());
+        JSONObject info = json.getJSONObject("jobTaskCounters");
+        verifyAMJobTaskCounters(info, task);
+      }
+    }
+  }
+
+  @Test
+  public void testJobTaskCountersXML() throws Exception {
+    WebResource r = resource();
+    Map<JobId, Job> jobsMap = appContext.getAllJobs();
+    for (JobId id : jobsMap.keySet()) {
+      String jobId = MRApps.toString(id);
+      for (Task task : jobsMap.get(id).getTasks().values()) {
+
+        String tid = MRApps.toString(task.getID());
+        ClientResponse response = r.path("ws").path("v1").path("mapreduce")
+            .path("jobs").path(jobId).path("tasks").path(tid).path("counters")
+            .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+        assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+        String xml = response.getEntity(String.class);
+        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+        DocumentBuilder db = dbf.newDocumentBuilder();
+        InputSource is = new InputSource();
+        is.setCharacterStream(new StringReader(xml));
+        Document dom = db.parse(is);
+        NodeList info = dom.getElementsByTagName("jobTaskCounters");
+        verifyAMTaskCountersXML(info, task);
+      }
+    }
+  }
+
+  public void verifyAMJobTaskCounters(JSONObject info, Task task)
+      throws JSONException {
+
+    assertEquals("incorrect number of elements", 2, info.length());
+
+    WebServicesTestUtils.checkStringMatch("id", MRApps.toString(task.getID()),
+        info.getString("id"));
+    // just do simple verification of the fields - not that the data
+    // in the fields is correct
+    JSONArray counterGroups = info.getJSONArray("taskCounterGroup");
+    for (int i = 0; i < counterGroups.length(); i++) {
+      JSONObject counterGroup = counterGroups.getJSONObject(i);
+      String name = counterGroup.getString("counterGroupName");
+      assertTrue("name not set", (name != null && !name.isEmpty()));
+      JSONArray counters = counterGroup.getJSONArray("counter");
+      for (int j = 0; j < counters.length(); j++) {
+        JSONObject counter = counters.getJSONObject(j);
+        String counterName = counter.getString("name");
+        assertTrue("name not set",
+            (counterName != null && !counterName.isEmpty()));
+        long value = counter.getLong("value");
+        assertTrue("value  >= 0", value >= 0);
+      }
+    }
+  }
+
+  public void verifyAMTaskCountersXML(NodeList nodes, Task task) {
+
+    for (int i = 0; i < nodes.getLength(); i++) {
+
+      Element element = (Element) nodes.item(i);
+      WebServicesTestUtils.checkStringMatch("id",
+          MRApps.toString(task.getID()),
+          WebServicesTestUtils.getXmlString(element, "id"));
+      // just do simple verification of the fields - not that the data
+      // in the fields is correct
+      NodeList groups = element.getElementsByTagName("taskCounterGroup");
+
+      for (int j = 0; j < groups.getLength(); j++) {
+        Element counters = (Element) groups.item(j);
+        assertNotNull("should have counters in the web service info", counters);
+        String name = WebServicesTestUtils.getXmlString(counters,
+            "counterGroupName");
+        assertTrue("name not set", (name != null && !name.isEmpty()));
+        NodeList counterArr = counters.getElementsByTagName("counter");
+        for (int z = 0; z < counterArr.getLength(); z++) {
+          Element counter = (Element) counterArr.item(z);
+          String counterName = WebServicesTestUtils.getXmlString(counter,
+              "name");
+          assertTrue("counter name not set",
+              (counterName != null && !counterName.isEmpty()));
+
+          long value = WebServicesTestUtils.getXmlLong(counter, "value");
+          assertTrue("value not >= 0", value >= 0);
+
+        }
+      }
+    }
+  }
+
+}

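For context: the tests above drive the /ws/v1/mapreduce/jobs/{jobid}/tasks/{taskid} resource through the Jersey test harness. A minimal sketch of querying the same resource with a standalone Jersey 1.x client follows; the AM address, job id, and task id are placeholder assumptions, not values from the tests, and reading the entity as a JSONObject assumes jersey-json (Jettison provider) is on the classpath.

import javax.ws.rs.core.MediaType;

import org.codehaus.jettison.json.JSONObject;

import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;

public class TaskResourceClientSketch {
  public static void main(String[] args) throws Exception {
    Client client = Client.create();
    // "http://am-host:8088" is a placeholder for the real MR AM web address.
    ClientResponse response = client.resource("http://am-host:8088")
        .path("ws").path("v1").path("mapreduce")
        .path("jobs").path("job_1234_0001")             // hypothetical job id
        .path("tasks").path("task_1234_0001_m_000000")  // hypothetical task id
        .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
    // The single-task payload is wrapped in a top-level "task" object,
    // mirroring the "task" element the XML tests above look for.
    JSONObject task = response.getEntity(JSONObject.class).getJSONObject("task");
    System.out.println(task.getString("id") + " " + task.getString("state"));
  }
}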
+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java

@@ -22,7 +22,6 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.JobPriority;
 import org.apache.hadoop.mapred.TaskCompletionEvent;
@@ -45,14 +44,15 @@ import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 
+@SuppressWarnings("deprecation")
 public class TypeConverter {
 
   private static RecordFactory recordFactory;

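Aside from import reordering, the only change above is the class-level @SuppressWarnings("deprecation"), which silences deprecation warnings once for the old org.apache.hadoop.mapred types the converter bridges rather than annotating each use. A minimal sketch of the idiom, assuming JobPriority carries a deprecation marker on this branch:

// One class-level suppression instead of annotating every deprecated use.
@SuppressWarnings("deprecation")
public class LegacyBridgeSketch {
  // org.apache.hadoop.mapred.JobPriority belongs to the old mapred API surface.
  org.apache.hadoop.mapred.JobPriority priority =
      org.apache.hadoop.mapred.JobPriority.NORMAL;
}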
+ 3 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java

@@ -22,6 +22,8 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
@@ -54,4 +56,5 @@ public interface MRClientProtocol {
   public KillTaskResponse killTask(KillTaskRequest request) throws YarnRemoteException;
   public KillTaskAttemptResponse killTaskAttempt(KillTaskAttemptRequest request) throws YarnRemoteException;
   public FailTaskAttemptResponse failTaskAttempt(FailTaskAttemptRequest request) throws YarnRemoteException;
+  public GetDelegationTokenResponse getDelegationToken(GetDelegationTokenRequest request) throws YarnRemoteException;
 }

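A hedged sketch of how a caller might exercise the new getDelegationToken method; the record-factory wiring follows the surrounding YARN conventions, and the already-connected proxy is assumed, neither being part of this patch:

import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.yarn.api.records.DelegationToken;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

public class DelegationTokenFetchSketch {
  // "proxy" is assumed to be an already-connected MRClientProtocol client.
  static DelegationToken fetch(MRClientProtocol proxy, String renewer)
      throws Exception {
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
    GetDelegationTokenRequest request =
        recordFactory.newRecordInstance(GetDelegationTokenRequest.class);
    request.setRenewer(renewer);  // e.g. the principal that will renew the token
    GetDelegationTokenResponse response = proxy.getDelegationToken(request);
    return response.getDelegationToken();
  }
}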
+ 64 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRDelegationTokenIdentifier.java

@@ -0,0 +1,64 @@
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.api;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+
+/**
+ * {@link TokenIdentifier} for delegation tokens issued by the
+ * JobHistoryServer, so that MR tasks can talk to the
+ * JobHistoryServer on behalf of the token owner.
+ */
+public class MRDelegationTokenIdentifier extends AbstractDelegationTokenIdentifier {
+
+  public static final Text KIND_NAME = new Text("MR_DELEGATION_TOKEN");
+
+ 
+  public MRDelegationTokenIdentifier() {
+  }
+  
+  /**
+   * Create a new delegation token identifier
+   * @param owner the effective username of the token owner
+   * @param renewer the username of the renewer
+   * @param realUser the real username of the token owner
+   */
+  public MRDelegationTokenIdentifier(Text owner, Text renewer, Text realUser) {
+    super(owner, renewer, realUser);
+  }
+
+ 
+  @Override
+  public Text getKind() {
+    return KIND_NAME;
+  }
+
+  @InterfaceAudience.Private
+  public static class Renewer extends Token.TrivialRenewer {
+    @Override
+    protected Text getKind() {
+      return KIND_NAME;
+    }
+  }
+}

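A quick sketch of the identifier in use, assuming only what the class above defines; the principal names are hypothetical:

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;

public class TokenIdentifierSketch {
  public static void main(String[] args) {
    MRDelegationTokenIdentifier ident = new MRDelegationTokenIdentifier(
        new Text("alice"),           // effective owner
        new Text("rm/host@REALM"),   // renewer
        new Text("alice"));          // real user
    // Every instance reports the fixed kind, which is what the
    // Token.TrivialRenewer subclass above keys on for renewal routing.
    System.out.println(ident.getKind());  // prints MR_DELEGATION_TOKEN
  }
}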
+ 25 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java

@@ -29,6 +29,8 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
@@ -51,6 +53,8 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemp
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemptResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersRequestPBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDelegationTokenResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsRequestPBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetJobReportRequestPBImpl;
@@ -71,6 +75,7 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskReques
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto;
@@ -214,7 +219,26 @@ public class MRClientProtocolPBClientImpl implements MRClientProtocol {
       }
     }
   }
-
+  
+  @Override
+  public GetDelegationTokenResponse getDelegationToken(
+      GetDelegationTokenRequest request) throws YarnRemoteException {
+    GetDelegationTokenRequestProto requestProto = ((GetDelegationTokenRequestPBImpl)
+        request).getProto();
+    try {
+      return new GetDelegationTokenResponsePBImpl(proxy.getDelegationToken(
+          null, requestProto));
+    } catch (ServiceException e) {
+      if (e.getCause() instanceof YarnRemoteException) {
+        throw (YarnRemoteException)e.getCause();
+      } else if (e.getCause() instanceof UndeclaredThrowableException) {
+        throw (UndeclaredThrowableException)e.getCause();
+      } else {
+        throw new UndeclaredThrowableException(e);
+      }
+    }
+  }
+  
   @Override
   public KillJobResponse killJob(KillJobRequest request)
       throws YarnRemoteException {

+ 20 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java

@@ -23,6 +23,8 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
@@ -44,6 +46,8 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemp
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.FailTaskAttemptResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersRequestPBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetCountersResponsePBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDelegationTokenResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsRequestPBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDiagnosticsResponsePBImpl;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetJobReportRequestPBImpl;
@@ -66,6 +70,8 @@ import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptReque
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenResponseProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto;
 import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto;
@@ -184,7 +190,20 @@ public class MRClientProtocolPBServiceImpl implements BlockingInterface {
       throw new ServiceException(e);
     }
   }
-
+  
+  @Override
+  public GetDelegationTokenResponseProto getDelegationToken(
+      RpcController controller, GetDelegationTokenRequestProto proto)
+      throws ServiceException {
+    GetDelegationTokenRequest request = new GetDelegationTokenRequestPBImpl(proto);
+    try {
+      GetDelegationTokenResponse response = real.getDelegationToken(request);
+      return ((GetDelegationTokenResponsePBImpl)response).getProto();
+    } catch (YarnRemoteException e) {
+      throw new ServiceException(e);
+    }
+  }
+  
   @Override
   public KillJobResponseProto killJob(RpcController controller,
       KillJobRequestProto proto) throws ServiceException {

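Design note on the two PB shims above: they are deliberately symmetric. The service implementation wraps the checked YarnRemoteException in a ServiceException so it can cross the protobuf RPC boundary, and the client implementation unwraps ServiceException back into YarnRemoteException (falling back to UndeclaredThrowableException for anything unexpected), so callers of MRClientProtocol see the same checked exception on either side of the wire.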
+ 29 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDelegationTokenRequest.java

@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+
+@Public
+@Evolving
+public interface GetDelegationTokenRequest {
+  String getRenewer();
+  void setRenewer(String renewer);
+}

+ 25 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/GetDelegationTokenResponse.java

@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+
+import org.apache.hadoop.yarn.api.records.DelegationToken;
+
+public interface GetDelegationTokenResponse {
+  void setDelegationToken(DelegationToken clientDToken);
+  DelegationToken getDelegationToken();
+}

+ 97 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java

@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+
+
+public class GetDelegationTokenRequestPBImpl extends
+      ProtoBase<GetDelegationTokenRequestProto> implements GetDelegationTokenRequest {
+  
+  String renewer;
+  
+
+  GetDelegationTokenRequestProto proto = 
+      GetDelegationTokenRequestProto.getDefaultInstance();
+  GetDelegationTokenRequestProto.Builder builder = null;
+  boolean viaProto = false;
+  
+  public GetDelegationTokenRequestPBImpl() {
+    builder = GetDelegationTokenRequestProto.newBuilder();
+  }
+  
+  public GetDelegationTokenRequestPBImpl (
+      GetDelegationTokenRequestProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+  
+  @Override
+  public String getRenewer(){
+    GetDelegationTokenRequestProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.renewer != null) {
+      return this.renewer;
+    }
+    if (!p.hasRenewer()) {
+      return null;
+    }
+    this.renewer = p.getRenewer();
+    return this.renewer;
+  }
+  
+  @Override
+  public void setRenewer(String renewer) {
+    maybeInitBuilder();
+    if (renewer == null) 
+      builder.clearRenewer();
+    this.renewer = renewer;
+  }
+
+  @Override
+  public GetDelegationTokenRequestProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+  
+
+  private void mergeLocalToBuilder() {
+    if (renewer != null) {
+      builder.setRenewer(this.renewer);
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) 
+      maybeInitBuilder();
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = GetDelegationTokenRequestProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }   
+}

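The PBImpl above follows the usual viaProto pattern: a local field shadows the proto until getProto() merges it into a rebuilt proto. A small round-trip sketch using only the class itself; the renewer string is a placeholder:

import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.GetDelegationTokenRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto;

public class PBRoundTripSketch {
  public static void main(String[] args) {
    GetDelegationTokenRequestPBImpl request = new GetDelegationTokenRequestPBImpl();
    request.setRenewer("rm/host@REALM");  // placeholder renewer
    // getProto() merges the cached field into the builder and freezes the proto.
    GetDelegationTokenRequestProto proto = request.getProto();
    // A fresh impl backed by that proto reads the value straight through.
    GetDelegationTokenRequest copy = new GetDelegationTokenRequestPBImpl(proto);
    System.out.println(copy.getRenewer());  // prints rm/host@REALM
  }
}

Caching the decoded field this way avoids re-reading the proto on every getter call and defers builder allocation until a setter actually runs.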
+ 109 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java

@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenResponseProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenResponseProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.DelegationToken;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.DelegationTokenPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.DelegationTokenProto;
+
+
+public class GetDelegationTokenResponsePBImpl extends
+      ProtoBase<GetDelegationTokenResponseProto> implements GetDelegationTokenResponse {
+  
+  DelegationToken mrToken;
+  
+
+  GetDelegationTokenResponseProto proto = 
+      GetDelegationTokenResponseProto.getDefaultInstance();
+  GetDelegationTokenResponseProto.Builder builder = null;
+  boolean viaProto = false;
+  
+  public GetDelegationTokenResponsePBImpl() {
+    builder = GetDelegationTokenResponseProto.newBuilder();
+  }
+  
+  public GetDelegationTokenResponsePBImpl (
+      GetDelegationTokenResponseProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+  
+  @Override
+  public DelegationToken getDelegationToken() {
+    GetDelegationTokenResponseProtoOrBuilder p = viaProto ? proto : builder;
+    if (this.mrToken != null) {
+      return this.mrToken;
+    }
+    if (!p.hasMRDelegationToken()) {
+      return null;
+    }
+    this.mrToken = convertFromProtoFormat(p.getMRDelegationToken());
+    return this.mrToken;  
+  }
+  
+  @Override
+  public void setDelegationToken(DelegationToken mrToken) {
+    maybeInitBuilder();
+    if (mrToken == null) 
+      builder.clearMRDelegationToken();
+    this.mrToken = mrToken;
+  }
+
+  @Override
+  public GetDelegationTokenResponseProto getProto() {
+    mergeLocalToProto();
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+  
+
+  private void mergeLocalToBuilder() {
+    if (mrToken != null) {
+      builder.setMRDelegationToken(convertToProtoFormat(this.mrToken));
+    }
+  }
+
+  private void mergeLocalToProto() {
+    if (viaProto) 
+      maybeInitBuilder();
+    mergeLocalToBuilder();
+    proto = builder.build();
+    viaProto = true;
+  }
+
+  private void maybeInitBuilder() {
+    if (viaProto || builder == null) {
+      builder = GetDelegationTokenResponseProto.newBuilder(proto);
+    }
+    viaProto = false;
+  }
+   
+
+  private DelegationTokenPBImpl convertFromProtoFormat(DelegationTokenProto p) {
+    return new DelegationTokenPBImpl(p);
+  }
+
+  private DelegationTokenProto convertToProtoFormat(DelegationToken t) {
+    return ((DelegationTokenPBImpl)t).getProto();
+  }
+}

Some files were not shown because too many files changed in this diff