
Merge trunk into HA branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1294445 13f79535-47bb-0310-9956-ffa450edef68
Aaron Myers, 13 years ago
commit 586bd479cb
13 changed files with 992 additions and 367 deletions
  1. BUILDING.txt (+1, -1)
  2. hadoop-common-project/hadoop-common/CHANGES.txt (+71, -67)
  3. hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (+116, -117)
  4. hadoop-mapreduce-project/CHANGES.txt (+29, -23)
  5. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java (+6, -0)
  6. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ResourceBundles.java (+3, -1)
  7. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java (+95, -60)
  8. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java (+71, -49)
  9. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java (+41, -41)
  10. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java (+16, -8)
  11. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java (+145, -0)
  12. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist (+1, -0)
  13. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml (+397, -0)

+ 1 - 1
BUILDING.txt

@@ -8,7 +8,7 @@ Requirements:
 * Maven 3.0
 * Forrest 0.8 (if generating docs)
 * Findbugs 1.3.9 (if running findbugs)
-* ProtocolBuffer 2.4.1+ (for MapReduce)
+* ProtocolBuffer 2.4.1+ (for MapReduce and HDFS)
 * Autotools (if compiling native code)
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)


+ 71 - 67
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -3,17 +3,11 @@ Hadoop Change Log
 Trunk (unreleased changes)

   INCOMPATIBLE CHANGES
-  
-    HADOOP-7920. Remove Avro Rpc. (suresh)

   NEW FEATURES
-    HADOOP-7773. Add support for protocol buffer based RPC engine.
-    (suresh)
-
-    HADOOP-7875. Add helper class to unwrap protobuf ServiceException.
-    (suresh)

   IMPROVEMENTS
+
     HADOOP-8017. Configure hadoop-main pom to get rid of M2E plugin execution
     not covered (Eric Charles via bobby)

@@ -22,22 +16,6 @@ Trunk (unreleased changes)

     HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)

-    HADOOP-7524. Change RPC to allow multiple protocols including multuple
-    versions of the same protocol (sanjay Radia)
-
-    HADOOP-7607. Simplify the RPC proxy cleanup process. (atm)
-
-    HADOOP-7635. RetryInvocationHandler should release underlying resources on
-    close (atm)
-
-    HADOOP-7687 Make getProtocolSignature public  (sanjay)
-
-    HADOOP-7693. Enhance AvroRpcEngine to support the new #addProtocol
-    interface introduced in HADOOP-7524.  (cutting)
-
-    HADOOP-7716. RPC protocol registration on SS does not log the protocol name
-    (only the class which may be different) (sanjay)
-
     HADOOP-7717. Move handling of concurrent client fail-overs to
     RetryInvocationHandler (atm)

@@ -54,45 +32,20 @@ Trunk (unreleased changes)
     HADOOP-7792. Add verifyToken method to AbstractDelegationTokenSecretManager.
     (jitendra)

-    HADOOP-7776 Make the Ipc-Header in a RPC-Payload an explicit header (sanjay)
-
     HADOOP-7688. Add servlet handler check in HttpServer.start().
     (Uma Maheswara Rao G via szetszwo)

-    HADOOP-7862. Move the support for multiple protocols to lower layer so 
-    that Writable, PB and Avro can all use it (Sanjay)
-
-    HADOOP-7876. Provided access to encoded key in DelegationKey for
-    use in protobuf based RPCs. (suresh)
-
     HADOOP-7886. Add toString to FileStatus. (SreeHari via jghoman)

-    HADOOP-7899. Generate proto java files as part of the build. (tucu)
-
     HADOOP-7808. Port HADOOP-7510 - Add configurable option to use original 
     hostname in token instead of IP to allow server IP change. 
     (Daryn Sharp via suresh)
- 
-    HADOOP-7957. Classes deriving GetGroupsBase should be able to override 
-    proxy creation. (jitendra)
-
-    HADOOP-7968. Errant println left in RPC.getHighestSupportedProtocol (Sho Shimauchi via harsh)

     HADOOP-7987. Support setting the run-as user in unsecure mode. (jitendra)

-    HADOOP-7965. Support for protocol version and signature in PB. (jitendra)
-
-    HADOOP-7988. Upper case in hostname part of the principals doesn't work with 
+    HADOOP-7988. Upper case in hostname part of the principals doesn't work with
     kerberos. (jitendra)

-    HADOOP-8070. Add a standalone benchmark for RPC call performance. (todd)
-
-    HADOOP-8084. Updates ProtoBufRpc engine to not do an unnecessary copy 
-    for RPC request/response. (ddas)
-
-    HADOOP-8085. Add RPC metrics to ProtobufRpcEngine. (Hari Mankude via
-    suresh)
-
     HADOOP-8108. Move method getHostPortString() from NameNode to NetUtils.
     (Brandon Li via jitendra)

@@ -133,30 +86,14 @@ Trunk (unreleased changes)
     HADOOP-7704. Reduce number of object created by JMXJsonServlet.
     (Devaraj K via Eric Yang)

-    HADOOP-7695. RPC.stopProxy can throw unintended exception while logging
-    error (atm)
-
     HADOOP-7769. TestJMXJsonServlet is failing. (tomwhite)

     HADOOP-7770. ViewFS getFileChecksum throws FileNotFoundException for files in 
     /tmp and /user. (Ravi Prakash via jitendra)

-    HADOOP-7833. Fix findbugs warnings in protobuf generated code.
-    (John Lee via suresh)
-
     HADOOP-7888. TestFailoverProxy fails intermittently on trunk. (Jason Lowe
     via atm)

-    HADOOP-7897. ProtobufRpcEngine client side exception mechanism is not
-    consistent with WritableRpcEngine. (suresh)
-
-    HADOOP-7913 Fix bug in ProtoBufRpcEngine  (sanjay)
-
-    HADOOP-7892. IPC logs too verbose after "RpcKind" introduction (todd)
-
-    HADOOP-7931. o.a.h.ipc.WritableRpcEngine should have a way to force
-                 initialization (atm)
-
   OPTIMIZATIONS

     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -165,14 +102,81 @@ Release 0.23.3 - UNRELEASED

   INCOMPATIBLE CHANGES

-  NEW FEATURES                                                                    
-  
+    HADOOP-7920. Remove Avro Rpc. (suresh)
+
+  NEW FEATURES
+
+    HADOOP-7773. Add support for protocol buffer based RPC engine.
+    (suresh)
+
+    HADOOP-7875. Add helper class to unwrap protobuf ServiceException.
+    (suresh)
+
   IMPROVEMENTS

+    HADOOP-7524. Change RPC to allow multiple protocols including multuple
+    versions of the same protocol (sanjay Radia)
+
+    HADOOP-7607. Simplify the RPC proxy cleanup process. (atm)
+
+    HADOOP-7687. Make getProtocolSignature public  (sanjay)
+
+    HADOOP-7693. Enhance AvroRpcEngine to support the new #addProtocol
+    interface introduced in HADOOP-7524.  (cutting)
+
+    HADOOP-7716. RPC protocol registration on SS does not log the protocol name
+    (only the class which may be different) (sanjay)
+
+    HADOOP-7776. Make the Ipc-Header in a RPC-Payload an explicit header.
+    (sanjay)
+
+    HADOOP-7862. Move the support for multiple protocols to lower layer so
+    that Writable, PB and Avro can all use it (Sanjay)
+
+    HADOOP-7876. Provided access to encoded key in DelegationKey for
+    use in protobuf based RPCs. (suresh)
+
+    HADOOP-7899. Generate proto java files as part of the build. (tucu)
+
+    HADOOP-7957. Classes deriving GetGroupsBase should be able to override 
+    proxy creation. (jitendra)
+
+    HADOOP-7965. Support for protocol version and signature in PB. (jitendra)
+
+    HADOOP-8070. Add a standalone benchmark for RPC call performance. (todd)
+
+    HADOOP-8084. Updates ProtoBufRpc engine to not do an unnecessary copy 
+    for RPC request/response. (ddas)
+
+    HADOOP-8085. Add RPC metrics to ProtobufRpcEngine. (Hari Mankude via
+    suresh)
+
   OPTIMIZATIONS

   BUG FIXES

+    HADOOP-7635. RetryInvocationHandler should release underlying resources on
+    close. (atm)
+
+    HADOOP-7695. RPC.stopProxy can throw unintended exception while logging
+    error. (atm)
+
+    HADOOP-7833. Fix findbugs warnings in protobuf generated code.
+    (John Lee via suresh)
+
+    HADOOP-7897. ProtobufRpcEngine client side exception mechanism is not
+    consistent with WritableRpcEngine. (suresh)
+
+    HADOOP-7913. Fix bug in ProtoBufRpcEngine.  (sanjay)
+
+    HADOOP-7892. IPC logs too verbose after "RpcKind" introduction. (todd)
+
+    HADOOP-7968. Errant println left in RPC.getHighestSupportedProtocol. (Sho
+    Shimauchi via harsh)
+
+    HADOOP-7931. o.a.h.ipc.WritableRpcEngine should have a way to force
+    initialization. (atm)
+
 Release 0.23.2 - UNRELEASED 

   INCOMPATIBLE CHANGES

+ 116 - 117
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -3,93 +3,24 @@ Hadoop HDFS Change Log
 Trunk (unreleased changes)

   INCOMPATIBLE CHANGES
-    
-    HDFS-2676. Remove Avro RPC. (suresh)

   NEW FEATURES
-  
-    HDFS-395.  DFS Scalability: Incremental block reports. (Tomasz Nykiel
-    via hairong)
-
-    HDFS-2517. Add protobuf service for JounralProtocol. (suresh)
-
-    HDFS-2518. Add protobuf service for NamenodeProtocol. (suresh)
-
-    HDFS-2520. Add protobuf service for InterDatanodeProtocol. (suresh)
-
-    HDFS-2519. Add protobuf service for DatanodeProtocol. (suresh)
-
-    HDFS-2581. Implement protobuf service for JournalProtocol. (suresh)
-
-    HDFS-2618. Implement protobuf service for NamenodeProtocol. (suresh)
-
-    HDFS-2629. Implement protobuf service for InterDatanodeProtocol. (suresh)
-
-    HDFS-2636. Implement protobuf service for ClientDatanodeProtocol. (suresh)
 

     HDFS-2430. The number of failed or low-resource volumes the NN can tolerate
                should be configurable. (atm)

-
-    HDFS-2647. Used protobuf based RPC for InterDatanodeProtocol, 
-    ClientDatanodeProtocol, JournalProtocol, NamenodeProtocol. (suresh)
-
-    HDFS-2666. Fix TestBackupNode failure. (suresh)
-
     HDFS-234. Integration with BookKeeper logging system. (Ivan Kelly 
     via jitendra)

-    HDFS-2663. Optional protobuf parameters are not handled correctly.
-    (suresh)
-
-    HDFS-2661. Enable protobuf RPC for DatanodeProtocol. (jitendra)
-
-    HDFS-2697. Move RefreshAuthPolicy, RefreshUserMappings, GetUserMappings 
-    protocol to protocol buffers. (jitendra)
-
-    HDFS-2880. Protobuf changes in DatanodeProtocol to add multiple storages.
-    (suresh)
-
-    HDFS-2899. Service protocol changes in DatanodeProtocol to add multiple 
-    storages. (suresh)
-
   IMPROVEMENTS

-    HADOOP-7524 Change RPC to allow multiple protocols including multuple 
-    versions of the same protocol (Sanjay Radia)
-
     HDFS-1620. Rename HdfsConstants -> HdfsServerConstants, FSConstants ->
                HdfsConstants. (Harsh J Chouraria via atm)
-    HDFS-2197. Refactor RPC call implementations out of NameNode class (todd)
-
-    HDFS-2018. Move all journal stream management code into one place.
-               (Ivan Kelly via jitendra)
-
-    HDFS-2223. Untangle depencencies between NN components (todd)

-    HDFS-2337. DFSClient shouldn't keep multiple RPC proxy references (atm)
-
-    HDFS-2351 Change Namenode and Datanode to register each of their protocols
-    seperately. (Sanjay Radia)
+    HDFS-2197. Refactor RPC call implementations out of NameNode class (todd)

     HDFS-2158. Add JournalSet to manage the set of journals. (jitendra)

-    HDFS-2459. Separate datatypes for JournalProtocol. (suresh)
-
-    HDFS-2480. Separate datatypes for NamenodeProtocol. (suresh)
-
-    HDFS-2181 Separate HDFS Client wire protocol data types (sanjay)
-
-    HDFS-2489. Move Finalize and Register to separate file out of
-    DatanodeCommand.java. (suresh)
-
-    HDFS-2488. Separate datatypes for InterDatanodeProtocol. (suresh)
-
-    HDFS-2496. Separate datatypes for DatanodeProtocol. (suresh)
-
-    HDFS-2479 HDFS Client Data Types in Protocol Buffers (sanjay)
-
     HDFS-2334. Add Closeable to JournalManager. (Ivan Kelly via jitendra)

     HDFS-2572. Remove unnecessary double-check in DN#getHostName. (harsh)
@@ -102,30 +33,12 @@ Trunk (unreleased changes)

     HDFS-2857. Cleanup BlockInfo class. (suresh)

-    HADOOP-7862   Hdfs changes to work with HADOOP 7862: 
-    Move the support for multiple protocols to lower layer so that Writable,
-    PB and Avro can all use it (Sanjay)
-
     HDFS-1580. Add interface for generic Write Ahead Logging mechanisms.
     (Ivan Kelly via jitendra)

-    HDFS-2597 ClientNameNodeProtocol in Protocol Buffers (sanjay)
-
-    HDFS-2651 ClientNameNodeProtocol Translators for Protocol Buffers (sanjay)
-
-    HDFS-2650. Replace @inheritDoc with @Override. (Hari Mankude via suresh)
-
-    HDFS-2669. Enable protobuf rpc for ClientNamenodeProtocol. (Sanjay Radia)
-
-    HDFS-2801. Provide a method in client side translators to check for a 
-    methods supported in underlying protocol. (jitendra)
-
     HDFS-208. name node should warn if only one dir is listed in dfs.name.dir.
     (Uma Maheswara Rao G via eli)

-    HDS-2895. Remove Writable wire protocol types and translators to
-    complete transition to protocol buffers. (suresh)
-
     HDFS-2786. Fix host-based token incompatibilities in DFSUtil. (Kihwal Lee
     via jitendra)

@@ -148,6 +61,7 @@ Trunk (unreleased changes)
     (suresh)

   OPTIMIZATIONS
+
     HDFS-2477. Optimize computing the diff between a block report and the
     namenode state. (Tomasz Nykiel via hairong)

@@ -158,6 +72,7 @@ Trunk (unreleased changes)
     over-replicated, and invalidated blocks. (Tomasz Nykiel via todd)

   BUG FIXES
+
     HDFS-2299. TestOfflineEditsViewer is failing on trunk. (Uma Maheswara Rao G
                via atm)
     HDFS-2310. TestBackupNode fails since HADOOP-7524 went in.
@@ -180,10 +95,118 @@ Trunk (unreleased changes)
     HDFS-2188. Make FSEditLog create its journals from a list of URIs rather 
     than NNStorage. (Ivan Kelly via jitendra)

-    HDFS-2481 Unknown protocol: org.apache.hadoop.hdfs.protocol.ClientProtocol.
+    HDFS-1765. Block Replication should respect under-replication
+    block priority. (Uma Maheswara Rao G via eli)
+
+    HDFS-2765. TestNameEditsConfigs is incorrectly swallowing IOE. (atm)
+
+    HDFS-2776. Missing interface annotation on JournalSet. 
+    (Brandon Li via jitendra)
+
+    HDFS-2759. Pre-allocate HDFS edit log files after writing version number.
+    (atm)
+
+    HDFS-2908. Add apache license header for StorageReport.java. (Brandon Li
+    via jitendra)
+
+Release 0.23.3 - UNRELEASED 
+
+  INCOMPATIBLE CHANGES
+
+    HDFS-2676. Remove Avro RPC. (suresh)
+
+  NEW FEATURES
+
+    HDFS-2978. The NameNode should expose name dir statuses via JMX. (atm)
+
+    HDFS-395.  DFS Scalability: Incremental block reports. (Tomasz Nykiel
+    via hairong)
+
+    HDFS-2517. Add protobuf service for JounralProtocol. (suresh)
+
+    HDFS-2518. Add protobuf service for NamenodeProtocol. (suresh)
+
+    HDFS-2520. Add protobuf service for InterDatanodeProtocol. (suresh)
+
+    HDFS-2519. Add protobuf service for DatanodeProtocol. (suresh)
+
+    HDFS-2581. Implement protobuf service for JournalProtocol. (suresh)
+
+    HDFS-2618. Implement protobuf service for NamenodeProtocol. (suresh)
+
+    HDFS-2629. Implement protobuf service for InterDatanodeProtocol. (suresh)
+
+    HDFS-2636. Implement protobuf service for ClientDatanodeProtocol. (suresh)
+
+    HDFS-2642. Protobuf translators for DatanodeProtocol. (jitendra)
+
+    HDFS-2647. Used protobuf based RPC for InterDatanodeProtocol, 
+    ClientDatanodeProtocol, JournalProtocol, NamenodeProtocol. (suresh)
+
+    HDFS-2661. Enable protobuf RPC for DatanodeProtocol. (jitendra)
+
+    HDFS-2697. Move RefreshAuthPolicy, RefreshUserMappings, GetUserMappings 
+    protocol to protocol buffers. (jitendra)
+
+    HDFS-2880. Protobuf changes in DatanodeProtocol to add multiple storages.
+    (suresh)
+
+    HDFS-2899. Service protocol changes in DatanodeProtocol to add multiple 
+    storages. (suresh)
+
+  IMPROVEMENTS
+
+    HDFS-2018. Move all journal stream management code into one place.
+    (Ivan Kelly via jitendra)
+
+    HDFS-2223. Untangle depencencies between NN components (todd)
+
+    HDFS-2351. Change Namenode and Datanode to register each of their protocols
+    seperately (sanjay)
+
+    HDFS-2337. DFSClient shouldn't keep multiple RPC proxy references (atm)
+ 
+    HDFS-2181. Separate HDFS Client wire protocol data types (sanjay)
+
+    HDFS-2459. Separate datatypes for Journal Protocol. (suresh)
+
+    HDFS-2480. Separate datatypes for NamenodeProtocol. (suresh)
+
+    HDFS-2489. Move Finalize and Register to separate file out of
+    DatanodeCommand.java. (suresh)
+
+    HDFS-2488. Separate datatypes for InterDatanodeProtocol. (suresh)
+
+    HDFS-2496. Separate datatypes for DatanodeProtocol. (suresh)
+
+    HDFS-2479. HDFS Client Data Types in Protocol Buffers (sanjay)
+
+    HADOOP-7862. Hdfs changes to work with HADOOP-7862: Move the support for
+    multiple protocols to lower layer so that Writable, PB and Avro can all
+    use it. (sanjay)
+
+    HDFS-2597. ClientNameNodeProtocol in Protocol Buffers. (sanjay)
+
+    HDFS-2651. ClientNameNodeProtocol Translators for Protocol Buffers. (sanjay)
+
+    HDFS-2650. Replace @inheritDoc with @Override. (Hari Mankude via suresh).
+
+    HDFS-2669. Enable protobuf rpc for ClientNamenodeProtocol. (sanjay)
+
+    HDFS-2801. Provide a method in client side translators to check for a 
+    methods supported in underlying protocol. (jitendra)
+
+    HDFS-2895. Remove Writable wire protocol types and translators to
+    complete transition to protocol buffers. (suresh)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HDFS-2481. Unknown protocol: org.apache.hadoop.hdfs.protocol.ClientProtocol.
     (sanjay)

-    HDFS-2497 Fix TestBackupNode failure. (suresh)
+    HDFS-2497. Fix TestBackupNode failure. (suresh)

     HDFS-2499. RPC client is created incorrectly introduced in HDFS-2459.
     (suresh)
@@ -194,8 +217,9 @@ Trunk (unreleased changes)
     HDFS-2532. TestDfsOverAvroRpc timing out in trunk (Uma Maheswara Rao G
     via todd)

-    HDFS-1765. Block Replication should respect under-replication
-    block priority. (Uma Maheswara Rao G via eli)
+    HDFS-2666. Fix TestBackupNode failure. (suresh)
+
+    HDFS-2663. Optional protobuf parameters are not handled correctly. (suresh)

     HDFS-2694. Removal of Avro broke non-PB NN services. (atm)

@@ -205,39 +229,14 @@ Trunk (unreleased changes)
     HDFS-2700. Fix failing TestDataNodeMultipleRegistrations in trunk
     (Uma Maheswara Rao G via todd)

-    HDFS-2765. TestNameEditsConfigs is incorrectly swallowing IOE. (atm)
-
     HDFS-2739. SecondaryNameNode doesn't start up. (jitendra)

-    HDFS-2776. Missing interface annotation on JournalSet. 
-    (Brandon Li via jitendra)
-
     HDFS-2768. BackupNode stop can not close proxy connections because
     it is not a proxy instance. (Uma Maheswara Rao G via eli)

-    HDFS-2759. Pre-allocate HDFS edit log files after writing version number.
-    (atm)
-
-    HDFS-2908. Add apache license header for StorageReport.java. (Brandon Li
-    via jitendra)
-
     HDFS-2968. Protocol translator for BlockRecoveryCommand broken when
     multiple blocks need recovery. (todd)

-Release 0.23.3 - UNRELEASED 
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-    HDFS-2978. The NameNode should expose name dir statuses via JMX. (atm)
-  
-  IMPROVEMENTS
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-
 Release 0.23.2 - UNRELEASED 

   INCOMPATIBLE CHANGES

+ 29 - 23
hadoop-mapreduce-project/CHANGES.txt

@@ -4,8 +4,6 @@ Trunk (unreleased changes)

   INCOMPATIBLE CHANGES

-    MAPREDUCE-3545. Remove Avro RPC. (suresh)
-
   NEW FEATURES

     MAPREDUCE-778. Rumen Anonymizer. (Amar Kamat and Chris Douglas via amarrk)
@@ -32,12 +30,6 @@ Trunk (unreleased changes)
     MAPREDUCE-3008. Improvements to cumulative CPU emulation for short running
                     tasks in Gridmix. (amarrk)

-    MAPREDUCE-2887 due to HADOOP-7524 Change RPC to allow multiple protocols
-                   including multuple versions of the same protocol (sanjay Radia)
-
-    MAPREDUCE-2934. MR portion of HADOOP-7607 - Simplify the RPC proxy cleanup
-                    process (atm)
-
     MAPREDUCE-2836. Provide option to fail jobs when submitted to non-existent
     fair scheduler pools. (Ahmed Radwan via todd)

@@ -50,14 +42,8 @@ Trunk (unreleased changes)
     MAPREDUCE-3169. Create a new MiniMRCluster equivalent which only provides
     client APIs cross MR1 and MR2 (Ahmed via tucu)

-    HADOOP-7862   MR changes to work with HADOOP 7862:
-    Move the support for multiple protocols to lower layer so that Writable,
-    PB and Avro can all use it (Sanjay)
-
     MAPREDUCE-2944. Improve checking of input for JobClient.displayTasks() (XieXianshan via harsh)

-    MAPREDUCE-3909 Javadoc the Service interfaces (stevel)
-
   BUG FIXES

     MAPREDUCE-3757. [Rumen] Fixed Rumen Folder to adjust shuffleFinished and
@@ -89,24 +75,41 @@ Trunk (unreleased changes)
     MAPREDUCE-3664. Federation Documentation has incorrect configuration example.
     (Brandon Li via jitendra)

-    MAPREDUCE-3740. Fixed broken mapreduce compilation after the patch for
-    HADOOP-7965. (Devaraj K via vinodkv)
-
-    MAPREDUCE-3818. Fixed broken compilation in TestSubmitJob after the patch
-    for HDFS-2895. (Suresh Srinivas via vinodkv)
-
 Release 0.23.3 - UNRELEASED

   INCOMPATIBLE CHANGES

+    MAPREDUCE-3545. Remove Avro RPC. (suresh)
+
   NEW FEATURES

   IMPROVEMENTS

+    MAPREDUCE-2887. Due to HADOOP-7524, change RPC to allow multiple protocols
+    including multuple versions of the same protocol (Sanjay Radia)
+
+    MAPREDUCE-2934. MR portion of HADOOP-7607 - Simplify the RPC proxy cleanup
+    process (atm)
+
+    HADOOP-7862. MR changes to work with HADOOP 7862: Move the support for
+    multiple protocols to lower layer so that Writable, PB and Avro can all
+    use it (Sanjay Radia)
+
+    MAPREDUCE-3909 Javadoc the Service interfaces (stevel)
+
   OPTIMIZATIONS

   BUG FIXES

+    MAPREDUCE-3740. Fixed broken mapreduce compilation after the patch for
+    HADOOP-7965. (Devaraj K via vinodkv) 
+
+    MAPREDUCE-3818. Fixed broken compilation in TestSubmitJob after the patch
+    for HDFS-2895. (Suresh Srinivas via vinodkv)
+
+    MAPREDUCE-2942. TestNMAuditLogger.testNMAuditLoggerWithIP failing (Thomas
+    Graves via mahadev)
+
 Release 0.23.2 - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -135,6 +138,12 @@ Release 0.23.2 - UNRELEASED

   OPTIMIZATIONS

+    MAPREDUCE-3901. Modified JobHistory records in YARN to lazily load job and
+    task reports so as to improve UI response times. (Siddarth Seth via vinodkv)
+
+    MAPREDUCE-2855. Passing a cached class-loader to ResourceBundle creator to
+    minimize counter names lookup time. (Siddarth Seth via vinodkv)
+
   BUG FIXES
     MAPREDUCE-3918  proc_historyserver no longer in command line arguments for
     HistoryServer (Jon Eagles via bobby)
@@ -2270,9 +2279,6 @@ Release 0.23.0 - 2011-11-01

     MAPREDUCE-2908. Fix all findbugs warnings. (vinodkv via acmurthy)

-    MAPREDUCE-2942. TestNMAuditLogger.testNMAuditLoggerWithIP failing (Thomas Graves
-    via mahadev)
-
     MAPREDUCE-2947. Fixed race condition in AuxiliaryServices. (vinodkv via
     acmurthy)


+ 6 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.util.Records;

 public class MRBuilderUtils {
@@ -41,6 +42,11 @@ public class MRBuilderUtils {
     return jobId;
   }

+  public static JobId newJobId(long clusterTs, int appIdInt, int id) {
+    ApplicationId appId = BuilderUtils.newApplicationId(clusterTs, appIdInt);
+    return MRBuilderUtils.newJobId(appId, id);
+  }
+
   public static TaskId newTaskId(JobId jobId, int id, TaskType taskType) {
     TaskId taskId = Records.newRecord(TaskId.class);
     taskId.setJobId(jobId);

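The new three-argument newJobId overload above only wraps the existing two-step construction (build an ApplicationId, then a JobId). A minimal usage sketch, not part of this commit; it assumes the YARN and MR v2 record classes are on the classpath, and the class name NewJobIdSketch is illustrative:

import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.util.BuilderUtils;

public class NewJobIdSketch {
  public static void main(String[] args) {
    // Two-step form using the pre-existing helpers referenced in this diff.
    ApplicationId appId = BuilderUtils.newApplicationId(1329348432655L, 1);
    JobId viaTwoSteps = MRBuilderUtils.newJobId(appId, 1);

    // One-step form via the overload added here; both describe the job from
    // the new test resource, job_1329348432655_0001.
    JobId viaOverload = MRBuilderUtils.newJobId(1329348432655L, 1, 1);

    System.out.println(viaTwoSteps + " == " + viaOverload);
  }
}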
+ 3 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/ResourceBundles.java

@@ -18,6 +18,7 @@

 package org.apache.hadoop.mapreduce.util;

+import java.util.Locale;
 import java.util.ResourceBundle;
 import java.util.MissingResourceException;

@@ -33,7 +34,8 @@ public class ResourceBundles {
    * @throws MissingResourceException
    */
   public static ResourceBundle getBundle(String bundleName) {
-    return ResourceBundle.getBundle(bundleName.replace('$', '_'));
+    return ResourceBundle.getBundle(bundleName.replace('$', '_'),
+        Locale.getDefault(), Thread.currentThread().getContextClassLoader());
   }

   /**

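The change above resolves counter bundles against the caller's context class loader instead of the default lookup. A minimal sketch of that lookup pattern, not part of this commit; the class name BundleLookupSketch is illustrative, and the example bundle name only resolves when the MR core jar is on the classpath:

import java.util.Locale;
import java.util.MissingResourceException;
import java.util.ResourceBundle;

public class BundleLookupSketch {
  // Same lookup pattern as the patched ResourceBundles.getBundle(): pass the
  // thread context class loader explicitly so bundles shipped with the job
  // classpath can be found.
  static ResourceBundle getBundle(String bundleName) {
    return ResourceBundle.getBundle(bundleName.replace('$', '_'),
        Locale.getDefault(), Thread.currentThread().getContextClassLoader());
  }

  public static void main(String[] args) {
    try {
      // Counter bundles in MapReduce are keyed by class name; this name is an
      // example only.
      System.out.println(
          getBundle("org.apache.hadoop.mapreduce.TaskCounter").getLocale());
    } catch (MissingResourceException e) {
      System.out.println("bundle not found: " + e.getMessage());
    }
  }
}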
+ 95 - 60
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java

@@ -19,13 +19,16 @@
 package org.apache.hadoop.mapreduce.v2.hs;
 package org.apache.hadoop.mapreduce.v2.hs;
 
 
 import java.io.IOException;
 import java.io.IOException;
-import java.util.ArrayList;
+import java.net.UnknownHostException;
 import java.util.Collections;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.List;
 import java.util.Map;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
@@ -34,6 +37,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.JobACLsManager;
 import org.apache.hadoop.mapred.JobACLsManager;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.TaskID;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
@@ -54,7 +58,7 @@ import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.YarnException;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.util.Records;
 
 
 
 
 /**
 /**
@@ -64,50 +68,31 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job {
 public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job {
   
   
   static final Log LOG = LogFactory.getLog(CompletedJob.class);
   static final Log LOG = LogFactory.getLog(CompletedJob.class);
-  private final Counters counters;
   private final Configuration conf;
   private final Configuration conf;
-  private final JobId jobId;
-  private final List<String> diagnostics = new ArrayList<String>();
-  private final JobReport report;
-  private final Map<TaskId, Task> tasks = new HashMap<TaskId, Task>();
-  private final Map<TaskId, Task> mapTasks = new HashMap<TaskId, Task>();
-  private final Map<TaskId, Task> reduceTasks = new HashMap<TaskId, Task>();
-  private final String user;
+  private final JobId jobId; //Can be picked from JobInfo with a conversion.
+  private final String user; //Can be picked up from JobInfo
   private final Path confFile;
   private final Path confFile;
-  private JobACLsManager aclsMgr;
-  private List<TaskAttemptCompletionEvent> completionEvents = null;
   private JobInfo jobInfo;
   private JobInfo jobInfo;
-
+  private JobReport report;
+  AtomicBoolean tasksLoaded = new AtomicBoolean(false);
+  private Lock tasksLock = new ReentrantLock();
+  private Map<TaskId, Task> tasks = new HashMap<TaskId, Task>();
+  private Map<TaskId, Task> mapTasks = new HashMap<TaskId, Task>();
+  private Map<TaskId, Task> reduceTasks = new HashMap<TaskId, Task>();
+  private List<TaskAttemptCompletionEvent> completionEvents = null;
+  private JobACLsManager aclsMgr;
+  
+  
   public CompletedJob(Configuration conf, JobId jobId, Path historyFile, 
   public CompletedJob(Configuration conf, JobId jobId, Path historyFile, 
       boolean loadTasks, String userName, Path confFile, JobACLsManager aclsMgr) 
       boolean loadTasks, String userName, Path confFile, JobACLsManager aclsMgr) 
           throws IOException {
           throws IOException {
     LOG.info("Loading job: " + jobId + " from file: " + historyFile);
     LOG.info("Loading job: " + jobId + " from file: " + historyFile);
     this.conf = conf;
     this.conf = conf;
     this.jobId = jobId;
     this.jobId = jobId;
+    this.user = userName;
     this.confFile = confFile;
     this.confFile = confFile;
     this.aclsMgr = aclsMgr;
     this.aclsMgr = aclsMgr;
-    
     loadFullHistoryData(loadTasks, historyFile);
     loadFullHistoryData(loadTasks, historyFile);
-    user = userName;
-    counters = jobInfo.getTotalCounters();
-    diagnostics.add(jobInfo.getErrorInfo());
-    report =
-        RecordFactoryProvider.getRecordFactory(null).newRecordInstance(
-            JobReport.class);
-    report.setJobId(jobId);
-    report.setJobState(JobState.valueOf(jobInfo.getJobStatus()));
-    report.setSubmitTime(jobInfo.getSubmitTime());
-    report.setStartTime(jobInfo.getLaunchTime());
-    report.setFinishTime(jobInfo.getFinishTime());
-    report.setJobName(jobInfo.getJobname());
-    report.setUser(jobInfo.getUsername());
-    report.setMapProgress((float) getCompletedMaps() / getTotalMaps());
-    report.setReduceProgress((float) getCompletedReduces() / getTotalReduces());
-    report.setJobFile(confFile.toString());
-    report.setTrackingUrl(JobHistoryUtils.getHistoryUrl(conf, TypeConverter
-        .toYarn(TypeConverter.fromYarn(jobId)).getAppId()));
-    report.setAMInfos(getAMInfos());
-    report.setIsUber(isUber());
   }
   }
 
 
   @Override
   @Override
@@ -122,7 +107,7 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
 
 
   @Override
   @Override
   public Counters getAllCounters() {
   public Counters getAllCounters() {
-    return counters;
+    return jobInfo.getTotalCounters();
   }
   }
 
 
   @Override
   @Override
@@ -131,10 +116,36 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
   }
   }
 
 
   @Override
   @Override
-  public JobReport getReport() {
+  public synchronized JobReport getReport() {
+    if (report == null) {
+      constructJobReport();
+    }
     return report;
     return report;
   }
   }
 
 
+  private void constructJobReport() {
+    report = Records.newRecord(JobReport.class);
+    report.setJobId(jobId);
+    report.setJobState(JobState.valueOf(jobInfo.getJobStatus()));
+    report.setSubmitTime(jobInfo.getSubmitTime());
+    report.setStartTime(jobInfo.getLaunchTime());
+    report.setFinishTime(jobInfo.getFinishTime());
+    report.setJobName(jobInfo.getJobname());
+    report.setUser(jobInfo.getUsername());
+    report.setMapProgress((float) getCompletedMaps() / getTotalMaps());
+    report.setReduceProgress((float) getCompletedReduces() / getTotalReduces());
+    report.setJobFile(confFile.toString());
+    String historyUrl = "N/A";
+    try {
+      historyUrl = JobHistoryUtils.getHistoryUrl(conf, jobId.getAppId());
+    } catch (UnknownHostException e) {
+      //Ignore.
+    }
+    report.setTrackingUrl(historyUrl);
+    report.setAMInfos(getAMInfos());
+    report.setIsUber(isUber());
+  }
+
   @Override
   @Override
   public float getProgress() {
   public float getProgress() {
     return 1.0f;
     return 1.0f;
@@ -142,16 +153,23 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
 
 
   @Override
   @Override
   public JobState getState() {
   public JobState getState() {
-    return report.getJobState();
+    return JobState.valueOf(jobInfo.getJobStatus());
   }
   }
 
 
   @Override
   @Override
   public Task getTask(TaskId taskId) {
   public Task getTask(TaskId taskId) {
-    return tasks.get(taskId);
+    if (tasksLoaded.get()) {
+      return tasks.get(taskId);
+    } else {
+      TaskID oldTaskId = TypeConverter.fromYarn(taskId);
+      CompletedTask completedTask =
+          new CompletedTask(taskId, jobInfo.getAllTasks().get(oldTaskId));
+      return completedTask;
+    }
   }
   }
 
 
   @Override
   @Override
-  public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
+  public synchronized TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
       int fromEventId, int maxEvents) {
       int fromEventId, int maxEvents) {
     if (completionEvents == null) {
     if (completionEvents == null) {
       constructTaskAttemptCompletionEvents();
       constructTaskAttemptCompletionEvents();
@@ -167,6 +185,7 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
   }
   }
 
 
   private void constructTaskAttemptCompletionEvents() {
   private void constructTaskAttemptCompletionEvents() {
+    loadAllTasks();
     completionEvents = new LinkedList<TaskAttemptCompletionEvent>();
     completionEvents = new LinkedList<TaskAttemptCompletionEvent>();
     List<TaskAttempt> allTaskAttempts = new LinkedList<TaskAttempt>();
     List<TaskAttempt> allTaskAttempts = new LinkedList<TaskAttempt>();
     for (TaskId taskId : tasks.keySet()) {
     for (TaskId taskId : tasks.keySet()) {
@@ -205,8 +224,8 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
     int eventId = 0;
     int eventId = 0;
     for (TaskAttempt taskAttempt : allTaskAttempts) {
     for (TaskAttempt taskAttempt : allTaskAttempts) {
 
 
-      TaskAttemptCompletionEvent tace = RecordFactoryProvider.getRecordFactory(
-          null).newRecordInstance(TaskAttemptCompletionEvent.class);
+      TaskAttemptCompletionEvent tace =
+          Records.newRecord(TaskAttemptCompletionEvent.class);
 
 
       int attemptRunTime = -1;
       int attemptRunTime = -1;
       if (taskAttempt.getLaunchTime() != 0 && taskAttempt.getFinishTime() != 0) {
       if (taskAttempt.getLaunchTime() != 0 && taskAttempt.getFinishTime() != 0) {
@@ -237,15 +256,42 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
 
 
   @Override
   @Override
   public Map<TaskId, Task> getTasks() {
   public Map<TaskId, Task> getTasks() {
+    loadAllTasks();
     return tasks;
     return tasks;
   }
   }
 
 
+  private void loadAllTasks() {
+    if (tasksLoaded.get()) {
+      return;
+    }
+    tasksLock.lock();
+    try {
+      if (tasksLoaded.get()) {
+        return;
+      }
+      for (Map.Entry<TaskID, TaskInfo> entry : jobInfo.getAllTasks().entrySet()) {
+        TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
+        TaskInfo taskInfo = entry.getValue();
+        Task task = new CompletedTask(yarnTaskID, taskInfo);
+        tasks.put(yarnTaskID, task);
+        if (task.getType() == TaskType.MAP) {
+          mapTasks.put(task.getID(), task);
+        } else if (task.getType() == TaskType.REDUCE) {
+          reduceTasks.put(task.getID(), task);
+        }
+      }
+      tasksLoaded.set(true);
+    } finally {
+      tasksLock.unlock();
+    }
+  }
+
   //History data is leisurely loaded when task level data is requested
   //History data is leisurely loaded when task level data is requested
   private synchronized void loadFullHistoryData(boolean loadTasks,
   private synchronized void loadFullHistoryData(boolean loadTasks,
       Path historyFileAbsolute) throws IOException {
       Path historyFileAbsolute) throws IOException {
     LOG.info("Loading history file: [" + historyFileAbsolute + "]");
     LOG.info("Loading history file: [" + historyFileAbsolute + "]");
-    if (jobInfo != null) {
-      return; //data already loaded
+    if (this.jobInfo != null) {
+      return;
     }
     }
     
     
     if (historyFileAbsolute != null) {
     if (historyFileAbsolute != null) {
@@ -254,7 +300,7 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
         parser =
         parser =
             new JobHistoryParser(historyFileAbsolute.getFileSystem(conf),
             new JobHistoryParser(historyFileAbsolute.getFileSystem(conf),
                 historyFileAbsolute);
                 historyFileAbsolute);
-        jobInfo = parser.parse();
+        this.jobInfo = parser.parse();
       } catch (IOException e) {
       } catch (IOException e) {
         throw new YarnException("Could not load history file "
         throw new YarnException("Could not load history file "
             + historyFileAbsolute, e);
             + historyFileAbsolute, e);
@@ -268,27 +314,15 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
     } else {
     } else {
       throw new IOException("History file not found");
       throw new IOException("History file not found");
     }
     }
-    
     if (loadTasks) {
     if (loadTasks) {
-    for (Map.Entry<org.apache.hadoop.mapreduce.TaskID, TaskInfo> entry : jobInfo
-        .getAllTasks().entrySet()) {
-      TaskId yarnTaskID = TypeConverter.toYarn(entry.getKey());
-      TaskInfo taskInfo = entry.getValue();
-      Task task = new CompletedTask(yarnTaskID, taskInfo);
-      tasks.put(yarnTaskID, task);
-      if (task.getType() == TaskType.MAP) {
-        mapTasks.put(task.getID(), task);
-      } else if (task.getType() == TaskType.REDUCE) {
-        reduceTasks.put(task.getID(), task);
-      }
-    }
-    }
-    LOG.info("TaskInfo loaded");
+      loadAllTasks();
+      LOG.info("TaskInfo loaded");
+    }    
   }
   }
 
 
   @Override
   @Override
   public List<String> getDiagnostics() {
   public List<String> getDiagnostics() {
-    return diagnostics;
+    return Collections.singletonList(jobInfo.getErrorInfo());
   }
   }
 
 
   @Override
   @Override
@@ -318,6 +352,7 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
 
 
   @Override
   @Override
   public Map<TaskId, Task> getTasks(TaskType taskType) {
   public Map<TaskId, Task> getTasks(TaskType taskType) {
+    loadAllTasks();
     if (TaskType.MAP.equals(taskType)) {
     if (TaskType.MAP.equals(taskType)) {
       return mapTasks;
       return mapTasks;
     } else {//we have only two types of tasks
     } else {//we have only two types of tasks

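The CompletedJob changes replace eager parsing of all task data in the constructor with lazy, lock-guarded loading (loadAllTasks) and lazy JobReport construction, which is what speeds up history UI responses (MAPREDUCE-3901). A minimal sketch of the loadAllTasks() idiom, not part of this commit; String/Integer stand in for TaskId/CompletedTask and the class name LazyTaskMapSketch is illustrative:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class LazyTaskMapSketch {
  private final AtomicBoolean loaded = new AtomicBoolean(false);
  private final Lock lock = new ReentrantLock();
  private final Map<String, Integer> tasks = new HashMap<String, Integer>();

  public Map<String, Integer> getTasks() {
    loadAll();                 // first caller pays the cost, later calls are cheap
    return tasks;
  }

  private void loadAll() {
    if (loaded.get()) {
      return;                  // fast path: already populated, no locking
    }
    lock.lock();
    try {
      if (loaded.get()) {
        return;                // another thread loaded while we waited for the lock
      }
      // Stand-in for building CompletedTask objects from the parsed JobInfo.
      tasks.put("task_1329348432655_0001_m_000000", 0);
      loaded.set(true);        // publish only after the map is fully built
    } finally {
      lock.unlock();
    }
  }

  public static void main(String[] args) {
    System.out.println(new LazyTaskMapSketch().getTasks().size());
  }
}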
+ 71 - 49
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTask.java

@@ -20,10 +20,13 @@ package org.apache.hadoop.mapreduce.v2.hs;
 
 
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.LinkedHashMap;
 import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
 
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
@@ -35,59 +38,24 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.util.Records;
 
 
 public class CompletedTask implements Task {
 public class CompletedTask implements Task {
 
 
-
-  private final TaskType type;
-  private Counters counters;
-  private final long startTime;
-  private final long finishTime;
-  private TaskState state;
   private final TaskId taskId;
   private final TaskId taskId;
-  private final TaskReport report;
+  private final TaskInfo taskInfo;
+  private TaskReport report;
+  private TaskAttemptId successfulAttempt;
+  private List<String> reportDiagnostics = new LinkedList<String>();
+  private Lock taskAttemptsLock = new ReentrantLock();
+  private AtomicBoolean taskAttemptsLoaded = new AtomicBoolean(false);
   private final Map<TaskAttemptId, TaskAttempt> attempts =
   private final Map<TaskAttemptId, TaskAttempt> attempts =
     new LinkedHashMap<TaskAttemptId, TaskAttempt>();
     new LinkedHashMap<TaskAttemptId, TaskAttempt>();
-  
-  private static final Log LOG = LogFactory.getLog(CompletedTask.class);
 
 
   CompletedTask(TaskId taskId, TaskInfo taskInfo) {
   CompletedTask(TaskId taskId, TaskInfo taskInfo) {
     //TODO JobHistoryParser.handleTaskFailedAttempt should use state from the event.
     //TODO JobHistoryParser.handleTaskFailedAttempt should use state from the event.
-    LOG.debug("HandlingTaskId: [" + taskId + "]");
+    this.taskInfo = taskInfo;
     this.taskId = taskId;
     this.taskId = taskId;
-    this.startTime = taskInfo.getStartTime();
-    this.finishTime = taskInfo.getFinishTime();
-    this.type = TypeConverter.toYarn(taskInfo.getTaskType());
-    if (taskInfo.getCounters() != null)
-      this.counters = taskInfo.getCounters();
-    if (taskInfo.getTaskStatus() != null) {
-      this.state = TaskState.valueOf(taskInfo.getTaskStatus());
-    } else {
-      this.state = TaskState.KILLED;
-    }
-    report = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskReport.class);
-    for (TaskAttemptInfo attemptHistory : taskInfo.getAllTaskAttempts()
-        .values()) {
-      CompletedTaskAttempt attempt = new CompletedTaskAttempt(taskId, 
-          attemptHistory);
-      report.addAllDiagnostics(attempt.getDiagnostics()); //TODO TMI?
-      attempts.put(attempt.getID(), attempt);
-      if (attemptHistory.getTaskStatus() != null
-          && attemptHistory.getTaskStatus().equals(
-              TaskState.SUCCEEDED.toString())
-          && report.getSuccessfulAttempt() == null) {
-        report.setSuccessfulAttempt(TypeConverter.toYarn(attemptHistory
-            .getAttemptId()));
-      }
-    }
-    report.setTaskId(taskId);
-    report.setStartTime(startTime);
-    report.setFinishTime(finishTime);
-    report.setTaskState(state);
-    report.setProgress(getProgress());
-    report.setCounters(TypeConverter.toYarn(getCounters()));
-    report.addAllRunningAttempts(new ArrayList<TaskAttemptId>(attempts.keySet()));
   }
   }
 
 
   @Override
   @Override
@@ -97,17 +65,19 @@ public class CompletedTask implements Task {
 
 
   @Override
   @Override
   public TaskAttempt getAttempt(TaskAttemptId attemptID) {
   public TaskAttempt getAttempt(TaskAttemptId attemptID) {
+    loadAllTaskAttempts();
     return attempts.get(attemptID);
     return attempts.get(attemptID);
   }
   }
 
 
   @Override
   @Override
   public Map<TaskAttemptId, TaskAttempt> getAttempts() {
   public Map<TaskAttemptId, TaskAttempt> getAttempts() {
+    loadAllTaskAttempts();
     return attempts;
     return attempts;
   }
   }
 
 
   @Override
   @Override
   public Counters getCounters() {
   public Counters getCounters() {
-    return counters;
+    return taskInfo.getCounters();
   }
   }
 
 
   @Override
   @Override
@@ -121,13 +91,18 @@ public class CompletedTask implements Task {
   }
   }
 
 
   @Override
   @Override
-  public TaskReport getReport() {
+  public synchronized TaskReport getReport() {
+    if (report == null) {
+      constructTaskReport();
+    }
     return report;
     return report;
   }
   }
+  
 
 
+  
   @Override
   @Override
   public TaskType getType() {
   public TaskType getType() {
-    return type;
+    return TypeConverter.toYarn(taskInfo.getTaskType());
   }
   }
 
 
   @Override
   @Override
@@ -137,7 +112,54 @@ public class CompletedTask implements Task {
 
 
   @Override
   @Override
   public TaskState getState() {
   public TaskState getState() {
-    return state;
+    return taskInfo.getTaskStatus() == null ? TaskState.KILLED : TaskState
+        .valueOf(taskInfo.getTaskStatus());
   }
   }
 
 
+  private void constructTaskReport() {
+    loadAllTaskAttempts();
+    this.report = Records.newRecord(TaskReport.class);
+    report.setTaskId(taskId);
+    report.setStartTime(taskInfo.getStartTime());
+    report.setFinishTime(taskInfo.getFinishTime());
+    report.setTaskState(getState());
+    report.setProgress(getProgress());
+    report.setCounters(TypeConverter.toYarn(getCounters()));
+    if (successfulAttempt != null) {
+      report.setSuccessfulAttempt(successfulAttempt);
+    }
+    report.addAllDiagnostics(reportDiagnostics);
+    report
+        .addAllRunningAttempts(new ArrayList<TaskAttemptId>(attempts.keySet()));
+  }
+
+  private void loadAllTaskAttempts() {
+    if (taskAttemptsLoaded.get()) {
+      return;
+    }
+    taskAttemptsLock.lock();
+    try {
+      if (taskAttemptsLoaded.get()) {
+        return;
+      }
+
+      for (TaskAttemptInfo attemptHistory : taskInfo.getAllTaskAttempts()
+          .values()) {
+        CompletedTaskAttempt attempt =
+            new CompletedTaskAttempt(taskId, attemptHistory);
+        reportDiagnostics.addAll(attempt.getDiagnostics());
+        attempts.put(attempt.getID(), attempt);
+        if (successfulAttempt == null
+            && attemptHistory.getTaskStatus() != null
+            && attemptHistory.getTaskStatus().equals(
+                TaskState.SUCCEEDED.toString())) {
+          successfulAttempt =
+              TypeConverter.toYarn(attemptHistory.getAttemptId());
+        }
+      }
+      taskAttemptsLoaded.set(true);
+    } finally {
+      taskAttemptsLock.unlock();
+    }
+  }
 }
 }

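CompletedTask (and CompletedTaskAttempt below) now builds its report record only on the first getReport() call instead of in the constructor. A minimal sketch of that lazy-construction idiom, not part of this commit; the Report type and class name are stand-ins for the PB-backed TaskReport:

public class LazyReportSketch {
  // Stand-in for the PB-backed TaskReport record.
  static class Report {
    String state;
  }

  private Report report;

  // Mirrors the synchronized getReport()/constructTaskReport() split above.
  public synchronized Report getReport() {
    if (report == null) {
      report = buildReport();  // built once, on first request
    }
    return report;
  }

  private Report buildReport() {
    Report r = new Report();
    r.state = "SUCCEEDED";     // fields would be copied from the parsed TaskInfo
    return r;
  }

  public static void main(String[] args) {
    System.out.println(new LazyReportSketch().getReport().state);
  }
}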
+ 41 - 41
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java

@@ -30,25 +30,21 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.util.Records;
 
 
 public class CompletedTaskAttempt implements TaskAttempt {
 public class CompletedTaskAttempt implements TaskAttempt {
 
 
   private final TaskAttemptInfo attemptInfo;
   private final TaskAttemptInfo attemptInfo;
   private final TaskAttemptId attemptId;
   private final TaskAttemptId attemptId;
-  private Counters counters;
   private final TaskAttemptState state;
   private final TaskAttemptState state;
-  private final TaskAttemptReport report;
   private final List<String> diagnostics = new ArrayList<String>();
   private final List<String> diagnostics = new ArrayList<String>();
+  private TaskAttemptReport report;
 
 
   private String localDiagMessage;
   private String localDiagMessage;
 
 
   CompletedTaskAttempt(TaskId taskId, TaskAttemptInfo attemptInfo) {
   CompletedTaskAttempt(TaskId taskId, TaskAttemptInfo attemptInfo) {
     this.attemptInfo = attemptInfo;
     this.attemptInfo = attemptInfo;
     this.attemptId = TypeConverter.toYarn(attemptInfo.getAttemptId());
     this.attemptId = TypeConverter.toYarn(attemptInfo.getAttemptId());
-    if (attemptInfo.getCounters() != null) {
-      this.counters = attemptInfo.getCounters();
-    }
     if (attemptInfo.getTaskStatus() != null) {
     if (attemptInfo.getTaskStatus() != null) {
       this.state = TaskAttemptState.valueOf(attemptInfo.getTaskStatus());
       this.state = TaskAttemptState.valueOf(attemptInfo.getTaskStatus());
     } else {
     } else {
@@ -56,37 +52,9 @@ public class CompletedTaskAttempt implements TaskAttempt {
       localDiagMessage = "Attmpt state missing from History : marked as KILLED";
       localDiagMessage = "Attmpt state missing from History : marked as KILLED";
       diagnostics.add(localDiagMessage);
       diagnostics.add(localDiagMessage);
     }
     }
-    
     if (attemptInfo.getError() != null) {
     if (attemptInfo.getError() != null) {
       diagnostics.add(attemptInfo.getError());
       diagnostics.add(attemptInfo.getError());
     }
     }
-    
-    report = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(TaskAttemptReport.class);
-    
-    report.setTaskAttemptId(attemptId);
-    report.setTaskAttemptState(state);
-    report.setProgress(getProgress());
-    report.setStartTime(attemptInfo.getStartTime());
-    
-    report.setFinishTime(attemptInfo.getFinishTime());
-    report.setShuffleFinishTime(attemptInfo.getShuffleFinishTime());
-    report.setSortFinishTime(attemptInfo.getSortFinishTime());
-    if (localDiagMessage != null) {
-      report.setDiagnosticInfo(attemptInfo.getError() + ", " + localDiagMessage);
-    } else {
-    report.setDiagnosticInfo(attemptInfo.getError());
-    }
-//    report.setPhase(attemptInfo.get); //TODO
-    report.setStateString(attemptInfo.getState());
-    report.setCounters(TypeConverter.toYarn(getCounters()));
-    report.setContainerId(attemptInfo.getContainerId());
-    if (attemptInfo.getHostname() == null) {
-      report.setNodeManagerHost("UNKNOWN");
-    } else {
-      report.setNodeManagerHost(attemptInfo.getHostname());
-      report.setNodeManagerPort(attemptInfo.getPort());
-    }
-    report.setNodeManagerHttpPort(attemptInfo.getHttpPort());
   }
   }
 
 
   @Override
   @Override
@@ -111,7 +79,7 @@ public class CompletedTaskAttempt implements TaskAttempt {
 
 
   @Override
   @Override
   public Counters getCounters() {
   public Counters getCounters() {
-    return counters;
+    return attemptInfo.getCounters();
   }
   }
 
 
   @Override
   @Override
@@ -125,7 +93,10 @@ public class CompletedTaskAttempt implements TaskAttempt {
   }
   }
 
 
   @Override
   @Override
-  public TaskAttemptReport getReport() {
+  public synchronized TaskAttemptReport getReport() {
+    if (report == null) {
+      constructTaskAttemptReport();
+    }
     return report;
     return report;
   }
   }
 
 
@@ -146,26 +117,55 @@ public class CompletedTaskAttempt implements TaskAttempt {
 
 
   @Override
   @Override
   public long getLaunchTime() {
   public long getLaunchTime() {
-    return report.getStartTime();
+    return attemptInfo.getStartTime();
   }
   }
 
 
   @Override
   @Override
   public long getFinishTime() {
   public long getFinishTime() {
-    return report.getFinishTime();
+    return attemptInfo.getFinishTime();
   }
   }
   
   
   @Override
   @Override
   public long getShuffleFinishTime() {
   public long getShuffleFinishTime() {
-    return report.getShuffleFinishTime();
+    return attemptInfo.getShuffleFinishTime();
   }
   }
 
 
   @Override
   @Override
   public long getSortFinishTime() {
   public long getSortFinishTime() {
-    return report.getSortFinishTime();
+    return attemptInfo.getSortFinishTime();
   }
   }
 
 
   @Override
   @Override
   public int getShufflePort() {
   public int getShufflePort() {
-    throw new UnsupportedOperationException("Not supported yet.");
+    return attemptInfo.getShufflePort();
+  }
+
+  private void constructTaskAttemptReport() {
+    report = Records.newRecord(TaskAttemptReport.class);
+
+    report.setTaskAttemptId(attemptId);
+    report.setTaskAttemptState(state);
+    report.setProgress(getProgress());
+    report.setStartTime(attemptInfo.getStartTime());
+    report.setFinishTime(attemptInfo.getFinishTime());
+    report.setShuffleFinishTime(attemptInfo.getShuffleFinishTime());
+    report.setSortFinishTime(attemptInfo.getSortFinishTime());
+    if (localDiagMessage != null) {
+      report
+          .setDiagnosticInfo(attemptInfo.getError() + ", " + localDiagMessage);
+    } else {
+      report.setDiagnosticInfo(attemptInfo.getError());
+    }
+    // report.setPhase(attemptInfo.get); //TODO
+    report.setStateString(attemptInfo.getState());
+    report.setCounters(TypeConverter.toYarn(getCounters()));
+    report.setContainerId(attemptInfo.getContainerId());
+    if (attemptInfo.getHostname() == null) {
+      report.setNodeManagerHost("UNKNOWN");
+    } else {
+      report.setNodeManagerHost(attemptInfo.getHostname());
+      report.setNodeManagerPort(attemptInfo.getPort());
+    }
+    report.setNodeManagerHttpPort(attemptInfo.getHttpPort());
   }
   }
 }
 }
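
The new getReport() above defers building the TaskAttemptReport until it is first requested. A minimal sketch of that idiom, assuming hypothetical ReportHolder and Report types as stand-ins for the real record classes:

// Sketch of lazy, synchronized report construction: the record is built at most
// once, on first call. The real code calls Records.newRecord(TaskAttemptReport.class)
// and copies fields from TaskAttemptInfo, as constructTaskAttemptReport() shows.
class ReportHolder {
  static class Report { }     // stand-in for TaskAttemptReport

  private Report report;      // guarded by "this"

  synchronized Report getReport() {
    if (report == null) {
      report = buildReport();  // deferred until someone actually asks for it
    }
    return report;
  }

  private Report buildReport() {
    return new Report();       // real code populates the record fields here
  }
}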

+ 16 - 8
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java

@@ -24,6 +24,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.List;
 import java.util.Map;
 import java.util.Map;
 import java.util.Set;
 import java.util.Set;
@@ -117,9 +118,8 @@ public class JobHistory extends AbstractService implements HistoryContext   {
   
   
   //Maintains a list of known done subdirectories. Not currently used.
   //Maintains a list of known done subdirectories. Not currently used.
   private final Set<Path> existingDoneSubdirs = new HashSet<Path>();
   private final Set<Path> existingDoneSubdirs = new HashSet<Path>();
-  
-  private final SortedMap<JobId, Job> loadedJobCache = 
-    new ConcurrentSkipListMap<JobId, Job>();
+
+  private Map<JobId, Job> loadedJobCache = null;
 
 
   /**
   /**
    * Maintains a mapping between intermediate user directories and the last 
    * Maintains a mapping between intermediate user directories and the last 
@@ -167,6 +167,7 @@ public class JobHistory extends AbstractService implements HistoryContext   {
    * .....${DONE_DIR}/VERSION_STRING/YYYY/MM/DD/HH/SERIAL_NUM/jh{index_entries}.jhist
    * .....${DONE_DIR}/VERSION_STRING/YYYY/MM/DD/HH/SERIAL_NUM/jh{index_entries}.jhist
    */
    */
 
 
+  @SuppressWarnings("serial")
   @Override
   @Override
   public void init(Configuration conf) throws YarnException {
   public void init(Configuration conf) throws YarnException {
     LOG.info("JobHistory Init");
     LOG.info("JobHistory Init");
@@ -224,6 +225,16 @@ public class JobHistory extends AbstractService implements HistoryContext   {
             DEFAULT_MOVE_THREAD_INTERVAL);
             DEFAULT_MOVE_THREAD_INTERVAL);
     numMoveThreads = conf.getInt(JHAdminConfig.MR_HISTORY_MOVE_THREAD_COUNT,
     numMoveThreads = conf.getInt(JHAdminConfig.MR_HISTORY_MOVE_THREAD_COUNT,
         DEFAULT_MOVE_THREAD_COUNT);
         DEFAULT_MOVE_THREAD_COUNT);
+    
+    loadedJobCache =
+        Collections.synchronizedMap(new LinkedHashMap<JobId, Job>(
+            loadedJobCacheSize + 1, 0.75f, true) {
+          @Override
+          public boolean removeEldestEntry(final Map.Entry<JobId, Job> eldest) {
+            return super.size() > loadedJobCacheSize;
+          }
+        });
+    
     try {
     try {
       initExisting();
       initExisting();
     } catch (IOException e) {
     } catch (IOException e) {
@@ -465,9 +476,6 @@ public class JobHistory extends AbstractService implements HistoryContext   {
       LOG.debug("Adding "+job.getID()+" to loaded job cache");
       LOG.debug("Adding "+job.getID()+" to loaded job cache");
     }
     }
     loadedJobCache.put(job.getID(), job);
     loadedJobCache.put(job.getID(), job);
-    if (loadedJobCache.size() > loadedJobCacheSize ) {
-      loadedJobCache.remove(loadedJobCache.firstKey());
-    }
   }
   }
   
   
   
   
@@ -655,7 +663,7 @@ public class JobHistory extends AbstractService implements HistoryContext   {
     synchronized(metaInfo) {
     synchronized(metaInfo) {
       try {
       try {
         Job job = new CompletedJob(conf, metaInfo.getJobIndexInfo().getJobId(), 
         Job job = new CompletedJob(conf, metaInfo.getJobIndexInfo().getJobId(), 
-            metaInfo.getHistoryFile(), true, metaInfo.getJobIndexInfo().getUser(),
+            metaInfo.getHistoryFile(), false, metaInfo.getJobIndexInfo().getUser(),
             metaInfo.getConfFile(), this.aclsMgr);
             metaInfo.getConfFile(), this.aclsMgr);
         addToLoadedJobCache(job);
         addToLoadedJobCache(job);
         return job;
         return job;
@@ -938,7 +946,7 @@ public class JobHistory extends AbstractService implements HistoryContext   {
     LOG.debug("Called getAllJobs()");
     LOG.debug("Called getAllJobs()");
     return getAllJobsInternal();
     return getAllJobsInternal();
   }
   }
-  
+
   static class MetaInfo {
   static class MetaInfo {
     private Path historyFile;
     private Path historyFile;
     private Path confFile; 
     private Path confFile; 
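
The loadedJobCache rework above swaps the manual firstKey() eviction for a bounded, access-ordered LinkedHashMap. A self-contained sketch of that LRU idiom follows; newLruCache and the capacity of 3 are illustrative, while the real limit comes from the loaded-jobs cache size setting (mapreduce.jobhistory.loadedjobs.cache.size).

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

// Sketch of the LRU idiom used for loadedJobCache: accessOrder=true keeps the most
// recently used entries, and removeEldestEntry evicts once the cap is exceeded.
class LruCacheSketch {
  static <K, V> Map<K, V> newLruCache(final int maxEntries) {
    return Collections.synchronizedMap(
        new LinkedHashMap<K, V>(maxEntries + 1, 0.75f, true) {
          @Override
          protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
            return size() > maxEntries;   // drop the least recently used entry
          }
        });
  }

  public static void main(String[] args) {
    Map<String, String> cache = newLruCache(3);   // hypothetical capacity of 3
    cache.put("job1", "a"); cache.put("job2", "b"); cache.put("job3", "c");
    cache.get("job1");                            // touch job1 so it is most recent
    cache.put("job4", "d");                       // evicts job2, the eldest by access
    System.out.println(cache.keySet());           // prints [job3, job1, job4]
  }
}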

+ 145 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java

@@ -0,0 +1,145 @@
+package org.apache.hadoop.mapreduce.v2.hs;
+
+import static junit.framework.Assert.assertEquals;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.JobACLsManager;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+@RunWith(value = Parameterized.class)
+public class TestJobHistoryEntities {
+
+  private final String historyFileName =
+      "job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist";
+  private final String confFileName = "job_1329348432655_0001_conf.xml";
+  private final Configuration conf = new Configuration();
+  private final JobACLsManager jobAclsManager = new JobACLsManager(conf);
+  private boolean loadTasks;
+  private JobId jobId = MRBuilderUtils.newJobId(1329348432655l, 1, 1);
+  Path fulleHistoryPath =
+    new Path(this.getClass().getClassLoader().getResource(historyFileName)
+        .getFile());
+  Path fullConfPath =
+    new Path(this.getClass().getClassLoader().getResource(confFileName)
+        .getFile());
+  private CompletedJob completedJob;
+
+  public TestJobHistoryEntities(boolean loadTasks) throws Exception {
+    this.loadTasks = loadTasks;
+  }
+
+  @Parameters
+  public static Collection<Object[]> data() {
+    List<Object[]> list = new ArrayList<Object[]>(2);
+    list.add(new Object[] { true });
+    list.add(new Object[] { false });
+    return list;
+  }
+
+  /* Verify some expected values based on the history file */
+  @Test
+  public void testCompletedJob() throws Exception {
+    //Re-initialize to verify the delayed load.
+    completedJob =
+      new CompletedJob(conf, jobId, fulleHistoryPath, loadTasks, "user",
+          fullConfPath, jobAclsManager);
+    //Verify tasks loaded based on loadTask parameter.
+    assertEquals(loadTasks, completedJob.tasksLoaded.get());
+    assertEquals(1, completedJob.getAMInfos().size());
+    assertEquals(10, completedJob.getCompletedMaps());
+    assertEquals(1, completedJob.getCompletedReduces());
+    assertEquals(11, completedJob.getTasks().size());
+    //Verify tasks loaded at this point.
+    assertEquals(true, completedJob.tasksLoaded.get());
+    assertEquals(10, completedJob.getTasks(TaskType.MAP).size());
+    assertEquals(1, completedJob.getTasks(TaskType.REDUCE).size());
+    assertEquals("user", completedJob.getUserName());
+    assertEquals(JobState.SUCCEEDED, completedJob.getState());
+    JobReport jobReport = completedJob.getReport();
+    assertEquals("user", jobReport.getUser());
+    assertEquals(JobState.SUCCEEDED, jobReport.getJobState());
+  }
+  
+  @Test
+  public void testCompletedTask() throws Exception {
+    completedJob =
+      new CompletedJob(conf, jobId, fulleHistoryPath, loadTasks, "user",
+          fullConfPath, jobAclsManager);
+    TaskId mt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
+    TaskId rt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
+    
+    Map<TaskId, Task> mapTasks = completedJob.getTasks(TaskType.MAP);
+    Map<TaskId, Task> reduceTasks = completedJob.getTasks(TaskType.REDUCE);
+    assertEquals(10, mapTasks.size());
+    assertEquals(1, reduceTasks.size());
+    
+    Task mt1 = mapTasks.get(mt1Id);
+    assertEquals(1, mt1.getAttempts().size());
+    assertEquals(TaskState.SUCCEEDED, mt1.getState());
+    TaskReport mt1Report = mt1.getReport();
+    assertEquals(TaskState.SUCCEEDED, mt1Report.getTaskState());
+    assertEquals(mt1Id, mt1Report.getTaskId());
+    Task rt1 = reduceTasks.get(rt1Id);
+    assertEquals(1, rt1.getAttempts().size());
+    assertEquals(TaskState.SUCCEEDED, rt1.getState());
+    TaskReport rt1Report = rt1.getReport();
+    assertEquals(TaskState.SUCCEEDED, rt1Report.getTaskState());
+    assertEquals(rt1Id, rt1Report.getTaskId());
+  }
+  
+  @Test
+  public void testCompletedTaskAttempt() throws Exception {
+    completedJob =
+      new CompletedJob(conf, jobId, fulleHistoryPath, loadTasks, "user",
+          fullConfPath, jobAclsManager);
+    TaskId mt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
+    TaskId rt1Id = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
+    TaskAttemptId mta1Id = MRBuilderUtils.newTaskAttemptId(mt1Id, 0);
+    TaskAttemptId rta1Id = MRBuilderUtils.newTaskAttemptId(rt1Id, 0);
+    
+    Task mt1 = completedJob.getTask(mt1Id);
+    Task rt1 = completedJob.getTask(rt1Id);
+    
+    TaskAttempt mta1 = mt1.getAttempt(mta1Id);
+    assertEquals(TaskAttemptState.SUCCEEDED, mta1.getState());
+    assertEquals("localhost:45454", mta1.getAssignedContainerMgrAddress());
+    assertEquals("localhost:9999", mta1.getNodeHttpAddress());
+    TaskAttemptReport mta1Report = mta1.getReport();
+    assertEquals(TaskAttemptState.SUCCEEDED, mta1Report.getTaskAttemptState());
+    assertEquals("localhost", mta1Report.getNodeManagerHost());
+    assertEquals(45454, mta1Report.getNodeManagerPort());
+    assertEquals(9999, mta1Report.getNodeManagerHttpPort());
+    
+    TaskAttempt rta1 = rt1.getAttempt(rta1Id);
+    assertEquals(TaskAttemptState.SUCCEEDED, rta1.getState());
+    assertEquals("localhost:45454", rta1.getAssignedContainerMgrAddress());
+    assertEquals("localhost:9999", rta1.getNodeHttpAddress());
+    TaskAttemptReport rta1Report = rta1.getReport();
+    assertEquals(TaskAttemptState.SUCCEEDED, rta1Report.getTaskAttemptState());
+    assertEquals("localhost", rta1Report.getNodeManagerHost());
+    assertEquals(45454, rta1Report.getNodeManagerPort());
+    assertEquals(9999, rta1Report.getNodeManagerHttpPort());
+  }
+}

+ 1 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist

File diff suppressed because it is too large


+ 397 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/resources/job_1329348432655_0001_conf.xml

@@ -0,0 +1,397 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?><configuration>
+<property><!--Loaded from job.xml--><name>mapreduce.job.ubertask.enable</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.max-completed-applications</name><value>10000</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.delayed.delegation-token.removal-interval-ms</name><value>30000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.client.submit.file.replication</name><value>10</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.container-manager.thread-count</name><value>20</value></property>
+<property><!--Loaded from job.xml--><name>mapred.queue.default.acl-administer-jobs</name><value>*</value></property>
+<property><!--Loaded from job.xml--><name>dfs.image.transfer.bandwidthPerSec</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.healthchecker.interval</name><value>60000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.staging.root.dir</name><value>${hadoop.tmp.dir}/mapred/staging</value></property>
+<property><!--Loaded from job.xml--><name>dfs.block.access.token.lifetime</name><value>600</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.am.max-retries</name><value>2</value></property>
+<property><!--Loaded from job.xml--><name>fs.AbstractFileSystem.file.impl</name><value>org.apache.hadoop.fs.local.LocalFs</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.client.completion.pollinterval</name><value>5000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.ubertask.maxreduces</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.shuffle.memory.limit.percent</name><value>0.25</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.http.authentication.kerberos.keytab</name><value>${user.home}/hadoop.keytab</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.keytab</name><value>/etc/krb5.keytab</value></property>
+<property><!--Loaded from job.xml--><name>io.seqfile.sorter.recordlimit</name><value>1000000</value></property>
+<property><!--Loaded from job.xml--><name>s3.blocksize</name><value>67108864</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.task.io.sort.factor</name><value>10</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.disk-health-checker.interval-ms</name><value>120000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.working.dir</name><value>hdfs://localhost:8021/user/user</value></property>
+<property><!--Loaded from job.xml--><name>yarn.admin.acl</name><value>*</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.speculative.speculativecap</name><value>0.1</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.num.checkpoints.retained</name><value>2</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.delegation.token.renew-interval</name><value>86400000</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.resource.memory-mb</name><value>8192</value></property>
+<property><!--Loaded from job.xml--><name>io.map.index.interval</name><value>128</value></property>
+<property><!--Loaded from job.xml--><name>s3.client-write-packet-size</name><value>65536</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.http-address</name><value>0.0.0.0:50070</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.task.files.preserve.failedtasks</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.reduce.class</name><value>org.apache.hadoop.mapreduce.SleepJob$SleepReducer</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.hdfs.configuration.version</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>s3.replication</name><value>3</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.balance.bandwidthPerSec</name><value>1048576</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.shuffle.connect.timeout</name><value>180000</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.aux-services</name><value>mapreduce.shuffle</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.block.volume.choice.policy</name><value>org.apache.hadoop.hdfs.server.datanode.RoundRobinVolumesPolicy</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.complete.cancel.delegation.tokens</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>yarn.server.nodemanager.connect.rm</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.checkpoint.dir</name><value>file://${hadoop.tmp.dir}/dfs/namesecondary</value></property>
+<property><!--Loaded from job.xml--><name>fs.trash.interval</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.admin.address</name><value>0.0.0.0:8141</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.outputformat.class</name><value>org.apache.hadoop.mapreduce.lib.output.NullOutputFormat</value></property>
+<property><!--Loaded from job.xml--><name>yarn.log.server.url</name><value>http://localhost:19888/jobhistory/nmlogs</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.http.authentication.kerberos.principal</name><value>HTTP/localhost@LOCALHOST</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.taskmemorymanager.monitoringinterval</name><value>5000</value></property>
+<property><!--Loaded from job.xml--><name>s3native.blocksize</name><value>67108864</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.edits.dir</name><value>${dfs.namenode.name.dir}</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.map.class</name><value>org.apache.hadoop.mapreduce.SleepJob$SleepMapper</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.http.address</name><value>0.0.0.0:50075</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.jobhistory.task.numberprogresssplits</name><value>12</value></property>
+<property><!--Loaded from job.xml--><name>yarn.acl.enable</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.localizer.fetch.thread-count</name><value>4</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.proxyuser.user.hosts</name><value>127.0.0.1</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.security.authorization</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.safemode.extension</name><value>30000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.log.level</name><value>INFO</value></property>
+<property><!--Loaded from job.xml--><name>yarn.log-aggregation-enable</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>dfs.https.server.keystore.resource</name><value>ssl-server.xml</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.instrumentation</name><value>org.apache.hadoop.mapred.JobTrackerMetricsInst</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.replication.min</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.map.java.opts</name><value>-Xmx500m</value></property>
+<property><!--Loaded from job.xml--><name>s3native.bytes-per-checksum</name><value>512</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.tasks.sleeptimebeforesigkill</name><value>5000</value></property>
+<property><!--Loaded from job.xml--><name>tfile.fs.output.buffer.size</name><value>262144</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.local-dirs</name><value>/home/user/local-dir/</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.persist.jobstatus.active</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>fs.AbstractFileSystem.hdfs.impl</name><value>org.apache.hadoop.fs.Hdfs</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.safemode.min.datanodes</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.local.dir.minspacestart</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>dfs.client.https.need-auth</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>fs.har.impl.disable.cache</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>dfs.client.https.keystore.resource</name><value>ssl-client.xml</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.max.objects</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.safemode.threshold-pct</name><value>0.999f</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.local.dir.minspacekill</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.retiredjobs.cache.size</name><value>1000</value></property>
+<property><!--Loaded from job.xml--><name>dfs.blocksize</name><value>67108864</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.reduce.slowstart.completedmaps</name><value>0.05</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.end-notification.retry.attempts</name><value>5</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.inputformat.class</name><value>org.apache.hadoop.mapreduce.SleepJob$SleepInputFormat</value></property>
+<property><!--Loaded from job.xml--><name>fs.s3n.impl</name><value>org.apache.hadoop.fs.s3native.NativeS3FileSystem</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.map.memory.mb</name><value>512</value></property>
+<property><!--Loaded from Unknown--><name>mapreduce.job.user.name</name><value>user</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.outofband.heartbeat</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>io.native.lib.available</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.persist.jobstatus.hours</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>dfs.client-write-packet-size</name><value>65536</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.client.progressmonitor.pollinterval</name><value>1000</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.name.dir</name><value>file:///home/user/hadoop-user/dfs/name</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.output.fileoutputformat.compression.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.input.buffer.percent</name><value>0.0</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.map.output.compress.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.delegation-token.keepalive-time-ms</name><value>300000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.map.skip.proc.count.autoincr</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.directoryscan.threads</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.address</name><value>local</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.cluster.local.dir</name><value>${hadoop.tmp.dir}/mapred/local</value></property>
+<property><!--Loaded from Unknown--><name>mapreduce.job.application.attempt.id</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>dfs.permissions.enabled</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.taskcontroller</name><value>org.apache.hadoop.mapred.DefaultTaskController</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.shuffle.parallelcopies</name><value>5</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.env-whitelist</name><value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.heartbeats.in.second</name><value>100</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.maxtaskfailures.per.tracker</name><value>4</value></property>
+<property><!--Loaded from job.xml--><name>ipc.client.connection.maxidletime</name><value>10000</value></property>
+<property><!--Loaded from job.xml--><name>dfs.blockreport.intervalMsec</name><value>21600000</value></property>
+<property><!--Loaded from job.xml--><name>fs.s3.sleepTimeSeconds</name><value>10</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.replication.considerLoad</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>dfs.client.block.write.retries</name><value>3</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.proxyuser.user.groups</name><value>users</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.name.dir.restore</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>io.seqfile.lazydecompress</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>dfs.https.enable</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.merge.inmem.threshold</name><value>1000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.input.fileinputformat.split.minsize</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>dfs.replication</name><value>3</value></property>
+<property><!--Loaded from job.xml--><name>ipc.client.tcpnodelay</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.map.output.value.class</name><value>org.apache.hadoop.io.NullWritable</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.accesstime.precision</name><value>3600000</value></property>
+<property><!--Loaded from job.xml--><name>s3.stream-buffer-size</name><value>4096</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.tasktracker.maxblacklists</name><value>4</value></property>
+<property><!--Loaded from Unknown--><name>rpc.engine.com.google.protobuf.BlockingService</name><value>org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.jvm.numtasks</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.task.io.sort.mb</name><value>100</value></property>
+<property><!--Loaded from job.xml--><name>io.compression.codecs</name><value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.DeflateCodec,org.apache.hadoop.io.compress.SnappyCodec,org.apache.hadoop.io.compress.Lz4Codec</value></property>
+<property><!--Loaded from job.xml--><name>io.file.buffer.size</name><value>4096</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.jar</name><value>/tmp/hadoop-yarn/staging/user/.staging/job_1329348432655_0001/job.jar</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.checkpoint.txns</name><value>40000</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.admin-env</name><value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.split.metainfo.maxsize</name><value>10000000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.output.fileoutputformat.compression.type</name><value>RECORD</value></property>
+<property><!--Loaded from job.xml--><name>kfs.replication</name><value>3</value></property>
+<property><!--Loaded from job.xml--><name>yarn.app.mapreduce.am.scheduler.heartbeat.interval-ms</name><value>1000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.maxattempts</name><value>4</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.sleepjob.map.sleep.time</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>kfs.stream-buffer-size</name><value>4096</value></property>
+<property><!--Loaded from job.xml--><name>fs.har.impl</name><value>org.apache.hadoop.fs.HarFileSystem</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.security.authentication</name><value>simple</value></property>
+<property><!--Loaded from job.xml--><name>fs.s3.buffer.dir</name><value>${hadoop.tmp.dir}/s3</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.taskscheduler</name><value>org.apache.hadoop.mapred.JobQueueTaskScheduler</value></property>
+<property><!--Loaded from job.xml--><name>yarn.app.mapreduce.am.job.task.listener.thread-count</name><value>30</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.reduces</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.map.sort.spill.percent</name><value>0.80</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.end-notification.retry.interval</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.maps</name><value>10</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.speculative.slownodethreshold</name><value>1.0</value></property>
+<property><!--Loaded from job.xml--><name>dfs.block.access.token.enable</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>tfile.fs.input.buffer.size</name><value>262144</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.map.speculative</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.acl-view-job</name><value> </value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.map.output.key.class</name><value>org.apache.hadoop.io.IntWritable</value></property>
+<property><!--Loaded from job.xml--><name>yarn.ipc.serializer.type</name><value>protocolbuffers</value></property>
+<property><!--Loaded from mapred-default.xml--><name>mapreduce.job.end-notification.max.retry.interval</name><value>5</value></property>
+<property><!--Loaded from job.xml--><name>ftp.blocksize</name><value>67108864</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.http.threads</name><value>40</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.java.opts</name><value>-Xmx500m</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.data.dir</name><value>file:///home/user/hadoop-user/dfs/data</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.replication.interval</name><value>3</value></property>
+<property><!--Loaded from job.xml--><name>fs.file.impl</name><value>org.apache.hadoop.fs.LocalFileSystem</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.https-address</name><value>0.0.0.0:50470</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.task.skip.start.attempts</name><value>2</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.persist.jobstatus.dir</name><value>/jobtracker/jobsInfo</value></property>
+<property><!--Loaded from job.xml--><name>ipc.client.kill.max</name><value>10</value></property>
+<property><!--Loaded from mapred-default.xml--><name>mapreduce.job.end-notification.max.attempts</name><value>5</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobhistory.max-age-ms</name><value>10000000000</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.zookeeper-store.session.timeout-ms</name><value>60000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.task.tmp.dir</name><value>./tmp</value></property>
+<property><!--Loaded from job.xml--><name>dfs.default.chunk.view.size</name><value>32768</value></property>
+<property><!--Loaded from job.xml--><name>kfs.bytes-per-checksum</name><value>512</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.memory.mb</name><value>512</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.http.filter.initializers</name><value>org.apache.hadoop.yarn.server.webproxy.amfilter.AmFilterInitializer</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.failed.volumes.tolerated</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.sleepjob.reduce.sleep.count</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.http.authentication.type</name><value>simple</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.data.dir.perm</name><value>700</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.client.thread-count</name><value>50</value></property>
+<property><!--Loaded from job.xml--><name>ipc.server.listen.queue.size</name><value>128</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.skip.maxgroups</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>file.stream-buffer-size</name><value>4096</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.fs-limits.max-directory-items</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>io.mapfile.bloom.size</name><value>1048576</value></property>
+<property><!--Loaded from job.xml--><name>fs.hsftp.impl</name><value>org.apache.hadoop.hdfs.HsftpFileSystem</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.container-executor.class</name><value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.map.maxattempts</name><value>4</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.jobhistory.block.size</name><value>3145728</value></property>
+<property><!--Loaded from job.xml--><name>ftp.replication</name><value>3</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.http.address</name><value>0.0.0.0:50030</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.health-checker.script.timeout-ms</name><value>1200000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobhistory.address</name><value>0.0.0.0:10020</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.dns.nameserver</name><value>default</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.taskcache.levels</name><value>2</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.log.retain-seconds</name><value>12000</value></property>
+<property><!--Loaded from job.xml--><name>mapred.child.java.opts</name><value>-Xmx200m</value></property>
+<property><!--Loaded from job.xml--><name>dfs.replication.max</name><value>512</value></property>
+<property><!--Loaded from job.xml--><name>map.sort.class</name><value>org.apache.hadoop.util.QuickSort</value></property>
+<property><!--Loaded from job.xml--><name>dfs.stream-buffer-size</name><value>4096</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.backup.address</name><value>0.0.0.0:50100</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.util.hash.type</name><value>murmur</value></property>
+<property><!--Loaded from job.xml--><name>dfs.block.access.key.update.interval</name><value>600</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobhistory.move.interval-ms</name><value>30000</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.dns.interface</name><value>default</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.skip.proc.count.autoincr</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.backup.http-address</name><value>0.0.0.0:50105</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.container-monitor.interval-ms</name><value>3000</value></property>
+<property><!--Loaded from job.xml--><name>mapred.reducer.new-api</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name><value>0.25</value></property>
+<property><!--Loaded from job.xml--><name>kfs.client-write-packet-size</name><value>65536</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.sleep-delay-before-sigkill.ms</name><value>250</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.dir</name><value>/tmp/hadoop-yarn/staging/user/.staging/job_1329348432655_0001</value></property>
+<property><!--Loaded from job.xml--><name>io.map.index.skip</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>net.topology.node.switch.mapping.impl</name><value>org.apache.hadoop.net.ScriptBasedMapping</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.logging.level</name><value>info</value></property>
+<property><!--Loaded from job.xml--><name>fs.s3.maxRetries</name><value>4</value></property>
+<property><!--Loaded from job.xml--><name>s3native.client-write-packet-size</name><value>65536</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.amliveliness-monitor.interval-ms</name><value>1000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.speculative</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.client.output.filter</name><value>FAILED</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.report.address</name><value>127.0.0.1:0</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.task.userlog.limit.kb</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.map.tasks.maximum</name><value>2</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.http.authentication.simple.anonymous.allowed</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.rpc.socket.factory.class.default</name><value>org.apache.hadoop.net.StandardSocketFactory</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.submithostname</name><value>localhost</value></property>
+<property><!--Loaded from job.xml--><name>fs.hftp.impl</name><value>org.apache.hadoop.hdfs.HftpFileSystem</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.handler.count</name><value>10</value></property>
+<property><!--Loaded from job.xml--><name>fs.automatic.close</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>fs.kfs.impl</name><value>org.apache.hadoop.fs.kfs.KosmosFileSystem</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.submithostaddress</name><value>127.0.0.1</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.healthchecker.script.timeout</name><value>600000</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.directoryscan.interval</name><value>21600</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.address</name><value>0.0.0.0:8040</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.log-aggregation-enable</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>fs.hdfs.impl</name><value>org.apache.hadoop.hdfs.DistributedFileSystem</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.health-checker.interval-ms</name><value>600000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.markreset.buffer.percent</name><value>0.0</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.map.log.level</name><value>INFO</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.localizer.address</name><value>0.0.0.0:4344</value></property>
+<property><!--Loaded from job.xml--><name>dfs.bytes-per-checksum</name><value>512</value></property>
+<property><!--Loaded from job.xml--><name>ftp.stream-buffer-size</name><value>4096</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.keytab</name><value>/etc/krb5.keytab</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.sleepjob.map.sleep.count</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>dfs.blockreport.initialDelay</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nm.liveness-monitor.expiry-interval-ms</name><value>600000</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.http.authentication.token.validity</name><value>36000</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.delegation.token.max-lifetime</name><value>604800000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.hdfs-servers</name><value>${fs.default.name}</value></property>
+<property><!--Loaded from job.xml--><name>fs.ftp.impl</name><value>org.apache.hadoop.fs.ftp.FTPFileSystem</value></property>
+<property><!--Loaded from job.xml--><name>dfs.web.ugi</name><value>webuser,webgroup</value></property>
+<property><!--Loaded from job.xml--><name>s3native.replication</name><value>3</value></property>
+<property><!--Loaded from job.xml--><name>dfs.heartbeat.interval</name><value>3</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.localizer.client.thread-count</name><value>5</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.container.liveness-monitor.interval-ms</name><value>600000</value></property>
+<property><!--Loaded from job.xml--><name>yarn.am.liveness-monitor.expiry-interval-ms</name><value>600000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.task.profile</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.instrumentation</name><value>org.apache.hadoop.mapred.TaskTrackerMetricsInst</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.http.address</name><value>0.0.0.0:50060</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobhistory.webapp.address</name><value>0.0.0.0:19888</value></property>
+<property><!--Loaded from Unknown--><name>rpc.engine.org.apache.hadoop.yarn.proto.AMRMProtocol$AMRMProtocolService$BlockingInterface</name><value>org.apache.hadoop.yarn.ipc.ProtoOverHadoopRpcEngine</value></property>
+<property><!--Loaded from job.xml--><name>yarn.ipc.rpc.class</name><value>org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.name</name><value>Sleep job</value></property>
+<property><!--Loaded from job.xml--><name>kfs.blocksize</name><value>67108864</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.ubertask.maxmaps</name><value>9</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.heartbeat.interval-ms</name><value>1000</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.secondary.http-address</name><value>0.0.0.0:50090</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.userlog.retain.hours</name><value>24</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.task.timeout</name><value>600000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobhistory.loadedjobs.cache.size</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.framework.name</name><value>yarn</value></property>
+<property><!--Loaded from job.xml--><name>ipc.client.idlethreshold</name><value>4000</value></property>
+<property><!--Loaded from job.xml--><name>ipc.server.tcpnodelay</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>ftp.bytes-per-checksum</name><value>512</value></property>
+<property><!--Loaded from job.xml--><name>s3.bytes-per-checksum</name><value>512</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.speculative.slowtaskthreshold</name><value>1.0</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.localizer.cache.target-size-mb</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.remote-app-log-dir</name><value>/tmp/logs</value></property>
+<property><!--Loaded from job.xml--><name>fs.s3.block.size</name><value>67108864</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.queuename</name><value>default</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.sleepjob.reduce.sleep.time</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.rpc.protection</name><value>authentication</value></property>
+<property><!--Loaded from job.xml--><name>yarn.app.mapreduce.client-am.ipc.max-retries</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>ftp.client-write-packet-size</name><value>65536</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.address</name><value>0.0.0.0:45454</value></property>
+<property><!--Loaded from job.xml--><name>fs.defaultFS</name><value>hdfs://localhost:8021</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.task.merge.progress.records</name><value>10000</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.scheduler.client.thread-count</name><value>50</value></property>
+<property><!--Loaded from job.xml--><name>file.client-write-packet-size</name><value>65536</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.partitioner.class</name><value>org.apache.hadoop.mapreduce.SleepJob$SleepJobPartitioner</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.delete.thread-count</name><value>4</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.scheduler.address</name><value>0.0.0.0:8030</value></property>
+<property><!--Loaded from job.xml--><name>fs.trash.checkpoint.interval</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>s3native.stream-buffer-size</name><value>4096</value></property>
+<property><!--Loaded from job.xml--><name>yarn.scheduler.fifo.minimum-allocation-mb</name><value>1024</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.shuffle.read.timeout</name><value>180000</value></property>
+<property><!--Loaded from job.xml--><name>yarn.app.mapreduce.am.command-opts</name><value>-Xmx500m</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.admin.user.env</name><value>LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.checkpoint.edits.dir</name><value>${dfs.namenode.checkpoint.dir}</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.local.clientfactory.class.name</name><value>org.apache.hadoop.mapred.LocalClientFactory</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.common.configuration.version</name><value>0.23.0</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.dns.interface</name><value>default</value></property>
+<property><!--Loaded from job.xml--><name>io.serializations</name><value>org.apache.hadoop.io.serializer.WritableSerialization,org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization,org.apache.hadoop.io.serializer.avro.AvroReflectSerialization</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.aux-service.mapreduce.shuffle.class</name><value>org.apache.hadoop.mapred.ShuffleHandler</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name><value>org.apache.hadoop.mapred.ShuffleHandler</value></property>
+<property><!--Loaded from job.xml--><name>fs.df.interval</name><value>60000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.shuffle.input.buffer.percent</name><value>0.70</value></property>
+<property><!--Loaded from job.xml--><name>io.seqfile.compress.blocksize</name><value>1000000</value></property>
+<property><!--Loaded from job.xml--><name>ipc.client.connect.max.retries</name><value>10</value></property>
+<property><!--Loaded from job.xml--><name>fs.viewfs.impl</name><value>org.apache.hadoop.fs.viewfs.ViewFileSystem</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.security.groups.cache.secs</name><value>300</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.delegation.key.update-interval</name><value>86400000</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.process-kill-wait.ms</name><value>2000</value></property>
+<property><!--Loaded from job.xml--><name>yarn.application.classpath</name><value>
+        $HADOOP_CONF_DIR,
+        $HADOOP_COMMON_HOME/share/hadoop/common/*,
+        $HADOOP_COMMON_HOME/share/hadoop/common/lib/*,
+        $HADOOP_HDFS_HOME/share/hadoop/hdfs/*,
+        $HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,
+        $YARN_HOME/share/hadoop/mapreduce/*,
+        $YARN_HOME/share/hadoop/mapreduce/lib/*
+     </value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.log-aggregation.compression-type</name><value>gz</value></property>
+<property><!--Loaded from job.xml--><name>dfs.image.compress</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.localizer.cache.cleanup.interval-ms</name><value>30000</value></property>
+<property><!--Loaded from job.xml--><name>mapred.mapper.new-api</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.log-dirs</name><value>/home/user/logs</value></property>
+<property><!--Loaded from job.xml--><name>fs.s3n.block.size</name><value>67108864</value></property>
+<property><!--Loaded from job.xml--><name>fs.ftp.host</name><value>0.0.0.0</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.security.group.mapping</name><value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.address</name><value>0.0.0.0:50010</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.map.skip.maxrecords</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.https.address</name><value>0.0.0.0:50475</value></property>
+<property><!--Loaded from job.xml--><name>fs.s3.impl</name><value>org.apache.hadoop.fs.s3.S3FileSystem</value></property>
+<property><!--Loaded from job.xml--><name>file.replication</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.resource-tracker.address</name><value>0.0.0.0:8025</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.restart.recover</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.work.around.non.threadsafe.getpwuid</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.client.genericoptionsparser.used</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.indexcache.mb</name><value>10</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.output.fileoutputformat.compress</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.tmp.dir</name><value>/tmp/hadoop-${user.name}</value></property>
+<property><!--Loaded from job.xml--><name>dfs.client.block.write.replace-datanode-on-failure.policy</name><value>DEFAULT</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.kerberos.kinit.command</name><value>kinit</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.committer.setup.cleanup.needed</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.du.reserved</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.task.profile.reduces</name><value>0-2</value></property>
+<property><!--Loaded from job.xml--><name>file.bytes-per-checksum</name><value>512</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.input.fileinputformat.inputdir</name><value>hdfs://localhost:8021/user/user/ignored</value></property>
+<property><!--Loaded from job.xml--><name>dfs.client.block.write.replace-datanode-on-failure.enable</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.handler.count</name><value>10</value></property>
+<property><!--Loaded from job.xml--><name>net.topology.script.number.args</name><value>100</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.task.profile.maps</name><value>0-2</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.decommission.interval</name><value>30</value></property>
+<property><!--Loaded from job.xml--><name>dfs.image.compression.codec</name><value>org.apache.hadoop.io.compress.DefaultCodec</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.webapp.address</name><value>0.0.0.0:8088</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.system.dir</name><value>${hadoop.tmp.dir}/mapred/system</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.support.allow.format</name><value>true</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.vmem-pmem-ratio</name><value>2.1</value></property>
+<property><!--Loaded from job.xml--><name>io.mapfile.bloom.error.rate</name><value>0.005</value></property>
+<property><!--Loaded from job.xml--><name>dfs.permissions.superusergroup</name><value>supergroup</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.expire.trackers.interval</name><value>600000</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.cluster.acls.enabled</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.remote-app-log-dir-suffix</name><value>logs</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.checkpoint.check.period</name><value>60</value></property>
+<property><!--Loaded from job.xml--><name>io.seqfile.local.dir</name><value>${hadoop.tmp.dir}/io/local</value></property>
+<property><!--Loaded from job.xml--><name>yarn.app.mapreduce.am.resource.mb</name><value>512</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.reduce.shuffle.merge.percent</name><value>0.66</value></property>
+<property><!--Loaded from job.xml--><name>tfile.io.chunk.size</name><value>1048576</value></property>
+<property><!--Loaded from job.xml--><name>file.blocksize</name><value>67108864</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.jobhistory.lru.cache.size</name><value>5</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.jobtracker.maxtasks.perjob</name><value>-1</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.nm.liveness-monitor.interval-ms</name><value>1000</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.webapp.address</name><value>0.0.0.0:9999</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.acl-modify-job</name><value> </value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.reduce.tasks.maximum</name><value>2</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.cluster.temp.dir</name><value>${hadoop.tmp.dir}/mapred/temp</value></property>
+<property><!--Loaded from job.xml--><name>io.skip.checksum.errors</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>yarn.app.mapreduce.am.staging-dir</name><value>/tmp/hadoop-yarn/staging</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.handler.count</name><value>3</value></property>
+<property><!--Loaded from job.xml--><name>hadoop.http.authentication.signature.secret</name><value>hadoop</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.decommission.nodes.per.interval</name><value>5</value></property>
+<property><!--Loaded from job.xml--><name>fs.ftp.host.port</name><value>21</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.checkpoint.period</name><value>3600</value></property>
+<property><!--Loaded from job.xml--><name>dfs.namenode.fs-limits.max-component-length</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.admin.client.thread-count</name><value>1</value></property>
+<property><!--Loaded from job.xml--><name>fs.AbstractFileSystem.viewfs.impl</name><value>org.apache.hadoop.fs.viewfs.ViewFs</value></property>
+<property><!--Loaded from job.xml--><name>yarn.resourcemanager.resource-tracker.client.thread-count</name><value>50</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.tasktracker.dns.nameserver</name><value>default</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.clientfactory.class.name</name><value>org.apache.hadoop.mapred.YarnClientFactory</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.map.output.compress</name><value>false</value></property>
+<property><!--Loaded from job.xml--><name>mapreduce.job.counters.limit</name><value>120</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.ipc.address</name><value>0.0.0.0:50020</value></property>
+<property><!--Loaded from job.xml--><name>fs.webhdfs.impl</name><value>org.apache.hadoop.hdfs.web.WebHdfsFileSystem</value></property>
+<property><!--Loaded from job.xml--><name>yarn.nodemanager.delete.debug-delay-sec</name><value>0</value></property>
+<property><!--Loaded from job.xml--><name>dfs.datanode.max.transfer.threads</name><value>4096</value></property>
+</configuration>
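
A minimal sketch (not part of this commit) of how a recorded job_*_conf.xml like the test resource above could be loaded and inspected with the standard Hadoop Configuration API. Only the file name is taken from this diff; the class name and the assumption that the file sits in the working directory are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class ReadJobConf {
  public static void main(String[] args) {
    // Start from an empty Configuration so only the job conf snapshot is read,
    // not core-default.xml / core-site.xml from the classpath.
    Configuration conf = new Configuration(false);
    conf.addResource(new Path("job_1329348432655_0001_conf.xml"));

    // Values come back exactly as recorded in the job.xml snapshot.
    System.out.println(conf.get("yarn.application.classpath"));
    System.out.println(conf.getInt("yarn.app.mapreduce.am.resource.mb", -1));
    System.out.println(conf.getBoolean("mapreduce.output.fileoutputformat.compress", false));
  }
}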
