
Merging trunk to HDFS-1623 branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1177130 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas, 13 years ago
Parent
Current commit
ab0402bc1d
100 files changed, 3437 insertions and 1206 deletions
  1. 2 2
      dev-support/test-patch.sh
  2. 39 3
      hadoop-common-project/hadoop-common/CHANGES.txt
  3. 5 3
      hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/HttpAuthentication.xml
  4. 4 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  5. 9 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
  6. 28 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  7. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java
  8. 21 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
  9. 25 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
  10. 4 1
      hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-conf.sh
  11. 20 0
      hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-metrics2.properties
  12. 20 0
      hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml
  13. 213 0
      hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
  14. 2 2
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  15. 14 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
  16. 147 74
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java
  17. 3 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
  18. 27 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java
  19. 145 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java
  20. 32 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
  21. 15 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
  22. 36 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  23. 74 11
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  24. 20 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
  25. 7 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  26. 16 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
  27. 6 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  28. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
  29. 6 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
  30. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  31. 0 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  32. 16 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  33. 14 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  34. 8 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  35. 60 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
  36. 32 13
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
  37. 277 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
  38. 85 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ParamFilter.java
  39. 57 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
  40. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
  41. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
  42. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
  43. 1 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
  44. 0 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
  45. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java
  46. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
  47. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
  48. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
  49. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
  50. 0 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
  51. 0 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
  52. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java
  53. 41 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenewerParam.java
  54. 0 20
      hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
  55. 30 20
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
  56. 129 71
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
  57. 15 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
  58. 33 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
  59. 16 10
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
  60. 3 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java
  61. 10 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java
  62. 7 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
  63. 290 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
  64. 44 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
  65. 102 0
      hadoop-mapreduce-project/CHANGES.txt
  66. 43 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
  67. 107 111
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
  68. 3 4
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
  69. 30 32
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
  70. 1 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
  71. 27 31
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
  72. 35 48
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
  73. 29 6
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
  74. 20 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
  75. 9 8
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java
  76. 32 22
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
  77. 12 28
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
  78. 5 8
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
  79. 1 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java
  80. 1 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java
  81. 3 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
  82. 1 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
  83. 19 7
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
  84. 624 445
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
  85. 24 10
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
  86. 0 50
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/MRConstants.java
  87. 4 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java
  88. 24 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java
  89. 1 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
  90. 75 42
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
  91. 24 8
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java
  92. 2 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
  93. 13 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java
  94. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java
  95. 2 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java
  96. 1 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
  97. 4 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobStatus.java
  98. 7 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java
  99. 12 11
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MROutputFiles.java
  100. 11 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java

+ 2 - 2
dev-support/test-patch.sh

@@ -598,8 +598,8 @@ runTests () {
   echo ""
   echo ""
   echo ""
   echo ""
   
   
-  echo "$MVN clean test -Pnative -D${PROJECT_NAME}PatchProcess"
-  $MVN clean test -Pnative -D${PROJECT_NAME}PatchProcess
+  echo "$MVN clean install test -Pnative -D${PROJECT_NAME}PatchProcess"
+  $MVN clean install test -Pnative -D${PROJECT_NAME}PatchProcess
  if [[ $? != 0 ]] ; then
    ### Find and format names of failed tests
    failed_tests=`find . -name 'TEST*.xml' | xargs $GREP  -l -E "<failure|<error" | sed -e "s|.*target/surefire-reports/TEST-|                  |g" | sed -e "s|\.xml||g"`

+ 39 - 3
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -2,6 +2,12 @@ Hadoop Change Log
 
 
Trunk (unreleased changes)
 
 
+  INCOMPATIBLE CHANGES
+   
+   HADOOP-7542. Change Configuration XML format to 1.1 to add support for
+                serializing additional characters. This requires XML1.1
+                support in the XML parser (Christopher Egner via harsh)
+
  IMPROVEMENTS

    HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
@@ -13,6 +19,11 @@ Trunk (unreleased changes)
 
 
    HADOOP-7635. RetryInvocationHandler should release underlying resources on
                 close (atm)
+    
+    HADOOP-7668. Add a NetUtils method that can tell if an InetAddress 
+    belongs to local host. (suresh)
+
+    HADOOP-7687 Make getProtocolSignature public  (sanjay)
 
 
  BUGS
 
 
@@ -23,6 +34,16 @@ Trunk (unreleased changes)
 
 
    HADOOP-7641. Add Apache License to template config files (Eric Yang via atm)
 
 
+    HADOOP-7621. alfredo config should be in a file not readable by users
+                 (Alejandro Abdelnur via atm)
+    
+    HADOOP-7669  Fix newly introduced release audit warning. 
+                 (Uma Maheswara Rao G via stevel)
+    
+    HADOOP-6220. HttpServer wraps InterruptedExceptions by IOExceptions if interrupted 
+                 in startup (stevel)
+                 
+
Release 0.23.0 - Unreleased

  INCOMPATIBLE CHANGES
@@ -287,9 +308,6 @@ Release 0.23.0 - Unreleased
    HADOOP-7430. Improve error message when moving to trash fails due to
    quota issue. (Ravi Prakash via mattf)
 
 
-    HADOOP-7457. Remove out-of-date Chinese language documentation.
-    (Jakob Homan via eli)
-
    HADOOP-7444. Add Checksum API to verify and calculate checksums "in bulk"
    (todd)
 
 
@@ -388,6 +406,13 @@ Release 0.23.0 - Unreleased
    HADOOP-7599. Script improvements to setup a secure Hadoop cluster
    (Eric Yang via ddas)
 
 
+    HADOOP-7639. Enhance HttpServer to allow passing path-specs for filtering,
+    so that servers like Yarn WebApp can get filtered the paths served by
+    their own injected servlets. (Thomas Graves via vinodkv)
+
+    HADOOP-7575. Enhanced LocalDirAllocator to support fully-qualified
+    paths. (Jonathan Eagles via vinodkv)
+
  OPTIMIZATIONS

    HADOOP-7333. Performance improvement in PureJavaCrc32. (Eric Caspole
@@ -398,6 +423,9 @@ Release 0.23.0 - Unreleased
 
 
  BUG FIXES
 
 
+    HADOOP-7630. hadoop-metrics2.properties should have a property *.period 
+    set to a default value for metrics. (Eric Yang via mattf)
+
    HADOOP-7327. FileSystem.listStatus() throws NullPointerException instead of
    IOException upon access permission failure. (mattf)
 
 
@@ -603,6 +631,9 @@ Release 0.23.0 - Unreleased
    HADOOP-7631. Fixes a config problem to do with running streaming jobs
    (Eric Yang via ddas)
 
 
+    HADOOP-7662. Fixed logs servlet to use the pathspec '/*' instead of '/'
+    for correct filtering. (Thomas Graves via vinodkv)
+
Release 0.22.0 - Unreleased

  INCOMPATIBLE CHANGES
@@ -1118,6 +1149,11 @@ Release 0.22.0 - Unreleased
    HADOOP-7568. SequenceFile should not print into stdout.
    (Plamen Jeliazkov via shv)
 
 
+    HADOOP-7663. Fix TestHDFSTrash failure. (Mayank Bansal via shv)
+
+    HADOOP-7457. Remove out-of-date Chinese language documentation.
+    (Jakob Homan via eli)
+
Release 0.21.1 - Unreleased

  IMPROVEMENTS

+ 5 - 3
hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/HttpAuthentication.xml

@@ -82,10 +82,12 @@
      <code>36000</code>.
      </p>
 
 
-      <p><code>hadoop.http.authentication.signature.secret</code>: The signature secret for  
-      signing the authentication tokens. If not set a random secret is generated at 
+      <p><code>hadoop.http.authentication.signature.secret.file</code>: The signature secret 
+      file for signing the authentication tokens. If not set a random secret is generated at 
      startup time. The same secret should be used for all nodes in the cluster, JobTracker,
-      NameNode, DataNode and TastTracker. The default value is a <code>hadoop</code> value.
+      NameNode, DataNode and TastTracker. The default value is 
+      <code>${user.home}/hadoop-http-auth-signature-secret</code>.
+      IMPORTANT: This file should be readable only by the Unix user running the daemons.
      </p>

      <p><code>hadoop.http.authentication.cookie.domain</code>: The domain to use for the HTTP
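A minimal sketch (not part of this commit) of creating the secret file that hadoop.http.authentication.signature.secret.file points at and restricting it to the daemon user, as the updated documentation requires. The class name and the use of a random UUID as the secret value are assumptions for illustration only.

// Sketch only: create the signature secret file and make it readable only by
// the Unix user running the daemons. Names and secret generation are illustrative.
import java.io.File;
import java.io.FileWriter;
import java.util.UUID;

public class CreateHttpAuthSecretFile {
  public static void main(String[] args) throws Exception {
    File secret = new File(System.getProperty("user.home"),
        "hadoop-http-auth-signature-secret");
    FileWriter writer = new FileWriter(secret);
    writer.write(UUID.randomUUID().toString());
    writer.close();

    // Owner-only permissions, mirroring the documentation's requirement.
    secret.setReadable(false, false);
    secret.setReadable(true, true);
    secret.setWritable(false, false);
    secret.setWritable(true, true);
  }
}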

+ 4 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -1632,6 +1632,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    try {
      doc =
        DocumentBuilderFactory.newInstance().newDocumentBuilder().newDocument();
+
+      // Allow a broader set of control characters to appear in job confs.
+      // cf https://issues.apache.org/jira/browse/MAPREDUCE-109 
+      doc.setXmlVersion( "1.1" );
    } catch (ParserConfigurationException pe) {
      throw new IOException(pe);
    }
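A hedged sketch of what the XML 1.1 switch (HADOOP-7542) enables: a Configuration value containing a control character now survives a writeXml/addResource round trip, assuming the local JAXP parser supports XML 1.1 as the CHANGES.txt entry notes. The class and file names below are illustrative, not part of the patch.

// Sketch only: round-trip a control character through Configuration.
import java.io.File;
import java.io.FileOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class Xml11RoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(false);
    conf.set("my.string", "some\u0001string");        // value contains \u0001

    File file = new File("control-char-example.xml"); // illustrative file name
    FileOutputStream out = new FileOutputStream(file);
    conf.writeXml(out);                               // header is now <?xml version="1.1" ...?>
    out.close();

    Configuration reread = new Configuration(false);
    reread.addResource(new Path(file.toURI()));
    System.out.println("some\u0001string".equals(reread.get("my.string"))); // expect true
  }
}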

+ 9 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java

@@ -264,9 +264,15 @@ public class LocalDirAllocator {
            Path tmpDir = new Path(localDirs[i]);
            if(localFS.mkdirs(tmpDir)|| localFS.exists(tmpDir)) {
              try {
-                DiskChecker.checkDir(new File(localDirs[i]));
-                dirs.add(localDirs[i]);
-                dfList.add(new DF(new File(localDirs[i]), 30000));
+
+                File tmpFile = tmpDir.isAbsolute()
+                  ? new File(localFS.makeQualified(tmpDir).toUri())
+                  : new File(localDirs[i]);
+
+                DiskChecker.checkDir(tmpFile);
+                dirs.add(tmpFile.getPath());
+                dfList.add(new DF(tmpFile, 30000));
+
              } catch (DiskErrorException de) {
                LOG.warn( localDirs[i] + " is not writable\n", de);
              }
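Illustrative usage of the enhanced allocator (HADOOP-7575), assuming a made-up context key "test.buffer.dir": relative, absolute, and fully-qualified file:// directory names can now be mixed in the same list, as the parameterized test further below also exercises.

// Sketch only: the context property may now mix relative, absolute, and
// fully-qualified directory names. "test.buffer.dir" is a hypothetical key.
import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;

public class QualifiedDirExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("test.buffer.dir", "build/test/temp0,/tmp/temp1,file:///tmp/temp2");

    LocalDirAllocator allocator = new LocalDirAllocator("test.buffer.dir");
    File tmp = allocator.createTmpFileForWrite("block", 100, conf);
    System.out.println("Allocated under: " + tmp.getAbsolutePath());
  }
}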

+ 28 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.http;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintWriter;
+import java.io.InterruptedIOException;
import java.net.BindException;
import java.net.InetSocketAddress;
import java.net.URL;
@@ -124,6 +125,29 @@ public class HttpServer implements FilterContainer {
      boolean findPort, Configuration conf, Connector connector) throws IOException {
    this(name, bindAddress, port, findPort, conf, null, connector);
  }
+
+  /**
+   * Create a status server on the given port. Allows you to specify the
+   * path specifications that this server will be serving so that they will be
+   * added to the filters properly.  
+   * 
+   * @param name The name of the server
+   * @param bindAddress The address for this server
+   * @param port The port to use on the server
+   * @param findPort whether the server should start at the given port and 
+   *        increment by 1 until it finds a free port.
+   * @param conf Configuration 
+   * @param pathSpecs Path specifications that this httpserver will be serving. 
+   *        These will be added to any filters.
+   */
+  public HttpServer(String name, String bindAddress, int port,
+      boolean findPort, Configuration conf, String[] pathSpecs) throws IOException {
+    this(name, bindAddress, port, findPort, conf, null, null);
+    for (String path : pathSpecs) {
+        LOG.info("adding path spec: " + path);
+      addFilterPathMapping(path, webAppContext);
+    }
+  }
   
   
  /**
   * Create a status server on the given port.
@@ -259,7 +283,7 @@ public class HttpServer implements FilterContainer {
    if (logDir != null) {
      Context logContext = new Context(parent, "/logs");
      logContext.setResourceBase(logDir);
-      logContext.addServlet(AdminAuthorizedServlet.class, "/");
+      logContext.addServlet(AdminAuthorizedServlet.class, "/*");
      logContext.setDisplayName("logs");
      setContextAttributes(logContext, conf);
      defaultContexts.put(logContext, true);
@@ -660,6 +684,9 @@ public class HttpServer implements FilterContainer {
      }
    } catch (IOException e) {
      throw e;
+    } catch (InterruptedException e) {
+      throw (IOException) new InterruptedIOException(
+          "Interrupted while starting HTTP server").initCause(e);
    } catch (Exception e) {
      throw new IOException("Problem starting http server", e);
    }
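A short usage sketch of the new path-spec constructor added above (HADOOP-7639); the server name, bind address, and path specs are placeholders chosen for the example.

// Sketch only: pass path specs so servlets served under them are covered by
// the configured filters.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;

public class FilteredHttpServerExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] pathSpecs = new String[] { "/ws/*", "/app/*" };   // example specs
    HttpServer server =
        new HttpServer("example", "0.0.0.0", 0, true /* findPort */, conf, pathSpecs);
    server.start();
    System.out.println("Listening on port " + server.getPort());
    server.stop();
  }
}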

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java

@@ -199,7 +199,7 @@ public class ProtocolSignature implements Writable {
   * @param protocol protocol
   * @return the server's protocol signature
   */
-  static ProtocolSignature getProtocolSignature(
+  public static ProtocolSignature getProtocolSignature(
      int clientMethodsHashCode,
      long serverVersion,
      Class<? extends VersionedProtocol> protocol) {

+ 21 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java

@@ -516,4 +516,25 @@ public class NetUtils {
    } catch (UnknownHostException ignore) { }
    return addr;
  }
+  
+  /**
+   * Given an InetAddress, checks to see if the address is a local address, by
+   * comparing the address with all the interfaces on the node.
+   * @param addr address to check if it is local node's address
+   * @return true if the address corresponds to the local node
+   */
+  public static boolean isLocalAddress(InetAddress addr) {
+    // Check if the address is any local or loop back
+    boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();
+
+    // Check if the address is defined on any interface
+    if (!local) {
+      try {
+        local = NetworkInterface.getByInetAddress(addr) != null;
+      } catch (SocketException e) {
+        local = false;
+      }
+    }
+    return local;
+  }
}
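A brief usage sketch of the new helper (HADOOP-7668); the literal addresses are examples only.

// Sketch only: check whether addresses belong to the local node.
import java.net.InetAddress;
import org.apache.hadoop.net.NetUtils;

public class LocalAddressCheck {
  public static void main(String[] args) throws Exception {
    System.out.println(NetUtils.isLocalAddress(InetAddress.getByName("127.0.0.1"))); // true
    System.out.println(NetUtils.isLocalAddress(InetAddress.getLocalHost()));         // true on most hosts
    System.out.println(NetUtils.isLocalAddress(InetAddress.getByName("192.0.2.1"))); // false unless bound locally
  }
}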

+ 25 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java

@@ -22,6 +22,9 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
 
 
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.Reader;
import java.util.HashMap;
import java.util.Map;
 
 
@@ -40,8 +43,10 @@ import java.util.Map;
 */
public class AuthenticationFilterInitializer extends FilterInitializer {
 
 
-  private static final String PREFIX = "hadoop.http.authentication.";
+  static final String PREFIX = "hadoop.http.authentication.";
 
 
+  static final String SIGNATURE_SECRET_FILE = AuthenticationFilter.SIGNATURE_SECRET + ".file";
+  
  /**
   * Initializes Alfredo AuthenticationFilter.
   * <p/>
@@ -67,6 +72,25 @@ public class AuthenticationFilterInitializer extends FilterInitializer {
      }
    }
 
 
+    String signatureSecretFile = filterConfig.get(SIGNATURE_SECRET_FILE);
+    if (signatureSecretFile == null) {
+      throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);      
+    }
+    
+    try {
+      StringBuilder secret = new StringBuilder();
+      Reader reader = new FileReader(signatureSecretFile);
+      int c = reader.read();
+      while (c > -1) {
+        secret.append((char)c);
+        c = reader.read();
+      }
+      reader.close();
+      filterConfig.put(AuthenticationFilter.SIGNATURE_SECRET, secret.toString());
+    } catch (IOException ex) {
+      throw new RuntimeException("Could not read HTTP signature secret file: " + signatureSecretFile);            
+    }
+    
    container.addFilter("authentication",
                        AuthenticationFilter.class.getName(),
                        filterConfig);

+ 4 - 1
hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-conf.sh

@@ -475,7 +475,10 @@ else
  template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/taskcontroller.cfg ${HADOOP_CONF_DIR}/taskcontroller.cfg
  template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hadoop-metrics2.properties ${HADOOP_CONF_DIR}/hadoop-metrics2.properties
  if [ ! -e ${HADOOP_CONF_DIR}/capacity-scheduler.xml ]; then
-    template_generator ${HADOOP_PREFIX}/share/hadoop/templates/conf/capacity-scheduler.xml ${HADOOP_CONF_DIR}/capacity-scheduler.xml
+    template_generator ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/capacity-scheduler.xml ${HADOOP_CONF_DIR}/capacity-scheduler.xml
+  fi
+  if [ ! -e ${HADOOP_CONF_DIR}/hadoop-metrics2.properties ]; then
+    cp ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/hadoop-metrics2.properties ${HADOOP_CONF_DIR}/hadoop-metrics2.properties
  fi
  if [ ! -e ${HADOOP_CONF_DIR}/log4j.properties ]; then
    cp ${HADOOP_PREFIX}/share/hadoop/common/templates/conf/log4j.properties ${HADOOP_CONF_DIR}/log4j.properties

+ 20 - 0
hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-metrics2.properties

@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+*.period=60
+

+ 20 - 0
hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml

@@ -144,6 +144,26 @@
    </description>
  </property>
 
 
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/_HOST@${local.realm}</value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPENGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/etc/security/keytabs/nn.service.keytab</value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
  <property>
    <name>dfs.namenode.keytab.file</name>
    <value>/etc/security/keytabs/nn.service.keytab</value>

+ 213 - 0
hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties

@@ -0,0 +1,213 @@
+# Copyright 2011 The Apache Software Foundation
+# 
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollver at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security appender
+#
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+#new logger
+# Define some default values that can be overridden by system properties
+hadoop.security.logger=INFO,console
+log4j.category.SecurityLogger=${hadoop.security.logger}
+
+# hdfs audit logging
+
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+# mapred audit logging
+
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+#
+# FSNamesystem Audit logging
+# All audit events are logged at INFO level
+#
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=DEBUG
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+#
+# Job Summary Appender
+#
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
+log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
+
+#
+# MapReduce Audit Log Appender
+#
+
+# Set the MapReduce audit log filename
+#hadoop.mapreduce.audit.log.file=hadoop-mapreduce.audit.log
+
+# Appender for AuditLogger.
+# Requires the following system properties to be set
+#    - hadoop.log.dir (Hadoop Log directory)
+#    - hadoop.mapreduce.audit.log.file (MapReduce audit log filename)
+
+#log4j.logger.org.apache.hadoop.mapred.AuditLogger=INFO,MRAUDIT
+#log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+#log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.MRAUDIT.File=${hadoop.log.dir}/${hadoop.mapreduce.audit.log.file}
+#log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+#log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+#log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Yarn ResourceManager Application Summary Log 
+#
+# Set the ResourceManager summary log filename
+#yarn.server.resourcemanager.appsummary.log.file=rm-appsummary.log
+# Set the ResourceManager summary log level and appender
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# Appender for ResourceManager Application Summary Log - rolled daily
+# Requires the following properties to be set
+#    - hadoop.log.dir (Hadoop Log directory)
+#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+
+#log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+#log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+#log4j.appender.RMSUMMARY=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+#log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+#log4j.appender.RMSUMMARY.DatePattern=.yyyy-MM-dd

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -808,8 +808,8 @@
 </property>

 <property>
-  <name>hadoop.http.authentication.signature.secret</name>
-  <value>hadoop</value>
+  <name>hadoop.http.authentication.signature.secret.file</name>
+  <value>${user.home}/hadoop-http-auth-signature-secret</value>
   <description>
    The signature secret for signing the authentication tokens.
    If not set a random secret is generated at startup time.

+ 14 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java

@@ -58,7 +58,7 @@ public class TestConfiguration extends TestCase {
  }

  private void startConfig() throws IOException{
-    out.write("<?xml version=\"1.0\"?>\n");
+    out.write("<?xml version=\"1.1\"?>\n");
    out.write("<configuration>\n");
  }
 
 
@@ -221,6 +221,18 @@ public class TestConfiguration extends TestCase {
    assertEquals("this  contains a comment", conf.get("my.comment"));
  }
   
   
+  public void testControlAInValue() throws IOException {
+    out = new BufferedWriter(new FileWriter(CONFIG));
+    startConfig();
+    appendProperty("my.char", "&#1;");
+    appendProperty("my.string", "some&#1;string");
+    endConfig();
+    Path fileResource = new Path(CONFIG);
+    conf.addResource(fileResource);
+    assertEquals("\u0001", conf.get("my.char"));
+    assertEquals("some\u0001string", conf.get("my.string"));
+  }
+
  public void testTrim() throws IOException {
    out=new BufferedWriter(new FileWriter(CONFIG));
    startConfig();
@@ -298,7 +310,7 @@ public class TestConfiguration extends TestCase {
    conf.writeXml(baos);
    String result = baos.toString();
    assertTrue("Result has proper header", result.startsWith(
-        "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?><configuration>"));
+        "<?xml version=\"1.1\" encoding=\"UTF-8\" standalone=\"no\"?><configuration>"));
     assertTrue("Result has proper footer", result.endsWith("</configuration>"));
     assertTrue("Result has proper footer", result.endsWith("</configuration>"));
   }
   }
   
   

+ 147 - 74
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalDirAllocator.java

@@ -20,40 +20,48 @@ package org.apache.hadoop.fs;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
 
 
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Shell;
 
 
-import junit.framework.TestCase;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
 
 
/** This test LocalDirAllocator works correctly;
- * Every test case uses different buffer dirs to 
+ * Every test case uses different buffer dirs to
 * enforce the AllocatorPerContext initialization.
 * This test does not run on Cygwin because under Cygwin
 * a directory can be created in a read-only directory
 * which breaks this test.
- */ 
-public class TestLocalDirAllocator extends TestCase {
+ */
+@RunWith(Parameterized.class)
+public class TestLocalDirAllocator {
  final static private Configuration conf = new Configuration();
  final static private String BUFFER_DIR_ROOT = "build/test/temp";
+  final static private String ABSOLUTE_DIR_ROOT;
+  final static private String QUALIFIED_DIR_ROOT;
  final static private Path BUFFER_PATH_ROOT = new Path(BUFFER_DIR_ROOT);
  final static private File BUFFER_ROOT = new File(BUFFER_DIR_ROOT);
-  final static private String BUFFER_DIR[] = new String[] {
-    BUFFER_DIR_ROOT+"/tmp0",  BUFFER_DIR_ROOT+"/tmp1", BUFFER_DIR_ROOT+"/tmp2",
-    BUFFER_DIR_ROOT+"/tmp3", BUFFER_DIR_ROOT+"/tmp4", BUFFER_DIR_ROOT+"/tmp5",
-    BUFFER_DIR_ROOT+"/tmp6"};
-  final static private Path BUFFER_PATH[] = new Path[] {
-    new Path(BUFFER_DIR[0]), new Path(BUFFER_DIR[1]), new Path(BUFFER_DIR[2]),
-    new Path(BUFFER_DIR[3]), new Path(BUFFER_DIR[4]), new Path(BUFFER_DIR[5]),
-    new Path(BUFFER_DIR[6])};
-  final static private String CONTEXT = "dfs.client.buffer.dir";
+  final static private String CONTEXT = "fs.client.buffer.dir";
  final static private String FILENAME = "block";
-  final static private LocalDirAllocator dirAllocator = 
+  final static private LocalDirAllocator dirAllocator =
    new LocalDirAllocator(CONTEXT);
  static LocalFileSystem localFs;
  final static private boolean isWindows =
    System.getProperty("os.name").startsWith("Windows");
  final static int SMALL_FILE_SIZE = 100;
+  final static private String RELATIVE = "/RELATIVE";
+  final static private String ABSOLUTE = "/ABSOLUTE";
+  final static private String QUALIFIED = "/QUALIFIED";
+  final private String ROOT;
+  final private String PREFIX;
+
  static {
    try {
      localFs = FileSystem.getLocal(conf);
@@ -63,170 +71,214 @@ public class TestLocalDirAllocator extends TestCase {
      e.printStackTrace();
      System.exit(-1);
    }
+
+    ABSOLUTE_DIR_ROOT = new Path(localFs.getWorkingDirectory(),
+        BUFFER_DIR_ROOT).toUri().getPath();
+    QUALIFIED_DIR_ROOT = new Path(localFs.getWorkingDirectory(),
+        BUFFER_DIR_ROOT).toUri().toString();
+  }
+
+  public TestLocalDirAllocator(String root, String prefix) {
+    ROOT = root;
+    PREFIX = prefix;
+  }
+
+  @Parameters
+  public static Collection<Object[]> params() {
+    Object [][] data = new Object[][] {
+      { BUFFER_DIR_ROOT, RELATIVE },
+      { ABSOLUTE_DIR_ROOT, ABSOLUTE },
+      { QUALIFIED_DIR_ROOT, QUALIFIED }
+    };
+
+    return Arrays.asList(data);
  }

  private static void rmBufferDirs() throws IOException {
    assertTrue(!localFs.exists(BUFFER_PATH_ROOT) ||
        localFs.delete(BUFFER_PATH_ROOT, true));
  }
-  
-  private void validateTempDirCreation(int i) throws IOException {
+
+  private static void validateTempDirCreation(String dir) throws IOException {
    File result = createTempFile(SMALL_FILE_SIZE);
-    assertTrue("Checking for " + BUFFER_DIR[i] + " in " + result + " - FAILED!", 
-        result.getPath().startsWith(new File(BUFFER_DIR[i], FILENAME).getPath()));
+    assertTrue("Checking for " + dir + " in " + result + " - FAILED!",
+        result.getPath().startsWith(new Path(dir, FILENAME).toUri().getPath()));
  }
-  
-  private File createTempFile() throws IOException {
-    File result = dirAllocator.createTmpFileForWrite(FILENAME, -1, conf);
-    result.delete();
-    return result;
+
+  private static File createTempFile() throws IOException {
+    return createTempFile(-1);
  }
-  
-  private File createTempFile(long size) throws IOException {
+
+  private static File createTempFile(long size) throws IOException {
    File result = dirAllocator.createTmpFileForWrite(FILENAME, size, conf);
    result.delete();
    return result;
  }
-  
-  /** Two buffer dirs. The first dir does not exist & is on a read-only disk; 
+
+  private String buildBufferDir(String dir, int i) {
+    return dir + PREFIX + i;
+  }
+
+  /** Two buffer dirs. The first dir does not exist & is on a read-only disk;
   * The second dir exists & is RW
   * @throws Exception
   */
+  @Test
  public void test0() throws Exception {
    if (isWindows) return;
+    String dir0 = buildBufferDir(ROOT, 0);
+    String dir1 = buildBufferDir(ROOT, 1);
    try {
-      conf.set(CONTEXT, BUFFER_DIR[0]+","+BUFFER_DIR[1]);
-      assertTrue(localFs.mkdirs(BUFFER_PATH[1]));
+      conf.set(CONTEXT, dir0 + "," + dir1);
+      assertTrue(localFs.mkdirs(new Path(dir1)));
      BUFFER_ROOT.setReadOnly();
-      validateTempDirCreation(1);
-      validateTempDirCreation(1);
+      validateTempDirCreation(dir1);
+      validateTempDirCreation(dir1);
    } finally {
      Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
      rmBufferDirs();
    }
  }
-    
-  /** Two buffer dirs. The first dir exists & is on a read-only disk; 
+
+  /** Two buffer dirs. The first dir exists & is on a read-only disk;
   * The second dir exists & is RW
   * @throws Exception
   */
+  @Test
  public void test1() throws Exception {
    if (isWindows) return;
+    String dir1 = buildBufferDir(ROOT, 1);
+    String dir2 = buildBufferDir(ROOT, 2);
    try {
-      conf.set(CONTEXT, BUFFER_DIR[1]+","+BUFFER_DIR[2]);
-      assertTrue(localFs.mkdirs(BUFFER_PATH[2]));
+      conf.set(CONTEXT, dir1 + "," + dir2);
+      assertTrue(localFs.mkdirs(new Path(dir2)));
      BUFFER_ROOT.setReadOnly();
-      validateTempDirCreation(2);
-      validateTempDirCreation(2);
+      validateTempDirCreation(dir2);
+      validateTempDirCreation(dir2);
    } finally {
      Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
      rmBufferDirs();
    }
  }
  /** Two buffer dirs. Both do not exist but on a RW disk.
-   * Check if tmp dirs are allocated in a round-robin 
+   * Check if tmp dirs are allocated in a round-robin
   */
+  @Test
  public void test2() throws Exception {
    if (isWindows) return;
+    String dir2 = buildBufferDir(ROOT, 2);
+    String dir3 = buildBufferDir(ROOT, 3);
    try {
-      conf.set(CONTEXT, BUFFER_DIR[2]+","+BUFFER_DIR[3]);
+      conf.set(CONTEXT, dir2 + "," + dir3);
 
 
      // create the first file, and then figure the round-robin sequence
      createTempFile(SMALL_FILE_SIZE);
      int firstDirIdx = (dirAllocator.getCurrentDirectoryIndex() == 0) ? 2 : 3;
      int secondDirIdx = (firstDirIdx == 2) ? 3 : 2;
-      
+
      // check if tmp dirs are allocated in a round-robin manner
-      validateTempDirCreation(firstDirIdx);
-      validateTempDirCreation(secondDirIdx);
-      validateTempDirCreation(firstDirIdx);
+      validateTempDirCreation(buildBufferDir(ROOT, firstDirIdx));
+      validateTempDirCreation(buildBufferDir(ROOT, secondDirIdx));
+      validateTempDirCreation(buildBufferDir(ROOT, firstDirIdx));
    } finally {
      rmBufferDirs();
    }
  }
 
 
-  /** Two buffer dirs. Both exists and on a R/W disk. 
+  /** Two buffer dirs. Both exists and on a R/W disk.
   * Later disk1 becomes read-only.
   * @throws Exception
   */
+  @Test
  public void test3() throws Exception {
    if (isWindows) return;
+    String dir3 = buildBufferDir(ROOT, 3);
+    String dir4 = buildBufferDir(ROOT, 4);
    try {
-      conf.set(CONTEXT, BUFFER_DIR[3]+","+BUFFER_DIR[4]);
-      assertTrue(localFs.mkdirs(BUFFER_PATH[3]));
-      assertTrue(localFs.mkdirs(BUFFER_PATH[4]));
-      
-      // create the first file with size, and then figure the round-robin sequence
+      conf.set(CONTEXT, dir3 + "," + dir4);
+      assertTrue(localFs.mkdirs(new Path(dir3)));
+      assertTrue(localFs.mkdirs(new Path(dir4)));
+
+      // Create the first small file
      createTempFile(SMALL_FILE_SIZE);
 
 
+      // Determine the round-robin sequence
      int nextDirIdx = (dirAllocator.getCurrentDirectoryIndex() == 0) ? 3 : 4;
-      validateTempDirCreation(nextDirIdx);
+      validateTempDirCreation(buildBufferDir(ROOT, nextDirIdx));
 
 
      // change buffer directory 2 to be read only
-      new File(BUFFER_DIR[4]).setReadOnly();
-      validateTempDirCreation(3);
-      validateTempDirCreation(3);
+      new File(new Path(dir4).toUri().getPath()).setReadOnly();
+      validateTempDirCreation(dir3);
+      validateTempDirCreation(dir3);
    } finally {
      rmBufferDirs();
    }
  }
-  
+
  /**
   * Two buffer dirs, on read-write disk.
-   * 
+   *
   * Try to create a whole bunch of files.
   *  Verify that they do indeed all get created where they should.
-   *  
+   *
    *  Would ideally check statistical properties of distribution, but
    *  Would ideally check statistical properties of distribution, but
    *  we don't have the nerve to risk false-positives here.
    *  we don't have the nerve to risk false-positives here.
-   * 
+   *
    * @throws Exception
    * @throws Exception
    */
    */
   static final int TRIALS = 100;
   static final int TRIALS = 100;
+  @Test
   public void test4() throws Exception {
   public void test4() throws Exception {
     if (isWindows) return;
     if (isWindows) return;
+    String dir5 = buildBufferDir(ROOT, 5);
+    String dir6 = buildBufferDir(ROOT, 6);
     try {
     try {
 
 
-      conf.set(CONTEXT, BUFFER_DIR[5]+","+BUFFER_DIR[6]);
-      assertTrue(localFs.mkdirs(BUFFER_PATH[5]));
-      assertTrue(localFs.mkdirs(BUFFER_PATH[6]));
-        
+      conf.set(CONTEXT, dir5 + "," + dir6);
+      assertTrue(localFs.mkdirs(new Path(dir5)));
+      assertTrue(localFs.mkdirs(new Path(dir6)));
+
       int inDir5=0, inDir6=0;
       int inDir5=0, inDir6=0;
       for(int i = 0; i < TRIALS; ++i) {
       for(int i = 0; i < TRIALS; ++i) {
         File result = createTempFile();
         File result = createTempFile();
-        if(result.getPath().startsWith(new File(BUFFER_DIR[5], FILENAME).getPath())) {
+        if(result.getPath().startsWith(
+              new Path(dir5, FILENAME).toUri().getPath())) {
           inDir5++;
           inDir5++;
-        } else  if(result.getPath().startsWith(new File(BUFFER_DIR[6], FILENAME).getPath())) {
+        } else if(result.getPath().startsWith(
+              new Path(dir6, FILENAME).toUri().getPath())) {
           inDir6++;
           inDir6++;
         }
         }
         result.delete();
         result.delete();
       }
       }
-      
-      assertTrue( inDir5 + inDir6 == TRIALS);
-        
+
+      assertTrue(inDir5 + inDir6 == TRIALS);
+
     } finally {
     } finally {
       rmBufferDirs();
       rmBufferDirs();
     }
     }
   }
   }
-  
-  /** Two buffer dirs. The first dir does not exist & is on a read-only disk; 
+
+  /** Two buffer dirs. The first dir does not exist & is on a read-only disk;
    * The second dir exists & is RW
    * The second dir exists & is RW
    * getLocalPathForWrite with checkAccess set to false should create a parent
    * getLocalPathForWrite with checkAccess set to false should create a parent
    * directory. With checkAccess true, the directory should not be created.
    * directory. With checkAccess true, the directory should not be created.
    * @throws Exception
    * @throws Exception
    */
    */
+  @Test
   public void testLocalPathForWriteDirCreation() throws IOException {
   public void testLocalPathForWriteDirCreation() throws IOException {
+    String dir0 = buildBufferDir(ROOT, 0);
+    String dir1 = buildBufferDir(ROOT, 1);
     try {
     try {
-      conf.set(CONTEXT, BUFFER_DIR[0] + "," + BUFFER_DIR[1]);
-      assertTrue(localFs.mkdirs(BUFFER_PATH[1]));
+      conf.set(CONTEXT, dir0 + "," + dir1);
+      assertTrue(localFs.mkdirs(new Path(dir1)));
       BUFFER_ROOT.setReadOnly();
       BUFFER_ROOT.setReadOnly();
       Path p1 =
       Path p1 =
-          dirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf);
+        dirAllocator.getLocalPathForWrite("p1/x", SMALL_FILE_SIZE, conf);
       assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory());
       assertTrue(localFs.getFileStatus(p1.getParent()).isDirectory());
 
 
       Path p2 =
       Path p2 =
-          dirAllocator.getLocalPathForWrite("p2/x", SMALL_FILE_SIZE, conf,
-              false);
+        dirAllocator.getLocalPathForWrite("p2/x", SMALL_FILE_SIZE, conf,
+            false);
       try {
       try {
         localFs.getFileStatus(p2.getParent());
         localFs.getFileStatus(p2.getParent());
       } catch (Exception e) {
       } catch (Exception e) {
@@ -237,5 +289,26 @@ public class TestLocalDirAllocator extends TestCase {
       rmBufferDirs();
       rmBufferDirs();
     }
     }
   }
   }
-  
+
+  /** Test no side effect files are left over. After creating a temp
+   * file, remove both the temp file and its parent. Verify that
+   * no files or directories are left over as can happen when File objects
+   * are mistakenly created from fully qualified path strings.
+   * @throws IOException
+   */
+  @Test
+  public void testNoSideEffects() throws IOException {
+    if (isWindows) return;
+    String dir = buildBufferDir(ROOT, 0);
+    try {
+      conf.set(CONTEXT, dir);
+      File result = dirAllocator.createTmpFileForWrite(FILENAME, -1, conf);
+      assertTrue(result.delete());
+      assertTrue(result.getParentFile().delete());
+      assertFalse(new File(dir).exists());
+    } finally {
+      Shell.execCommand(new String[]{"chmod", "u+w", BUFFER_DIR_ROOT});
+      rmBufferDirs();
+    }
+  }
 }
 }
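
The tests above exercise LocalDirAllocator's round-robin allocation across the directories configured under a context key. A minimal standalone sketch of that usage, assuming a hypothetical context key and scratch paths (none of these names come from the test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.LocalDirAllocator;
    import org.apache.hadoop.fs.Path;

    public class LocalDirAllocatorSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // The allocator reads a comma-separated list of local directories
        // from the context key it was constructed with.
        conf.set("sketch.local.dirs", "/tmp/alloc/0,/tmp/alloc/1"); // hypothetical key and paths
        LocalDirAllocator allocator = new LocalDirAllocator("sketch.local.dirs");
        // Successive requests should rotate across the configured directories,
        // skipping any directory that is not writable or lacks space.
        for (int i = 0; i < 4; i++) {
          Path p = allocator.getLocalPathForWrite("scratch/file" + i, 1024, conf);
          System.out.println(p);
        }
      }
    }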

+ 3 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java

@@ -486,6 +486,9 @@ public class TestTrash extends TestCase {
     conf.set(FS_TRASH_INTERVAL_KEY, "0.2"); // 12 seconds
     conf.set(FS_TRASH_INTERVAL_KEY, "0.2"); // 12 seconds
     conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
     conf.setClass("fs.file.impl", TestLFS.class, FileSystem.class);
     conf.set(FS_TRASH_CHECKPOINT_INTERVAL_KEY, "0.1"); // 6 seconds
     conf.set(FS_TRASH_CHECKPOINT_INTERVAL_KEY, "0.1"); // 6 seconds
+    FileSystem fs = FileSystem.getLocal(conf);
+    conf.set("fs.default.name", fs.getUri().toString());
+    
     Trash trash = new Trash(conf);
     Trash trash = new Trash(conf);
 
 
     // Start Emptier in background
     // Start Emptier in background
@@ -493,8 +496,6 @@ public class TestTrash extends TestCase {
     Thread emptierThread = new Thread(emptier);
     Thread emptierThread = new Thread(emptier);
     emptierThread.start();
     emptierThread.start();
 
 
-    FileSystem fs = FileSystem.getLocal(conf);
-    conf.set("fs.defaultFS", fs.getUri().toString());
     FsShell shell = new FsShell();
     FsShell shell = new FsShell();
     shell.setConf(conf);
     shell.setConf(conf);
     shell.init();
     shell.init();
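
The reordering above matters because Trash picks up its filesystem from the configuration when it is constructed, so the default filesystem has to be set before new Trash(conf). A rough sketch of that ordering outside the test harness (interval and path values are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.Trash;

    public class TrashSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.trash.interval", "1"); // minutes a checkpoint is kept; 0 disables trash
        // Point the default filesystem at the local FS *before* constructing Trash,
        // otherwise the emptier may operate on a different filesystem.
        FileSystem fs = FileSystem.getLocal(conf);
        conf.set("fs.default.name", fs.getUri().toString());
        Trash trash = new Trash(conf);
        // Run the emptier in the background, as the test does.
        Thread emptierThread = new Thread(trash.getEmptier());
        emptierThread.setDaemon(true);
        emptierThread.start();
        // Move a file to trash instead of deleting it outright.
        Path victim = new Path("/tmp/trash-sketch-file"); // hypothetical path
        fs.create(victim).close();
        System.out.println("moved to trash: " + trash.moveToTrash(victim));
      }
    }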

+ 27 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java

@@ -70,6 +70,21 @@ public class HttpServerFunctionalTest extends Assert {
     return createServer(TEST, conf);
     return createServer(TEST, conf);
   }
   }
 
 
+  /**
+   * Create but do not start the test webapp server. The test webapp dir is
+   * prepared/checked in advance.
+   * @param conf the server configuration to use
+   * @return the server instance
+   *
+   * @throws IOException if a problem occurs
+   * @throws AssertionError if a condition was not met
+   */
+  public static HttpServer createTestServer(Configuration conf, 
+      String[] pathSpecs) throws IOException {
+    prepareTestWebapp();
+    return createServer(TEST, conf, pathSpecs);
+  }
+
   /**
   /**
    * Prepare the test webapp by creating the directory from the test properties
    * Prepare the test webapp by creating the directory from the test properties
    * fail if the directory cannot be created.
    * fail if the directory cannot be created.
@@ -104,6 +119,18 @@ public class HttpServerFunctionalTest extends Assert {
       throws IOException {
       throws IOException {
     return new HttpServer(webapp, "0.0.0.0", 0, true, conf);
     return new HttpServer(webapp, "0.0.0.0", 0, true, conf);
   }
   }
+  /**
+   * Create an HttpServer instance for the given webapp
+   * @param webapp the webapp to work with
+   * @param conf the configuration to use for the server
+   * @param pathSpecs the paths specifications the server will service
+   * @return the server
+   * @throws IOException if it could not be created
+   */
+  public static HttpServer createServer(String webapp, Configuration conf,
+      String[] pathSpecs) throws IOException {
+    return new HttpServer(webapp, "0.0.0.0", 0, true, conf, pathSpecs);
+  }
 
 
   /**
   /**
    * Create and start a server with the test webapp
    * Create and start a server with the test webapp

+ 145 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java

@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.http;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.Set;
+import java.util.TreeSet;
+
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+public class TestPathFilter extends HttpServerFunctionalTest {
+  static final Log LOG = LogFactory.getLog(HttpServer.class);
+  static final Set<String> RECORDS = new TreeSet<String>(); 
+
+  /** A very simple filter that records accessed uri's */
+  static public class RecordingFilter implements Filter {
+    private FilterConfig filterConfig = null;
+
+    public void init(FilterConfig filterConfig) {
+      this.filterConfig = filterConfig;
+    }
+
+    public void destroy() {
+      this.filterConfig = null;
+    }
+
+    public void doFilter(ServletRequest request, ServletResponse response,
+        FilterChain chain) throws IOException, ServletException {
+      if (filterConfig == null)
+         return;
+
+      String uri = ((HttpServletRequest)request).getRequestURI();
+      LOG.info("filtering " + uri);
+      RECORDS.add(uri);
+      chain.doFilter(request, response);
+    }
+
+    /** Configuration for RecordingFilter */
+    static public class Initializer extends FilterInitializer {
+      public Initializer() {}
+
+      public void initFilter(FilterContainer container, Configuration conf) {
+        container.addFilter("recording", RecordingFilter.class.getName(), null);
+      }
+    }
+  }
+  
+  
+  /** access a url, ignoring some IOException such as the page does not exist */
+  static void access(String urlstring) throws IOException {
+    LOG.warn("access " + urlstring);
+    URL url = new URL(urlstring);
+    
+    URLConnection connection = url.openConnection();
+    connection.connect();
+    
+    try {
+      BufferedReader in = new BufferedReader(new InputStreamReader(
+          connection.getInputStream()));
+      try {
+        for(; in.readLine() != null; );
+      } finally {
+        in.close();
+      }
+    } catch(IOException ioe) {
+      LOG.warn("urlstring=" + urlstring, ioe);
+    }
+  }
+
+  @Test
+  public void testPathSpecFilters() throws Exception {
+    Configuration conf = new Configuration();
+    
+    //start a http server with CountingFilter
+    conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+        RecordingFilter.Initializer.class.getName());
+    String[] pathSpecs = { "/path", "/path/*" };
+    HttpServer http = createTestServer(conf, pathSpecs);
+    http.start();
+
+    final String baseURL = "/path";
+    final String baseSlashURL = "/path/";
+    final String addedURL = "/path/nodes";
+    final String addedSlashURL = "/path/nodes/";
+    final String longURL = "/path/nodes/foo/job";
+    final String rootURL = "/";
+    final String allURL = "/*";
+
+    final String[] filteredUrls = {baseURL, baseSlashURL, addedURL, 
+        addedSlashURL, longURL};
+    final String[] notFilteredUrls = {rootURL, allURL};
+
+    // access the urls and verify our paths specs got added to the 
+    // filters
+    final String prefix = "http://localhost:" + http.getPort();
+    try {
+      for(int i = 0; i < filteredUrls.length; i++) {
+        access(prefix + filteredUrls[i]);
+      }
+      for(int i = 0; i < notFilteredUrls.length; i++) {
+        access(prefix + notFilteredUrls[i]);
+      }
+    } finally {
+      http.stop();
+    }
+
+    LOG.info("RECORDS = " + RECORDS);
+    
+    //verify records
+    for(int i = 0; i < filteredUrls.length; i++) {
+      assertTrue(RECORDS.remove(filteredUrls[i]));
+    }
+    assertTrue(RECORDS.isEmpty());
+  }
+}
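
The same path-spec constructor can also be used directly, outside the createTestServer() helper. A hedged sketch: it assumes a webapps/test directory is available on the classpath (which is what HttpServerFunctionalTest prepares) and reuses the RecordingFilter initializer defined above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer;
    import org.apache.hadoop.http.TestPathFilter.RecordingFilter;

    public class PathSpecServerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Register the filter initializer, exactly as the test does.
        conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
            RecordingFilter.Initializer.class.getName());
        // Filters added through initializers are mapped only to these path specs,
        // not to the whole servlet context.
        HttpServer server = new HttpServer("test", "0.0.0.0", 0, true, conf,
            new String[] { "/path", "/path/*" });
        server.start();
        System.out.println("listening on port " + server.getPort());
        server.stop();
      }
    }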

+ 32 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java

@@ -18,13 +18,17 @@
 package org.apache.hadoop.net;
 package org.apache.hadoop.net;
 
 
 import org.junit.Test;
 import org.junit.Test;
+
 import static org.junit.Assert.*;
 import static org.junit.Assert.*;
 
 
+import java.net.InetAddress;
+import java.net.NetworkInterface;
 import java.net.Socket;
 import java.net.Socket;
 import java.net.ConnectException;
 import java.net.ConnectException;
 import java.net.SocketException;
 import java.net.SocketException;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.net.UnknownHostException;
+import java.util.Enumeration;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 
 
@@ -88,4 +92,32 @@ public class TestNetUtils {
       fail("NetUtils.verifyHostnames threw unexpected UnknownHostException");
       fail("NetUtils.verifyHostnames threw unexpected UnknownHostException");
     }
     }
   }
   }
+  
+  /** 
+   * Test for {@link NetUtils#isLocalAddress(java.net.InetAddress)}
+   */
+  @Test
+  public void testIsLocalAddress() throws Exception {
+    // Test - local host is local address
+    assertTrue(NetUtils.isLocalAddress(InetAddress.getLocalHost()));
+    
+    // Test - all addresses bound network interface is local address
+    Enumeration<NetworkInterface> interfaces = NetworkInterface
+        .getNetworkInterfaces();
+    if (interfaces != null) { // Iterate through all network interfaces
+      while (interfaces.hasMoreElements()) {
+        NetworkInterface i = interfaces.nextElement();
+        Enumeration<InetAddress> addrs = i.getInetAddresses();
+        if (addrs == null) {
+          continue;
+        }
+        // Iterate through all the addresses of a network interface
+        while (addrs.hasMoreElements()) {
+          InetAddress addr = addrs.nextElement();
+          assertTrue(NetUtils.isLocalAddress(addr));
+        }
+      }
+    }
+    assertFalse(NetUtils.isLocalAddress(InetAddress.getByName("8.8.8.8")));
+  }
 }
 }

+ 15 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java

@@ -25,14 +25,28 @@ import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 import org.mockito.stubbing.Answer;
 
 
+import java.io.File;
+import java.io.FileWriter;
+import java.io.Writer;
 import java.util.Map;
 import java.util.Map;
 
 
 public class TestAuthenticationFilter extends TestCase {
 public class TestAuthenticationFilter extends TestCase {
 
 
   @SuppressWarnings("unchecked")
   @SuppressWarnings("unchecked")
-  public void testConfiguration() {
+  public void testConfiguration() throws Exception {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     conf.set("hadoop.http.authentication.foo", "bar");
     conf.set("hadoop.http.authentication.foo", "bar");
+    
+    File testDir = new File(System.getProperty("test.build.data", 
+                                               "target/test-dir"));
+    testDir.mkdirs();
+    File secretFile = new File(testDir, "http-secret.txt");
+    Writer writer = new FileWriter(new File(testDir, "http-secret.txt"));
+    writer.write("hadoop");
+    writer.close();
+    conf.set(AuthenticationFilterInitializer.PREFIX + 
+             AuthenticationFilterInitializer.SIGNATURE_SECRET_FILE, 
+             secretFile.getAbsolutePath());
 
 
     FilterContainer container = Mockito.mock(FilterContainer.class);
     FilterContainer container = Mockito.mock(FilterContainer.class);
     Mockito.doAnswer(
     Mockito.doAnswer(

+ 36 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -16,6 +16,9 @@ Trunk (unreleased changes)
     HDFS-2318. Provide authentication to webhdfs using SPNEGO and delegation
     HDFS-2318. Provide authentication to webhdfs using SPNEGO and delegation
     tokens.  (szetszwo)
     tokens.  (szetszwo)
 
 
+    HDFS-2340. Support getFileBlockLocations and getDelegationToken in webhdfs.
+    (szetszwo)
+
   IMPROVEMENTS
   IMPROVEMENTS
 
 
     HADOOP-7524 Change RPC to allow multiple protocols including multuple versions of the same protocol (sanjay Radia)
     HADOOP-7524 Change RPC to allow multiple protocols including multuple versions of the same protocol (sanjay Radia)
@@ -35,6 +38,18 @@ Trunk (unreleased changes)
     not use ArrayWritable for writing non-array items.  (Uma Maheswara Rao G
     not use ArrayWritable for writing non-array items.  (Uma Maheswara Rao G
     via szetszwo)
     via szetszwo)
 
 
+    HDFS-2351. Change Namenode and Datanode to register each of their protocols
+    separately. (Sanjay Radia)
+
+    HDFS-2356.  Support case insensitive query parameter names in webhdfs.
+    (szetszwo)
+
+    HDFS-2368.  Move SPNEGO conf properties from hdfs-default.xml to
+    hdfs-site.xml.  (szetszwo)
+
+    HDFS-2355. Federation: enable using the same configuration file across 
+    all the nodes in the cluster. (suresh)
+
   BUG FIXES
   BUG FIXES
     HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)
     HDFS-2287. TestParallelRead has a small off-by-one bug. (todd)
 
 
@@ -57,6 +72,17 @@ Trunk (unreleased changes)
     IOExceptions of stream closures can mask root exceptions.  (Uma Maheswara
     IOExceptions of stream closures can mask root exceptions.  (Uma Maheswara
     Rao G via szetszwo)
     Rao G via szetszwo)
 
 
+    HDFS-46.   Change default namespace quota of root directory from
+    Integer.MAX_VALUE to Long.MAX_VALUE.  (Uma Maheswara Rao G via szetszwo)
+
+    HDFS-2366. Initialize WebHdfsFileSystem.ugi in object construction.
+    (szetszwo)
+
+    HDFS-2373. Commands using webhdfs and hftp print unnecessary debug 
+    info on the console with security enabled. (Arpit Gupta via suresh)
+
+    HDFS-2361. hftp is broken, fixed username checks in JspHelper. (jitendra)
+
 Release 0.23.0 - Unreleased
 Release 0.23.0 - Unreleased
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
@@ -739,6 +765,12 @@ Release 0.23.0 - Unreleased
     HDFS-1217.  Change some NameNode methods from public to package private.
     HDFS-1217.  Change some NameNode methods from public to package private.
     (Laxman via szetszwo)
     (Laxman via szetszwo)
 
 
+    HDFS-2332. Add test for HADOOP-7629 (using an immutable FsPermission
+    object as an RPC parameter fails). (todd)
+
+    HDFS-2363. Move datanodes size printing from FSNamesystem.metasave(..)
+    to BlockManager.  (Uma Maheswara Rao G via szetszwo)
+
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -1607,7 +1639,11 @@ Release 0.22.0 - Unreleased
     HDFS-2232. Generalize regular expressions in TestHDFSCLI.
     HDFS-2232. Generalize regular expressions in TestHDFSCLI.
     (Plamen Jeliazkov via shv)
     (Plamen Jeliazkov via shv)
 
 
+    HDFS-2290. Block with corrupt replica is not getting replicated.
+    (Benoy Antony via shv)
+
 Release 0.21.1 - Unreleased
 Release 0.21.1 - Unreleased
+
     HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
     HDFS-1466. TestFcHdfsSymlink relies on /tmp/test not existing. (eli)
 
 
     HDFS-874. TestHDFSFileContextMainOperations fails on weirdly 
     HDFS-874. TestHDFSFileContextMainOperations fails on weirdly 

+ 74 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -38,6 +38,7 @@ import java.util.Random;
 import java.util.StringTokenizer;
 import java.util.StringTokenizer;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeUnit;
 
 
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockLocation;
@@ -577,17 +578,6 @@ public class DFSUtil {
     }
     }
   }
   }
   
   
-  /**
-   * Returns the configured nameservice Id
-   * 
-   * @param conf
-   *          Configuration object to lookup the nameserviceId
-   * @return nameserviceId string from conf
-   */
-  public static String getNameServiceId(Configuration conf) {
-    return conf.get(DFS_FEDERATION_NAMESERVICE_ID);
-  }
-  
   /** Return used as percentage of capacity */
   /** Return used as percentage of capacity */
   public static float getPercentUsed(long used, long capacity) {
   public static float getPercentUsed(long used, long capacity) {
     return capacity <= 0 ? 100 : ((float)used * 100.0f)/(float)capacity; 
     return capacity <= 0 ? 100 : ((float)used * 100.0f)/(float)capacity; 
@@ -707,4 +697,77 @@ public class DFSUtil {
     // TODO:HA configuration changes pending
     // TODO:HA configuration changes pending
     return false;
     return false;
   }
   }
+  
+  /**
+   * Get name service Id for the {@link NameNode} based on namenode RPC address
+   * matching the local node address.
+   */
+  public static String getNamenodeNameServiceId(Configuration conf) {
+    return getNameServiceId(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
+  }
+  
+  /**
+   * Get name service Id for the BackupNode based on backup node RPC address
+   * matching the local node address.
+   */
+  public static String getBackupNameServiceId(Configuration conf) {
+    return getNameServiceId(conf, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+  }
+  
+  /**
+   * Get name service Id for the secondary node based on secondary http address
+   * matching the local node address.
+   */
+  public static String getSecondaryNameServiceId(Configuration conf) {
+    return getNameServiceId(conf, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+  }
+  
+  /**
+   * Get the nameservice Id by matching the {@code addressKey} with the
+   * address of the local node. 
+   * 
+   * If {@link DFSConfigKeys#DFS_FEDERATION_NAMESERVICE_ID} is not specifically
+   * configured, this method determines the nameservice Id by matching the local
+   * node's address with the configured addresses. When a match is found, it
+   * returns the nameservice Id from the corresponding configuration key.
+   * 
+   * @param conf Configuration
+   * @param addressKey configuration key to get the address.
+   * @return name service Id on success, null on failure.
+   * @throws HadoopIllegalArgumentException on error
+   */
+  private static String getNameServiceId(Configuration conf, String addressKey) {
+    String nameserviceId = conf.get(DFS_FEDERATION_NAMESERVICE_ID);
+    if (nameserviceId != null) {
+      return nameserviceId;
+    }
+    
+    Collection<String> ids = getNameServiceIds(conf);
+    if (ids == null || ids.size() == 0) {
+      // Not federation configuration, hence no nameservice Id
+      return null;
+    }
+    
+    // Match the rpc address with that of local address
+    int found = 0;
+    for (String id : ids) {
+      String addr = conf.get(getNameServiceIdKey(addressKey, id));
+      InetSocketAddress s = NetUtils.createSocketAddr(addr);
+      if (NetUtils.isLocalAddress(s.getAddress())) {
+        nameserviceId = id;
+        found++;
+      }
+    }
+    if (found > 1) { // Only one address must match the local address
+      throw new HadoopIllegalArgumentException(
+          "Configuration has multiple RPC addresses that matches "
+              + "the local node's address. Please configure the system with "
+              + "the parameter " + DFS_FEDERATION_NAMESERVICE_ID);
+    }
+    if (found == 0) {
+      throw new HadoopIllegalArgumentException("Configuration address "
+          + addressKey + " is missing in configuration with name service Id");
+    }
+    return nameserviceId;
+  }
 }
 }
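
A hedged sketch of the configuration shape this resolution expects: a nameservice list plus per-nameservice RPC addresses keyed as <base key>.<nameservice id>, with no explicit DFS_FEDERATION_NAMESERVICE_ID. The addresses are placeholders chosen so that only the loopback entry matches the local node:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class NameServiceIdSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Two nameservices sharing one configuration file.
        conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
        // Suffixed RPC addresses: 192.0.2.1 stands in for a remote namenode,
        // 127.0.0.1 for this node.
        conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".ns1", "192.0.2.1:8020");
        conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".ns2", "127.0.0.1:8020");
        // Exactly one address matches a local interface, so this resolves to "ns2".
        // Zero or multiple matches would raise HadoopIllegalArgumentException,
        // as in the method above.
        System.out.println(DFSUtil.getNamenodeNameServiceId(conf));
      }
    }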

+ 20 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

@@ -115,6 +115,26 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.location = location;
     this.location = location;
     this.hostName = hostName;
     this.hostName = hostName;
   }
   }
+
+  /** Constructor */
+  public DatanodeInfo(final String name, final String storageID,
+      final int infoPort, final int ipcPort,
+      final long capacity, final long dfsUsed, final long remaining,
+      final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
+      final String networkLocation, final String hostName,
+      final AdminStates adminState) {
+    super(name, storageID, infoPort, ipcPort);
+
+    this.capacity = capacity;
+    this.dfsUsed = dfsUsed;
+    this.remaining = remaining;
+    this.blockPoolUsed = blockPoolUsed;
+    this.lastUpdate = lastUpdate;
+    this.xceiverCount = xceiverCount;
+    this.location = networkLocation;
+    this.hostName = hostName;
+    this.adminState = adminState;
+  }
   
   
   /** The raw capacity. */
   /** The raw capacity. */
   public long getCapacity() { return capacity; }
   public long getCapacity() { return capacity; }
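
The new all-fields constructor is presumably there so a DatanodeInfo can be rebuilt from a webhdfs JSON response in one call; a sketch with placeholder values, following the argument order above:

    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;

    public class DatanodeInfoSketch {
      public static void main(String[] args) {
        // Every value below is a placeholder.
        DatanodeInfo info = new DatanodeInfo(
            "dn1.example.com:50010",       // name
            "DS-1234567890",               // storageID
            50075,                         // infoPort
            50020,                         // ipcPort
            100L << 30,                    // capacity
            10L << 30,                     // dfsUsed
            90L << 30,                     // remaining
            5L << 30,                      // blockPoolUsed
            System.currentTimeMillis(),    // lastUpdate
            4,                             // xceiverCount
            "/default-rack",               // networkLocation
            "dn1.example.com",             // hostName
            AdminStates.NORMAL);           // adminState
        System.out.println(info.getCapacity() + " bytes raw capacity");
      }
    }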

+ 7 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -308,6 +308,11 @@ public class BlockManager {
   /** Dump meta data to out. */
   /** Dump meta data to out. */
   public void metaSave(PrintWriter out) {
   public void metaSave(PrintWriter out) {
     assert namesystem.hasWriteLock();
     assert namesystem.hasWriteLock();
+    final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+    final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+    datanodeManager.fetchDatanodes(live, dead, false);
+    out.println("Live Datanodes: " + live.size());
+    out.println("Dead Datanodes: " + dead.size());
     //
     //
     // Dump contents of neededReplication
     // Dump contents of neededReplication
     //
     //
@@ -842,7 +847,7 @@ public class BlockManager {
 
 
     // Add this replica to corruptReplicas Map
     // Add this replica to corruptReplicas Map
     corruptReplicas.addToCorruptReplicasMap(storedBlock, node);
     corruptReplicas.addToCorruptReplicasMap(storedBlock, node);
-    if (countNodes(storedBlock).liveReplicas() > inode.getReplication()) {
+    if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) {
       // the block is over-replicated so invalidate the replicas immediately
       // the block is over-replicated so invalidate the replicas immediately
       invalidateBlock(storedBlock, node);
       invalidateBlock(storedBlock, node);
     } else if (namesystem.isPopulatingReplQueues()) {
     } else if (namesystem.isPopulatingReplQueues()) {
@@ -867,7 +872,7 @@ public class BlockManager {
     // Check how many copies we have of the block. If we have at least one
     // Check how many copies we have of the block. If we have at least one
     // copy on a live node, then we can delete it.
     // copy on a live node, then we can delete it.
     int count = countNodes(blk).liveReplicas();
     int count = countNodes(blk).liveReplicas();
-    if (count > 1) {
+    if (count >= 1) {
       addToInvalidates(blk, dn);
       addToInvalidates(blk, dn);
       removeStoredBlock(blk, node);
       removeStoredBlock(blk, node);
       if(NameNode.stateChangeLog.isDebugEnabled()) {
       if(NameNode.stateChangeLog.isDebugEnabled()) {

+ 16 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java

@@ -54,11 +54,13 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
+import org.apache.hadoop.hdfs.web.resources.DelegationParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.http.HtmlQuoting;
 import org.apache.hadoop.http.HtmlQuoting;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.Token;
@@ -68,7 +70,7 @@ import org.apache.hadoop.util.VersionInfo;
 public class JspHelper {
 public class JspHelper {
   public static final String CURRENT_CONF = "current.conf";
   public static final String CURRENT_CONF = "current.conf";
   final static public String WEB_UGI_PROPERTY_NAME = DFSConfigKeys.DFS_WEB_UGI_KEY;
   final static public String WEB_UGI_PROPERTY_NAME = DFSConfigKeys.DFS_WEB_UGI_KEY;
-  public static final String DELEGATION_PARAMETER_NAME = "delegation";
+  public static final String DELEGATION_PARAMETER_NAME = DelegationParam.NAME;
   public static final String NAMENODE_ADDRESS = "nnaddr";
   public static final String NAMENODE_ADDRESS = "nnaddr";
   static final String SET_DELEGATION = "&" + DELEGATION_PARAMETER_NAME +
   static final String SET_DELEGATION = "&" + DELEGATION_PARAMETER_NAME +
                                               "=";
                                               "=";
@@ -551,7 +553,8 @@ public class JspHelper {
         DelegationTokenIdentifier id = new DelegationTokenIdentifier();
         DelegationTokenIdentifier id = new DelegationTokenIdentifier();
         id.readFields(in);
         id.readFields(in);
         ugi = id.getUser();
         ugi = id.getUser();
-        checkUsername(ugi.getUserName(), user);
+        checkUsername(ugi.getShortUserName(), usernameFromQuery);
+        checkUsername(ugi.getShortUserName(), user);
         ugi.addToken(token);
         ugi.addToken(token);
         ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
         ugi.setAuthenticationMethod(AuthenticationMethod.TOKEN);
       } else {
       } else {
@@ -560,13 +563,11 @@ public class JspHelper {
                                 "authenticated by filter");
                                 "authenticated by filter");
         }
         }
         ugi = UserGroupInformation.createRemoteUser(user);
         ugi = UserGroupInformation.createRemoteUser(user);
+        checkUsername(ugi.getShortUserName(), usernameFromQuery);
         // This is not necessarily true, could have been auth'ed by user-facing
         // This is not necessarily true, could have been auth'ed by user-facing
         // filter
         // filter
         ugi.setAuthenticationMethod(secureAuthMethod);
         ugi.setAuthenticationMethod(secureAuthMethod);
       }
       }
-
-      checkUsername(user, usernameFromQuery);
-
     } else { // Security's not on, pull from url
     } else { // Security's not on, pull from url
       ugi = usernameFromQuery == null?
       ugi = usernameFromQuery == null?
           getDefaultWebUser(conf) // not specified in request
           getDefaultWebUser(conf) // not specified in request
@@ -579,10 +580,18 @@ public class JspHelper {
     return ugi;
     return ugi;
   }
   }
 
 
+  /**
+   * Expected user name should be a short name.
+   */
   private static void checkUsername(final String expected, final String name
   private static void checkUsername(final String expected, final String name
       ) throws IOException {
       ) throws IOException {
-    if (name != null && !name.equals(expected)) {
-      throw new IOException("Usernames not matched: name=" + name
+    if (name == null) {
+      return;
+    }
+    KerberosName u = new KerberosName(name);
+    String shortName = u.getShortName();
+    if (!shortName.equals(expected)) {
+      throw new IOException("Usernames not matched: name=" + shortName
           + " != expected=" + expected);
           + " != expected=" + expected);
     }
     }
   }
   }
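
checkUsername now normalises the supplied name through KerberosName so that a short name and a full principal compare consistently. A hedged sketch of that mapping (the principal is made up; reducing a full principal depends on the configured auth_to_local rules, so only the plain-name case is exercised):

    import java.io.IOException;

    import org.apache.hadoop.security.authentication.util.KerberosName;

    public class ShortNameSketch {
      public static void main(String[] args) throws IOException {
        // A bare name has no host or realm component, so getShortName()
        // returns it unchanged -- the case the username check relies on.
        System.out.println(new KerberosName("webuser").getShortName()); // webuser

        // A full principal such as "webuser/host.example.com@EXAMPLE.COM" would be
        // reduced to "webuser" by the hadoop.security.auth_to_local rules; without
        // those rules loaded the call may fail, so it is left commented out here.
        // System.out.println(
        //     new KerberosName("webuser/host.example.com@EXAMPLE.COM").getShortName());
      }
    }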

+ 6 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -425,7 +425,7 @@ public class DataNode extends Configured
   private List<ServicePlugin> plugins;
   private List<ServicePlugin> plugins;
   
   
   // For InterDataNodeProtocol
   // For InterDataNodeProtocol
-  public Server ipcServer;
+  public RPC.Server ipcServer;
 
 
   private SecureResources secureResources = null;
   private SecureResources secureResources = null;
   private AbstractList<File> dataDirs;
   private AbstractList<File> dataDirs;
@@ -575,11 +575,15 @@ public class DataNode extends Configured
   private void initIpcServer(Configuration conf) throws IOException {
   private void initIpcServer(Configuration conf) throws IOException {
     InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
     InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
         conf.get("dfs.datanode.ipc.address"));
         conf.get("dfs.datanode.ipc.address"));
-    ipcServer = RPC.getServer(DataNode.class, this, ipcAddr.getHostName(),
+    
+    // Add all the RPC protocols that the Datanode implements
+    ipcServer = RPC.getServer(ClientDatanodeProtocol.class, this, ipcAddr.getHostName(),
                               ipcAddr.getPort(), 
                               ipcAddr.getPort(), 
                               conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY, 
                               conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY, 
                                           DFS_DATANODE_HANDLER_COUNT_DEFAULT), 
                                           DFS_DATANODE_HANDLER_COUNT_DEFAULT), 
                               false, conf, blockPoolTokenSecretManager);
                               false, conf, blockPoolTokenSecretManager);
+    ipcServer.addProtocol(InterDatanodeProtocol.class, this);
+    
     // set service-level authorization security policy
     // set service-level authorization security policy
     if (conf.getBoolean(
     if (conf.getBoolean(
         CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
         CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java

@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.web.ParamFilter;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
@@ -66,8 +67,11 @@ import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
 
+import com.sun.jersey.spi.container.ResourceFilters;
+
 /** Web-hdfs DataNode implementation. */
 /** Web-hdfs DataNode implementation. */
 @Path("")
 @Path("")
+@ResourceFilters(ParamFilter.class)
 public class DatanodeWebHdfsMethods {
 public class DatanodeWebHdfsMethods {
   public static final Log LOG = LogFactory.getLog(DatanodeWebHdfsMethods.class);
   public static final Log LOG = LogFactory.getLog(DatanodeWebHdfsMethods.class);
 
 

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
@@ -372,4 +373,9 @@ public class BackupNode extends NameNode {
       throw new UnsupportedActionException(msg);
       throw new UnsupportedActionException(msg);
     }
     }
   }
   }
+  
+  @Override
+  protected String getNameServiceId(Configuration conf) {
+    return DFSUtil.getBackupNameServiceId(conf);
+  }
 }
 }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -120,7 +120,7 @@ public class FSDirectory implements Closeable {
     this.cond = dirLock.writeLock().newCondition();
     this.cond = dirLock.writeLock().newCondition();
     rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
     rootDir = new INodeDirectoryWithQuota(INodeDirectory.ROOT_NAME,
         ns.createFsOwnerPermissions(new FsPermission((short)0755)),
         ns.createFsOwnerPermissions(new FsPermission((short)0755)),
-        Integer.MAX_VALUE, UNKNOWN_DISK_SPACE);
+        Long.MAX_VALUE, UNKNOWN_DISK_SPACE);
     this.fsImage = fsImage;
     this.fsImage = fsImage;
     int configuredLimit = conf.getInt(
     int configuredLimit = conf.getInt(
         DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);
         DFSConfigKeys.DFS_LIST_LIMIT, DFSConfigKeys.DFS_LIST_LIMIT_DEFAULT);

+ 0 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -564,11 +564,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       out.println(totalInodes + " files and directories, " + totalBlocks
       out.println(totalInodes + " files and directories, " + totalBlocks
           + " blocks = " + (totalInodes + totalBlocks) + " total");
           + " blocks = " + (totalInodes + totalBlocks) + " total");
 
 
-      final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-      final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
-      blockManager.getDatanodeManager().fetchDatanodes(live, dead, false);
-      out.println("Live Datanodes: "+live.size());
-      out.println("Dead Datanodes: "+dead.size());
       blockManager.metaSave(out);
       blockManager.metaSave(out);
 
 
       out.flush();
       out.flush();

+ 16 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -27,6 +27,7 @@ import java.util.List;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.ha.HealthCheckFailedException;
@@ -380,7 +381,6 @@ public class NameNode {
    * @param conf the configuration
    * @param conf the configuration
    */
    */
   protected void initialize(Configuration conf) throws IOException {
   protected void initialize(Configuration conf) throws IOException {
-    initializeGenericKeys(conf);
     UserGroupInformation.setConfiguration(conf);
     UserGroupInformation.setConfiguration(conf);
     loginAsNameNodeUser(conf);
     loginAsNameNodeUser(conf);
 
 
@@ -513,10 +513,14 @@ public class NameNode {
     this.haEnabled = DFSUtil.isHAEnabled(conf);
     this.haEnabled = DFSUtil.isHAEnabled(conf);
     this.state = !haEnabled ? ACTIVE_STATE : STANDBY_STATE;
     this.state = !haEnabled ? ACTIVE_STATE : STANDBY_STATE;
     try {
     try {
+      initializeGenericKeys(conf, getNameServiceId(conf));
       initialize(conf);
       initialize(conf);
     } catch (IOException e) {
     } catch (IOException e) {
       this.stop();
       this.stop();
       throw e;
       throw e;
+    } catch (HadoopIllegalArgumentException e) {
+      this.stop();
+      throw e;
     }
     }
   }
   }
 
 
@@ -821,16 +825,16 @@ public class NameNode {
    * @param conf
    * @param conf
    *          Configuration object to lookup specific key and to set the value
    *          Configuration object to lookup specific key and to set the value
    *          to the key passed. Note the conf object is modified
    *          to the key passed. Note the conf object is modified
+   * @param nameserviceId name service Id
    * @see DFSUtil#setGenericConf(Configuration, String, String...)
    * @see DFSUtil#setGenericConf(Configuration, String, String...)
    */
    */
-  public static void initializeGenericKeys(Configuration conf) {
-    final String nameserviceId = DFSUtil.getNameServiceId(conf);
+  public static void initializeGenericKeys(Configuration conf, String
+      nameserviceId) {
     if ((nameserviceId == null) || nameserviceId.isEmpty()) {
     if ((nameserviceId == null) || nameserviceId.isEmpty()) {
       return;
       return;
     }
     }
     
     
     DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS);
     DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS);
-    
     if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
     if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
       URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
       URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
           + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
           + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
@@ -838,6 +842,14 @@ public class NameNode {
     }
     }
   }
   }
     
     
+  /** 
+   * Get the name service Id for the node
+   * @return name service Id or null if federation is not configured
+   */
+  protected String getNameServiceId(Configuration conf) {
+    return DFSUtil.getNamenodeNameServiceId(conf);
+  }
+  
   /**
   /**
    */
    */
   public static void main(String argv[]) throws Exception {
   public static void main(String argv[]) throws Exception {
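
A sketch of what initializeGenericKeys(conf, nameserviceId) does once the id has been resolved: the suffixed federation keys are copied onto their generic names and, when an RPC address is present, the default filesystem is derived from it (the address is a placeholder):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;

    public class GenericKeysSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Only the ns1-suffixed key is set; the generic key starts out unset.
        conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".ns1",
            "nn1.example.com:8020"); // placeholder
        NameNode.initializeGenericKeys(conf, "ns1");
        // The generic key now carries ns1's value, and the default filesystem
        // should point at hdfs://nn1.example.com:8020.
        System.out.println(conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
        System.out.println(FileSystem.getDefaultUri(conf));
      }
    }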

+ 14 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -66,6 +66,7 @@ import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -145,10 +146,17 @@ class NameNodeRpcServer implements NamenodeProtocols {
       serviceRpcServer = null;
       serviceRpcServer = null;
       serviceRPCAddress = null;
       serviceRPCAddress = null;
     }
     }
-    this.server = RPC.getServer(NamenodeProtocols.class, this,
+    // Add all the RPC protocols that the namenode implements
+    this.server = RPC.getServer(ClientProtocol.class, this,
                                 socAddr.getHostName(), socAddr.getPort(),
                                 socAddr.getHostName(), socAddr.getPort(),
                                 handlerCount, false, conf, 
                                 handlerCount, false, conf, 
                                 namesystem.getDelegationTokenSecretManager());
                                 namesystem.getDelegationTokenSecretManager());
+    this.server.addProtocol(DatanodeProtocol.class, this);
+    this.server.addProtocol(NamenodeProtocol.class, this);
+    this.server.addProtocol(RefreshAuthorizationPolicyProtocol.class, this);
+    this.server.addProtocol(RefreshUserMappingsProtocol.class, this);
+    this.server.addProtocol(GetUserMappingsProtocol.class, this);
+    
 
 
     // set service-level authorization security policy
     // set service-level authorization security policy
     if (serviceAuthEnabled =
     if (serviceAuthEnabled =
@@ -971,8 +979,11 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
   }
 
 
   private static String getClientMachine() {
   private static String getClientMachine() {
-    String clientMachine = Server.getRemoteAddress();
-    if (clientMachine == null) {
+    String clientMachine = NamenodeWebHdfsMethods.getRemoteAddress();
+    if (clientMachine == null) { //not a web client
+      clientMachine = Server.getRemoteAddress();
+    }
+    if (clientMachine == null) { //not a RPC client
       clientMachine = "";
       clientMachine = "";
     }
     }
     return clientMachine;
     return clientMachine;

+ 8 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -38,10 +38,12 @@ import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.PosixParser;
 import org.apache.commons.cli.PosixParser;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -173,12 +175,17 @@ public class SecondaryNameNode implements Runnable {
   public SecondaryNameNode(Configuration conf,
   public SecondaryNameNode(Configuration conf,
       CommandLineOpts commandLineOpts) throws IOException {
       CommandLineOpts commandLineOpts) throws IOException {
     try {
     try {
-      NameNode.initializeGenericKeys(conf);
+      NameNode.initializeGenericKeys(conf,
+          DFSUtil.getSecondaryNameServiceId(conf));
       initialize(conf, commandLineOpts);
       initialize(conf, commandLineOpts);
     } catch(IOException e) {
     } catch(IOException e) {
       shutdown();
       shutdown();
       LOG.fatal("Failed to start secondary namenode. ", e);
       LOG.fatal("Failed to start secondary namenode. ", e);
       throw e;
       throw e;
+    } catch(HadoopIllegalArgumentException e) {
+      shutdown();
+      LOG.fatal("Failed to start secondary namenode. ", e);
+      throw e;
     }
     }
   }
   }
   
   

+ 60 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.hdfs.web.JsonUtil;
+import org.apache.hadoop.hdfs.web.ParamFilter;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@@ -78,6 +79,7 @@ import org.apache.hadoop.hdfs.web.resources.PostOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
 import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
 import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
 import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
+import org.apache.hadoop.hdfs.web.resources.RenewerParam;
 import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
 import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
@@ -89,10 +91,20 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenIdentifier;
 
 
+import com.sun.jersey.spi.container.ResourceFilters;
+
 /** Web-hdfs NameNode implementation. */
 /** Web-hdfs NameNode implementation. */
 @Path("")
 @Path("")
+@ResourceFilters(ParamFilter.class)
 public class NamenodeWebHdfsMethods {
 public class NamenodeWebHdfsMethods {
-  private static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
+  public static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
+
+  private static final ThreadLocal<String> REMOTE_ADDRESS = new ThreadLocal<String>(); 
+
+  /** @return the remote client address. */
+  public static String getRemoteAddress() {
+    return REMOTE_ADDRESS.get();
+  }
 
 
   private @Context ServletContext context;
   private @Context ServletContext context;
   private @Context HttpServletRequest request;
   private @Context HttpServletRequest request;
@@ -215,6 +227,8 @@ public class NamenodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       @Override
       public Response run() throws IOException, URISyntaxException {
       public Response run() throws IOException, URISyntaxException {
+        REMOTE_ADDRESS.set(request.getRemoteAddr());
+        try {
 
 
     final String fullpath = path.getAbsolutePath();
     final String fullpath = path.getAbsolutePath();
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
@@ -272,6 +286,10 @@ public class NamenodeWebHdfsMethods {
     default:
     default:
       throw new UnsupportedOperationException(op + " is not supported");
       throw new UnsupportedOperationException(op + " is not supported");
     }
     }
+
+        } finally {
+          REMOTE_ADDRESS.set(null);
+        }
       }
       }
     });
     });
   }
   }
@@ -301,6 +319,8 @@ public class NamenodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       @Override
       public Response run() throws IOException, URISyntaxException {
       public Response run() throws IOException, URISyntaxException {
+        REMOTE_ADDRESS.set(request.getRemoteAddr());
+        try {
 
 
     final String fullpath = path.getAbsolutePath();
     final String fullpath = path.getAbsolutePath();
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
@@ -315,6 +335,10 @@ public class NamenodeWebHdfsMethods {
     default:
     default:
       throw new UnsupportedOperationException(op + " is not supported");
       throw new UnsupportedOperationException(op + " is not supported");
     }
     }
+
+        } finally {
+          REMOTE_ADDRESS.set(null);
+        }
       }
       }
     });
     });
   }
   }
@@ -335,10 +359,12 @@ public class NamenodeWebHdfsMethods {
           final OffsetParam offset,
           final OffsetParam offset,
       @QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
       @QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
           final LengthParam length,
           final LengthParam length,
+      @QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
+          final RenewerParam renewer,
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize
           final BufferSizeParam bufferSize
       ) throws IOException, URISyntaxException, InterruptedException {
       ) throws IOException, URISyntaxException, InterruptedException {
-    return get(ugi, delegation, ROOT, op, offset, length, bufferSize);
+    return get(ugi, delegation, ROOT, op, offset, length, renewer, bufferSize);
   }
   }
 
 
   /** Handle HTTP GET request. */
   /** Handle HTTP GET request. */
@@ -356,19 +382,23 @@ public class NamenodeWebHdfsMethods {
           final OffsetParam offset,
           final OffsetParam offset,
       @QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
       @QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
           final LengthParam length,
           final LengthParam length,
+      @QueryParam(RenewerParam.NAME) @DefaultValue(RenewerParam.DEFAULT)
+          final RenewerParam renewer,
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
       @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
           final BufferSizeParam bufferSize
           final BufferSizeParam bufferSize
       ) throws IOException, URISyntaxException, InterruptedException {
       ) throws IOException, URISyntaxException, InterruptedException {
 
 
     if (LOG.isTraceEnabled()) {
     if (LOG.isTraceEnabled()) {
       LOG.trace(op + ": " + path + ", ugi=" + ugi
       LOG.trace(op + ": " + path + ", ugi=" + ugi
-          + Param.toSortedString(", ", offset, length, bufferSize));
+          + Param.toSortedString(", ", offset, length, renewer, bufferSize));
     }
     }
 
 
 
 
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       @Override
       public Response run() throws IOException, URISyntaxException {
       public Response run() throws IOException, URISyntaxException {
+        REMOTE_ADDRESS.set(request.getRemoteAddr());
+        try {
 
 
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final String fullpath = path.getAbsolutePath();
     final String fullpath = path.getAbsolutePath();
@@ -381,6 +411,15 @@ public class NamenodeWebHdfsMethods {
           op.getValue(), offset.getValue(), offset, length, bufferSize);
           op.getValue(), offset.getValue(), offset, length, bufferSize);
       return Response.temporaryRedirect(uri).build();
       return Response.temporaryRedirect(uri).build();
     }
     }
+    case GETFILEBLOCKLOCATIONS:
+    {
+      final long offsetValue = offset.getValue();
+      final Long lengthValue = length.getValue();
+      final LocatedBlocks locatedblocks = np.getBlockLocations(fullpath,
+          offsetValue, lengthValue != null? lengthValue: offsetValue + 1);
+      final String js = JsonUtil.toJsonString(locatedblocks);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     case GETFILESTATUS:
     case GETFILESTATUS:
     {
     {
       final HdfsFileStatus status = np.getFileInfo(fullpath);
       final HdfsFileStatus status = np.getFileInfo(fullpath);
@@ -392,9 +431,20 @@ public class NamenodeWebHdfsMethods {
       final StreamingOutput streaming = getListingStream(np, fullpath);
       final StreamingOutput streaming = getListingStream(np, fullpath);
       return Response.ok(streaming).type(MediaType.APPLICATION_JSON).build();
       return Response.ok(streaming).type(MediaType.APPLICATION_JSON).build();
     }
     }
+    case GETDELEGATIONTOKEN:
+    {
+      final Token<? extends TokenIdentifier> token = generateDelegationToken(
+          namenode, ugi, renewer.getValue());
+      final String js = JsonUtil.toJsonString(token);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     default:
     default:
       throw new UnsupportedOperationException(op + " is not supported");
       throw new UnsupportedOperationException(op + " is not supported");
     }    
     }    
+
+        } finally {
+          REMOTE_ADDRESS.set(null);
+        }
       }
       }
     });
     });
   }
   }
@@ -462,6 +512,9 @@ public class NamenodeWebHdfsMethods {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       @Override
       public Response run() throws IOException {
       public Response run() throws IOException {
+        REMOTE_ADDRESS.set(request.getRemoteAddr());
+        try {
+
         final NameNode namenode = (NameNode)context.getAttribute("name.node");
         final NameNode namenode = (NameNode)context.getAttribute("name.node");
         final String fullpath = path.getAbsolutePath();
         final String fullpath = path.getAbsolutePath();
 
 
@@ -475,6 +528,10 @@ public class NamenodeWebHdfsMethods {
         default:
         default:
           throw new UnsupportedOperationException(op + " is not supported");
           throw new UnsupportedOperationException(op + " is not supported");
         }
         }
+
+        } finally {
+          REMOTE_ADDRESS.set(null);
+        }
       }
       }
     });
     });
   }
   }
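
Note: the REMOTE_ADDRESS changes above follow a standard thread-local pattern: the caller's address is recorded before the privileged action runs and cleared in a finally block so that pooled servlet threads do not carry state from one request into the next. Below is a minimal, self-contained sketch of that pattern; the class and method names are illustrative and not part of this patch.

import java.util.concurrent.Callable;

public class RemoteAddressContext {
  // Remote address of the request currently being handled by this thread.
  private static final ThreadLocal<String> REMOTE_ADDRESS = new ThreadLocal<String>();

  public static String get() {
    return REMOTE_ADDRESS.get();
  }

  public static <T> T runWithRemoteAddress(final String remoteAddr,
      final Callable<T> action) throws Exception {
    REMOTE_ADDRESS.set(remoteAddr);
    try {
      return action.call();
    } finally {
      // Always clear the slot; otherwise the next request served by this
      // pooled thread would observe a stale address.
      REMOTE_ADDRESS.set(null);
    }
  }
}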

+ 32 - 13
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java

@@ -149,7 +149,9 @@ public class DelegationTokenFetcher {
                 DataInputStream in = new DataInputStream(
                 DataInputStream in = new DataInputStream(
                     new ByteArrayInputStream(token.getIdentifier()));
                     new ByteArrayInputStream(token.getIdentifier()));
                 id.readFields(in);
                 id.readFields(in);
-                System.out.println("Token (" + id + ") for " + token.getService());
+                if(LOG.isDebugEnabled()) {
+                  LOG.debug("Token (" + id + ") for " + token.getService());
+                }
               }
               }
               return null;
               return null;
             }
             }
@@ -160,22 +162,28 @@ public class DelegationTokenFetcher {
                 for (Token<?> token : readTokens(tokenFile, conf)) {
                 for (Token<?> token : readTokens(tokenFile, conf)) {
                   result = renewDelegationToken(webUrl,
                   result = renewDelegationToken(webUrl,
                       (Token<DelegationTokenIdentifier>) token);
                       (Token<DelegationTokenIdentifier>) token);
-                  System.out.println("Renewed token via " + webUrl + " for "
-                      + token.getService() + " until: " + new Date(result));
+                  if(LOG.isDebugEnabled()) {
+                    LOG.debug("Renewed token via " + webUrl + " for "
+                          + token.getService() + " until: " + new Date(result));
+                  }
                 }
                 }
               } else if (cancel) {
               } else if (cancel) {
                 for (Token<?> token : readTokens(tokenFile, conf)) {
                 for (Token<?> token : readTokens(tokenFile, conf)) {
                   cancelDelegationToken(webUrl,
                   cancelDelegationToken(webUrl,
                       (Token<DelegationTokenIdentifier>) token);
                       (Token<DelegationTokenIdentifier>) token);
-                  System.out.println("Cancelled token via " + webUrl + " for "
-                      + token.getService());
+                  if(LOG.isDebugEnabled()) {
+                    LOG.debug("Cancelled token via " + webUrl + " for "
+                        + token.getService());
+                  }
                 }
                 }
               } else {
               } else {
                 Credentials creds = getDTfromRemote(webUrl, renewer);
                 Credentials creds = getDTfromRemote(webUrl, renewer);
                 creds.writeTokenStorageFile(tokenFile, conf);
                 creds.writeTokenStorageFile(tokenFile, conf);
                 for (Token<?> token : creds.getAllTokens()) {
                 for (Token<?> token : creds.getAllTokens()) {
-                  System.out.println("Fetched token via " + webUrl + " for "
-                      + token.getService() + " into " + tokenFile);
+                  if(LOG.isDebugEnabled()) {
+                    LOG.debug("Fetched token via " + webUrl + " for "
+                        + token.getService() + " into " + tokenFile);
+                  }
                 }
                 }
               }
               }
             } else {
             } else {
@@ -184,24 +192,30 @@ public class DelegationTokenFetcher {
                 for (Token<?> token : readTokens(tokenFile, conf)) {
                 for (Token<?> token : readTokens(tokenFile, conf)) {
                   ((DistributedFileSystem) fs)
                   ((DistributedFileSystem) fs)
                       .cancelDelegationToken((Token<DelegationTokenIdentifier>) token);
                       .cancelDelegationToken((Token<DelegationTokenIdentifier>) token);
-                  System.out.println("Cancelled token for "
-                      + token.getService());
+                  if(LOG.isDebugEnabled()) {
+                    LOG.debug("Cancelled token for "
+                        + token.getService());
+                  }
                 }
                 }
               } else if (renew) {
               } else if (renew) {
                 long result;
                 long result;
                 for (Token<?> token : readTokens(tokenFile, conf)) {
                 for (Token<?> token : readTokens(tokenFile, conf)) {
                   result = ((DistributedFileSystem) fs)
                   result = ((DistributedFileSystem) fs)
                       .renewDelegationToken((Token<DelegationTokenIdentifier>) token);
                       .renewDelegationToken((Token<DelegationTokenIdentifier>) token);
-                  System.out.println("Renewed token for " + token.getService()
-                      + " until: " + new Date(result));
+                  if(LOG.isDebugEnabled()) {
+                    LOG.debug("Renewed token for " + token.getService()
+                        + " until: " + new Date(result));
+                  }
                 }
                 }
               } else {
               } else {
                 Token<?> token = fs.getDelegationToken(renewer);
                 Token<?> token = fs.getDelegationToken(renewer);
                 Credentials cred = new Credentials();
                 Credentials cred = new Credentials();
                 cred.addToken(token.getService(), token);
                 cred.addToken(token.getService(), token);
                 cred.writeTokenStorageFile(tokenFile, conf);
                 cred.writeTokenStorageFile(tokenFile, conf);
-                System.out.println("Fetched token for " + token.getService()
-                    + " into " + tokenFile);
+                if(LOG.isDebugEnabled()) {
+                  LOG.debug("Fetched token for " + token.getService()
+                      + " into " + tokenFile);
+                }
               }
               }
             }
             }
             return null;
             return null;
@@ -221,6 +235,11 @@ public class DelegationTokenFetcher {
       } else {
       } else {
         url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC);
         url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC);
       }
       }
+      
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Retrieving token from: " + url);
+      }
+      
       URL remoteURL = new URL(url.toString());
       URL remoteURL = new URL(url.toString());
       SecurityUtil.fetchServiceTicket(remoteURL);
       SecurityUtil.fetchServiceTicket(remoteURL);
       URLConnection connection = remoteURL.openConnection();
       URLConnection connection = remoteURL.openConnection();
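
Note: every println-to-LOG.debug conversion above wraps the call in an isDebugEnabled() guard so the message string is never built when debug logging is disabled. A stand-alone sketch of the idiom with commons-logging follows; the class name and message are illustrative.

import java.util.Date;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class GuardedLoggingExample {
  private static final Log LOG = LogFactory.getLog(GuardedLoggingExample.class);

  static void reportRenewal(String service, long expiry) {
    // The guard skips the string concatenation entirely unless debug is on.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Renewed token for " + service + " until: " + new Date(expiry));
    }
  }
}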

+ 277 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java

@@ -17,19 +17,31 @@
  */
  */
 package org.apache.hadoop.hdfs.web;
 package org.apache.hadoop.hdfs.web;
 
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.TreeMap;
 
 
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.mortbay.util.ajax.JSON;
 import org.mortbay.util.ajax.JSON;
 
 
 /** JSON Utilities */
 /** JSON Utilities */
 public class JsonUtil {
 public class JsonUtil {
-  private static final ThreadLocal<Map<String, Object>> jsonMap
-      = new ThreadLocal<Map<String, Object>>() {
+  private static class ThreadLocalMap extends ThreadLocal<Map<String, Object>> {
     @Override
     @Override
     protected Map<String, Object> initialValue() {
     protected Map<String, Object> initialValue() {
       return new TreeMap<String, Object>();
       return new TreeMap<String, Object>();
@@ -41,7 +53,54 @@ public class JsonUtil {
       m.clear();
       m.clear();
       return m;
       return m;
     }
     }
-  };
+  }
+
+  private static final ThreadLocalMap jsonMap = new ThreadLocalMap();
+  private static final ThreadLocalMap tokenMap = new ThreadLocalMap();
+  private static final ThreadLocalMap datanodeInfoMap = new ThreadLocalMap();
+  private static final ThreadLocalMap extendedBlockMap = new ThreadLocalMap();
+  private static final ThreadLocalMap locatedBlockMap = new ThreadLocalMap();
+
+  private static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
+
+  /** Convert a token object to a Json string. */
+  public static String toJsonString(final Token<? extends TokenIdentifier> token
+      ) throws IOException {
+    if (token == null) {
+      return null;
+    }
+
+    final Map<String, Object> m = tokenMap.get();
+    m.put("urlString", token.encodeToUrlString());
+    return JSON.toString(m);
+  }
+
+  /** Convert a Json map to a Token. */
+  public static Token<? extends TokenIdentifier> toToken(
+      final Map<?, ?> m) throws IOException {
+    if (m == null) {
+      return null;
+    }
+
+    final Token<DelegationTokenIdentifier> token
+        = new Token<DelegationTokenIdentifier>();
+    token.decodeFromUrlString((String)m.get("urlString"));
+    return token;
+  }
+
+  /** Convert a Json map to a Token of DelegationTokenIdentifier. */
+  @SuppressWarnings("unchecked")
+  public static Token<DelegationTokenIdentifier> toDelegationToken(
+      final Map<?, ?> m) throws IOException {
+    return (Token<DelegationTokenIdentifier>)toToken(m);
+  }
+
+  /** Convert a Json map to a Token of BlockTokenIdentifier. */
+  @SuppressWarnings("unchecked")
+  public static Token<BlockTokenIdentifier> toBlockToken(
+      final Map<?, ?> m) throws IOException {
+    return (Token<BlockTokenIdentifier>)toToken(m);
+  }
 
 
   /** Convert an exception object to a Json string. */
   /** Convert an exception object to a Json string. */
   public static String toJsonString(final Exception e) {
   public static String toJsonString(final Exception e) {
@@ -77,11 +136,10 @@ public class JsonUtil {
 
 
   /** Convert a HdfsFileStatus object to a Json string. */
   /** Convert a HdfsFileStatus object to a Json string. */
   public static String toJsonString(final HdfsFileStatus status) {
   public static String toJsonString(final HdfsFileStatus status) {
-    final Map<String, Object> m = jsonMap.get();
     if (status == null) {
     if (status == null) {
-      m.put("isNull", true);
+      return null;
     } else {
     } else {
-      m.put("isNull", false);
+      final Map<String, Object> m = jsonMap.get();
       m.put("localName", status.getLocalName());
       m.put("localName", status.getLocalName());
       m.put("isDir", status.isDir());
       m.put("isDir", status.isDir());
       m.put("isSymlink", status.isSymlink());
       m.put("isSymlink", status.isSymlink());
@@ -97,8 +155,8 @@ public class JsonUtil {
       m.put("modificationTime", status.getModificationTime());
       m.put("modificationTime", status.getModificationTime());
       m.put("blockSize", status.getBlockSize());
       m.put("blockSize", status.getBlockSize());
       m.put("replication", status.getReplication());
       m.put("replication", status.getReplication());
+      return JSON.toString(m);
     }
     }
-    return JSON.toString(m);
   }
   }
 
 
   @SuppressWarnings("unchecked")
   @SuppressWarnings("unchecked")
@@ -106,9 +164,9 @@ public class JsonUtil {
     return (Map<String, Object>) JSON.parse(jsonString);
     return (Map<String, Object>) JSON.parse(jsonString);
   }
   }
 
 
-  /** Convert a Json string to a HdfsFileStatus object. */
+  /** Convert a Json map to a HdfsFileStatus object. */
   public static HdfsFileStatus toFileStatus(final Map<String, Object> m) {
   public static HdfsFileStatus toFileStatus(final Map<String, Object> m) {
-    if ((Boolean)m.get("isNull")) {
+    if (m == null) {
       return null;
       return null;
     }
     }
 
 
@@ -130,4 +188,214 @@ public class JsonUtil {
         permission, owner, group,
         permission, owner, group,
         symlink, DFSUtil.string2Bytes(localName));
         symlink, DFSUtil.string2Bytes(localName));
   }
   }
+
+  /** Convert an ExtendedBlock to a Json string. */
+  public static String toJsonString(final ExtendedBlock extendedblock) {
+    if (extendedblock == null) {
+      return null;
+    }
+
+    final Map<String, Object> m = extendedBlockMap.get();
+    m.put("blockPoolId", extendedblock.getBlockPoolId());
+    m.put("blockId", extendedblock.getBlockId());
+    m.put("numBytes", extendedblock.getNumBytes());
+    m.put("generationStamp", extendedblock.getGenerationStamp());
+    return JSON.toString(m);
+  }
+
+  /** Convert a Json map to an ExtendedBlock object. */
+  public static ExtendedBlock toExtendedBlock(final Map<?, ?> m) {
+    if (m == null) {
+      return null;
+    }
+    
+    final String blockPoolId = (String)m.get("blockPoolId");
+    final long blockId = (Long)m.get("blockId");
+    final long numBytes = (Long)m.get("numBytes");
+    final long generationStamp = (Long)m.get("generationStamp");
+    return new ExtendedBlock(blockPoolId, blockId, numBytes, generationStamp);
+  }
+  
+  /** Convert a DatanodeInfo to a Json string. */
+  public static String toJsonString(final DatanodeInfo datanodeinfo) {
+    if (datanodeinfo == null) {
+      return null;
+    }
+
+    final Map<String, Object> m = datanodeInfoMap.get();
+    m.put("name", datanodeinfo.getName());
+    m.put("storageID", datanodeinfo.getStorageID());
+    m.put("infoPort", datanodeinfo.getInfoPort());
+
+    m.put("ipcPort", datanodeinfo.getIpcPort());
+
+    m.put("capacity", datanodeinfo.getCapacity());
+    m.put("dfsUsed", datanodeinfo.getDfsUsed());
+    m.put("remaining", datanodeinfo.getRemaining());
+    m.put("blockPoolUsed", datanodeinfo.getBlockPoolUsed());
+    m.put("lastUpdate", datanodeinfo.getLastUpdate());
+    m.put("xceiverCount", datanodeinfo.getXceiverCount());
+    m.put("networkLocation", datanodeinfo.getNetworkLocation());
+    m.put("hostName", datanodeinfo.getHostName());
+    m.put("adminState", datanodeinfo.getAdminState().name());
+    return JSON.toString(m);
+  }
+
+  /** Convert a Json map to a DatanodeInfo object. */
+  public static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) {
+    if (m == null) {
+      return null;
+    }
+
+    return new DatanodeInfo(
+        (String)m.get("name"),
+        (String)m.get("storageID"),
+        (int)(long)(Long)m.get("infoPort"),
+        (int)(long)(Long)m.get("ipcPort"),
+
+        (Long)m.get("capacity"),
+        (Long)m.get("dfsUsed"),
+        (Long)m.get("remaining"),
+        (Long)m.get("blockPoolUsed"),
+        (Long)m.get("lastUpdate"),
+        (int)(long)(Long)m.get("xceiverCount"),
+        (String)m.get("networkLocation"),
+        (String)m.get("hostName"),
+        AdminStates.valueOf((String)m.get("adminState")));
+  }
+
+  /** Convert a DatanodeInfo[] to a Json string. */
+  public static String toJsonString(final DatanodeInfo[] array
+      ) throws IOException {
+    if (array == null) {
+      return null;
+    } else if (array.length == 0) {
+      return "[]";
+    } else {
+      final StringBuilder b = new StringBuilder().append('[').append(
+          toJsonString(array[0]));
+      for(int i = 1; i < array.length; i++) {
+        b.append(", ").append(toJsonString(array[i]));
+      }
+      return b.append(']').toString();
+    }
+  }
+
+  /** Convert an Object[] to a DatanodeInfo[]. */
+  public static DatanodeInfo[] toDatanodeInfoArray(final Object[] objects) {
+    if (objects == null) {
+      return null;
+    } else if (objects.length == 0) {
+      return EMPTY_DATANODE_INFO_ARRAY;
+    } else {
+      final DatanodeInfo[] array = new DatanodeInfo[objects.length];
+      for(int i = 0; i < array.length; i++) {
+        array[i] = (DatanodeInfo)toDatanodeInfo((Map<?, ?>) objects[i]);
+      }
+      return array;
+    }
+  }
+
+  /** Convert a LocatedBlock to a Json string. */
+  public static String toJsonString(final LocatedBlock locatedblock
+      ) throws IOException {
+    if (locatedblock == null) {
+      return null;
+    }
+ 
+    final Map<String, Object> m = locatedBlockMap.get();
+    m.put("blockToken", toJsonString(locatedblock.getBlockToken()));
+    m.put("isCorrupt", locatedblock.isCorrupt());
+    m.put("startOffset", locatedblock.getStartOffset());
+    m.put("block", toJsonString(locatedblock.getBlock()));
+
+    m.put("locations", toJsonString(locatedblock.getLocations()));
+    return JSON.toString(m);
+  }
+
+  /** Convert a Json map to LocatedBlock. */
+  public static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
+    if (m == null) {
+      return null;
+    }
+
+    final ExtendedBlock b = toExtendedBlock((Map<?, ?>)JSON.parse((String)m.get("block")));
+    final DatanodeInfo[] locations = toDatanodeInfoArray(
+        (Object[])JSON.parse((String)m.get("locations")));
+    final long startOffset = (Long)m.get("startOffset");
+    final boolean isCorrupt = (Boolean)m.get("isCorrupt");
+
+    final LocatedBlock locatedblock = new LocatedBlock(b, locations, startOffset, isCorrupt);
+    locatedblock.setBlockToken(toBlockToken((Map<?, ?>)JSON.parse((String)m.get("blockToken"))));
+    return locatedblock;
+  }
+
+  /** Convert a List of LocatedBlock to a Json string. */
+  public static String toJsonString(final List<LocatedBlock> array
+      ) throws IOException {
+    if (array == null) {
+      return null;
+    } else if (array.size() == 0) {
+      return "[]";
+    } else {
+      final StringBuilder b = new StringBuilder().append('[').append(
+          toJsonString(array.get(0)));
+      for(int i = 1; i < array.size(); i++) {
+        b.append(",\n  ").append(toJsonString(array.get(i)));
+      }
+      return b.append(']').toString();
+    }
+  }
+
+  /** Convert an Object[] to a List of LocatedBlock. 
+   * @throws IOException */
+  public static List<LocatedBlock> toLocatedBlockList(final Object[] objects
+      ) throws IOException {
+    if (objects == null) {
+      return null;
+    } else if (objects.length == 0) {
+      return Collections.emptyList();
+    } else {
+      final List<LocatedBlock> list = new ArrayList<LocatedBlock>(objects.length);
+      for(int i = 0; i < objects.length; i++) {
+        list.add((LocatedBlock)toLocatedBlock((Map<?, ?>)objects[i]));
+      }
+      return list;
+    }
+  }
+
+  /** Convert LocatedBlocks to a Json string. */
+  public static String toJsonString(final LocatedBlocks locatedblocks
+      ) throws IOException {
+    if (locatedblocks == null) {
+      return null;
+    }
+
+    final Map<String, Object> m = jsonMap.get();
+    m.put("fileLength", locatedblocks.getFileLength());
+    m.put("isUnderConstruction", locatedblocks.isUnderConstruction());
+
+    m.put("locatedBlocks", toJsonString(locatedblocks.getLocatedBlocks()));
+    m.put("lastLocatedBlock", toJsonString(locatedblocks.getLastLocatedBlock()));
+    m.put("isLastBlockComplete", locatedblocks.isLastBlockComplete());
+    return JSON.toString(m);
+  }
+
+  /** Convert a Json map to LocatedBlocks. */
+  public static LocatedBlocks toLocatedBlocks(final Map<String, Object> m
+      ) throws IOException {
+    if (m == null) {
+      return null;
+    }
+    
+    final long fileLength = (Long)m.get("fileLength");
+    final boolean isUnderConstruction = (Boolean)m.get("isUnderConstruction");
+    final List<LocatedBlock> locatedBlocks = toLocatedBlockList(
+        (Object[])JSON.parse((String) m.get("locatedBlocks")));
+    final LocatedBlock lastLocatedBlock = toLocatedBlock(
+        (Map<?, ?>)JSON.parse((String)m.get("lastLocatedBlock")));
+    final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
+    return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
+        lastLocatedBlock, isLastBlockComplete);
+  }
 }
 }
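
Note: the token helpers added to JsonUtil above serialize a delegation token as a single-entry JSON map keyed by "urlString". The sketch below round-trips a token the same way, using only Token.encodeToUrlString()/decodeFromUrlString() and the Jetty JSON class JsonUtil already depends on; the stand-alone main class is illustrative and not part of the patch.

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.security.token.Token;
import org.mortbay.util.ajax.JSON;

public class TokenJsonRoundTrip {
  public static void main(String[] args) throws IOException {
    final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();

    // Serialize: store the URL-safe encoding under "urlString", as toJsonString(Token) does.
    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("urlString", token.encodeToUrlString());
    final String json = JSON.toString(m);

    // Deserialize: parse the map and decode the string back into a token, as toToken(Map) does.
    final Map<?, ?> parsed = (Map<?, ?>) JSON.parse(json);
    final Token<DelegationTokenIdentifier> copy = new Token<DelegationTokenIdentifier>();
    copy.decodeFromUrlString((String) parsed.get("urlString"));

    System.out.println("round trip ok: " + copy.equals(token));
  }
}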

+ 85 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/ParamFilter.java

@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import java.net.URI;
+import java.util.List;
+import java.util.Map;
+
+import javax.ws.rs.core.MultivaluedMap;
+import javax.ws.rs.core.UriBuilder;
+
+import com.sun.jersey.spi.container.ContainerRequest;
+import com.sun.jersey.spi.container.ContainerRequestFilter;
+import com.sun.jersey.spi.container.ContainerResponseFilter;
+import com.sun.jersey.spi.container.ResourceFilter;
+
+/**
+ * A filter that converts query parameter names to lower case
+ * so that parameter names are treated as case-insensitive.
+ */
+public class ParamFilter implements ResourceFilter {
+  private static final ContainerRequestFilter LOWER_CASE
+      = new ContainerRequestFilter() {
+    @Override
+    public ContainerRequest filter(final ContainerRequest request) {
+      final MultivaluedMap<String, String> parameters = request.getQueryParameters();
+      if (containsUpperCase(parameters.keySet())) {
+        //rebuild URI
+        final URI lower = rebuildQuery(request.getRequestUri(), parameters);
+        request.setUris(request.getBaseUri(), lower);
+      }
+      return request;
+    }
+  };
+
+  @Override
+  public ContainerRequestFilter getRequestFilter() {
+    return LOWER_CASE;
+  }
+
+  @Override
+  public ContainerResponseFilter getResponseFilter() {
+    return null;
+  }
+
+  /** Do the strings contain upper case letters? */
+  private static boolean containsUpperCase(final Iterable<String> strings) {
+    for(String s : strings) {
+      for(int i = 0; i < s.length(); i++) {
+        if (Character.isUpperCase(s.charAt(i))) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  /** Rebuild the URI query with lower case parameter names. */
+  private static URI rebuildQuery(final URI uri,
+      final MultivaluedMap<String, String> parameters) {
+    UriBuilder b = UriBuilder.fromUri(uri).replaceQuery("");
+    for(Map.Entry<String, List<String>> e : parameters.entrySet()) {
+      final String key = e.getKey().toLowerCase();
+      for(String v : e.getValue()) {
+        b = b.queryParam(key, v);
+      }
+    }
+    return b.build();
+  }
+}
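
Note: the effect of ParamFilter is easiest to see on a URI directly: any upper-case query parameter name is rewritten to lower case before the request is dispatched. The sketch below reproduces the rewrite with the same JAX-RS UriBuilder API the filter uses; it assumes a JAX-RS implementation (e.g. Jersey) is on the classpath, and the host, port and path are placeholders.

import java.net.URI;

import javax.ws.rs.core.UriBuilder;

public class LowerCaseQueryDemo {
  public static void main(String[] args) {
    final URI original =
        URI.create("http://namenode:50070/webhdfs/v1/foo?OP=LISTSTATUS&BufferSize=4096");

    // Rebuild the query with lower-case parameter names, keeping the values as-is.
    UriBuilder b = UriBuilder.fromUri(original).replaceQuery("");
    for (String pair : original.getQuery().split("&")) {
      final String[] kv = pair.split("=", 2);
      b = b.queryParam(kv[0].toLowerCase(), kv.length > 1 ? kv[1] : "");
    }

    // Prints .../webhdfs/v1/foo?op=LISTSTATUS&buffersize=4096
    System.out.println(b.build());
  }
}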

+ 57 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -27,9 +27,12 @@ import java.net.HttpURLConnection;
 import java.net.URI;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.net.URL;
+import java.util.Arrays;
+import java.util.List;
 import java.util.Map;
 import java.util.Map;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -45,6 +48,7 @@ import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
 import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
@@ -54,7 +58,9 @@ import org.apache.hadoop.hdfs.web.resources.DstPathParam;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.GroupParam;
 import org.apache.hadoop.hdfs.web.resources.GroupParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
+import org.apache.hadoop.hdfs.web.resources.LengthParam;
 import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
 import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
+import org.apache.hadoop.hdfs.web.resources.OffsetParam;
 import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
 import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
 import org.apache.hadoop.hdfs.web.resources.OwnerParam;
 import org.apache.hadoop.hdfs.web.resources.OwnerParam;
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.hdfs.web.resources.Param;
@@ -63,13 +69,16 @@ import org.apache.hadoop.hdfs.web.resources.PostOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
 import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
 import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
 import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
+import org.apache.hadoop.hdfs.web.resources.RenewerParam;
 import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Progressable;
 import org.mortbay.util.ajax.JSON;
 import org.mortbay.util.ajax.JSON;
 
 
@@ -82,17 +91,24 @@ public class WebHdfsFileSystem extends HftpFileSystem {
 
 
   private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
   private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
 
 
-  private UserGroupInformation ugi;
+  private final UserGroupInformation ugi;
   private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
   private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
   protected Path workingDir;
   protected Path workingDir;
 
 
+  {
+    try {
+      ugi = UserGroupInformation.getCurrentUser();
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
   @Override
   @Override
   public synchronized void initialize(URI uri, Configuration conf
   public synchronized void initialize(URI uri, Configuration conf
       ) throws IOException {
       ) throws IOException {
     super.initialize(uri, conf);
     super.initialize(uri, conf);
     setConf(conf);
     setConf(conf);
 
 
-    ugi = UserGroupInformation.getCurrentUser();
     this.workingDir = getHomeDirectory();
     this.workingDir = getHomeDirectory();
   }
   }
 
 
@@ -163,11 +179,11 @@ public class WebHdfsFileSystem extends HftpFileSystem {
     }
     }
   }
   }
 
 
-  private URL toUrl(final HttpOpParam.Op op, final Path fspath,
+  URL toUrl(final HttpOpParam.Op op, final Path fspath,
       final Param<?,?>... parameters) throws IOException {
       final Param<?,?>... parameters) throws IOException {
     //initialize URI path and query
     //initialize URI path and query
     final String path = "/" + PATH_PREFIX
     final String path = "/" + PATH_PREFIX
-        + makeQualified(fspath).toUri().getPath();
+        + (fspath == null? "/": makeQualified(fspath).toUri().getPath());
     final String query = op.toQueryString()
     final String query = op.toQueryString()
         + '&' + new UserParam(ugi)
         + '&' + new UserParam(ugi)
         + Param.toSortedString("&", parameters);
         + Param.toSortedString("&", parameters);
@@ -396,4 +412,41 @@ public class WebHdfsFileSystem extends HftpFileSystem {
     }
     }
     return statuses;
     return statuses;
   }
   }
+
+  @Override
+  public Token<DelegationTokenIdentifier> getDelegationToken(final String renewer
+      ) throws IOException {
+    final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
+    final Map<String, Object> m = run(op, null, new RenewerParam(renewer));
+    final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m); 
+    token.setService(new Text(getCanonicalServiceName()));
+    return token;
+  }
+
+  @Override
+  public List<Token<?>> getDelegationTokens(final String renewer
+      ) throws IOException {
+    final Token<?>[] t = {getDelegationToken(renewer)};
+    return Arrays.asList(t);
+  }
+
+  @Override
+  public BlockLocation[] getFileBlockLocations(final FileStatus status,
+      final long offset, final long length) throws IOException {
+    if (status == null) {
+      return null;
+    }
+    return getFileBlockLocations(status.getPath(), offset, length);
+  }
+
+  @Override
+  public BlockLocation[] getFileBlockLocations(final Path p, 
+      final long offset, final long length) throws IOException {
+    statistics.incrementReadOps(1);
+
+    final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS;
+    final Map<String, Object> m = run(op, p, new OffsetParam(offset),
+        new LengthParam(length));
+    return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m));
+  }
 }
 }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.web.resources;
 /** Access time parameter. */
 /** Access time parameter. */
 public class AccessTimeParam extends LongParam {
 public class AccessTimeParam extends LongParam {
   /** Parameter name. */
   /** Parameter name. */
-  public static final String NAME = "accessTime";
+  public static final String NAME = "accesstime";
   /** Default parameter value. */
   /** Default parameter value. */
   public static final String DEFAULT = "-1";
   public static final String DEFAULT = "-1";
 
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java

@@ -25,7 +25,7 @@ import org.apache.hadoop.conf.Configuration;
 /** Block size parameter. */
 /** Block size parameter. */
 public class BlockSizeParam extends LongParam {
 public class BlockSizeParam extends LongParam {
   /** Parameter name. */
   /** Parameter name. */
-  public static final String NAME = "blockSize";
+  public static final String NAME = "blocksize";
   /** Default parameter value. */
   /** Default parameter value. */
   public static final String DEFAULT = NULL;
   public static final String DEFAULT = NULL;
 
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java

@@ -23,7 +23,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 /** Buffer size parameter. */
 /** Buffer size parameter. */
 public class BufferSizeParam extends IntegerParam {
 public class BufferSizeParam extends IntegerParam {
   /** Parameter name. */
   /** Parameter name. */
-  public static final String NAME = "bufferSize";
+  public static final String NAME = "buffersize";
   /** Default parameter value. */
   /** Default parameter value. */
   public static final String DEFAULT = NULL;
   public static final String DEFAULT = NULL;
 
 

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java

@@ -17,13 +17,12 @@
  */
  */
 package org.apache.hadoop.hdfs.web.resources;
 package org.apache.hadoop.hdfs.web.resources;
 
 
-import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
 
 /** Delegation token parameter. */
 /** Delegation token parameter. */
 public class DelegationParam extends StringParam {
 public class DelegationParam extends StringParam {
   /** Parameter name. */
   /** Parameter name. */
-  public static final String NAME = JspHelper.DELEGATION_PARAMETER_NAME;
+  public static final String NAME = "delegation";
   /** Default parameter value. */
   /** Default parameter value. */
   public static final String DEFAULT = "";
   public static final String DEFAULT = "";
 
 

+ 0 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java

@@ -21,9 +21,6 @@ import java.net.HttpURLConnection;
 
 
 /** Http DELETE operation parameter. */
 /** Http DELETE operation parameter. */
 public class DeleteOpParam extends HttpOpParam<DeleteOpParam.Op> {
 public class DeleteOpParam extends HttpOpParam<DeleteOpParam.Op> {
-  /** Parameter name. */
-  public static final String NAME = "deleteOp";
-
   /** Delete operations. */
   /** Delete operations. */
   public static enum Op implements HttpOpParam.Op {
   public static enum Op implements HttpOpParam.Op {
     DELETE(HttpURLConnection.HTTP_OK),
     DELETE(HttpURLConnection.HTTP_OK),

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java

@@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Path;
 /** Destination path parameter. */
 /** Destination path parameter. */
 public class DstPathParam extends StringParam {
 public class DstPathParam extends StringParam {
   /** Parameter name. */
   /** Parameter name. */
-  public static final String NAME = "dstPath";
+  public static final String NAME = "dstpath";
   /** Default parameter value. */
   /** Default parameter value. */
   public static final String DEFAULT = "";
   public static final String DEFAULT = "";
 
 

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java

@@ -21,16 +21,16 @@ import java.net.HttpURLConnection;
 
 
 /** Http GET operation parameter. */
 /** Http GET operation parameter. */
 public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
 public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
-  /** Parameter name. */
-  public static final String NAME = "getOp";
-
   /** Get operations. */
   /** Get operations. */
   public static enum Op implements HttpOpParam.Op {
   public static enum Op implements HttpOpParam.Op {
     OPEN(HttpURLConnection.HTTP_OK),
     OPEN(HttpURLConnection.HTTP_OK),
+    GETFILEBLOCKLOCATIONS(HttpURLConnection.HTTP_OK),
 
 
     GETFILESTATUS(HttpURLConnection.HTTP_OK),
     GETFILESTATUS(HttpURLConnection.HTTP_OK),
     LISTSTATUS(HttpURLConnection.HTTP_OK),
     LISTSTATUS(HttpURLConnection.HTTP_OK),
 
 
+    GETDELEGATIONTOKEN(HttpURLConnection.HTTP_OK),
+
     NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);
     NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 
 
     final int expectedHttpResponseCode;
     final int expectedHttpResponseCode;

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java

@@ -20,6 +20,9 @@ package org.apache.hadoop.hdfs.web.resources;
 /** Http operation parameter. */
 /** Http operation parameter. */
 public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
 public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
     extends EnumParam<E> {
     extends EnumParam<E> {
+  /** Parameter name. */
+  public static final String NAME = "op";
+
   /** Default parameter value. */
   /** Default parameter value. */
   public static final String DEFAULT = NULL;
   public static final String DEFAULT = NULL;
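
Note: after this change every operation enum shares the single query parameter name "op" (instead of getOp/putOp/postOp/deleteOp), and, together with the renames above and below, all WebHDFS parameter names are lower case. A small illustrative sketch of assembling request URLs under that convention; the host, port, file path and renewer are placeholders, and the base path assumes WebHdfsFileSystem's /webhdfs/v1 prefix.

import java.net.URL;

public class WebHdfsUrlSketch {
  public static void main(String[] args) throws Exception {
    final String base = "http://namenode:50070/webhdfs/v1";

    // The shared "op" parameter selects the operation; other names are lower case.
    final URL getToken =
        new URL(base + "/?op=GETDELEGATIONTOKEN&renewer=hdfs");
    final URL blockLocations =
        new URL(base + "/user/alice/data.txt?op=GETFILEBLOCKLOCATIONS&offset=0&length=1024");

    System.out.println(getToken);
    System.out.println(blockLocations);
  }
}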
 
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.hdfs.web.resources;
 /** Modification time parameter. */
 /** Modification time parameter. */
 public class ModificationTimeParam extends LongParam {
 public class ModificationTimeParam extends LongParam {
   /** Parameter name. */
   /** Parameter name. */
-  public static final String NAME = "modificationTime";
+  public static final String NAME = "modificationtime";
   /** Default parameter value. */
   /** Default parameter value. */
   public static final String DEFAULT = "-1";
   public static final String DEFAULT = "-1";
 
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java

@@ -17,7 +17,7 @@
  */
  */
 package org.apache.hadoop.hdfs.web.resources;
 package org.apache.hadoop.hdfs.web.resources;
 
 
-/** Recursive parameter. */
+/** Overwrite parameter. */
 public class OverwriteParam extends BooleanParam {
 public class OverwriteParam extends BooleanParam {
   /** Parameter name. */
   /** Parameter name. */
   public static final String NAME = "overwrite";
   public static final String NAME = "overwrite";

+ 0 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java

@@ -21,9 +21,6 @@ import java.net.HttpURLConnection;
 
 
 /** Http POST operation parameter. */
 /** Http POST operation parameter. */
 public class PostOpParam extends HttpOpParam<PostOpParam.Op> {
 public class PostOpParam extends HttpOpParam<PostOpParam.Op> {
-  /** Parameter name. */
-  public static final String NAME = "postOp";
-
   /** Post operations. */
   /** Post operations. */
   public static enum Op implements HttpOpParam.Op {
   public static enum Op implements HttpOpParam.Op {
     APPEND(HttpURLConnection.HTTP_OK),
     APPEND(HttpURLConnection.HTTP_OK),

+ 0 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java

@@ -21,9 +21,6 @@ import java.net.HttpURLConnection;
 
 
 /** Http POST operation parameter. */
 /** Http POST operation parameter. */
 public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
 public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
-  /** Parameter name. */
-  public static final String NAME = "putOp";
-
   /** Put operations. */
   /** Put operations. */
   public static enum Op implements HttpOpParam.Op {
   public static enum Op implements HttpOpParam.Op {
     CREATE(true, HttpURLConnection.HTTP_CREATED),
     CREATE(true, HttpURLConnection.HTTP_CREATED),

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java

@@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Options;
 /** Rename option set parameter. */
 /** Rename option set parameter. */
 public class RenameOptionSetParam extends EnumSetParam<Options.Rename> {
 public class RenameOptionSetParam extends EnumSetParam<Options.Rename> {
   /** Parameter name. */
   /** Parameter name. */
-  public static final String NAME = "renameOptions";
+  public static final String NAME = "renameoptions";
   /** Default parameter value. */
   /** Default parameter value. */
   public static final String DEFAULT = "";
   public static final String DEFAULT = "";
 
 

+ 41 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenewerParam.java

@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Renewer parameter. */
+public class RenewerParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "renewer";
+  /** Default parameter value. */
+  public static final String DEFAULT = NULL;
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public RenewerParam(final String str) {
+    super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}

+ 0 - 20
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -683,24 +683,4 @@ creations/deletions), or "all".</description>
   </description>
   </description>
 </property>
 </property>
 
 
-<property>
-  <name>dfs.web.authentication.kerberos.principal</name>
-  <value>HTTP/${dfs.web.hostname}@${kerberos.realm}</value>
-  <description>
-    The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-
-    The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-    HTTP SPENGO specification.
-  </description>
-</property>
-
-<property>
-  <name>dfs.web.authentication.kerberos.keytab</name>
-  <value>${user.home}/dfs.web.keytab</value>
-  <description>
-    The Kerberos keytab file with the credentials for the
-    HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-  </description>
-</property>
-
 </configuration>
 </configuration>
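
Note: the two Kerberos properties deleted from hdfs-default.xml above remain valid configuration keys for the HTTP SPNEGO authentication setup; they simply no longer ship with default values, so secure deployments must define them in their site configuration. A hedged sketch of supplying them programmatically; the principal, realm and keytab path are placeholders, and real clusters would normally put these in hdfs-site.xml instead.

import org.apache.hadoop.conf.Configuration;

public class WebAuthConfSketch {
  public static void main(String[] args) {
    final Configuration conf = new Configuration();

    // Placeholder values; the key names match the properties removed above.
    conf.set("dfs.web.authentication.kerberos.principal",
        "HTTP/namenode.example.com@EXAMPLE.COM");
    conf.set("dfs.web.authentication.kerberos.keytab",
        "/etc/security/keytab/spnego.service.keytab");

    System.out.println(conf.get("dfs.web.authentication.kerberos.principal"));
  }
}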

+ 30 - 20
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java

@@ -72,6 +72,7 @@ public class TestDFSPermission extends TestCase {
   final private static Path NON_EXISTENT_FILE = new Path("/NonExistentFile");
   final private static Path NON_EXISTENT_FILE = new Path("/NonExistentFile");
 
 
   private FileSystem fs;
   private FileSystem fs;
+  private MiniDFSCluster cluster;
   private static Random r;
   private static Random r;
 
 
   static {
   static {
@@ -105,18 +106,25 @@ public class TestDFSPermission extends TestCase {
     }
     }
   }
   }
 
 
+  @Override
+  public void setUp() throws IOException {
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    cluster.waitActive();
+  }
+  
+  @Override
+  public void tearDown() throws IOException {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+  
   /** This tests if permission setting in create, mkdir, and 
   /** This tests if permission setting in create, mkdir, and 
    * setPermission works correctly
    * setPermission works correctly
    */
    */
   public void testPermissionSetting() throws Exception {
   public void testPermissionSetting() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
-    try {
-      cluster.waitActive();
-      testPermissionSetting(OpType.CREATE); // test file creation
-      testPermissionSetting(OpType.MKDIRS); // test directory creation
-    } finally {
-      cluster.shutdown();
-    }
+    testPermissionSetting(OpType.CREATE); // test file creation
+    testPermissionSetting(OpType.MKDIRS); // test directory creation
   }
   }
 
 
   private void initFileSystem(short umask) throws Exception {
   private void initFileSystem(short umask) throws Exception {
@@ -245,17 +253,22 @@ public class TestDFSPermission extends TestCase {
     }
     }
   }
   }
 
 
+  /**
+   * check that ImmutableFsPermission can be used as the argument
+   * to setPermission
+   */
+  public void testImmutableFsPermission() throws IOException {
+    fs = FileSystem.get(conf);
+
+    // set the permission of the root to be world-wide rwx
+    fs.setPermission(new Path("/"),
+        FsPermission.createImmutable((short)0777));
+  }
+  
   /* check if the ownership of a file/directory is set correctly */
   /* check if the ownership of a file/directory is set correctly */
   public void testOwnership() throws Exception {
   public void testOwnership() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
-    try {
-      cluster.waitActive();
-      testOwnership(OpType.CREATE); // test file creation
-      testOwnership(OpType.MKDIRS); // test directory creation
-    } finally {
-      fs.close();
-      cluster.shutdown();
-    }
+    testOwnership(OpType.CREATE); // test file creation
+    testOwnership(OpType.MKDIRS); // test directory creation
   }
   }
 
 
   /* change a file/directory's owner and group.
   /* change a file/directory's owner and group.
@@ -342,9 +355,7 @@ public class TestDFSPermission extends TestCase {
   /* Check if namenode performs permission checking correctly for
   /* Check if namenode performs permission checking correctly for
    * superuser, file owner, group owner, and other users */
    * superuser, file owner, group owner, and other users */
   public void testPermissionChecking() throws Exception {
   public void testPermissionChecking() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     try {
     try {
-      cluster.waitActive();
       fs = FileSystem.get(conf);
       fs = FileSystem.get(conf);
 
 
       // set the permission of the root to be world-wide rwx
       // set the permission of the root to be world-wide rwx
@@ -401,7 +412,6 @@ public class TestDFSPermission extends TestCase {
           parentPermissions, permissions, parentPaths, filePaths, dirPaths);
           parentPermissions, permissions, parentPaths, filePaths, dirPaths);
     } finally {
     } finally {
       fs.close();
       fs.close();
-      cluster.shutdown();
     }
     }
   }
   }
 
 

+ 129 - 71
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java

@@ -29,8 +29,7 @@ import java.util.Collection;
 import java.util.Iterator;
 import java.util.Iterator;
 import java.util.List;
 import java.util.List;
 
 
-import junit.framework.Assert;
-
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -40,8 +39,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockLocation;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
-
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
 
 public class TestDFSUtil {
 public class TestDFSUtil {
   /**
   /**
@@ -76,79 +74,141 @@ public class TestDFSUtil {
       }
       }
     }
     }
 
 
-    assertTrue("expected 1 corrupt files but got " + corruptCount, 
-               corruptCount == 1);
-    
+    assertTrue("expected 1 corrupt files but got " + corruptCount,
+        corruptCount == 1);
+
     // test an empty location
     // test an empty location
     bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
     bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
     assertEquals(0, bs.length);
     assertEquals(0, bs.length);
   }
   }
 
 
-  /** 
-   * Test for
-   * {@link DFSUtil#getNameServiceIds(Configuration)}
-   * {@link DFSUtil#getNameServiceId(Configuration)}
-   * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
+
+  private Configuration setupAddress(String key) {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
+    conf.set(DFSUtil.getNameServiceIdKey(key, "nn1"), "localhost:9000");
+    return conf;
+  }
+
+  /**
+   * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
+   * the nameserviceId from the configuration is returned
    */
    */
   @Test
   @Test
-  public void testMultipleNamenodes() throws IOException {
+  public void getNameServiceId() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
+  }
+  
+  /**
+   * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure the
+   * nameserviceId for a namenode is determined by matching the address with the
+   * local node's address
+   */
+  @Test
+  public void getNameNodeNameServiceId() {
+    Configuration conf = setupAddress(DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals("nn1", DFSUtil.getNamenodeNameServiceId(conf));
+  }
+
+  /**
+   * Test {@link DFSUtil#getBackupNameServiceId(Configuration)} to ensure the
+   * nameserviceId for the backup node is determined by matching the address
+   * with the local node's address
+   */
+  @Test
+  public void getBackupNameServiceId() {
+    Configuration conf = setupAddress(DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+    assertEquals("nn1", DFSUtil.getBackupNameServiceId(conf));
+  }
+
+  /**
+   * Test {@link DFSUtil#getSecondaryNameServiceId(Configuration)} to ensure the
+   * nameserviceId for the secondary namenode is determined by matching the
+   * address with the local node's address
+   */
+  @Test
+  public void getSecondaryNameServiceId() {
+    Configuration conf = setupAddress(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+    assertEquals("nn1", DFSUtil.getSecondaryNameServiceId(conf));
+  }
+
+  /**
+   * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure an
+   * exception is thrown when multiple rpc addresses match the local node's
+   * address
+   */
+  @Test(expected = HadoopIllegalArgumentException.class)
+  public void testGetNameServiceIdException() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
+        "localhost:9000");
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
+        "localhost:9001");
+    DFSUtil.getNamenodeNameServiceId(conf);
+    fail("Expected exception is not thrown");
+  }
+
+  /**
+   * Test {@link DFSUtil#getNameServiceIds(Configuration)}
+   */
+  @Test
+  public void testGetNameServiceIds() {
     HdfsConfiguration conf = new HdfsConfiguration();
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
-    
-    // Test - The configured nameserviceIds are returned
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
     Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
     Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
     Iterator<String> it = nameserviceIds.iterator();
     Iterator<String> it = nameserviceIds.iterator();
     assertEquals(2, nameserviceIds.size());
     assertEquals(2, nameserviceIds.size());
     assertEquals("nn1", it.next().toString());
     assertEquals("nn1", it.next().toString());
     assertEquals("nn2", it.next().toString());
     assertEquals("nn2", it.next().toString());
-    
-    // Tests default nameserviceId is returned
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
-    assertEquals("nn1", DFSUtil.getNameServiceId(conf));
-    
+  }
+
+  /**
+   * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
+   * {@link DFSUtil#getNameServiceIdFromAddress(Configuration, InetSocketAddress, String...)}
+   */
+  @Test
+  public void testMultipleNamenodes() throws IOException {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1,nn2");
     // Test - configured list of namenodes are returned
     // Test - configured list of namenodes are returned
     final String NN1_ADDRESS = "localhost:9000";
     final String NN1_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
     final String NN2_ADDRESS = "localhost:9001";
     final String NN3_ADDRESS = "localhost:9002";
     final String NN3_ADDRESS = "localhost:9002";
-    conf.set(DFSUtil.getNameServiceIdKey(
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"), NN1_ADDRESS);
-    conf.set(DFSUtil.getNameServiceIdKey(
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS);
-    
-    Collection<InetSocketAddress> nnAddresses = 
-      DFSUtil.getNNServiceRpcAddresses(conf);
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
+        NN1_ADDRESS);
+    conf.set(DFSUtil.getNameServiceIdKey(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
+        NN2_ADDRESS);
+
+    Collection<InetSocketAddress> nnAddresses = DFSUtil
+        .getNNServiceRpcAddresses(conf);
     assertEquals(2, nnAddresses.size());
     assertEquals(2, nnAddresses.size());
     Iterator<InetSocketAddress> iterator = nnAddresses.iterator();
     Iterator<InetSocketAddress> iterator = nnAddresses.iterator();
-    assertEquals(2, nameserviceIds.size());
     InetSocketAddress addr = iterator.next();
     InetSocketAddress addr = iterator.next();
     assertEquals("localhost", addr.getHostName());
     assertEquals("localhost", addr.getHostName());
     assertEquals(9000, addr.getPort());
     assertEquals(9000, addr.getPort());
     addr = iterator.next();
     addr = iterator.next();
     assertEquals("localhost", addr.getHostName());
     assertEquals("localhost", addr.getHostName());
     assertEquals(9001, addr.getPort());
     assertEquals(9001, addr.getPort());
-    
+
     // Test - can look up nameservice ID from service address
     // Test - can look up nameservice ID from service address
-    InetSocketAddress testAddress1 = NetUtils.createSocketAddr(NN1_ADDRESS);
-    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(
-        conf, testAddress1,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertEquals("nn1", nameserviceId);
-    InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
-    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
-        conf, testAddress2,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertEquals("nn2", nameserviceId);
-    InetSocketAddress testAddress3 = NetUtils.createSocketAddr(NN3_ADDRESS);
-    nameserviceId = DFSUtil.getNameServiceIdFromAddress(
-        conf, testAddress3,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    assertNull(nameserviceId);
+    checkNameServiceId(conf, NN1_ADDRESS, "nn1");
+    checkNameServiceId(conf, NN2_ADDRESS, "nn2");
+    checkNameServiceId(conf, NN3_ADDRESS, null);
   }
   }
-  
-  /** 
+
+  public void checkNameServiceId(Configuration conf, String addr,
+      String expectedNameServiceId) {
+    InetSocketAddress s = NetUtils.createSocketAddr(addr);
+    String nameserviceId = DFSUtil.getNameServiceIdFromAddress(conf, s,
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
+    assertEquals(expectedNameServiceId, nameserviceId);
+  }
+
+  /**
    * Test for
    * Test for
    * {@link DFSUtil#isDefaultNamenodeAddress(Configuration, InetSocketAddress, String...)}
    * {@link DFSUtil#isDefaultNamenodeAddress(Configuration, InetSocketAddress, String...)}
    */
    */
@@ -157,27 +217,25 @@ public class TestDFSUtil {
     HdfsConfiguration conf = new HdfsConfiguration();
     HdfsConfiguration conf = new HdfsConfiguration();
     final String DEFAULT_ADDRESS = "localhost:9000";
     final String DEFAULT_ADDRESS = "localhost:9000";
     final String NN2_ADDRESS = "localhost:9001";
     final String NN2_ADDRESS = "localhost:9001";
-    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
-    
+    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, DEFAULT_ADDRESS);
+
     InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
     InetSocketAddress testAddress1 = NetUtils.createSocketAddr(DEFAULT_ADDRESS);
     boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
     boolean isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress1,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertTrue(isDefault);
     assertTrue(isDefault);
     InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
     InetSocketAddress testAddress2 = NetUtils.createSocketAddr(NN2_ADDRESS);
     isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
     isDefault = DFSUtil.isDefaultNamenodeAddress(conf, testAddress2,
-        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertFalse(isDefault);
     assertFalse(isDefault);
   }
   }
-  
+
   /** Tests to ensure default namenode is used as fallback */
   /** Tests to ensure default namenode is used as fallback */
   @Test
   @Test
   public void testDefaultNamenode() throws IOException {
   public void testDefaultNamenode() throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
     HdfsConfiguration conf = new HdfsConfiguration();
     final String hdfs_default = "hdfs://localhost:9999/";
     final String hdfs_default = "hdfs://localhost:9999/";
-    conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, hdfs_default);
-    // If DFSConfigKeys.DFS_FEDERATION_NAMESERVICES is not set, verify that 
+    conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
+    // If DFS_FEDERATION_NAMESERVICES is not set, verify that
     // default namenode address is returned.
     // default namenode address is returned.
     List<InetSocketAddress> addrList = DFSUtil.getNNServiceRpcAddresses(conf);
     List<InetSocketAddress> addrList = DFSUtil.getNNServiceRpcAddresses(conf);
     assertEquals(1, addrList.size());
     assertEquals(1, addrList.size());
@@ -191,26 +249,26 @@ public class TestDFSUtil {
   @Test
   @Test
   public void testConfModification() throws IOException {
   public void testConfModification() throws IOException {
     final HdfsConfiguration conf = new HdfsConfiguration();
     final HdfsConfiguration conf = new HdfsConfiguration();
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "nn1");
-    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID, "nn1");
-    final String nameserviceId = DFSUtil.getNameServiceId(conf);
-    
+    conf.set(DFS_FEDERATION_NAMESERVICES, "nn1");
+    conf.set(DFS_FEDERATION_NAMESERVICE_ID, "nn1");
+    final String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
+
     // Set the nameservice specific keys with nameserviceId in the config key
     // Set the nameservice specific keys with nameserviceId in the config key
     for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
     for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
       // Note: value is same as the key
       // Note: value is same as the key
       conf.set(DFSUtil.getNameServiceIdKey(key, nameserviceId), key);
       conf.set(DFSUtil.getNameServiceIdKey(key, nameserviceId), key);
     }
     }
-    
+
     // Initialize generic keys from specific keys
     // Initialize generic keys from specific keys
-    NameNode.initializeGenericKeys(conf);
-    
+    NameNode.initializeGenericKeys(conf, nameserviceId);
+
     // Retrieve the keys without nameserviceId and Ensure generic keys are set
     // Retrieve the keys without nameserviceId and Ensure generic keys are set
     // to the correct value
     // to the correct value
     for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
     for (String key : NameNode.NAMESERVICE_SPECIFIC_KEYS) {
       assertEquals(key, conf.get(key));
       assertEquals(key, conf.get(key));
     }
     }
   }
   }
-  
+
   /**
   /**
    * Tests for empty configuration, an exception is thrown from
    * Tests for empty configuration, an exception is thrown from
    * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
    * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
@@ -238,16 +296,16 @@ public class TestDFSUtil {
     } catch (IOException expected) {
     } catch (IOException expected) {
     }
     }
   }
   }
-  
+
   @Test
   @Test
-  public void testGetServerInfo(){
+  public void testGetServerInfo() {
     HdfsConfiguration conf = new HdfsConfiguration();
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
     UserGroupInformation.setConfiguration(conf);
     String httpsport = DFSUtil.getInfoServer(null, conf, true);
     String httpsport = DFSUtil.getInfoServer(null, conf, true);
-    Assert.assertEquals("0.0.0.0:50470", httpsport);
+    assertEquals("0.0.0.0:50470", httpsport);
     String httpport = DFSUtil.getInfoServer(null, conf, false);
     String httpport = DFSUtil.getInfoServer(null, conf, false);
-    Assert.assertEquals("0.0.0.0:50070", httpport);
+    assertEquals("0.0.0.0:50070", httpport);
   }
   }
 
 
 }
 }
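
The TestDFSUtil changes above revolve around per-nameservice configuration keys: DFSUtil.getNameServiceIdKey(key, id) suffixes a generic key with a nameservice ID, and the resolution helpers match addresses against those suffixed keys. A minimal sketch of the pattern, using only calls that appear in this patch (host names and ports are illustrative):

    // Sketch only; mirrors how the tests above configure a federated cluster.
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
    conf.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1"), "host1:9000");
    conf.set(DFSUtil.getNameServiceIdKey(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2"), "host2:9000");
    // Resolve all configured namenode RPC addresses (throws IOException if none).
    for (InetSocketAddress addr : DFSUtil.getNNServiceRpcAddresses(conf)) {
      System.out.println(addr.getHostName() + ":" + addr.getPort());
    }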

+ 15 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java

@@ -17,6 +17,10 @@
  */
  */
 package org.apache.hadoop.hdfs;
 package org.apache.hadoop.hdfs;
 
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.OutputStream;
 import java.io.OutputStream;
 import java.security.PrivilegedExceptionAction;
 import java.security.PrivilegedExceptionAction;
 
 
@@ -24,17 +28,15 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
-
 import org.junit.Test;
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 
 /** A class for testing quota-related commands */
 /** A class for testing quota-related commands */
 public class TestQuota {
 public class TestQuota {
@@ -841,6 +843,14 @@ public class TestQuota {
     DFSAdmin admin = new DFSAdmin(conf);
     DFSAdmin admin = new DFSAdmin(conf);
 
 
     try {
     try {
+      
+      // Test the default namespace quota
+      long nsQuota = FSImageTestUtil.getNSQuota(cluster.getNameNode()
+          .getNamesystem());
+      assertTrue(
+          "Default namespace quota expected as long max. But the value is :"
+              + nsQuota, nsQuota == Long.MAX_VALUE);
+      
       Path dir = new Path("/test");
       Path dir = new Path("/test");
       boolean exceededQuota = false;
       boolean exceededQuota = false;
       ContentSummary c;
       ContentSummary c;
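
The new assertion above checks that the default namespace quota on the root directory is Long.MAX_VALUE, i.e. unlimited. For comparison, a concrete quota can be set through DFSAdmin and read back via ContentSummary; a rough sketch, assuming a running cluster with a Configuration conf and FileSystem fs already in scope (the path and quota value are illustrative, error handling omitted):

    // Sketch: set a namespace quota of 100 on /test and read it back.
    DFSAdmin admin = new DFSAdmin(conf);
    admin.run(new String[] {"-setQuota", "100", "/test"});
    ContentSummary summary = fs.getContentSummary(new Path("/test"));
    System.out.println("namespace quota = " + summary.getQuota());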

+ 33 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java

@@ -23,12 +23,12 @@ package org.apache.hadoop.hdfs.security;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.io.IOException;
+import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.security.PrivilegedExceptionAction;
 
 
-import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -38,12 +38,16 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.Token;
+import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.Test;
 
 
@@ -56,12 +60,13 @@ public class TestDelegationToken {
   @Before
   @Before
   public void setUp() throws Exception {
   public void setUp() throws Exception {
     config = new HdfsConfiguration();
     config = new HdfsConfiguration();
+    config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
     config.set("hadoop.security.auth_to_local",
     config.set("hadoop.security.auth_to_local",
         "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
         "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
-    cluster = new MiniDFSCluster.Builder(config).build();
+    cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
     cluster.waitActive();
     cluster.waitActive();
     dtSecretManager = NameNodeAdapter.getDtSecretManager(
     dtSecretManager = NameNodeAdapter.getDtSecretManager(
         cluster.getNamesystem());
         cluster.getNamesystem());
@@ -153,6 +158,31 @@ public class TestDelegationToken {
     dtSecretManager.renewToken(token, "JobTracker");
     dtSecretManager.renewToken(token, "JobTracker");
   }
   }
   
   
+  @Test
+  public void testDelegationTokenWebHdfsApi() throws Exception {
+    ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
+    final String uri = WebHdfsFileSystem.SCHEME  + "://"
+        + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    //get file system as JobTracker
+    final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
+        "JobTracker", new String[]{"user"});
+    final WebHdfsFileSystem webhdfs = ugi.doAs(
+        new PrivilegedExceptionAction<WebHdfsFileSystem>() {
+      @Override
+      public WebHdfsFileSystem run() throws Exception {
+        return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
+      }
+    });
+
+    final Token<DelegationTokenIdentifier> token = webhdfs.getDelegationToken("JobTracker");
+    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
+    byte[] tokenId = token.getIdentifier();
+    identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
+    LOG.info("A valid token should have non-null password, and should be renewed successfully");
+    Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
+    dtSecretManager.renewToken(token, "JobTracker");
+  }
+
   @Test
   @Test
   public void testDelegationTokenWithDoAs() throws Exception {
   public void testDelegationTokenWithDoAs() throws Exception {
     final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
     final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
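
The new testDelegationTokenWebHdfsApi above fetches a delegation token over the webhdfs:// scheme; note that dfs.webhdfs.enabled must be set on the NameNode, as setUp now does. Condensed, the client-side pattern looks roughly like this (the NameNode HTTP address is illustrative, error handling omitted):

    // Sketch of the client-side pattern the test exercises.
    final Configuration conf = new Configuration();
    final URI uri = new URI("webhdfs://namenode-host:50070");
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("JobTracker");
    FileSystem webhdfs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
      @Override
      public FileSystem run() throws Exception {
        return FileSystem.get(uri, conf);
      }
    });
    Token<?> token =
        ((WebHdfsFileSystem) webhdfs).getDelegationToken("JobTracker");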

+ 16 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java

@@ -18,31 +18,34 @@
 
 
 package org.apache.hadoop.hdfs.server.blockmanagement;
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 
-import junit.framework.TestCase;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.blockmanagement.Host2NodesMap;
+import org.junit.Before;
+import org.junit.Test;
 
 
-public class TestHost2NodesMap extends TestCase {
-  static private Host2NodesMap map = new Host2NodesMap();
-  private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
+public class TestHost2NodesMap {
+  private Host2NodesMap map = new Host2NodesMap();
+  private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
     new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
     new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
     new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
     new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
     new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
     new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
     new DatanodeDescriptor(new DatanodeID("h3:5030"), "/d1/r2"),
     new DatanodeDescriptor(new DatanodeID("h3:5030"), "/d1/r2"),
   };
   };
-  private final static DatanodeDescriptor NULL_NODE = null; 
-  private final static DatanodeDescriptor NODE = 
-    new DatanodeDescriptor(new DatanodeID("h3:5040"), "/d1/r4");
+  private final DatanodeDescriptor NULL_NODE = null; 
+  private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3:5040"),
+      "/d1/r4");
 
 
-  static {
+  @Before
+  public void setup() {
     for(DatanodeDescriptor node:dataNodes) {
     for(DatanodeDescriptor node:dataNodes) {
       map.add(node);
       map.add(node);
     }
     }
     map.add(NULL_NODE);
     map.add(NULL_NODE);
   }
   }
   
   
+  @Test
   public void testContains() throws Exception {
   public void testContains() throws Exception {
     for(int i=0; i<dataNodes.length; i++) {
     for(int i=0; i<dataNodes.length; i++) {
       assertTrue(map.contains(dataNodes[i]));
       assertTrue(map.contains(dataNodes[i]));
@@ -51,6 +54,7 @@ public class TestHost2NodesMap extends TestCase {
     assertFalse(map.contains(NODE));
     assertFalse(map.contains(NODE));
   }
   }
 
 
+  @Test
   public void testGetDatanodeByHost() throws Exception {
   public void testGetDatanodeByHost() throws Exception {
     assertTrue(map.getDatanodeByHost("h1")==dataNodes[0]);
     assertTrue(map.getDatanodeByHost("h1")==dataNodes[0]);
     assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
     assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
@@ -59,6 +63,7 @@ public class TestHost2NodesMap extends TestCase {
     assertTrue(null==map.getDatanodeByHost("h4"));
     assertTrue(null==map.getDatanodeByHost("h4"));
   }
   }
 
 
+  @Test
   public void testGetDatanodeByName() throws Exception {
   public void testGetDatanodeByName() throws Exception {
     assertTrue(map.getDatanodeByName("h1:5020")==dataNodes[0]);
     assertTrue(map.getDatanodeByName("h1:5020")==dataNodes[0]);
     assertTrue(map.getDatanodeByName("h1:5030")==null);
     assertTrue(map.getDatanodeByName("h1:5030")==null);
@@ -71,6 +76,7 @@ public class TestHost2NodesMap extends TestCase {
     assertTrue(map.getDatanodeByName(null)==null);
     assertTrue(map.getDatanodeByName(null)==null);
   }
   }
 
 
+  @Test
   public void testRemove() throws Exception {
   public void testRemove() throws Exception {
     assertFalse(map.remove(NODE));
     assertFalse(map.remove(NODE));
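
Several test classes in this commit follow the same JUnit 3 to JUnit 4 migration seen here: drop the junit.framework.TestCase base class and static initializer, move per-test setup into an @Before method, and annotate each test with @Test. A generic sketch of the resulting shape (the class and test names are illustrative, not from the patch):

    import static org.junit.Assert.assertNotNull;

    import org.junit.Before;
    import org.junit.Test;

    public class ExampleMigratedTest {
      private Host2NodesMap map;

      @Before
      public void setup() {          // replaces the old static initializer
        map = new Host2NodesMap();
      }

      @Test                          // required once TestCase is no longer extended
      public void testMapIsInitialized() {
        assertNotNull(map);
      }
    }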
     
     

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMulitipleNNDataBlockScanner.java

@@ -96,7 +96,8 @@ public class TestMulitipleNNDataBlockScanner {
 
 
       String bpidToShutdown = cluster.getNamesystem(2).getBlockPoolId();
       String bpidToShutdown = cluster.getNamesystem(2).getBlockPoolId();
       for (int i = 0; i < 2; i++) {
       for (int i = 0; i < 2; i++) {
-        String nsId = DFSUtil.getNameServiceId(cluster.getConfiguration(i));
+        String nsId = DFSUtil.getNamenodeNameServiceId(cluster
+            .getConfiguration(i));
         namenodesBuilder.append(nsId);
         namenodesBuilder.append(nsId);
         namenodesBuilder.append(",");
         namenodesBuilder.append(",");
       }
       }
@@ -116,7 +117,7 @@ public class TestMulitipleNNDataBlockScanner {
         LOG.info(ex.getMessage());
         LOG.info(ex.getMessage());
       }
       }
 
 
-      namenodesBuilder.append(DFSUtil.getNameServiceId(cluster
+      namenodesBuilder.append(DFSUtil.getNamenodeNameServiceId(cluster
           .getConfiguration(2)));
           .getConfiguration(2)));
       conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
       conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, namenodesBuilder
           .toString());
           .toString());

+ 10 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestReplicasMap.java

@@ -17,21 +17,24 @@
  */
  */
 package org.apache.hadoop.hdfs.server.datanode;
 package org.apache.hadoop.hdfs.server.datanode;
 
 
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
-import static org.junit.Assert.*;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.Test;
 
 
 /**
 /**
  * Unit test for ReplicasMap class
  * Unit test for ReplicasMap class
  */
  */
 public class TestReplicasMap {
 public class TestReplicasMap {
-  private static final ReplicasMap map = new ReplicasMap(TestReplicasMap.class);
-  private static final String bpid = "BP-TEST";
-  private static final  Block block = new Block(1234, 1234, 1234);
+  private final ReplicasMap map = new ReplicasMap(TestReplicasMap.class);
+  private final String bpid = "BP-TEST";
+  private final  Block block = new Block(1234, 1234, 1234);
   
   
-  @BeforeClass
-  public static void setup() {
+  @Before
+  public void setup() {
     map.add(bpid, new FinalizedReplica(block, null, null));
     map.add(bpid, new FinalizedReplica(block, null, null));
   }
   }
   
   

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java

@@ -412,4 +412,11 @@ public abstract class FSImageTestUtil {
   public static FSImage getFSImage(NameNode node) {
   public static FSImage getFSImage(NameNode node) {
     return node.getFSImage();
     return node.getFSImage();
   }
   }
+
+  /**
+   * Get the namespace quota.
+   */
+  public static long getNSQuota(FSNamesystem ns) {
+    return ns.dir.rootDir.getNsQuota();
+  }
 }
 }

+ 290 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java

@@ -0,0 +1,290 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
+import org.junit.Test;
+
+public class TestProcessCorruptBlocks {
+  /**
+   * The corrupt block has to be removed when the number of valid replicas
+   * matches the replication factor for the file. In this test, the above
+   * condition is triggered by reducing the replication factor.
+   * The test strategy : 
+   *   Bring up Cluster with 3 DataNodes
+   *   Create a file of replication factor 3 
+   *   Corrupt one replica of a block of the file 
+   *   Verify that there are still 2 good replicas and 1 corrupt replica
+   *    (corrupt replica should not be removed since number of good
+   *     replicas (2) is less than replication factor (3))
+   *   Set the replication factor to 2 
+   *   Verify that the corrupt replica is removed. 
+   *     (the corrupt replica should be removed since the number of good
+   *      replicas (2) is equal to the replication factor (2))
+   */
+  @Test
+  public void testWhenDecreasingReplication() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    FileSystem fs = cluster.getFileSystem();
+    final FSNamesystem namesystem = cluster.getNamesystem();
+
+    try {
+      final Path fileName = new Path("/foo1");
+      DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
+      DFSTestUtil.waitReplication(fs, fileName, (short) 3);
+
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+      corruptBlock(cluster, fs, fileName, 0, block);
+
+      DFSTestUtil.waitReplication(fs, fileName, (short) 2);
+
+      assertEquals(2, countReplicas(namesystem, block).liveReplicas());
+      assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
+
+      namesystem.setReplication(fileName.toString(), (short) 2);
+
+      // wait for 3 seconds so that all block reports are processed.
+      try {
+        Thread.sleep(3000);
+      } catch (InterruptedException ignored) {
+      }
+
+      assertEquals(2, countReplicas(namesystem, block).liveReplicas());
+      assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
+
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * The corrupt block has to be removed when the number of valid replicas
+   * matches replication factor for the file. In this test, the above 
+   * condition is achieved by increasing the number of good replicas by 
+   * replicating on a new Datanode. 
+   * The test strategy : 
+   *   Bring up Cluster with 3 DataNodes
+   *   Create a file  of replication factor 3
+   *   Corrupt one replica of a block of the file 
+   *   Verify that there are still 2 good replicas and 1 corrupt replica 
+   *     (corrupt replica should not be removed since number of good replicas
+   *      (2) is less  than replication factor (3)) 
+   *   Start a new data node 
+   *   Verify that a new replica is created and the corrupt replica is
+   *   removed.
+   * 
+   */
+  @Test
+  public void testByAddingAnExtraDataNode() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    FileSystem fs = cluster.getFileSystem();
+    final FSNamesystem namesystem = cluster.getNamesystem();
+    DataNodeProperties dnPropsFourth = cluster.stopDataNode(3);
+
+    try {
+      final Path fileName = new Path("/foo1");
+      DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
+      DFSTestUtil.waitReplication(fs, fileName, (short) 3);
+
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+      corruptBlock(cluster, fs, fileName, 0, block);
+
+      DFSTestUtil.waitReplication(fs, fileName, (short) 2);
+
+      assertEquals(2, countReplicas(namesystem, block).liveReplicas());
+      assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
+
+      cluster.restartDataNode(dnPropsFourth);
+
+      DFSTestUtil.waitReplication(fs, fileName, (short) 3);
+
+      assertEquals(3, countReplicas(namesystem, block).liveReplicas());
+      assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * The corrupt block has to be removed when the number of valid replicas
+   * matches replication factor for the file. The above condition should hold
+   * true as long as there is one good replica. This test verifies that.
+   * 
+   * The test strategy : 
+   *   Bring up Cluster with 2 DataNodes
+   *   Create a file of replication factor 2 
+   *   Corrupt one replica of a block of the file 
+   *   Verify that there is one good replica and 1 corrupt replica 
+   *     (corrupt replica should not be removed since number of good 
+   *     replicas (1) is less than replication factor (2)).
+   *   Set the replication factor to 1 
+   *   Verify that the corrupt replica is removed. 
+   *     (corrupt replica should  be removed since number of good
+   *      replicas (1) is equal to replication factor (1))
+   */
+  @Test
+  public void testWithReplicationFactorAsOne() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    FileSystem fs = cluster.getFileSystem();
+    final FSNamesystem namesystem = cluster.getNamesystem();
+
+    try {
+      final Path fileName = new Path("/foo1");
+      DFSTestUtil.createFile(fs, fileName, 2, (short) 2, 0L);
+      DFSTestUtil.waitReplication(fs, fileName, (short) 2);
+
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+      corruptBlock(cluster, fs, fileName, 0, block);
+
+      DFSTestUtil.waitReplication(fs, fileName, (short) 1);
+
+      assertEquals(1, countReplicas(namesystem, block).liveReplicas());
+      assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
+
+      namesystem.setReplication(fileName.toString(), (short) 1);
+
+      // wait for 3 seconds so that all block reports are processed.
+      try {
+        Thread.sleep(3000);
+      } catch (InterruptedException ignored) {
+      }
+
+      assertEquals(1, countReplicas(namesystem, block).liveReplicas());
+      assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
+
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * None of the corrupt replicas can be removed when all replicas are corrupt.
+   * 
+   * The test strategy : 
+   *    Bring up Cluster with 3 DataNodes
+   *    Create a file of replication factor 3 
+   *    Corrupt all three replicas 
+   *    Verify that all replicas are corrupt and 3 replicas are present.
+   *    Set the replication factor to 1 
+   *    Verify that all replicas are corrupt and 3 replicas are present.
+   */
+  @Test
+  public void testWithAllCorruptReplicas() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    FileSystem fs = cluster.getFileSystem();
+    final FSNamesystem namesystem = cluster.getNamesystem();
+
+    try {
+      final Path fileName = new Path("/foo1");
+      DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
+      DFSTestUtil.waitReplication(fs, fileName, (short) 3);
+
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
+      corruptBlock(cluster, fs, fileName, 0, block);
+
+      corruptBlock(cluster, fs, fileName, 1, block);
+
+      corruptBlock(cluster, fs, fileName, 2, block);
+
+      // wait for 3 seconds so that all block reports are processed.
+      try {
+        Thread.sleep(3000);
+      } catch (InterruptedException ignored) {
+      }
+
+      assertEquals(0, countReplicas(namesystem, block).liveReplicas());
+      assertEquals(3, countReplicas(namesystem, block).corruptReplicas());
+
+      namesystem.setReplication(fileName.toString(), (short) 1);
+
+      // wait for 3 seconds so that all block reports are processed.
+      try {
+        Thread.sleep(3000);
+      } catch (InterruptedException ignored) {
+      }
+
+      assertEquals(0, countReplicas(namesystem, block).liveReplicas());
+      assertEquals(3, countReplicas(namesystem, block).corruptReplicas());
+
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  private static NumberReplicas countReplicas(final FSNamesystem namesystem, ExtendedBlock block) {
+    return namesystem.getBlockManager().countNodes(block.getLocalBlock());
+  }
+
+  private void corruptBlock(MiniDFSCluster cluster, FileSystem fs, final Path fileName,
+      int dnIndex, ExtendedBlock block) throws IOException {
+    // corrupt the block on datanode dnIndex
+    // the datanode indexes change once the nodes are restarted,
+    // but the data directory does not change
+    assertTrue(MiniDFSCluster.corruptReplica(dnIndex, block));
+
+    DataNodeProperties dnProps = cluster.stopDataNode(0);
+
+    // Each datanode has multiple data dirs, check each
+    for (int dirIndex = 0; dirIndex < 2; dirIndex++) {
+      final String bpid = cluster.getNamesystem().getBlockPoolId();
+      File storageDir = MiniDFSCluster.getStorageDir(dnIndex, dirIndex);
+      File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+      File scanLogFile = new File(dataDir, "dncp_block_verification.log.curr");
+      if (scanLogFile.exists()) {
+        // wait for one minute for deletion to succeed;
+        for (int i = 0; !scanLogFile.delete(); i++) {
+          assertTrue("Could not delete log file in one minute", i < 60);
+          try {
+            Thread.sleep(1000);
+          } catch (InterruptedException ignored) {
+          }
+        }
+      }
+    }
+
+    // restart the datanode so the corrupt replica will be detected
+    cluster.restartDataNode(dnProps);
+  }
+}
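
A note on the fixed Thread.sleep(3000) calls above: they give block reports time to be processed, but a polling helper is less timing-sensitive. A hedged alternative sketch, reusing the countReplicas helper defined in this class (the timeout and poll interval are illustrative; this is not part of the patch):

    // Hypothetical helper, not part of the patch.
    private static void waitForCorruptReplicas(FSNamesystem ns, ExtendedBlock blk,
        int expectedCorrupt, long timeoutMs) throws InterruptedException {
      long deadline = System.currentTimeMillis() + timeoutMs;
      while (countReplicas(ns, blk).corruptReplicas() != expectedCorrupt) {
        if (System.currentTimeMillis() > deadline) {
          org.junit.Assert.fail("timed out waiting for " + expectedCorrupt
              + " corrupt replicas");
        }
        Thread.sleep(500);
      }
    }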

+ 44 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java

@@ -18,17 +18,23 @@
 
 
 package org.apache.hadoop.hdfs.web;
 package org.apache.hadoop.hdfs.web;
 
 
+import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.IOException;
+import java.io.InputStreamReader;
+import java.net.HttpURLConnection;
 import java.net.URI;
 import java.net.URI;
+import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 import java.security.PrivilegedExceptionAction;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 
 
@@ -114,4 +120,42 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
       // also okay for HDFS.
       // also okay for HDFS.
     }    
     }    
   }
   }
+  
+  public void testGetFileBlockLocations() throws IOException {
+    final String f = "/test/testGetFileBlockLocations";
+    createFile(path(f));
+    final BlockLocation[] computed = fs.getFileBlockLocations(new Path(f), 0L, 1L);
+    final BlockLocation[] expected = cluster.getFileSystem().getFileBlockLocations(
+        new Path(f), 0L, 1L);
+    assertEquals(expected.length, computed.length);
+    for(int i = 0; i < computed.length; i++) {
+      assertEquals(expected[i].toString(), computed[i].toString());
+    }
+  }
+
+  public void testCaseInsensitive() throws IOException {
+    final Path p = new Path("/test/testCaseInsensitive");
+    final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
+    final PutOpParam.Op op = PutOpParam.Op.MKDIRS;
+
+    // replace the query with mixed-case letters
+    final URL url = webhdfs.toUrl(op, p);
+    WebHdfsFileSystem.LOG.info("url      = " + url);
+    final URL replaced = new URL(url.toString().replace(op.toQueryString(),
+        "Op=mkDIrs"));
+    WebHdfsFileSystem.LOG.info("replaced = " + replaced);
+
+    //connect with the replaced URL.
+    final HttpURLConnection conn = (HttpURLConnection)replaced.openConnection();
+    conn.setRequestMethod(op.getType().toString());
+    conn.connect();
+    final BufferedReader in = new BufferedReader(new InputStreamReader(
+        conn.getInputStream()));
+    for(String line; (line = in.readLine()) != null; ) {
+      WebHdfsFileSystem.LOG.info("> " + line);
+    }
+
+    // check that the command succeeds.
+    assertTrue(fs.getFileStatus(p).isDirectory());
+  }
 }
 }

+ 102 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -29,6 +29,8 @@ Trunk (unreleased changes)
     findBugs, correct links to findBugs artifacts and no links to the
     findBugs, correct links to findBugs artifacts and no links to the
     artifacts when there are no warnings. (Tom White via vinodkv).
     artifacts when there are no warnings. (Tom White via vinodkv).
 
 
+    MAPREDUCE-3081. Fix vaidya startup script. (gkesavan via suhas).
+
 Release 0.23.0 - Unreleased
 Release 0.23.0 - Unreleased
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
@@ -70,6 +72,9 @@ Release 0.23.0 - Unreleased
     MAPREDUCE-2037. Capture intermediate progress, CPU and memory usage for
     MAPREDUCE-2037. Capture intermediate progress, CPU and memory usage for
     tasks. (Dick King via acmurthy) 
     tasks. (Dick King via acmurthy) 
 
 
+    MAPREDUCE-2930. Added the ability to be able to generate graphs from the
+    state-machine definitions. (Binglin Chang via vinodkv)
+
   IMPROVEMENTS
   IMPROVEMENTS
 
 
     MAPREDUCE-2187. Reporter sends progress during sort/merge. (Anupam Seth via
     MAPREDUCE-2187. Reporter sends progress during sort/merge. (Anupam Seth via
@@ -307,6 +312,15 @@ Release 0.23.0 - Unreleased
     MAPREDUCE-2726. Added job-file to the AM and JobHistoryServer web
     MAPREDUCE-2726. Added job-file to the AM and JobHistoryServer web
     interfaces. (Jeffrey Naisbitt via vinodkv)
     interfaces. (Jeffrey Naisbitt via vinodkv)
 
 
+    MAPREDUCE-3055. Simplified ApplicationAttemptId passing to
+    ApplicationMaster via environment variable. (vinodkv)
+
+    MAPREDUCE-3092. Removed a special comparator for JobIDs in JobHistory as
+    JobIDs are already comparable. (Devaraj K via vinodkv)
+
+    MAPREDUCE-3099. Add docs for setting up a single node MRv2 cluster.
+    (mahadev)
+
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
     MAPREDUCE-2026. Make JobTracker.getJobCounters() and
     MAPREDUCE-2026. Make JobTracker.getJobCounters() and
@@ -318,6 +332,9 @@ Release 0.23.0 - Unreleased
 
 
     MAPREDUCE-901. Efficient framework counters. (llu via acmurthy)
     MAPREDUCE-901. Efficient framework counters. (llu via acmurthy)
 
 
+    MAPREDUCE-2880. Improve classpath-construction for mapreduce AM and
+    containers. (Arun C Murthy via vinodkv)
+
   BUG FIXES
   BUG FIXES
 
 
     MAPREDUCE-2603. Disable High-Ram emulation in system tests. 
     MAPREDUCE-2603. Disable High-Ram emulation in system tests. 
@@ -1370,6 +1387,91 @@ Release 0.23.0 - Unreleased
     YarnClientProtocolProvider and ensured MiniMRYarnCluster sets JobHistory
     YarnClientProtocolProvider and ensured MiniMRYarnCluster sets JobHistory
     configuration for tests. (acmurthy) 
     configuration for tests. (acmurthy) 
 
 
+    MAPREDUCE-3018. Fixed -file option for streaming. (mahadev via acmurthy) 
+
+    MAPREDUCE-3036. Fixed metrics for reserved resources in CS. (Robert Evans
+    via acmurthy)
+
+    MAPREDUCE-2998. Fixed a bug in TaskAttemptImpl which caused it to fork
+    bin/mapred too many times. (vinodkv via acmurthy)
+
+    MAPREDUCE-3023. Fixed clients to display queue state correctly. (Ravi
+    Prakash via acmurthy) 
+
+    MAPREDUCE-2970. Fixed NPEs in corner cases with different configurations
+    for mapreduce.framework.name. (Venu Gopala Rao via vinodkv)
+
+    MAPREDUCE-3062. Fixed default RMAdmin address. (Chris Riccomini
+    via acmurthy) 
+
+    MAPREDUCE-3066. Fixed default ResourceTracker address for the NodeManager. 
+    (Chris Riccomini via acmurthy) 
+
+    MAPREDUCE-3044. Pipes jobs stuck without making progress. (mahadev)
+
+    MAPREDUCE-2754. Fixed MR AM stdout, stderr and syslog to redirect to
+    correct log-files. (Ravi Teja Ch N V via vinodkv)
+
+    MAPREDUCE-3073. Fixed build issues in MR1. (mahadev via acmurthy)
+
+    MAPREDUCE-2691. Increase threadpool size for launching containers in
+    MapReduce ApplicationMaster. (vinodkv via acmurthy)
+
+
+    MAPREDUCE-2990. Fixed display of NodeHealthStatus. (Subroto Sanyal via
+    acmurthy) 
+
+    MAPREDUCE-3053. Better diagnostic message for unknown methods in ProtoBuf
+    RPCs. (vinodkv via acmurthy)
+
+    MAPREDUCE-2952. Fixed ResourceManager/MR-client to consume diagnostics
+    for AM failures in a couple of corner cases. (Arun C Murthy via vinodkv)
+
+    MAPREDUCE-3064. 27 unit test failures with Invalid 
+    "mapreduce.jobtracker.address" configuration value for 
+    JobTracker: "local" (Venu Gopala Rao via mahadev)
+
+    MAPREDUCE-3090. Fix MR AM to use ApplicationAttemptId rather than
+    (ApplicationId, startCount) consistently. (acmurthy)  
+
+    MAPREDUCE-2646. Fixed AMRMProtocol to return containers based on
+    priority. (Sharad Agarwal and Arun C Murthy via vinodkv)
+
+    MAPREDUCE-3031. Proper handling of killed containers to prevent stuck
+    containers/AMs on an external kill signal. (Siddharth Seth via vinodkv)
+
+    MAPREDUCE-2984. Better error message for displaying completed containers.
+    (Devaraj K via acmurthy)
+
+    MAPREDUCE-3071. app master configuration web UI link under the Job menu 
+    opens up application menu. (thomas graves  via mahadev)
+
+    MAPREDUCE-3067. Ensure exit-code is set correctly for containers. (Hitesh
+    Shah via acmurthy)
+
+    MAPREDUCE-2999. Fix YARN webapp framework to properly filter servlet
+    paths. (Thomas Graves via vinodkv)
+
+    MAPREDUCE-3095. fairscheduler ivy including wrong version for hdfs.
+    (John George via mahadev)
+
+    MAPREDUCE-3054. Unable to kill submitted jobs. (mahadev)
+
+    MAPREDUCE-3021. Change base urls for RM web-ui. (Thomas Graves via
+    acmurthy) 
+
+    MAPREDUCE-3041. Fixed ClientRMProtocol to provide min/max resource
+    capabilities along-with new ApplicationId for application submission.
+    (Hitesh Shah via acmurthy)
+
+    MAPREDUCE-2843. Fixed the node-table to be completely displayed and making
+    node entries on RM UI to be sortable. (Abhijit Suresh Shingate via vinodkv)
+
+    MAPREDUCE-3110. Fixed TestRPC failure. (vinodkv)
+
+    MAPREDUCE-3078. Ensure MapReduce AM reports progress correctly for
+    displaying on the RM Web-UI. (vinodkv via acmurthy)
+
 Release 0.22.0 - Unreleased
 Release 0.22.0 - Unreleased
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES

+ 43 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml

@@ -55,6 +55,12 @@
       <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
       <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
       <scope>test</scope>
       <scope>test</scope>
     </dependency>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
     <dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
       <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
@@ -113,4 +119,41 @@
       </plugin>
       </plugin>
     </plugins>
     </plugins>
   </build>
   </build>
+
+  <profiles>
+    <profile>
+      <id>visualize</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>exec-maven-plugin</artifactId>
+            <version>1.2</version>
+            <executions>
+              <execution>
+                <phase>compile</phase>
+                <goals>
+                  <goal>java</goal>
+                </goals>
+                <configuration>
+                  <classpathScope>test</classpathScope>
+                  <mainClass>org.apache.hadoop.yarn.util.VisualizeStateMachine</mainClass>
+                  <arguments>
+                    <argument>MapReduce</argument>
+                    <argument>org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl,
+                      org.apache.hadoop.mapreduce.v2.app.job.impl.TaskImpl,
+                      org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl</argument>
+                    <argument>MapReduce.gv</argument>
+                  </arguments>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
 </project>
 </project>

+ 107 - 111
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java

@@ -18,27 +18,27 @@
 
 
 package org.apache.hadoop.mapred;
 package org.apache.hadoop.mapred;
 
 
-import java.io.File;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
 import java.util.Iterator;
 import java.util.Iterator;
 import java.util.List;
 import java.util.List;
 import java.util.Map;
 import java.util.Map;
 import java.util.Vector;
 import java.util.Vector;
 
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.TaskLog.LogName;
 import org.apache.hadoop.mapred.TaskLog.LogName;
 import org.apache.hadoop.mapreduce.ID;
 import org.apache.hadoop.mapreduce.ID;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
 
 public class MapReduceChildJVM {
 public class MapReduceChildJVM {
-  private static final String SYSTEM_PATH_SEPARATOR = 
-    System.getProperty("path.separator");
 
 
-  private static final Log LOG = LogFactory.getLog(MapReduceChildJVM.class);
-
-  private static File getTaskLogFile(String logDir, LogName filter) {
-    return new File(logDir, filter.toString());
+  private static String getTaskLogFile(LogName filter) {
+    return ApplicationConstants.LOG_DIR_EXPANSION_VAR + Path.SEPARATOR + 
+        filter.toString();
   }
   }
 
 
   private static String getChildEnv(JobConf jobConf, boolean isMap) {
   private static String getChildEnv(JobConf jobConf, boolean isMap) {
@@ -50,32 +50,53 @@ public class MapReduceChildJVM {
         jobConf.get(jobConf.MAPRED_TASK_ENV));
         jobConf.get(jobConf.MAPRED_TASK_ENV));
   }
   }
 
 
-  public static void setVMEnv(Map<String, String> env,
-      List<String> classPaths, String pwd, String containerLogDir,
-      String nmLdLibraryPath, Task task, CharSequence applicationTokensFile) {
-
-    JobConf conf = task.conf;
-
-    // Add classpath.
-    CharSequence cp = env.get("CLASSPATH");
-    String classpath = StringUtils.join(SYSTEM_PATH_SEPARATOR, classPaths);
-    if (null == cp) {
-      env.put("CLASSPATH", classpath);
+  private static String getChildLogLevel(JobConf conf, boolean isMap) {
+    if (isMap) {
+      return conf.get(
+          MRJobConfig.MAP_LOG_LEVEL, 
+          JobConf.DEFAULT_LOG_LEVEL.toString()
+          );
     } else {
     } else {
-      env.put("CLASSPATH", classpath + SYSTEM_PATH_SEPARATOR + cp);
+      return conf.get(
+          MRJobConfig.REDUCE_LOG_LEVEL, 
+          JobConf.DEFAULT_LOG_LEVEL.toString()
+          );
     }
     }
+  }
+  
+  public static void setVMEnv(Map<String, String> environment,
+      Task task) {
 
 
-    /////// Environmental variable LD_LIBRARY_PATH
-    StringBuilder ldLibraryPath = new StringBuilder();
+    JobConf conf = task.conf;
 
 
-    ldLibraryPath.append(nmLdLibraryPath);
-    ldLibraryPath.append(SYSTEM_PATH_SEPARATOR);
-    ldLibraryPath.append(pwd);
-    env.put("LD_LIBRARY_PATH", ldLibraryPath.toString());
-    /////// Environmental variable LD_LIBRARY_PATH
+    // Shell
+    environment.put(
+        Environment.SHELL.name(), 
+        conf.get(
+            MRJobConfig.MAPRED_ADMIN_USER_SHELL, 
+            MRJobConfig.DEFAULT_SHELL)
+            );
+    
+    // Add pwd to LD_LIBRARY_PATH, add this before adding anything else
+    MRApps.addToEnvironment(
+        environment, 
+        Environment.LD_LIBRARY_PATH.name(), 
+        Environment.PWD.$());
+
+    // Add the env variables passed by the user & admin
+    String mapredChildEnv = getChildEnv(conf, task.isMapTask());
+    MRApps.setEnvFromInputString(environment, mapredChildEnv);
+    MRApps.setEnvFromInputString(
+        environment, 
+        conf.get(
+            MRJobConfig.MAPRED_ADMIN_USER_ENV, 
+            MRJobConfig.DEFAULT_MAPRED_ADMIN_USER_ENV)
+        );
 
 
-    // for the child of task jvm, set hadoop.root.logger
-    env.put("HADOOP_ROOT_LOGGER", "DEBUG,CLA"); // TODO: Debug
+    // Set logging level
+    environment.put(
+        "HADOOP_ROOT_LOGGER", 
+        getChildLogLevel(conf, task.isMapTask()) + ",CLA"); 
 
 
     // TODO: The following is useful for instance in streaming tasks. Should be
     // TODO: The following is useful for instance in streaming tasks. Should be
     // set in ApplicationMaster's env by the RM.
     // set in ApplicationMaster's env by the RM.
@@ -89,76 +110,69 @@ public class MapReduceChildJVM {
     // properties.
     // properties.
     long logSize = TaskLog.getTaskLogLength(conf);
     long logSize = TaskLog.getTaskLogLength(conf);
     Vector<String> logProps = new Vector<String>(4);
     Vector<String> logProps = new Vector<String>(4);
-    setupLog4jProperties(logProps, logSize, containerLogDir);
+    setupLog4jProperties(logProps, logSize);
     Iterator<String> it = logProps.iterator();
     Iterator<String> it = logProps.iterator();
     StringBuffer buffer = new StringBuffer();
     StringBuffer buffer = new StringBuffer();
     while (it.hasNext()) {
     while (it.hasNext()) {
       buffer.append(" " + it.next());
       buffer.append(" " + it.next());
     }
     }
     hadoopClientOpts = hadoopClientOpts + buffer.toString();
     hadoopClientOpts = hadoopClientOpts + buffer.toString();
-    
-    env.put("HADOOP_CLIENT_OPTS", hadoopClientOpts);
+    environment.put("HADOOP_CLIENT_OPTS", hadoopClientOpts);
 
 
-    // add the env variables passed by the user
-    String mapredChildEnv = getChildEnv(conf, task.isMapTask());
-    if (mapredChildEnv != null && mapredChildEnv.length() > 0) {
-      String childEnvs[] = mapredChildEnv.split(",");
-      for (String cEnv : childEnvs) {
-        String[] parts = cEnv.split("="); // split on '='
-        String value = (String) env.get(parts[0]);
-        if (value != null) {
-          // replace $env with the child's env constructed by tt's
-          // example LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp
-          value = parts[1].replace("$" + parts[0], value);
-        } else {
-          // this key is not configured by the tt for the child .. get it 
-          // from the tt's env
-          // example PATH=$PATH:/tmp
-          value = System.getenv(parts[0]); // Get from NM?
-          if (value != null) {
-            // the env key is present in the tt's env
-            value = parts[1].replace("$" + parts[0], value);
-          } else {
-            // the env key is note present anywhere .. simply set it
-            // example X=$X:/tmp or X=/tmp
-            value = parts[1].replace("$" + parts[0], "");
-          }
-        }
-        env.put(parts[0], value);
-      }
-    }
-
-    //This should not be set here (If an OS check is requied. moved to ContainerLuanch)
-    // env.put("JVM_PID", "`echo $$`");
-
-    env.put(Constants.STDOUT_LOGFILE_ENV,
-        getTaskLogFile(containerLogDir, TaskLog.LogName.STDOUT).toString());
-    env.put(Constants.STDERR_LOGFILE_ENV,
-        getTaskLogFile(containerLogDir, TaskLog.LogName.STDERR).toString());
+    // Add stdout/stderr env
+    environment.put(
+        MRJobConfig.STDOUT_LOGFILE_ENV,
+        getTaskLogFile(TaskLog.LogName.STDOUT)
+        );
+    environment.put(
+        MRJobConfig.STDERR_LOGFILE_ENV,
+        getTaskLogFile(TaskLog.LogName.STDERR)
+        );
   }
 
 
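The block removed above, which hand-parsed mapred child environment entries of the form LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp, is replaced by the MRApps.setEnvFromInputString calls earlier in setVMEnv (one for the user env, one for MAPRED_ADMIN_USER_ENV). A minimal, illustrative sketch of that kind of VAR=$VAR:extra expansion follows; it is not the MRApps implementation, just the idea:

import java.util.HashMap;
import java.util.Map;

// Illustrative sketch only: expands entries of the form "VAR=$VAR:extra",
// preferring a value already staged for the child, then the parent process
// environment, then the empty string.
public class ChildEnvExpansionSketch {
  static void apply(Map<String, String> env, String spec) {
    if (spec == null || spec.isEmpty()) {
      return;
    }
    for (String entry : spec.split(",")) {
      int eq = entry.indexOf('=');
      if (eq < 0) {
        continue; // ignore malformed entries in this sketch
      }
      String key = entry.substring(0, eq);
      String rawValue = entry.substring(eq + 1);
      String existing = env.get(key);
      if (existing == null) {
        existing = System.getenv(key);
      }
      if (existing == null) {
        existing = "";
      }
      env.put(key, rawValue.replace("$" + key, existing));
    }
  }

  public static void main(String[] args) {
    Map<String, String> env = new HashMap<String, String>();
    env.put("LD_LIBRARY_PATH", "/opt/native");
    apply(env, "LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp,DEBUG_LEVEL=2");
    // Prints both entries, with LD_LIBRARY_PATH expanded to /opt/native:/tmp
    System.out.println(env);
  }
}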
   private static String getChildJavaOpts(JobConf jobConf, boolean isMapTask) {
   private static String getChildJavaOpts(JobConf jobConf, boolean isMapTask) {
+    String userClasspath = "";
+    String adminClasspath = "";
     if (isMapTask) {
     if (isMapTask) {
-      return jobConf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, jobConf.get(
-          JobConf.MAPRED_TASK_JAVA_OPTS,
-          JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS));
+      userClasspath = 
+          jobConf.get(
+              JobConf.MAPRED_MAP_TASK_JAVA_OPTS, 
+              jobConf.get(
+                  JobConf.MAPRED_TASK_JAVA_OPTS, 
+                  JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS)
+          );
+      adminClasspath = 
+          jobConf.get(
+              MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS,
+              MRJobConfig.DEFAULT_MAPRED_ADMIN_JAVA_OPTS);
+    } else {
+      userClasspath =
+          jobConf.get(
+              JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, 
+              jobConf.get(
+                  JobConf.MAPRED_TASK_JAVA_OPTS,
+                  JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS)
+              );
+      adminClasspath =
+          jobConf.get(
+              MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS,
+              MRJobConfig.DEFAULT_MAPRED_ADMIN_JAVA_OPTS);
     }
-    return jobConf
-        .get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, jobConf.get(
-            JobConf.MAPRED_TASK_JAVA_OPTS,
-            JobConf.DEFAULT_MAPRED_TASK_JAVA_OPTS));
+    
+    // Add admin classpath first so it can be overridden by user.
+    return adminClasspath + " " + userClasspath;
   }
 
 
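getChildJavaOpts() now concatenates the admin-supplied JVM options ahead of the user-supplied ones so that a user flag can override an admin default: when the same option appears twice on a java command line, the right-most occurrence normally wins (for example -Xmx on HotSpot). A toy illustration with made-up option values:

// Toy illustration of the ordering above: admin options go first so the
// user's value, appearing later on the command line, takes effect.
public class OptsOrderingSketch {
  public static void main(String[] args) {
    String adminOpts = "-Xmx512m -Djava.net.preferIPv4Stack=true"; // hypothetical admin default
    String userOpts = "-Xmx1024m";                                 // hypothetical per-job setting
    String javaOpts = adminOpts + " " + userOpts;
    System.out.println(javaOpts); // the effective heap is 1024m
  }
}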
   private static void setupLog4jProperties(Vector<String> vargs,
-      long logSize, String containerLogDir) {
+      long logSize) {
     vargs.add("-Dlog4j.configuration=container-log4j.properties");
-    vargs.add("-Dhadoop.yarn.mr.containerLogDir=" + containerLogDir);
-    vargs.add("-Dhadoop.yarn.mr.totalLogFileSize=" + logSize);
+    vargs.add("-D" + MRJobConfig.TASK_LOG_DIR + "=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
+    vargs.add("-D" + MRJobConfig.TASK_LOG_SIZE + "=" + logSize);
   }
 
   public static List<String> getVMCommand(
-      InetSocketAddress taskAttemptListenerAddr, Task task, String javaHome,
-      String workDir, String logDir, String childTmpDir, ID jvmID) {
+      InetSocketAddress taskAttemptListenerAddr, Task task, 
+      ID jvmID) {
 
 
     TaskAttemptID attemptID = task.getTaskID();
     TaskAttemptID attemptID = task.getTaskID();
     JobConf conf = task.conf;
@@ -166,7 +180,7 @@ public class MapReduceChildJVM {
     Vector<String> vargs = new Vector<String>(8);
 
     vargs.add("exec");
-    vargs.add(javaHome + "/bin/java");
+    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
 
 
     // Add child (task) java-vm options.
     //
@@ -199,44 +213,26 @@ public class MapReduceChildJVM {
     String javaOpts = getChildJavaOpts(conf, task.isMapTask());
     javaOpts = javaOpts.replace("@taskid@", attemptID.toString());
     String [] javaOptsSplit = javaOpts.split(" ");
-    
-    // Add java.library.path; necessary for loading native libraries.
-    //
-    // 1. We add the 'cwd' of the task to it's java.library.path to help 
-    //    users distribute native libraries via the DistributedCache.
-    // 2. The user can also specify extra paths to be added to the 
-    //    java.library.path via mapred.{map|reduce}.child.java.opts.
-    //
-    String libraryPath = workDir;
-    boolean hasUserLDPath = false;
-    for(int i=0; i<javaOptsSplit.length ;i++) { 
-      if(javaOptsSplit[i].startsWith("-Djava.library.path=")) {
-        // TODO: Does the above take care of escaped space chars
-        javaOptsSplit[i] += SYSTEM_PATH_SEPARATOR + libraryPath;
-        hasUserLDPath = true;
-        break;
-      }
-    }
-    if(!hasUserLDPath) {
-      vargs.add("-Djava.library.path=" + libraryPath);
-    }
     for (int i = 0; i < javaOptsSplit.length; i++) {
       vargs.add(javaOptsSplit[i]);
     }
 
 
-    if (childTmpDir != null) {
-      vargs.add("-Djava.io.tmpdir=" + childTmpDir);
-    }
+    String childTmpDir = Environment.PWD.$() + Path.SEPARATOR + "tmp";
+    vargs.add("-Djava.io.tmpdir=" + childTmpDir);
 
 
     // Setup the log4j prop
     long logSize = TaskLog.getTaskLogLength(conf);
-    setupLog4jProperties(vargs, logSize, logDir);
+    setupLog4jProperties(vargs, logSize);
 
 
     if (conf.getProfileEnabled()) {
       if (conf.getProfileTaskRange(task.isMapTask()
                                    ).isIncluded(task.getPartition())) {
-        File prof = getTaskLogFile(logDir, TaskLog.LogName.PROFILE);
-        vargs.add(String.format(conf.getProfileParams(), prof.toString()));
+        vargs.add(
+            String.format(
+                conf.getProfileParams(), 
+                getTaskLogFile(TaskLog.LogName.PROFILE)
+                )
+            );
       }
     }
 
 
@@ -249,8 +245,8 @@ public class MapReduceChildJVM {
 
 
     // Finally add the jvmID
     vargs.add(String.valueOf(jvmID.getId()));
-    vargs.add("1>" + getTaskLogFile(logDir, TaskLog.LogName.STDERR));
-    vargs.add("2>" + getTaskLogFile(logDir, TaskLog.LogName.STDOUT));
+    vargs.add("1>" + getTaskLogFile(TaskLog.LogName.STDOUT));
+    vargs.add("2>" + getTaskLogFile(TaskLog.LogName.STDERR));
 
 
     // Final command
     StringBuilder mergedCommand = new StringBuilder();

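Two details of the redirection lines above: getTaskLogFile no longer takes a log directory, and the hunk also fixes a swap in the old code, which sent file descriptor 1 to the STDERR log and descriptor 2 to the STDOUT log. In shell syntax 1> is stdout and 2> is stderr; the same orientation expressed with plain Java 7+ ProcessBuilder redirection, purely as an illustration and unrelated to the MR code path:

import java.io.File;
import java.io.IOException;

// Descriptor 1 (stdout) goes to the stdout log, descriptor 2 (stderr) to the
// stderr log -- the same correction the hunk above makes for the task shell.
public class RedirectSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    ProcessBuilder pb = new ProcessBuilder("sh", "-c", "echo out; echo err 1>&2");
    pb.redirectOutput(new File("stdout.log")); // equivalent of "1>"
    pb.redirectError(new File("stderr.log"));  // equivalent of "2>"
    pb.start().waitFor();
  }
}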
+ 3 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java

@@ -47,7 +47,6 @@ import org.apache.hadoop.mapreduce.filecache.DistributedCache;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
 import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
 import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.Credentials;
@@ -71,7 +70,7 @@ class YarnChild {
     LOG.debug("Child starting");
     LOG.debug("Child starting");
 
 
     final JobConf defaultConf = new JobConf();
     final JobConf defaultConf = new JobConf();
-    defaultConf.addResource(MRConstants.JOB_CONF_FILE);
+    defaultConf.addResource(MRJobConfig.JOB_CONF_FILE);
     UserGroupInformation.setConfiguration(defaultConf);
     UserGroupInformation.setConfiguration(defaultConf);
 
 
     String host = args[0];
     String host = args[0];
@@ -238,7 +237,7 @@ class YarnChild {
 
 
   private static JobConf configureTask(Task task, Credentials credentials,
   private static JobConf configureTask(Task task, Credentials credentials,
       Token<JobTokenIdentifier> jt) throws IOException {
       Token<JobTokenIdentifier> jt) throws IOException {
-    final JobConf job = new JobConf(MRConstants.JOB_CONF_FILE);
+    final JobConf job = new JobConf(MRJobConfig.JOB_CONF_FILE);
     job.setCredentials(credentials);
     job.setCredentials(credentials);
     // set tcp nodelay
     // set tcp nodelay
     job.setBoolean("ipc.client.tcpnodelay", true);
     job.setBoolean("ipc.client.tcpnodelay", true);
@@ -260,7 +259,7 @@ class YarnChild {
 
 
     // Overwrite the localized task jobconf which is linked to in the current
     // Overwrite the localized task jobconf which is linked to in the current
     // work-dir.
     // work-dir.
-    Path localTaskFile = new Path(Constants.JOBFILE);
+    Path localTaskFile = new Path(MRJobConfig.JOB_CONF_FILE);
     writeLocalJobFile(localTaskFile, job);
     writeLocalJobFile(localTaskFile, job);
     task.setJobFile(localTaskFile.toString());
     task.setJobFile(localTaskFile.toString());
     task.setConf(job);
     task.setConf(job);

+ 30 - 32
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java

@@ -39,7 +39,6 @@ import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
 import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
 import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
 import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
@@ -78,6 +77,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.SystemClock;
 import org.apache.hadoop.yarn.SystemClock;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -88,6 +88,7 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.service.AbstractService;
 import org.apache.hadoop.yarn.service.AbstractService;
 import org.apache.hadoop.yarn.service.CompositeService;
 import org.apache.hadoop.yarn.service.CompositeService;
 import org.apache.hadoop.yarn.service.Service;
 import org.apache.hadoop.yarn.service.Service;
+import org.apache.hadoop.yarn.util.ConverterUtils;
 
 
 /**
 /**
  * The Map-Reduce Application Master.
  * The Map-Reduce Application Master.
@@ -114,8 +115,6 @@ public class MRAppMaster extends CompositeService {
   private Clock clock;
   private Clock clock;
   private final long startTime = System.currentTimeMillis();
   private final long startTime = System.currentTimeMillis();
   private String appName;
   private String appName;
-  private final int startCount;
-  private final ApplicationId appID;
   private final ApplicationAttemptId appAttemptID;
   private final ApplicationAttemptId appAttemptID;
   protected final MRAppMetrics metrics;
   protected final MRAppMetrics metrics;
   private Set<TaskId> completedTasksFromPreviousRun;
   private Set<TaskId> completedTasksFromPreviousRun;
@@ -133,21 +132,16 @@ public class MRAppMaster extends CompositeService {
 
 
   private Job job;
   private Job job;
   
   
-  public MRAppMaster(ApplicationId applicationId, int startCount) {
-    this(applicationId, new SystemClock(), startCount);
+  public MRAppMaster(ApplicationAttemptId applicationAttemptId) {
+    this(applicationAttemptId, new SystemClock());
   }
   }
 
 
-  public MRAppMaster(ApplicationId applicationId, Clock clock, int startCount) {
+  public MRAppMaster(ApplicationAttemptId applicationAttemptId, Clock clock) {
     super(MRAppMaster.class.getName());
     super(MRAppMaster.class.getName());
     this.clock = clock;
     this.clock = clock;
-    this.appID = applicationId;
-    this.appAttemptID = RecordFactoryProvider.getRecordFactory(null)
-        .newRecordInstance(ApplicationAttemptId.class);
-    this.appAttemptID.setApplicationId(appID);
-    this.appAttemptID.setAttemptId(startCount);
-    this.startCount = startCount;
+    this.appAttemptID = applicationAttemptId;
     this.metrics = MRAppMetrics.create();
     this.metrics = MRAppMetrics.create();
-    LOG.info("Created MRAppMaster for application " + applicationId);
+    LOG.info("Created MRAppMaster for application " + applicationAttemptId);
   }
   }
 
 
   @Override
   @Override
@@ -159,9 +153,9 @@ public class MRAppMaster extends CompositeService {
     appName = conf.get(MRJobConfig.JOB_NAME, "<missing app name>");
     appName = conf.get(MRJobConfig.JOB_NAME, "<missing app name>");
 
 
     if (conf.getBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, false)
     if (conf.getBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, false)
-         && startCount > 1) {
+         && appAttemptID.getAttemptId() > 1) {
       LOG.info("Recovery is enabled. Will try to recover from previous life.");
       LOG.info("Recovery is enabled. Will try to recover from previous life.");
-      Recovery recoveryServ = new RecoveryService(appID, clock, startCount);
+      Recovery recoveryServ = new RecoveryService(appAttemptID, clock);
       addIfService(recoveryServ);
       addIfService(recoveryServ);
       dispatcher = recoveryServ.getDispatcher();
       dispatcher = recoveryServ.getDispatcher();
       clock = recoveryServ.getClock();
       clock = recoveryServ.getClock();
@@ -243,10 +237,10 @@ public class MRAppMaster extends CompositeService {
         // Read the file-system tokens from the localized tokens-file.
         // Read the file-system tokens from the localized tokens-file.
         Path jobSubmitDir = 
         Path jobSubmitDir = 
             FileContext.getLocalFSFileContext().makeQualified(
             FileContext.getLocalFSFileContext().makeQualified(
-                new Path(new File(MRConstants.JOB_SUBMIT_DIR)
+                new Path(new File(MRJobConfig.JOB_SUBMIT_DIR)
                     .getAbsolutePath()));
                     .getAbsolutePath()));
         Path jobTokenFile = 
         Path jobTokenFile = 
-            new Path(jobSubmitDir, MRConstants.APPLICATION_TOKENS_FILE);
+            new Path(jobSubmitDir, MRJobConfig.APPLICATION_TOKENS_FILE);
         fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
         fsTokens.addAll(Credentials.readTokenStorageFile(jobTokenFile, conf));
         LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile="
         LOG.info("jobSubmitDir=" + jobSubmitDir + " jobTokenFile="
             + jobTokenFile);
             + jobTokenFile);
@@ -264,8 +258,8 @@ public class MRAppMaster extends CompositeService {
     // ////////// End of obtaining the tokens needed by the job. //////////
     // ////////// End of obtaining the tokens needed by the job. //////////
 
 
     // create single job
     // create single job
-    Job newJob = new JobImpl(appID, conf, dispatcher.getEventHandler(),
-        taskAttemptListener, jobTokenSecretManager, fsTokens, clock, startCount,
+    Job newJob = new JobImpl(appAttemptID, conf, dispatcher.getEventHandler(),
+        taskAttemptListener, jobTokenSecretManager, fsTokens, clock,
         completedTasksFromPreviousRun, metrics, currentUser.getUserName());
         completedTasksFromPreviousRun, metrics, currentUser.getUserName());
     ((RunningAppContext) context).jobs.put(newJob.getID(), newJob);
     ((RunningAppContext) context).jobs.put(newJob.getID(), newJob);
 
 
@@ -376,11 +370,11 @@ public class MRAppMaster extends CompositeService {
   }
   }
 
 
   public ApplicationId getAppID() {
   public ApplicationId getAppID() {
-    return appID;
+    return appAttemptID.getApplicationId();
   }
   }
 
 
   public int getStartCount() {
   public int getStartCount() {
-    return startCount;
+    return appAttemptID.getAttemptId();
   }
   }
 
 
   public AppContext getContext() {
   public AppContext getContext() {
@@ -505,7 +499,7 @@ public class MRAppMaster extends CompositeService {
 
 
     @Override
     @Override
     public ApplicationId getApplicationID() {
     public ApplicationId getApplicationID() {
-      return appID;
+      return appAttemptID.getApplicationId();
     }
     }
 
 
     @Override
     @Override
@@ -555,9 +549,9 @@ public class MRAppMaster extends CompositeService {
     // It's more test friendly to put it here.
     // It's more test friendly to put it here.
     DefaultMetricsSystem.initialize("MRAppMaster");
     DefaultMetricsSystem.initialize("MRAppMaster");
 
 
-    /** create a job event for job intialization */
+    // create a job event for job intialization
     JobEvent initJobEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT);
     JobEvent initJobEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT);
-    /** send init to the job (this does NOT trigger job execution) */
+    // Send init to the job (this does NOT trigger job execution)
     // This is a synchronous call, not an event through dispatcher. We want
     // This is a synchronous call, not an event through dispatcher. We want
     // job-init to be done completely here.
     // job-init to be done completely here.
     jobEventDispatcher.handle(initJobEvent);
     jobEventDispatcher.handle(initJobEvent);
@@ -648,17 +642,21 @@ public class MRAppMaster extends CompositeService {
 
 
   public static void main(String[] args) {
     try {
-      //Configuration.addDefaultResource("job.xml");
-      ApplicationId applicationId = RecordFactoryProvider
-          .getRecordFactory(null).newRecordInstance(ApplicationId.class);
-      applicationId.setClusterTimestamp(Long.valueOf(args[0]));
-      applicationId.setId(Integer.valueOf(args[1]));
-      int failCount = Integer.valueOf(args[2]);
-      MRAppMaster appMaster = new MRAppMaster(applicationId, failCount);
+      String applicationAttemptIdStr = System
+          .getenv(ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV);
+      if (applicationAttemptIdStr == null) {
+        String msg = ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV
+            + " is null";
+        LOG.error(msg);
+        throw new IOException(msg);
+      }
+      ApplicationAttemptId applicationAttemptId = ConverterUtils
+          .toApplicationAttemptId(applicationAttemptIdStr);
+      MRAppMaster appMaster = new MRAppMaster(applicationAttemptId);
       Runtime.getRuntime().addShutdownHook(
           new CompositeServiceShutdownHook(appMaster));
       YarnConfiguration conf = new YarnConfiguration(new JobConf());
-      conf.addResource(new Path(MRConstants.JOB_CONF_FILE));
+      conf.addResource(new Path(MRJobConfig.JOB_CONF_FILE));
       conf.set(MRJobConfig.USER_NAME, 
           System.getProperty("user.name")); 
       UserGroupInformation.setConfiguration(conf);

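The reworked main() above no longer takes the application id from command-line arguments; it requires the NodeManager-provided attempt-id environment variable and fails fast if it is missing. A stand-alone sketch of that fail-fast lookup; the literal variable name and the absence of parsing are simplifications (the real code reads ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV and then calls ConverterUtils.toApplicationAttemptId on the value):

import java.io.IOException;

// Sketch of a required-environment-variable lookup that fails fast.
public class AttemptIdFromEnvSketch {
  static String requireEnv(String name) throws IOException {
    String value = System.getenv(name);
    if (value == null) {
      // Logged and rethrown in MRAppMaster so the container exits visibly.
      throw new IOException(name + " is null");
    }
    return value;
  }

  public static void main(String[] args) throws IOException {
    System.out.println(requireEnv("APPLICATION_ATTEMPT_ID"));
  }
}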
+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java

@@ -149,7 +149,7 @@ public class MRClientService extends AbstractService
             + ":" + server.getPort());
             + ":" + server.getPort());
     LOG.info("Instantiated MRClientService at " + this.bindAddress);
     LOG.info("Instantiated MRClientService at " + this.bindAddress);
     try {
     try {
-      webApp = WebApps.$for("yarn", AppContext.class, appContext).with(conf).
+      webApp = WebApps.$for("mapreduce", AppContext.class, appContext).with(conf).
           start(new AMWebApp());
           start(new AMWebApp());
     } catch (Exception e) {
     } catch (Exception e) {
       LOG.error("Webapps failed to start. Ignoring for now:", e);
       LOG.error("Webapps failed to start. Ignoring for now:", e);

+ 27 - 31
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java

@@ -64,7 +64,6 @@ import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
 import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
 import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
 import org.apache.hadoop.mapreduce.task.JobContextImpl;
 import org.apache.hadoop.mapreduce.task.JobContextImpl;
 import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
 import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
 import org.apache.hadoop.mapreduce.v2.api.records.Counter;
 import org.apache.hadoop.mapreduce.v2.api.records.Counter;
 import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
 import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
 import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.Counters;
@@ -93,6 +92,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
 import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
 import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.AccessControlList;
@@ -101,6 +101,7 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -129,11 +130,11 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
       RecordFactoryProvider.getRecordFactory(null);
       RecordFactoryProvider.getRecordFactory(null);
   
   
   //final fields
   //final fields
+  private final ApplicationAttemptId applicationAttemptId;
   private final Clock clock;
   private final Clock clock;
   private final JobACLsManager aclsManager;
   private final JobACLsManager aclsManager;
   private final String username;
   private final String username;
   private final Map<JobACL, AccessControlList> jobACLs;
   private final Map<JobACL, AccessControlList> jobACLs;
-  private final int startCount;
   private final Set<TaskId> completedTasksFromPreviousRun;
   private final Set<TaskId> completedTasksFromPreviousRun;
   private final Lock readLock;
   private final Lock readLock;
   private final Lock writeLock;
   private final Lock writeLock;
@@ -365,26 +366,26 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
   private Token<JobTokenIdentifier> jobToken;
   private Token<JobTokenIdentifier> jobToken;
   private JobTokenSecretManager jobTokenSecretManager;
   private JobTokenSecretManager jobTokenSecretManager;
 
 
-  public JobImpl(ApplicationId appID, Configuration conf,
+  public JobImpl(ApplicationAttemptId applicationAttemptId, Configuration conf,
       EventHandler eventHandler, TaskAttemptListener taskAttemptListener,
       EventHandler eventHandler, TaskAttemptListener taskAttemptListener,
       JobTokenSecretManager jobTokenSecretManager,
       JobTokenSecretManager jobTokenSecretManager,
-      Credentials fsTokenCredentials, Clock clock, int startCount, 
+      Credentials fsTokenCredentials, Clock clock, 
       Set<TaskId> completedTasksFromPreviousRun, MRAppMetrics metrics,
       Set<TaskId> completedTasksFromPreviousRun, MRAppMetrics metrics,
       String userName) {
       String userName) {
-
+    this.applicationAttemptId = applicationAttemptId;
     this.jobId = recordFactory.newRecordInstance(JobId.class);
     this.jobId = recordFactory.newRecordInstance(JobId.class);
     this.jobName = conf.get(JobContext.JOB_NAME, "<missing job name>");
     this.jobName = conf.get(JobContext.JOB_NAME, "<missing job name>");
     this.conf = conf;
     this.conf = conf;
     this.metrics = metrics;
     this.metrics = metrics;
     this.clock = clock;
     this.clock = clock;
     this.completedTasksFromPreviousRun = completedTasksFromPreviousRun;
     this.completedTasksFromPreviousRun = completedTasksFromPreviousRun;
-    this.startCount = startCount;
     this.userName = userName;
     this.userName = userName;
-    jobId.setAppId(appID);
-    jobId.setId(appID.getId());
+    ApplicationId applicationId = applicationAttemptId.getApplicationId();
+    jobId.setAppId(applicationId);
+    jobId.setId(applicationId.getId());
     oldJobId = TypeConverter.fromYarn(jobId);
     oldJobId = TypeConverter.fromYarn(jobId);
     LOG.info("Job created" +
     LOG.info("Job created" +
-    		" appId=" + appID + 
+    		" appId=" + applicationId + 
     		" jobId=" + jobId + 
     		" jobId=" + jobId + 
     		" oldJobId=" + oldJobId);
     		" oldJobId=" + oldJobId);
     
     
@@ -584,25 +585,17 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
   public JobReport getReport() {
   public JobReport getReport() {
     readLock.lock();
     readLock.lock();
     try {
     try {
-      JobReport report = recordFactory.newRecordInstance(JobReport.class);
-      report.setJobId(jobId);
-      report.setJobState(getState());
-      
-      // TODO - Fix to correctly setup report and to check state
-      if (report.getJobState() == JobState.NEW) {
-        return report;
+      JobState state = getState();
+
+      if (getState() == JobState.NEW) {
+        return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
+            startTime, finishTime, setupProgress, 0.0f,
+            0.0f, cleanupProgress);
       }
       }
-      
-      report.setStartTime(startTime);
-      report.setFinishTime(finishTime);
-      report.setSetupProgress(setupProgress);
-      report.setCleanupProgress(cleanupProgress);
-      report.setMapProgress(computeProgress(mapTasks));
-      report.setReduceProgress(computeProgress(reduceTasks));
-      report.setJobName(jobName);
-      report.setUser(username);
-
-      return report;
+
+      return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
+          startTime, finishTime, setupProgress, computeProgress(mapTasks),
+          computeProgress(reduceTasks), cleanupProgress);
     } finally {
       readLock.unlock();
     }
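getReport() keeps the read-lock/try/finally idiom while swapping the manual setters for the MRBuilderUtils.newJobReport factory. A minimal sketch of that locking idiom, assuming the readLock/writeLock fields come from a ReentrantReadWriteLock (the fields are declared above but their initialization is not shown in this diff):

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Readers take a snapshot under the read lock; writers update under the
// write lock, so a report never sees a half-updated pair of values.
public class ReportSnapshotSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final Lock readLock = lock.readLock();
  private final Lock writeLock = lock.writeLock();
  private float mapProgress;
  private float reduceProgress;

  void update(float map, float reduce) {
    writeLock.lock();
    try {
      mapProgress = map;
      reduceProgress = reduce;
    } finally {
      writeLock.unlock();
    }
  }

  float[] snapshot() {
    readLock.lock();
    try {
      // Build the result while holding the read lock, as getReport() does.
      return new float[] { mapProgress, reduceProgress };
    } finally {
      readLock.unlock();
    }
  }
}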
@@ -1007,7 +1000,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
           FileSystem.get(job.conf).makeQualified(
           FileSystem.get(job.conf).makeQualified(
               new Path(path, oldJobIDString));
               new Path(path, oldJobIDString));
       job.remoteJobConfFile =
       job.remoteJobConfFile =
-          new Path(job.remoteJobSubmitDir, MRConstants.JOB_CONF_FILE);
+          new Path(job.remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);
 
 
       // Prepare the TaskAttemptListener server for authentication of Containers
       // Prepare the TaskAttemptListener server for authentication of Containers
       // TaskAttemptListener gets the information via jobTokenSecretManager.
       // TaskAttemptListener gets the information via jobTokenSecretManager.
@@ -1033,7 +1026,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
 
 
       Path remoteJobTokenFile =
       Path remoteJobTokenFile =
           new Path(job.remoteJobSubmitDir,
           new Path(job.remoteJobSubmitDir,
-              MRConstants.APPLICATION_TOKENS_FILE);
+              MRJobConfig.APPLICATION_TOKENS_FILE);
       tokenStorage.writeTokenStorageFile(remoteJobTokenFile, job.conf);
       tokenStorage.writeTokenStorageFile(remoteJobTokenFile, job.conf);
       LOG.info("Writing back the job-token file on the remote file system:"
       LOG.info("Writing back the job-token file on the remote file system:"
           + remoteJobTokenFile.toString());
           + remoteJobTokenFile.toString());
@@ -1078,7 +1071,8 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
                 job.conf, splits[i], 
                 job.conf, splits[i], 
                 job.taskAttemptListener, 
                 job.taskAttemptListener, 
                 job.committer, job.jobToken, job.fsTokens.getAllTokens(), 
                 job.committer, job.jobToken, job.fsTokens.getAllTokens(), 
-                job.clock, job.completedTasksFromPreviousRun, job.startCount,
+                job.clock, job.completedTasksFromPreviousRun, 
+                job.applicationAttemptId.getAttemptId(),
                 job.metrics);
                 job.metrics);
         job.addTask(task);
         job.addTask(task);
       }
       }
@@ -1095,7 +1089,9 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
                 job.conf, job.numMapTasks, 
                 job.conf, job.numMapTasks, 
                 job.taskAttemptListener, job.committer, job.jobToken,
                 job.taskAttemptListener, job.committer, job.jobToken,
                 job.fsTokens.getAllTokens(), job.clock, 
                 job.fsTokens.getAllTokens(), job.clock, 
-                job.completedTasksFromPreviousRun, job.startCount, job.metrics);
+                job.completedTasksFromPreviousRun, 
+                job.applicationAttemptId.getAttemptId(),
+                job.metrics);
         job.addTask(task);
         job.addTask(task);
       }
       }
       LOG.info("Number of reduces for job " + job.jobId + " = "
       LOG.info("Number of reduces for job " + job.jobId + " = "

+ 35 - 48
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java

@@ -21,7 +21,6 @@ package org.apache.hadoop.mapreduce.v2.app.job.impl;
 import java.io.File;
 import java.io.File;
 import java.io.IOException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
-import java.net.URI;
 import java.nio.ByteBuffer;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collection;
@@ -62,7 +61,6 @@ import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptStartedEvent;
 import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletionEvent;
 import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletionEvent;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
 import org.apache.hadoop.mapreduce.v2.api.records.Counter;
 import org.apache.hadoop.mapreduce.v2.api.records.Counter;
 import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.Phase;
 import org.apache.hadoop.mapreduce.v2.api.records.Phase;
@@ -103,6 +101,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
+import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerToken;
 import org.apache.hadoop.yarn.api.records.ContainerToken;
@@ -117,7 +116,6 @@ import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.SingleArcTransition;
 import org.apache.hadoop.yarn.state.StateMachine;
 import org.apache.hadoop.yarn.state.StateMachine;
 import org.apache.hadoop.yarn.state.StateMachineFactory;
 import org.apache.hadoop.yarn.state.StateMachineFactory;
-import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.RackResolver;
 import org.apache.hadoop.yarn.util.RackResolver;
 
 
@@ -153,7 +151,7 @@ public abstract class TaskAttemptImpl implements
   private Token<JobTokenIdentifier> jobToken;
   private Token<JobTokenIdentifier> jobToken;
   private static AtomicBoolean initialClasspathFlag = new AtomicBoolean();
   private static AtomicBoolean initialClasspathFlag = new AtomicBoolean();
   private static String initialClasspath = null;
   private static String initialClasspath = null;
-  private final Object classpathLock = new Object();
+  private static final Object classpathLock = new Object();
   private long launchTime;
   private long launchTime;
   private long finishTime;
   private long finishTime;
   private WrappedProgressSplitsBlock progressSplitBlock;
   private WrappedProgressSplitsBlock progressSplitBlock;
@@ -518,8 +516,8 @@ public abstract class TaskAttemptImpl implements
         return initialClasspath;
         return initialClasspath;
       }
       }
       Map<String, String> env = new HashMap<String, String>();
       Map<String, String> env = new HashMap<String, String>();
-      MRApps.setInitialClasspath(env);
-      initialClasspath = env.get(MRApps.CLASSPATH);
+      MRApps.setClasspath(env);
+      initialClasspath = env.get(Environment.CLASSPATH.name());
       initialClasspathFlag.set(true);
       return initialClasspath;
     }
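Making classpathLock static matters because it guards the static fields initialClasspathFlag and initialClasspath: with an instance-level lock, two TaskAttemptImpl instances could compute and publish the classpath concurrently. A sketch of the lazy-initialization pattern the surrounding getInitialClasspath() uses, independent of the MRApps call (the computation here is a placeholder):

import java.util.concurrent.atomic.AtomicBoolean;

// Lazy, thread-safe initialization of shared static state.
public class LazyStaticInitSketch {
  private static final AtomicBoolean initialized = new AtomicBoolean();
  private static String cachedValue = null;
  // Must be static: it protects static state shared by all instances.
  private static final Object lock = new Object();

  static String get() {
    // Fast path: the volatile read on the AtomicBoolean makes the earlier
    // write to cachedValue visible once it reports true.
    if (initialized.get()) {
      return cachedValue;
    }
    synchronized (lock) {
      if (initialized.get()) {
        return cachedValue;
      }
      cachedValue = System.getProperty("java.class.path"); // placeholder computation
      initialized.set(true);
      return cachedValue;
    }
  }
}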
@@ -531,16 +529,18 @@ public abstract class TaskAttemptImpl implements
    */
    */
   private ContainerLaunchContext createContainerLaunchContext() {
   private ContainerLaunchContext createContainerLaunchContext() {
 
 
-    ContainerLaunchContext container =
-        recordFactory.newRecordInstance(ContainerLaunchContext.class);
-
     // Application resources
     // Application resources
     Map<String, LocalResource> localResources = 
     Map<String, LocalResource> localResources = 
         new HashMap<String, LocalResource>();
         new HashMap<String, LocalResource>();
     
     
     // Application environment
     // Application environment
     Map<String, String> environment = new HashMap<String, String>();
     Map<String, String> environment = new HashMap<String, String>();
-    
+
+    // Service data
+    Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
+
+    // Tokens
+    ByteBuffer tokens = ByteBuffer.wrap(new byte[]{});
     try {
     try {
       FileSystem remoteFS = FileSystem.get(conf);
       FileSystem remoteFS = FileSystem.get(conf);
 
 
@@ -550,7 +550,7 @@ public abstract class TaskAttemptImpl implements
               MRJobConfig.JAR))).makeQualified(remoteFS.getUri(), 
               MRJobConfig.JAR))).makeQualified(remoteFS.getUri(), 
                                                remoteFS.getWorkingDirectory());
                                                remoteFS.getWorkingDirectory());
         localResources.put(
         localResources.put(
-            MRConstants.JOB_JAR,
+            MRJobConfig.JOB_JAR,
             createLocalResource(remoteFS, recordFactory, remoteJobJar,
             createLocalResource(remoteFS, recordFactory, remoteJobJar,
                 LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
                 LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
         LOG.info("The job-jar file on the remote FS is "
         LOG.info("The job-jar file on the remote FS is "
@@ -570,9 +570,9 @@ public abstract class TaskAttemptImpl implements
       Path remoteJobSubmitDir =
       Path remoteJobSubmitDir =
           new Path(path, oldJobId.toString());
           new Path(path, oldJobId.toString());
       Path remoteJobConfPath = 
       Path remoteJobConfPath = 
-          new Path(remoteJobSubmitDir, MRConstants.JOB_CONF_FILE);
+          new Path(remoteJobSubmitDir, MRJobConfig.JOB_CONF_FILE);
       localResources.put(
       localResources.put(
-          MRConstants.JOB_CONF_FILE,
+          MRJobConfig.JOB_CONF_FILE,
           createLocalResource(remoteFS, recordFactory, remoteJobConfPath,
           createLocalResource(remoteFS, recordFactory, remoteJobConfPath,
               LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
               LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));
       LOG.info("The job-conf file on the remote FS is "
       LOG.info("The job-conf file on the remote FS is "
@@ -580,12 +580,8 @@ public abstract class TaskAttemptImpl implements
       // //////////// End of JobConf setup
       // //////////// End of JobConf setup
 
 
       // Setup DistributedCache
       // Setup DistributedCache
-      MRApps.setupDistributedCache(conf, localResources, environment);
+      MRApps.setupDistributedCache(conf, localResources);
 
 
-      // Set local-resources and environment
-      container.setLocalResources(localResources);
-      container.setEnvironment(environment);
-      
       // Setup up tokens
       // Setup up tokens
       Credentials taskCredentials = new Credentials();
       Credentials taskCredentials = new Credentials();
 
 
@@ -606,52 +602,43 @@ public abstract class TaskAttemptImpl implements
       LOG.info("Size of containertokens_dob is "
       LOG.info("Size of containertokens_dob is "
           + taskCredentials.numberOfTokens());
           + taskCredentials.numberOfTokens());
       taskCredentials.writeTokenStorageToStream(containerTokens_dob);
       taskCredentials.writeTokenStorageToStream(containerTokens_dob);
-      container.setContainerTokens(
+      tokens = 
           ByteBuffer.wrap(containerTokens_dob.getData(), 0,
           ByteBuffer.wrap(containerTokens_dob.getData(), 0,
-              containerTokens_dob.getLength()));
+              containerTokens_dob.getLength());
 
 
       // Add shuffle token
       // Add shuffle token
       LOG.info("Putting shuffle token in serviceData");
       LOG.info("Putting shuffle token in serviceData");
-      Map<String, ByteBuffer> serviceData = new HashMap<String, ByteBuffer>();
       serviceData.put(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID,
       serviceData.put(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID,
           ShuffleHandler.serializeServiceData(jobToken));
           ShuffleHandler.serializeServiceData(jobToken));
-      container.setServiceData(serviceData);
 
 
-      MRApps.addToClassPath(container.getEnvironment(), getInitialClasspath());
+      MRApps.addToEnvironment(
+          environment,  
+          Environment.CLASSPATH.name(), 
+          getInitialClasspath());
     } catch (IOException e) {
     } catch (IOException e) {
       throw new YarnException(e);
       throw new YarnException(e);
     }
     }
-    
-    container.setContainerId(containerID);
-    container.setUser(conf.get(MRJobConfig.USER_NAME)); // TODO: Fix
-
-    File workDir = new File("$PWD"); // Will be expanded by the shell.
-    String containerLogDir =
-        new File(ApplicationConstants.LOG_DIR_EXPANSION_VAR).toString();
-    String childTmpDir = new File(workDir, "tmp").toString();
-    String javaHome = "${JAVA_HOME}"; // Will be expanded by the shell.
-    String nmLdLibraryPath = "{LD_LIBRARY_PATH}"; // Expanded by the shell?
-    List<String> classPaths = new ArrayList<String>();
-
-    String localizedApplicationTokensFile =
-        new File(workDir, MRConstants.APPLICATION_TOKENS_FILE).toString();
-    classPaths.add(MRConstants.JOB_JAR);
-    classPaths.add(MRConstants.YARN_MAPREDUCE_APP_JAR_PATH);
-    classPaths.add(workDir.toString()); // TODO
-
-    // Construct the actual Container
-    container.setCommands(MapReduceChildJVM.getVMCommand(
-        taskAttemptListener.getAddress(), remoteTask, javaHome,
-        workDir.toString(), containerLogDir, childTmpDir, jvmID));
 
 
-    MapReduceChildJVM.setVMEnv(container.getEnvironment(), classPaths,
-        workDir.toString(), containerLogDir, nmLdLibraryPath, remoteTask,
-        localizedApplicationTokensFile);
+    // Setup environment
+    MapReduceChildJVM.setVMEnv(environment, remoteTask);
 
 
+    // Set up the launch command
+    List<String> commands = MapReduceChildJVM.getVMCommand(
+        taskAttemptListener.getAddress(), remoteTask,
+        jvmID);
+    
     // Construct the actual Container
     // Construct the actual Container
+    ContainerLaunchContext container =
+        recordFactory.newRecordInstance(ContainerLaunchContext.class);
     container.setContainerId(containerID);
     container.setContainerId(containerID);
     container.setUser(conf.get(MRJobConfig.USER_NAME));
     container.setUser(conf.get(MRJobConfig.USER_NAME));
     container.setResource(assignedCapability);
     container.setResource(assignedCapability);
+    container.setLocalResources(localResources);
+    container.setEnvironment(environment);
+    container.setCommands(commands);
+    container.setServiceData(serviceData);
+    container.setContainerTokens(tokens);
+    
       return container;
   }
 
 
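The hunk above restructures createContainerLaunchContext() so that local resources, environment, commands, service data, and tokens are all gathered first and the ContainerLaunchContext record is populated in one block at the end. A condensed sketch of just that final assembly, using Records.newRecord in place of the record factory for brevity and relying only on the setters visible in the hunk (the API of this YARN snapshot; later versions differ):

import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.Records;

// Gather everything first, then fill the launch-context record in one place.
public class LaunchContextAssemblySketch {
  static ContainerLaunchContext build(ContainerId containerId, String user,
      Resource capability, Map<String, LocalResource> localResources,
      Map<String, String> environment, List<String> commands,
      Map<String, ByteBuffer> serviceData, ByteBuffer tokens) {
    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    ctx.setContainerId(containerId);
    ctx.setUser(user);
    ctx.setResource(capability);
    ctx.setLocalResources(localResources);
    ctx.setEnvironment(environment);
    ctx.setCommands(commands);
    ctx.setServiceData(serviceData);
    ctx.setContainerTokens(tokens);
    return ctx;
  }
}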

+ 29 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java

@@ -73,6 +73,8 @@ public class ContainerLauncherImpl extends AbstractService implements
 
 
   private AppContext context;
   private AppContext context;
   private ThreadPoolExecutor launcherPool;
   private ThreadPoolExecutor launcherPool;
+  private static final int INITIAL_POOL_SIZE = 10;
+  private int limitOnPoolSize;
   private Thread eventHandlingThread;
   private Thread eventHandlingThread;
   private BlockingQueue<ContainerLauncherEvent> eventQueue =
   private BlockingQueue<ContainerLauncherEvent> eventQueue =
       new LinkedBlockingQueue<ContainerLauncherEvent>();
       new LinkedBlockingQueue<ContainerLauncherEvent>();
@@ -96,16 +98,17 @@ public class ContainerLauncherImpl extends AbstractService implements
         YarnConfiguration.YARN_SECURITY_INFO,
         YarnConfiguration.YARN_SECURITY_INFO,
         ContainerManagerSecurityInfo.class, SecurityInfo.class);
         ContainerManagerSecurityInfo.class, SecurityInfo.class);
     this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
     this.recordFactory = RecordFactoryProvider.getRecordFactory(conf);
+    this.limitOnPoolSize = conf.getInt(
+        MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT,
+        MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT);
     super.init(myLocalConfig);
     super.init(myLocalConfig);
   }
   }
 
 
   public void start() {
   public void start() {
-    launcherPool =
-        new ThreadPoolExecutor(getConfig().getInt(
-            MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT, 10),
-            Integer.MAX_VALUE, 1, TimeUnit.HOURS,
-            new LinkedBlockingQueue<Runnable>());
-    launcherPool.prestartAllCoreThreads(); // Wait for work.
+    // Start with a default core-pool size of 10 and change it dynamically.
+    launcherPool = new ThreadPoolExecutor(INITIAL_POOL_SIZE,
+        Integer.MAX_VALUE, 1, TimeUnit.HOURS,
+        new LinkedBlockingQueue<Runnable>());
     eventHandlingThread = new Thread(new Runnable() {
     eventHandlingThread = new Thread(new Runnable() {
       @Override
       @Override
       public void run() {
       public void run() {
@@ -117,6 +120,26 @@ public class ContainerLauncherImpl extends AbstractService implements
             LOG.error("Returning, interrupted : " + e);
             LOG.error("Returning, interrupted : " + e);
             return;
             return;
           }
           }
+
+          int poolSize = launcherPool.getCorePoolSize();
+
+          // See if we need up the pool size only if haven't reached the
+          // maximum limit yet.
+          if (poolSize != limitOnPoolSize) {
+
+            // nodes where containers will run at *this* point of time. This is
+            // *not* the cluster size and doesn't need to be.
+            int numNodes = ugiMap.size();
+            int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
+
+            if (poolSize <= idealPoolSize) {
+              // Bump up the pool size to idealPoolSize+INITIAL_POOL_SIZE, the
+              // later is just a buffer so we are not always increasing the
+              // pool-size
+              launcherPool.setCorePoolSize(idealPoolSize + INITIAL_POOL_SIZE);
+            }
+          }
+
           // the events from the queue are handled in parallel
           // using a thread pool
           launcherPool.execute(new EventProcessor(event));

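The new pool-management code above grows the launcher's core pool on demand, toward min(limit, nodes-in-use) plus a buffer, instead of pre-starting a fixed number of threads. A self-contained sketch of the same strategy (not the ContainerLauncherImpl class itself; the node count is passed in rather than derived from ugiMap):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Start small and grow the core pool as more nodes come into play.
public class GrowingPoolSketch {
  private static final int INITIAL_POOL_SIZE = 10;
  private final int limitOnPoolSize;
  private final ThreadPoolExecutor pool;

  GrowingPoolSketch(int limitOnPoolSize) {
    this.limitOnPoolSize = limitOnPoolSize;
    this.pool = new ThreadPoolExecutor(INITIAL_POOL_SIZE, Integer.MAX_VALUE,
        1, TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>());
  }

  void submit(Runnable work, int nodesCurrentlyInUse) {
    int poolSize = pool.getCorePoolSize();
    if (poolSize != limitOnPoolSize) {
      int idealPoolSize = Math.min(limitOnPoolSize, nodesCurrentlyInUse);
      if (poolSize <= idealPoolSize) {
        // The extra INITIAL_POOL_SIZE is a buffer so the pool is not
        // resized on every single event.
        pool.setCorePoolSize(idealPoolSize + INITIAL_POOL_SIZE);
      }
    }
    pool.execute(work);
  }
}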
+ 20 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java

@@ -18,6 +18,7 @@
 
 
 package org.apache.hadoop.mapreduce.v2.app.local;
 package org.apache.hadoop.mapreduce.v2.app.local;
 
 
+import java.util.ArrayList;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicInteger;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
@@ -30,15 +31,19 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
-import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
+import org.apache.hadoop.yarn.api.records.AMResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.Records;
 
 
 /**
 /**
@@ -65,6 +70,20 @@ public class LocalContainerAllocator extends RMCommunicator
     this.appID = context.getApplicationID();
     this.appID = context.getApplicationID();
   }
   }
 
 
+  @Override
+  protected synchronized void heartbeat() throws Exception {
+    AllocateRequest allocateRequest = BuilderUtils.newAllocateRequest(
+        this.applicationAttemptId, this.lastResponseID, super
+            .getApplicationProgress(), new ArrayList<ResourceRequest>(),
+        new ArrayList<ContainerId>());
+    AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
+    AMResponse response = allocateResponse.getAMResponse();
+    if (response.getReboot()) {
+      // TODO
+      LOG.info("Event from RM: shutting down Application Master");
+    }
+  }
+
   @Override
   @Override
   public void handle(ContainerAllocatorEvent event) {
   public void handle(ContainerAllocatorEvent event) {
     if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {
     if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) {

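The heartbeat() added above only reports progress (empty ask and release lists) and watches for the reboot signal; the periodic invocation itself comes from the RMCommunicator base class, whose poll loop is not part of this diff. A generic sketch of such a driver, under the assumption of a simple fixed-delay scheduler rather than whatever RMCommunicator actually does:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Invoke a heartbeat callback on a fixed poll interval, tolerating failures.
public class HeartbeatDriverSketch {
  interface Heartbeater {
    void heartbeat() throws Exception;
  }

  static ScheduledExecutorService start(final Heartbeater target, long pollIntervalMs) {
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    scheduler.scheduleWithFixedDelay(new Runnable() {
      @Override
      public void run() {
        try {
          target.heartbeat();
        } catch (Exception e) {
          // Keep polling; the caller decides separately whether an error is fatal.
          System.err.println("Heartbeat failed: " + e);
        }
      }
    }, 0, pollIntervalMs, TimeUnit.MILLISECONDS);
    return scheduler;
  }
}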
+ 9 - 8
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java

@@ -58,7 +58,7 @@ import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleaner;
 import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleanupEvent;
 import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleanupEvent;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.Clock;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -92,10 +92,9 @@ public class RecoveryService extends CompositeService implements Recovery {
 
 
   private static final Log LOG = LogFactory.getLog(RecoveryService.class);
   private static final Log LOG = LogFactory.getLog(RecoveryService.class);
 
 
-  private final ApplicationId appID;
+  private final ApplicationAttemptId applicationAttemptId;
   private final Dispatcher dispatcher;
   private final Dispatcher dispatcher;
   private final ControlledClock clock;
   private final ControlledClock clock;
-  private final int startCount;
 
 
   private JobInfo jobInfo = null;
   private JobInfo jobInfo = null;
   private final Map<TaskId, TaskInfo> completedTasks =
   private final Map<TaskId, TaskInfo> completedTasks =
@@ -106,10 +105,10 @@ public class RecoveryService extends CompositeService implements Recovery {
 
 
   private volatile boolean recoveryMode = false;
   private volatile boolean recoveryMode = false;
 
 
-  public RecoveryService(ApplicationId appID, Clock clock, int startCount) {
+  public RecoveryService(ApplicationAttemptId applicationAttemptId, 
+      Clock clock) {
     super("RecoveringDispatcher");
     super("RecoveringDispatcher");
-    this.appID = appID;
-    this.startCount = startCount;
+    this.applicationAttemptId = applicationAttemptId;
     this.dispatcher = new RecoveryDispatcher();
     this.dispatcher = new RecoveryDispatcher();
     this.clock = new ControlledClock(clock);
     this.clock = new ControlledClock(clock);
       addService((Service) dispatcher);
       addService((Service) dispatcher);
@@ -152,7 +151,8 @@ public class RecoveryService extends CompositeService implements Recovery {
 
 
   private void parse() throws IOException {
   private void parse() throws IOException {
     // TODO: parse history file based on startCount
     // TODO: parse history file based on startCount
-    String jobName = TypeConverter.fromYarn(appID).toString();
+    String jobName = 
+        TypeConverter.fromYarn(applicationAttemptId.getApplicationId()).toString();
     String jobhistoryDir = JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(getConfig());
     String jobhistoryDir = JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(getConfig());
     FSDataInputStream in = null;
     FSDataInputStream in = null;
     Path historyFile = null;
     Path historyFile = null;
@@ -160,8 +160,9 @@ public class RecoveryService extends CompositeService implements Recovery {
         new Path(jobhistoryDir));
         new Path(jobhistoryDir));
     FileContext fc = FileContext.getFileContext(histDirPath.toUri(),
     FileContext fc = FileContext.getFileContext(histDirPath.toUri(),
         getConfig());
         getConfig());
+    //read the previous history file
     historyFile = fc.makeQualified(JobHistoryUtils.getStagingJobHistoryFile(
     historyFile = fc.makeQualified(JobHistoryUtils.getStagingJobHistoryFile(
-        histDirPath, jobName, startCount - 1));          //read the previous history file
+        histDirPath, jobName, (applicationAttemptId.getAttemptId() - 1)));          
     in = fc.open(historyFile);
     in = fc.open(historyFile);
     JobHistoryParser parser = new JobHistoryParser(in);
     JobHistoryParser parser = new JobHistoryParser(in);
     jobInfo = parser.parse();
     jobInfo = parser.parse();

+ 32 - 22
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.mapreduce.v2.app.rm;

 import java.io.IOException;
 import java.security.PrivilegedAction;
-import java.util.ArrayList;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -29,6 +28,7 @@ import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
@@ -42,17 +42,12 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.AMRMProtocol;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.records.AMResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -64,7 +59,7 @@ import org.apache.hadoop.yarn.service.AbstractService;
 /**
  * Registers/unregisters to RM and sends heartbeats to RM.
  */
-public class RMCommunicator extends AbstractService  {
+public abstract class RMCommunicator extends AbstractService  {
   private static final Log LOG = LogFactory.getLog(RMContainerAllocator.class);
   private int rmPollInterval;//millis
   protected ApplicationId applicationId;
@@ -74,7 +69,7 @@ public class RMCommunicator extends AbstractService  {
   protected EventHandler eventHandler;
   protected AMRMProtocol scheduler;
   private final ClientService clientService;
-  private int lastResponseID;
+  protected int lastResponseID;
   private Resource minContainerCapability;
   private Resource maxContainerCapability;

@@ -121,6 +116,34 @@ public class RMCommunicator extends AbstractService  {
     return job;
   }

+  /**
+   * Get the appProgress. Can be used only after this component is started.
+   * @return the appProgress.
+   */
+  protected float getApplicationProgress() {
+    // For now just a single job. In future when we have a DAG, we need an
+    // aggregate progress.
+    JobReport report = this.job.getReport();
+    float setupWeight = 0.05f;
+    float cleanupWeight = 0.05f;
+    float mapWeight = 0.0f;
+    float reduceWeight = 0.0f;
+    int numMaps = this.job.getTotalMaps();
+    int numReduces = this.job.getTotalReduces();
+    if (numMaps == 0 && numReduces == 0) {
+    } else if (numMaps == 0) {
+      reduceWeight = 0.9f;
+    } else if (numReduces == 0) {
+      mapWeight = 0.9f;
+    } else {
+      mapWeight = reduceWeight = 0.45f;
+    }
+    return (report.getSetupProgress() * setupWeight
+        + report.getCleanupProgress() * cleanupWeight
+        + report.getMapProgress() * mapWeight + report.getReduceProgress()
+        * reduceWeight);
+  }
+
   protected void register() {
     //Register
     String host = 
@@ -262,18 +285,5 @@ public class RMCommunicator extends AbstractService  {
     });
   }

-  protected synchronized void heartbeat() throws Exception {
-    AllocateRequest allocateRequest =
-        recordFactory.newRecordInstance(AllocateRequest.class);
-    allocateRequest.setApplicationAttemptId(applicationAttemptId);
-    allocateRequest.setResponseId(lastResponseID);
-    allocateRequest.addAllAsks(new ArrayList<ResourceRequest>());
-    allocateRequest.addAllReleases(new ArrayList<ContainerId>());
-    AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
-    AMResponse response = allocateResponse.getAMResponse();
-    if (response.getReboot()) {
-      LOG.info("Event from RM: shutting down Application Master");
-    }
-  }
-
+  protected abstract void heartbeat() throws Exception;
 }

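Note (not part of the diff): a quick worked example of the weighting used by getApplicationProgress() above. Setup and cleanup are fixed at 0.05 each and the remaining 0.9 is split across the phases the job actually has; the numbers reuse the FakeJob.setProgress(100, 10, 0, 0) case asserted in the TestRMContainerAllocator changes further down, and the class name is illustrative only.

    // Standalone arithmetic sketch of the getApplicationProgress() weighting.
    public class ProgressWeightingSketch {
      public static void main(String[] args) {
        float setupWeight = 0.05f, cleanupWeight = 0.05f;
        // Job with both maps and reduces: 0.45 each
        // (map-only or reduce-only jobs get the full 0.9 on that phase).
        float mapWeight = 0.45f, reduceWeight = 0.45f;

        // Progress values as returned by the JobReport, here in percent to
        // match the FakeJob used in the test below.
        float setup = 100f, map = 10f, reduce = 0f, cleanup = 0f;

        float progress = setup * setupWeight + cleanup * cleanupWeight
            + map * mapWeight + reduce * reduceWeight;
        System.out.println(progress);  // 5.0 + 0.0 + 4.5 + 0.0 = 9.5
      }
    }
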
+ 12 - 28
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java

@@ -586,37 +586,21 @@ public class RMContainerAllocator extends RMContainerRequestor
     private ContainerRequest assign(Container allocated) {
       ContainerRequest assigned = null;
       
-      if (mapResourceReqt != reduceResourceReqt) {
-        //assign based on size
-        LOG.info("Assigning based on container size");
-        if (allocated.getResource().getMemory() == mapResourceReqt) {
-          assigned = assignToFailedMap(allocated);
-          if (assigned == null) {
-            assigned = assignToMap(allocated);
-          }
-        } else if (allocated.getResource().getMemory() == reduceResourceReqt) {
-          assigned = assignToReduce(allocated);
-        }
-        
-        return assigned;
-      }
-      
-      //container can be given to either map or reduce
-      //assign based on priority
-      
-      //try to assign to earlierFailedMaps if present
-      assigned = assignToFailedMap(allocated);
-      
-      //Assign to reduces before assigning to maps ?
-      if (assigned == null) {
+      Priority priority = allocated.getPriority();
+      if (PRIORITY_FAST_FAIL_MAP.equals(priority)) {
+        LOG.info("Assigning container " + allocated + " to fast fail map");
+        assigned = assignToFailedMap(allocated);
+      } else if (PRIORITY_REDUCE.equals(priority)) {
+        LOG.info("Assigning container " + allocated + " to reduce");
         assigned = assignToReduce(allocated);
-      }
-      
-      //try to assign to maps if present
-      if (assigned == null) {
+      } else if (PRIORITY_MAP.equals(priority)) {
+        LOG.info("Assigning container " + allocated + " to map");
         assigned = assignToMap(allocated);
+      } else {
+        LOG.warn("Container allocated at unwanted priority: " + priority + 
+            ". Returning to RM...");
       }
-      
+        
       return assigned;
     }
     

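Note (not part of the diff): the assign() rewrite above stops matching containers to requests by memory size and instead routes them by the Priority they were requested at, so map and reduce requests of identical size can no longer be confused. A minimal sketch of the idea; the numeric priority values and class name are made up, only the equals()-based dispatch mirrors the allocator's new logic.

    import org.apache.hadoop.yarn.api.records.Priority;
    import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

    public class PriorityRoutingSketch {
      // Hypothetical priorities standing in for the allocator's
      // PRIORITY_FAST_FAIL_MAP / PRIORITY_REDUCE / PRIORITY_MAP constants.
      static final Priority FAST_FAIL_MAP = newPriority(5);
      static final Priority REDUCE = newPriority(10);
      static final Priority MAP = newPriority(20);

      static Priority newPriority(int p) {
        Priority priority = RecordFactoryProvider.getRecordFactory(null)
            .newRecordInstance(Priority.class);
        priority.setPriority(p);
        return priority;
      }

      // Route an allocated container purely by the priority it was asked at.
      static String route(Priority allocatedAt) {
        if (FAST_FAIL_MAP.equals(allocatedAt)) {
          return "fast fail map";
        } else if (REDUCE.equals(allocatedAt)) {
          return "reduce";
        } else if (MAP.equals(allocatedAt)) {
          return "map";
        }
        return "unexpected priority - return container to RM";
      }

      public static void main(String[] args) {
        System.out.println(route(REDUCE));  // -> reduce
      }
    }
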
+ 5 - 8
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java

@@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.util.BuilderUtils;

 /**
  * Keeps the data structures to send container requests to RM.
@@ -107,15 +108,11 @@ public abstract class RMContainerRequestor extends RMCommunicator {
     LOG.info("maxTaskFailuresPerNode is " + maxTaskFailuresPerNode);
   }

-  protected abstract void heartbeat() throws Exception;
-
   protected AMResponse makeRemoteRequest() throws YarnRemoteException {
-    AllocateRequest allocateRequest = recordFactory
-        .newRecordInstance(AllocateRequest.class);
-    allocateRequest.setApplicationAttemptId(applicationAttemptId);
-    allocateRequest.setResponseId(lastResponseID);
-    allocateRequest.addAllAsks(new ArrayList<ResourceRequest>(ask));
-    allocateRequest.addAllReleases(new ArrayList<ContainerId>(release));
+    AllocateRequest allocateRequest = BuilderUtils.newAllocateRequest(
+        applicationAttemptId, lastResponseID, super.getApplicationProgress(),
+        new ArrayList<ResourceRequest>(ask), new ArrayList<ContainerId>(
+            release));
     AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
     AMResponse response = allocateResponse.getAMResponse();
     lastResponseID = response.getResponseId();

+ 1 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/DefaultSpeculator.java

@@ -35,7 +35,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
@@ -87,7 +86,7 @@ public class DefaultSpeculator extends AbstractService implements
   private final ConcurrentMap<JobId, AtomicInteger> reduceContainerNeeds
       = new ConcurrentHashMap<JobId, AtomicInteger>();

-  private final Set<TaskId> mayHaveSpeculated = new HashSet();
+  private final Set<TaskId> mayHaveSpeculated = new HashSet<TaskId>();

   private final Configuration conf;
   private AppContext context;

+ 1 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/JobConfPage.java

@@ -44,6 +44,7 @@ public class JobConfPage extends AppView {
     set(TITLE, jobID.isEmpty() ? "Bad request: missing job ID"
         : join("Configuration for MapReduce Job ", $(JOB_ID)));
     commonPreHead(html);
+    set(initID(ACCORDION, "nav"), "{autoHeight:false, active:2}");
     set(DATATABLES_ID, "conf");
     set(initID(DATATABLES, "conf"), confTableInit());
     set(postInitID(DATATABLES, "conf"), confPostTableInit());

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java

@@ -38,9 +38,9 @@ public class NavBlock extends HtmlBlock {
       div("#nav").
         h3("Cluster").
         ul().
-          li().a(url(rmweb, prefix(), "cluster"), "About")._().
-          li().a(url(rmweb, prefix(), "apps"), "Applications")._().
-          li().a(url(rmweb, prefix(), "scheduler"), "Scheduler")._()._().
+          li().a(url(rmweb, "cluster", "cluster"), "About")._().
+          li().a(url(rmweb, "cluster", "apps"), "Applications")._().
+          li().a(url(rmweb, "cluster", "scheduler"), "Scheduler")._()._().
         h3("Application").
         ul().
           li().a(url("app/info"), "About")._().

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java

@@ -85,7 +85,7 @@ public class TaskPage extends AppView {
         if (containerId != null) {
           String containerIdStr = ConverterUtils.toString(containerId);
           nodeTd._(" ").
-            a(".logslink", url("http://", nodeHttpAddr, "yarn", "containerlogs",
+            a(".logslink", url("http://", nodeHttpAddr, "node", "containerlogs",
               containerIdStr), "logs");
         }
         nodeTd._().

+ 19 - 7
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java

@@ -66,6 +66,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -91,7 +92,7 @@ public class MRApp extends MRAppMaster {
   private File testWorkDir;
   private Path testAbsPath;

-  private final RecordFactory recordFactory =
+  private static final RecordFactory recordFactory =
       RecordFactoryProvider.getRecordFactory(null);

   //if true, tasks complete automatically as soon as they are launched
@@ -100,7 +101,7 @@ public class MRApp extends MRAppMaster {
   static ApplicationId applicationId;

   static {
-    applicationId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class);
+    applicationId = recordFactory.newRecordInstance(ApplicationId.class);
     applicationId.setClusterTimestamp(0);
     applicationId.setId(0);
   }
@@ -108,9 +109,19 @@ public class MRApp extends MRAppMaster {
   public MRApp(int maps, int reduces, boolean autoComplete, String testName, boolean cleanOnStart) {
     this(maps, reduces, autoComplete, testName, cleanOnStart, 1);
   }
+  
+  private static ApplicationAttemptId getApplicationAttemptId(
+      ApplicationId applicationId, int startCount) {
+    ApplicationAttemptId applicationAttemptId =
+        recordFactory.newRecordInstance(ApplicationAttemptId.class);
+    applicationAttemptId.setApplicationId(applicationId);
+    applicationAttemptId.setAttemptId(startCount);
+    return applicationAttemptId;
+  }

-  public MRApp(int maps, int reduces, boolean autoComplete, String testName, boolean cleanOnStart, int startCount) {
-    super(applicationId, startCount);
+  public MRApp(int maps, int reduces, boolean autoComplete, String testName, 
+      boolean cleanOnStart, int startCount) {
+    super(getApplicationAttemptId(applicationId, startCount));
     this.testWorkDir = new File("target", testName);
     testAbsPath = new Path(testWorkDir.getAbsolutePath());
     LOG.info("PathUsed: " + testAbsPath);
@@ -391,11 +402,12 @@ public class MRApp extends MRAppMaster {
       return localStateMachine;
     }

-    public TestJob(Configuration conf, ApplicationId appID,
+    public TestJob(Configuration conf, ApplicationId applicationId,
         EventHandler eventHandler, TaskAttemptListener taskAttemptListener,
         Clock clock, String user) {
-      super(appID, conf, eventHandler, taskAttemptListener,
-          new JobTokenSecretManager(), new Credentials(), clock, getStartCount(), 
+      super(getApplicationAttemptId(applicationId, getStartCount()), 
+          conf, eventHandler, taskAttemptListener,
+          new JobTokenSecretManager(), new Credentials(), clock, 
           getCompletedTaskFromPreviousRun(), metrics, user);

       // This "this leak" is okay because the retained pointer is in an

+ 624 - 445
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java

@@ -18,12 +18,15 @@

 package org.apache.hadoop.mapreduce.v2.app;

+import static org.mockito.Matchers.isA;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;

 import junit.framework.Assert;
@@ -32,475 +35,651 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.AMRMProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
-import org.apache.hadoop.yarn.api.records.AMResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.api.records.ApplicationMaster;
-import org.apache.hadoop.yarn.api.records.ApplicationStatus;
-import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.event.Event;
 import org.apache.hadoop.yarn.event.EventHandler;
-import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.ipc.RPCUtil;
-import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
+import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
-import org.junit.BeforeClass;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.junit.After;
 import org.junit.Test;

 public class TestRMContainerAllocator {
-//  private static final Log LOG = LogFactory.getLog(TestRMContainerAllocator.class);
-//  private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
-//
-//  @BeforeClass
-//  public static void preTests() {
-//    DefaultMetricsSystem.shutdown();
-//  }
-//
-//  @Test
-//  public void testSimple() throws Exception {
-//    FifoScheduler scheduler = createScheduler();
-//    LocalRMContainerAllocator allocator = new LocalRMContainerAllocator(
-//        scheduler, new Configuration());
-//
-//    //add resources to scheduler
-//    RMNode nodeManager1 = addNode(scheduler, "h1", 10240);
-//    RMNode nodeManager2 = addNode(scheduler, "h2", 10240);
-//    RMNode nodeManager3 = addNode(scheduler, "h3", 10240);
-//
-//    //create the container request
-//    ContainerRequestEvent event1 = 
-//      createReq(1, 1024, new String[]{"h1"});
-//    allocator.sendRequest(event1);
-//
-//    //send 1 more request with different resource req
-//    ContainerRequestEvent event2 = createReq(2, 1024, new String[]{"h2"});
-//    allocator.sendRequest(event2);
-//
-//    //this tells the scheduler about the requests
-//    //as nodes are not added, no allocations
-//    List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
-//    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
-//
-//    //send another request with different resource and priority
-//    ContainerRequestEvent event3 = createReq(3, 1024, new String[]{"h3"});
-//    allocator.sendRequest(event3);
-//
-//    //this tells the scheduler about the requests
-//    //as nodes are not added, no allocations
-//    assigned = allocator.schedule();
-//    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
-//
-//    //update resources in scheduler
-//    scheduler.nodeUpdate(nodeManager1); // Node heartbeat
-//    scheduler.nodeUpdate(nodeManager2); // Node heartbeat
-//    scheduler.nodeUpdate(nodeManager3); // Node heartbeat
-//
-//
-//    assigned = allocator.schedule();
-//    checkAssignments(
-//        new ContainerRequestEvent[]{event1, event2, event3}, assigned, false);
-//  }
-//
-//  //TODO: Currently Scheduler seems to have bug where it does not work
-//  //for Application asking for containers with different capabilities.
-//  //@Test
-//  public void testResource() throws Exception {
-//    FifoScheduler scheduler = createScheduler();
-//    LocalRMContainerAllocator allocator = new LocalRMContainerAllocator(
-//        scheduler, new Configuration());
-//
-//    //add resources to scheduler
-//    RMNode nodeManager1 = addNode(scheduler, "h1", 10240);
-//    RMNode nodeManager2 = addNode(scheduler, "h2", 10240);
-//    RMNode nodeManager3 = addNode(scheduler, "h3", 10240);
-//
-//    //create the container request
-//    ContainerRequestEvent event1 = 
-//      createReq(1, 1024, new String[]{"h1"});
-//    allocator.sendRequest(event1);
-//
-//    //send 1 more request with different resource req
-//    ContainerRequestEvent event2 = createReq(2, 2048, new String[]{"h2"});
-//    allocator.sendRequest(event2);
-//
-//    //this tells the scheduler about the requests
-//    //as nodes are not added, no allocations
-//    List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
-//    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
-//
-//    //update resources in scheduler
-//    scheduler.nodeUpdate(nodeManager1); // Node heartbeat
-//    scheduler.nodeUpdate(nodeManager2); // Node heartbeat
-//    scheduler.nodeUpdate(nodeManager3); // Node heartbeat
-//
-//    assigned = allocator.schedule();
-//    checkAssignments(
-//        new ContainerRequestEvent[]{event1, event2}, assigned, false);
-//  }
-//
-//  @Test
-//  public void testMapReduceScheduling() throws Exception {
-//    FifoScheduler scheduler = createScheduler();
-//    Configuration conf = new Configuration();
-//    LocalRMContainerAllocator allocator = new LocalRMContainerAllocator(
-//        scheduler, conf);
-//
-//    //add resources to scheduler
-//    RMNode nodeManager1 = addNode(scheduler, "h1", 1024);
-//    RMNode nodeManager2 = addNode(scheduler, "h2", 10240);
-//    RMNode nodeManager3 = addNode(scheduler, "h3", 10240);
-//
-//    //create the container request
-//    //send MAP request
-//    ContainerRequestEvent event1 = 
-//      createReq(1, 2048, new String[]{"h1", "h2"}, true, false);
-//    allocator.sendRequest(event1);
-//
-//    //send REDUCE request
-//    ContainerRequestEvent event2 = createReq(2, 3000, new String[]{"h1"}, false, true);
-//    allocator.sendRequest(event2);
-//
-//    //send MAP request
-//    ContainerRequestEvent event3 = createReq(3, 2048, new String[]{"h3"}, false, false);
-//    allocator.sendRequest(event3);
-//
-//    //this tells the scheduler about the requests
-//    //as nodes are not added, no allocations
-//    List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
-//    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
-//
-//    //update resources in scheduler
-//    scheduler.nodeUpdate(nodeManager1); // Node heartbeat
-//    scheduler.nodeUpdate(nodeManager2); // Node heartbeat
-//    scheduler.nodeUpdate(nodeManager3); // Node heartbeat
-//
-//    assigned = allocator.schedule();
-//    checkAssignments(
-//        new ContainerRequestEvent[]{event1, event3}, assigned, false);
-//
-//    //validate that no container is assigned to h1 as it doesn't have 2048
-//    for (TaskAttemptContainerAssignedEvent assig : assigned) {
-//      Assert.assertFalse("Assigned count not correct", 
-//          "h1".equals(assig.getContainer().getNodeId().getHost()));
-//    }
-//  }
-//
-//
-//
-//  private RMNode addNode(FifoScheduler scheduler, 
-//      String nodeName, int memory) {
-//    NodeId nodeId = recordFactory.newRecordInstance(NodeId.class);
-//    nodeId.setHost(nodeName);
-//    nodeId.setPort(1234);
-//    Resource resource = recordFactory.newRecordInstance(Resource.class);
-//    resource.setMemory(memory);
-//    RMNode nodeManager = new RMNodeImpl(nodeId, null, nodeName, 0, 0,
-//        ResourceTrackerService.resolve(nodeName), resource);
-//    scheduler.addNode(nodeManager); // Node registration
-//    return nodeManager;
-//  }
-//
-//  private FifoScheduler createScheduler() throws YarnRemoteException {
-//    FifoScheduler fsc = new FifoScheduler() {
-//      //override this to copy the objects
-//      //otherwise FifoScheduler updates the numContainers in same objects as kept by
-//      //RMContainerAllocator
-//      
-//      @Override
-//      public synchronized void allocate(ApplicationAttemptId applicationId,
-//          List<ResourceRequest> ask) {
-//        List<ResourceRequest> askCopy = new ArrayList<ResourceRequest>();
-//        for (ResourceRequest req : ask) {
-//          ResourceRequest reqCopy = recordFactory.newRecordInstance(ResourceRequest.class);
-//          reqCopy.setPriority(req.getPriority());
-//          reqCopy.setHostName(req.getHostName());
-//          reqCopy.setCapability(req.getCapability());
-//          reqCopy.setNumContainers(req.getNumContainers());
-//          askCopy.add(reqCopy);
-//        }
-//        super.allocate(applicationId, askCopy);
-//      }
-//    };
-//    try {
-//      fsc.reinitialize(new Configuration(), new ContainerTokenSecretManager(), null);
-//      fsc.addApplication(recordFactory.newRecordInstance(ApplicationId.class),
-//          recordFactory.newRecordInstance(ApplicationMaster.class),
-//          "test", null, null, StoreFactory.createVoidAppStore());
-//    } catch(IOException ie) {
-//      LOG.info("add application failed with ", ie);
-//      assert(false);
-//    }
-//    return fsc;
-//  }
-//
-//  private ContainerRequestEvent createReq(
-//      int attemptid, int memory, String[] hosts) {
-//    return createReq(attemptid, memory, hosts, false, false);
-//  }
-//  
-//  private ContainerRequestEvent createReq(
-//      int attemptid, int memory, String[] hosts, boolean earlierFailedAttempt, boolean reduce) {
-//    ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
-//    appId.setClusterTimestamp(0);
-//    appId.setId(0);
-//    JobId jobId = recordFactory.newRecordInstance(JobId.class);
-//    jobId.setAppId(appId);
-//    jobId.setId(0);
-//    TaskId taskId = recordFactory.newRecordInstance(TaskId.class);
-//    taskId.setId(0);
-//    taskId.setJobId(jobId);
-//    if (reduce) {
-//      taskId.setTaskType(TaskType.REDUCE);
-//    } else {
-//      taskId.setTaskType(TaskType.MAP);
-//    }
-//    TaskAttemptId attemptId = recordFactory.newRecordInstance(TaskAttemptId.class);
-//    attemptId.setId(attemptid);
-//    attemptId.setTaskId(taskId);
-//    Resource containerNeed = recordFactory.newRecordInstance(Resource.class);
-//    containerNeed.setMemory(memory);
-//    if (earlierFailedAttempt) {
-//      return ContainerRequestEvent.
-//           createContainerRequestEventForFailedContainer(attemptId, containerNeed);
-//    }
-//    return new ContainerRequestEvent(attemptId, 
-//        containerNeed, 
-//        hosts, new String[] {NetworkTopology.DEFAULT_RACK});
-//  }
-//
-//  private void checkAssignments(ContainerRequestEvent[] requests, 
-//      List<TaskAttemptContainerAssignedEvent> assignments, 
-//      boolean checkHostMatch) {
-//    Assert.assertNotNull("Container not assigned", assignments);
-//    Assert.assertEquals("Assigned count not correct", 
-//        requests.length, assignments.size());
-//
-//    //check for uniqueness of containerIDs
-//    Set<ContainerId> containerIds = new HashSet<ContainerId>();
-//    for (TaskAttemptContainerAssignedEvent assigned : assignments) {
-//      containerIds.add(assigned.getContainer().getId());
-//    }
-//    Assert.assertEquals("Assigned containers must be different", 
-//        assignments.size(), containerIds.size());
-//
-//    //check for all assignment
-//    for (ContainerRequestEvent req : requests) {
-//      TaskAttemptContainerAssignedEvent assigned = null;
-//      for (TaskAttemptContainerAssignedEvent ass : assignments) {
-//        if (ass.getTaskAttemptID().equals(req.getAttemptID())){
-//          assigned = ass;
-//          break;
-//        }
-//      }
-//      checkAssignment(req, assigned, checkHostMatch);
-//    }
-//  }
-//
-//  private void checkAssignment(ContainerRequestEvent request, 
-//      TaskAttemptContainerAssignedEvent assigned, boolean checkHostMatch) {
-//    Assert.assertNotNull("Nothing assigned to attempt " + request.getAttemptID(),
-//        assigned);
-//    Assert.assertEquals("assigned to wrong attempt", request.getAttemptID(),
-//        assigned.getTaskAttemptID());
-//    if (checkHostMatch) {
-//      Assert.assertTrue("Not assigned to requested host", Arrays.asList(
-//          request.getHosts()).contains(
-//          assigned.getContainer().getNodeId().toString()));
-//    }
-//
-//  }
-//
-//  //Mock RMContainerAllocator
-//  //Instead of talking to remote Scheduler,uses the local Scheduler
-//  public static class LocalRMContainerAllocator extends RMContainerAllocator {
-//    private static final List<TaskAttemptContainerAssignedEvent> events = 
-//      new ArrayList<TaskAttemptContainerAssignedEvent>();
-//
-//    public static class AMRMProtocolImpl implements AMRMProtocol {
-//
-//      private ResourceScheduler resourceScheduler;
-//
-//      public AMRMProtocolImpl(ResourceScheduler resourceScheduler) {
-//        this.resourceScheduler = resourceScheduler;
-//      }
-//
-//      @Override
-//      public RegisterApplicationMasterResponse registerApplicationMaster(RegisterApplicationMasterRequest request) throws YarnRemoteException {
-//        RegisterApplicationMasterResponse response = recordFactory.newRecordInstance(RegisterApplicationMasterResponse.class);
-//        return response;
-//      }
-//
-//      public AllocateResponse allocate(AllocateRequest request) throws YarnRemoteException {
-//        List<ResourceRequest> ask = request.getAskList();
-//        List<Container> release = request.getReleaseList();
-//        try {
-//          AMResponse response = recordFactory.newRecordInstance(AMResponse.class);
-//          Allocation allocation = resourceScheduler.allocate(request.getApplicationAttemptId(), ask);
-//          response.addAllNewContainers(allocation.getContainers());
-//          response.setAvailableResources(allocation.getResourceLimit());
-//          AllocateResponse allocateResponse = recordFactory.newRecordInstance(AllocateResponse.class);
-//          allocateResponse.setAMResponse(response);
-//          return allocateResponse;
-//        } catch(IOException ie) {
-//          throw RPCUtil.getRemoteException(ie);
-//        }
-//      }
-//
-//      @Override
-//      public FinishApplicationMasterResponse finishApplicationMaster(FinishApplicationMasterRequest request) throws YarnRemoteException {
-//        FinishApplicationMasterResponse response = recordFactory.newRecordInstance(FinishApplicationMasterResponse.class);
-//        return response;
-//      }
-//
-//    }
-//
-//    private ResourceScheduler scheduler;
-//    LocalRMContainerAllocator(ResourceScheduler scheduler, Configuration conf) {
-//      super(null, new TestContext(events));
-//      this.scheduler = scheduler;
-//      super.init(conf);
-//      super.start();
-//    }
-//
-//    protected AMRMProtocol createSchedulerProxy() {
-//      return new AMRMProtocolImpl(scheduler);
-//    }
-//
-//    @Override
-//    protected void register() {}
-//    @Override
-//    protected void unregister() {}
-//
-//    @Override
-//    protected Resource getMinContainerCapability() {
-//      Resource res = recordFactory.newRecordInstance(Resource.class);
-//      res.setMemory(1024);
-//      return res;
-//    }
-//    
-//    @Override
-//    protected Resource getMaxContainerCapability() {
-//      Resource res = recordFactory.newRecordInstance(Resource.class);
-//      res.setMemory(10240);
-//      return res;
-//    }
-//    
-//    public void sendRequest(ContainerRequestEvent req) {
-//      sendRequests(Arrays.asList(new ContainerRequestEvent[]{req}));
-//    }
-//
-//    public void sendRequests(List<ContainerRequestEvent> reqs) {
-//      for (ContainerRequestEvent req : reqs) {
-//        handle(req);
-//      }
-//    }
-//
-//    //API to be used by tests
-//    public List<TaskAttemptContainerAssignedEvent> schedule() {
-//      //run the scheduler
-//      try {
-//        heartbeat();
-//      } catch (Exception e) {
-//        LOG.error("error in heartbeat ", e);
-//        throw new YarnException(e);
-//      }
-//
-//      List<TaskAttemptContainerAssignedEvent> result = new ArrayList(events);
-//      events.clear();
-//      return result;
-//    }
-//
-//    protected void startAllocatorThread() {
-//      //override to NOT start thread
-//    }
-//
-//    static class TestContext implements AppContext {
-//      private List<TaskAttemptContainerAssignedEvent> events;
-//      TestContext(List<TaskAttemptContainerAssignedEvent> events) {
-//        this.events = events;
-//      }
-//      @Override
-//      public Map<JobId, Job> getAllJobs() {
-//        return null;
-//      }
-//      @Override
-//      public ApplicationAttemptId getApplicationAttemptId() {
-//        return recordFactory.newRecordInstance(ApplicationAttemptId.class);
-//      }
-//      @Override
-//      public ApplicationId getApplicationID() {
-//        return recordFactory.newRecordInstance(ApplicationId.class);
-//      }
-//      @Override
-//      public EventHandler getEventHandler() {
-//        return new EventHandler() {
-//          @Override
-//          public void handle(Event event) {
-//            events.add((TaskAttemptContainerAssignedEvent) event);
-//          }
-//        };
-//      }
-//      @Override
-//      public Job getJob(JobId jobID) {
-//        return null;
-//      }
-//
-//      @Override
-//      public String getUser() {
-//        return null;
-//      }
-//
-//      @Override
-//      public Clock getClock() {
-//        return null;
-//      }
-//
-//      @Override
-//      public String getApplicationName() {
-//        return null;
-//      }
-//
-//      @Override
-//      public long getStartTime() {
-//        return 0;
-//      }
-//    }
-//  }
-//
-//  public static void main(String[] args) throws Exception {
-//    TestRMContainerAllocator t = new TestRMContainerAllocator();
-//    t.testSimple();
-//    //t.testResource();
-//    t.testMapReduceScheduling();
-//  }
+
+  static final Log LOG = LogFactory
+      .getLog(TestRMContainerAllocator.class);
+  static final RecordFactory recordFactory = RecordFactoryProvider
+      .getRecordFactory(null);
+
+  @After
+  public void tearDown() {
+    DefaultMetricsSystem.shutdown();
+  }
+
+  @Test
+  public void testSimple() throws Exception {
+
+    LOG.info("Running testSimple");
+
+    Configuration conf = new Configuration();
+    MyResourceManager rm = new MyResourceManager(conf);
+    rm.start();
+    DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
+        .getDispatcher();
+
+    // Submit the application
+    RMApp app = rm.submitApp(1024);
+    dispatcher.await();
+
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    amNodeManager.nodeHeartbeat(true);
+    dispatcher.await();
+
+    ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
+        .getAppAttemptId();
+    rm.sendAMLaunched(appAttemptId);
+    dispatcher.await();
+
+    JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
+    Job mockJob = mock(Job.class);
+    when(mockJob.getReport()).thenReturn(
+        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
+            0, 0, 0, 0, 0, 0));
+    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
+        appAttemptId, mockJob);
+
+    // add resources to scheduler
+    MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
+    MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
+    MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
+    dispatcher.await();
+
+    // create the container request
+    ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
+        new String[] { "h1" });
+    allocator.sendRequest(event1);
+
+    // send 1 more request with different resource req
+    ContainerRequestEvent event2 = createReq(jobId, 2, 1024,
+        new String[] { "h2" });
+    allocator.sendRequest(event2);
+
+    // this tells the scheduler about the requests
+    // as nodes are not added, no allocations
+    List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+
+    // send another request with different resource and priority
+    ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
+        new String[] { "h3" });
+    allocator.sendRequest(event3);
+
+    // this tells the scheduler about the requests
+    // as nodes are not added, no allocations
+    assigned = allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+
+    // update resources in scheduler
+    nodeManager1.nodeHeartbeat(true); // Node heartbeat
+    nodeManager2.nodeHeartbeat(true); // Node heartbeat
+    nodeManager3.nodeHeartbeat(true); // Node heartbeat
+    dispatcher.await();
+
+    assigned = allocator.schedule();
+    dispatcher.await();
+    checkAssignments(new ContainerRequestEvent[] { event1, event2, event3 },
+        assigned, false);
+  }
+
+  @Test
+  public void testResource() throws Exception {
+
+    LOG.info("Running testResource");
+
+    Configuration conf = new Configuration();
+    MyResourceManager rm = new MyResourceManager(conf);
+    rm.start();
+    DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
+        .getDispatcher();
+
+    // Submit the application
+    RMApp app = rm.submitApp(1024);
+    dispatcher.await();
+
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    amNodeManager.nodeHeartbeat(true);
+    dispatcher.await();
+
+    ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
+        .getAppAttemptId();
+    rm.sendAMLaunched(appAttemptId);
+    dispatcher.await();
+
+    JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
+    Job mockJob = mock(Job.class);
+    when(mockJob.getReport()).thenReturn(
+        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
+            0, 0, 0, 0, 0, 0));
+    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
+        appAttemptId, mockJob);
+
+    // add resources to scheduler
+    MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
+    MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
+    MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
+    dispatcher.await();
+
+    // create the container request
+    ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
+        new String[] { "h1" });
+    allocator.sendRequest(event1);
+
+    // send 1 more request with different resource req
+    ContainerRequestEvent event2 = createReq(jobId, 2, 2048,
+        new String[] { "h2" });
+    allocator.sendRequest(event2);
+
+    // this tells the scheduler about the requests
+    // as nodes are not added, no allocations
+    List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+
+    // update resources in scheduler
+    nodeManager1.nodeHeartbeat(true); // Node heartbeat
+    nodeManager2.nodeHeartbeat(true); // Node heartbeat
+    nodeManager3.nodeHeartbeat(true); // Node heartbeat
+    dispatcher.await();
+
+    assigned = allocator.schedule();
+    dispatcher.await();
+    checkAssignments(new ContainerRequestEvent[] { event1, event2 },
+        assigned, false);
+  }
+
+  @Test
+  public void testMapReduceScheduling() throws Exception {
+
+    LOG.info("Running testMapReduceScheduling");
+
+    Configuration conf = new Configuration();
+    MyResourceManager rm = new MyResourceManager(conf);
+    rm.start();
+    DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
+        .getDispatcher();
+
+    // Submit the application
+    RMApp app = rm.submitApp(1024);
+    dispatcher.await();
+
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    amNodeManager.nodeHeartbeat(true);
+    dispatcher.await();
+
+    ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
+        .getAppAttemptId();
+    rm.sendAMLaunched(appAttemptId);
+    dispatcher.await();
+
+    JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
+    Job mockJob = mock(Job.class);
+    when(mockJob.getReport()).thenReturn(
+        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
+            0, 0, 0, 0, 0, 0));
+    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
+        appAttemptId, mockJob);
+
+    // add resources to scheduler
+    MockNM nodeManager1 = rm.registerNode("h1:1234", 1024);
+    MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
+    MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
+    dispatcher.await();
+
+    // create the container request
+    // send MAP request
+    ContainerRequestEvent event1 = createReq(jobId, 1, 2048, new String[] {
+        "h1", "h2" }, true, false);
+    allocator.sendRequest(event1);
+
+    // send REDUCE request
+    ContainerRequestEvent event2 = createReq(jobId, 2, 3000,
+        new String[] { "h1" }, false, true);
+    allocator.sendRequest(event2);
+
+    // send MAP request
+    ContainerRequestEvent event3 = createReq(jobId, 3, 2048,
+        new String[] { "h3" }, false, false);
+    allocator.sendRequest(event3);
+
+    // this tells the scheduler about the requests
+    // as nodes are not added, no allocations
+    List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+
+    // update resources in scheduler
+    nodeManager1.nodeHeartbeat(true); // Node heartbeat
+    nodeManager2.nodeHeartbeat(true); // Node heartbeat
+    nodeManager3.nodeHeartbeat(true); // Node heartbeat
+    dispatcher.await();
+
+    assigned = allocator.schedule();
+    dispatcher.await();
+    checkAssignments(new ContainerRequestEvent[] { event1, event3 },
+        assigned, false);
+
+    // validate that no container is assigned to h1 as it doesn't have 2048
+    for (TaskAttemptContainerAssignedEvent assig : assigned) {
+      Assert.assertFalse("Assigned count not correct", "h1".equals(assig
+          .getContainer().getNodeId().getHost()));
+    }
+  }
+
+  private static class MyResourceManager extends MockRM {
+
+    public MyResourceManager(Configuration conf) {
+      super(conf);
+    }
+
+    @Override
+    protected Dispatcher createDispatcher() {
+      return new DrainDispatcher();
+    }
+
+    @Override
+    protected EventHandler<SchedulerEvent> createSchedulerEventDispatcher() {
+      // Dispatch inline for test sanity
+      return new EventHandler<SchedulerEvent>() {
+        @Override
+        public void handle(SchedulerEvent event) {
+          scheduler.handle(event);
+        }
+      };
+    }
+    @Override
+    protected ResourceScheduler createScheduler() {
+      return new MyFifoScheduler(getRMContext());
+    }
+  }
+
+  private static class FakeJob extends JobImpl {
+
+    public FakeJob(ApplicationAttemptId appAttemptID, Configuration conf,
+        int numMaps, int numReduces) {
+      super(appAttemptID, conf, null, null, null, null, null, null, null,
+          null);
+      this.jobId = MRBuilderUtils
+          .newJobId(appAttemptID.getApplicationId(), 0);
+      this.numMaps = numMaps;
+      this.numReduces = numReduces;
+    }
+
+    private float setupProgress;
+    private float mapProgress;
+    private float reduceProgress;
+    private float cleanupProgress;
+    private final int numMaps;
+    private final int numReduces;
+    private JobId jobId;
+
+    void setProgress(float setupProgress, float mapProgress,
+        float reduceProgress, float cleanupProgress) {
+      this.setupProgress = setupProgress;
+      this.mapProgress = mapProgress;
+      this.reduceProgress = reduceProgress;
+      this.cleanupProgress = cleanupProgress;
+    }
+
+    @Override
+    public int getTotalMaps() { return this.numMaps; }
+    @Override
+    public int getTotalReduces() { return this.numReduces;}
+
+    @Override
+    public JobReport getReport() {
+      return MRBuilderUtils.newJobReport(this.jobId, "job", "user",
+          JobState.RUNNING, 0, 0, this.setupProgress, this.mapProgress,
+          this.reduceProgress, this.cleanupProgress);
+    }
+  }
+
+  @Test
+  public void testReportedAppProgress() throws Exception {
+
+    LOG.info("Running testReportedAppProgress");
+
+    Configuration conf = new Configuration();
+    MyResourceManager rm = new MyResourceManager(conf);
+    rm.start();
+    DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
+        .getDispatcher();
+
+    // Submit the application
+    RMApp app = rm.submitApp(1024);
+    dispatcher.await();
+
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    amNodeManager.nodeHeartbeat(true);
+    dispatcher.await();
+
+    ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
+        .getAppAttemptId();
+    rm.sendAMLaunched(appAttemptId);
+    dispatcher.await();
+
+    FakeJob job = new FakeJob(appAttemptId, conf, 2, 2);
+    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
+        appAttemptId, job);
+
+    allocator.schedule(); // Send heartbeat
+    dispatcher.await();
+    Assert.assertEquals(0.0, app.getProgress(), 0.0);
+
+    job.setProgress(100, 10, 0, 0);
+    allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals(9.5f, app.getProgress(), 0.0);
+
+    job.setProgress(100, 80, 0, 0);
+    allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals(41.0f, app.getProgress(), 0.0);
+
+    job.setProgress(100, 100, 20, 0);
+    allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals(59.0f, app.getProgress(), 0.0);
+
+    job.setProgress(100, 100, 100, 100);
+    allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals(100.0f, app.getProgress(), 0.0);
+  }
+
+  @Test
+  public void testReportedAppProgressWithOnlyMaps() throws Exception {
+
+    LOG.info("Running testReportedAppProgressWithOnlyMaps");
+
+    Configuration conf = new Configuration();
+    MyResourceManager rm = new MyResourceManager(conf);
+    rm.start();
+    DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
+        .getDispatcher();
+
+    // Submit the application
+    RMApp app = rm.submitApp(1024);
+    dispatcher.await();
+
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    amNodeManager.nodeHeartbeat(true);
+    dispatcher.await();
+
+    ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
+        .getAppAttemptId();
+    rm.sendAMLaunched(appAttemptId);
+    dispatcher.await();
+
+    FakeJob job = new FakeJob(appAttemptId, conf, 2, 0);
+    MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
+        appAttemptId, job);
+
+    allocator.schedule(); // Send heartbeat
+    dispatcher.await();
+    Assert.assertEquals(0.0, app.getProgress(), 0.0);
+
+    job.setProgress(100, 10, 0, 0);
+    allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals(14f, app.getProgress(), 0.0);
+
+    job.setProgress(100, 60, 0, 0);
+    allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals(59.0f, app.getProgress(), 0.0);
+
+    job.setProgress(100, 100, 0, 100);
+    allocator.schedule();
+    dispatcher.await();
+    Assert.assertEquals(100.0f, app.getProgress(), 0.0);
+  }
+
+  private static class MyFifoScheduler extends FifoScheduler {
+
+    public MyFifoScheduler(RMContext rmContext) {
+      super();
+      try {
+        reinitialize(new Configuration(), new ContainerTokenSecretManager(),
+            rmContext);
+      } catch (IOException ie) {
+        LOG.info("add application failed with ", ie);
+        assert (false);
+      }
+    }
+
+    // override this to copy the objects otherwise FifoScheduler updates the
+    // numContainers in same objects as kept by RMContainerAllocator
+    @Override
+    public synchronized Allocation allocate(
+        ApplicationAttemptId applicationAttemptId, List<ResourceRequest> ask,
+        List<ContainerId> release) {
+      List<ResourceRequest> askCopy = new ArrayList<ResourceRequest>();
+      for (ResourceRequest req : ask) {
+        ResourceRequest reqCopy = BuilderUtils.newResourceRequest(req
+            .getPriority(), req.getHostName(), req.getCapability(), req
+            .getNumContainers());
+        askCopy.add(reqCopy);
+      }
+      return super.allocate(applicationAttemptId, askCopy, release);
+    }
+  }
+
+  private ContainerRequestEvent createReq(JobId jobId, int taskAttemptId,
+      int memory, String[] hosts) {
+    return createReq(jobId, taskAttemptId, memory, hosts, false, false);
+  }
+
+  private ContainerRequestEvent
+      createReq(JobId jobId, int taskAttemptId, int memory, String[] hosts,
+          boolean earlierFailedAttempt, boolean reduce) {
+    TaskId taskId;
+    if (reduce) {
+      taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
+    } else {
+      taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
+    }
+    TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
+        taskAttemptId);
+    Resource containerNeed = BuilderUtils.newResource(memory);
+    if (earlierFailedAttempt) {
+      return ContainerRequestEvent
+          .createContainerRequestEventForFailedContainer(attemptId,
+              containerNeed);
+    }
+    return new ContainerRequestEvent(attemptId, containerNeed, hosts,
+        new String[] { NetworkTopology.DEFAULT_RACK });
+  }
+
+  private void checkAssignments(ContainerRequestEvent[] requests,
+      List<TaskAttemptContainerAssignedEvent> assignments,
+      boolean checkHostMatch) {
+    Assert.assertNotNull("Container not assigned", assignments);
+    Assert.assertEquals("Assigned count not correct", requests.length,
+        assignments.size());
+
+    // check for uniqueness of containerIDs
+    Set<ContainerId> containerIds = new HashSet<ContainerId>();
+    for (TaskAttemptContainerAssignedEvent assigned : assignments) {
+      containerIds.add(assigned.getContainer().getId());
+    }
+    Assert.assertEquals("Assigned containers must be different", assignments
+        .size(), containerIds.size());
+
+    // check for all assignment
+    for (ContainerRequestEvent req : requests) {
+      TaskAttemptContainerAssignedEvent assigned = null;
+      for (TaskAttemptContainerAssignedEvent ass : assignments) {
+        if (ass.getTaskAttemptID().equals(req.getAttemptID())) {
+          assigned = ass;
+          break;
+        }
+      }
+      checkAssignment(req, assigned, checkHostMatch);
+    }
+  }
+
+  private void checkAssignment(ContainerRequestEvent request,
+      TaskAttemptContainerAssignedEvent assigned, boolean checkHostMatch) {
+    Assert.assertNotNull("Nothing assigned to attempt "
+        + request.getAttemptID(), assigned);
+    Assert.assertEquals("assigned to wrong attempt", request.getAttemptID(),
+        assigned.getTaskAttemptID());
+    if (checkHostMatch) {
+      Assert.assertTrue("Not assigned to requested host", Arrays.asList(
+          request.getHosts()).contains(
+          assigned.getContainer().getNodeId().toString()));
+    }
+  }
+
+  // Mock RMContainerAllocator
+  // Instead of talking to the remote scheduler, it uses the local scheduler
+  private static class MyContainerAllocator extends RMContainerAllocator {
+    static final List<TaskAttemptContainerAssignedEvent> events
+      = new ArrayList<TaskAttemptContainerAssignedEvent>();
+
+    private MyResourceManager rm;
+
+    @SuppressWarnings("rawtypes")
+    private static AppContext createAppContext(
+        ApplicationAttemptId appAttemptId, Job job) {
+      AppContext context = mock(AppContext.class);
+      ApplicationId appId = appAttemptId.getApplicationId();
+      when(context.getApplicationID()).thenReturn(appId);
+      when(context.getApplicationAttemptId()).thenReturn(appAttemptId);
+      when(context.getJob(isA(JobId.class))).thenReturn(job);
+      when(context.getEventHandler()).thenReturn(new EventHandler() {
+        @Override
+        public void handle(Event event) {
+          // Only capture interesting events.
+          if (event instanceof TaskAttemptContainerAssignedEvent) {
+            events.add((TaskAttemptContainerAssignedEvent) event);
+          }
+        }
+      });
+      return context;
+    }
+
+    private static ClientService createMockClientService() {
+      ClientService service = mock(ClientService.class);
+      when(service.getBindAddress()).thenReturn(
+          NetUtils.createSocketAddr("localhost:4567"));
+      when(service.getHttpPort()).thenReturn(890);
+      return service;
+    }
+
+    MyContainerAllocator(MyResourceManager rm, Configuration conf,
+        ApplicationAttemptId appAttemptId, Job job) {
+      super(createMockClientService(), createAppContext(appAttemptId, job));
+      this.rm = rm;
+      super.init(conf);
+      super.start();
+    }
+
+    @Override
+    protected AMRMProtocol createSchedulerProxy() {
+      return this.rm.getApplicationMasterService();
+    }
+
+    @Override
+    protected void register() {
+      super.register();
+    }
+
+    @Override
+    protected void unregister() {
+    }
+
+    @Override
+    protected Resource getMinContainerCapability() {
+      return BuilderUtils.newResource(1024);
+    }
+
+    @Override
+    protected Resource getMaxContainerCapability() {
+      return BuilderUtils.newResource(10240);
+    }
+
+    public void sendRequest(ContainerRequestEvent req) {
+      sendRequests(Arrays.asList(new ContainerRequestEvent[] { req }));
+    }
+
+    public void sendRequests(List<ContainerRequestEvent> reqs) {
+      for (ContainerRequestEvent req : reqs) {
+        super.handle(req);
+      }
+    }
+
+    // API to be used by tests
+    public List<TaskAttemptContainerAssignedEvent> schedule() {
+      // run the scheduler
+      try {
+        super.heartbeat();
+      } catch (Exception e) {
+        LOG.error("error in heartbeat ", e);
+        throw new YarnException(e);
+      }
+
+      List<TaskAttemptContainerAssignedEvent> result
+        = new ArrayList<TaskAttemptContainerAssignedEvent>(events);
+      events.clear();
+      return result;
+    }
+
+    protected void startAllocatorThread() {
+      // override to NOT start thread
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    TestRMContainerAllocator t = new TestRMContainerAllocator();
+    t.testSimple();
+    t.testResource();
+    t.testMapReduceScheduling();
+    t.testReportedAppProgress();
+    t.testReportedAppProgressWithOnlyMaps();
+  }
 }
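The progress values asserted in the two tests above are consistent with a fixed weighting: roughly 5% for setup, 5% for cleanup, and the remaining 90% split evenly between map and reduce (or given entirely to maps when the job has no reducers). A minimal sketch of that arithmetic, inferred from the asserted numbers rather than taken from the patch:

// Hypothetical helper class; the argument order (setup, map, reduce, cleanup)
// mirrors FakeJob.setProgress(...) above, and the weights are inferred from
// the asserted values rather than copied from RMContainerAllocator.
class ExpectedProgress {
  static float of(float setup, float map, float reduce, float cleanup,
      boolean hasReduces) {
    float phases = hasReduces ? 0.45f * map + 0.45f * reduce : 0.90f * map;
    return 0.05f * setup + 0.05f * cleanup + phases;
  }
}
// ExpectedProgress.of(100, 10, 0, 0, true)      -> 9.5   (first assert above)
// ExpectedProgress.of(100, 100, 20, 0, true)    -> 59.0
// ExpectedProgress.of(100, 60, 0, 0, false)     -> 59.0  (maps-only test)
// ExpectedProgress.of(100, 100, 0, 100, false)  -> 100.0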

+ 24 - 10
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java

@@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.ApplicationState;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.QueueACL;
+import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
@@ -280,15 +281,27 @@ public class TypeConverter {
   }
   
   public static org.apache.hadoop.mapred.JobStatus fromYarn(
-      JobReport jobreport, String jobFile, String trackingUrl) {
+      JobReport jobreport, String jobFile) {
     JobPriority jobPriority = JobPriority.NORMAL;
-    return new org.apache.hadoop.mapred.JobStatus(fromYarn(jobreport.getJobId()),
-        jobreport.getSetupProgress(), jobreport.getMapProgress(),
-        jobreport.getReduceProgress(), jobreport.getCleanupProgress(),
-        fromYarn(jobreport.getJobState()),
-        jobPriority, jobreport.getUser(), jobreport.getJobName(),
-        jobFile, trackingUrl);
+    org.apache.hadoop.mapred.JobStatus jobStatus =
+        new org.apache.hadoop.mapred.JobStatus(fromYarn(jobreport.getJobId()),
+            jobreport.getSetupProgress(), jobreport.getMapProgress(),
+            jobreport.getReduceProgress(), jobreport.getCleanupProgress(),
+            fromYarn(jobreport.getJobState()),
+            jobPriority, jobreport.getUser(), jobreport.getJobName(),
+            jobFile, jobreport.getTrackingUrl());
+    jobStatus.setFailureInfo(jobreport.getDiagnostics());
+    return jobStatus;
+  }
+  
+  public static org.apache.hadoop.mapreduce.QueueState fromYarn(
+      QueueState state) {
+    org.apache.hadoop.mapreduce.QueueState qState = 
+      org.apache.hadoop.mapreduce.QueueState.getState(
+        state.toString().toLowerCase());
+    return qState;
   }
+
   
   public static int fromYarn(JobState state) {
     switch (state) {
@@ -412,6 +425,7 @@ public class TypeConverter {
       );
     jobStatus.setSchedulingInfo(trackingUrl); // Set AM tracking url
     jobStatus.setStartTime(application.getStartTime());
+    jobStatus.setFailureInfo(application.getDiagnostics());
     return jobStatus;
   }
 
@@ -431,9 +445,9 @@ public class TypeConverter {
   
   
   public static QueueInfo fromYarn(org.apache.hadoop.yarn.api.records.QueueInfo 
       queueInfo, Configuration conf) {
-    return new QueueInfo(queueInfo.getQueueName(), 
-        queueInfo.toString(), QueueState.RUNNING, 
-        TypeConverter.fromYarnApps(queueInfo.getApplications(), conf));
+    return new QueueInfo(queueInfo.getQueueName(),queueInfo.toString(),
+        fromYarn(queueInfo.getQueueState()), TypeConverter.fromYarnApps(
+        queueInfo.getApplications(), conf));
   }
   
   public static QueueInfo[] fromYarnQueueInfo(
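As a quick illustration of the new QueueState conversion added above, a minimal sketch (hypothetical class name; it relies only on the methods and enum values visible in this patch and in the TestTypeConverter change further below):

import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.yarn.api.records.QueueState;

public class QueueStateConversionExample {
  public static void main(String[] args) {
    // YARN reports queue state as an upper-case enum; the converter
    // lower-cases the name to resolve the MapReduce-side QueueState.
    org.apache.hadoop.mapreduce.QueueState converted =
        TypeConverter.fromYarn(QueueState.STOPPED);
    // Expected to print "stopped", matching the assertion in the new
    // testFromYarnQueueInfo test below.
    System.out.println(converted);
  }
}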

+ 0 - 50
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/MRConstants.java

@@ -1,50 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-package org.apache.hadoop.mapreduce.v2;
-
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public interface MRConstants {
-  // This should be the directory where splits file gets localized on the node
-  // running ApplicationMaster.
-  public static final String JOB_SUBMIT_DIR = "jobSubmitDir";
-  
-  // This should be the name of the localized job-configuration file on the node
-  // running ApplicationMaster and Task
-  public static final String JOB_CONF_FILE = "job.xml";
-  // This should be the name of the localized job-jar file on the node running
-  // individual containers/tasks.
-  public static final String JOB_JAR = "job.jar";
-
-  public static final String HADOOP_MAPREDUCE_CLIENT_APP_JAR_NAME =
-      "hadoop-mapreduce-client-app-0.24.0-SNAPSHOT.jar";
-
-  public static final String YARN_MAPREDUCE_APP_JAR_PATH =
-    "$YARN_HOME/modules/" + HADOOP_MAPREDUCE_CLIENT_APP_JAR_NAME;
-
-  // The token file for the application. Should contain tokens for access to
-  // remote file system and may optionally contain application specific tokens.
-  // For now, generated by the AppManagers and used by NodeManagers and the
-  // Containers.
-  public static final String APPLICATION_TOKENS_FILE = "appTokens";
-}

+ 4 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java

@@ -29,6 +29,8 @@ public interface JobReport {
   public abstract long getFinishTime();
   public abstract String getUser();
   public abstract String getJobName();
+  public abstract String getTrackingUrl();
+  public abstract String getDiagnostics();
 
   public abstract void setJobId(JobId jobId);
   public abstract void setJobState(JobState jobState);
@@ -40,4 +42,6 @@ public interface JobReport {
   public abstract void setFinishTime(long finishTime);
   public abstract void setUser(String user);
   public abstract void setJobName(String jobName);
+  public abstract void setTrackingUrl(String trackingUrl);
+  public abstract void setDiagnostics(String diagnostics);
 }

+ 24 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java

@@ -206,6 +206,30 @@ public class JobReportPBImpl extends ProtoBase<JobReportProto> implements JobRep
     builder.setJobName((jobName));
   }
 
+  @Override
+  public String getTrackingUrl() {
+    JobReportProtoOrBuilder p = viaProto ? proto : builder;
+    return (p.getTrackingUrl());
+  }
+
+  @Override
+  public void setTrackingUrl(String trackingUrl) {
+    maybeInitBuilder();
+    builder.setTrackingUrl(trackingUrl);
+  }
+
+  @Override
+  public String getDiagnostics() {
+    JobReportProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getDiagnostics();
+  }
+
+  @Override
+  public void setDiagnostics(String diagnostics) {
+    maybeInitBuilder();
+    builder.setDiagnostics(diagnostics);
+  }
+
   private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
     return new JobIdPBImpl(p);
   }

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java

@@ -489,7 +489,7 @@ public class JobHistoryUtils {
       sb.append(address.getHostName());
     }
     sb.append(":").append(address.getPort());
-    sb.append("/yarn/job/"); // TODO This will change when the history server
+    sb.append("/jobhistory/job/"); // TODO This will change when the history server
                             // understands apps.
     // TODO Use JobId toString once UI stops using _id_id
     sb.append("job_").append(appId.getClusterTimestamp());

+ 75 - 42
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java

@@ -39,14 +39,14 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.filecache.DistributedCache;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
-import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.yarn.YarnException;
+import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
+import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.LocalResource;
 import org.apache.hadoop.yarn.api.records.LocalResourceType;
@@ -167,7 +167,7 @@ public class MRApps extends Apps {
     return TaskAttemptStateUI.valueOf(attemptStateStr);
   }
 
-  public static void setInitialClasspath(
+  private static void setMRFrameworkClasspath(
       Map<String, String> environment) throws IOException {
     InputStream classpathFileStream = null;
     BufferedReader reader = null;
@@ -182,30 +182,17 @@ public class MRApps extends Apps {
       reader = new BufferedReader(new InputStreamReader(classpathFileStream));
       String cp = reader.readLine();
       if (cp != null) {
-        addToClassPath(environment, cp.trim());
+        addToEnvironment(environment, Environment.CLASSPATH.name(), cp.trim());
       }
       // Put the file itself on classpath for tasks.
-      addToClassPath(environment,
+      addToEnvironment(
+          environment,
+          Environment.CLASSPATH.name(),
           thisClassLoader.getResource(mrAppGeneratedClasspathFile).getFile());
 
-      // If runtime env is different.
-      if (System.getenv().get("YARN_HOME") != null) {
-        ShellCommandExecutor exec =
-            new ShellCommandExecutor(new String[] {
-                System.getenv().get("YARN_HOME") + "/bin/yarn",
-            "classpath" });
-        exec.execute();
-        addToClassPath(environment, exec.getOutput().trim());
-      }
-
-      // Get yarn mapreduce-app classpath
-      if (System.getenv().get("HADOOP_MAPRED_HOME")!= null) {
-        ShellCommandExecutor exec =
-            new ShellCommandExecutor(new String[] {
-                System.getenv().get("HADOOP_MAPRED_HOME") + "/bin/mapred",
-            "classpath" });
-        exec.execute();
-        addToClassPath(environment, exec.getOutput().trim());
+      // Add standard Hadoop classes
+      for (String c : ApplicationConstants.APPLICATION_CLASSPATH) {
+        addToEnvironment(environment, Environment.CLASSPATH.name(), c);
       }
     } finally {
       if (classpathFileStream != null) {
@@ -217,20 +204,35 @@ public class MRApps extends Apps {
     }
     // TODO: Remove duplicates.
   }
-
-  public static void addToClassPath(
-      Map<String, String> environment, String fileName) {
-    String classpath = environment.get(CLASSPATH);
-    if (classpath == null) {
-      classpath = fileName;
+  
+  private static final String SYSTEM_PATH_SEPARATOR = 
+      System.getProperty("path.separator");
+
+  public static void addToEnvironment(
+      Map<String, String> environment, 
+      String variable, String value) {
+    String val = environment.get(variable);
+    if (val == null) {
+      val = value;
     } else {
-      classpath = classpath + ":" + fileName;
+      val = val + SYSTEM_PATH_SEPARATOR + value;
     }
-    environment.put(CLASSPATH, classpath);
+    environment.put(variable, val);
   }
 
-  public static final String CLASSPATH = "CLASSPATH";
-
+  public static void setClasspath(Map<String, String> environment) 
+      throws IOException {
+    MRApps.addToEnvironment(
+        environment, 
+        Environment.CLASSPATH.name(), 
+        MRJobConfig.JOB_JAR);
+    MRApps.addToEnvironment(
+        environment, 
+        Environment.CLASSPATH.name(),
+        Environment.PWD.$() + Path.SEPARATOR + "*");
+    MRApps.setMRFrameworkClasspath(environment);
+  }
+  
   private static final String STAGING_CONSTANT = ".staging";
   private static final String STAGING_CONSTANT = ".staging";
   public static Path getStagingAreaDir(Configuration conf, String user) {
     return new Path(
   public static String getJobFile(Configuration conf, String user, 
   public static String getJobFile(Configuration conf, String user, 
       org.apache.hadoop.mapreduce.JobID jobId) {
     Path jobFile = new Path(MRApps.getStagingAreaDir(conf, user),
-        jobId.toString() + Path.SEPARATOR + MRConstants.JOB_CONF_FILE);
+        jobId.toString() + Path.SEPARATOR + MRJobConfig.JOB_CONF_FILE);
     return jobFile.toString();
   }
   
@@ -260,12 +262,11 @@ public class MRApps extends Apps {
 
 
   public static void setupDistributedCache( 
       Configuration conf, 
-      Map<String, LocalResource> localResources,
-      Map<String, String> env) 
+      Map<String, LocalResource> localResources) 
   throws IOException {
     
     // Cache archives
-    parseDistributedCacheArtifacts(conf, localResources, env, 
+    parseDistributedCacheArtifacts(conf, localResources,  
         LocalResourceType.ARCHIVE, 
         DistributedCache.getCacheArchives(conf), 
         parseTimeStamps(DistributedCache.getArchiveTimestamps(conf)), 
@@ -275,7 +276,7 @@ public class MRApps extends Apps {
     
     
     // Cache files
     parseDistributedCacheArtifacts(conf, 
-        localResources, env, 
+        localResources,  
         LocalResourceType.FILE, 
         DistributedCache.getCacheFiles(conf),
         parseTimeStamps(DistributedCache.getFileTimestamps(conf)),
@@ -290,7 +291,6 @@ public class MRApps extends Apps {
   private static void parseDistributedCacheArtifacts(
       Configuration conf,
       Map<String, LocalResource> localResources,
-      Map<String, String> env,
       LocalResourceType type,
       URI[] uris, long[] timestamps, long[] sizes, boolean visibilities[], 
       Path[] pathsToPutOnClasspath) throws IOException {
@@ -339,9 +339,6 @@ public class MRApps extends Apps {
                   : LocalResourceVisibility.PRIVATE,
                 sizes[i], timestamps[i])
         );
-        if (classPaths.containsKey(u.getPath())) {
-          MRApps.addToClassPath(env, linkName);
-        }
       }
     }
   }
@@ -358,6 +355,42 @@ public class MRApps extends Apps {
     }
     return result;
   }
+
+  public static void setEnvFromInputString(Map<String, String> env,
+      String envString) {
+    if (envString != null && envString.length() > 0) {
+      String childEnvs[] = envString.split(",");
+      for (String cEnv : childEnvs) {
+        String[] parts = cEnv.split("="); // split on '='
+        String value = env.get(parts[0]);
+  
+        if (value != null) {
+          // Replace $env with the child's env constructed by NM's
+          // For example: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp
+          value = parts[1].replace("$" + parts[0], value);
+        } else {
+          // example PATH=$PATH:/tmp
+          value = System.getenv(parts[0]);
+          if (value != null) {
+            // the env key is present in the tt's env
+            value = parts[1].replace("$" + parts[0], value);
+          } else {
+            // check for simple variable substitution
+            // for e.g. ROOT=$HOME
+            String envValue = System.getenv(parts[1].substring(1)); 
+            if (envValue != null) {
+              value = envValue;
+            } else {
+              // the env key is not present anywhere .. simply set it
+              // example X=$X:/tmp or X=/tmp
+              value = parts[1].replace("$" + parts[0], "");
+            }
+          }
+        }
+        addToEnvironment(env, parts[0], value);
+      }
+    }
+  }
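To make the new environment helpers above concrete, here is a small hypothetical usage sketch (class name and values invented; the behaviour follows the code in the hunks above):

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.mapreduce.v2.util.MRApps;

public class MRAppsEnvExample {
  public static void main(String[] args) {
    Map<String, String> env = new HashMap<String, String>();
    // addToEnvironment appends with the platform path separator
    // (System.getProperty("path.separator")) instead of a hard-coded ':'.
    MRApps.addToEnvironment(env, "CLASSPATH", "job.jar");
    MRApps.addToEnvironment(env, "CLASSPATH", "./*");
    System.out.println(env.get("CLASSPATH")); // job.jar<pathSep>./*

    // setEnvFromInputString parses comma-separated NAME=VALUE pairs and
    // expands $NAME from the map or the launching process environment.
    MRApps.setEnvFromInputString(env,
        "LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp,FOO=bar");
    System.out.println(env.get("FOO")); // bar
  }
}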
   
   
 
 
 
 

+ 24 - 8
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java

@@ -19,27 +19,25 @@
 package org.apache.hadoop.mapreduce.v2.util;
 
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.factories.RecordFactory;
-import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.util.Records;
 
 
 public class MRBuilderUtils {
 
-  private static final RecordFactory recordFactory = RecordFactoryProvider
-      .getRecordFactory(null);
-
   public static JobId newJobId(ApplicationId appId, int id) {
-    JobId jobId = recordFactory.newRecordInstance(JobId.class);
+    JobId jobId = Records.newRecord(JobId.class);
     jobId.setAppId(appId);
     jobId.setId(id);
     return jobId;
   }
 
 
   public static TaskId newTaskId(JobId jobId, int id, TaskType taskType) {
-    TaskId taskId = recordFactory.newRecordInstance(TaskId.class);
+    TaskId taskId = Records.newRecord(TaskId.class);
     taskId.setJobId(jobId);
     taskId.setId(id);
     taskId.setTaskType(taskType);
@@ -48,9 +46,27 @@ public class MRBuilderUtils {
 
 
   public static TaskAttemptId newTaskAttemptId(TaskId taskId, int attemptId) {
     TaskAttemptId taskAttemptId =
-        recordFactory.newRecordInstance(TaskAttemptId.class);
+        Records.newRecord(TaskAttemptId.class);
     taskAttemptId.setTaskId(taskId);
     taskAttemptId.setId(attemptId);
     return taskAttemptId;
   }
+
+  public static JobReport newJobReport(JobId jobId, String jobName,
+      String userName, JobState state, long startTime, long finishTime,
+      float setupProgress, float mapProgress, float reduceProgress,
+      float cleanupProgress) {
+    JobReport report = Records.newRecord(JobReport.class);
+    report.setJobId(jobId);
+    report.setJobName(jobName);
+    report.setUser(userName);
+    report.setJobState(state);
+    report.setStartTime(startTime);
+    report.setFinishTime(finishTime);
+    report.setSetupProgress(setupProgress);
+    report.setCleanupProgress(cleanupProgress);
+    report.setMapProgress(mapProgress);
+    report.setReduceProgress(reduceProgress);
+    return report;
+  }
 }
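Tying the JobReport-related hunks together (the new interface methods, the protobuf fields, and this builder), a minimal sketch of how they might be used; the application id, names, and paths below are invented:

// Assumes an ApplicationId named appId obtained elsewhere.
JobId jobId = MRBuilderUtils.newJobId(appId, 1);
JobReport report = MRBuilderUtils.newJobReport(jobId, "wordcount", "alice",
    JobState.RUNNING, /*startTime*/ 0L, /*finishTime*/ 0L,
    /*setup*/ 1.0f, /*map*/ 0.5f, /*reduce*/ 0.0f, /*cleanup*/ 0.0f);
// The builder does not set the two new fields; callers do that explicitly.
report.setTrackingUrl("host:8088/proxy/application_1316000000000_0001/");
report.setDiagnostics("");
// TypeConverter (earlier in this change) now reads the tracking URL and
// diagnostics straight from the report:
org.apache.hadoop.mapred.JobStatus status = TypeConverter.fromYarn(report,
    "/staging/alice/.staging/job_1316000000000_0001/job.xml");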

+ 2 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto

@@ -143,6 +143,8 @@ message JobReportProto {
   optional int64 finish_time = 8;
   optional string user = 9;
   optional string jobName = 10;
+  optional string trackingUrl = 11;
+  optional string diagnostics = 12;
 }
 
 enum TaskAttemptCompletionEventStatusProto {

+ 13 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java

@@ -19,11 +19,14 @@ package org.apache.hadoop.mapreduce;
 
 
 import junit.framework.Assert;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationState;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.QueueInfoPBImpl;
+
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 import org.junit.Test;
@@ -67,4 +70,14 @@ public class TestTypeConverter {
     Assert.assertEquals("jobId set incorrectly", 6789, status.getJobID().getId());
     Assert.assertEquals("state set incorrectly", JobStatus.State.KILLED, status.getState());
   }
+
+  @Test
+  public void testFromYarnQueueInfo() {
+    org.apache.hadoop.yarn.api.records.QueueInfo queueInfo = new QueueInfoPBImpl();
+    queueInfo.setQueueState(org.apache.hadoop.yarn.api.records.QueueState.STOPPED);
+    org.apache.hadoop.mapreduce.QueueInfo returned =
+      TypeConverter.fromYarn(queueInfo, new Configuration());
+    Assert.assertEquals("queueInfo translation didn't work.",
+      returned.getState().toString(), queueInfo.getQueueState().toString().toLowerCase());
+  }
 }

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/util/TestMRApps.java

@@ -25,7 +25,6 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
-import org.apache.hadoop.mapreduce.v2.MRConstants;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -115,7 +114,8 @@ public class TestMRApps {
   @Test public void testGetJobFileWithUser() {
     Configuration conf = new Configuration();
     conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/my/path/to/staging");
-    String jobFile = MRApps.getJobFile(conf, "dummy-user", new JobID("dummy-job", 12345));
+    String jobFile = MRApps.getJobFile(conf, "dummy-user", 
+        new JobID("dummy-job", 12345));
     assertNotNull("getJobFile results in null.", jobFile);
     assertEquals("jobFile with specified user is not as expected.",
         "/my/path/to/staging/dummy-user/.staging/job_dummy-job_12345/job.xml", jobFile);

+ 2 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/BackupStore.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.mapred.IFile.Reader;
 import org.apache.hadoop.mapred.IFile.Writer;
 import org.apache.hadoop.mapred.Merger.Segment;
 import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 /**
@@ -560,7 +561,7 @@ public class BackupStore<K,V> {
 
 
     private Writer<K,V> createSpillFile() throws IOException {
       Path tmp =
-          new Path(Constants.OUTPUT + "/backup_" + tid.getId() + "_"
+          new Path(MRJobConfig.OUTPUT + "/backup_" + tid.getId() + "_"
               + (spillNumber++) + ".out");
 
       LOG.info("Created file: " + tmp);

+ 1 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java

@@ -348,6 +348,7 @@ public class JobConf extends Configuration {
    */
   public static final Level DEFAULT_LOG_LEVEL = Level.INFO;
   
+  
   /**
    * Construct a map/reduce job configuration.
    */

+ 4 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobStatus.java

@@ -321,6 +321,10 @@ public class JobStatus extends org.apache.hadoop.mapreduce.JobStatus {
      super.setJobACLs(acls);
    }
 
+   public synchronized void setFailureInfo(String failureInfo) {
+     super.setFailureInfo(failureInfo);
+   }
+   
   /**
    * Set the priority of the job, defaulting to NORMAL.
    * @param jp new job priority

+ 7 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java

@@ -17,11 +17,16 @@
  */
 package org.apache.hadoop.mapred;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
 /*******************************
  * Some handy constants
  * 
  *******************************/
-interface MRConstants {
+@Private
+@Unstable
+public interface MRConstants {
   //
   // Timeouts, constants
   //
@@ -53,5 +58,6 @@ interface MRConstants {
    */
   public static final String FOR_REDUCE_TASK = "for-reduce-task";
   
+  /** Used in MRv1, mostly in TaskTracker code **/
   public static final String WORKDIR = "work";
 }

+ 12 - 11
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MROutputFiles.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 
 /**
  * Manipulate the working area for the transient store for maps and reduces.
@@ -54,7 +55,7 @@ public class MROutputFiles extends MapOutputFile {
   @Override
   public Path getOutputFile()
       throws IOException {
-    return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + Path.SEPARATOR
+    return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + Path.SEPARATOR
         + MAP_OUTPUT_FILENAME_STRING, getConf());
   }
 
@@ -68,7 +69,7 @@ public class MROutputFiles extends MapOutputFile {
   @Override
   public Path getOutputFileForWrite(long size)
       throws IOException {
-    return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + Path.SEPARATOR
+    return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + Path.SEPARATOR
         + MAP_OUTPUT_FILENAME_STRING, size, getConf());
   }
 
@@ -89,7 +90,7 @@ public class MROutputFiles extends MapOutputFile {
   @Override
   public Path getOutputIndexFile()
       throws IOException {
-    return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + Path.SEPARATOR
+    return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + Path.SEPARATOR
         + MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING,
         getConf());
   }
@@ -104,7 +105,7 @@ public class MROutputFiles extends MapOutputFile {
   @Override
   public Path getOutputIndexFileForWrite(long size)
       throws IOException {
-    return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + Path.SEPARATOR
+    return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + Path.SEPARATOR
         + MAP_OUTPUT_FILENAME_STRING + MAP_OUTPUT_INDEX_SUFFIX_STRING,
         size, getConf());
   }
@@ -128,7 +129,7 @@ public class MROutputFiles extends MapOutputFile {
   @Override
   public Path getSpillFile(int spillNumber)
       throws IOException {
-    return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + "/spill"
+    return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + "/spill"
         + spillNumber + ".out", getConf());
   }
 
@@ -143,7 +144,7 @@ public class MROutputFiles extends MapOutputFile {
   @Override
   public Path getSpillFileForWrite(int spillNumber, long size)
       throws IOException {
-    return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + "/spill"
+    return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + "/spill"
         + spillNumber + ".out", size, getConf());
   }
 
@@ -157,7 +158,7 @@ public class MROutputFiles extends MapOutputFile {
   @Override
   public Path getSpillIndexFile(int spillNumber)
       throws IOException {
-    return lDirAlloc.getLocalPathToRead(Constants.OUTPUT + "/spill"
+    return lDirAlloc.getLocalPathToRead(MRJobConfig.OUTPUT + "/spill"
         + spillNumber + ".out.index", getConf());
   }
 
@@ -172,7 +173,7 @@ public class MROutputFiles extends MapOutputFile {
   @Override
   public Path getSpillIndexFileForWrite(int spillNumber, long size)
       throws IOException {
-    return lDirAlloc.getLocalPathForWrite(Constants.OUTPUT + "/spill"
+    return lDirAlloc.getLocalPathForWrite(MRJobConfig.OUTPUT + "/spill"
         + spillNumber + ".out.index", size, getConf());
   }
 
@@ -187,7 +188,7 @@ public class MROutputFiles extends MapOutputFile {
   public Path getInputFile(int mapId)
       throws IOException {
     return lDirAlloc.getLocalPathToRead(String.format(
-        REDUCE_INPUT_FILE_FORMAT_STRING, Constants.OUTPUT, Integer
+        REDUCE_INPUT_FILE_FORMAT_STRING, MRJobConfig.OUTPUT, Integer
             .valueOf(mapId)), getConf());
   }
 
@@ -204,7 +205,7 @@ public class MROutputFiles extends MapOutputFile {
                                    long size)
       throws IOException {
     return lDirAlloc.getLocalPathForWrite(String.format(
-        REDUCE_INPUT_FILE_FORMAT_STRING, Constants.OUTPUT, mapId.getId()),
+        REDUCE_INPUT_FILE_FORMAT_STRING, MRJobConfig.OUTPUT, mapId.getId()),
         size, getConf());
   }
 
@@ -212,7 +213,7 @@ public class MROutputFiles extends MapOutputFile {
   @Override
   public void removeAll()
       throws IOException {
-    ((JobConf)getConf()).deleteLocalFiles(Constants.OUTPUT);
+    ((JobConf)getConf()).deleteLocalFiles(MRJobConfig.OUTPUT);
   }
 
   @Override

+ 11 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SecureIOUtils;
 import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.util.ProcessTree;
 import org.apache.hadoop.util.Shell;
 import org.apache.log4j.Appender;
@@ -75,10 +76,18 @@ public class TaskLog {
       }
     }
   }
-
+  
+  public static String getMRv2LogDir() {
+    return System.getProperty(MRJobConfig.TASK_LOG_DIR);
+  }
+  
   public static File getTaskLogFile(TaskAttemptID taskid, boolean isCleanup,
       LogName filter) {
-    return new File(getAttemptDir(taskid, isCleanup), filter.toString());
+    if (getMRv2LogDir() != null) {
+      return new File(getMRv2LogDir(), filter.toString());
+    } else {
+      return new File(getAttemptDir(taskid, isCleanup), filter.toString());
+    }
   }
 
   static File getRealTaskLogFileLocation(TaskAttemptID taskid,
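A short sketch of the effect of the TaskLog change above (hypothetical class and paths; it relies only on the methods shown in the hunk, and assumes the usual TaskLog.LogName and TaskAttemptID.forName helpers):

import java.io.File;
import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskLog;
import org.apache.hadoop.mapreduce.MRJobConfig;

public class TaskLogDirExample {
  public static void main(String[] args) {
    // The MRv2 container launcher is expected to set this system property;
    // the directory value here is invented for illustration.
    System.setProperty(MRJobConfig.TASK_LOG_DIR, "/tmp/container_01/logs");
    TaskAttemptID id =
        TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
    // Resolves under the MRv2 log dir, e.g. /tmp/container_01/logs/stdout;
    // without the property it falls back to the old per-attempt directory.
    File stdout = TaskLog.getTaskLogFile(id, false, TaskLog.LogName.STDOUT);
    System.out.println(stdout);
  }
}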

Some files were not shown because the number of changed files in this commit is too large.