
Merge r1555021 through r1563041 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1563042 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 11 years ago
parent
commit
f2972402b7
100 changed files with 2102 additions and 1485 deletions
  1. 135 116
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
  3. 1 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  4. 9 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java
  5. 8 7
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
  6. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
  7. 59 187
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
  8. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  9. 2 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
  10. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
  11. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
  12. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java
  13. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
  14. 22 0
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  15. 236 143
      hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm
  16. 17 17
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java
  17. 3 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java
  18. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHtmlQuoting.java
  19. 27 38
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
  20. 16 16
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLifecycle.java
  21. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWebapps.java
  22. 3 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java
  23. 3 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
  24. 7 7
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java
  25. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/jmx/TestJMXJsonServlet.java
  26. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
  27. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
  28. 3 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java
  29. 239 220
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  30. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  31. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
  32. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  33. 3 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
  34. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
  35. 11 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
  36. 5 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  37. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
  38. 0 36
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
  39. 9 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
  40. 9 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  41. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
  42. 8 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
  43. 49 26
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java
  44. 123 81
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm
  45. 41 41
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsDesign.apt.vm
  46. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
  47. 11 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
  48. 103 68
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
  49. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
  50. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
  51. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
  52. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
  53. 78 69
      hadoop-mapreduce-project/CHANGES.txt
  54. 6 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
  55. 3 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
  56. 9 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
  57. 26 34
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
  58. 5 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
  59. 9 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
  60. 22 8
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
  61. 1 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java
  62. 5 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
  63. 6 6
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
  64. 5 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
  65. 22 11
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
  66. 9 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr
  67. 7 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobCounter.java
  68. 2 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java
  69. 7 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
  70. 63 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobQueueChangeEvent.java
  71. 7 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/org/apache/hadoop/mapreduce/JobCounter.properties
  72. 3 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java
  73. 5 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
  74. 5 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
  75. 40 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java
  76. 4 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java
  77. 1 1
      hadoop-project/src/site/apt/index.apt.vm
  78. 8 7
      hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java
  79. 1 1
      hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java
  80. 3 3
      hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java
  81. 3 3
      hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java
  82. 75 32
      hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java
  83. 8 6
      hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
  84. 32 3
      hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java
  85. 92 30
      hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java
  86. 113 95
      hadoop-yarn-project/CHANGES.txt
  87. 11 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
  88. 34 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java
  89. 28 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
  90. 3 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenIdentifier.java
  91. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java
  92. 7 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
  93. 3 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java
  94. 4 7
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
  95. 33 34
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
  96. 2 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java
  97. 41 8
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
  98. 23 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
  99. 9 9
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
  100. 7 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java

+ 135 - 116
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -285,9 +285,6 @@ Trunk (Unreleased)
     HADOOP-9740. Fix FsShell '-text' command to be able to read Avro
     files stored in HDFS and other filesystems. (Allan Yan via cutting)
 
-    HDFS-5471. CacheAdmin -listPools fails when user lacks permissions to view
-    all pools (Andrew Wang via Colin Patrick McCabe)
-
     HADOOP-10044 Improve the javadoc of rpc code (sanjay Radia)
 
   OPTIMIZATIONS
@@ -302,11 +299,50 @@ Release 2.4.0 - UNRELEASED
 
   NEW FEATURES
 
+  IMPROVEMENTS
+
+    HADOOP-10139. Update and improve the Single Cluster Setup document.
+    (Akira Ajisaka via Arpit Agarwal)
+
+    HADOOP-10295. Allow distcp to automatically identify the checksum type of 
+    source files and use it for the target. (jing9 and Laurent Goujon)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 2.3.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
     HADOOP-8545. Filesystem Implementation for OpenStack Swift
     (Dmitry Mezhensky, David Dobbins, Stevel via stevel)
 
+  NEW FEATURES
+
   IMPROVEMENTS
 
+    HADOOP-10046. Print a log message when SSL is enabled.
+    (David S. Wang via wang)
+
+    HADOOP-10079. log a warning message if group resolution takes too long.
+    (cmccabe)
+
+    HADOOP-9623 Update jets3t dependency to 0.9.0.  (Amandeep Khurana via Colin
+    Patrick McCabe)
+
+    HADOOP-10132. RPC#stopProxy() should log the class of proxy when IllegalArgumentException 
+    is encountered (Ted yu via umamahesh)
+
+    HADOOP-10248. Property name should be included in the exception where property value 
+    is null (Akira AJISAKA via umamahesh)
+
+    HADOOP-10086. User document for authentication in secure cluster.
+    (Masatake Iwasaki via Arpit Agarwal)
+
+    HADOOP-10274 Lower the logging level from ERROR to WARN for UGI.doAs method
+    (Takeshi Miao via stack)
+
     HADOOP-9784. Add a builder for HttpServer. (Junping Du via llu)
 
     HADOOP 9871. Fix intermittent findbugs warnings in DefaultMetricsSystem.
@@ -427,8 +463,18 @@ Release 2.4.0 - UNRELEASED
     HADOOP-9652. Allow RawLocalFs#getFileLinkStatus to fill in the link owner
     and mode if requested. (Andrew Wang via Colin Patrick McCabe)
 
+    HADOOP-10305. Add "rpc.metrics.quantile.enable" and
+    "rpc.metrics.percentiles.intervals" to core-default.xml.
+    (Akira Ajisaka via wang)
+
+    HADOOP-10317. Rename branch-2.3 release version from 2.4.0-SNAPSHOT
+    to 2.3.0-SNAPSHOT. (wang)
+
   OPTIMIZATIONS
 
+    HADOOP-10142. Avoid groups lookup for unprivileged users such as "dr.who"
+    (vinay via cmccabe)
+
     HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn)
 
     HADOOP-10047. Add a direct-buffer based apis for compression. (Gopal V
@@ -444,6 +490,90 @@ Release 2.4.0 - UNRELEASED
 
   BUG FIXES
 
+    HADOOP-10028. Malformed ssl-server.xml.example. (Haohui Mai via jing9)
+
+    HADOOP-10030. FsShell -put/copyFromLocal should support Windows local path.
+    (Chuan Liu via cnauroth)
+
+    HADOOP-10031. FsShell -get/copyToLocal/moveFromLocal should support Windows
+    local path. (Chuan Liu via cnauroth)
+
+    HADOOP-10039. Add Hive to the list of projects using 
+    AbstractDelegationTokenSecretManager. (Haohui Mai via jing9)
+
+    HADOOP-10040. hadoop.cmd in UNIX format and would not run by default on
+    Windows. (cnauroth)
+
+    HADOOP-10055. FileSystemShell.apt.vm doc has typo "numRepicas".
+    (Akira Ajisaka via cnauroth)
+
+    HADOOP-10072. TestNfsExports#testMultiMatchers fails due to non-deterministic
+    timing around cache expiry check. (cnauroth)
+
+    HADOOP-9898. Set SO_KEEPALIVE on all our sockets. (todd via wang)
+
+    HADOOP-9478. Fix race conditions during the initialization of Configuration
+    related to deprecatedKeyMap (cmccabe)
+
+    HADOOP-9660. [WINDOWS] Powershell / cmd parses -Dkey=value from command line
+    as [-Dkey, value] which breaks GenericsOptionParser.
+    (Enis Soztutar via cnauroth)
+
+    HADOOP-10078. KerberosAuthenticator always does SPNEGO. (rkanter via tucu)
+
+    HADOOP-10110. hadoop-auth has a build break due to missing dependency.
+    (Chuan Liu via arp)
+
+    HADOOP-9114. After defined the dfs.checksum.type as the NULL, write file and hflush will 
+    through java.lang.ArrayIndexOutOfBoundsException (Sathish via umamahesh)
+
+    HADOOP-10130. RawLocalFS::LocalFSFileInputStream.pread does not track
+    FS::Statistics (Binglin Chang via Colin Patrick McCabe)
+
+    HDFS-5560. Trash configuration log statements prints incorrect units.
+    (Josh Elser via Andrew Wang)
+
+    HADOOP-10081. Client.setupIOStreams can leak socket resources on exception
+    or error (Tsuyoshi OZAWA via jlowe)
+
+    HADOOP-10087. UserGroupInformation.getGroupNames() fails to return primary
+    group first when JniBasedUnixGroupsMappingWithFallback is used (cmccabe)
+
+    HADOOP-10175. Har files system authority should preserve userinfo.
+    (Chuan Liu via cnauroth)
+
+    HADOOP-10090. Jobtracker metrics not updated properly after execution
+    of a mapreduce job. (ivanmi)
+
+    HADOOP-10193. hadoop-auth's PseudoAuthenticationHandler can consume getInputStream. 
+    (gchanan via tucu)
+
+    HADOOP-10178. Configuration deprecation always emit "deprecated" warnings
+    when a new key is used. (Shanyu Zhao via cnauroth)
+
+    HADOOP-10234. "hadoop.cmd jar" does not propagate exit code. (cnauroth)
+
+    HADOOP-10240. Windows build instructions incorrectly state requirement of
+    protoc 2.4.1 instead of 2.5.0. (cnauroth)
+
+    HADOOP-10167. Mark hadoop-common source as UTF-8 in Maven pom files / refactoring
+    (Mikhail Antonov via cos)
+
+    HADOOP-9982. Fix dead links in hadoop site docs. (Akira Ajisaka via Arpit
+    Agarwal)
+
+    HADOOP-10212. Incorrect compile command in Native Library document.
+    (Akira Ajisaka via Arpit Agarwal)
+
+    HADOOP-9830. Fix typo at http://hadoop.apache.org/docs/current/
+    (Kousuke Saruta via Arpit Agarwal)
+
+    HADOOP-10255. Rename HttpServer to HttpServer2 to retain older 
+    HttpServer in branch-2 for compatibility. (Haohui Mai via suresh)
+
+    HADOOP-10291. TestSecurityUtil#testSocketAddrWithIP fails due to test
+    order dependency. (Mit Desai via Arpit Agarwal)
+
     HADOOP-9964. Fix deadlocks in TestHttpServer by synchronize
     ReflectionUtils.printThreadInfo. (Junping Du via llu)
 
@@ -459,7 +589,6 @@ Release 2.4.0 - UNRELEASED
     HADOOP-9865.  FileContext#globStatus has a regression with respect to
     relative path.  (Chuan Lin via Colin Patrick McCabe)
 
-
     HADOOP-9909. org.apache.hadoop.fs.Stat should permit other LANG.
     (Shinichi Yamashita via Andrew Wang)
 
@@ -545,118 +674,8 @@ Release 2.4.0 - UNRELEASED
     HADOOP-10288. Explicit reference to Log4JLogger breaks non-log4j users
     (todd)
 
-Release 2.3.0 - UNRELEASED
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-  IMPROVEMENTS
-
-    HADOOP-10046. Print a log message when SSL is enabled.
-    (David S. Wang via wang)
-
-    HADOOP-10079. log a warning message if group resolution takes too long.
-    (cmccabe)
-
-    HADOOP-9623 Update jets3t dependency to 0.9.0.  (Amandeep Khurana via Colin
-    Patrick McCabe)
-
-    HADOOP-10132. RPC#stopProxy() should log the class of proxy when IllegalArgumentException 
-    is encountered (Ted yu via umamahesh)
-
-    HADOOP-10248. Property name should be included in the exception where property value 
-    is null (Akira AJISAKA via umamahesh)
-
-    HADOOP-10086. User document for authentication in secure cluster.
-    (Masatake Iwasaki via Arpit Agarwal)
-
-    HADOOP-10274 Lower the logging level from ERROR to WARN for UGI.doAs method
-    (Takeshi Miao via stack)
-
-  OPTIMIZATIONS
-
-    HADOOP-10142. Avoid groups lookup for unprivileged users such as "dr.who"
-    (vinay via cmccabe)
-
-  BUG FIXES
-
-    HADOOP-10028. Malformed ssl-server.xml.example. (Haohui Mai via jing9)
-
-    HADOOP-10030. FsShell -put/copyFromLocal should support Windows local path.
-    (Chuan Liu via cnauroth)
-
-    HADOOP-10031. FsShell -get/copyToLocal/moveFromLocal should support Windows
-    local path. (Chuan Liu via cnauroth)
-
-    HADOOP-10039. Add Hive to the list of projects using 
-    AbstractDelegationTokenSecretManager. (Haohui Mai via jing9)
-
-    HADOOP-10040. hadoop.cmd in UNIX format and would not run by default on
-    Windows. (cnauroth)
-
-    HADOOP-10055. FileSystemShell.apt.vm doc has typo "numRepicas".
-    (Akira Ajisaka via cnauroth)
-
-    HADOOP-10072. TestNfsExports#testMultiMatchers fails due to non-deterministic
-    timing around cache expiry check. (cnauroth)
-
-    HADOOP-9898. Set SO_KEEPALIVE on all our sockets. (todd via wang)
-
-    HADOOP-9478. Fix race conditions during the initialization of Configuration
-    related to deprecatedKeyMap (cmccabe)
-
-    HADOOP-9660. [WINDOWS] Powershell / cmd parses -Dkey=value from command line
-    as [-Dkey, value] which breaks GenericsOptionParser.
-    (Enis Soztutar via cnauroth)
-
-    HADOOP-10078. KerberosAuthenticator always does SPNEGO. (rkanter via tucu)
-
-    HADOOP-10110. hadoop-auth has a build break due to missing dependency.
-    (Chuan Liu via arp)
-
-    HADOOP-9114. After defined the dfs.checksum.type as the NULL, write file and hflush will 
-    through java.lang.ArrayIndexOutOfBoundsException (Sathish via umamahesh)
-
-    HADOOP-10130. RawLocalFS::LocalFSFileInputStream.pread does not track
-    FS::Statistics (Binglin Chang via Colin Patrick McCabe)
-
-    HDFS-5560. Trash configuration log statements prints incorrect units.
-    (Josh Elser via Andrew Wang)
-
-    HADOOP-10081. Client.setupIOStreams can leak socket resources on exception
-    or error (Tsuyoshi OZAWA via jlowe)
-
-    HADOOP-10087. UserGroupInformation.getGroupNames() fails to return primary
-    group first when JniBasedUnixGroupsMappingWithFallback is used (cmccabe)
-
-    HADOOP-10175. Har files system authority should preserve userinfo.
-    (Chuan Liu via cnauroth)
-
-    HADOOP-10090. Jobtracker metrics not updated properly after execution
-    of a mapreduce job. (ivanmi)
-
-    HADOOP-10193. hadoop-auth's PseudoAuthenticationHandler can consume getInputStream. 
-    (gchanan via tucu)
-
-    HADOOP-10178. Configuration deprecation always emit "deprecated" warnings
-    when a new key is used. (Shanyu Zhao via cnauroth)
-
-    HADOOP-10234. "hadoop.cmd jar" does not propagate exit code. (cnauroth)
-
-    HADOOP-10240. Windows build instructions incorrectly state requirement of
-    protoc 2.4.1 instead of 2.5.0. (cnauroth)
-
-    HADOOP-10112. har file listing doesn't work with wild card. (brandonli)
-
-    HADOOP-10167. Mark hadoop-common source as UTF-8 in Maven pom files / refactoring
-    (Mikhail Antonov via cos)
-
-    HADOOP-9982. Fix dead links in hadoop site docs. (Akira Ajisaka via Arpit
-    Agarwal)
-
-    HADOOP-10212. Incorrect compile command in Native Library document.
-    (Akira Ajisaka via Arpit Agarwal)
+    HADOOP-10310. SaslRpcServer should be initialized even when no secret
+    manager present. (atm)
 
 Release 2.2.0 - 2013-10-13
 

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java

@@ -27,7 +27,7 @@ import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 
 /**
  * A servlet to print out the running configuration data.
@@ -47,7 +47,7 @@ public class ConfServlet extends HttpServlet {
   */
  private Configuration getConfFromContext() {
    Configuration conf = (Configuration)getServletContext().getAttribute(
-        HttpServer.CONF_CONTEXT_ATTRIBUTE);
+        HttpServer2.CONF_CONTEXT_ATTRIBUTE);
    assert conf != null;
    return conf;
  }
@@ -56,7 +56,7 @@ public class ConfServlet extends HttpServlet {
  public void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
 
-    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+    if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
                                                   request, response)) {
      return;
    }

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -245,6 +245,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
 
  public static final String RPC_METRICS_QUANTILE_ENABLE =
      "rpc.metrics.quantile.enable";
+  public static final boolean RPC_METRICS_QUANTILE_ENABLE_DEFAULT = false;
  public static final String  RPC_METRICS_PERCENTILES_INTERVALS_KEY =
      "rpc.metrics.percentiles.intervals";
 }
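
Note: the new RPC_METRICS_QUANTILE_ENABLE_DEFAULT constant pairs with the rpc.metrics.quantile.enable key that HADOOP-10305 documents in core-default.xml. A minimal sketch of how a metrics consumer might read it — illustrative only, not code from this commit; the class name here is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class RpcQuantileConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Falls back to the new default (false) when neither core-default.xml
        // nor core-site.xml sets rpc.metrics.quantile.enable.
        boolean quantilesEnabled = conf.getBoolean(
            CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE,
            CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE_DEFAULT);
        System.out.println("rpc.metrics.quantile.enable = " + quantilesEnabled);
      }
    }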

+ 9 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileChecksum.java

@@ -21,21 +21,26 @@ import java.util.Arrays;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.io.Writable;
 
 /** An abstract class representing file checksums for files. */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public abstract class FileChecksum implements Writable {
-  /** The checksum algorithm name */ 
+  /** The checksum algorithm name */
   public abstract String getAlgorithmName();
 
-  /** The length of the checksum in bytes */ 
+  /** The length of the checksum in bytes */
   public abstract int getLength();
 
-  /** The value of the checksum in bytes */ 
+  /** The value of the checksum in bytes */
   public abstract byte[] getBytes();
 
+  public ChecksumOpt getChecksumOpt() {
+    return null;
+  }
+
   /** Return true if both the algorithms and the values are the same. */
   @Override
   public boolean equals(Object other) {
@@ -50,7 +55,7 @@ public abstract class FileChecksum implements Writable {
     return this.getAlgorithmName().equals(that.getAlgorithmName())
       && Arrays.equals(this.getBytes(), that.getBytes());
   }
-  
+
   @Override
   public int hashCode() {
     return getAlgorithmName().hashCode() ^ Arrays.hashCode(getBytes());

+ 8 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java

@@ -56,7 +56,7 @@ public class MD5MD5CRC32FileChecksum extends FileChecksum {
     this.crcPerBlock = crcPerBlock;
     this.md5 = md5;
   }
-  
+
   @Override
   public String getAlgorithmName() {
     return "MD5-of-" + crcPerBlock + "MD5-of-" + bytesPerCRC +
@@ -73,10 +73,10 @@ public class MD5MD5CRC32FileChecksum extends FileChecksum {
 
     throw new IOException("Unknown checksum type in " + algorithm);
   }
- 
+
   @Override
   public int getLength() {return LENGTH;}
- 
+
   @Override
   public byte[] getBytes() {
     return WritableUtils.toByteArray(this);
@@ -88,6 +88,7 @@ public class MD5MD5CRC32FileChecksum extends FileChecksum {
     return DataChecksum.Type.CRC32;
   }
 
+  @Override
   public ChecksumOpt getChecksumOpt() {
     return new ChecksumOpt(getCrcType(), bytesPerCRC);
   }
@@ -98,12 +99,12 @@ public class MD5MD5CRC32FileChecksum extends FileChecksum {
     crcPerBlock = in.readLong();
     md5 = MD5Hash.read(in);
   }
- 
+
   @Override
   public void write(DataOutput out) throws IOException {
     out.writeInt(bytesPerCRC);
     out.writeLong(crcPerBlock);
-    md5.write(out);    
+    md5.write(out);
   }
 
   /** Write that object to xml output. */
@@ -157,11 +158,11 @@ public class MD5MD5CRC32FileChecksum extends FileChecksum {
       }
     } catch (Exception e) {
       throw new SAXException("Invalid attributes: bytesPerCRC=" + bytesPerCRC
-          + ", crcPerBlock=" + crcPerBlock + ", crcType=" + crcType 
+          + ", crcPerBlock=" + crcPerBlock + ", crcType=" + crcType
           + ", md5=" + md5, e);
     }
   }
- 
+
   @Override
   public String toString() {
     return getAlgorithmName() + ":" + md5;
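
Taken together, the two checksum changes above let callers ask a FileChecksum for its parameters: the base class now answers null, while MD5MD5CRC32FileChecksum reports its CRC type and bytes-per-CRC, which is what lets distcp (HADOOP-10295) match the target's checksum to the source's. A hedged sketch of that lookup — the helper class and method names are hypothetical, only getFileChecksum() and getChecksumOpt() come from the code above:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Options.ChecksumOpt;
    import org.apache.hadoop.fs.Path;

    public class ChecksumOptExample {
      /** Returns the source file's checksum parameters, or null if unknown. */
      public static ChecksumOpt checksumOptOf(FileSystem fs, Path path) throws IOException {
        FileChecksum checksum = fs.getFileChecksum(path);
        // Filesystems that expose no checksum, and FileChecksum subclasses that
        // keep the default getChecksumOpt(), both yield null here; callers then
        // fall back to the target filesystem's configured defaults.
        return checksum == null ? null : checksum.getChecksumOpt();
      }
    }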

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java

@@ -37,7 +37,7 @@ public class AdminAuthorizedServlet extends DefaultServlet {
   protected void doGet(HttpServletRequest request, HttpServletResponse response)
  throws ServletException, IOException {
     // Do the authorization
-    if (HttpServer.hasAdministratorAccess(getServletContext(), request,
+    if (HttpServer2.hasAdministratorAccess(getServletContext(), request,
         response)) {
       // Authorization is done. Just call super.
       super.doGet(request, response);

+ 59 - 187
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java

@@ -24,7 +24,6 @@ import java.io.PrintWriter;
 import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.net.URL;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -89,17 +88,19 @@ import com.google.common.collect.Lists;
 import com.sun.jersey.spi.container.servlet.ServletContainer;
 
 /**
- * Create a Jetty embedded server to answer http requests. The primary goal
- * is to serve up status information for the server.
- * There are three contexts:
- *   "/logs/" -> points to the log directory
- *   "/static/" -> points to common static files (src/webapps/static)
- *   "/" -> the jsp server code from (src/webapps/<name>)
+ * Create a Jetty embedded server to answer http requests. The primary goal is
+ * to serve up status information for the server. There are three contexts:
+ * "/logs/" -> points to the log directory "/static/" -> points to common static
+ * files (src/webapps/static) "/" -> the jsp server code from
+ * (src/webapps/<name>)
+ *
+ * This class is a fork of the old HttpServer. HttpServer exists for
+ * compatibility reasons. See HBASE-10336 for more details.
  */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "HBase"})
+@InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class HttpServer implements FilterContainer {
-  public static final Log LOG = LogFactory.getLog(HttpServer.class);
+public final class HttpServer2 implements FilterContainer {
+  public static final Log LOG = LogFactory.getLog(HttpServer2.class);
 
   static final String FILTER_INITIALIZER_PROPERTY
       = "hadoop.http.filter.initializers";
@@ -166,11 +167,6 @@ public class HttpServer implements FilterContainer {
     // The -keypass option in keytool
     private String keyPassword;
 
-    @Deprecated
-    private String bindAddress;
-    @Deprecated
-    private int port = -1;
-
     private boolean findPort;
 
     private String hostName;
@@ -204,7 +200,7 @@ public class HttpServer implements FilterContainer {
       this.hostName = hostName;
       return this;
     }
-    
+
     public Builder trustStore(String location, String password, String type) {
       this.trustStore = location;
       this.trustStorePassword = password;
@@ -233,78 +229,51 @@ public class HttpServer implements FilterContainer {
       return this;
     }
 
-    /**
-     * Use addEndpoint() instead.
-     */
-    @Deprecated
-    public Builder setBindAddress(String bindAddress){
-      this.bindAddress = bindAddress;
-      return this;
-    }
-
-    /**
-     * Use addEndpoint() instead.
-     */
-    @Deprecated
-    public Builder setPort(int port) {
-      this.port = port;
-      return this;
-    }
-    
     public Builder setFindPort(boolean findPort) {
       this.findPort = findPort;
       return this;
     }
-    
+
     public Builder setConf(Configuration conf) {
       this.conf = conf;
       return this;
     }
-    
+
     public Builder setConnector(Connector connector) {
       this.connector = connector;
       return this;
     }
-    
+
     public Builder setPathSpec(String[] pathSpec) {
       this.pathSpecs = pathSpec;
       return this;
     }
-    
+
     public Builder setACL(AccessControlList acl) {
       this.adminsAcl = acl;
       return this;
     }
-    
+
     public Builder setSecurityEnabled(boolean securityEnabled) {
       this.securityEnabled = securityEnabled;
       return this;
     }
-    
+
     public Builder setUsernameConfKey(String usernameConfKey) {
       this.usernameConfKey = usernameConfKey;
       return this;
     }
-    
+
     public Builder setKeytabConfKey(String keytabConfKey) {
       this.keytabConfKey = keytabConfKey;
       return this;
     }
-    
-    public HttpServer build() throws IOException {
+
+    public HttpServer2 build() throws IOException {
       if (this.name == null) {
         throw new HadoopIllegalArgumentException("name is not set");
       }
 
-      // Make the behavior compatible with deprecated interfaces
-      if (bindAddress != null && port != -1) {
-        try {
-          endpoints.add(0, new URI("http", "", bindAddress, port, "", "", ""));
-        } catch (URISyntaxException e) {
-          throw new HadoopIllegalArgumentException("Invalid endpoint: "+ e);
-        }
-      }
-
       if (endpoints.size() == 0 && connector == null) {
         throw new HadoopIllegalArgumentException("No endpoints specified");
       }
@@ -313,12 +282,12 @@ public class HttpServer implements FilterContainer {
         hostName = endpoints.size() == 0 ? connector.getHost() : endpoints.get(
             0).getHost();
       }
-      
+
       if (this.conf == null) {
         conf = new Configuration();
       }
-      
-      HttpServer server = new HttpServer(this);
+
+      HttpServer2 server = new HttpServer2(this);
 
       if (this.securityEnabled) {
         server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey);
@@ -332,7 +301,7 @@ public class HttpServer implements FilterContainer {
         Connector listener = null;
         String scheme = ep.getScheme();
         if ("http".equals(scheme)) {
-          listener = HttpServer.createDefaultChannelConnector();
+          listener = HttpServer2.createDefaultChannelConnector();
         } else if ("https".equals(scheme)) {
           SslSocketConnector c = new SslSocketConnector();
           c.setNeedClientAuth(needsClientAuth);
@@ -363,105 +332,8 @@ public class HttpServer implements FilterContainer {
       return server;
     }
   }
-  
-  /** Same as this(name, bindAddress, port, findPort, null); */
-  @Deprecated
-  public HttpServer(String name, String bindAddress, int port, boolean findPort
-      ) throws IOException {
-    this(name, bindAddress, port, findPort, new Configuration());
-  }
 
-  @Deprecated
-  public HttpServer(String name, String bindAddress, int port,
-      boolean findPort, Configuration conf, Connector connector) throws IOException {
-    this(name, bindAddress, port, findPort, conf, null, connector, null);
-  }
-
-  /**
-   * Create a status server on the given port. Allows you to specify the
-   * path specifications that this server will be serving so that they will be
-   * added to the filters properly.  
-   * 
-   * @param name The name of the server
-   * @param bindAddress The address for this server
-   * @param port The port to use on the server
-   * @param findPort whether the server should start at the given port and 
-   *        increment by 1 until it finds a free port.
-   * @param conf Configuration 
-   * @param pathSpecs Path specifications that this httpserver will be serving. 
-   *        These will be added to any filters.
-   */
-  @Deprecated
-  public HttpServer(String name, String bindAddress, int port,
-      boolean findPort, Configuration conf, String[] pathSpecs) throws IOException {
-    this(name, bindAddress, port, findPort, conf, null, null, pathSpecs);
-  }
-  
-  /**
-   * Create a status server on the given port.
-   * The jsp scripts are taken from src/webapps/<name>.
-   * @param name The name of the server
-   * @param port The port to use on the server
-   * @param findPort whether the server should start at the given port and 
-   *        increment by 1 until it finds a free port.
-   * @param conf Configuration 
-   */
-  @Deprecated
-  public HttpServer(String name, String bindAddress, int port,
-      boolean findPort, Configuration conf) throws IOException {
-    this(name, bindAddress, port, findPort, conf, null, null, null);
-  }
-
-  @Deprecated
-  public HttpServer(String name, String bindAddress, int port,
-      boolean findPort, Configuration conf, AccessControlList adminsAcl) 
-      throws IOException {
-    this(name, bindAddress, port, findPort, conf, adminsAcl, null, null);
-  }
-
-  /**
-   * Create a status server on the given port.
-   * The jsp scripts are taken from src/webapps/<name>.
-   * @param name The name of the server
-   * @param bindAddress The address for this server
-   * @param port The port to use on the server
-   * @param findPort whether the server should start at the given port and 
-   *        increment by 1 until it finds a free port.
-   * @param conf Configuration 
-   * @param adminsAcl {@link AccessControlList} of the admins
-   */
-  @Deprecated
-  public HttpServer(String name, String bindAddress, int port,
-      boolean findPort, Configuration conf, AccessControlList adminsAcl, 
-      Connector connector) throws IOException {
-    this(name, bindAddress, port, findPort, conf, adminsAcl, connector, null);
-  }
-
-  /**
-   * Create a status server on the given port.
-   * The jsp scripts are taken from src/webapps/<name>.
-   * @param name The name of the server
-   * @param bindAddress The address for this server
-   * @param port The port to use on the server
-   * @param findPort whether the server should start at the given port and 
-   *        increment by 1 until it finds a free port.
-   * @param conf Configuration 
-   * @param adminsAcl {@link AccessControlList} of the admins
-   * @param connector A jetty connection listener
-   * @param pathSpecs Path specifications that this httpserver will be serving. 
-   *        These will be added to any filters.
-   */
-  @Deprecated
-  public HttpServer(String name, String bindAddress, int port,
-      boolean findPort, Configuration conf, AccessControlList adminsAcl, 
-      Connector connector, String[] pathSpecs) throws IOException {
-    this(new Builder().setName(name).hostName(bindAddress)
-        .addEndpoint(URI.create("http://" + bindAddress + ":" + port))
-        .setFindPort(findPort).setConf(conf).setACL(adminsAcl)
-        .setConnector(connector).setPathSpec(pathSpecs));
-  }
-
-  private HttpServer(final Builder b) throws IOException {
+  private HttpServer2(final Builder b) throws IOException {
     final String appDir = getWebAppsPath(b.name);
     this.webServer = new Server();
     this.adminsAcl = b.adminsAcl;
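
Note on the removals above: with the deprecated HttpServer constructors and the setBindAddress()/setPort() builder methods gone, callers reach HttpServer2 only through the Builder and addEndpoint(), mirroring the URI the old compatibility constructor built internally. A minimal, hypothetical sketch — the name, address, and port are placeholders, and start() is assumed to be the usual lifecycle call on the built server:

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer2;

    public class HttpServer2BuilderExample {
      public static void main(String[] args) throws IOException {
        // Roughly equivalent to the removed
        // new HttpServer(name, bindAddress, port, findPort, conf) constructor.
        // The name must match a webapps/<name> resource on the classpath,
        // otherwise getWebAppsPath() throws FileNotFoundException.
        HttpServer2 server = new HttpServer2.Builder()
            .setName("webapp")
            .addEndpoint(URI.create("http://0.0.0.0:8080"))
            .setFindPort(false)
            .setConf(new Configuration())
            .build();
        server.start();
      }
    }
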
@@ -554,9 +426,9 @@ public class HttpServer implements FilterContainer {
    * listener.
    */
   public Connector createBaseListener(Configuration conf) throws IOException {
-    return HttpServer.createDefaultChannelConnector();
+    return HttpServer2.createDefaultChannelConnector();
   }
-  
+
   @InterfaceAudience.Private
   public static Connector createDefaultChannelConnector() {
     SelectChannelConnector ret = new SelectChannelConnector();
@@ -567,7 +439,7 @@ public class HttpServer implements FilterContainer {
     if(Shell.WINDOWS) {
       // result of setting the SO_REUSEADDR flag is different on Windows
       // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
-      // without this 2 NN's can start on the same machine and listen on 
+      // without this 2 NN's can start on the same machine and listen on
       // the same port with indeterminate routing of incoming requests to them
       ret.setReuseAddress(false);
     }
@@ -601,7 +473,7 @@ public class HttpServer implements FilterContainer {
    */
   protected void addDefaultApps(ContextHandlerCollection parent,
       final String appDir, Configuration conf) throws IOException {
-    // set up the context for "/logs/" if "hadoop.log.dir" property is defined. 
+    // set up the context for "/logs/" if "hadoop.log.dir" property is defined.
     String logDir = System.getProperty("hadoop.log.dir");
     if (logDir != null) {
       Context logContext = new Context(parent, "/logs");
@@ -628,7 +500,7 @@ public class HttpServer implements FilterContainer {
     setContextAttributes(staticContext, conf);
     defaultContexts.put(staticContext, true);
   }
-  
+
   private void setContextAttributes(Context context, Configuration conf) {
     context.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
     context.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
@@ -654,10 +526,10 @@ public class HttpServer implements FilterContainer {
   }
 
   /**
-   * Add a context 
+   * Add a context
    * @param pathSpec The path spec for the context
    * @param dir The directory containing the context
-   * @param isFiltered if true, the servlet is added to the filter path mapping 
+   * @param isFiltered if true, the servlet is added to the filter path mapping
    * @throws IOException
    */
   protected void addContext(String pathSpec, String dir, boolean isFiltered) throws IOException {
@@ -680,7 +552,7 @@ public class HttpServer implements FilterContainer {
     webAppContext.setAttribute(name, value);
   }
 
-  /** 
+  /**
    * Add a Jersey resource package.
    * @param packageName The Java package name containing the Jersey resource.
    * @param pathSpec The path spec for the servlet
@@ -709,11 +581,11 @@ public class HttpServer implements FilterContainer {
   }
 
   /**
-   * Add an internal servlet in the server. 
+   * Add an internal servlet in the server.
    * Note: This method is to be used for adding servlets that facilitate
    * internal communication and not for user facing functionality. For
-   * servlets added using this method, filters are not enabled. 
-   * 
+   * servlets added using this method, filters are not enabled.
+   *
    * @param name The name of the servlet (can be passed as null)
    * @param pathSpec The path spec for the servlet
    * @param clazz The servlet class
@@ -725,18 +597,18 @@ public class HttpServer implements FilterContainer {
 
   /**
    * Add an internal servlet in the server, specifying whether or not to
-   * protect with Kerberos authentication. 
+   * protect with Kerberos authentication.
    * Note: This method is to be used for adding servlets that facilitate
    * internal communication and not for user facing functionality. For
    * servlets added using this method, filters (except internal Kerberos
-   * filters) are not enabled. 
-   * 
+   * filters) are not enabled.
+   *
    * @param name The name of the servlet (can be passed as null)
    * @param pathSpec The path spec for the servlet
    * @param clazz The servlet class
    * @param requireAuth Require Kerberos authenticate to access servlet
    */
-  public void addInternalServlet(String name, String pathSpec, 
+  public void addInternalServlet(String name, String pathSpec,
       Class<? extends HttpServlet> clazz, boolean requireAuth) {
     ServletHolder holder = new ServletHolder(clazz);
     if (name != null) {
@@ -820,7 +692,7 @@ public class HttpServer implements FilterContainer {
       handler.addFilterMapping(fmap);
     }
   }
-  
+
   /**
    * Get the value in the webapp context.
    * @param name The name of the attribute
@@ -829,7 +701,7 @@ public class HttpServer implements FilterContainer {
   public Object getAttribute(String name) {
     return webAppContext.getAttribute(name);
   }
-  
+
   public WebAppContext getWebAppContext(){
     return this.webAppContext;
   }
@@ -842,7 +714,7 @@ public class HttpServer implements FilterContainer {
    */
   protected String getWebAppsPath(String appName) throws FileNotFoundException {
     URL url = getClass().getClassLoader().getResource("webapps/" + appName);
-    if (url == null) 
+    if (url == null)
       throw new FileNotFoundException("webapps/" + appName
           + " not found in CLASSPATH");
     String urlString = url.toString();
@@ -900,7 +772,7 @@ public class HttpServer implements FilterContainer {
       params.put("kerberos.keytab", httpKeytab);
     }
     params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
-  
+
     defineFilter(webAppContext, SPNEGO_FILTER,
                  AuthenticationFilter.class.getName(), params, null);
   }
@@ -987,7 +859,7 @@ public class HttpServer implements FilterContainer {
       }
     }
   }
-  
+
   /**
    * stop the server
    */
@@ -1105,7 +977,7 @@ public class HttpServer implements FilterContainer {
   /**
    * Does the user sending the HttpServletRequest has the administrator ACLs? If
    * it isn't the case, response will be modified to send an error to the user.
-   * 
+   *
    * @param servletContext
    * @param request
    * @param response used to send the error response if user does not have admin access.
@@ -1130,7 +1002,7 @@ public class HttpServer implements FilterContainer {
                          "authorized to access this page.");
                          "authorized to access this page.");
       return false;
       return false;
     }
     }
-    
+
     if (servletContext.getAttribute(ADMINS_ACL) != null &&
     if (servletContext.getAttribute(ADMINS_ACL) != null &&
         !userHasAdministratorAccess(servletContext, remoteUser)) {
         !userHasAdministratorAccess(servletContext, remoteUser)) {
       response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User "
       response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User "
@@ -1144,7 +1016,7 @@ public class HttpServer implements FilterContainer {
   /**
   /**
    * Get the admin ACLs from the given ServletContext and check if the given
    * Get the admin ACLs from the given ServletContext and check if the given
    * user is in the ACL.
    * user is in the ACL.
-   * 
+   *
    * @param servletContext the context containing the admin ACL.
    * @param servletContext the context containing the admin ACL.
    * @param remoteUser the remote user to check for.
    * @param remoteUser the remote user to check for.
    * @return true if the user is present in the ACL, false if no ACL is set or
    * @return true if the user is present in the ACL, false if no ACL is set or
@@ -1171,7 +1043,7 @@ public class HttpServer implements FilterContainer {
     @Override
     @Override
     public void doGet(HttpServletRequest request, HttpServletResponse response)
     public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws ServletException, IOException {
       throws ServletException, IOException {
-      if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+      if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
                                                      request, response)) {
                                                      request, response)) {
         return;
         return;
       }
       }
@@ -1179,10 +1051,10 @@ public class HttpServer implements FilterContainer {
       PrintWriter out = response.getWriter();
       PrintWriter out = response.getWriter();
       ReflectionUtils.printThreadInfo(out, "");
       ReflectionUtils.printThreadInfo(out, "");
       out.close();
       out.close();
-      ReflectionUtils.logThreadInfo(LOG, "jsp requested", 1);      
+      ReflectionUtils.logThreadInfo(LOG, "jsp requested", 1);
     }
     }
   }
   }
-  
+
   /**
   /**
    * A Servlet input filter that quotes all HTML active characters in the
    * A Servlet input filter that quotes all HTML active characters in the
    * parameter names and values. The goal is to quote the characters to make
    * parameter names and values. The goal is to quote the characters to make
@@ -1197,7 +1069,7 @@ public class HttpServer implements FilterContainer {
         super(rawRequest);
         super(rawRequest);
         this.rawRequest = rawRequest;
         this.rawRequest = rawRequest;
       }
       }
-      
+
       /**
       /**
        * Return the set of parameter names, quoting each name.
        * Return the set of parameter names, quoting each name.
        */
        */
@@ -1218,7 +1090,7 @@ public class HttpServer implements FilterContainer {
           }
           }
         };
         };
       }
       }
-      
+
       /**
       /**
        * Unquote the name and quote the value.
        * Unquote the name and quote the value.
        */
        */
@@ -1227,7 +1099,7 @@ public class HttpServer implements FilterContainer {
         return HtmlQuoting.quoteHtmlChars(rawRequest.getParameter
         return HtmlQuoting.quoteHtmlChars(rawRequest.getParameter
                                      (HtmlQuoting.unquoteHtmlChars(name)));
                                      (HtmlQuoting.unquoteHtmlChars(name)));
       }
       }
-      
+
       @Override
       @Override
       public String[] getParameterValues(String name) {
       public String[] getParameterValues(String name) {
         String unquoteName = HtmlQuoting.unquoteHtmlChars(name);
         String unquoteName = HtmlQuoting.unquoteHtmlChars(name);
@@ -1257,7 +1129,7 @@ public class HttpServer implements FilterContainer {
         }
         }
         return result;
         return result;
       }
       }
-      
+
       /**
       /**
        * Quote the url so that users specifying the HOST HTTP header
        * Quote the url so that users specifying the HOST HTTP header
        * can't inject attacks.
        * can't inject attacks.
@@ -1267,7 +1139,7 @@ public class HttpServer implements FilterContainer {
         String url = rawRequest.getRequestURL().toString();
         String url = rawRequest.getRequestURL().toString();
         return new StringBuffer(HtmlQuoting.quoteHtmlChars(url));
         return new StringBuffer(HtmlQuoting.quoteHtmlChars(url));
       }
       }
-      
+
       /**
       /**
        * Quote the server name so that users specifying the HOST HTTP header
        * Quote the server name so that users specifying the HOST HTTP header
        * can't inject attacks.
        * can't inject attacks.
@@ -1288,11 +1160,11 @@ public class HttpServer implements FilterContainer {
     }
     }
 
 
     @Override
     @Override
-    public void doFilter(ServletRequest request, 
+    public void doFilter(ServletRequest request,
                          ServletResponse response,
                          ServletResponse response,
                          FilterChain chain
                          FilterChain chain
                          ) throws IOException, ServletException {
                          ) throws IOException, ServletException {
-      HttpServletRequestWrapper quoted = 
+      HttpServletRequestWrapper quoted =
         new RequestQuoter((HttpServletRequest) request);
         new RequestQuoter((HttpServletRequest) request);
       HttpServletResponse httpResponse = (HttpServletResponse) response;
       HttpServletResponse httpResponse = (HttpServletResponse) response;
 
 

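The QuotingInputFilter shown above wraps every incoming request in a RequestQuoter so that HTML-active characters in parameter names, values and the request URL are escaped before a servlet echoes them back. A minimal, hedged sketch of the round trip, using only the HtmlQuoting helpers that appear in this hunk (quoteHtmlChars / unquoteHtmlChars) and assuming they are accessible from the caller's package; the sample value is the same one the tests below use, and the snippet is an illustration, not part of the patch:

import org.apache.hadoop.http.HtmlQuoting;

public class QuotingExample {
  public static void main(String[] args) {
    // "a<b" is the sample value used in TestHtmlQuoting further down in this commit.
    String raw = "a<b";
    String quoted = HtmlQuoting.quoteHtmlChars(raw);        // escapes '<' so it is safe in HTML
    String unquoted = HtmlQuoting.unquoteHtmlChars(quoted); // restores the original "a<b"
    System.out.println(quoted + " / " + unquoted);
  }
}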
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -2206,7 +2206,7 @@ public abstract class Server {
    // Create the responder here
    responder = new Responder();
    
-    if (secretManager != null) {
+    if (secretManager != null || UserGroupInformation.isSecurityEnabled()) {
      SaslRpcServer.init(conf);
    }
    

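With the widened condition above, an RPC Server initializes SASL whenever Kerberos security is enabled, not only when a token secret manager is supplied. A hedged sketch of the check involved; hadoop.security.authentication is the standard configuration key, everything else here is purely illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class SecurityCheckExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("hadoop.security.authentication", "kerberos"); // switch from "simple" to Kerberos
    UserGroupInformation.setConfiguration(conf);

    // With the patched condition, an RPC Server built from this conf calls
    // SaslRpcServer.init(conf) even when no secretManager was passed to it.
    System.out.println("security enabled: " + UserGroupInformation.isSecurityEnabled());
  }
}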
+ 2 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java

@@ -54,7 +54,8 @@ public class RpcMetrics {
    int[] intervals = conf.getInts(
        CommonConfigurationKeys.RPC_METRICS_PERCENTILES_INTERVALS_KEY);
    rpcQuantileEnable = (intervals.length > 0) && conf.getBoolean(
-        CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE, false);
+        CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE,
+        CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE_DEFAULT);
    if (rpcQuantileEnable) {
      rpcQueueTimeMillisQuantiles =
          new MutableQuantiles[intervals.length];

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java

@@ -46,7 +46,7 @@ import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.codehaus.jackson.JsonFactory;
 import org.codehaus.jackson.JsonGenerator;
 
@@ -154,7 +154,7 @@ public class JMXJsonServlet extends HttpServlet {
  @Override
  public void doGet(HttpServletRequest request, HttpServletResponse response) {
    try {
-      if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+      if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
                                                     request, response)) {
        return;
      }

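The hunk above only swaps the static access-check helper from HttpServer to HttpServer2; the servlet's behaviour is unchanged, and /jmx still returns the registered MBeans as JSON once the instrumentation check passes. A hedged sketch of reading it from a plain HTTP client; the host, port and query value are example placeholders, not values taken from this commit:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class JmxJsonClientExample {
  public static void main(String[] args) throws Exception {
    // ?qry= narrows the result to matching MBeans; omitting it dumps everything.
    URL url = new URL("http://localhost:50070/jmx?qry=Hadoop:*");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(url.openStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line); // raw JSON emitted by JMXJsonServlet
      }
    }
  }
}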
+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java

@@ -28,7 +28,7 @@ import org.apache.commons.logging.*;
 import org.apache.commons.logging.impl.*;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.util.ServletUtil;
 
 /**
@@ -93,7 +93,7 @@ public class LogLevel {
        ) throws ServletException, IOException {
 
      // Do the authorization
-      if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
+      if (!HttpServer2.hasAdministratorAccess(getServletContext(), request,
          response)) {
        return;
      }

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java

@@ -32,7 +32,7 @@ import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.metrics.spi.OutputRecord;
 import org.apache.hadoop.metrics.spi.AbstractMetricsContext.MetricMap;
 import org.apache.hadoop.metrics.spi.AbstractMetricsContext.TagMap;
@@ -108,7 +108,7 @@ public class MetricsServlet extends HttpServlet {
  public void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
 
-    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+    if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
                                                   request, response)) {
      return;
    }

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.security;
 
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
@@ -94,7 +94,7 @@ public class AuthenticationFilterInitializer extends FilterInitializer {
    }
 
    //Resolve _HOST into bind address
-    String bindAddress = conf.get(HttpServer.BIND_ADDRESS);
+    String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
    String principal = filterConfig.get(KerberosAuthenticationHandler.PRINCIPAL);
    if (principal != null) {
      try {

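The hunk above is truncated right after the principal lookup; the bind address it reads is used to resolve the _HOST placeholder in the SPNEGO principal. A hedged sketch of that substitution using SecurityUtil.getServerPrincipal, which is an existing Hadoop helper; whether this exact call is what follows in the elided lines is an assumption, and the literal values below are invented for illustration:

import java.io.IOException;
import org.apache.hadoop.security.SecurityUtil;

public class PrincipalResolutionExample {
  public static void main(String[] args) throws IOException {
    // Example inputs only: a principal template and the address the HTTP server binds to.
    String principal = "HTTP/_HOST@EXAMPLE.COM";
    String bindAddress = "namenode.example.com";

    // Substitutes _HOST with the supplied hostname (or the local host for a wildcard address).
    String resolved = SecurityUtil.getServerPrincipal(principal, bindAddress);
    System.out.println(resolved);
  }
}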
+ 22 - 0
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -1275,4 +1275,26 @@
    Default, "dr.who=;" will consider "dr.who" as user without groups.
  </description>
</property>
+
+<property>
+  <name>rpc.metrics.quantile.enable</name>
+  <value>false</value>
+  <description>
+    Setting this property to true and rpc.metrics.percentiles.intervals
+    to a comma-separated list of the granularity in seconds, the
+    50/75/90/95/99th percentile latency for rpc queue/processing time in
+    milliseconds are added to rpc metrics.
+  </description>
+</property>
+
+<property>
+  <name>rpc.metrics.percentiles.intervals</name>
+  <value></value>
+  <description>
+    A comma-separated list of the granularity in seconds for the metrics which
+    describe the 50/75/90/95/99th percentile latency for rpc queue/processing
+    time. The metrics are outputted if rpc.metrics.quantile.enable is set to
+    true.
+  </description>
+</property>
</configuration>

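Taken together with the RpcMetrics change earlier in this commit, the two keys documented above turn on 50/75/90/95/99th percentile latency metrics for RPC queue and processing time. A hedged sketch of setting them through the Configuration API instead of core-site.xml; the key names come straight from the diff, while the 60- and 300-second windows are arbitrary example values:

import org.apache.hadoop.conf.Configuration;

public class RpcQuantileConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Same keys that core-default.xml now documents.
    conf.setBoolean("rpc.metrics.quantile.enable", true);
    conf.set("rpc.metrics.percentiles.intervals", "60,300"); // rolling windows, in seconds

    // An RPC server created with this conf registers MutableQuantiles for each interval,
    // for example a 99th-percentile queue-time gauge per 60-second window.
  }
}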
+ 236 - 143
hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm

@@ -20,174 +20,267 @@ Hadoop MapReduce Next Generation - Setting up a Single Node Cluster.
 
 %{toc|section=1|fromDepth=0}
 
-* Mapreduce Tarball
+* Purpose
 
-  You should be able to obtain the MapReduce tarball from the release.
-  If not, you should be able to create a tarball from the source.
+  This document describes how to set up and configure a single-node Hadoop
+  installation so that you can quickly perform simple operations using Hadoop
+  MapReduce and the Hadoop Distributed File System (HDFS).
 
-+---+
-$ mvn clean install -DskipTests
-$ cd hadoop-mapreduce-project
-$ mvn clean install assembly:assembly -Pnative
-+---+
-  <<NOTE:>> You will need {{{http://code.google.com/p/protobuf}protoc 2.5.0}}
-            installed.
+* Prerequisites
 
-  To ignore the native builds in mapreduce you can omit the <<<-Pnative>>> argument
-  for maven. The tarball should be available in <<<target/>>> directory. 
+** Supported Platforms
 
-  
-* Setting up the environment.
+   * GNU/Linux is supported as a development and production platform.
+     Hadoop has been demonstrated on GNU/Linux clusters with 2000 nodes.
 
-  Assuming you have installed hadoop-common/hadoop-hdfs and exported
-  <<$HADOOP_COMMON_HOME>>/<<$HADOOP_HDFS_HOME>>, untar hadoop mapreduce 
-  tarball and set environment variable <<$HADOOP_MAPRED_HOME>> to the 
-  untarred directory. Set <<$HADOOP_YARN_HOME>> the same as <<$HADOOP_MAPRED_HOME>>. 
- 
-  <<NOTE:>> The following instructions assume you have hdfs running.
+   * Windows is also a supported platform but the followings steps
+     are for Linux only. To set up Hadoop on Windows, see
+     {{{http://wiki.apache.org/hadoop/Hadoop2OnWindows}wiki page}}.
 
-* Setting up Configuration.
+** Required Software
 
-  To start the ResourceManager and NodeManager, you will have to update the configs.
-  Assuming your $HADOOP_CONF_DIR is the configuration directory and has the installed
-  configs for HDFS and <<<core-site.xml>>>. There are 2 config files you will have to setup
-  <<<mapred-site.xml>>> and <<<yarn-site.xml>>>.
+   Required software for Linux include:
 
-** Setting up <<<mapred-site.xml>>>
+   [[1]] Java™ must be installed. Recommended Java versions are described
+         at {{{http://wiki.apache.org/hadoop/HadoopJavaVersions}
+         HadoopJavaVersions}}.
 
-  Add the following configs to your <<<mapred-site.xml>>>.
+   [[2]] ssh must be installed and sshd must be running to use the Hadoop
+         scripts that manage remote Hadoop daemons.
 
-+---+
-  <property>
-    <name>mapreduce.cluster.temp.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.cluster.local.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-+---+
+** Installing Software
 
-** Setting up <<<yarn-site.xml>>>
+  If your cluster doesn't have the requisite software you will need to install
+  it.
 
-Add the following configs to your <<<yarn-site.xml>>>
+  For example on Ubuntu Linux:
 
-+---+
-  <property>
-    <name>yarn.resourcemanager.resource-tracker.address</name>
-    <value>host:port</value>
-    <description>host is the hostname of the resource manager and 
-    port is the port on which the NodeManagers contact the Resource Manager.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.address</name>
-    <value>host:port</value>
-    <description>host is the hostname of the resourcemanager and port is the port
-    on which the Applications in the cluster talk to the Resource Manager.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
-    <description>In case you do not want to use the default scheduler</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.address</name>
-    <value>host:port</value>
-    <description>the host is the hostname of the ResourceManager and the port is the port on
-    which the clients can talk to the Resource Manager. </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.local-dirs</name>
-    <value></value>
-    <description>the local directories used by the nodemanager</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.address</name>
-    <value>0.0.0.0:port</value>
-    <description>the nodemanagers bind to this port</description>
-  </property>  
-
-  <property>
-    <name>yarn.nodemanager.resource.memory-mb</name>
-    <value>10240</value>
-    <description>the amount of memory on the NodeManager in GB</description>
-  </property>
- 
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir</name>
-    <value>/app-logs</value>
-    <description>directory on hdfs where the application logs are moved to </description>
-  </property>
-
-   <property>
-    <name>yarn.nodemanager.log-dirs</name>
-    <value></value>
-    <description>the directories used by Nodemanagers as log directories</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle</value>
-    <description>shuffle service that needs to be set for Map Reduce to run </description>
-  </property>
-+---+
+----
+  $ sudo apt-get install ssh
+  $ sudo apt-get install rsync
+----
+
+* Download
+
+  To get a Hadoop distribution, download a recent stable release from one of
+  the {{{http://www.apache.org/dyn/closer.cgi/hadoop/common/}
+  Apache Download Mirrors}}.
+
+* Prepare to Start the Hadoop Cluster
+
+  Unpack the downloaded Hadoop distribution. In the distribution, edit
+  the file <<<etc/hadoop/hadoop-env.sh>>> to define some parameters as
+  follows:
+
+----
+  # set to the root of your Java installation
+  export JAVA_HOME=/usr/java/latest
+
+  # Assuming your installation directory is /usr/local/hadoop
+  export HADOOP_PREFIX=/usr/local/hadoop
+----
+
+  Try the following command:
+
+----
+  $ bin/hadoop
+----
+
+  This will display the usage documentation for the hadoop script.
+
+  Now you are ready to start your Hadoop cluster in one of the three supported
+  modes:
+
+   * {{{Standalone Operation}Local (Standalone) Mode}}
+
+   * {{{Pseudo-Distributed Operation}Pseudo-Distributed Mode}}
 
-* Setting up <<<capacity-scheduler.xml>>>
+   * {{{Fully-Distributed Operation}Fully-Distributed Mode}}
 
-   Make sure you populate the root queues in <<<capacity-scheduler.xml>>>.
+* Standalone Operation
+
+  By default, Hadoop is configured to run in a non-distributed mode, as a
+  single Java process. This is useful for debugging.
+
+  The following example copies the unpacked conf directory to use as input
+  and then finds and displays every match of the given regular expression.
+  Output is written to the given output directory.
+
+----
+  $ mkdir input
+  $ cp etc/hadoop/*.xml input
+  $ bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-${project.version}.jar grep input output 'dfs[a-z.]+'
+  $ cat output/*
+----
+
+* Pseudo-Distributed Operation
+
+  Hadoop can also be run on a single-node in a pseudo-distributed mode where
+  each Hadoop daemon runs in a separate Java process.
+
+** Configuration
+
+  Use the following:
+
+  etc/hadoop/core-site.xml:
 
 +---+
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>unfunded,default</value>
-  </property>
-  
-  <property>
-    <name>yarn.scheduler.capacity.root.capacity</name>
-    <value>100</value>
-  </property>
-  
-  <property>
-    <name>yarn.scheduler.capacity.root.unfunded.capacity</name>
-    <value>50</value>
-  </property>
+<configuration>
+    <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://localhost:9000</value>
+    </property>
+</configuration>
++---+
+
+  etc/hadoop/hdfs-site.xml:
  
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>50</value>
-  </property>
++---+
+<configuration>
+    <property>
+        <name>dfs.replication</name>
+        <value>1</value>
+    </property>
+</configuration>
 +---+
 
-* Running daemons.
+** Setup passphraseless ssh
+
+  Now check that you can ssh to the localhost without a passphrase:
+
+----
+  $ ssh localhost
+----
+
+  If you cannot ssh to localhost without a passphrase, execute the
+  following commands:
+
+----
+  $ ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
+  $ cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
+----
+
+** Execution
+
+  The following instructions are to run a MapReduce job locally.
+  If you want to execute a job on YARN, see {{YARN on Single Node}}.
+
+  [[1]] Format the filesystem:
+
+----
+  $ bin/hdfs namenode -format
+----
+
+  [[2]] Start NameNode daemon and DataNode daemon:
+
+----
+  $ sbin/start-dfs.sh
+----
+
+        The hadoop daemon log output is written to the <<<${HADOOP_LOG_DIR}>>>
+        directory (defaults to <<<${HADOOP_HOME}/logs>>>).
+
+  [[3]] Browse the web interface for the NameNode; by default it is
+        available at:
+
+        * NameNode - <<<http://localhost:50070/>>>
+
+  [[4]] Make the HDFS directories required to execute MapReduce jobs:
+
+----
+  $ bin/hdfs dfs -mkdir /user
+  $ bin/hdfs dfs -mkdir /user/<username>
+----
+
+  [[5]] Copy the input files into the distributed filesystem:
+
+----
+  $ bin/hdfs dfs -put etc/hadoop input
+----
+
+  [[6]] Run some of the examples provided:
+
+----
+  $ bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-${project.version}.jar grep input output 'dfs[a-z.]+'
+----
+
+  [[7]] Examine the output files:
+
+        Copy the output files from the distributed filesystem to the local
+        filesystem and examine them:
+
+----
+  $ bin/hdfs dfs -get output output
+  $ cat output/*
+----
+
+        or
+
+        View the output files on the distributed filesystem:
+
+----
+  $ bin/hdfs dfs -cat output/*
+----
+
+  [[8]] When you're done, stop the daemons with:
+
+----
+  $ sbin/stop-dfs.sh
+----
+
+** YARN on Single Node
+
+  You can run a MapReduce job on YARN in a pseudo-distributed mode by setting
+  a few parameters and running ResourceManager daemon and NodeManager daemon
+  in addition.
+
+  The following instructions assume that 1. ~ 4. steps of
+  {{{Execution}the above instructions}} are already executed.
+
+  [[1]] Configure parameters as follows:
+
+        etc/hadoop/mapred-site.xml:
 
-  Assuming that the environment variables <<$HADOOP_COMMON_HOME>>, <<$HADOOP_HDFS_HOME>>, <<$HADOO_MAPRED_HOME>>,
-  <<$HADOOP_YARN_HOME>>, <<$JAVA_HOME>> and <<$HADOOP_CONF_DIR>> have been set appropriately.
-  Set $<<$YARN_CONF_DIR>> the same as $<<HADOOP_CONF_DIR>>
- 
-  Run ResourceManager and NodeManager as:
-  
 +---+
-$ cd $HADOOP_MAPRED_HOME
-$ sbin/yarn-daemon.sh start resourcemanager
-$ sbin/yarn-daemon.sh start nodemanager
+<configuration>
+    <property>
+        <name>mapreduce.framework.name</name>
+        <value>yarn</value>
+    </property>
+</configuration>
 +---+
 
-  You should be up and running. You can run randomwriter as:
+        etc/hadoop/yarn-site.xml:
 
 +---+
-$ $HADOOP_COMMON_HOME/bin/hadoop jar hadoop-examples.jar randomwriter out
+<configuration>
+    <property>
+        <name>yarn.nodemanager.aux-services</name>
+        <value>mapreduce_shuffle</value>
+    </property>
+</configuration>
 +---+
 +---+
 
 
-Good luck.
+  [[2]] Start ResourceManager daemon and NodeManager daemon:
+
+----
+  $ sbin/start-yarn.sh
+----
+
+  [[3]] Browse the web interface for the ResourceManager; by default it is
+        available at:
+
+        * ResourceManager - <<<http://localhost:8088/>>>
+
+  [[4]] Run a MapReduce job.
+
+  [[5]] When you're done, stop the daemons with:
+
+----
+  $ sbin/stop-yarn.sh
+----
+
+* Fully-Distributed Operation
+
+  For information on setting up fully-distributed, non-trivial clusters
+  see {{{./ClusterSetup.html}Cluster Setup}}.

+ 17 - 17
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java

@@ -23,7 +23,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpServer.Builder;
+import org.apache.hadoop.http.HttpServer2.Builder;
 
 import java.io.File;
 import java.io.IOException;
@@ -33,7 +33,7 @@ import java.net.URL;
 import java.net.MalformedURLException;
 
 /**
- * This is a base class for functional tests of the {@link HttpServer}.
+ * This is a base class for functional tests of the {@link HttpServer2}.
  * The methods are static for other classes to import statically.
  */
 public class HttpServerFunctionalTest extends Assert {
@@ -54,7 +54,7 @@ public class HttpServerFunctionalTest extends Assert {
   * @throws IOException if a problem occurs
   * @throws AssertionError if a condition was not met
   */
-  public static HttpServer createTestServer() throws IOException {
+  public static HttpServer2 createTestServer() throws IOException {
    prepareTestWebapp();
    return createServer(TEST);
  }
@@ -68,13 +68,13 @@ public class HttpServerFunctionalTest extends Assert {
   * @throws IOException if a problem occurs
   * @throws AssertionError if a condition was not met
   */
-  public static HttpServer createTestServer(Configuration conf)
+  public static HttpServer2 createTestServer(Configuration conf)
      throws IOException {
    prepareTestWebapp();
    return createServer(TEST, conf);
  }
 
-  public static HttpServer createTestServer(Configuration conf, AccessControlList adminsAcl)
+  public static HttpServer2 createTestServer(Configuration conf, AccessControlList adminsAcl)
      throws IOException {
    prepareTestWebapp();
    return createServer(TEST, conf, adminsAcl);
@@ -89,7 +89,7 @@ public class HttpServerFunctionalTest extends Assert {
   * @throws IOException if a problem occurs
   * @throws AssertionError if a condition was not met
   */
-  public static HttpServer createTestServer(Configuration conf, 
+  public static HttpServer2 createTestServer(Configuration conf,
      String[] pathSpecs) throws IOException {
    prepareTestWebapp();
    return createServer(TEST, conf, pathSpecs);
@@ -120,10 +120,10 @@ public class HttpServerFunctionalTest extends Assert {
   * @return the server
   * @throws IOException if it could not be created
   */
-  public static HttpServer createServer(String host, int port)
+  public static HttpServer2 createServer(String host, int port)
      throws IOException {
    prepareTestWebapp();
-    return new HttpServer.Builder().setName(TEST)
+    return new HttpServer2.Builder().setName(TEST)
        .addEndpoint(URI.create("http://" + host + ":" + port))
        .setFindPort(true).build();
  }
@@ -134,7 +134,7 @@ public class HttpServerFunctionalTest extends Assert {
   * @return the server
   * @throws IOException if it could not be created
   */
-  public static HttpServer createServer(String webapp) throws IOException {
+  public static HttpServer2 createServer(String webapp) throws IOException {
    return localServerBuilder(webapp).setFindPort(true).build();
  }
  /**
@@ -144,18 +144,18 @@ public class HttpServerFunctionalTest extends Assert {
   * @return the server
   * @throws IOException if it could not be created
   */
-  public static HttpServer createServer(String webapp, Configuration conf)
+  public static HttpServer2 createServer(String webapp, Configuration conf)
      throws IOException {
    return localServerBuilder(webapp).setFindPort(true).setConf(conf).build();
  }
 
-  public static HttpServer createServer(String webapp, Configuration conf, AccessControlList adminsAcl)
+  public static HttpServer2 createServer(String webapp, Configuration conf, AccessControlList adminsAcl)
      throws IOException {
    return localServerBuilder(webapp).setFindPort(true).setConf(conf).setACL(adminsAcl).build();
  }
 
  private static Builder localServerBuilder(String webapp) {
-    return new HttpServer.Builder().setName(webapp).addEndpoint(
+    return new HttpServer2.Builder().setName(webapp).addEndpoint(
        URI.create("http://localhost:0"));
  }
  
@@ -167,7 +167,7 @@ public class HttpServerFunctionalTest extends Assert {
   * @return the server
   * @throws IOException if it could not be created
   */
-  public static HttpServer createServer(String webapp, Configuration conf,
+  public static HttpServer2 createServer(String webapp, Configuration conf,
      String[] pathSpecs) throws IOException {
    return localServerBuilder(webapp).setFindPort(true).setConf(conf).setPathSpec(pathSpecs).build();
  }
@@ -180,8 +180,8 @@ public class HttpServerFunctionalTest extends Assert {
   * @throws IOException on any failure
   * @throws AssertionError if a condition was not met
   */
-  public static HttpServer createAndStartTestServer() throws IOException {
-    HttpServer server = createTestServer();
+  public static HttpServer2 createAndStartTestServer() throws IOException {
+    HttpServer2 server = createTestServer();
    server.start();
    return server;
  }
@@ -191,7 +191,7 @@ public class HttpServerFunctionalTest extends Assert {
   * @param server to stop
   * @throws Exception on any failure
   */
-  public static void stop(HttpServer server) throws Exception {
+  public static void stop(HttpServer2 server) throws Exception {
    if (server != null) {
      server.stop();
    }
@@ -203,7 +203,7 @@ public class HttpServerFunctionalTest extends Assert {
   * @return a URL bonded to the base of the server
   * @throws MalformedURLException if the URL cannot be created.
   */
-  public static URL getServerURL(HttpServer server)
+  public static URL getServerURL(HttpServer2 server)
      throws MalformedURLException {
    assertNotNull("No server", server);
    return new URL("http://"

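The renamed test helper above exercises the HttpServer2.Builder fluent API throughout. A condensed, hedged sketch of the same build/start/stop pattern used by these tests; the webapp name and endpoint are placeholders, and only methods that already appear in this diff are used:

import java.net.URI;
import org.apache.hadoop.http.HttpServer2;

public class HttpServer2Example {
  public static void main(String[] args) throws Exception {
    HttpServer2 server = new HttpServer2.Builder()
        .setName("test")                                // webapp name, as in the tests above
        .addEndpoint(URI.create("http://localhost:0"))  // port 0 asks for any free port
        .setFindPort(true)                              // probe upward if the port is taken
        .build();
    server.start();
    try {
      System.out.println("listening on " + server.getConnectorAddress(0));
    } finally {
      server.stop();
    }
  }
}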
+ 3 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java

@@ -40,7 +40,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.junit.Test;
 
 public class TestGlobalFilter extends HttpServerFunctionalTest {
-  static final Log LOG = LogFactory.getLog(HttpServer.class);
+  static final Log LOG = LogFactory.getLog(HttpServer2.class);
  static final Set<String> RECORDS = new TreeSet<String>(); 
 
  /** A very simple filter that records accessed uri's */
@@ -106,9 +106,9 @@ public class TestGlobalFilter extends HttpServerFunctionalTest {
    Configuration conf = new Configuration();
    
    //start a http server with CountingFilter
-    conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
        RecordingFilter.Initializer.class.getName());
-    HttpServer http = createTestServer(conf);
+    HttpServer2 http = createTestServer(conf);
    http.start();
 
    final String fsckURL = "/fsck";

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHtmlQuoting.java

@@ -68,8 +68,8 @@ public class TestHtmlQuoting {
  @Test
  public void testRequestQuoting() throws Exception {
    HttpServletRequest mockReq = Mockito.mock(HttpServletRequest.class);
-    HttpServer.QuotingInputFilter.RequestQuoter quoter =
-      new HttpServer.QuotingInputFilter.RequestQuoter(mockReq);
+    HttpServer2.QuotingInputFilter.RequestQuoter quoter =
+      new HttpServer2.QuotingInputFilter.RequestQuoter(mockReq);
    
    Mockito.doReturn("a<b").when(mockReq).getParameter("x");
    assertEquals("Test simple param quoting",

+ 27 - 38
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

@@ -51,7 +51,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.http.HttpServer.QuotingInputFilter.RequestQuoter;
+import org.apache.hadoop.http.HttpServer2.QuotingInputFilter.RequestQuoter;
 import org.apache.hadoop.http.resource.JerseyResource;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Groups;
@@ -70,7 +70,7 @@ import static org.mockito.Mockito.*;
 
 public class TestHttpServer extends HttpServerFunctionalTest {
  static final Log LOG = LogFactory.getLog(TestHttpServer.class);
-  private static HttpServer server;
+  private static HttpServer2 server;
  private static URL baseUrl;
  private static final int MAX_THREADS = 10;
  
@@ -150,7 +150,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
 
  @BeforeClass public static void setup() throws Exception {
    Configuration conf = new Configuration();
-    conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);
+    conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
    server = createTestServer(conf);
    server.addServlet("echo", "/echo", EchoServlet.class);
    server.addServlet("echomap", "/echomap", EchoMapServlet.class);
@@ -357,7 +357,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
    Configuration conf = new Configuration();
 
    // Authorization is disabled by default
-    conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
        DummyFilterInitializer.class.getName());
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
        MyGroupsProvider.class.getName());
@@ -366,9 +366,9 @@ public class TestHttpServer extends HttpServerFunctionalTest {
    MyGroupsProvider.mapping.put("userA", Arrays.asList("groupA"));
    MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB"));
 
-    HttpServer myServer = new HttpServer.Builder().setName("test")
+    HttpServer2 myServer = new HttpServer2.Builder().setName("test")
        .addEndpoint(new URI("http://localhost:0")).setFindPort(true).build();
-    myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf);
+    myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf);
    myServer.start();
    String serverURL = "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
    for (String servlet : new String[] { "conf", "logs", "stacks",
@@ -394,7 +394,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
        true);
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
        true);
-    conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
        DummyFilterInitializer.class.getName());
 
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
@@ -407,10 +407,10 @@ public class TestHttpServer extends HttpServerFunctionalTest {
    MyGroupsProvider.mapping.put("userD", Arrays.asList("groupD"));
    MyGroupsProvider.mapping.put("userE", Arrays.asList("groupE"));
 
-    HttpServer myServer = new HttpServer.Builder().setName("test")
+    HttpServer2 myServer = new HttpServer2.Builder().setName("test")
        .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf)
        .setACL(new AccessControlList("userA,userB groupC,groupD")).build();
-    myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf);
+    myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf);
    myServer.start();
 
    String serverURL = "http://"
@@ -468,39 +468,39 @@ public class TestHttpServer extends HttpServerFunctionalTest {
    Configuration conf = new Configuration();
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false);
    ServletContext context = Mockito.mock(ServletContext.class);
-    Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
-    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(null);
+    Mockito.when(context.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
+    Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(null);
    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
    Mockito.when(request.getRemoteUser()).thenReturn(null);
    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
 
    //authorization OFF
-    Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response));
+    Assert.assertTrue(HttpServer2.hasAdministratorAccess(context, request, response));
 
    //authorization ON & user NULL
    response = Mockito.mock(HttpServletResponse.class);
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
-    Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response));
+    Assert.assertFalse(HttpServer2.hasAdministratorAccess(context, request, response));
    Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString());
 
    //authorization ON & user NOT NULL & ACLs NULL
    response = Mockito.mock(HttpServletResponse.class);
    Mockito.when(request.getRemoteUser()).thenReturn("foo");
-    Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response));
+    Assert.assertTrue(HttpServer2.hasAdministratorAccess(context, request, response));
 
    //authorization ON & user NOT NULL & ACLs NOT NULL & user not in ACLs
    response = Mockito.mock(HttpServletResponse.class);
    AccessControlList acls = Mockito.mock(AccessControlList.class);
    Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
-    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
-    Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response));
+    Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
+    Assert.assertFalse(HttpServer2.hasAdministratorAccess(context, request, response));
    Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString());
 
    //authorization ON & user NOT NULL & ACLs NOT NULL & user in in ACLs
    response = Mockito.mock(HttpServletResponse.class);
    Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(true);
-    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
-    Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response));
+    Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
+    Assert.assertTrue(HttpServer2.hasAdministratorAccess(context, request, response));
 
  }
 
@@ -508,38 +508,27 @@ public class TestHttpServer extends HttpServerFunctionalTest {
  public void testRequiresAuthorizationAccess() throws Exception {
    Configuration conf = new Configuration();
    ServletContext context = Mockito.mock(ServletContext.class);
-    Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
+    Mockito.when(context.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
    HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
 
    //requires admin access to instrumentation, FALSE by default
-    Assert.assertTrue(HttpServer.isInstrumentationAccessAllowed(context, request, response));
+    Assert.assertTrue(HttpServer2.isInstrumentationAccessAllowed(context, request, response));
 
    //requires admin access to instrumentation, TRUE
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
    AccessControlList acls = Mockito.mock(AccessControlList.class);
    Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
-    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
-    Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context, request, response));
-  }
-
-  @Test
-  @SuppressWarnings("deprecation")
-  public void testOldConstructor() throws Exception {
-    HttpServer server = new HttpServer("test", "0.0.0.0", 0, false);
-    try {
-      server.start();
-    } finally {
-      server.stop();
-    }
+    Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
+    Assert.assertFalse(HttpServer2.isInstrumentationAccessAllowed(context, request, response));
  }
 
  @Test public void testBindAddress() throws Exception {
    checkBindAddress("localhost", 0, false).stop();
    // hang onto this one for a bit more testing
-    HttpServer myServer = checkBindAddress("localhost", 0, false);
-    HttpServer myServer2 = null;
+    HttpServer2 myServer = checkBindAddress("localhost", 0, false);
+    HttpServer2 myServer2 = null;
    try { 
      int port = myServer.getConnectorAddress(0).getPort();
      // it's already in use, true = expect a higher port
@@ -558,9 +547,9 @@ public class TestHttpServer extends HttpServerFunctionalTest {
    }
  }
  
-  private HttpServer checkBindAddress(String host, int port, boolean findPort)
+  private HttpServer2 checkBindAddress(String host, int port, boolean findPort)
      throws Exception {
-    HttpServer server = createServer(host, port);
+    HttpServer2 server = createServer(host, port);
    try {
      // not bound, ephemeral should return requested port (0 for ephemeral)
      List<?> listeners = (List<?>) Whitebox.getInternalState(server,
@@ -608,7 +597,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
  public void testHttpServerBuilderWithExternalConnector() throws Exception {
    Connector c = mock(Connector.class);
    doReturn("localhost").when(c).getHost();
-    HttpServer s = new HttpServer.Builder().setName("test").setConnector(c)
+    HttpServer2 s = new HttpServer2.Builder().setName("test").setConnector(c)
        .build();
    s.stop();
  }

+ 16 - 16
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLifecycle.java

@@ -23,18 +23,18 @@ import org.junit.Test;
 public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
 public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
 
 
   /**
   /**
-   * Check that a server is alive by probing the {@link HttpServer#isAlive()} method
+   * Check that a server is alive by probing the {@link HttpServer2#isAlive()} method
    * and the text of its toString() description
    * and the text of its toString() description
    * @param server server
    * @param server server
    */
    */
-  private void assertAlive(HttpServer server) {
+  private void assertAlive(HttpServer2 server) {
     assertTrue("Server is not alive", server.isAlive());
     assertTrue("Server is not alive", server.isAlive());
-    assertToStringContains(server, HttpServer.STATE_DESCRIPTION_ALIVE);
+    assertToStringContains(server, HttpServer2.STATE_DESCRIPTION_ALIVE);
   }
   }
 
 
-  private void assertNotLive(HttpServer server) {
+  private void assertNotLive(HttpServer2 server) {
     assertTrue("Server should not be live", !server.isAlive());
     assertTrue("Server should not be live", !server.isAlive());
-    assertToStringContains(server, HttpServer.STATE_DESCRIPTION_NOT_LIVE);
+    assertToStringContains(server, HttpServer2.STATE_DESCRIPTION_NOT_LIVE);
   }
   }
 
 
   /**
   /**
@@ -43,12 +43,12 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
    * @throws Throwable on failure
    * @throws Throwable on failure
    */
    */
   @Test public void testCreatedServerIsNotAlive() throws Throwable {
   @Test public void testCreatedServerIsNotAlive() throws Throwable {
-    HttpServer server = createTestServer();
+    HttpServer2 server = createTestServer();
     assertNotLive(server);
     assertNotLive(server);
   }
   }
 
 
   @Test public void testStopUnstartedServer() throws Throwable {
   @Test public void testStopUnstartedServer() throws Throwable {
-    HttpServer server = createTestServer();
+    HttpServer2 server = createTestServer();
     stop(server);
     stop(server);
   }
   }
 
 
@@ -59,7 +59,7 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
    */
    */
   @Test
   @Test
   public void testStartedServerIsAlive() throws Throwable {
   public void testStartedServerIsAlive() throws Throwable {
-    HttpServer server = null;
+    HttpServer2 server = null;
     server = createTestServer();
     server = createTestServer();
     assertNotLive(server);
     assertNotLive(server);
     server.start();
     server.start();
@@ -78,22 +78,22 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
     requestLogAppender.setName("httprequestlog");
     requestLogAppender.setName("httprequestlog");
     requestLogAppender.setFilename(System.getProperty("test.build.data", "/tmp/")
     requestLogAppender.setFilename(System.getProperty("test.build.data", "/tmp/")
         + "jetty-name-yyyy_mm_dd.log");
         + "jetty-name-yyyy_mm_dd.log");
-    Logger.getLogger(HttpServer.class.getName() + ".test").addAppender(requestLogAppender);
-    HttpServer server = null;
+    Logger.getLogger(HttpServer2.class.getName() + ".test").addAppender(requestLogAppender);
+    HttpServer2 server = null;
     server = createTestServer();
     server = createTestServer();
     assertNotLive(server);
     assertNotLive(server);
     server.start();
     server.start();
     assertAlive(server);
     assertAlive(server);
     stop(server);
     stop(server);
-    Logger.getLogger(HttpServer.class.getName() + ".test").removeAppender(requestLogAppender);
+    Logger.getLogger(HttpServer2.class.getName() + ".test").removeAppender(requestLogAppender);
   }
   }
 
 
   /**
   /**
-   * Assert that the result of {@link HttpServer#toString()} contains the specific text
+   * Assert that the result of {@link HttpServer2#toString()} contains the specific text
    * @param server server to examine
    * @param server server to examine
    * @param text text to search for
    * @param text text to search for
    */
    */
-  private void assertToStringContains(HttpServer server, String text) {
+  private void assertToStringContains(HttpServer2 server, String text) {
     String description = server.toString();
     String description = server.toString();
     assertTrue("Did not find \"" + text + "\" in \"" + description + "\"",
     assertTrue("Did not find \"" + text + "\" in \"" + description + "\"",
                description.contains(text));
                description.contains(text));
@@ -105,7 +105,7 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
    * @throws Throwable on failure
    * @throws Throwable on failure
    */
    */
   @Test public void testStoppedServerIsNotAlive() throws Throwable {
   @Test public void testStoppedServerIsNotAlive() throws Throwable {
-    HttpServer server = createAndStartTestServer();
+    HttpServer2 server = createAndStartTestServer();
     assertAlive(server);
     assertAlive(server);
     stop(server);
     stop(server);
     assertNotLive(server);
     assertNotLive(server);
@@ -117,7 +117,7 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
    * @throws Throwable on failure
    * @throws Throwable on failure
    */
    */
   @Test public void testStoppingTwiceServerIsAllowed() throws Throwable {
   @Test public void testStoppingTwiceServerIsAllowed() throws Throwable {
-    HttpServer server = createAndStartTestServer();
+    HttpServer2 server = createAndStartTestServer();
     assertAlive(server);
     assertAlive(server);
     stop(server);
     stop(server);
     assertNotLive(server);
     assertNotLive(server);
@@ -133,7 +133,7 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
    */
    */
   @Test
   @Test
   public void testWepAppContextAfterServerStop() throws Throwable {
   public void testWepAppContextAfterServerStop() throws Throwable {
-    HttpServer server = null;
+    HttpServer2 server = null;
     String key = "test.attribute.key";
     String key = "test.attribute.key";
     String value = "test.attribute.value";
     String value = "test.attribute.value";
     server = createTestServer();
     server = createTestServer();
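
The lifecycle test above only swaps HttpServer for the renamed HttpServer2; the start/alive/stop behaviour it checks is unchanged. As a rough, self-contained sketch of that same pattern against the public HttpServer2 API (the server name "sketch" and the ephemeral-port endpoint are illustrative; the test itself goes through helpers in HttpServerFunctionalTest instead):

```java
import java.net.URI;
import org.apache.hadoop.http.HttpServer2;

public class HttpServer2LifecycleSketch {
  public static void main(String[] args) throws Exception {
    // Bind to port 0 and let setFindPort(true) pick a free port.
    HttpServer2 server = new HttpServer2.Builder()
        .setName("sketch")
        .addEndpoint(new URI("http://localhost:0"))
        .setFindPort(true)
        .build();

    System.out.println(server.isAlive()); // false: built but not started
    server.start();                       // starts the embedded Jetty instance
    System.out.println(server.isAlive()); // true: toString() now reports the ALIVE state
    server.stop();                        // stopping twice is allowed, as the test verifies
    System.out.println(server.isAlive()); // false again
  }
}
```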

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWebapps.java

@@ -36,7 +36,7 @@ public class TestHttpServerWebapps extends HttpServerFunctionalTest {
    */
    */
   @Test
   @Test
   public void testValidServerResource() throws Throwable {
   public void testValidServerResource() throws Throwable {
-    HttpServer server = null;
+    HttpServer2 server = null;
     try {
     try {
       server = createServer("test");
       server = createServer("test");
     } finally {
     } finally {
@@ -51,7 +51,7 @@ public class TestHttpServerWebapps extends HttpServerFunctionalTest {
   @Test
   @Test
   public void testMissingServerResource() throws Throwable {
   public void testMissingServerResource() throws Throwable {
     try {
     try {
-      HttpServer server = createServer("NoSuchWebapp");
+      HttpServer2 server = createServer("NoSuchWebapp");
       //should not have got here.
       //should not have got here.
       //close the server
       //close the server
       String serverDescription = server.toString();
       String serverDescription = server.toString();

+ 3 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java

@@ -40,7 +40,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.junit.Test;
 import org.junit.Test;
 
 
 public class TestPathFilter extends HttpServerFunctionalTest {
 public class TestPathFilter extends HttpServerFunctionalTest {
-  static final Log LOG = LogFactory.getLog(HttpServer.class);
+  static final Log LOG = LogFactory.getLog(HttpServer2.class);
   static final Set<String> RECORDS = new TreeSet<String>(); 
   static final Set<String> RECORDS = new TreeSet<String>(); 
 
 
   /** A very simple filter that records accessed uri's */
   /** A very simple filter that records accessed uri's */
@@ -107,10 +107,10 @@ public class TestPathFilter extends HttpServerFunctionalTest {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     
     
     //start a http server with CountingFilter
     //start a http server with CountingFilter
-    conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
         RecordingFilter.Initializer.class.getName());
         RecordingFilter.Initializer.class.getName());
     String[] pathSpecs = { "/path", "/path/*" };
     String[] pathSpecs = { "/path", "/path/*" };
-    HttpServer http = createTestServer(conf, pathSpecs);
+    HttpServer2 http = createTestServer(conf, pathSpecs);
     http.start();
     http.start();
 
 
     final String baseURL = "/path";
     final String baseURL = "/path";
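
For context, HttpServer2.FILTER_INITIALIZER_PROPERTY names FilterInitializer classes that register servlet filters on every server built from that Configuration; the test plugs in its own RecordingFilter this way. A minimal hedged sketch of such an initializer is below, assuming the FilterInitializer/FilterContainer API from the same org.apache.hadoop.http package; the class name, the filter name "logging", and com.example.LoggingFilter are placeholders, not anything from this commit:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
import org.apache.hadoop.http.HttpServer2;

// Hypothetical initializer: registers one filter with every HttpServer2
// whose Configuration lists this class under the filter-initializer property.
public class LoggingFilterInitializer extends FilterInitializer {
  @Override
  public void initFilter(FilterContainer container, Configuration conf) {
    // Name and filter class are illustrative only; no init parameters.
    container.addFilter("logging", "com.example.LoggingFilter", null);
  }

  // How a caller would wire it up, mirroring the conf.set(...) in the test.
  public static Configuration exampleConf() {
    Configuration conf = new Configuration();
    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
        LoggingFilterInitializer.class.getName());
    return conf;
  }
}
```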

+ 3 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java

@@ -48,7 +48,7 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
 
 
   private static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class);
   private static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class);
   private static Configuration conf;
   private static Configuration conf;
-  private static HttpServer server;
+  private static HttpServer2 server;
   private static URL baseUrl;
   private static URL baseUrl;
   private static String keystoresDir;
   private static String keystoresDir;
   private static String sslConfDir;
   private static String sslConfDir;
@@ -57,7 +57,7 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
   @BeforeClass
   @BeforeClass
   public static void setup() throws Exception {
   public static void setup() throws Exception {
     conf = new Configuration();
     conf = new Configuration();
-    conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);
+    conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
 
 
     File base = new File(BASEDIR);
     File base = new File(BASEDIR);
     FileUtil.fullyDelete(base);
     FileUtil.fullyDelete(base);
@@ -73,7 +73,7 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
     clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
     clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
     clientSslFactory.init();
     clientSslFactory.init();
 
 
-    server = new HttpServer.Builder()
+    server = new HttpServer2.Builder()
         .setName("test")
         .setName("test")
         .addEndpoint(new URI("https://localhost"))
         .addEndpoint(new URI("https://localhost"))
         .setConf(conf)
         .setConf(conf)
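
The SSL test caps the embedded Jetty thread pool and requests an HTTPS endpoint through the builder. A hedged sketch of just the calls visible in this hunk follows; the test additionally wires in the generated keystore/truststore configuration before calling build(), which the hunk truncates, so this builder alone is not a working HTTPS server:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;

public class SslServerConfigSketch {
  static HttpServer2.Builder sslBuilder() throws Exception {
    Configuration conf = new Configuration();
    // Same knob the test uses to keep the Jetty thread pool small.
    conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);

    // An https:// endpoint asks the builder for an SSL connector; key and
    // trust store settings still have to be supplied before build().
    return new HttpServer2.Builder()
        .setName("test")
        .addEndpoint(new URI("https://localhost"))
        .setConf(conf);
  }
}
```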

+ 7 - 7
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java

@@ -40,7 +40,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 import org.junit.Test;
 
 
 public class TestServletFilter extends HttpServerFunctionalTest {
 public class TestServletFilter extends HttpServerFunctionalTest {
-  static final Log LOG = LogFactory.getLog(HttpServer.class);
+  static final Log LOG = LogFactory.getLog(HttpServer2.class);
   static volatile String uri = null; 
   static volatile String uri = null; 
 
 
   /** A very simple filter which record the uri filtered. */
   /** A very simple filter which record the uri filtered. */
@@ -105,9 +105,9 @@ public class TestServletFilter extends HttpServerFunctionalTest {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     
     
     //start a http server with CountingFilter
     //start a http server with CountingFilter
-    conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
         SimpleFilter.Initializer.class.getName());
         SimpleFilter.Initializer.class.getName());
-    HttpServer http = createTestServer(conf);
+    HttpServer2 http = createTestServer(conf);
     http.start();
     http.start();
 
 
     final String fsckURL = "/fsck";
     final String fsckURL = "/fsck";
@@ -167,9 +167,9 @@ public class TestServletFilter extends HttpServerFunctionalTest {
   public void testServletFilterWhenInitThrowsException() throws Exception {
   public void testServletFilterWhenInitThrowsException() throws Exception {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     // start a http server with ErrorFilter
     // start a http server with ErrorFilter
-    conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
         ErrorFilter.Initializer.class.getName());
         ErrorFilter.Initializer.class.getName());
-    HttpServer http = createTestServer(conf);
+    HttpServer2 http = createTestServer(conf);
     try {
     try {
       http.start();
       http.start();
       fail("expecting exception");
       fail("expecting exception");
@@ -186,8 +186,8 @@ public class TestServletFilter extends HttpServerFunctionalTest {
   public void testContextSpecificServletFilterWhenInitThrowsException()
   public void testContextSpecificServletFilterWhenInitThrowsException()
       throws Exception {
       throws Exception {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
-    HttpServer http = createTestServer(conf);
-    HttpServer.defineFilter(http.webAppContext,
+    HttpServer2 http = createTestServer(conf);
+    HttpServer2.defineFilter(http.webAppContext,
         "ErrorFilter", ErrorFilter.class.getName(),
         "ErrorFilter", ErrorFilter.class.getName(),
         null, null);
         null, null);
     try {
     try {

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/jmx/TestJMXJsonServlet.java

@@ -24,7 +24,7 @@ import java.util.regex.Pattern;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.http.HttpServerFunctionalTest;
 import org.apache.hadoop.http.HttpServerFunctionalTest;
 import org.junit.AfterClass;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.BeforeClass;
@@ -32,7 +32,7 @@ import org.junit.Test;
 
 
 public class TestJMXJsonServlet extends HttpServerFunctionalTest {
 public class TestJMXJsonServlet extends HttpServerFunctionalTest {
   private   static final Log LOG = LogFactory.getLog(TestJMXJsonServlet.class);
   private   static final Log LOG = LogFactory.getLog(TestJMXJsonServlet.class);
-  private static HttpServer server;
+  private static HttpServer2 server;
   private static URL baseUrl;
   private static URL baseUrl;
 
 
   @BeforeClass public static void setup() throws Exception {
   @BeforeClass public static void setup() throws Exception {

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.log;
 import java.io.*;
 import java.io.*;
 import java.net.*;
 import java.net.*;
 
 
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetUtils;
 
 
 import junit.framework.TestCase;
 import junit.framework.TestCase;
@@ -44,7 +44,7 @@ public class TestLogLevel extends TestCase {
       log.error("log.error1");
       log.error("log.error1");
       assertTrue(!Level.ERROR.equals(log.getEffectiveLevel()));
       assertTrue(!Level.ERROR.equals(log.getEffectiveLevel()));
 
 
-      HttpServer server = new HttpServer.Builder().setName("..")
+      HttpServer2 server = new HttpServer2.Builder().setName("..")
           .addEndpoint(new URI("http://localhost:0")).setFindPort(true)
           .addEndpoint(new URI("http://localhost:0")).setFindPort(true)
           .build();
           .build();
       
       

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java

@@ -18,7 +18,7 @@ package org.apache.hadoop.security;
 
 
 
 
 import junit.framework.TestCase;
 import junit.framework.TestCase;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterContainer;
@@ -49,7 +49,7 @@ public class TestAuthenticationFilter extends TestCase {
              AuthenticationFilterInitializer.SIGNATURE_SECRET_FILE, 
              AuthenticationFilterInitializer.SIGNATURE_SECRET_FILE, 
              secretFile.getAbsolutePath());
              secretFile.getAbsolutePath());
 
 
-    conf.set(HttpServer.BIND_ADDRESS, "barhost");
+    conf.set(HttpServer2.BIND_ADDRESS, "barhost");
     
     
     FilterContainer container = Mockito.mock(FilterContainer.class);
     FilterContainer container = Mockito.mock(FilterContainer.class);
     Mockito.doAnswer(
     Mockito.doAnswer(

+ 3 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java

@@ -331,7 +331,9 @@ public class TestSecurityUtil {
 
 
   @Test
   @Test
   public void testSocketAddrWithIP() {
   public void testSocketAddrWithIP() {
-    verifyServiceAddr("127.0.0.1", "127.0.0.1");
+    String staticHost = "127.0.0.1";
+    NetUtils.addStaticResolution(staticHost, "localhost");
+    verifyServiceAddr(staticHost, "127.0.0.1");
   }
   }
 
 
   @Test
   @Test
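
The fix above pins the hostname lookup so the assertion no longer depends on the machine's resolver. NetUtils keeps a static host-to-name table for exactly this kind of test; a minimal sketch of its use is below (addStaticResolution appears in the hunk itself, while getStaticResolution is assumed to be the matching read-side accessor):

```java
import org.apache.hadoop.net.NetUtils;

public class StaticResolutionSketch {
  public static void main(String[] args) {
    // Map "127.0.0.1" to the name "localhost" in NetUtils' static table so
    // resolution is deterministic regardless of /etc/hosts or DNS.
    NetUtils.addStaticResolution("127.0.0.1", "localhost");

    // Lookups made through NetUtils consult the static table first.
    System.out.println(NetUtils.getStaticResolution("127.0.0.1")); // localhost
  }
}
```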

+ 239 - 220
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -290,6 +290,30 @@ Release 2.4.0 - UNRELEASED
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
 
 
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    HDFS-5781. Use an array to record the mapping between FSEditLogOpCode and 
+    the corresponding byte value. (jing9)
+
+  OPTIMIZATIONS
+
+    HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery
+    (todd)
+
+  BUG FIXES
+
+    HDFS-5492. Port HDFS-2069 (Incorrect default trash interval in the
+    docs) to trunk. (Akira Ajisaka via Arpit Agarwal)
+
+    HDFS-5843. DFSClient.getFileChecksum() throws IOException if checksum is 
+    disabled. (Laurent Goujon via jing9)
+
+Release 2.3.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
   NEW FEATURES
   NEW FEATURES
 
 
     HDFS-5122. Support failover and retry in WebHdfsFileSystem for NN HA.
     HDFS-5122. Support failover and retry in WebHdfsFileSystem for NN HA.
@@ -329,6 +353,43 @@ Release 2.4.0 - UNRELEASED
 
 
   IMPROVEMENTS
   IMPROVEMENTS
 
 
+    HDFS-5360. Improvement of usage message of renameSnapshot and
+    deleteSnapshot. (Shinichi Yamashita via wang)
+
+    HDFS-5331. make SnapshotDiff.java to a o.a.h.util.Tool interface implementation. 
+    (Vinayakumar B via umamahesh)
+
+    HDFS-4657.  Limit the number of blocks logged by the NN after a block
+    report to a configurable value.  (Aaron T. Myers via Colin Patrick
+    McCabe)
+
+    HDFS-5344. Make LsSnapshottableDir as Tool interface implementation. (Sathish via umamahesh)
+
+    HDFS-5544. Adding Test case For Checking dfs.checksum type as NULL value. (Sathish via umamahesh)
+
+    HDFS-5568. Support includeSnapshots option with Fsck command. (Vinayakumar B via umamahesh)
+
+    HDFS-4983. Numeric usernames do not work with WebHDFS FS. (Yongjun Zhang via
+    jing9)
+
+    HDFS-5592. statechangeLog of completeFile should be logged only in case of success. 
+    (Vinayakumar via umamahesh)
+
+    HDFS-5662. Can't decommission a DataNode due to file's replication factor
+    larger than the rest of the cluster size. (brandonli)
+
+    HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options.
+    (shv)
+
+    HDFS-5675. Add Mkdirs operation to NNThroughputBenchmark.
+    (Plamen Jeliazkov via shv)
+
+    HDFS-5677. Need error checking for HA cluster configuration.
+    (Vincent Sheffer via cos)
+
+    HDFS-5825. Use FileUtils.copyFile() to implement DFSTestUtils.copyFile().
+    (Haohui Mai via Arpit Agarwal)
+
     HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
     HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
 
 
     HDFS-4278. Log an ERROR when DFS_BLOCK_ACCESS_TOKEN_ENABLE config is
     HDFS-4278. Log an ERROR when DFS_BLOCK_ACCESS_TOKEN_ENABLE config is
@@ -504,8 +565,7 @@ Release 2.4.0 - UNRELEASED
     HDFS-5788. listLocatedStatus response can be very large. (Nathan Roberts
     HDFS-5788. listLocatedStatus response can be very large. (Nathan Roberts
     via kihwal)
     via kihwal)
 
 
-    HDFS-5781. Use an array to record the mapping between FSEditLogOpCode and 
-    the corresponding byte value. (jing9)
+    HDFS-5841. Update HDFS caching documentation with new changes. (wang)
 
 
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
@@ -521,6 +581,177 @@ Release 2.4.0 - UNRELEASED
 
 
   BUG FIXES
   BUG FIXES
 
 
+    HDFS-5307. Support both HTTP and HTTPS in jsp pages (Haohui Mai via
+    brandonli)
+
+    HDFS-5291. Standby namenode after transition to active goes into safemode.
+    (jing9)
+
+    HDFS-5317. Go back to DFS Home link does not work on datanode webUI
+    (Haohui Mai via brandonli)
+
+    HDFS-5316. Namenode ignores the default https port (Haohui Mai via
+    brandonli)
+
+    HDFS-5281. COMMIT request should not block. (brandonli)
+
+    HDFS-5337. should do hsync for a commit request even there is no pending
+    writes (brandonli)
+
+    HDFS-5335. Hive query failed with possible race in dfs output stream.
+    (Haohui Mai via suresh)
+
+    HDFS-5322. HDFS delegation token not found in cache errors seen on secure HA 
+    clusters. (jing9)
+
+    HDFS-5329. Update FSNamesystem#getListing() to handle inode path in startAfter
+    token. (brandonli)
+
+    HDFS-5330. fix readdir and readdirplus for large directories (brandonli)
+
+    HDFS-5370. Typo in Error Message: different between range in condition
+    and range in error message. (Kousuke Saruta via suresh)
+
+    HDFS-5365. Fix libhdfs compile error on FreeBSD9. (Radim Kolar via cnauroth)
+    
+    HDFS-5347. Add HDFS NFS user guide. (brandonli)
+
+    HDFS-5403. WebHdfs client cannot communicate with older WebHdfs servers
+    post HDFS-5306. (atm)
+
+    HDFS-5171. NFS should create input stream for a file and try to share it
+    with multiple read requests. (Haohui Mai via brandonli)
+
+    HDFS-5413. hdfs.cmd does not support passthrough to any arbitrary class.
+    (cnauroth)
+
+    HDFS-5433. When reloading fsimage during checkpointing, we should clear
+    existing snapshottable directories. (Aaron T. Myers via wang)
+
+    HDFS-5432. TestDatanodeJsp fails on Windows due to assumption that loopback
+    address resolves to host name localhost. (cnauroth)
+
+    HDFS-5065. TestSymlinkHdfsDisable fails on Windows. (ivanmi)
+
+    HDFS-4633 TestDFSClientExcludedNodes fails sporadically if excluded nodes
+    cache expires too quickly  (Chris Nauroth via Sanjay)
+
+    HDFS-5037. Active NN should trigger its own edit log rolls (wang)
+
+    HDFS-5035.  getFileLinkStatus and rename do not correctly check permissions
+    of symlinks.  (Andrew Wang via Colin Patrick McCabe)
+
+    HDFS-5456. NameNode startup progress creates new steps if caller attempts to
+    create a counter for a step that doesn't already exist.  (cnauroth)
+
+    HDFS-5458. Datanode failed volume threshold ignored if exception is thrown
+    in getDataDirsFromURIs. (Mike Mellenthin via wang)
+
+    HDFS-5252. Stable write is not handled correctly in someplace. (brandonli)
+
+    HDFS-5364. Add OpenFileCtx cache. (brandonli)
+
+    HDFS-5469. Add configuration property for the sub-directroy export path
+    (brandonli)
+
+    HDFS-5519. COMMIT handler should update the commit status after sync
+    (brandonli)
+
+    HDFS-5372. In FSNamesystem, hasReadLock() returns false if the current thread 
+    holds the write lock (VinayaKumar B via umamahesh)
+
+    HDFS-4516. Client crash after block allocation and NN switch before lease recovery for 
+    the same file can cause readers to fail forever (VinaayKumar B via umamahesh)
+
+    HDFS-5014. Process register commands with out holding BPOfferService lock. 
+    (Vinaykumar B via umamahesh)
+
+    HDFS-5288. Close idle connections in portmap (Haohui Mai via brandonli)
+
+    HDFS-5407. Fix typos in DFSClientCache (Haohui Mai via brandonli)
+
+    HDFS-5548. Use ConcurrentHashMap in portmap (Haohui Mai via brandonli)
+
+    HDFS-5577. NFS user guide update (brandonli)
+
+    HDFS-5563. NFS gateway should commit the buffered data when read request comes
+    after write to the same file (brandonli)
+
+    HDFS-4997. libhdfs doesn't return correct error codes in most cases (cmccabe)
+
+    HDFS-5587. add debug information when NFS fails to start with duplicate user
+    or group names (brandonli)
+
+    HDFS-5590. Block ID and generation stamp may be reused when persistBlocks is 
+    set to false. (jing9)
+
+    HDFS-5353. Short circuit reads fail when dfs.encrypt.data.transfer is 
+    enabled. (Colin Patrick McCabe via jing9)
+
+    HDFS-5283. Under construction blocks only inside snapshots should not be
+    counted in safemode threshhold.  (Vinay via szetszwo)
+
+    HDFS-5257. addBlock() retry should return LocatedBlock with locations else client 
+    will get AIOBE. (Vinay via jing9)
+
+    HDFS-5427. Not able to read deleted files from snapshot directly under 
+    snapshottable dir after checkpoint and NN restart. (Vinay via jing9)
+
+    HDFS-5443. Delete 0-sized block when deleting an under-construction file that 
+    is included in snapshot. (jing9)
+
+    HDFS-5476. Snapshot: clean the blocks/files/directories under a renamed 
+    file/directory while deletion. (jing9)
+
+    HDFS-5425. Renaming underconstruction file with snapshots can make NN failure on 
+    restart. (jing9 and Vinay)
+
+    HDFS-5474. Deletesnapshot can make Namenode in safemode on NN restarts. 
+    (Sathish via jing9)
+
+    HDFS-5504. In HA mode, OP_DELETE_SNAPSHOT is not decrementing the safemode threshold, 
+    leads to NN safemode. (Vinay via jing9)
+
+    HDFS-5428. Under construction files deletion after snapshot+checkpoint+nn restart 
+    leads nn safemode. (jing9)
+
+    HDFS-5074. Allow starting up from an fsimage checkpoint in the middle of a
+    segment. (Todd Lipcon via atm)
+
+    HDFS-4201. NPE in BPServiceActor#sendHeartBeat. (jxiang via cmccabe)
+
+    HDFS-5666. Fix inconsistent synchronization in BPOfferService (jxiang via cmccabe)
+    
+    HDFS-5657. race condition causes writeback state error in NFS gateway (brandonli)
+
+    HDFS-5661. Browsing FileSystem via web ui, should use datanode's fqdn instead of ip 
+    address. (Benoy Antony via jing9)
+
+    HDFS-5582. hdfs getconf -excludeFile or -includeFile always failed (sathish
+    via cmccabe)
+
+    HDFS-5671. Fix socket leak in DFSInputStream#getBlockReader. (JamesLi via umamahesh) 
+
+    HDFS-5649. Unregister NFS and Mount service when NFS gateway is shutting down.
+    (brandonli)
+
+    HDFS-5789. Some of snapshot APIs missing checkOperation double check in fsn. (umamahesh)
+
+    HDFS-5343. When cat command is issued on snapshot files getting unexpected result.
+    (Sathish via umamahesh)
+
+    HDFS-5297. Fix dead links in HDFS site documents. (Akira Ajisaka via
+    Arpit Agarwal)
+
+    HDFS-5830. WebHdfsFileSystem.getFileBlockLocations throws
+    IllegalArgumentException when accessing another cluster. (Yongjun Zhang via
+    Colin Patrick McCabe)
+
+    HDFS-5833. Fix SecondaryNameNode javadoc. (Bangtao Zhou via Arpit Agarwal)
+
+    HDFS-5844. Fix broken link in WebHDFS.apt.vm. (Akira Ajisaka via
+    Arpit Agarwal)
+
     HDFS-5034.  Remove debug prints from GetFileLinkInfo (Andrew Wang via Colin
     HDFS-5034.  Remove debug prints from GetFileLinkInfo (Andrew Wang via Colin
     Patrick McCabe)
     Patrick McCabe)
 
 
@@ -602,6 +833,12 @@ Release 2.4.0 - UNRELEASED
     HDFS-5728. Block recovery will fail if the metafile does not have crc 
     HDFS-5728. Block recovery will fail if the metafile does not have crc 
     for all chunks of the block (Vinay via kihwal)
     for all chunks of the block (Vinay via kihwal)
 
 
+    HDFS-5845. SecondaryNameNode dies when checkpointing with cache pools.
+    (wang)
+
+    HDFS-5842. Cannot create hftp filesystem when using a proxy user ugi and a doAs 
+    on a secure cluster. (jing9)
+
   BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS
   BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS
 
 
     HDFS-4985. Add storage type to the protocol and expose it in block report
     HDFS-4985. Add storage type to the protocol and expose it in block report
@@ -939,224 +1176,6 @@ Release 2.4.0 - UNRELEASED
     HDFS-5724. modifyCacheDirective logging audit log command wrongly as
     HDFS-5724. modifyCacheDirective logging audit log command wrongly as
     addCacheDirective (Uma Maheswara Rao G via Colin Patrick McCabe)
     addCacheDirective (Uma Maheswara Rao G via Colin Patrick McCabe)
 
 
-
-Release 2.3.0 - UNRELEASED
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-  IMPROVEMENTS
-
-    HDFS-5360. Improvement of usage message of renameSnapshot and
-    deleteSnapshot. (Shinichi Yamashita via wang)
-
-    HDFS-5331. make SnapshotDiff.java to a o.a.h.util.Tool interface implementation. 
-    (Vinayakumar B via umamahesh)
-
-    HDFS-4657.  Limit the number of blocks logged by the NN after a block
-    report to a configurable value.  (Aaron T. Myers via Colin Patrick
-    McCabe)
-
-    HDFS-5344. Make LsSnapshottableDir as Tool interface implementation. (Sathish via umamahesh)
-
-    HDFS-5544. Adding Test case For Checking dfs.checksum type as NULL value. (Sathish via umamahesh)
-
-    HDFS-5568. Support includeSnapshots option with Fsck command. (Vinayakumar B via umamahesh)
-
-    HDFS-4983. Numeric usernames do not work with WebHDFS FS. (Yongjun Zhang via
-    jing9)
-
-    HDFS-5592. statechangeLog of completeFile should be logged only in case of success. 
-    (Vinayakumar via umamahesh)
-
-    HDFS-5662. Can't decommission a DataNode due to file's replication factor
-    larger than the rest of the cluster size. (brandonli)
-
-    HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options.
-    (shv)
-
-    HDFS-5675. Add Mkdirs operation to NNThroughputBenchmark.
-    (Plamen Jeliazkov via shv)
-
-    HDFS-5677. Need error checking for HA cluster configuration.
-    (Vincent Sheffer via cos)
-
-    HDFS-5825. Use FileUtils.copyFile() to implement DFSTestUtils.copyFile().
-    (Haohui Mai via Arpit Agarwal)
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-
-    HDFS-5307. Support both HTTP and HTTPS in jsp pages (Haohui Mai via
-    brandonli)
-
-    HDFS-5291. Standby namenode after transition to active goes into safemode.
-    (jing9)
-
-    HDFS-5317. Go back to DFS Home link does not work on datanode webUI
-    (Haohui Mai via brandonli)
-
-    HDFS-5316. Namenode ignores the default https port (Haohui Mai via
-    brandonli)
-
-    HDFS-5281. COMMIT request should not block. (brandonli)
-
-    HDFS-5337. should do hsync for a commit request even there is no pending
-    writes (brandonli)
-
-    HDFS-5335. Hive query failed with possible race in dfs output stream.
-    (Haohui Mai via suresh)
-
-    HDFS-5322. HDFS delegation token not found in cache errors seen on secure HA 
-    clusters. (jing9)
-
-    HDFS-5329. Update FSNamesystem#getListing() to handle inode path in startAfter
-    token. (brandonli)
-
-    HDFS-5330. fix readdir and readdirplus for large directories (brandonli)
-
-    HDFS-5370. Typo in Error Message: different between range in condition
-    and range in error message. (Kousuke Saruta via suresh)
-
-    HDFS-5365. Fix libhdfs compile error on FreeBSD9. (Radim Kolar via cnauroth)
-    
-    HDFS-5347. Add HDFS NFS user guide. (brandonli)
-
-    HDFS-5403. WebHdfs client cannot communicate with older WebHdfs servers
-    post HDFS-5306. (atm)
-
-    HDFS-5171. NFS should create input stream for a file and try to share it
-    with multiple read requests. (Haohui Mai via brandonli)
-
-    HDFS-5413. hdfs.cmd does not support passthrough to any arbitrary class.
-    (cnauroth)
-
-    HDFS-5433. When reloading fsimage during checkpointing, we should clear
-    existing snapshottable directories. (Aaron T. Myers via wang)
-
-    HDFS-5432. TestDatanodeJsp fails on Windows due to assumption that loopback
-    address resolves to host name localhost. (cnauroth)
-
-    HDFS-5065. TestSymlinkHdfsDisable fails on Windows. (ivanmi)
-
-    HDFS-4633 TestDFSClientExcludedNodes fails sporadically if excluded nodes
-    cache expires too quickly  (Chris Nauroth via Sanjay)
-
-    HDFS-5037. Active NN should trigger its own edit log rolls (wang)
-
-    HDFS-5035.  getFileLinkStatus and rename do not correctly check permissions
-    of symlinks.  (Andrew Wang via Colin Patrick McCabe)
-
-    HDFS-5456. NameNode startup progress creates new steps if caller attempts to
-    create a counter for a step that doesn't already exist.  (cnauroth)
-
-    HDFS-5458. Datanode failed volume threshold ignored if exception is thrown
-    in getDataDirsFromURIs. (Mike Mellenthin via wang)
-
-    HDFS-5252. Stable write is not handled correctly in someplace. (brandonli)
-
-    HDFS-5364. Add OpenFileCtx cache. (brandonli)
-
-    HDFS-5469. Add configuration property for the sub-directroy export path
-    (brandonli)
-
-    HDFS-5519. COMMIT handler should update the commit status after sync
-    (brandonli)
-
-    HDFS-5372. In FSNamesystem, hasReadLock() returns false if the current thread 
-    holds the write lock (VinayaKumar B via umamahesh)
-
-    HDFS-4516. Client crash after block allocation and NN switch before lease recovery for 
-    the same file can cause readers to fail forever (VinaayKumar B via umamahesh)
-
-    HDFS-5014. Process register commands with out holding BPOfferService lock. 
-    (Vinaykumar B via umamahesh)
-
-    HDFS-5288. Close idle connections in portmap (Haohui Mai via brandonli)
-
-    HDFS-5407. Fix typos in DFSClientCache (Haohui Mai via brandonli)
-
-    HDFS-5548. Use ConcurrentHashMap in portmap (Haohui Mai via brandonli)
-
-    HDFS-5577. NFS user guide update (brandonli)
-
-    HDFS-5563. NFS gateway should commit the buffered data when read request comes
-    after write to the same file (brandonli)
-
-    HDFS-4997. libhdfs doesn't return correct error codes in most cases (cmccabe)
-
-    HDFS-5587. add debug information when NFS fails to start with duplicate user
-    or group names (brandonli)
-
-    HDFS-5590. Block ID and generation stamp may be reused when persistBlocks is 
-    set to false. (jing9)
-
-    HDFS-5353. Short circuit reads fail when dfs.encrypt.data.transfer is 
-    enabled. (Colin Patrick McCabe via jing9)
-
-    HDFS-5283. Under construction blocks only inside snapshots should not be
-    counted in safemode threshhold.  (Vinay via szetszwo)
-
-    HDFS-5257. addBlock() retry should return LocatedBlock with locations else client 
-    will get AIOBE. (Vinay via jing9)
-
-    HDFS-5427. Not able to read deleted files from snapshot directly under 
-    snapshottable dir after checkpoint and NN restart. (Vinay via jing9)
-
-    HDFS-5443. Delete 0-sized block when deleting an under-construction file that 
-    is included in snapshot. (jing9)
-
-    HDFS-5476. Snapshot: clean the blocks/files/directories under a renamed 
-    file/directory while deletion. (jing9)
-
-    HDFS-5425. Renaming underconstruction file with snapshots can make NN failure on 
-    restart. (jing9 and Vinay)
-
-    HDFS-5474. Deletesnapshot can make Namenode in safemode on NN restarts. 
-    (Sathish via jing9)
-
-    HDFS-5504. In HA mode, OP_DELETE_SNAPSHOT is not decrementing the safemode threshold, 
-    leads to NN safemode. (Vinay via jing9)
-
-    HDFS-5428. Under construction files deletion after snapshot+checkpoint+nn restart 
-    leads nn safemode. (jing9)
-
-    HDFS-5074. Allow starting up from an fsimage checkpoint in the middle of a
-    segment. (Todd Lipcon via atm)
-
-    HDFS-4201. NPE in BPServiceActor#sendHeartBeat. (jxiang via cmccabe)
-
-    HDFS-5666. Fix inconsistent synchronization in BPOfferService (jxiang via cmccabe)
-    
-    HDFS-5657. race condition causes writeback state error in NFS gateway (brandonli)
-
-    HDFS-5661. Browsing FileSystem via web ui, should use datanode's fqdn instead of ip 
-    address. (Benoy Antony via jing9)
-
-    HDFS-5582. hdfs getconf -excludeFile or -includeFile always failed (sathish
-    via cmccabe)
-
-    HDFS-5671. Fix socket leak in DFSInputStream#getBlockReader. (JamesLi via umamahesh) 
-
-    HDFS-5649. Unregister NFS and Mount service when NFS gateway is shutting down.
-    (brandonli)
-
-    HDFS-5789. Some of snapshot APIs missing checkOperation double check in fsn. (umamahesh)
-
-    HDFS-5343. When cat command is issued on snapshot files getting unexpected result.
-    (Sathish via umamahesh)
-
-    HDFS-5297. Fix dead links in HDFS site documents. (Akira Ajisaka via
-    Arpit Agarwal)
-
-    HDFS-5830. WebHdfsFileSystem.getFileBlockLocations throws
-    IllegalArgumentException when accessing another cluster. (Yongjun Zhang via
-    Colin Patrick McCabe)
-
-    HDFS-5833. Fix SecondaryNameNode javadoc. (Bangtao Zhou via Arpit Agarwal)
-
 Release 2.2.0 - 2013-10-13
 Release 2.2.0 - 2013-10-13
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -84,7 +84,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetUtils;
@@ -1539,7 +1539,7 @@ public class DFSUtil {
     return policy;
     return policy;
   }
   }
 
 
-  public static HttpServer.Builder loadSslConfToHttpServerBuilder(HttpServer.Builder builder,
+  public static HttpServer2.Builder loadSslConfToHttpServerBuilder(HttpServer2.Builder builder,
       Configuration sslConf) {
       Configuration sslConf) {
     return builder
     return builder
         .needsClientAuth(
         .needsClientAuth(
@@ -1644,13 +1644,13 @@ public class DFSUtil {
    * namenode can use to initialize their HTTP / HTTPS server.
    * namenode can use to initialize their HTTP / HTTPS server.
    *
    *
    */
    */
-  public static HttpServer.Builder httpServerTemplateForNNAndJN(
+  public static HttpServer2.Builder httpServerTemplateForNNAndJN(
       Configuration conf, final InetSocketAddress httpAddr,
       Configuration conf, final InetSocketAddress httpAddr,
       final InetSocketAddress httpsAddr, String name, String spnegoUserNameKey,
       final InetSocketAddress httpsAddr, String name, String spnegoUserNameKey,
       String spnegoKeytabFileKey) throws IOException {
       String spnegoKeytabFileKey) throws IOException {
     HttpConfig.Policy policy = getHttpPolicy(conf);
     HttpConfig.Policy policy = getHttpPolicy(conf);
 
 
-    HttpServer.Builder builder = new HttpServer.Builder().setName(name)
+    HttpServer2.Builder builder = new HttpServer2.Builder().setName(name)
         .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
         .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
         .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
         .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
         .setUsernameConfKey(spnegoUserNameKey)
         .setUsernameConfKey(spnegoUserNameKey)

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetUtils;
 
 
 /**
 /**
@@ -38,7 +38,7 @@ import org.apache.hadoop.net.NetUtils;
 public class JournalNodeHttpServer {
 public class JournalNodeHttpServer {
   public static final String JN_ATTRIBUTE_KEY = "localjournal";
   public static final String JN_ATTRIBUTE_KEY = "localjournal";
 
 
-  private HttpServer httpServer;
+  private HttpServer2 httpServer;
   private JournalNode localJournalNode;
   private JournalNode localJournalNode;
 
 
   private final Configuration conf;
   private final Configuration conf;
@@ -56,7 +56,7 @@ public class JournalNodeHttpServer {
         DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT);
         DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
 
 
-    HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+    HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
         httpAddr, httpsAddr, "journal",
         httpAddr, httpsAddr, "journal",
         DFSConfigKeys.DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
         DFSConfigKeys.DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
         DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY);
         DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY);
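
As this hunk shows, the JournalNode (like the NameNode and SecondaryNameNode below) now obtains a pre-configured HttpServer2.Builder from DFSUtil and only finishes it off itself. A hedged sketch of that flow, using the JournalNode's addresses and config keys from the hunk; the wrapper class and method names here are illustrative, and the real code adds its servlets before starting:

```java
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.http.HttpServer2;

public class JournalHttpServerSketch {
  static HttpServer2 startHttpServer(Configuration conf,
      InetSocketAddress httpAddr, InetSocketAddress httpsAddr)
      throws IOException {
    // The template applies the ACL, security, and SPNEGO settings shared by
    // the NameNode and JournalNode HTTP servers.
    HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
        httpAddr, httpsAddr, "journal",
        DFSConfigKeys.DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
        DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY);

    HttpServer2 server = builder.build();
    server.start();
    return server;
  }
}
```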

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -120,7 +120,7 @@ import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.ReadaheadPool;
 import org.apache.hadoop.io.ReadaheadPool;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -235,7 +235,7 @@ public class DataNode extends Configured
   private volatile boolean heartbeatsDisabledForTests = false;
   private volatile boolean heartbeatsDisabledForTests = false;
   private DataStorage storage = null;
   private DataStorage storage = null;
 
 
-  private HttpServer infoServer = null;
+  private HttpServer2 infoServer = null;
   private int infoPort;
   private int infoPort;
   private int infoSecurePort;
   private int infoSecurePort;
 
 
@@ -358,7 +358,7 @@ public class DataNode extends Configured
    * Http Policy is decided.
    * Http Policy is decided.
    */
    */
   private void startInfoServer(Configuration conf) throws IOException {
   private void startInfoServer(Configuration conf) throws IOException {
-    HttpServer.Builder builder = new HttpServer.Builder().setName("datanode")
+    HttpServer2.Builder builder = new HttpServer2.Builder().setName("datanode")
         .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));
         .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));
 
 
     HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
     HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -655,8 +655,9 @@ class DataXceiver extends Receiver implements Runnable {
       final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
       final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
       final DataChecksum checksum = header.getChecksum(); 
       final DataChecksum checksum = header.getChecksum(); 
       final int bytesPerCRC = checksum.getBytesPerChecksum();
       final int bytesPerCRC = checksum.getBytesPerChecksum();
-      final long crcPerBlock = (metadataIn.getLength()
-          - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize();
+      final long crcPerBlock = checksum.getChecksumSize() > 0 
+              ? (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize()
+              : 0;
       
       
       //compute block checksum
       //compute block checksum
       final MD5Hash md5 = MD5Hash.digest(checksumIn);
       final MD5Hash md5 = MD5Hash.digest(checksumIn);
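
This change (the HDFS-5843 fix listed in CHANGES.txt) guards against a divide-by-zero when checksums are disabled, i.e. when DataChecksum.getChecksumSize() returns 0. The guarded arithmetic, pulled out into a self-contained form for clarity; the method name and the header size of 7 bytes are illustrative stand-ins, not values taken from this diff:

```java
public class CrcPerBlockSketch {
  /**
   * Mirrors the guarded computation above: with checksums disabled the
   * checksum size is 0, and dividing by it would throw ArithmeticException,
   * so report 0 CRCs per block instead.
   */
  static long crcPerBlock(long metaFileLength, int headerSize, int checksumSize) {
    return checksumSize > 0
        ? (metaFileLength - headerSize) / checksumSize
        : 0;
  }

  public static void main(String[] args) {
    // Illustrative numbers: a 4103-byte meta file with a 7-byte header and
    // 4-byte CRCs covers 1024 chunks; with checksums disabled the result is 0.
    System.out.println(crcPerBlock(4103, 7, 4)); // 1024
    System.out.println(crcPerBlock(7, 7, 0));    // 0, no exception
  }
}
```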

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java

@@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.mortbay.jetty.Connector;
 import org.mortbay.jetty.Connector;
 
 
@@ -119,7 +119,7 @@ public class SecureDataNodeStarter implements Daemon {
     // certificates if they are communicating through SSL.
     // certificates if they are communicating through SSL.
     Connector listener = null;
     Connector listener = null;
     if (policy.isHttpEnabled()) {
     if (policy.isHttpEnabled()) {
-      listener = HttpServer.createDefaultChannelConnector();
+      listener = HttpServer2.createDefaultChannelConnector();
       InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
       InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
       listener.setHost(infoSocAddr.getHostName());
       listener.setHost(infoSocAddr.getHostName());
       listener.setPort(infoSocAddr.getPort());
       listener.setPort(infoSocAddr.getPort());

+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java

@@ -195,6 +195,17 @@ public final class CacheManager {
 
 
   }
   }
 
 
+  /**
+   * Resets all tracked directives and pools. Called during 2NN checkpointing to
+   * reset FSNamesystem state. See {FSNamesystem{@link #clear()}.
+   */
+  void clear() {
+    directivesById.clear();
+    directivesByPath.clear();
+    cachePools.clear();
+    nextDirectiveId = 1;
+  }
+
   public void startMonitorThread() {
   public void startMonitorThread() {
     crmLock.lock();
     crmLock.lock();
     try {
     try {

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -549,6 +549,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     leaseManager.removeAllLeases();
     leaseManager.removeAllLeases();
     inodeId.setCurrentValue(INodeId.LAST_RESERVED_ID);
     inodeId.setCurrentValue(INodeId.LAST_RESERVED_ID);
     snapshotManager.clearSnapshottableDirs();
     snapshotManager.clearSnapshottableDirs();
+    cacheManager.clear();
   }
   }
 
 
   @VisibleForTesting
   @VisibleForTesting
@@ -3817,7 +3818,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     final long diff = fileINode.getPreferredBlockSize() - commitBlock.getNumBytes();    
     final long diff = fileINode.getPreferredBlockSize() - commitBlock.getNumBytes();    
     if (diff > 0) {
     if (diff > 0) {
       try {
       try {
-        String path = leaseManager.findPath(fileINode);
+        String path = fileINode.getFullPathName();
         dir.updateSpaceConsumed(path, 0, -diff*fileINode.getFileReplication());
         dir.updateSpaceConsumed(path, 0, -diff*fileINode.getFileReplication());
       } catch (IOException e) {
       } catch (IOException e) {
         LOG.warn("Unexpected exception while updating disk space.", e);
         LOG.warn("Unexpected exception while updating disk space.", e);
@@ -4019,7 +4020,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   @VisibleForTesting
   @VisibleForTesting
   String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock)
   String closeFileCommitBlocks(INodeFile pendingFile, BlockInfo storedBlock)
       throws IOException {
       throws IOException {
-    String src = leaseManager.findPath(pendingFile);
+    String src = pendingFile.getFullPathName();
 
 
     // commit the last block and complete it if it has minimum replicas
     // commit the last block and complete it if it has minimum replicas
     commitOrCompleteLastBlock(pendingFile, storedBlock);
     commitOrCompleteLastBlock(pendingFile, storedBlock);
@@ -4041,7 +4042,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   @VisibleForTesting
   @VisibleForTesting
   String persistBlocks(INodeFile pendingFile, boolean logRetryCache)
   String persistBlocks(INodeFile pendingFile, boolean logRetryCache)
       throws IOException {
       throws IOException {
-    String src = leaseManager.findPath(pendingFile);
+    String src = pendingFile.getFullPathName();
     dir.persistBlocks(src, pendingFile, logRetryCache);
     dir.persistBlocks(src, pendingFile, logRetryCache);
     return src;
     return src;
   }
   }
@@ -5940,7 +5941,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         .getDatanodeStorageInfos(newNodes, newStorageIDs);
         .getDatanodeStorageInfos(newNodes, newStorageIDs);
     blockinfo.setExpectedLocations(storages);
     blockinfo.setExpectedLocations(storages);
 
 
-    String src = leaseManager.findPath(pendingFile);
+    String src = pendingFile.getFullPathName();
     dir.persistBlocks(src, pendingFile, logRetryCache);
     dir.persistBlocks(src, pendingFile, logRetryCache);
   }
   }
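
These call sites (part of the HDFS-5790 lease-recovery speedup) stop asking the LeaseManager to search every path registered under a lease and instead derive the path straight from the inode, which costs only the depth of the file. The LeaseManager#findPath removal appears further down. A toy sketch of the idea, using a stand-in class rather than the real INode hierarchy, purely to show why walking parent pointers is cheap:

```java
import java.util.ArrayDeque;
import java.util.Deque;

// Toy stand-in for INode#getFullPathName(): build the path from parent
// pointers instead of scanning every path held by the file's lease.
class ToyInode {
  final String name;
  final ToyInode parent;

  ToyInode(String name, ToyInode parent) {
    this.name = name;
    this.parent = parent;
  }

  String getFullPathName() {
    Deque<String> parts = new ArrayDeque<String>();
    for (ToyInode i = this; i.parent != null; i = i.parent) {
      parts.addFirst(i.name);   // collect names from leaf to root
    }
    StringBuilder sb = new StringBuilder();
    for (String part : parts) {
      sb.append('/').append(part);
    }
    return sb.length() == 0 ? "/" : sb.toString();
  }

  public static void main(String[] args) {
    ToyInode root = new ToyInode("", null);
    ToyInode user = new ToyInode("user", root);
    ToyInode file = new ToyInode("data.txt", new ToyInode("alice", user));
    System.out.println(file.getFullPathName()); // /user/alice/data.txt
  }
}
```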
 
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java

@@ -47,7 +47,7 @@ import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -287,7 +287,7 @@ public class GetImageServlet extends HttpServlet {
       }
       }
     }
     }
     
     
-    if (HttpServer.userHasAdministratorAccess(context, remoteUser)) {
+    if (HttpServer2.userHasAdministratorAccess(context, remoteUser)) {
       LOG.info("GetImageServlet allowing administrator: " + remoteUser);
       LOG.info("GetImageServlet allowing administrator: " + remoteUser);
       return true;
       return true;
     }
     }

+ 0 - 36
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java

@@ -179,24 +179,6 @@ public class LeaseManager {
     return addLease(newHolder, src);
     return addLease(newHolder, src);
   }
   }
 
 
-  /**
-   * Finds the pathname for the specified pendingFile
-   */
-  public synchronized String findPath(INodeFile pendingFile)
-      throws IOException {
-    FileUnderConstructionFeature uc = pendingFile.getFileUnderConstructionFeature();
-    Preconditions.checkArgument(uc != null);
-    Lease lease = getLease(uc.getClientName());
-    if (lease != null) {
-      String src = lease.findPath(pendingFile);
-      if (src != null) {
-        return src;
-      }
-    }
-    throw new IOException("pendingFile (=" + pendingFile + ") not found."
-        + "(lease=" + lease + ")");
-  }
-
   /**
   /**
    * Renew the lease(s) held by the given client
    * Renew the lease(s) held by the given client
    */
    */
@@ -252,24 +234,6 @@ public class LeaseManager {
       return now() - lastUpdate > softLimit;
       return now() - lastUpdate > softLimit;
     }
     }
 
 
-    /**
-     * @return the path associated with the pendingFile and null if not found.
-     */
-    private String findPath(INodeFile pendingFile) {
-      try {
-        for (String src : paths) {
-          INode node = fsnamesystem.dir.getINode(src);
-          if (node == pendingFile
-              || (node.isFile() && node.asFile() == pendingFile)) {
-            return src;
-          }
-        }
-      } catch (UnresolvedLinkException e) {
-        throw new AssertionError("Lease files should reside on this FS");
-      }
-      return null;
-    }
-
     /** Does this lease contain any path? */
     /** Does this lease contain any path? */
     boolean hasPath() {return !paths.isEmpty();}
     boolean hasPath() {return !paths.isEmpty();}
 
 

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java

@@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -47,7 +47,7 @@ import org.apache.hadoop.security.UserGroupInformation;
  */
  */
 @InterfaceAudience.Private
 @InterfaceAudience.Private
 public class NameNodeHttpServer {
 public class NameNodeHttpServer {
-  private HttpServer httpServer;
+  private HttpServer2 httpServer;
   private final Configuration conf;
   private final Configuration conf;
   private final NameNode nn;
   private final NameNode nn;
   
   
@@ -68,7 +68,7 @@ public class NameNodeHttpServer {
   }
   }
 
 
   private void initWebHdfs(Configuration conf) throws IOException {
   private void initWebHdfs(Configuration conf) throws IOException {
-    if (WebHdfsFileSystem.isEnabled(conf, HttpServer.LOG)) {
+    if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
       // set user pattern based on configuration file
       // set user pattern based on configuration file
       UserParam.setUserPattern(conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
       UserParam.setUserPattern(conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
 
 
@@ -77,9 +77,9 @@ public class NameNodeHttpServer {
       final String classname = AuthFilter.class.getName();
       final String classname = AuthFilter.class.getName();
       final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
       final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
       Map<String, String> params = getAuthFilterParams(conf);
       Map<String, String> params = getAuthFilterParams(conf);
-      HttpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params,
+      HttpServer2.defineFilter(httpServer.getWebAppContext(), name, classname, params,
           new String[]{pathSpec});
           new String[]{pathSpec});
-      HttpServer.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
+      HttpServer2.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
 
 
       // add webhdfs packages
       // add webhdfs packages
       httpServer.addJerseyResourcePackage(
       httpServer.addJerseyResourcePackage(
@@ -103,7 +103,7 @@ public class NameNodeHttpServer {
         DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT);
         DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
 
 
-    HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+    HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
         httpAddr, httpsAddr, "hdfs",
         httpAddr, httpsAddr, "hdfs",
         DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
         DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
         DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
         DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
@@ -152,7 +152,7 @@ public class NameNodeHttpServer {
               SecurityUtil.getServerPrincipal(principalInConf,
               SecurityUtil.getServerPrincipal(principalInConf,
                                               bindAddress.getHostName()));
                                               bindAddress.getHostName()));
     } else if (UserGroupInformation.isSecurityEnabled()) {
     } else if (UserGroupInformation.isSecurityEnabled()) {
-      HttpServer.LOG.error(
+      HttpServer2.LOG.error(
           "WebHDFS and security are enabled, but configuration property '" +
           "WebHDFS and security are enabled, but configuration property '" +
           DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY +
           DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY +
           "' is not set.");
           "' is not set.");
@@ -164,7 +164,7 @@ public class NameNodeHttpServer {
           DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
           DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
           httpKeytab);
           httpKeytab);
     } else if (UserGroupInformation.isSecurityEnabled()) {
     } else if (UserGroupInformation.isSecurityEnabled()) {
-      HttpServer.LOG.error(
+      HttpServer2.LOG.error(
           "WebHDFS and security are enabled, but configuration property '" +
           "WebHDFS and security are enabled, but configuration property '" +
           DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY +
           DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY +
           "' is not set.");
           "' is not set.");
@@ -214,7 +214,7 @@ public class NameNodeHttpServer {
     httpServer.setAttribute(STARTUP_PROGRESS_ATTRIBUTE_KEY, prog);
     httpServer.setAttribute(STARTUP_PROGRESS_ATTRIBUTE_KEY, prog);
   }
   }
 
 
-  private static void setupServlets(HttpServer httpServer, Configuration conf) {
+  private static void setupServlets(HttpServer2 httpServer, Configuration conf) {
     httpServer.addInternalServlet("startupProgress",
     httpServer.addInternalServlet("startupProgress",
         StartupProgressServlet.PATH_SPEC, StartupProgressServlet.class);
         StartupProgressServlet.PATH_SPEC, StartupProgressServlet.class);
     httpServer.addInternalServlet("getDelegationToken",
     httpServer.addInternalServlet("getDelegationToken",

+ 9 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -65,7 +65,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -113,7 +113,7 @@ public class SecondaryNameNode implements Runnable {
   private Configuration conf;
   private InetSocketAddress nameNodeAddr;
   private volatile boolean shouldRun;
-  private HttpServer infoServer;
+  private HttpServer2 infoServer;
   private URL imageListenURL;
 
   private Collection<URI> checkpointDirs;
@@ -257,7 +257,7 @@ public class SecondaryNameNode implements Runnable {
         DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
 
-    HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+    HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
         httpAddr, httpsAddr, "secondary",
         DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
         DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
@@ -1003,7 +1003,12 @@ public class SecondaryNameNode implements Runnable {
             sig.mostRecentCheckpointTxId + " even though it should have " +
             "just been downloaded");
       }
-      dstImage.reloadFromImageFile(file, dstNamesystem);
+      dstNamesystem.writeLock();
+      try {
+        dstImage.reloadFromImageFile(file, dstNamesystem);
+      } finally {
+        dstNamesystem.writeUnlock();
+      }
       dstNamesystem.dir.imageLoadComplete();
     }
     // error simulation code for junit test
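
The hunk above now takes the namesystem write lock around reloadFromImageFile. A minimal sketch of the same try/finally locking idiom with a plain ReentrantReadWriteLock (FSNamesystem's writeLock()/writeUnlock() guard its state in a similar way); the class and method names are illustrative.

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class WriteLockIdiomSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      void reloadUnderWriteLock(Runnable reload) {
        lock.writeLock().lock();     // block readers and other writers while state is replaced
        try {
          reload.run();              // e.g. the equivalent of reloadFromImageFile(...)
        } finally {
          lock.writeLock().unlock(); // always release, even if the reload throws
        }
      }
    }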

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java

@@ -620,7 +620,7 @@ public class CacheAdmin extends Configured implements Tool {
           "directives being added to the pool. This can be specified in " +
           "seconds, minutes, hours, and days, e.g. 120s, 30m, 4h, 2d. " +
           "Valid units are [smhd]. By default, no maximum is set. " +
-          "This can also be manually specified by \"never\".");
+          "A value of \"never\" specifies that there is no limit.");
       return getShortUsage() + "\n" +
           "Add a new cache pool.\n\n" + 
           listing.toString();
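
The reworded help text describes TTL values such as 120s, 30m, 4h, 2d plus the special value "never". A hypothetical illustration of that [smhd] syntax follows; this is not the parser CacheAdmin actually uses.

    // Illustrative only: maps "120s"/"30m"/"4h"/"2d" to milliseconds, "never" to no limit.
    final class TtlSyntaxSketch {
      /** Returns the TTL in milliseconds, or null for "never" (no limit). */
      static Long parseTtl(String ttl) {
        if ("never".equals(ttl)) {
          return null;
        }
        long value = Long.parseLong(ttl.substring(0, ttl.length() - 1));
        switch (ttl.charAt(ttl.length() - 1)) {
          case 's': return value * 1000L;
          case 'm': return value * 60L * 1000L;
          case 'h': return value * 60L * 60L * 1000L;
          case 'd': return value * 24L * 60L * 60L * 1000L;
          default: throw new IllegalArgumentException("Valid units are [smhd]: " + ttl);
        }
      }
      private TtlSyntaxSketch() {}
    }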

+ 8 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java

@@ -185,8 +185,8 @@ public class DelegationTokenFetcher {
             } else {
               // otherwise we are fetching
               if (webUrl != null) {
-                Credentials creds = getDTfromRemote(connectionFactory, new URI(webUrl),
-                    renewer);
+                Credentials creds = getDTfromRemote(connectionFactory, new URI(
+                    webUrl), renewer, null);
                 creds.writeTokenStorageFile(tokenFile, conf);
                 for (Token<?> token : creds.getAllTokens()) {
                   if(LOG.isDebugEnabled()) {
@@ -213,12 +213,17 @@ public class DelegationTokenFetcher {
   }
 
   static public Credentials getDTfromRemote(URLConnectionFactory factory,
-      URI nnUri, String renewer) throws IOException {
+      URI nnUri, String renewer, String proxyUser) throws IOException {
     StringBuilder buf = new StringBuilder(nnUri.toString())
         .append(GetDelegationTokenServlet.PATH_SPEC);
+    String separator = "?";
     if (renewer != null) {
       buf.append("?").append(GetDelegationTokenServlet.RENEWER).append("=")
           .append(renewer);
+      separator = "&";
+    }
+    if (proxyUser != null) {
+      buf.append(separator).append("doas=").append(proxyUser);
     }
 
     boolean isHttps = nnUri.getScheme().equals("https");
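
A small sketch of the query strings the extended getDTfromRemote builds, assuming the servlet path /getDelegationToken and the renewer parameter name exposed by GetDelegationTokenServlet; the host and user values are illustrative.

    class DelegationTokenUrlSketch {
      static String buildQuery(String base, String renewer, String proxyUser) {
        StringBuilder buf = new StringBuilder(base).append("/getDelegationToken");
        String separator = "?";
        if (renewer != null) {
          buf.append(separator).append("renewer=").append(renewer);
          separator = "&";
        }
        if (proxyUser != null) {
          buf.append(separator).append("doas=").append(proxyUser);
        }
        return buf.toString();
      }

      public static void main(String[] args) {
        // prints e.g. http://nn:50070/getDelegationToken?renewer=yarn&doas=alice
        System.out.println(buildQuery("http://nn:50070", "yarn", "alice"));
      }
    }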

+ 49 - 26
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java

@@ -57,7 +57,6 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Progressable;
@@ -234,17 +233,23 @@ public class HftpFileSystem extends FileSystem
   }
 
   @Override
-  public synchronized Token<?> getDelegationToken(final String renewer
-                                                  ) throws IOException {
+  public synchronized Token<?> getDelegationToken(final String renewer)
+      throws IOException {
     try {
-      //Renew TGT if needed
-      ugi.checkTGTAndReloginFromKeytab();
-      return ugi.doAs(new PrivilegedExceptionAction<Token<?>>() {
+      // Renew TGT if needed
+      UserGroupInformation connectUgi = ugi.getRealUser();
+      final String proxyUser = connectUgi == null ? null : ugi
+          .getShortUserName();
+      if (connectUgi == null) {
+        connectUgi = ugi;
+      }
+      return connectUgi.doAs(new PrivilegedExceptionAction<Token<?>>() {
         @Override
         public Token<?> run() throws IOException {
           Credentials c;
           try {
-            c = DelegationTokenFetcher.getDTfromRemote(connectionFactory, nnUri, renewer);
+            c = DelegationTokenFetcher.getDTfromRemote(connectionFactory,
+                nnUri, renewer, proxyUser);
           } catch (IOException e) {
             if (e.getCause() instanceof ConnectException) {
               LOG.warn("Couldn't connect to " + nnUri +
@@ -299,13 +304,13 @@ public class HftpFileSystem extends FileSystem
    * @return user_shortname,group1,group2...
    */
   private String getEncodedUgiParameter() {
-    StringBuilder ugiParamenter = new StringBuilder(
+    StringBuilder ugiParameter = new StringBuilder(
         ServletUtil.encodeQueryValue(ugi.getShortUserName()));
     for(String g: ugi.getGroupNames()) {
-      ugiParamenter.append(",");
-      ugiParamenter.append(ServletUtil.encodeQueryValue(g));
+      ugiParameter.append(",");
+      ugiParameter.append(ServletUtil.encodeQueryValue(g));
     }
-    return ugiParamenter.toString();
+    return ugiParameter.toString();
   }
 
   /**
@@ -675,30 +680,48 @@ public class HftpFileSystem extends FileSystem
 
   @SuppressWarnings("unchecked")
   @Override
-  public long renewDelegationToken(Token<?> token) throws IOException {
+  public long renewDelegationToken(final Token<?> token) throws IOException {
     // update the kerberos credentials, if they are coming from a keytab
-    UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
-    InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
+    UserGroupInformation connectUgi = ugi.getRealUser();
+    if (connectUgi == null) {
+      connectUgi = ugi;
+    }
     try {
-      return DelegationTokenFetcher.renewDelegationToken(connectionFactory,
-          DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
-          (Token<DelegationTokenIdentifier>) token);
-    } catch (AuthenticationException e) {
+      return connectUgi.doAs(new PrivilegedExceptionAction<Long>() {
+        @Override
+        public Long run() throws Exception {
+          InetSocketAddress serviceAddr = SecurityUtil
+              .getTokenServiceAddr(token);
+          return DelegationTokenFetcher.renewDelegationToken(connectionFactory,
+              DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
+              (Token<DelegationTokenIdentifier>) token);
+        }
+      });
+    } catch (InterruptedException e) {
       throw new IOException(e);
     }
   }
 
   @SuppressWarnings("unchecked")
   @Override
-  public void cancelDelegationToken(Token<?> token) throws IOException {
-    // update the kerberos credentials, if they are coming from a keytab
-    UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
-    InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
+  public void cancelDelegationToken(final Token<?> token) throws IOException {
+    UserGroupInformation connectUgi = ugi.getRealUser();
+    if (connectUgi == null) {
+      connectUgi = ugi;
+    }
     try {
-      DelegationTokenFetcher.cancelDelegationToken(connectionFactory, DFSUtil
-          .createUri(getUnderlyingProtocol(), serviceAddr),
-          (Token<DelegationTokenIdentifier>) token);
-    } catch (AuthenticationException e) {
+      connectUgi.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          InetSocketAddress serviceAddr = SecurityUtil
+              .getTokenServiceAddr(token);
+          DelegationTokenFetcher.cancelDelegationToken(connectionFactory,
+              DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
+              (Token<DelegationTokenIdentifier>) token);
+          return null;
+        }
+      });
+    } catch (InterruptedException e) {
      throw new IOException(e);
     }
   }
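
A minimal sketch of the proxy-user pattern the rewritten token methods follow: connect as the real user when one exists and hand the effective user to the server as the doas value. This is an illustration of the idiom, not the HftpFileSystem code itself; the callback interface is invented for the sketch.

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    class ProxyUserCallSketch {
      /** Hypothetical callback: builds the remote call once the doas value is known. */
      interface ProxiedCall<T> {
        PrivilegedExceptionAction<T> prepare(String proxyUser);
      }

      static <T> T callAsRealUser(ProxiedCall<T> call) throws IOException {
        try {
          UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
          UserGroupInformation connectUgi = ugi.getRealUser();
          // With no real user, the effective user connects directly and no doas is needed.
          String proxyUser = connectUgi == null ? null : ugi.getShortUserName();
          if (connectUgi == null) {
            connectUgi = ugi;
          }
          return connectUgi.doAs(call.prepare(proxyUser));
        } catch (InterruptedException e) {
          throw new IOException(e);
        }
      }
    }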

+ 123 - 81
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm

@@ -22,110 +22,140 @@ Centralized Cache Management in HDFS
 
 
 %{toc|section=1|fromDepth=2|toDepth=4}
 %{toc|section=1|fromDepth=2|toDepth=4}
 
 
-* {Background}
-
-  Normally, HDFS relies on the operating system to cache data it reads from disk.
-  However, HDFS can also be configured to use centralized cache management. Under
-  centralized cache management, the HDFS NameNode itself decides which blocks
-  should be cached, and where they should be cached.
-
-  Centralized cache management has several advantages. First of all, it
-  prevents frequently used block files from being evicted from memory. This is
-  particularly important when the size of the working set exceeds the size of
-  main memory, which is true for many big data applications. Secondly, when
-  HDFS decides what should be cached, it can let clients know about this
-  information through the getFileBlockLocations API. Finally, when the DataNode
-  knows a block is locked into memory, it can provide access to that block via
-  mmap.
+* {Overview}
+
+  <Centralized cache management> in HDFS is an explicit caching mechanism that
+  allows users to specify <paths> to be cached by HDFS. The NameNode will
+  communicate with DataNodes that have the desired blocks on disk, and instruct
+  them to cache the blocks in off-heap caches. 
+
+  Centralized cache management in HDFS has many significant advantages.
+
+  [[1]] Explicit pinning prevents frequently used data from being evicted from
+  memory. This is particularly important when the size of the working set
+  exceeds the size of main memory, which is common for many HDFS workloads.
+
+  [[1]] Because DataNode caches are managed by the NameNode, applications can
+  query the set of cached block locations when making task placement decisions.
+  Co-locating a task with a cached block replica improves read performance.
+
+  [[1]] When a block has been cached by a DataNode, clients can use a new,
+  more-efficient, zero-copy read API. Since checksum verification of cached
+  data is done once by the DataNode, clients can incur essentially zero
+  overhead when using this new API.
+
+  [[1]] Centralized caching can improve overall cluster memory utilization.
+  When relying on the OS buffer cache at each DataNode, repeated reads of
+  a block will result in all <n> replicas of the block being pulled into
+  buffer cache. With centralized cache management, a user can explicitly pin
+  only <m> of the <n> replicas, saving <n-m> memory.
 
 
 * {Use Cases}
 
-  Centralized cache management is most useful for files which are accessed very
-  often. For example, a "fact table" in Hive which is often used in joins is a
-  good candidate for caching. On the other hand, when running a classic
-  "word count" MapReduce job which counts the number of words in each
-  document, there may not be any good candidates for caching, since all the
-  files may be accessed exactly once.
+  Centralized cache management is useful for files that are accessed repeatedly.
+  For example, a small <fact table> in Hive which is often used for joins is a
+  good candidate for caching. On the other hand, caching the input of a
+  <one year reporting query> is probably less useful, since the
+  historical data might only be read once.
+
+  Centralized cache management is also useful for mixed workloads with
+  performance SLAs. Caching the working set of a high-priority workload
+  ensures that it does not contend for disk I/O with a low-priority workload.
 
 * {Architecture}
 
 
 [images/caching.png] Caching Architecture
 [images/caching.png] Caching Architecture
 
 
-  With centralized cache management, the NameNode coordinates all caching
-  across the cluster. It receives cache information from each DataNode via the
-  cache report, a periodic message that describes all the blocks IDs cached on
-  a given DataNode. The NameNode will reply to DataNode heartbeat messages
-  with commands telling it which blocks to cache and which to uncache.
-
-  The NameNode stores a set of path cache directives, which tell it which files
-  to cache. The NameNode also stores a set of cache pools, which are groups of
-  cache directives.  These directives and pools are persisted to the edit log
-  and fsimage, and will be loaded if the cluster is restarted.
+  In this architecture, the NameNode is responsible for coordinating all the
+  DataNode off-heap caches in the cluster. The NameNode periodically receives
+  a <cache report> from each DataNode which describes all the blocks cached
+  on a given DN. The NameNode manages DataNode caches by piggybacking cache and
+  uncache commands on the DataNode heartbeat.
 
 
-  Periodically, the NameNode rescans the namespace, to see which blocks need to
-  be cached based on the current set of path cache directives. Rescans are also
-  triggered by relevant user actions, such as adding or removing a cache
-  directive or removing a cache pool.
+  The NameNode queries its set of <cache directives> to determine
+  which paths should be cached. Cache directives are persistently stored in the
+  fsimage and edit log, and can be added, removed, and modified via Java and
+  command-line APIs. The NameNode also stores a set of <cache pools>,
+  which are administrative entities used to group cache directives together for
+  resource management and enforcing permissions.
 
 
-  Cache directives also may specific a numeric cache replication, which is the
-  number of replicas to cache.  This number may be equal to or smaller than the
-  file's block replication.  If multiple cache directives cover the same file
-  with different cache replication settings, then the highest cache replication
-  setting is applied.
+  The NameNode periodically rescans the namespace and active cache directives
+  to determine which blocks need to be cached or uncached and assign caching
+  work to DataNodes. Rescans can also be triggered by user actions like adding
+  or removing a cache directive or removing a cache pool.
 
 
   We do not currently cache blocks which are under construction, corrupt, or
   We do not currently cache blocks which are under construction, corrupt, or
   otherwise incomplete.  If a cache directive covers a symlink, the symlink
   otherwise incomplete.  If a cache directive covers a symlink, the symlink
   target is not cached.
   target is not cached.
 
 
-  Caching is currently done on a per-file basis, although we would like to add
-  block-level granularity in the future.
+  Caching is currently done on the file or directory-level. Block and sub-block
+  caching is an item of future work.
+
+* {Concepts}
+
+** {Cache directive}
+
+  A <cache directive> defines a path that should be cached. Paths can be either
+  directories or files. Directories are cached non-recursively, meaning only
+  files in the first-level listing of the directory are cached.
+
+  Directives also specify additional parameters, such as the cache replication
+  factor and expiration time. The replication factor specifies the number of
+  block replicas to cache. If multiple cache directives refer to the same file,
+  the maximum cache replication factor is applied.
 
 
-* {Interface}
+  The expiration time is specified on the command line as a <time-to-live
+  (TTL)>, a relative expiration time in the future. After a cache directive
+  expires, it is no longer considered by the NameNode when making caching
+  decisions.
 
 
-  The NameNode stores a list of "cache directives."  These directives contain a
-  path as well as the number of times blocks in that path should be replicated.
+** {Cache pool}
 
 
-  Paths can be either directories or files. If the path specifies a file, that
-  file is cached. If the path specifies a directory, all the files in the
-  directory will be cached. However, this process is not recursive-- only the
-  direct children of the directory will be cached.
+  A <cache pool> is an administrative entity used to manage groups of cache
+  directives. Cache pools have UNIX-like <permissions>, which restrict which
+  users and groups have access to the pool. Write permissions allow users to
+  add and remove cache directives to the pool. Read permissions allow users to
+  list the cache directives in a pool, as well as additional metadata. Execute
+  permissions are unused.
 
 
-** {hdfs cacheadmin Shell}
+  Cache pools are also used for resource management. Pools can enforce a
+  maximum <limit>, which restricts the number of bytes that can be cached in
+  aggregate by directives in the pool. Normally, the sum of the pool limits
+  will approximately equal the amount of aggregate memory reserved for
+  HDFS caching on the cluster. Cache pools also track a number of statistics
+  to help cluster users determine what is and should be cached.
 
 
-  Path cache directives can be created by the <<<hdfs cacheadmin
-  -addDirective>>> command and removed via the <<<hdfs cacheadmin
-  -removeDirective>>> command. To list the current path cache directives, use
-  <<<hdfs cacheadmin -listDirectives>>>. Each path cache directive has a
-  unique 64-bit ID number which will not be reused if it is deleted.  To remove
-  all path cache directives with a specified path, use <<<hdfs cacheadmin
-  -removeDirectives>>>.
+  Pools also can enforce a maximum time-to-live. This restricts the maximum
+  expiration time of directives being added to the pool.
 
 
-  Directives are grouped into "cache pools."  Each cache pool gets a share of
-  the cluster's resources. Additionally, cache pools are used for
-  authentication. Cache pools have a mode, user, and group, similar to regular
-  files. The same authentication rules are applied as for normal files. So, for
-  example, if the mode is 0777, any user can add or remove directives from the
-  cache pool. If the mode is 0644, only the owner can write to the cache pool,
-  but anyone can read from it. And so forth.
+* {<<<cacheadmin>>> command-line interface}
 
 
-  Cache pools are identified by name. They can be created by the <<<hdfs
-  cacheAdmin -addPool>>> command, modified by the <<<hdfs cacheadmin
-  -modifyPool>>> command, and removed via the <<<hdfs cacheadmin
-  -removePool>>> command. To list the current cache pools, use <<<hdfs
-  cacheAdmin -listPools>>>
+  On the command-line, administrators and users can interact with cache pools
+  and directives via the <<<hdfs cacheadmin>>> subcommand.
+
+  Cache directives are identified by a unique, non-repeating 64-bit integer ID.
+  IDs will not be reused even if a cache directive is later removed.
+
+  Cache pools are identified by a unique string name.
+
+** {Cache directive commands}
 
 
 *** {addDirective}
 *** {addDirective}
 
 
-  Usage: <<<hdfs cacheadmin -addDirective -path <path> -replication <replication> -pool <pool-name> >>>
+  Usage: <<<hdfs cacheadmin -addDirective -path <path> -pool <pool-name> [-force] [-replication <replication>] [-ttl <time-to-live>]>>>
 
 
   Add a new cache directive.
   Add a new cache directive.
 
 
 *--+--+
 *--+--+
 \<path\> | A path to cache. The path can be a directory or a file.
 \<path\> | A path to cache. The path can be a directory or a file.
 *--+--+
 *--+--+
+\<pool-name\> | The pool to which the directive will be added. You must have write permission on the cache pool in order to add new directives.
+*--+--+
+-force | Skips checking of cache pool resource limits.
+*--+--+
 \<replication\> | The cache replication factor to use. Defaults to 1.
 \<replication\> | The cache replication factor to use. Defaults to 1.
 *--+--+
 *--+--+
-\<pool-name\> | The pool to which the directive will be added. You must have write permission on the cache pool in order to add new directives.
+\<time-to-live\> | How long the directive is valid. Can be specified in minutes, hours, and days, e.g. 30m, 4h, 2d. Valid units are [smhd]. "never" indicates a directive that never expires. If unspecified, the directive never expires.
 *--+--+
 *--+--+
 
 
 *** {removeDirective}
 *** {removeDirective}
@@ -150,7 +180,7 @@ Centralized Cache Management in HDFS
 
 
 *** {listDirectives}
 *** {listDirectives}
 
 
-  Usage: <<<hdfs cacheadmin -listDirectives [-path <path>] [-pool <pool>] >>>
+  Usage: <<<hdfs cacheadmin -listDirectives [-stats] [-path <path>] [-pool <pool>]>>>
 
 
   List cache directives.
   List cache directives.
 
 
@@ -159,10 +189,14 @@ Centralized Cache Management in HDFS
 *--+--+
 *--+--+
 \<pool\> | List only path cache directives in that pool.
 \<pool\> | List only path cache directives in that pool.
 *--+--+
 *--+--+
+-stats | List path-based cache directive statistics.
+*--+--+
+
+** {Cache pool commands}
 
 
 *** {addPool}
 *** {addPool}
 
 
-  Usage: <<<hdfs cacheadmin -addPool <name> [-owner <owner>] [-group <group>] [-mode <mode>] [-weight <weight>] >>>
+  Usage: <<<hdfs cacheadmin -addPool <name> [-owner <owner>] [-group <group>] [-mode <mode>] [-limit <limit>] [-maxTtl <maxTtl>]>>>
 
 
   Add a new cache pool.
   Add a new cache pool.
 
 
@@ -175,12 +209,14 @@ Centralized Cache Management in HDFS
 *--+--+
 *--+--+
 \<mode\> | UNIX-style permissions for the pool. Permissions are specified in octal, e.g. 0755. By default, this is set to 0755.
 \<mode\> | UNIX-style permissions for the pool. Permissions are specified in octal, e.g. 0755. By default, this is set to 0755.
 *--+--+
 *--+--+
-\<weight\> | Weight of the pool. This is a relative measure of the importance of the pool used during cache resource management. By default, it is set to 100.
+\<limit\> | The maximum number of bytes that can be cached by directives in this pool, in aggregate. By default, no limit is set.
+*--+--+
+\<maxTtl\> | The maximum allowed time-to-live for directives being added to the pool. This can be specified in seconds, minutes, hours, and days, e.g. 120s, 30m, 4h, 2d. Valid units are [smhd]. By default, no maximum is set. A value of \"never\" specifies that there is no limit.
 *--+--+
 *--+--+
 
 
 *** {modifyPool}
 *** {modifyPool}
 
 
-  Usage: <<<hdfs cacheadmin -modifyPool <name> [-owner <owner>] [-group <group>] [-mode <mode>] [-weight <weight>] >>>
+  Usage: <<<hdfs cacheadmin -modifyPool <name> [-owner <owner>] [-group <group>] [-mode <mode>] [-limit <limit>] [-maxTtl <maxTtl>]>>>
 
 
   Modifies the metadata of an existing cache pool.
   Modifies the metadata of an existing cache pool.
 
 
@@ -193,7 +229,9 @@ Centralized Cache Management in HDFS
 *--+--+
 *--+--+
 \<mode\> | Unix-style permissions of the pool in octal.
 \<mode\> | Unix-style permissions of the pool in octal.
 *--+--+
 *--+--+
-\<weight\> | Weight of the pool.
+\<limit\> | Maximum number of bytes that can be cached by this pool.
+*--+--+
+\<maxTtl\> | The maximum allowed time-to-live for directives being added to the pool.
 *--+--+
 *--+--+
 
 
 *** {removePool}
 *** {removePool}
@@ -208,11 +246,13 @@ Centralized Cache Management in HDFS
 
 
 *** {listPools}
 *** {listPools}
 
 
-  Usage: <<<hdfs cacheadmin -listPools [name] >>>
+  Usage: <<<hdfs cacheadmin -listPools [-stats] [<name>]>>>
 
 
   Display information about one or more cache pools, e.g. name, owner, group,
   Display information about one or more cache pools, e.g. name, owner, group,
   permissions, etc.
   permissions, etc.
 
 
+*--+--+
+-stats | Display additional cache pool statistics.
 *--+--+
 *--+--+
 \<name\> | If specified, list only the named cache pool.
 \<name\> | If specified, list only the named cache pool.
 *--+--+
 *--+--+
@@ -244,10 +284,12 @@ Centralized Cache Management in HDFS
 
 
   * dfs.datanode.max.locked.memory
   * dfs.datanode.max.locked.memory
 
 
-    The DataNode will treat this as the maximum amount of memory it can use for
-    its cache. When setting this value, please remember that you will need space
-    in memory for other things, such as the Java virtual machine (JVM) itself
-    and the operating system's page cache.
+    This determines the maximum amount of memory a DataNode will use for caching.
+    The "locked-in-memory size" ulimit (<<<ulimit -l>>>) of the DataNode user
+    also needs to be increased to match this parameter (see below section on
+    {{OS Limits}}). When setting this value, please remember that you will need
+    space in memory for other things as well, such as the DataNode and
+    application JVM heaps and the operating system page cache.
 
 
 *** Optional
 *** Optional
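
The pools and directives described above can also be managed from Java. A minimal sketch using only the DistributedFileSystem calls exercised by TestCacheDirectives later in this change; the pool name, path, mode, and limit are illustrative.

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

    class CachingApiSketch {
      static long cacheHotTable(DistributedFileSystem dfs) throws Exception {
        // Roughly: hdfs cacheadmin -addPool hot -mode 0755 -limit 747
        dfs.addCachePool(new CachePoolInfo("hot")
            .setMode(new FsPermission((short) 0755))
            .setLimit(747L));
        // Roughly: hdfs cacheadmin -addDirective -path /hot/table -pool hot
        return dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
            .setPath(new Path("/hot/table"))
            .setPool("hot")
            .build());
      }
    }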
 
 

+ 41 - 41
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsDesign.apt.vm

@@ -17,11 +17,11 @@
   ---
   ---
   ${maven.build.timestamp}
   ${maven.build.timestamp}
 
 
-%{toc|section=1|fromDepth=0}
-
 HDFS Architecture
 HDFS Architecture
 
 
-Introduction
+%{toc|section=1|fromDepth=0}
+
+* Introduction
 
 
    The Hadoop Distributed File System (HDFS) is a distributed file system
    The Hadoop Distributed File System (HDFS) is a distributed file system
    designed to run on commodity hardware. It has many similarities with
    designed to run on commodity hardware. It has many similarities with
@@ -35,9 +35,9 @@ Introduction
    is part of the Apache Hadoop Core project. The project URL is
    is part of the Apache Hadoop Core project. The project URL is
    {{http://hadoop.apache.org/}}.
    {{http://hadoop.apache.org/}}.
 
 
-Assumptions and Goals
+* Assumptions and Goals
 
 
-Hardware Failure
+** Hardware Failure
 
 
    Hardware failure is the norm rather than the exception. An HDFS
    Hardware failure is the norm rather than the exception. An HDFS
    instance may consist of hundreds or thousands of server machines, each
    instance may consist of hundreds or thousands of server machines, each
@@ -47,7 +47,7 @@ Hardware Failure
    non-functional. Therefore, detection of faults and quick, automatic
    non-functional. Therefore, detection of faults and quick, automatic
    recovery from them is a core architectural goal of HDFS.
    recovery from them is a core architectural goal of HDFS.
 
 
-Streaming Data Access
+** Streaming Data Access
 
 
    Applications that run on HDFS need streaming access to their data sets.
    Applications that run on HDFS need streaming access to their data sets.
    They are not general purpose applications that typically run on general
    They are not general purpose applications that typically run on general
@@ -58,7 +58,7 @@ Streaming Data Access
    targeted for HDFS. POSIX semantics in a few key areas has been traded
    targeted for HDFS. POSIX semantics in a few key areas has been traded
    to increase data throughput rates.
    to increase data throughput rates.
 
 
-Large Data Sets
+** Large Data Sets
 
 
    Applications that run on HDFS have large data sets. A typical file in
    Applications that run on HDFS have large data sets. A typical file in
    HDFS is gigabytes to terabytes in size. Thus, HDFS is tuned to support
    HDFS is gigabytes to terabytes in size. Thus, HDFS is tuned to support
@@ -66,7 +66,7 @@ Large Data Sets
    to hundreds of nodes in a single cluster. It should support tens of
    to hundreds of nodes in a single cluster. It should support tens of
    millions of files in a single instance.
    millions of files in a single instance.
 
 
-Simple Coherency Model
+** Simple Coherency Model
 
 
    HDFS applications need a write-once-read-many access model for files. A
    HDFS applications need a write-once-read-many access model for files. A
    file once created, written, and closed need not be changed. This
    file once created, written, and closed need not be changed. This
@@ -75,7 +75,7 @@ Simple Coherency Model
    perfectly with this model. There is a plan to support appending-writes
    perfectly with this model. There is a plan to support appending-writes
    to files in the future.
    to files in the future.
 
 
-“Moving Computation is Cheaper than Moving Data”
+** “Moving Computation is Cheaper than Moving Data”
 
 
    A computation requested by an application is much more efficient if it
    A computation requested by an application is much more efficient if it
    is executed near the data it operates on. This is especially true when
    is executed near the data it operates on. This is especially true when
@@ -86,13 +86,13 @@ Simple Coherency Model
    running. HDFS provides interfaces for applications to move themselves
    running. HDFS provides interfaces for applications to move themselves
    closer to where the data is located.
    closer to where the data is located.
 
 
-Portability Across Heterogeneous Hardware and Software Platforms
+** Portability Across Heterogeneous Hardware and Software Platforms
 
 
    HDFS has been designed to be easily portable from one platform to
    HDFS has been designed to be easily portable from one platform to
    another. This facilitates widespread adoption of HDFS as a platform of
    another. This facilitates widespread adoption of HDFS as a platform of
    choice for a large set of applications.
    choice for a large set of applications.
 
 
-NameNode and DataNodes
+* NameNode and DataNodes
 
 
    HDFS has a master/slave architecture. An HDFS cluster consists of a
    HDFS has a master/slave architecture. An HDFS cluster consists of a
    single NameNode, a master server that manages the file system namespace
    single NameNode, a master server that manages the file system namespace
@@ -127,7 +127,7 @@ NameNode and DataNodes
    repository for all HDFS metadata. The system is designed in such a way
    repository for all HDFS metadata. The system is designed in such a way
    that user data never flows through the NameNode.
    that user data never flows through the NameNode.
 
 
-The File System Namespace
+* The File System Namespace
 
 
    HDFS supports a traditional hierarchical file organization. A user or
    HDFS supports a traditional hierarchical file organization. A user or
    an application can create directories and store files inside these
    an application can create directories and store files inside these
@@ -145,7 +145,7 @@ The File System Namespace
    replication factor of that file. This information is stored by the
    replication factor of that file. This information is stored by the
    NameNode.
    NameNode.
 
 
-Data Replication
+* Data Replication
 
 
    HDFS is designed to reliably store very large files across machines in
    HDFS is designed to reliably store very large files across machines in
    a large cluster. It stores each file as a sequence of blocks; all
    a large cluster. It stores each file as a sequence of blocks; all
@@ -164,7 +164,7 @@ Data Replication
 
 
 [images/hdfsdatanodes.png] HDFS DataNodes
 [images/hdfsdatanodes.png] HDFS DataNodes
 
 
-Replica Placement: The First Baby Steps
+** Replica Placement: The First Baby Steps
 
 
    The placement of replicas is critical to HDFS reliability and
    The placement of replicas is critical to HDFS reliability and
    performance. Optimizing replica placement distinguishes HDFS from most
    performance. Optimizing replica placement distinguishes HDFS from most
@@ -210,7 +210,7 @@ Replica Placement: The First Baby Steps
    The current, default replica placement policy described here is a work
    The current, default replica placement policy described here is a work
    in progress.
    in progress.
 
 
-Replica Selection
+** Replica Selection
 
 
    To minimize global bandwidth consumption and read latency, HDFS tries
    To minimize global bandwidth consumption and read latency, HDFS tries
    to satisfy a read request from a replica that is closest to the reader.
    to satisfy a read request from a replica that is closest to the reader.
@@ -219,7 +219,7 @@ Replica Selection
    cluster spans multiple data centers, then a replica that is resident in
    cluster spans multiple data centers, then a replica that is resident in
    the local data center is preferred over any remote replica.
    the local data center is preferred over any remote replica.
 
 
-Safemode
+** Safemode
 
 
    On startup, the NameNode enters a special state called Safemode.
    On startup, the NameNode enters a special state called Safemode.
    Replication of data blocks does not occur when the NameNode is in the
    Replication of data blocks does not occur when the NameNode is in the
@@ -234,7 +234,7 @@ Safemode
    blocks (if any) that still have fewer than the specified number of
    blocks (if any) that still have fewer than the specified number of
    replicas. The NameNode then replicates these blocks to other DataNodes.
    replicas. The NameNode then replicates these blocks to other DataNodes.
 
 
-The Persistence of File System Metadata
+* The Persistence of File System Metadata
 
 
    The HDFS namespace is stored by the NameNode. The NameNode uses a
    The HDFS namespace is stored by the NameNode. The NameNode uses a
    transaction log called the EditLog to persistently record every change
    transaction log called the EditLog to persistently record every change
@@ -273,7 +273,7 @@ The Persistence of File System Metadata
    each of these local files and sends this report to the NameNode: this
    each of these local files and sends this report to the NameNode: this
    is the Blockreport.
    is the Blockreport.
 
 
-The Communication Protocols
+* The Communication Protocols
 
 
    All HDFS communication protocols are layered on top of the TCP/IP
    All HDFS communication protocols are layered on top of the TCP/IP
    protocol. A client establishes a connection to a configurable TCP port
    protocol. A client establishes a connection to a configurable TCP port
@@ -284,13 +284,13 @@ The Communication Protocols
    RPCs. Instead, it only responds to RPC requests issued by DataNodes or
    RPCs. Instead, it only responds to RPC requests issued by DataNodes or
    clients.
    clients.
 
 
-Robustness
+* Robustness
 
 
    The primary objective of HDFS is to store data reliably even in the
    The primary objective of HDFS is to store data reliably even in the
    presence of failures. The three common types of failures are NameNode
    presence of failures. The three common types of failures are NameNode
    failures, DataNode failures and network partitions.
    failures, DataNode failures and network partitions.
 
 
-Data Disk Failure, Heartbeats and Re-Replication
+** Data Disk Failure, Heartbeats and Re-Replication
 
 
    Each DataNode sends a Heartbeat message to the NameNode periodically. A
    Each DataNode sends a Heartbeat message to the NameNode periodically. A
    network partition can cause a subset of DataNodes to lose connectivity
    network partition can cause a subset of DataNodes to lose connectivity
@@ -306,7 +306,7 @@ Data Disk Failure, Heartbeats and Re-Replication
    corrupted, a hard disk on a DataNode may fail, or the replication
    corrupted, a hard disk on a DataNode may fail, or the replication
    factor of a file may be increased.
    factor of a file may be increased.
 
 
-Cluster Rebalancing
+** Cluster Rebalancing
 
 
    The HDFS architecture is compatible with data rebalancing schemes. A
    The HDFS architecture is compatible with data rebalancing schemes. A
    scheme might automatically move data from one DataNode to another if
    scheme might automatically move data from one DataNode to another if
@@ -316,7 +316,7 @@ Cluster Rebalancing
    cluster. These types of data rebalancing schemes are not yet
    cluster. These types of data rebalancing schemes are not yet
    implemented.
    implemented.
 
 
-Data Integrity
+** Data Integrity
 
 
    It is possible that a block of data fetched from a DataNode arrives
    It is possible that a block of data fetched from a DataNode arrives
    corrupted. This corruption can occur because of faults in a storage
    corrupted. This corruption can occur because of faults in a storage
@@ -330,7 +330,7 @@ Data Integrity
    to retrieve that block from another DataNode that has a replica of that
    to retrieve that block from another DataNode that has a replica of that
    block.
    block.
 
 
-Metadata Disk Failure
+** Metadata Disk Failure
 
 
    The FsImage and the EditLog are central data structures of HDFS. A
    The FsImage and the EditLog are central data structures of HDFS. A
    corruption of these files can cause the HDFS instance to be
    corruption of these files can cause the HDFS instance to be
@@ -350,16 +350,16 @@ Metadata Disk Failure
    Currently, automatic restart and failover of the NameNode software to
    Currently, automatic restart and failover of the NameNode software to
    another machine is not supported.
    another machine is not supported.
 
 
-Snapshots
+** Snapshots
 
 
    Snapshots support storing a copy of data at a particular instant of
    Snapshots support storing a copy of data at a particular instant of
    time. One usage of the snapshot feature may be to roll back a corrupted
    time. One usage of the snapshot feature may be to roll back a corrupted
    HDFS instance to a previously known good point in time. HDFS does not
    HDFS instance to a previously known good point in time. HDFS does not
    currently support snapshots but will in a future release.
    currently support snapshots but will in a future release.
 
 
-Data Organization
+* Data Organization
 
 
-Data Blocks
+** Data Blocks
 
 
    HDFS is designed to support very large files. Applications that are
    HDFS is designed to support very large files. Applications that are
    compatible with HDFS are those that deal with large data sets. These
    compatible with HDFS are those that deal with large data sets. These
@@ -370,7 +370,7 @@ Data Blocks
    chunks, and if possible, each chunk will reside on a different
    chunks, and if possible, each chunk will reside on a different
    DataNode.
    DataNode.
 
 
-Staging
+** Staging
 
 
    A client request to create a file does not reach the NameNode
    A client request to create a file does not reach the NameNode
    immediately. In fact, initially the HDFS client caches the file data
    immediately. In fact, initially the HDFS client caches the file data
@@ -397,7 +397,7 @@ Staging
    side caching to improve performance. A POSIX requirement has been
    side caching to improve performance. A POSIX requirement has been
    relaxed to achieve higher performance of data uploads.
    relaxed to achieve higher performance of data uploads.
 
 
-Replication Pipelining
+** Replication Pipelining
 
 
    When a client is writing data to an HDFS file, its data is first
    When a client is writing data to an HDFS file, its data is first
    written to a local file as explained in the previous section. Suppose
    written to a local file as explained in the previous section. Suppose
@@ -406,7 +406,7 @@ Replication Pipelining
    DataNodes from the NameNode. This list contains the DataNodes that will
    DataNodes from the NameNode. This list contains the DataNodes that will
    host a replica of that block. The client then flushes the data block to
    host a replica of that block. The client then flushes the data block to
    the first DataNode. The first DataNode starts receiving the data in
    the first DataNode. The first DataNode starts receiving the data in
-   small portions (4 KB), writes each portion to its local repository and
+   small portions, writes each portion to its local repository and
    transfers that portion to the second DataNode in the list. The second
    transfers that portion to the second DataNode in the list. The second
    DataNode, in turn starts receiving each portion of the data block,
    DataNode, in turn starts receiving each portion of the data block,
    writes that portion to its repository and then flushes that portion to
    writes that portion to its repository and then flushes that portion to
@@ -416,7 +416,7 @@ Replication Pipelining
    the next one in the pipeline. Thus, the data is pipelined from one
    the next one in the pipeline. Thus, the data is pipelined from one
    DataNode to the next.
    DataNode to the next.
 
 
-Accessibility
+* Accessibility
 
 
    HDFS can be accessed from applications in many different ways.
    HDFS can be accessed from applications in many different ways.
    Natively, HDFS provides a
    Natively, HDFS provides a
@@ -426,7 +426,7 @@ Accessibility
    of an HDFS instance. Work is in progress to expose HDFS through the WebDAV
    of an HDFS instance. Work is in progress to expose HDFS through the WebDAV
    protocol.
    protocol.
 
 
-FS Shell
+** FS Shell
 
 
    HDFS allows user data to be organized in the form of files and
    HDFS allows user data to be organized in the form of files and
    directories. It provides a commandline interface called FS shell that
    directories. It provides a commandline interface called FS shell that
@@ -447,7 +447,7 @@ FS Shell
    FS shell is targeted for applications that need a scripting language to
    FS shell is targeted for applications that need a scripting language to
    interact with the stored data.
    interact with the stored data.
 
 
-DFSAdmin
+** DFSAdmin
 
 
    The DFSAdmin command set is used for administering an HDFS cluster.
    The DFSAdmin command set is used for administering an HDFS cluster.
    These are commands that are used only by an HDFS administrator. Here
    These are commands that are used only by an HDFS administrator. Here
@@ -463,16 +463,16 @@ DFSAdmin
 |Recommission or decommission DataNode(s) | <<<bin/hadoop dfsadmin -refreshNodes>>>
 |Recommission or decommission DataNode(s) | <<<bin/hadoop dfsadmin -refreshNodes>>>
 *---------+---------+
 *---------+---------+
 
 
-Browser Interface
+** Browser Interface
 
 
    A typical HDFS install configures a web server to expose the HDFS
    A typical HDFS install configures a web server to expose the HDFS
    namespace through a configurable TCP port. This allows a user to
    namespace through a configurable TCP port. This allows a user to
    navigate the HDFS namespace and view the contents of its files using a
    navigate the HDFS namespace and view the contents of its files using a
    web browser.
    web browser.
 
 
-Space Reclamation
+* Space Reclamation
 
 
-File Deletes and Undeletes
+** File Deletes and Undeletes
 
 
    When a file is deleted by a user or an application, it is not
    When a file is deleted by a user or an application, it is not
    immediately removed from HDFS. Instead, HDFS first renames it to a file
    immediately removed from HDFS. Instead, HDFS first renames it to a file
@@ -490,12 +490,12 @@ File Deletes and Undeletes
    file. The <<</trash>>> directory contains only the latest copy of the file
    file. The <<</trash>>> directory contains only the latest copy of the file
    that was deleted. The <<</trash>>> directory is just like any other directory
    that was deleted. The <<</trash>>> directory is just like any other directory
    with one special feature: HDFS applies specified policies to
    with one special feature: HDFS applies specified policies to
-   automatically delete files from this directory. The current default
-   policy is to delete files from <<</trash>>> that are more than 6 hours old.
-   In the future, this policy will be configurable through a well defined
-   interface.
+   automatically delete files from this directory. The current default trash
+   interval is 0 (files are deleted without being stored in the trash). This
+   value is a configurable parameter, <<<fs.trash.interval>>>, set in
+   core-site.xml.
 
 
-Decrease Replication Factor
+** Decrease Replication Factor
 
 
    When the replication factor of a file is reduced, the NameNode selects
    When the replication factor of a file is reduced, the NameNode selects
    excess replicas that can be deleted. The next Heartbeat transfers this
    excess replicas that can be deleted. The next Heartbeat transfers this
@@ -505,7 +505,7 @@ Decrease Replication Factor
    of the setReplication API call and the appearance of free space in the
    of the setReplication API call and the appearance of free space in the
    cluster.
    cluster.
 
 
-References
+* References
 
 
    Hadoop {{{http://hadoop.apache.org/docs/current/api/}JavaDoc API}}.
    Hadoop {{{http://hadoop.apache.org/docs/current/api/}JavaDoc API}}.
 
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm

@@ -869,7 +869,7 @@ Content-Length: 0
 * {Error Responses}
 
   When an operation fails, the server may throw an exception.
-  The JSON schema of error responses is defined in {{{RemoteException JSON Schema}}}.
+  The JSON schema of error responses is defined in {{RemoteException JSON Schema}}.
   The table below shows the mapping from exceptions to HTTP response codes.
 
 ** {HTTP Response Codes}

+ 11 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java

@@ -71,7 +71,7 @@ public class TestFSOutputSummer {
     cleanupFile(name);
   }
 
-  /* create a file, write data with vairable amount of data */
+  /* create a file, write data with variable amount of data */
   private void writeFile3(Path name) throws Exception {
     FSDataOutputStream stm = fileSys.create(name, true, 
         fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096),
@@ -103,6 +103,8 @@ public class TestFSOutputSummer {
     stm.readFully(0, actual);
     checkAndEraseData(actual, 0, expected, "Read Sanity Test");
     stm.close();
+    // do a sanity check. Get the file checksum
+    fileSys.getFileChecksum(name);
   }
 
   private void cleanupFile(Path name) throws IOException {
@@ -112,13 +114,20 @@ public class TestFSOutputSummer {
   }
 
   /**
-   * Test write opeation for output stream in DFS.
+   * Test write operation for output stream in DFS.
    */
   @Test
   public void testFSOutputSummer() throws Exception {
+    doTestFSOutputSummer("CRC32");
+    doTestFSOutputSummer("CRC32C");
+    doTestFSOutputSummer("NULL");
+  }
+  
+  private void doTestFSOutputSummer(String checksumType) throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
+    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, checksumType);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                                .numDataNodes(NUM_OF_DATANODES)
                                                .build();
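
A short sketch of the client-side knob the updated test exercises: setting dfs.checksum.type (CRC32, CRC32C, or NULL) and fetching a file checksum as a sanity check. The path is illustrative, and the cluster is assumed to be running.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class ChecksumTypeSketch {
      static FileChecksum checksumWith(String type, Path file) throws Exception {
        Configuration conf = new Configuration();
        conf.set("dfs.checksum.type", type);  // "CRC32", "CRC32C" or "NULL"
        FileSystem fs = FileSystem.get(conf);
        // Returns the file checksum computed over the block checksums by the DataNodes.
        return fs.getFileChecksum(file);
      }
    }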

+ 103 - 68
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java

@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -528,77 +529,111 @@ public class TestCacheDirectives {
 
   @Test(timeout=60000)
   public void testCacheManagerRestart() throws Exception {
-    // Create and validate a pool
-    final String pool = "poolparty";
-    String groupName = "partygroup";
-    FsPermission mode = new FsPermission((short)0777);
-    long limit = 747;
-    dfs.addCachePool(new CachePoolInfo(pool)
-        .setGroupName(groupName)
-        .setMode(mode)
-        .setLimit(limit));
-    RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
-    assertTrue("No cache pools found", pit.hasNext());
-    CachePoolInfo info = pit.next().getInfo();
-    assertEquals(pool, info.getPoolName());
-    assertEquals(groupName, info.getGroupName());
-    assertEquals(mode, info.getMode());
-    assertEquals(limit, (long)info.getLimit());
-    assertFalse("Unexpected # of cache pools found", pit.hasNext());
-  
-    // Create some cache entries
-    int numEntries = 10;
-    String entryPrefix = "/party-";
-    long prevId = -1;
-    final Date expiry = new Date();
-    for (int i=0; i<numEntries; i++) {
-      prevId = dfs.addCacheDirective(
-          new CacheDirectiveInfo.Builder().
-            setPath(new Path(entryPrefix + i)).setPool(pool).
-            setExpiration(
-                CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).
-            build());
-    }
-    RemoteIterator<CacheDirectiveEntry> dit
-        = dfs.listCacheDirectives(null);
-    for (int i=0; i<numEntries; i++) {
-      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
-      CacheDirectiveInfo cd = dit.next().getInfo();
-      assertEquals(i+1, cd.getId().longValue());
-      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
-      assertEquals(pool, cd.getPool());
-    }
-    assertFalse("Unexpected # of cache directives found", dit.hasNext());
-  
-    // Restart namenode
-    cluster.restartNameNode();
+    SecondaryNameNode secondary = null;
+    try {
+      // Start a secondary namenode
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+          "0.0.0.0:0");
+      secondary = new SecondaryNameNode(conf);
   
   
-    // Check that state came back up
-    pit = dfs.listCachePools();
-    assertTrue("No cache pools found", pit.hasNext());
-    info = pit.next().getInfo();
-    assertEquals(pool, info.getPoolName());
-    assertEquals(pool, info.getPoolName());
-    assertEquals(groupName, info.getGroupName());
-    assertEquals(mode, info.getMode());
-    assertEquals(limit, (long)info.getLimit());
-    assertFalse("Unexpected # of cache pools found", pit.hasNext());
+      // Create and validate a pool
+      final String pool = "poolparty";
+      String groupName = "partygroup";
+      FsPermission mode = new FsPermission((short)0777);
+      long limit = 747;
+      dfs.addCachePool(new CachePoolInfo(pool)
+          .setGroupName(groupName)
+          .setMode(mode)
+          .setLimit(limit));
+      RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
+      assertTrue("No cache pools found", pit.hasNext());
+      CachePoolInfo info = pit.next().getInfo();
+      assertEquals(pool, info.getPoolName());
+      assertEquals(groupName, info.getGroupName());
+      assertEquals(mode, info.getMode());
+      assertEquals(limit, (long)info.getLimit());
+      assertFalse("Unexpected # of cache pools found", pit.hasNext());
+    
+      // Create some cache entries
+      int numEntries = 10;
+      String entryPrefix = "/party-";
+      long prevId = -1;
+      final Date expiry = new Date();
+      for (int i=0; i<numEntries; i++) {
+        prevId = dfs.addCacheDirective(
+            new CacheDirectiveInfo.Builder().
+              setPath(new Path(entryPrefix + i)).setPool(pool).
+              setExpiration(
+                  CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).
+              build());
+      }
+      RemoteIterator<CacheDirectiveEntry> dit
+          = dfs.listCacheDirectives(null);
+      for (int i=0; i<numEntries; i++) {
+        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
+        CacheDirectiveInfo cd = dit.next().getInfo();
+        assertEquals(i+1, cd.getId().longValue());
+        assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
+        assertEquals(pool, cd.getPool());
+      }
+      assertFalse("Unexpected # of cache directives found", dit.hasNext());
+      
+      // Checkpoint once to set some cache pools and directives on 2NN side
+      secondary.doCheckpoint();
+      
+      // Add some more CacheManager state
+      final String imagePool = "imagePool";
+      dfs.addCachePool(new CachePoolInfo(imagePool));
+      prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
+        .setPath(new Path("/image")).setPool(imagePool).build());
+
+      // Save a new image to force a fresh fsimage download
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      dfs.saveNamespace();
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+      // Checkpoint again forcing a reload of FSN state
+      boolean fetchImage = secondary.doCheckpoint();
+      assertTrue("Secondary should have fetched a new fsimage from NameNode",
+          fetchImage);
+
+      // Remove temp pool and directive
+      dfs.removeCachePool(imagePool);
+
+      // Restart namenode
+      cluster.restartNameNode();
+    
+      // Check that state came back up
+      pit = dfs.listCachePools();
+      assertTrue("No cache pools found", pit.hasNext());
+      info = pit.next().getInfo();
+      assertEquals(pool, info.getPoolName());
+      assertEquals(pool, info.getPoolName());
+      assertEquals(groupName, info.getGroupName());
+      assertEquals(mode, info.getMode());
+      assertEquals(limit, (long)info.getLimit());
+      assertFalse("Unexpected # of cache pools found", pit.hasNext());
+    
+      dit = dfs.listCacheDirectives(null);
+      for (int i=0; i<numEntries; i++) {
+        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
+        CacheDirectiveInfo cd = dit.next().getInfo();
+        assertEquals(i+1, cd.getId().longValue());
+        assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
+        assertEquals(pool, cd.getPool());
+        assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
+      }
+      assertFalse("Unexpected # of cache directives found", dit.hasNext());
   
   
-    dit = dfs.listCacheDirectives(null);
-    for (int i=0; i<numEntries; i++) {
-      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
-      CacheDirectiveInfo cd = dit.next().getInfo();
-      assertEquals(i+1, cd.getId().longValue());
-      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
-      assertEquals(pool, cd.getPool());
-      assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
+      long nextId = dfs.addCacheDirective(
+            new CacheDirectiveInfo.Builder().
+              setPath(new Path("/foobar")).setPool(pool).build());
+      assertEquals(prevId + 1, nextId);
+    } finally {
+      if (secondary != null) {
+        secondary.shutdown();
+      }
    }
-    assertFalse("Unexpected # of cache directives found", dit.hasNext());
-
-    long nextId = dfs.addCacheDirective(
-          new CacheDirectiveInfo.Builder().
-            setPath(new Path("/foobar")).setPool(pool).build());
-    assertEquals(prevId + 1, nextId);
  }

  /**

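For context, the HDFS caching API exercised by the test above looks roughly like the following sketch; it assumes an already-initialized DistributedFileSystem handle named dfs from a running MiniDFSCluster, and only reuses the call shapes visible in the diff itself:

    // Create a pool with an owner group, mode and limit, then cache a path in it.
    dfs.addCachePool(new CachePoolInfo("poolparty")
        .setGroupName("partygroup")
        .setMode(new FsPermission((short) 0777))
        .setLimit(747L));
    long directiveId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/party-0"))
        .setPool("poolparty")
        .build());
    // Pools and directives can be listed back; the assertions above verify
    // this state before and after the checkpoint and NameNode restart.
    RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
    while (pools.hasNext()) {
      System.out.println(pools.next().getInfo().getPoolName());
    }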
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java

@@ -1635,7 +1635,7 @@ public class TestCheckpoint {
   * Test that the secondary namenode correctly deletes temporary edits
   * on startup.
   */
-  @Test(timeout = 30000)
+  @Test(timeout = 60000)
  public void testDeleteTemporaryEditsOnStartup() throws IOException {
    Configuration conf = new HdfsConfiguration();
    SecondaryNameNode secondary = null;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.authorize.AccessControlList;
@@ -66,7 +66,7 @@ public class TestGetImageServlet {
    AccessControlList acls = Mockito.mock(AccessControlList.class);
    Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
    ServletContext context = Mockito.mock(ServletContext.class);
-    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+    Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
    
    // Make sure that NN2 is considered a valid fsimage/edits requestor.
    assertTrue(GetImageServlet.isValidRequestor(context,

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java

@@ -37,7 +37,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.http.HttpServerFunctionalTest;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -119,7 +119,7 @@ public class TestTransferFsImage {
   */
  @Test(timeout = 5000)
  public void testImageTransferTimeout() throws Exception {
-    HttpServer testServer = HttpServerFunctionalTest.createServer("hdfs");
+    HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs");
    try {
      testServer.addServlet("GetImage", "/getimage", TestGetImageServlet.class);
      testServer.start();

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java

@@ -58,7 +58,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.ipc.ProtobufRpcEngine.Server;
 import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -89,7 +89,7 @@ public class SnapshotTestHelper {
    setLevel2OFF(LogFactory.getLog(MetricsSystemImpl.class));
    
    setLevel2OFF(DataBlockScanner.LOG);
-    setLevel2OFF(HttpServer.LOG);
+    setLevel2OFF(HttpServer2.LOG);
    setLevel2OFF(DataNode.LOG);
    setLevel2OFF(BlockPoolSliceStorage.LOG);
    setLevel2OFF(LeaseManager.LOG);

+ 78 - 69
hadoop-mapreduce-project/CHANGES.txt

@@ -82,9 +82,6 @@ Trunk (Unreleased)
 
 
   BUG FIXES
   BUG FIXES
 
 
-    MAPREDUCE-4272. SortedRanges.Range#compareTo is not spec compliant.
-    (Yu Gao via llu)
-
     MAPREDUCE-3194. "mapred mradmin" command is broken in mrv2
     MAPREDUCE-3194. "mapred mradmin" command is broken in mrv2
                      (Jason Lowe via bobby)
                      (Jason Lowe via bobby)
 
 
@@ -130,15 +127,9 @@ Trunk (Unreleased)
     MAPREDUCE-4574. Fix TotalOrderParitioner to work with
     MAPREDUCE-4574. Fix TotalOrderParitioner to work with
     non-WritableComparable key types. (harsh)
     non-WritableComparable key types. (harsh)
 
 
-    MAPREDUCE-4884. Streaming tests fail to start MiniMRCluster due to missing
-    queue configuration. (Chris Nauroth via suresh)
-
     MAPREDUCE-5012. Typo in javadoc for IdentityMapper class. (Adam Monsen
     MAPREDUCE-5012. Typo in javadoc for IdentityMapper class. (Adam Monsen
     via suresh)
     via suresh)
 
 
-    MAPREDUCE-4885. Streaming tests have multiple failures on Windows. (Chris
-    Nauroth via bikas)
-
     MAPREDUCE-4987. TestMRJobs#testDistributedCache fails on Windows due to
     MAPREDUCE-4987. TestMRJobs#testDistributedCache fails on Windows due to
     classpath problems and unexpected behavior of symlinks (Chris Nauroth via
     classpath problems and unexpected behavior of symlinks (Chris Nauroth via
     bikas)
     bikas)
@@ -152,6 +143,24 @@ Release 2.4.0 - UNRELEASED
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
 
 
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    MAPREDUCE-5464. Add analogs of the SLOTS_MILLIS counters that jive with the
+    YARN resource model (Sandy Ryza)
+
+    MAPREDUCE-5732. Report proper queue when job has been automatically placed
+    (Sandy Ryza)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 2.3.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
   NEW FEATURES
   NEW FEATURES
 
 
     MAPREDUCE-5265. History server admin service to refresh user and superuser
     MAPREDUCE-5265. History server admin service to refresh user and superuser
@@ -169,6 +178,19 @@ Release 2.4.0 - UNRELEASED
     MAPREDUCE-5332. Support token-preserving restart of history server (jlowe)
     MAPREDUCE-5332. Support token-preserving restart of history server (jlowe)
 
 
   IMPROVEMENTS
   IMPROVEMENTS
+  
+    MAPREDUCE-5329. Allow MR applications to use additional AuxServices,
+    which are compatible with the default MapReduce shuffle.
+    (Avner BenHanoch via sseth)
+
+    MAPREDUCE-5463. Deprecate SLOTS_MILLIS counters (Tzuyoshi Ozawa via Sandy
+    Ryza)
+
+    MAPREDUCE-5457. Add a KeyOnlyTextOutputReader to enable streaming to write
+    out text files without separators (Sandy Ryza)
+
+    MAPREDUCE-5596. Allow configuring the number of threads used to serve
+    shuffle connections (Sandy Ryza via jlowe)
 
 
     MAPREDUCE-434. LocalJobRunner limited to single reducer (Sandy Ryza and
     MAPREDUCE-434. LocalJobRunner limited to single reducer (Sandy Ryza and
     Aaron Kimball via Sandy Ryza)
     Aaron Kimball via Sandy Ryza)
@@ -208,6 +230,9 @@ Release 2.4.0 - UNRELEASED
 
 
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
+    MAPREDUCE-4680. Job history cleaner should only check timestamps of files in
+    old enough directories (Robert Kanter via Sandy Ryza)
+
     MAPREDUCE-5484. YarnChild unnecessarily loads job conf twice (Sandy Ryza)
     MAPREDUCE-5484. YarnChild unnecessarily loads job conf twice (Sandy Ryza)
 
 
     MAPREDUCE-5487. In task processes, JobConf is unnecessarily loaded again
     MAPREDUCE-5487. In task processes, JobConf is unnecessarily loaded again
@@ -218,6 +243,37 @@ Release 2.4.0 - UNRELEASED
 
 
   BUG FIXES
   BUG FIXES
 
 
+    MAPREDUCE-5569. FloatSplitter is not generating correct splits (Nathan
+    Roberts via jlowe)
+
+    MAPREDUCE-5546. mapred.cmd on Windows set HADOOP_OPTS incorrectly (Chuan Liu
+    via cnauroth)
+
+    MAPREDUCE-5518. Fixed typo "can't read paritions file". (Albert Chu
+    via devaraj)
+
+    MAPREDUCE-5561. org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl
+    testcase failing on trunk (Karthik Kambatla via jlowe)
+
+    MAPREDUCE-5598. TestUserDefinedCounters.testMapReduceJob is flakey
+    (Robert Kanter via jlowe)
+
+    MAPREDUCE-5604. TestMRAMWithNonNormalizedCapabilities fails on Windows due to
+    exceeding max path length. (cnauroth)
+
+    MAPREDUCE-5451. MR uses LD_LIBRARY_PATH which doesn't mean anything in
+    Windows. (Yingda Chen via cnauroth)
+
+    MAPREDUCE-5409. MRAppMaster throws InvalidStateTransitonException: Invalid
+    event: TA_TOO_MANY_FETCH_FAILURE at KILLED for TaskAttemptImpl (Gera
+    Shegalov via jlowe)
+
+    MAPREDUCE-5674. Missing start and finish time in mapred.JobStatus.
+    (Chuan Liu via cnauroth)
+
+    MAPREDUCE-5650. Job fails when hprof mapreduce.task.profile.map/reduce.params
+    is specified (Gera Shegalov via Sandy Ryza)
+
     MAPREDUCE-5316. job -list-attempt-ids command does not handle illegal
     MAPREDUCE-5316. job -list-attempt-ids command does not handle illegal
     task-state (Ashwin Shankar via jlowe)
     task-state (Ashwin Shankar via jlowe)
 
 
@@ -291,65 +347,6 @@ Release 2.4.0 - UNRELEASED
     MAPREDUCE-5723. MR AM container log can be truncated or empty.
     MAPREDUCE-5723. MR AM container log can be truncated or empty.
     (Mohammad Kamrul Islam via kasha)
     (Mohammad Kamrul Islam via kasha)
 
 
-Release 2.3.0 - UNRELEASED
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-  IMPROVEMENTS
-  
-    MAPREDUCE-5329. Allow MR applications to use additional AuxServices,
-    which are compatible with the default MapReduce shuffle.
-    (Avner BenHanoch via sseth)
-
-    MAPREDUCE-5463. Deprecate SLOTS_MILLIS counters (Tzuyoshi Ozawa via Sandy
-    Ryza)
-
-    MAPREDUCE-5457. Add a KeyOnlyTextOutputReader to enable streaming to write
-    out text files without separators (Sandy Ryza)
-
-    MAPREDUCE-5596. Allow configuring the number of threads used to serve
-    shuffle connections (Sandy Ryza via jlowe)
-
-  OPTIMIZATIONS
-
-    MAPREDUCE-4680. Job history cleaner should only check timestamps of files in
-    old enough directories (Robert Kanter via Sandy Ryza)
-
-  BUG FIXES
-
-    MAPREDUCE-5569. FloatSplitter is not generating correct splits (Nathan
-    Roberts via jlowe)
-
-    MAPREDUCE-5546. mapred.cmd on Windows set HADOOP_OPTS incorrectly (Chuan Liu
-    via cnauroth)
-
-    MAPREDUCE-5518. Fixed typo "can't read paritions file". (Albert Chu
-    via devaraj)
-
-    MAPREDUCE-5561. org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl
-    testcase failing on trunk (Karthik Kambatla via jlowe)
-
-    MAPREDUCE-5598. TestUserDefinedCounters.testMapReduceJob is flakey
-    (Robert Kanter via jlowe)
-
-    MAPREDUCE-5604. TestMRAMWithNonNormalizedCapabilities fails on Windows due to
-    exceeding max path length. (cnauroth)
-
-    MAPREDUCE-5451. MR uses LD_LIBRARY_PATH which doesn't mean anything in
-    Windows. (Yingda Chen via cnauroth)
-
-    MAPREDUCE-5409. MRAppMaster throws InvalidStateTransitonException: Invalid
-    event: TA_TOO_MANY_FETCH_FAILURE at KILLED for TaskAttemptImpl (Gera
-    Shegalov via jlowe)
-
-    MAPREDUCE-5674. Missing start and finish time in mapred.JobStatus.
-    (Chuan Liu via cnauroth)
-
-    MAPREDUCE-5650. Job fails when hprof mapreduce.task.profile.map/reduce.params
-    is specified (Gera Shegalov via Sandy Ryza)
-
 Release 2.2.0 - 2013-10-13
 Release 2.2.0 - 2013-10-13
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
@@ -996,9 +993,15 @@ Release 2.1.0-beta - 2013-08-22
     HADOOP-9372. Fix bad timeout annotations on tests.
     HADOOP-9372. Fix bad timeout annotations on tests.
     (Arpit Agarwal via suresh)
     (Arpit Agarwal via suresh)
 
 
+    MAPREDUCE-4885. Streaming tests have multiple failures on Windows. (Chris
+    Nauroth via bikas)
+
     MAPREDUCE-5177. Use common utils FileUtil#setReadable/Writable/Executable & 
     MAPREDUCE-5177. Use common utils FileUtil#setReadable/Writable/Executable & 
     FileUtil#canRead/Write/Execute. (Ivan Mitic via suresh)
     FileUtil#canRead/Write/Execute. (Ivan Mitic via suresh)
 
 
+    MAPREDUCE-5349. TestClusterMapReduceTestCase and TestJobName fail on Windows
+    in branch-2. (Chuan Liu via cnauroth)
+
     MAPREDUCE-5355. MiniMRYarnCluster with localFs does not work on Windows.
     MAPREDUCE-5355. MiniMRYarnCluster with localFs does not work on Windows.
     (Chuan Liu via cnauroth)
     (Chuan Liu via cnauroth)
 
 
@@ -1149,6 +1152,9 @@ Release 2.0.3-alpha - 2013-02-06
 
 
   BUG FIXES
   BUG FIXES
 
 
+    MAPREDUCE-4272. SortedRanges.Range#compareTo is not spec compliant.
+    (Yu Gao via llu)
+
     MAPREDUCE-4607. Race condition in ReduceTask completion can result in Task
     MAPREDUCE-4607. Race condition in ReduceTask completion can result in Task
     being incorrectly failed. (Bikas Saha via tomwhite)
     being incorrectly failed. (Bikas Saha via tomwhite)
 
 
@@ -1211,6 +1217,9 @@ Release 2.0.3-alpha - 2013-02-06
     MAPREDUCE-4969. TestKeyValueTextInputFormat test fails with Open JDK 7.
     MAPREDUCE-4969. TestKeyValueTextInputFormat test fails with Open JDK 7.
     (Arpit Agarwal via suresh)
     (Arpit Agarwal via suresh)
 
 
+    MAPREDUCE-4884. Streaming tests fail to start MiniMRCluster due to missing
+    queue configuration. (Chris Nauroth via suresh)
+
     MAPREDUCE-4953. HadoopPipes misuses fprintf. (Andy Isaacson via atm)
     MAPREDUCE-4953. HadoopPipes misuses fprintf. (Andy Isaacson via atm)
 
 
 Release 2.0.2-alpha - 2012-09-07 
 Release 2.0.2-alpha - 2012-09-07 
@@ -1219,7 +1228,7 @@ Release 2.0.2-alpha - 2012-09-07
 
 
   NEW FEATURES
   NEW FEATURES
 
 
-    MAPREDUCE-987. Exposing MiniDFS and MiniMR clusters as a single process 
+    MAPREDUCE-987. Exposing MiniDFS and MiniMR clusters as a single process
     command-line. (ahmed via tucu)
     command-line. (ahmed via tucu)
 
 
     MAPREDUCE-4417. add support for encrypted shuffle (tucu)
     MAPREDUCE-4417. add support for encrypted shuffle (tucu)

+ 6 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java

@@ -525,6 +525,12 @@ public class JobHistoryEventHandler extends AbstractService
        JobInitedEvent jie = (JobInitedEvent) event.getHistoryEvent();
        mi.getJobIndexInfo().setJobStartTime(jie.getLaunchTime());
      }
+      
+      if (event.getHistoryEvent().getEventType() == EventType.JOB_QUEUE_CHANGED) {
+        JobQueueChangeEvent jQueueEvent =
+            (JobQueueChangeEvent) event.getHistoryEvent();
+        mi.getJobIndexInfo().setQueueName(jQueueEvent.getJobQueueName());
+      }

      // If this is JobFinishedEvent, close the writer and setup the job-index
      if (event.getHistoryEvent().getEventType() == EventType.JOB_FINISHED) {

+ 3 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java

@@ -39,7 +39,7 @@ import org.apache.hadoop.security.authorize.AccessControlList;
 
 
 
 
 /**
- * Main interface to interact with the job. Provides only getters. 
+ * Main interface to interact with the job.
  */
 public interface Job {
 
@@ -98,4 +98,6 @@ public interface Job {
   List<AMInfo> getAMInfos();
   
   boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation);
+  
+  public void setQueueName(String queueName);
 }

+ 9 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java

@@ -59,6 +59,7 @@ import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
 import org.apache.hadoop.mapreduce.jobhistory.JobInfoChangeEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobInitedEvent;
+import org.apache.hadoop.mapreduce.jobhistory.JobQueueChangeEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobSubmittedEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobUnsuccessfulCompletionEvent;
 import org.apache.hadoop.mapreduce.lib.chain.ChainMapper;
@@ -181,7 +182,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
   private final EventHandler eventHandler;
   private final MRAppMetrics metrics;
   private final String userName;
-  private final String queueName;
+  private String queueName;
   private final long appSubmitTime;
   private final AppContext appContext;
 
@@ -1123,6 +1124,13 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
     return queueName;
   }
   
+  @Override
+  public void setQueueName(String queueName) {
+    this.queueName = queueName;
+    JobQueueChangeEvent jqce = new JobQueueChangeEvent(oldJobId, queueName);
+    eventHandler.handle(new JobHistoryEvent(jobId, jqce));
+  }
+  
   /*
    * (non-Javadoc)
    * @see org.apache.hadoop.mapreduce.v2.app.job.Job#getConfFile()

+ 26 - 34
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java

@@ -1265,57 +1265,56 @@ public abstract class TaskAttemptImpl implements
       }
       }
     }
     }
   }
   }
-
-  private static long computeSlotMillis(TaskAttemptImpl taskAttempt) {
+  
+  private static void updateMillisCounters(JobCounterUpdateEvent jce,
+      TaskAttemptImpl taskAttempt) {
     TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
     TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
-    int slotMemoryReq =
+    long duration = (taskAttempt.getFinishTime() - taskAttempt.getLaunchTime());
+    int mbRequired =
         taskAttempt.getMemoryRequired(taskAttempt.conf, taskType);
         taskAttempt.getMemoryRequired(taskAttempt.conf, taskType);
+    int vcoresRequired = taskAttempt.getCpuRequired(taskAttempt.conf, taskType);
 
 
     int minSlotMemSize = taskAttempt.conf.getInt(
     int minSlotMemSize = taskAttempt.conf.getInt(
       YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
       YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
       YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
       YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
 
 
     int simSlotsRequired =
     int simSlotsRequired =
-        minSlotMemSize == 0 ? 0 : (int) Math.ceil((float) slotMemoryReq
+        minSlotMemSize == 0 ? 0 : (int) Math.ceil((float) mbRequired
             / minSlotMemSize);
             / minSlotMemSize);
 
 
-    long slotMillisIncrement =
-        simSlotsRequired
-            * (taskAttempt.getFinishTime() - taskAttempt.getLaunchTime());
-    return slotMillisIncrement;
+    if (taskType == TaskType.MAP) {
+      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, simSlotsRequired * duration);
+      jce.addCounterUpdate(JobCounter.MB_MILLIS_MAPS, duration * mbRequired);
+      jce.addCounterUpdate(JobCounter.VCORES_MILLIS_MAPS, duration * vcoresRequired);
+      jce.addCounterUpdate(JobCounter.MILLIS_MAPS, duration);
+    } else {
+      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, simSlotsRequired * duration);
+      jce.addCounterUpdate(JobCounter.MB_MILLIS_REDUCES, duration * mbRequired);
+      jce.addCounterUpdate(JobCounter.VCORES_MILLIS_REDUCES, duration * vcoresRequired);
+      jce.addCounterUpdate(JobCounter.MILLIS_REDUCES, duration);
+    }
   }
   }
 
 
   private static JobCounterUpdateEvent createJobCounterUpdateEventTASucceeded(
   private static JobCounterUpdateEvent createJobCounterUpdateEventTASucceeded(
       TaskAttemptImpl taskAttempt) {
       TaskAttemptImpl taskAttempt) {
-    long slotMillis = computeSlotMillis(taskAttempt);
     TaskId taskId = taskAttempt.attemptId.getTaskId();
     TaskId taskId = taskAttempt.attemptId.getTaskId();
     JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskId.getJobId());
     JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskId.getJobId());
-    jce.addCounterUpdate(
-      taskId.getTaskType() == TaskType.MAP ?
-        JobCounter.SLOTS_MILLIS_MAPS : JobCounter.SLOTS_MILLIS_REDUCES,
-        slotMillis);
+    updateMillisCounters(jce, taskAttempt);
     return jce;
     return jce;
   }
   }
-
+  
   private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
   private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
       TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
       TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
     TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
     TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
     JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
     JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
     
     
-    long slotMillisIncrement = computeSlotMillis(taskAttempt);
-    
     if (taskType == TaskType.MAP) {
     if (taskType == TaskType.MAP) {
       jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
       jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
-      if(!taskAlreadyCompleted) {
-        // dont double count the elapsed time
-        jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
-      }
     } else {
     } else {
       jce.addCounterUpdate(JobCounter.NUM_FAILED_REDUCES, 1);
       jce.addCounterUpdate(JobCounter.NUM_FAILED_REDUCES, 1);
-      if(!taskAlreadyCompleted) {
-        // dont double count the elapsed time
-        jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
-      }
+    }
+    if (!taskAlreadyCompleted) {
+      updateMillisCounters(jce, taskAttempt);
     }
     }
     return jce;
     return jce;
   }
   }
@@ -1325,20 +1324,13 @@ public abstract class TaskAttemptImpl implements
     TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
     TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
     JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
     JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
     
     
-    long slotMillisIncrement = computeSlotMillis(taskAttempt);
-    
     if (taskType == TaskType.MAP) {
     if (taskType == TaskType.MAP) {
       jce.addCounterUpdate(JobCounter.NUM_KILLED_MAPS, 1);
       jce.addCounterUpdate(JobCounter.NUM_KILLED_MAPS, 1);
-      if(!taskAlreadyCompleted) {
-        // dont double count the elapsed time
-        jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
-      }
     } else {
     } else {
       jce.addCounterUpdate(JobCounter.NUM_KILLED_REDUCES, 1);
       jce.addCounterUpdate(JobCounter.NUM_KILLED_REDUCES, 1);
-      if(!taskAlreadyCompleted) {
-        // dont double count the elapsed time
-        jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
-      }
+    }
+    if (!taskAlreadyCompleted) {
+      updateMillisCounters(jce, taskAttempt);
     }
     }
     return jce;
     return jce;
   }  
   }  

+ 5 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java

@@ -109,11 +109,11 @@ public abstract class RMCommunicator extends AbstractService
   @Override
   protected void serviceStart() throws Exception {
     scheduler= createSchedulerProxy();
-    register();
-    startAllocatorThread();
     JobID id = TypeConverter.fromYarn(this.applicationId);
     JobId jobId = TypeConverter.toYarn(id);
     job = context.getJob(jobId);
+    register();
+    startAllocatorThread();
     super.serviceStart();
   }
 
@@ -161,6 +161,9 @@ public abstract class RMCommunicator extends AbstractService
       }
       this.applicationACLs = response.getApplicationACLs();
       LOG.info("maxContainerCapability: " + maxContainerCapability.getMemory());
+      String queue = response.getQueue();
+      LOG.info("queue: " + queue);
+      job.setQueueName(queue);
     } catch (Exception are) {
       LOG.error("Exception while registering", are);
       throw new YarnRuntimeException(are);

+ 9 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java

@@ -81,6 +81,15 @@ public class TestEvents {
     assertEquals(test.getPriority(), JobPriority.LOW);
 
   }
+  
+  @Test(timeout = 10000)
+  public void testJobQueueChange() throws Exception {
+    org.apache.hadoop.mapreduce.JobID jid = new JobID("001", 1);
+    JobQueueChangeEvent test = new JobQueueChangeEvent(jid,
+        "newqueue");
+    assertEquals(test.getJobId().toString(), jid.toString());
+    assertEquals(test.getJobQueueName(), "newqueue");
+  }
 
   /**
    * simple test TaskUpdatedEvent and TaskUpdated

+ 22 - 8
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java

@@ -117,6 +117,9 @@ public class MRApp extends MRAppMaster {
   private File testWorkDir;
   private File testWorkDir;
   private Path testAbsPath;
   private Path testAbsPath;
   private ClusterInfo clusterInfo;
   private ClusterInfo clusterInfo;
+  
+  // Queue to pretend the RM assigned us
+  private String assignedQueue;
 
 
   public static String NM_HOST = "localhost";
   public static String NM_HOST = "localhost";
   public static int NM_PORT = 1234;
   public static int NM_PORT = 1234;
@@ -133,7 +136,7 @@ public class MRApp extends MRAppMaster {
 
 
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
       boolean cleanOnStart, Clock clock) {
       boolean cleanOnStart, Clock clock) {
-    this(maps, reduces, autoComplete, testName, cleanOnStart, 1, clock);
+    this(maps, reduces, autoComplete, testName, cleanOnStart, 1, clock, null);
   }
   }
 
 
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
@@ -146,6 +149,12 @@ public class MRApp extends MRAppMaster {
       boolean cleanOnStart) {
       boolean cleanOnStart) {
     this(maps, reduces, autoComplete, testName, cleanOnStart, 1);
     this(maps, reduces, autoComplete, testName, cleanOnStart, 1);
   }
   }
+  
+  public MRApp(int maps, int reduces, boolean autoComplete, String testName,
+      boolean cleanOnStart, String assignedQueue) {
+    this(maps, reduces, autoComplete, testName, cleanOnStart, 1,
+        new SystemClock(), assignedQueue);
+  }
 
 
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
       boolean cleanOnStart, boolean unregistered) {
       boolean cleanOnStart, boolean unregistered) {
@@ -178,7 +187,7 @@ public class MRApp extends MRAppMaster {
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
       boolean cleanOnStart, int startCount) {
       boolean cleanOnStart, int startCount) {
     this(maps, reduces, autoComplete, testName, cleanOnStart, startCount,
     this(maps, reduces, autoComplete, testName, cleanOnStart, startCount,
-        new SystemClock());
+        new SystemClock(), null);
   }
   }
 
 
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
@@ -191,33 +200,34 @@ public class MRApp extends MRAppMaster {
       boolean cleanOnStart, int startCount, Clock clock, boolean unregistered) {
       boolean cleanOnStart, int startCount, Clock clock, boolean unregistered) {
     this(getApplicationAttemptId(applicationId, startCount), getContainerId(
     this(getApplicationAttemptId(applicationId, startCount), getContainerId(
       applicationId, startCount), maps, reduces, autoComplete, testName,
       applicationId, startCount), maps, reduces, autoComplete, testName,
-      cleanOnStart, startCount, clock, unregistered);
+      cleanOnStart, startCount, clock, unregistered, null);
   }
   }
 
 
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
-      boolean cleanOnStart, int startCount, Clock clock) {
+      boolean cleanOnStart, int startCount, Clock clock, String assignedQueue) {
     this(getApplicationAttemptId(applicationId, startCount), getContainerId(
     this(getApplicationAttemptId(applicationId, startCount), getContainerId(
       applicationId, startCount), maps, reduces, autoComplete, testName,
       applicationId, startCount), maps, reduces, autoComplete, testName,
-      cleanOnStart, startCount, clock, true);
+      cleanOnStart, startCount, clock, true, assignedQueue);
   }
   }
 
 
   public MRApp(ApplicationAttemptId appAttemptId, ContainerId amContainerId,
   public MRApp(ApplicationAttemptId appAttemptId, ContainerId amContainerId,
       int maps, int reduces, boolean autoComplete, String testName,
       int maps, int reduces, boolean autoComplete, String testName,
       boolean cleanOnStart, int startCount, boolean unregistered) {
       boolean cleanOnStart, int startCount, boolean unregistered) {
     this(appAttemptId, amContainerId, maps, reduces, autoComplete, testName,
     this(appAttemptId, amContainerId, maps, reduces, autoComplete, testName,
-        cleanOnStart, startCount, new SystemClock(), unregistered);
+        cleanOnStart, startCount, new SystemClock(), unregistered, null);
   }
   }
 
 
   public MRApp(ApplicationAttemptId appAttemptId, ContainerId amContainerId,
   public MRApp(ApplicationAttemptId appAttemptId, ContainerId amContainerId,
       int maps, int reduces, boolean autoComplete, String testName,
       int maps, int reduces, boolean autoComplete, String testName,
       boolean cleanOnStart, int startCount) {
       boolean cleanOnStart, int startCount) {
     this(appAttemptId, amContainerId, maps, reduces, autoComplete, testName,
     this(appAttemptId, amContainerId, maps, reduces, autoComplete, testName,
-        cleanOnStart, startCount, new SystemClock(), true);
+        cleanOnStart, startCount, new SystemClock(), true, null);
   }
   }
 
 
   public MRApp(ApplicationAttemptId appAttemptId, ContainerId amContainerId,
   public MRApp(ApplicationAttemptId appAttemptId, ContainerId amContainerId,
       int maps, int reduces, boolean autoComplete, String testName,
       int maps, int reduces, boolean autoComplete, String testName,
-      boolean cleanOnStart, int startCount, Clock clock, boolean unregistered) {
+      boolean cleanOnStart, int startCount, Clock clock, boolean unregistered,
+      String assignedQueue) {
     super(appAttemptId, amContainerId, NM_HOST, NM_PORT, NM_HTTP_PORT, clock, System
     super(appAttemptId, amContainerId, NM_HOST, NM_PORT, NM_HTTP_PORT, clock, System
         .currentTimeMillis(), MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
         .currentTimeMillis(), MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
     this.testWorkDir = new File("target", testName);
     this.testWorkDir = new File("target", testName);
@@ -239,6 +249,7 @@ public class MRApp extends MRAppMaster {
     // If safeToReportTerminationToUser is set to true, we can verify whether
     // If safeToReportTerminationToUser is set to true, we can verify whether
     // the job can reaches the final state when MRAppMaster shuts down.
     // the job can reaches the final state when MRAppMaster shuts down.
     this.successfullyUnregistered.set(unregistered);
     this.successfullyUnregistered.set(unregistered);
+    this.assignedQueue = assignedQueue;
   }
   }
 
 
   @Override
   @Override
@@ -285,6 +296,9 @@ public class MRApp extends MRAppMaster {
     start();
     start();
     DefaultMetricsSystem.shutdown();
     DefaultMetricsSystem.shutdown();
     Job job = getContext().getAllJobs().values().iterator().next();
     Job job = getContext().getAllJobs().values().iterator().next();
+    if (assignedQueue != null) {
+      job.setQueueName(assignedQueue);
+    }
 
 
     // Write job.xml
     // Write job.xml
     String jobFile = MRApps.getJobFile(conf, user,
     String jobFile = MRApps.getJobFile(conf, user,
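The new assignedQueue argument threaded through the MRApp constructors above lets a test pretend the RM placed the job on a particular queue; a hedged usage sketch, modeled on the TestJobHistoryEvents case at the end of this commit (test name literal is a placeholder):

    MRApp app = new MRApp(2, 1, true, "testAssignedQueue", true, "assignedQueue");
    app.submit(new Configuration());
    Job job = app.getContext().getAllJobs().values().iterator().next();
    // After submit(), job.setQueueName("assignedQueue") has been applied,
    // so the job should now report the assigned queue.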

+ 1 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java

@@ -39,6 +39,7 @@ public class MockAppContext implements AppContext {
   final Map<JobId, Job> jobs;
   final long startTime = System.currentTimeMillis();
   Set<String> blacklistedNodes;
+  String queue;
   
   public MockAppContext(int appid) {
     appID = MockJobs.newAppID(appid);

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java

@@ -629,6 +629,11 @@ public class MockJobs extends MockApps {
        jobConf.addResource(fc.open(configFile), configFile.toString());
        return jobConf;
      }
+
+      @Override
+      public void setQueueName(String queueName) {
+        // do nothing
+      }
     };
   }
 
 

+ 6 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java

@@ -37,7 +37,7 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import javax.servlet.http.HttpServletResponse;
 
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobContext;
 import org.apache.hadoop.mapred.JobContext;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -199,7 +199,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
 
 
   @Test
   @Test
   public void testNotificationOnLastRetryNormalShutdown() throws Exception {
   public void testNotificationOnLastRetryNormalShutdown() throws Exception {
-    HttpServer server = startHttpServer();
+    HttpServer2 server = startHttpServer();
     // Act like it is the second attempt. Default max attempts is 2
     // Act like it is the second attempt. Default max attempts is 2
     MRApp app = spy(new MRAppWithCustomContainerAllocator(
     MRApp app = spy(new MRAppWithCustomContainerAllocator(
         2, 2, true, this.getClass().getName(), true, 2, true));
         2, 2, true, this.getClass().getName(), true, 2, true));
@@ -223,7 +223,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
   @Test
   @Test
   public void testAbsentNotificationOnNotLastRetryUnregistrationFailure()
   public void testAbsentNotificationOnNotLastRetryUnregistrationFailure()
       throws Exception {
       throws Exception {
-    HttpServer server = startHttpServer();
+    HttpServer2 server = startHttpServer();
     MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
     MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
         this.getClass().getName(), true, 1, false));
         this.getClass().getName(), true, 1, false));
     doNothing().when(app).sysexit();
     doNothing().when(app).sysexit();
@@ -250,7 +250,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
   @Test
   @Test
   public void testNotificationOnLastRetryUnregistrationFailure()
   public void testNotificationOnLastRetryUnregistrationFailure()
       throws Exception {
       throws Exception {
-    HttpServer server = startHttpServer();
+    HttpServer2 server = startHttpServer();
     MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
     MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
         this.getClass().getName(), true, 2, false));
         this.getClass().getName(), true, 2, false));
     doNothing().when(app).sysexit();
     doNothing().when(app).sysexit();
@@ -274,10 +274,10 @@ public class TestJobEndNotifier extends JobEndNotifier {
     server.stop();
     server.stop();
   }
   }
 
 
-  private static HttpServer startHttpServer() throws Exception {
+  private static HttpServer2 startHttpServer() throws Exception {
     new File(System.getProperty(
     new File(System.getProperty(
         "build.webapps", "build/webapps") + "/test").mkdirs();
         "build.webapps", "build/webapps") + "/test").mkdirs();
-    HttpServer server = new HttpServer.Builder().setName("test")
+    HttpServer2 server = new HttpServer2.Builder().setName("test")
         .addEndpoint(URI.create("http://localhost:0"))
         .addEndpoint(URI.create("http://localhost:0"))
         .setFindPort(true).build();
         .setFindPort(true).build();
     server.addServlet("jobend", "/jobend", JobEndServlet.class);
     server.addServlet("jobend", "/jobend", JobEndServlet.class);

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java

@@ -505,6 +505,11 @@ public class TestRuntimeEstimators {
     public Configuration loadConfFile() {
       throw new UnsupportedOperationException();
     }
+
+    @Override
+    public void setQueueName(String queueName) {
+      // do nothing
+    }
   }
 
   /*

+ 22 - 11
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MapTaskAttemptImpl;
 import org.apache.hadoop.mapred.MapTaskAttemptImpl;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobCounter;
 import org.apache.hadoop.mapreduce.JobCounter;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
@@ -182,13 +183,13 @@ public class TestTaskAttempt{
   }
   }
 
 
   @Test
   @Test
-  public void testSlotMillisCounterUpdate() throws Exception {
-    verifySlotMillis(2048, 2048, 1024);
-    verifySlotMillis(2048, 1024, 1024);
-    verifySlotMillis(10240, 1024, 2048);
+  public void testMillisCountersUpdate() throws Exception {
+    verifyMillisCounters(2048, 2048, 1024);
+    verifyMillisCounters(2048, 1024, 1024);
+    verifyMillisCounters(10240, 1024, 2048);
   }
   }
 
 
-  public void verifySlotMillis(int mapMemMb, int reduceMemMb,
+  public void verifyMillisCounters(int mapMemMb, int reduceMemMb,
       int minContainerSize) throws Exception {
       int minContainerSize) throws Exception {
     Clock actualClock = new SystemClock();
     Clock actualClock = new SystemClock();
     ControlledClock clock = new ControlledClock(actualClock);
     ControlledClock clock = new ControlledClock(actualClock);
@@ -232,13 +233,23 @@ public class TestTaskAttempt{
     Assert.assertEquals(mta.getLaunchTime(), 10);
     Assert.assertEquals(mta.getLaunchTime(), 10);
     Assert.assertEquals(rta.getFinishTime(), 11);
     Assert.assertEquals(rta.getFinishTime(), 11);
     Assert.assertEquals(rta.getLaunchTime(), 10);
     Assert.assertEquals(rta.getLaunchTime(), 10);
+    Counters counters = job.getAllCounters();
     Assert.assertEquals((int) Math.ceil((float) mapMemMb / minContainerSize),
     Assert.assertEquals((int) Math.ceil((float) mapMemMb / minContainerSize),
-        job.getAllCounters().findCounter(JobCounter.SLOTS_MILLIS_MAPS)
-            .getValue());
-    Assert.assertEquals(
-        (int) Math.ceil((float) reduceMemMb / minContainerSize), job
-            .getAllCounters().findCounter(JobCounter.SLOTS_MILLIS_REDUCES)
-            .getValue());
+        counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue());
+    Assert.assertEquals((int) Math.ceil((float) reduceMemMb / minContainerSize),
+        counters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES).getValue());
+    Assert.assertEquals(1,
+        counters.findCounter(JobCounter.MILLIS_MAPS).getValue());
+    Assert.assertEquals(1,
+        counters.findCounter(JobCounter.MILLIS_REDUCES).getValue());
+    Assert.assertEquals(mapMemMb,
+        counters.findCounter(JobCounter.MB_MILLIS_MAPS).getValue());
+    Assert.assertEquals(reduceMemMb,
+        counters.findCounter(JobCounter.MB_MILLIS_REDUCES).getValue());
+    Assert.assertEquals(1,
+        counters.findCounter(JobCounter.VCORES_MILLIS_MAPS).getValue());
+    Assert.assertEquals(1,
+        counters.findCounter(JobCounter.VCORES_MILLIS_REDUCES).getValue());
   }
   }
 
 
   private TaskAttemptImpl createMapTaskAttemptImplForTest(
   private TaskAttemptImpl createMapTaskAttemptImplForTest(

+ 9 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr

@@ -122,6 +122,13 @@
       ]
      },
 
+     {"type": "record", "name": "JobQueueChange",
+      "fields": [
+          {"name": "jobid", "type": "string"},
+          {"name": "jobQueueName", "type": "string"}
+      ]
+     },
+
      {"type": "record", "name": "JobUnsuccessfulCompletion",
       "fields": [
           {"name": "jobid", "type": "string"},
@@ -267,6 +274,7 @@
           "JOB_FINISHED",
           "JOB_PRIORITY_CHANGED",
           "JOB_STATUS_CHANGED",
+          "JOB_QUEUE_CHANGED",
           "JOB_FAILED",
           "JOB_KILLED",
           "JOB_ERROR",
@@ -306,6 +314,7 @@
                "JobInited",
                "AMStarted",
                "JobPriorityChange",
+               "JobQueueChange",
                "JobStatusChanged",
                "JobSubmitted",
                "JobUnsuccessfulCompletion",

+ 7 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobCounter.java

@@ -49,5 +49,11 @@ public enum JobCounter {
   TASKS_REQ_PREEMPT,
   CHECKPOINTS,
   CHECKPOINT_BYTES,
-  CHECKPOINT_TIME
+  CHECKPOINT_TIME,
+  MILLIS_MAPS,
+  MILLIS_REDUCES,
+  VCORES_MILLIS_MAPS,
+  VCORES_MILLIS_REDUCES,
+  MB_MILLIS_MAPS,
+  MB_MILLIS_REDUCES
 }

+ 2 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java

@@ -98,6 +98,8 @@ public class EventReader implements Closeable {
       result = new JobFinishedEvent(); break;
     case JOB_PRIORITY_CHANGED:
       result = new JobPriorityChangeEvent(); break;
+    case JOB_QUEUE_CHANGED:
+      result = new JobQueueChangeEvent(); break;
     case JOB_STATUS_CHANGED:
       result = new JobStatusChangedEvent(); break;
     case JOB_FAILED:

+ 7 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java

@@ -183,6 +183,9 @@ public class JobHistoryParser implements HistoryEventHandler {
     case JOB_PRIORITY_CHANGED:
       handleJobPriorityChangeEvent((JobPriorityChangeEvent) event);
       break;
+    case JOB_QUEUE_CHANGED:
+      handleJobQueueChangeEvent((JobQueueChangeEvent) event);
+      break;
     case JOB_FAILED:
     case JOB_KILLED:
     case JOB_ERROR:
@@ -385,6 +388,10 @@ public class JobHistoryParser implements HistoryEventHandler {
   private void handleJobPriorityChangeEvent(JobPriorityChangeEvent event) {
     info.priority = event.getPriority();
   }
+  
+  private void handleJobQueueChangeEvent(JobQueueChangeEvent event) {
+    info.jobQueueName = event.getJobQueueName();
+  }
 
   private void handleJobInitedEvent(JobInitedEvent event) {
     info.launchTime = event.getLaunchTime();

+ 63 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobQueueChangeEvent.java

@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.jobhistory;
+
+import org.apache.avro.util.Utf8;
+import org.apache.hadoop.mapreduce.JobID;
+
+@SuppressWarnings("deprecation")
+public class JobQueueChangeEvent implements HistoryEvent {
+  private JobQueueChange datum = new JobQueueChange();
+  
+  public JobQueueChangeEvent(JobID id, String queueName) {
+    datum.jobid = new Utf8(id.toString());
+    datum.jobQueueName = new Utf8(queueName);
+  }
+  
+  JobQueueChangeEvent() { }
+  
+  @Override
+  public EventType getEventType() {
+    return EventType.JOB_QUEUE_CHANGED;
+  }
+
+  @Override
+  public Object getDatum() {
+    return datum;
+  }
+
+  @Override
+  public void setDatum(Object datum) {
+    this.datum = (JobQueueChange) datum;
+  }
+  
+  /** Get the Job ID */
+  public JobID getJobId() {
+    return JobID.forName(datum.jobid.toString());
+  }
+  
+  /** Get the new Job queue name */
+  public String getJobQueueName() {
+    if (datum.jobQueueName != null) {
+      return datum.jobQueueName.toString();
+    }
+    return null;
+  }
+
+}
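End to end, the queue name now flows from the RM registration response (RMCommunicator) into JobImpl.setQueueName, which emits this event into the job history, where JobHistoryEventHandler and JobHistoryParser pick it up. A small usage sketch, mirroring the new TestEvents case earlier in this commit:

    JobID jid = new JobID("001", 1);
    JobQueueChangeEvent event = new JobQueueChangeEvent(jid, "newqueue");
    assert event.getEventType() == EventType.JOB_QUEUE_CHANGED;
    assert "newqueue".equals(event.getJobQueueName());
    assert jid.toString().equals(event.getJobId().toString());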

+ 7 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/org/apache/hadoop/mapreduce/JobCounter.properties

@@ -25,9 +25,15 @@ DATA_LOCAL_MAPS.name=              Data-local map tasks
 RACK_LOCAL_MAPS.name=              Rack-local map tasks
 SLOTS_MILLIS_MAPS.name=            Total time spent by all maps in occupied slots (ms)
 SLOTS_MILLIS_REDUCES.name=         Total time spent by all reduces in occupied slots (ms)
+MILLIS_MAPS.name=                  Total time spent by all map tasks (ms)
+MILLIS_REDUCES.name=               Total time spent by all reduce tasks (ms)
+MB_MILLIS_MAPS.name=               Total megabyte-seconds taken by all map tasks
+MB_MILLIS_REDUCES.name=            Total megabyte-seconds taken by all reduce tasks
+VCORES_MILLIS_MAPS.name=           Total vcore-seconds taken by all map tasks
+VCORES_MILLIS_REDUCES.name=        Total vcore-seconds taken by all reduce tasks
 FALLOW_SLOTS_MILLIS_MAPS.name=     Total time spent by all maps waiting after reserving slots (ms)
 FALLOW_SLOTS_MILLIS_REDUCES.name=  Total time spent by all reduces waiting after reserving slots (ms)
 TASKS_REQ_PREEMPT.name=            Tasks that have been asked to preempt
 CHECKPOINTS.name=                  Number of checkpoints reported
 CHECKPOINT_BYTES.name=             Total amount of bytes in checkpoints
-CHECKPOINT_TIME.name=              Total time spent checkpointing (ms)
+CHECKPOINT_TIME.name=              Total time spent checkpointing (ms)

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java

@@ -34,10 +34,10 @@ import javax.servlet.http.HttpServletResponse;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 
 public class TestJobEndNotifier extends TestCase {
-  HttpServer server;
+  HttpServer2 server;
   URL baseUrl;
 
   @SuppressWarnings("serial")
@@ -102,7 +102,7 @@ public class TestJobEndNotifier extends TestCase {
   public void setUp() throws Exception {
     new File(System.getProperty("build.webapps", "build/webapps") + "/test"
         ).mkdirs();
-    server = new HttpServer.Builder().setName("test")
+    server = new HttpServer2.Builder().setName("test")
         .addEndpoint(URI.create("http://localhost:0"))
         .setFindPort(true).build();
     server.addServlet("delay", "/delay", DelayServlet.class);
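
For context, the migrated builder can also be exercised on its own; a minimal sketch, assuming HttpServer2 keeps the start()/stop() lifecycle of the old HttpServer (servlet registration is omitted because DelayServlet lives inside the test):

    import java.net.URI;
    import org.apache.hadoop.http.HttpServer2;

    public class HttpServer2Sketch {
      public static void main(String[] args) throws Exception {
        // Bind to an ephemeral port on localhost, exactly as the test does.
        HttpServer2 server = new HttpServer2.Builder()
            .setName("test")
            .addEndpoint(URI.create("http://localhost:0"))
            .setFindPort(true)
            .build();
        server.start();
        try {
          // ... issue HTTP requests against the port the server picked ...
        } finally {
          server.stop();
        }
      }
    }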

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java

@@ -453,4 +453,9 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
     }
     return amInfos;
   }
+
+  @Override
+  public void setQueueName(String queueName) {
+    throw new UnsupportedOperationException("Can't set job's queue name in history");
+  }
 }

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java

@@ -190,5 +190,10 @@ public class PartialJob implements org.apache.hadoop.mapreduce.v2.app.job.Job {
   public List<AMInfo> getAMInfos() {
     return null;
   }
+  
+  @Override
+  public void setQueueName(String queueName) {
+    throw new UnsupportedOperationException("Can't set job's queue name in history");
+  }
 
 }

+ 40 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java

@@ -155,6 +155,41 @@ public class TestJobHistoryEvents {
     Assert.assertEquals("JobHistoryEventHandler",
         services[services.length - 1].getName());
   }
+  
+  @Test
+  public void testAssignedQueue() throws Exception {
+    Configuration conf = new Configuration();
+    MRApp app = new MRAppWithHistory(2, 1, true, this.getClass().getName(),
+        true, "assignedQueue");
+    app.submit(conf);
+    Job job = app.getContext().getAllJobs().values().iterator().next();
+    JobId jobId = job.getID();
+    LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
+    app.waitForState(job, JobState.SUCCEEDED);
+    
+    //make sure all events are flushed 
+    app.waitForState(Service.STATE.STOPPED);
+    /*
+     * Use HistoryContext to read logged events and verify the number of 
+     * completed maps 
+    */
+    HistoryContext context = new JobHistory();
+    // test start and stop states
+    ((JobHistory)context).init(conf);
+    ((JobHistory)context).start();
+    Assert.assertTrue( context.getStartTime()>0);
+    Assert.assertEquals(((JobHistory)context).getServiceState(),Service.STATE.STARTED);
+
+    // get job before stopping JobHistory
+    Job parsedJob = context.getJob(jobId);
+
+    // stop JobHistory
+    ((JobHistory)context).stop();
+    Assert.assertEquals(((JobHistory)context).getServiceState(),Service.STATE.STOPPED);
+
+    Assert.assertEquals("QueueName not correct", "assignedQueue",
+        parsedJob.getQueueName());
+  }
 
 
   private void verifyTask(Task task) {
     Assert.assertEquals("Task state not currect", TaskState.SUCCEEDED,
@@ -184,6 +219,11 @@ public class TestJobHistoryEvents {
       super(maps, reduces, autoComplete, testName, cleanOnStart);
     }
 
+    public MRAppWithHistory(int maps, int reduces, boolean autoComplete,
+        String testName, boolean cleanOnStart, String assignedQueue) {
+      super(maps, reduces, autoComplete, testName, cleanOnStart, assignedQueue);
+    }
+
     @Override
     protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
         AppContext context) {

+ 4 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesAcls.java

@@ -415,5 +415,9 @@ public class TestHsWebServicesAcls {
       return aclsMgr.checkAccess(callerUGI, jobOperation,
           this.getUserName(), jobAcls.get(jobOperation));
     }
+
+    @Override
+    public void setQueueName(String queueName) {
+    }
   }
 }

+ 1 - 1
hadoop-project/src/site/apt/index.apt.vm

@@ -45,7 +45,7 @@ Apache Hadoop ${project.version}
 
 
   The new ResourceManager manages the global assignment of compute resources to 
   applications and the per-application ApplicationMaster manages the 
-  application’s scheduling and coordination. 
+  application's scheduling and coordination. 
 
 
   An application is either a single job in the sense of classic MapReduce jobs 
   or a DAG of such jobs. 

+ 8 - 7
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptionSwitch.java

@@ -37,15 +37,16 @@ public enum DistCpOptionSwitch {
   /**
    * Preserves status of file/path in the target.
    * Default behavior with -p, is to preserve replication,
-   * block size, user, group and permission on the target file
+   * block size, user, group, permission and checksum type on the target file.
+   * Note that when preserving checksum type, block size is also preserved.
    *
-   * If any of the optional switches are present among rbugp, then
-   * only the corresponding file attribute is preserved
+   * If any of the optional switches are present among rbugpc, then
+   * only the corresponding file attribute is preserved.
    *
    */
   PRESERVE_STATUS(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
-      new Option("p", true, "preserve status (rbugp)" +
-          "(replication, block-size, user, group, permission)")),
+      new Option("p", true, "preserve status (rbugpc)" +
+          "(replication, block-size, user, group, permission, checksum-type)")),
 
   /**
    * Update target location by copying only files that are missing
@@ -53,7 +54,7 @@ public enum DistCpOptionSwitch {
    * across source and target. Typically used with DELETE_MISSING
    * Incompatible with ATOMIC_COMMIT
    */
-  SYNC_FOLDERS(DistCpConstants.CONF_LABEL_SYNC_FOLDERS, 
+  SYNC_FOLDERS(DistCpConstants.CONF_LABEL_SYNC_FOLDERS,
       new Option("update", false, "Update target, copying only missing" +
           "files or directories")),
 
@@ -80,7 +81,7 @@ public enum DistCpOptionSwitch {
    * Max number of maps to use during copy. DistCp will split work
    * as equally as possible among these maps
    */
-  MAX_MAPS(DistCpConstants.CONF_LABEL_MAX_MAPS, 
+  MAX_MAPS(DistCpConstants.CONF_LABEL_MAX_MAPS,
       new Option("m", true, "Max number of concurrent maps to use for copy")),
 
   /**

+ 1 - 1
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpOptions.java

@@ -61,7 +61,7 @@ public class DistCpOptions {
   private Path targetPath;
 
   public static enum FileAttribute{
-    REPLICATION, BLOCKSIZE, USER, GROUP, PERMISSION;
+    REPLICATION, BLOCKSIZE, USER, GROUP, PERMISSION, CHECKSUMTYPE;
 
     public static FileAttribute getAttribute(char symbol) {
       for (FileAttribute attribute : values()) {
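
A quick sketch of how the new constant is reached from a command-line letter, assuming getAttribute resolves a symbol by the first letter of the enum name (which is how the existing r/b/u/g/p letters behave):

    import org.apache.hadoop.tools.DistCpOptions.FileAttribute;

    public class FileAttributeLookupSketch {
      public static void main(String[] args) {
        // 'c' is the new switch letter for checksum-type preservation.
        FileAttribute attribute = FileAttribute.getAttribute('c');
        System.out.println(attribute); // CHECKSUMTYPE, under the first-letter assumption above
      }
    }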

+ 3 - 3
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/OptionsParser.java

@@ -34,7 +34,7 @@ public class OptionsParser {
 
 
   private static final Log LOG = LogFactory.getLog(OptionsParser.class);
 
-  private static final Options cliOptions = new Options();      
+  private static final Options cliOptions = new Options();
 
   static {
     for (DistCpOptionSwitch option : DistCpOptionSwitch.values()) {
@@ -50,7 +50,7 @@ public class OptionsParser {
     protected String[] flatten(Options options, String[] arguments, boolean stopAtNonOption) {
       for (int index = 0; index < arguments.length; index++) {
         if (arguments[index].equals("-" + DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) {
-          arguments[index] = "-prbugp";
+          arguments[index] = "-prbugpc";
         }
       }
       return super.flatten(options, arguments, stopAtNonOption);
@@ -125,7 +125,7 @@ public class OptionsParser {
         option.setAtomicWorkPath(new Path(workPath));
       }
     } else if (command.hasOption(DistCpOptionSwitch.WORK_PATH.getSwitch())) {
-      throw new IllegalArgumentException("-tmp work-path can only be specified along with -atomic");      
+      throw new IllegalArgumentException("-tmp work-path can only be specified along with -atomic");
     }
 
     if (command.hasOption(DistCpOptionSwitch.LOG_PATH.getSwitch())) {
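
To make the -p expansion above concrete, a small sketch using the same placeholder URIs as the parser tests further down; a bare -p now covers all six attributes, while -pc selects only the checksum type:

    import org.apache.hadoop.tools.DistCpOptions;
    import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
    import org.apache.hadoop.tools.OptionsParser;

    public class PreserveChecksumTypeSketch {
      public static void main(String[] args) {
        // "-p" is flattened to "-prbugpc", so every attribute is preserved.
        DistCpOptions options = OptionsParser.parse(new String[] {
            "-p", "-f",
            "hdfs://localhost:8020/source/first",
            "hdfs://localhost:8020/target/"});
        System.out.println(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); // true

        // "-pc" preserves only the checksum type; block size is still matched at copy time.
        options = OptionsParser.parse(new String[] {
            "-pc", "-f",
            "hdfs://localhost:8020/source/first",
            "hdfs://localhost:8020/target/"});
        System.out.println(options.shouldPreserve(FileAttribute.BLOCKSIZE));    // false
        System.out.println(options.shouldPreserve(FileAttribute.CHECKSUMTYPE)); // true
      }
    }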

+ 3 - 3
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyMapper.java

@@ -111,7 +111,7 @@ public class CopyMapper extends Mapper<Text, FileStatus, Text, Text> {
    */
    */
   private void initializeSSLConf(Context context) throws IOException {
   private void initializeSSLConf(Context context) throws IOException {
     LOG.info("Initializing SSL configuration");
     LOG.info("Initializing SSL configuration");
-    
+
     String workDir = conf.get(JobContext.JOB_LOCAL_DIR) + "/work";
     String workDir = conf.get(JobContext.JOB_LOCAL_DIR) + "/work";
     Path[] cacheFiles = context.getLocalCacheFiles();
     Path[] cacheFiles = context.getLocalCacheFiles();
 
 
@@ -294,7 +294,7 @@ public class CopyMapper extends Mapper<Text, FileStatus, Text, Text> {
             RetriableFileCopyCommand.CopyReadException) {
             RetriableFileCopyCommand.CopyReadException) {
       incrementCounter(context, Counter.FAIL, 1);
       incrementCounter(context, Counter.FAIL, 1);
       incrementCounter(context, Counter.BYTESFAILED, sourceFileStatus.getLen());
       incrementCounter(context, Counter.BYTESFAILED, sourceFileStatus.getLen());
-      context.write(null, new Text("FAIL: " + sourceFileStatus.getPath() + " - " + 
+      context.write(null, new Text("FAIL: " + sourceFileStatus.getPath() + " - " +
           StringUtils.stringifyException(exception)));
           StringUtils.stringifyException(exception)));
     }
     }
     else
     else
@@ -322,7 +322,7 @@ public class CopyMapper extends Mapper<Text, FileStatus, Text, Text> {
                    targetFileStatus.getLen() != source.getLen()
                    targetFileStatus.getLen() != source.getLen()
                 || (!skipCrc &&
                 || (!skipCrc &&
                        !DistCpUtils.checksumsAreEqual(sourceFS,
                        !DistCpUtils.checksumsAreEqual(sourceFS,
-                                          source.getPath(), targetFS, target))
+                          source.getPath(), null, targetFS, target))
                 || (source.getBlockSize() != targetFileStatus.getBlockSize() &&
                 || (source.getBlockSize() != targetFileStatus.getBlockSize() &&
                       preserve.contains(FileAttribute.BLOCKSIZE))
                       preserve.contains(FileAttribute.BLOCKSIZE))
                );
                );

+ 75 - 32
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/RetriableFileCopyCommand.java

@@ -18,23 +18,33 @@
 
 
 package org.apache.hadoop.tools.mapred;
 package org.apache.hadoop.tools.mapred;
 
 
-import org.apache.hadoop.tools.util.RetriableCommand;
-import org.apache.hadoop.tools.util.ThrottledInputStream;
-import org.apache.hadoop.tools.util.DistCpUtils;
-import org.apache.hadoop.tools.DistCpOptions.*;
-import org.apache.hadoop.tools.DistCpConstants;
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.IOUtils;
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.EnumSet;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.tools.DistCpConstants;
+import org.apache.hadoop.tools.DistCpOptions.FileAttribute;
+import org.apache.hadoop.tools.util.DistCpUtils;
+import org.apache.hadoop.tools.util.RetriableCommand;
+import org.apache.hadoop.tools.util.ThrottledInputStream;
 
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.annotations.VisibleForTesting;
 
 
-import java.io.*;
-import java.util.EnumSet;
-
 /**
 /**
  * This class extends RetriableCommand to implement the copy of files,
  * This class extends RetriableCommand to implement the copy of files,
  * with retries on failure.
  * with retries on failure.
@@ -44,7 +54,7 @@ public class RetriableFileCopyCommand extends RetriableCommand {
   private static Log LOG = LogFactory.getLog(RetriableFileCopyCommand.class);
   private static Log LOG = LogFactory.getLog(RetriableFileCopyCommand.class);
   private static int BUFFER_SIZE = 8 * 1024;
   private static int BUFFER_SIZE = 8 * 1024;
   private boolean skipCrc = false;
   private boolean skipCrc = false;
-  
+
   /**
   /**
    * Constructor, taking a description of the action.
    * Constructor, taking a description of the action.
    * @param description Verbose description of the copy operation.
    * @param description Verbose description of the copy operation.
@@ -52,7 +62,7 @@ public class RetriableFileCopyCommand extends RetriableCommand {
   public RetriableFileCopyCommand(String description) {
   public RetriableFileCopyCommand(String description) {
     super(description);
     super(description);
   }
   }
- 
+
   /**
   /**
    * Create a RetriableFileCopyCommand.
    * Create a RetriableFileCopyCommand.
    *
    *
@@ -99,15 +109,21 @@ public class RetriableFileCopyCommand extends RetriableCommand {
         LOG.debug("Copying " + sourceFileStatus.getPath() + " to " + target);
         LOG.debug("Copying " + sourceFileStatus.getPath() + " to " + target);
         LOG.debug("Tmp-file path: " + tmpTargetPath);
         LOG.debug("Tmp-file path: " + tmpTargetPath);
       }
       }
-      FileSystem sourceFS = sourceFileStatus.getPath().getFileSystem(
-              configuration);
+      final Path sourcePath = sourceFileStatus.getPath();
+      final FileSystem sourceFS = sourcePath.getFileSystem(configuration);
+      final FileChecksum sourceChecksum = fileAttributes
+          .contains(FileAttribute.CHECKSUMTYPE) ? sourceFS
+          .getFileChecksum(sourcePath) : null;
+
       long bytesRead = copyToTmpFile(tmpTargetPath, targetFS, sourceFileStatus,
       long bytesRead = copyToTmpFile(tmpTargetPath, targetFS, sourceFileStatus,
-                                     context, fileAttributes);
+          context, fileAttributes, sourceChecksum);
 
 
-      compareFileLengths(sourceFileStatus, tmpTargetPath, configuration, bytesRead);
+      compareFileLengths(sourceFileStatus, tmpTargetPath, configuration,
+          bytesRead);
       //At this point, src&dest lengths are same. if length==0, we skip checksum
       //At this point, src&dest lengths are same. if length==0, we skip checksum
       if ((bytesRead != 0) && (!skipCrc)) {
       if ((bytesRead != 0) && (!skipCrc)) {
-        compareCheckSums(sourceFS, sourceFileStatus.getPath(), targetFS, tmpTargetPath);
+        compareCheckSums(sourceFS, sourceFileStatus.getPath(), sourceChecksum,
+            targetFS, tmpTargetPath);
       }
       }
       promoteTmpToTarget(tmpTargetPath, target, targetFS);
       promoteTmpToTarget(tmpTargetPath, target, targetFS);
       return bytesRead;
       return bytesRead;
@@ -118,14 +134,33 @@ public class RetriableFileCopyCommand extends RetriableCommand {
     }
     }
   }
   }
 
 
+  /**
+   * @return the checksum spec of the source checksum if checksum type should be
+   *         preserved
+   */
+  private ChecksumOpt getChecksumOpt(EnumSet<FileAttribute> fileAttributes,
+      FileChecksum sourceChecksum) {
+    if (fileAttributes.contains(FileAttribute.CHECKSUMTYPE)
+        && sourceChecksum != null) {
+      return sourceChecksum.getChecksumOpt();
+    }
+    return null;
+  }
+
   private long copyToTmpFile(Path tmpTargetPath, FileSystem targetFS,
   private long copyToTmpFile(Path tmpTargetPath, FileSystem targetFS,
-                             FileStatus sourceFileStatus, Mapper.Context context,
-                             EnumSet<FileAttribute> fileAttributes)
-                             throws IOException {
-    OutputStream outStream = new BufferedOutputStream(targetFS.create(
-            tmpTargetPath, true, BUFFER_SIZE,
-            getReplicationFactor(fileAttributes, sourceFileStatus, targetFS, tmpTargetPath),
-            getBlockSize(fileAttributes, sourceFileStatus, targetFS, tmpTargetPath), context));
+      FileStatus sourceFileStatus, Mapper.Context context,
+      EnumSet<FileAttribute> fileAttributes, final FileChecksum sourceChecksum)
+      throws IOException {
+    FsPermission permission = FsPermission.getFileDefault().applyUMask(
+        FsPermission.getUMask(targetFS.getConf()));
+    OutputStream outStream = new BufferedOutputStream(
+        targetFS.create(tmpTargetPath, permission,
+            EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), BUFFER_SIZE,
+            getReplicationFactor(fileAttributes, sourceFileStatus, targetFS,
+                tmpTargetPath),
+            getBlockSize(fileAttributes, sourceFileStatus, targetFS,
+                tmpTargetPath),
+            context, getChecksumOpt(fileAttributes, sourceChecksum)));
     return copyBytes(sourceFileStatus, outStream, BUFFER_SIZE, context);
     return copyBytes(sourceFileStatus, outStream, BUFFER_SIZE, context);
   }
   }
 
 
@@ -140,9 +175,10 @@ public class RetriableFileCopyCommand extends RetriableCommand {
   }
   }
 
 
   private void compareCheckSums(FileSystem sourceFS, Path source,
   private void compareCheckSums(FileSystem sourceFS, Path source,
-                                FileSystem targetFS, Path target)
-                                throws IOException {
-    if (!DistCpUtils.checksumsAreEqual(sourceFS, source, targetFS, target)) {
+      FileChecksum sourceChecksum, FileSystem targetFS, Path target)
+      throws IOException {
+    if (!DistCpUtils.checksumsAreEqual(sourceFS, source, sourceChecksum,
+        targetFS, target)) {
       StringBuilder errorMessage = new StringBuilder("Check-sum mismatch between ")
       StringBuilder errorMessage = new StringBuilder("Check-sum mismatch between ")
           .append(source).append(" and ").append(target).append(".");
           .append(source).append(" and ").append(target).append(".");
       if (sourceFS.getFileStatus(source).getBlockSize() != targetFS.getFileStatus(target).getBlockSize()) {
       if (sourceFS.getFileStatus(source).getBlockSize() != targetFS.getFileStatus(target).getBlockSize()) {
@@ -249,11 +285,18 @@ public class RetriableFileCopyCommand extends RetriableCommand {
             sourceFile.getReplication() : targetFS.getDefaultReplication(tmpTargetPath);
             sourceFile.getReplication() : targetFS.getDefaultReplication(tmpTargetPath);
   }
   }
 
 
+  /**
+   * @return the block size of the source file if we need to preserve either
+   *         the block size or the checksum type. Otherwise the default block
+   *         size of the target FS.
+   */
   private static long getBlockSize(
   private static long getBlockSize(
           EnumSet<FileAttribute> fileAttributes,
           EnumSet<FileAttribute> fileAttributes,
           FileStatus sourceFile, FileSystem targetFS, Path tmpTargetPath) {
           FileStatus sourceFile, FileSystem targetFS, Path tmpTargetPath) {
-    return fileAttributes.contains(FileAttribute.BLOCKSIZE)?
-            sourceFile.getBlockSize() : targetFS.getDefaultBlockSize(tmpTargetPath);
+    boolean preserve = fileAttributes.contains(FileAttribute.BLOCKSIZE)
+        || fileAttributes.contains(FileAttribute.CHECKSUMTYPE);
+    return preserve ? sourceFile.getBlockSize() : targetFS
+        .getDefaultBlockSize(tmpTargetPath);
   }
   }
 
 
   /**
   /**
@@ -261,7 +304,7 @@ public class RetriableFileCopyCommand extends RetriableCommand {
    * failures from other kinds of IOExceptions.
    * failures from other kinds of IOExceptions.
    * The failure to read from source is dealt with specially, in the CopyMapper.
    * The failure to read from source is dealt with specially, in the CopyMapper.
    * Such failures may be skipped if the DistCpOptions indicate so.
    * Such failures may be skipped if the DistCpOptions indicate so.
-   * Write failures are intolerable, and amount to CopyMapper failure.  
+   * Write failures are intolerable, and amount to CopyMapper failure.
    */
    */
   public static class CopyReadException extends IOException {
   public static class CopyReadException extends IOException {
     public CopyReadException(Throwable rootCause) {
     public CopyReadException(Throwable rootCause) {
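
Outside of DistCp, the same pattern can be written against the plain FileSystem API: fetch the source checksum once, reuse its ChecksumOpt for the target create() call, and keep the source block size so the checksums stay comparable. A simplified sketch, with placeholder file systems and paths and no Progressable:

    import java.io.IOException;
    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Options.ChecksumOpt;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.io.IOUtils;

    public class ChecksumPreservingCopySketch {
      static void copyPreservingChecksumType(FileSystem srcFS, Path src,
          FileSystem dstFS, Path dst, Configuration conf) throws IOException {
        FileStatus srcStatus = srcFS.getFileStatus(src);
        FileChecksum srcChecksum = srcFS.getFileChecksum(src);
        // Null simply means "fall back to the target's default checksum settings".
        ChecksumOpt checksumOpt =
            srcChecksum == null ? null : srcChecksum.getChecksumOpt();
        FsPermission permission = FsPermission.getFileDefault()
            .applyUMask(FsPermission.getUMask(dstFS.getConf()));
        IOUtils.copyBytes(
            srcFS.open(src),
            dstFS.create(dst, permission,
                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
                8 * 1024,                    // buffer size, as in the copy command above
                srcStatus.getReplication(),  // keep replication as well
                srcStatus.getBlockSize(),    // matching block size keeps checksums comparable
                null,                        // no Progressable in this sketch
                checksumOpt),
            conf, true);                     // close both streams when done
      }
    }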

+ 8 - 6
hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java

@@ -125,7 +125,7 @@ public class DistCpUtils {
    * @param sourceRootPath - Source root path
    * @param sourceRootPath - Source root path
    * @param childPath - Path for which relative path is required
    * @param childPath - Path for which relative path is required
    * @return - Relative portion of the child path (always prefixed with /
    * @return - Relative portion of the child path (always prefixed with /
-   *           unless it is empty 
+   *           unless it is empty
    */
    */
   public static String getRelativePath(Path sourceRootPath, Path childPath) {
   public static String getRelativePath(Path sourceRootPath, Path childPath) {
     String childPathString = childPath.toUri().getPath();
     String childPathString = childPath.toUri().getPath();
@@ -277,9 +277,11 @@ public class DistCpUtils {
    * If checksums's can't be retrieved, it doesn't fail the test
    * If checksums's can't be retrieved, it doesn't fail the test
    * Only time the comparison would fail is when checksums are
    * Only time the comparison would fail is when checksums are
    * available and they don't match
    * available and they don't match
-   *                                  
+   *
    * @param sourceFS FileSystem for the source path.
    * @param sourceFS FileSystem for the source path.
    * @param source The source path.
    * @param source The source path.
+   * @param sourceChecksum The checksum of the source file. If it is null we
+   * still need to retrieve it through sourceFS.
    * @param targetFS FileSystem for the target path.
    * @param targetFS FileSystem for the target path.
    * @param target The target path.
    * @param target The target path.
    * @return If either checksum couldn't be retrieved, the function returns
    * @return If either checksum couldn't be retrieved, the function returns
@@ -288,12 +290,12 @@ public class DistCpUtils {
    * @throws IOException if there's an exception while retrieving checksums.
    * @throws IOException if there's an exception while retrieving checksums.
    */
    */
   public static boolean checksumsAreEqual(FileSystem sourceFS, Path source,
   public static boolean checksumsAreEqual(FileSystem sourceFS, Path source,
-                                   FileSystem targetFS, Path target)
-                                   throws IOException {
-    FileChecksum sourceChecksum = null;
+      FileChecksum sourceChecksum, FileSystem targetFS, Path target)
+      throws IOException {
     FileChecksum targetChecksum = null;
     FileChecksum targetChecksum = null;
     try {
     try {
-      sourceChecksum = sourceFS.getFileChecksum(source);
+      sourceChecksum = sourceChecksum != null ? sourceChecksum : sourceFS
+          .getFileChecksum(source);
       targetChecksum = targetFS.getFileChecksum(target);
       targetChecksum = targetFS.getFileChecksum(target);
     } catch (IOException e) {
     } catch (IOException e) {
       LOG.error("Unable to retrieve checksum for " + source + " or " + target, e);
       LOG.error("Unable to retrieve checksum for " + source + " or " + target, e);
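
In other words, callers that already hold the source checksum (as the copy command now does) pass it in, and everyone else can pass null; a brief sketch with placeholder file systems and paths:

    import java.io.IOException;
    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.tools.util.DistCpUtils;

    public class ChecksumCompareSketch {
      static boolean sameChecksum(FileSystem sourceFS, Path source,
          FileSystem targetFS, Path target) throws IOException {
        // Passing null makes checksumsAreEqual fetch the source checksum itself.
        return DistCpUtils.checksumsAreEqual(sourceFS, source, null, targetFS, target);
      }

      static boolean sameChecksum(FileSystem sourceFS, Path source,
          FileChecksum knownSourceChecksum, FileSystem targetFS, Path target)
          throws IOException {
        // Reuse a checksum that was fetched earlier, e.g. before the copy started.
        return DistCpUtils.checksumsAreEqual(sourceFS, source, knownSourceChecksum,
            targetFS, target);
      }
    }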

+ 32 - 3
hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/TestOptionsParser.java

@@ -110,7 +110,7 @@ public class TestOptionsParser {
         "hdfs://localhost:8020/target/"});
         "hdfs://localhost:8020/target/"});
     Assert.assertEquals(options.getMapBandwidth(), 11);
     Assert.assertEquals(options.getMapBandwidth(), 11);
   }
   }
-  
+
   @Test(expected=IllegalArgumentException.class)
   @Test(expected=IllegalArgumentException.class)
   public void testParseNonPositiveBandwidth() {
   public void testParseNonPositiveBandwidth() {
     OptionsParser.parse(new String[] {
     OptionsParser.parse(new String[] {
@@ -119,7 +119,7 @@ public class TestOptionsParser {
         "hdfs://localhost:8020/source/first",
         "hdfs://localhost:8020/source/first",
         "hdfs://localhost:8020/target/"});
         "hdfs://localhost:8020/target/"});
   }
   }
-  
+
   @Test(expected=IllegalArgumentException.class)
   @Test(expected=IllegalArgumentException.class)
   public void testParseZeroBandwidth() {
   public void testParseZeroBandwidth() {
     OptionsParser.parse(new String[] {
     OptionsParser.parse(new String[] {
@@ -397,6 +397,7 @@ public class TestOptionsParser {
     Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
+    Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
 
 
     options = OptionsParser.parse(new String[] {
     options = OptionsParser.parse(new String[] {
         "-p",
         "-p",
@@ -408,6 +409,7 @@ public class TestOptionsParser {
     Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
+    Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
 
 
     options = OptionsParser.parse(new String[] {
     options = OptionsParser.parse(new String[] {
         "-p",
         "-p",
@@ -418,6 +420,7 @@ public class TestOptionsParser {
     Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
+    Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
 
 
     options = OptionsParser.parse(new String[] {
     options = OptionsParser.parse(new String[] {
         "-pbr",
         "-pbr",
@@ -429,6 +432,7 @@ public class TestOptionsParser {
     Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
+    Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
 
 
     options = OptionsParser.parse(new String[] {
     options = OptionsParser.parse(new String[] {
         "-pbrgup",
         "-pbrgup",
@@ -440,6 +444,31 @@ public class TestOptionsParser {
     Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
     Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
+    Assert.assertFalse(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
+
+    options = OptionsParser.parse(new String[] {
+        "-pbrgupc",
+        "-f",
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
+    Assert.assertTrue(options.shouldPreserve(FileAttribute.BLOCKSIZE));
+    Assert.assertTrue(options.shouldPreserve(FileAttribute.REPLICATION));
+    Assert.assertTrue(options.shouldPreserve(FileAttribute.PERMISSION));
+    Assert.assertTrue(options.shouldPreserve(FileAttribute.USER));
+    Assert.assertTrue(options.shouldPreserve(FileAttribute.GROUP));
+    Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
+
+    options = OptionsParser.parse(new String[] {
+        "-pc",
+        "-f",
+        "hdfs://localhost:8020/source/first",
+        "hdfs://localhost:8020/target/"});
+    Assert.assertFalse(options.shouldPreserve(FileAttribute.BLOCKSIZE));
+    Assert.assertFalse(options.shouldPreserve(FileAttribute.REPLICATION));
+    Assert.assertFalse(options.shouldPreserve(FileAttribute.PERMISSION));
+    Assert.assertFalse(options.shouldPreserve(FileAttribute.USER));
+    Assert.assertFalse(options.shouldPreserve(FileAttribute.GROUP));
+    Assert.assertTrue(options.shouldPreserve(FileAttribute.CHECKSUMTYPE));
 
 
     options = OptionsParser.parse(new String[] {
     options = OptionsParser.parse(new String[] {
         "-p",
         "-p",
@@ -452,7 +481,7 @@ public class TestOptionsParser {
       attribIterator.next();
       attribIterator.next();
       i++;
       i++;
     }
     }
-    Assert.assertEquals(i, 5);
+    Assert.assertEquals(i, 6);
 
 
     try {
     try {
       OptionsParser.parse(new String[] {
       OptionsParser.parse(new String[] {

+ 92 - 30
hadoop-tools/hadoop-distcp/src/test/java/org/apache/hadoop/tools/mapred/TestCopyMapper.java

@@ -18,18 +18,28 @@
 
 
 package org.apache.hadoop.tools.mapred;
 package org.apache.hadoop.tools.mapred;
 
 
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.security.PrivilegedAction;
+import java.util.ArrayList;
+import java.util.EnumSet;
+import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.*;
+import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.DistCpConstants;
 import org.apache.hadoop.tools.DistCpConstants;
@@ -37,23 +47,17 @@ import org.apache.hadoop.tools.DistCpOptionSwitch;
 import org.apache.hadoop.tools.DistCpOptions;
 import org.apache.hadoop.tools.DistCpOptions;
 import org.apache.hadoop.tools.StubContext;
 import org.apache.hadoop.tools.StubContext;
 import org.apache.hadoop.tools.util.DistCpUtils;
 import org.apache.hadoop.tools.util.DistCpUtils;
+import org.apache.hadoop.util.DataChecksum;
 import org.junit.Assert;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.Test;
 
 
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.security.PrivilegedAction;
-import java.util.ArrayList;
-import java.util.EnumSet;
-import java.util.List;
-
 public class TestCopyMapper {
 public class TestCopyMapper {
   private static final Log LOG = LogFactory.getLog(TestCopyMapper.class);
   private static final Log LOG = LogFactory.getLog(TestCopyMapper.class);
   private static List<Path> pathList = new ArrayList<Path>();
   private static List<Path> pathList = new ArrayList<Path>();
   private static int nFiles = 0;
   private static int nFiles = 0;
   private static final int DEFAULT_FILE_SIZE = 1024;
   private static final int DEFAULT_FILE_SIZE = 1024;
+  private static final long NON_DEFAULT_BLOCK_SIZE = 4096;
 
 
   private static MiniDFSCluster cluster;
   private static MiniDFSCluster cluster;
 
 
@@ -119,12 +123,27 @@ public class TestCopyMapper {
     mkdirs(SOURCE_PATH + "/2/3/4");
     mkdirs(SOURCE_PATH + "/2/3/4");
     mkdirs(SOURCE_PATH + "/2/3");
     mkdirs(SOURCE_PATH + "/2/3");
     mkdirs(SOURCE_PATH + "/5");
     mkdirs(SOURCE_PATH + "/5");
-    touchFile(SOURCE_PATH + "/5/6", true);
+    touchFile(SOURCE_PATH + "/5/6", true, null);
     mkdirs(SOURCE_PATH + "/7");
     mkdirs(SOURCE_PATH + "/7");
     mkdirs(SOURCE_PATH + "/7/8");
     mkdirs(SOURCE_PATH + "/7/8");
     touchFile(SOURCE_PATH + "/7/8/9");
     touchFile(SOURCE_PATH + "/7/8/9");
   }
   }
 
 
+  private static void createSourceDataWithDifferentChecksumType()
+      throws Exception {
+    mkdirs(SOURCE_PATH + "/1");
+    mkdirs(SOURCE_PATH + "/2");
+    mkdirs(SOURCE_PATH + "/2/3/4");
+    mkdirs(SOURCE_PATH + "/2/3");
+    mkdirs(SOURCE_PATH + "/5");
+    touchFile(SOURCE_PATH + "/5/6", new ChecksumOpt(DataChecksum.Type.CRC32,
+        512));
+    mkdirs(SOURCE_PATH + "/7");
+    mkdirs(SOURCE_PATH + "/7/8");
+    touchFile(SOURCE_PATH + "/7/8/9", new ChecksumOpt(DataChecksum.Type.CRC32C,
+        512));
+  }
+
   private static void mkdirs(String path) throws Exception {
   private static void mkdirs(String path) throws Exception {
     FileSystem fileSystem = cluster.getFileSystem();
     FileSystem fileSystem = cluster.getFileSystem();
     final Path qualifiedPath = new Path(path).makeQualified(fileSystem.getUri(),
     final Path qualifiedPath = new Path(path).makeQualified(fileSystem.getUri(),
@@ -134,21 +153,31 @@ public class TestCopyMapper {
   }
   }
 
 
   private static void touchFile(String path) throws Exception {
   private static void touchFile(String path) throws Exception {
-    touchFile(path, false);
+    touchFile(path, false, null);
   }
   }
 
 
-  private static void touchFile(String path, boolean createMultipleBlocks) throws Exception {
-    final long NON_DEFAULT_BLOCK_SIZE = 4096;
+  private static void touchFile(String path, ChecksumOpt checksumOpt)
+      throws Exception {
+    // create files with specific checksum opt and non-default block size
+    touchFile(path, true, checksumOpt);
+  }
+
+  private static void touchFile(String path, boolean createMultipleBlocks,
+      ChecksumOpt checksumOpt) throws Exception {
     FileSystem fs;
     FileSystem fs;
     DataOutputStream outputStream = null;
     DataOutputStream outputStream = null;
     try {
     try {
       fs = cluster.getFileSystem();
       fs = cluster.getFileSystem();
       final Path qualifiedPath = new Path(path).makeQualified(fs.getUri(),
       final Path qualifiedPath = new Path(path).makeQualified(fs.getUri(),
-                                                      fs.getWorkingDirectory());
-      final long blockSize = createMultipleBlocks? NON_DEFAULT_BLOCK_SIZE : fs.getDefaultBlockSize(qualifiedPath) * 2;
-      outputStream = fs.create(qualifiedPath, true, 0,
-              (short)(fs.getDefaultReplication(qualifiedPath)*2),
-              blockSize);
+          fs.getWorkingDirectory());
+      final long blockSize = createMultipleBlocks ? NON_DEFAULT_BLOCK_SIZE : fs
+          .getDefaultBlockSize(qualifiedPath) * 2;
+      FsPermission permission = FsPermission.getFileDefault().applyUMask(
+          FsPermission.getUMask(fs.getConf()));
+      outputStream = fs.create(qualifiedPath, permission,
+          EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), 0,
+          (short) (fs.getDefaultReplication(qualifiedPath) * 2), blockSize,
+          null, checksumOpt);
       byte[] bytes = new byte[DEFAULT_FILE_SIZE];
       byte[] bytes = new byte[DEFAULT_FILE_SIZE];
       outputStream.write(bytes);
       outputStream.write(bytes);
       long fileSize = DEFAULT_FILE_SIZE;
       long fileSize = DEFAULT_FILE_SIZE;
@@ -171,17 +200,40 @@ public class TestCopyMapper {
     }
     }
   }
   }
 
 
+  @Test
+  public void testCopyWithDifferentChecksumType() throws Exception {
+    testCopy(true);
+  }
+
   @Test(timeout=40000)
   @Test(timeout=40000)
   public void testRun() {
   public void testRun() {
+    testCopy(false);
+  }
+
+  private void testCopy(boolean preserveChecksum) {
     try {
     try {
       deleteState();
       deleteState();
-      createSourceData();
+      if (preserveChecksum) {
+        createSourceDataWithDifferentChecksumType();
+      } else {
+        createSourceData();
+      }
 
 
       FileSystem fs = cluster.getFileSystem();
       FileSystem fs = cluster.getFileSystem();
       CopyMapper copyMapper = new CopyMapper();
       CopyMapper copyMapper = new CopyMapper();
       StubContext stubContext = new StubContext(getConfiguration(), null, 0);
       StubContext stubContext = new StubContext(getConfiguration(), null, 0);
       Mapper<Text, FileStatus, Text, Text>.Context context
       Mapper<Text, FileStatus, Text, Text>.Context context
               = stubContext.getContext();
               = stubContext.getContext();
+
+      Configuration configuration = context.getConfiguration();
+      EnumSet<DistCpOptions.FileAttribute> fileAttributes
+              = EnumSet.of(DistCpOptions.FileAttribute.REPLICATION);
+      if (preserveChecksum) {
+        fileAttributes.add(DistCpOptions.FileAttribute.CHECKSUMTYPE);
+      }
+      configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
+              DistCpUtils.packAttributes(fileAttributes));
+
       copyMapper.setup(context);
       copyMapper.setup(context);
 
 
       for (Path path: pathList) {
       for (Path path: pathList) {
@@ -195,19 +247,29 @@ public class TestCopyMapper {
                 .replaceAll(SOURCE_PATH, TARGET_PATH));
                 .replaceAll(SOURCE_PATH, TARGET_PATH));
         Assert.assertTrue(fs.exists(targetPath));
         Assert.assertTrue(fs.exists(targetPath));
         Assert.assertTrue(fs.isFile(targetPath) == fs.isFile(path));
         Assert.assertTrue(fs.isFile(targetPath) == fs.isFile(path));
-        Assert.assertEquals(fs.getFileStatus(path).getReplication(),
-                fs.getFileStatus(targetPath).getReplication());
-        Assert.assertEquals(fs.getFileStatus(path).getBlockSize(),
-                fs.getFileStatus(targetPath).getBlockSize());
-        Assert.assertTrue(!fs.isFile(targetPath) ||
-                fs.getFileChecksum(targetPath).equals(
-                        fs.getFileChecksum(path)));
+        FileStatus sourceStatus = fs.getFileStatus(path);
+        FileStatus targetStatus = fs.getFileStatus(targetPath);
+        Assert.assertEquals(sourceStatus.getReplication(),
+            targetStatus.getReplication());
+        if (preserveChecksum) {
+          Assert.assertEquals(sourceStatus.getBlockSize(),
+              targetStatus.getBlockSize());
+        }
+        Assert.assertTrue(!fs.isFile(targetPath)
+            || fs.getFileChecksum(targetPath).equals(fs.getFileChecksum(path)));
       }
       }
 
 
       Assert.assertEquals(pathList.size(),
       Assert.assertEquals(pathList.size(),
               stubContext.getReporter().getCounter(CopyMapper.Counter.COPY).getValue());
               stubContext.getReporter().getCounter(CopyMapper.Counter.COPY).getValue());
-      Assert.assertEquals(nFiles * DEFAULT_FILE_SIZE,
-              stubContext.getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED).getValue());
+      if (!preserveChecksum) {
+        Assert.assertEquals(nFiles * DEFAULT_FILE_SIZE, stubContext
+            .getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED)
+            .getValue());
+      } else {
+        Assert.assertEquals(nFiles * NON_DEFAULT_BLOCK_SIZE * 2, stubContext
+            .getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED)
+            .getValue());
+      }
 
 
       testCopyingExistingFiles(fs, copyMapper, context);
       testCopyingExistingFiles(fs, copyMapper, context);
       for (Text value : stubContext.getWriter().values()) {
       for (Text value : stubContext.getWriter().values()) {
@@ -309,7 +371,7 @@ public class TestCopyMapper {
       UserGroupInformation tmpUser = UserGroupInformation.createRemoteUser("guest");
       UserGroupInformation tmpUser = UserGroupInformation.createRemoteUser("guest");
 
 
       final CopyMapper copyMapper = new CopyMapper();
       final CopyMapper copyMapper = new CopyMapper();
-      
+
       final Mapper<Text, FileStatus, Text, Text>.Context context =  tmpUser.
       final Mapper<Text, FileStatus, Text, Text>.Context context =  tmpUser.
           doAs(new PrivilegedAction<Mapper<Text, FileStatus, Text, Text>.Context>() {
           doAs(new PrivilegedAction<Mapper<Text, FileStatus, Text, Text>.Context>() {
         @Override
         @Override
@@ -535,7 +597,7 @@ public class TestCopyMapper {
 
 
       final Mapper<Text, FileStatus, Text, Text>.Context context
       final Mapper<Text, FileStatus, Text, Text>.Context context
               = stubContext.getContext();
               = stubContext.getContext();
-      
+
       context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
       context.getConfiguration().set(DistCpConstants.CONF_LABEL_PRESERVE_STATUS,
         DistCpUtils.packAttributes(preserveStatus));
         DistCpUtils.packAttributes(preserveStatus));
 
 

+ 113 - 95
hadoop-yarn-project/CHANGES.txt

@@ -9,6 +9,24 @@ Trunk - Unreleased
     YARN-1496. Protocol additions to allow moving apps between queues (Sandy
     YARN-1496. Protocol additions to allow moving apps between queues (Sandy
     Ryza)
     Ryza)
 
 
+    YARN-1498. Common scheduler changes for moving apps between queues (Sandy
+    Ryza)
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    YARN-524 TestYarnVersionInfo failing if generated properties doesn't
+    include an SVN URL. (stevel)
+
+Release 2.4.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
     YARN-930. Bootstrapping ApplicationHistoryService module. (vinodkv)
     YARN-930. Bootstrapping ApplicationHistoryService module. (vinodkv)
   
   
     YARN-947. Implementing the data objects to be used by the History reader
     YARN-947. Implementing the data objects to be used by the History reader
@@ -70,13 +88,16 @@ Trunk - Unreleased
     YARN-987. Added ApplicationHistoryManager responsible for exposing reports to
     YARN-987. Added ApplicationHistoryManager responsible for exposing reports to
     all clients. (Mayank Bansal via vinodkv)
     all clients. (Mayank Bansal via vinodkv)
 
 
+    YARN-1630. Introduce timeout for async polling operations in YarnClientImpl
+    (Aditya Acharya via Sandy Ryza)
+
+    YARN-1617. Remove ancient comment and surround LOG.debug in
+    AppSchedulingInfo.allocate (Sandy Ryza)
+
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
   BUG FIXES
   BUG FIXES
 
 
-    YARN-524 TestYarnVersionInfo failing if generated properties doesn't
-    include an SVN URL. (stevel)
-
     YARN-935. Correcting pom.xml to build applicationhistoryserver module
     YARN-935. Correcting pom.xml to build applicationhistoryserver module
     successfully. (Zhijie Shen via vinodkv)
     successfully. (Zhijie Shen via vinodkv)
   
   
@@ -112,7 +133,14 @@ Trunk - Unreleased
     YARN-1613. Fixed the typo with the configuration name
     YARN-1613. Fixed the typo with the configuration name
     YARN_HISTORY_SERVICE_ENABLED. (Akira Ajisaka via vinodkv)
     YARN_HISTORY_SERVICE_ENABLED. (Akira Ajisaka via vinodkv)
 
 
-Release 2.4.0 - UNRELEASED
+    YARN-1618. Fix invalid RMApp transition from NEW to FINAL_SAVING (kasha)
+
+    YARN-1600. RM does not startup when security is enabled without spnego
+    configured (Haohui Mai via jlowe)
+
+    YARN-1642. RMDTRenewer#getRMClient should use ClientRMProxy (kasha)
+
+Release 2.3.0 - UNRELEASED
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
 
 
@@ -169,6 +197,30 @@ Release 2.4.0 - UNRELEASED
 
 
   IMPROVEMENTS
   IMPROVEMENTS
 
 
+    YARN-305. Fair scheduler logs too many "Node offered to app" messages.
+    (Lohit Vijayarenu via Sandy Ryza)
+
+    YARN-1258. Allow configuring the Fair Scheduler root queue (Sandy Ryza)
+
+    YARN-1288. Make Fair Scheduler ACLs more user friendly (Sandy Ryza)
+
+    YARN-1315. TestQueueACLs should also test FairScheduler (Sandy Ryza)
+
+    YARN-1335. Move duplicate code from FSSchedulerApp and FiCaSchedulerApp
+    into SchedulerApplication (Sandy Ryza)
+
+    YARN-1333. Support blacklisting in the Fair Scheduler (Tsuyoshi Ozawa via
+    Sandy Ryza)
+
+    YARN-1109. Demote NodeManager "Sending out status for container" logs to
+    debug (haosdent via Sandy Ryza)
+
+    YARN-1321. Changed NMTokenCache to support both singleton and an instance
+    usage. (Alejandro Abdelnur via vinodkv) 
+
+    YARN-1388. Fair Scheduler page always displays blank fair share (Liyin Liang
+    via Sandy Ryza)
+
     YARN-7. Support CPU resource for DistributedShell. (Junping Du via llu)
     YARN-7. Support CPU resource for DistributedShell. (Junping Du via llu)
 
 
     YARN-905. Add state filters to nodes CLI (Wei Yan via Sandy Ryza)
     YARN-905. Add state filters to nodes CLI (Wei Yan via Sandy Ryza)
@@ -338,6 +390,62 @@ Release 2.4.0 - UNRELEASED
 
 
   BUG FIXES
   BUG FIXES
 
 
+    YARN-1284. LCE: Race condition leaves dangling cgroups entries for killed
+    containers. (Alejandro Abdelnur via Sandy Ryza)
+
+    YARN-1283. Fixed RM to give a fully-qualified proxy URL for an application
+    so that clients don't need to do scheme-mangling. (Omkar Vinit Joshi via
+    vinodkv)
+
+    YARN-879. Fixed tests w.r.t o.a.h.y.server.resourcemanager.Application.
+    (Junping Du via devaraj)
+
+    YARN-1265. Fair Scheduler chokes on unhealthy node reconnect (Sandy Ryza)
+
+    YARN-1044. used/min/max resources do not display info in the scheduler page
+    (Sangjin Lee via Sandy Ryza)
+
+    YARN-1259. In Fair Scheduler web UI, queue num pending and num active apps
+    switched. (Robert Kanter via Sandy Ryza)
+
+    YARN-1295. In UnixLocalWrapperScriptBuilder, using bash -c can cause Text
+    file busy errors (Sandy Ryza)
+
+    YARN-1185. Fixed FileSystemRMStateStore to not leave partial files that
+    prevent subsequent ResourceManager recovery. (Omkar Vinit Joshi via vinodkv)
+
+    YARN-1331. yarn.cmd exits with NoClassDefFoundError trying to run rmadmin or
+    logs. (cnauroth)
+
+    YARN-1330. Fair Scheduler: defaultQueueSchedulingPolicy does not take effect
+    (Sandy Ryza)
+    
+    YARN-1022. Unnecessary INFO logs in AMRMClientAsync (haosdent via bikas)
+    
+    YARN-1349. yarn.cmd does not support passthrough to any arbitrary class.
+    (cnauroth)
+    
+    YARN-1357. TestContainerLaunch.testContainerEnvVariables fails on Windows.
+    (Chuan Liu via cnauroth)
+
+    YARN-1358. TestYarnCLI fails on Windows due to line endings. (Chuan Liu via
+    cnauroth)
+
+    YARN-1343. NodeManagers additions/restarts are not reported as node updates 
+    in AllocateResponse responses to AMs. (tucu)
+
+    YARN-1381. Same relaxLocality appears twice in exception message of
+    AMRMClientImpl#checkLocalityRelaxationConflict() (Ted Yu via Sandy Ryza)
+
+    YARN-1407. RM Web UI and REST APIs should uniformly use
+    YarnApplicationState (Sandy Ryza)
+
+    YARN-1438. Ensure container diagnostics includes exception from container
+    launch. (stevel via acmurthy)
+
+    YARN-1138. yarn.application.classpath is set to point to $HADOOP_CONF_DIR
+    etc., which does not work on Windows. (Chuan Liu via cnauroth)
+
     YARN-461. Fair scheduler should not accept apps with empty string queue name. 
     YARN-461. Fair scheduler should not accept apps with empty string queue name. 
     (ywskycn via tucu)
     (ywskycn via tucu)
 
 
@@ -463,97 +571,7 @@ Release 2.4.0 - UNRELEASED
     YARN-1575. Public localizer crashes with "Localized unkown resource"
     YARN-1575. Public localizer crashes with "Localized unkown resource"
     (jlowe)
     (jlowe)
 
 
-Release 2.3.0 - UNRELEASED
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-  IMPROVEMENTS
-
-    YARN-305. Fair scheduler logs too many "Node offered to app" messages.
-    (Lohit Vijayarenu via Sandy Ryza)
-
-    YARN-1258. Allow configuring the Fair Scheduler root queue (Sandy Ryza)
-
-    YARN-1288. Make Fair Scheduler ACLs more user friendly (Sandy Ryza)
-
-    YARN-1315. TestQueueACLs should also test FairScheduler (Sandy Ryza)
-
-    YARN-1335. Move duplicate code from FSSchedulerApp and FiCaSchedulerApp
-    into SchedulerApplication (Sandy Ryza)
-
-    YARN-1333. Support blacklisting in the Fair Scheduler (Tsuyoshi Ozawa via
-    Sandy Ryza)
-
-    YARN-1109. Demote NodeManager "Sending out status for container" logs to
-    debug (haosdent via Sandy Ryza)
-
-    YARN-1321. Changed NMTokenCache to support both singleton and an instance
-    usage. (Alejandro Abdelnur via vinodkv) 
-
-    YARN-1388. Fair Scheduler page always displays blank fair share (Liyin Liang
-    via Sandy Ryza)
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-
-    YARN-1284. LCE: Race condition leaves dangling cgroups entries for killed
-    containers. (Alejandro Abdelnur via Sandy Ryza)
-
-    YARN-1283. Fixed RM to give a fully-qualified proxy URL for an application
-    so that clients don't need to do scheme-mangling. (Omkar Vinit Joshi via
-    vinodkv)
-
-    YARN-879. Fixed tests w.r.t o.a.h.y.server.resourcemanager.Application.
-    (Junping Du via devaraj)
-
-    YARN-1265. Fair Scheduler chokes on unhealthy node reconnect (Sandy Ryza)
-
-    YARN-1044. used/min/max resources do not display info in the scheduler page
-    (Sangjin Lee via Sandy Ryza)
-
-    YARN-1259. In Fair Scheduler web UI, queue num pending and num active apps
-    switched. (Robert Kanter via Sandy Ryza)
-
-    YARN-1295. In UnixLocalWrapperScriptBuilder, using bash -c can cause Text
-    file busy errors (Sandy Ryza)
-
-    YARN-1185. Fixed FileSystemRMStateStore to not leave partial files that
-    prevent subsequent ResourceManager recovery. (Omkar Vinit Joshi via vinodkv)
-
-    YARN-1331. yarn.cmd exits with NoClassDefFoundError trying to run rmadmin or
-    logs. (cnauroth)
-
-    YARN-1330. Fair Scheduler: defaultQueueSchedulingPolicy does not take effect
-    (Sandy Ryza)
-    
-    YARN-1022. Unnecessary INFO logs in AMRMClientAsync (haosdent via bikas)
-    
-    YARN-1349. yarn.cmd does not support passthrough to any arbitrary class.
-    (cnauroth)
-    
-    YARN-1357. TestContainerLaunch.testContainerEnvVariables fails on Windows.
-    (Chuan Liu via cnauroth)
-
-    YARN-1358. TestYarnCLI fails on Windows due to line endings. (Chuan Liu via
-    cnauroth)
-
-    YARN-1343. NodeManagers additions/restarts are not reported as node updates 
-    in AllocateResponse responses to AMs. (tucu)
-
-    YARN-1381. Same relaxLocality appears twice in exception message of
-    AMRMClientImpl#checkLocalityRelaxationConflict() (Ted Yu via Sandy Ryza)
-
-    YARN-1407. RM Web UI and REST APIs should uniformly use
-    YarnApplicationState (Sandy Ryza)
-
-    YARN-1438. Ensure container diagnostics includes exception from container
-    launch. (stevel via acmurthy)
-
-    YARN-1138. yarn.application.classpath is set to point to $HADOOP_CONF_DIR
-    etc., which does not work on Windows. (Chuan Liu via cnauroth)
+    YARN-1629. IndexOutOfBoundsException in MaxRunningAppsEnforcer (Sandy Ryza)
 
 Release 2.2.0 - 2013-10-13
 

+ 11 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java

@@ -1020,6 +1020,17 @@ public class YarnConfiguration extends Configuration {
       YARN_PREFIX + "client.application-client-protocol.poll-interval-ms";
   public static final long DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS =
       200;
+
+  /**
+   * The duration that the yarn client library waits, cumulatively across polls,
+   * for an expected state change to occur. Defaults to -1, which indicates no
+   * limit.
+   */
+  public static final String YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS =
+      YARN_PREFIX + "client.application-client-protocol.poll-timeout-ms";
+  public static final long DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS =
+      -1;
+
   /**
    * Max number of threads in NMClientAsync to process container management
    * events

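For orientation only: a minimal sketch of how a client might opt into the new timeout key added above. The property name comes from the patch; the 30-second value, the class name PollTimeoutConfigExample and the surrounding driver code are illustrative assumptions, not part of this change. With the default of -1 the client keeps the previous behaviour and waits indefinitely.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.client.api.YarnClient;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class PollTimeoutConfigExample {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Cap the cumulative wait in submitApplication()/killApplication() polling
        // at 30 seconds (illustrative value; the default of -1 waits indefinitely).
        conf.setLong(
            YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS,
            30 * 1000L);
        YarnClient client = YarnClient.createYarnClient();
        client.init(conf);
        client.start();
        // ... submit applications as usual; on timeout a YarnException is raised.
      }
    }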
+ 34 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java

@@ -86,6 +86,7 @@ public class YarnClientImpl extends YarnClient {
   protected ApplicationClientProtocol rmClient;
   protected long submitPollIntervalMillis;
   private long asyncApiPollIntervalMillis;
+  private long asyncApiPollTimeoutMillis;
   protected AHSClient historyClient;
   private boolean historyServiceEnabled;
 
@@ -101,6 +102,9 @@ public class YarnClientImpl extends YarnClient {
     asyncApiPollIntervalMillis =
         conf.getLong(YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS,
           YarnConfiguration.DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS);
+    asyncApiPollTimeoutMillis =
+        conf.getLong(YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS,
+            YarnConfiguration.DEFAULT_YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS);
     submitPollIntervalMillis = asyncApiPollIntervalMillis;
     if (conf.get(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS)
         != null) {
@@ -174,13 +178,24 @@
     rmClient.submitApplication(request);
 
     int pollCount = 0;
+    long startTime = System.currentTimeMillis();
+
     while (true) {
       YarnApplicationState state =
           getApplicationReport(applicationId).getYarnApplicationState();
       if (!state.equals(YarnApplicationState.NEW) &&
           !state.equals(YarnApplicationState.NEW_SAVING)) {
+        LOG.info("Submitted application " + applicationId);
         break;
       }
+
+      long elapsedMillis = System.currentTimeMillis() - startTime;
+      if (enforceAsyncAPITimeout() &&
+          elapsedMillis >= asyncApiPollTimeoutMillis) {
+        throw new YarnException("Timed out while waiting for application " +
+          applicationId + " to be submitted successfully");
+      }
+
       // Notify the client through the log every 10 poll, in case the client
       // is blocked here too long.
       if (++pollCount % 10 == 0) {
@@ -191,10 +206,11 @@
       try {
         Thread.sleep(submitPollIntervalMillis);
       } catch (InterruptedException ie) {
+        LOG.error("Interrupted while waiting for application " + applicationId
+            + " to be successfully submitted.");
       }
     }
 
-    LOG.info("Submitted application " + applicationId);
     return applicationId;
   }
 
@@ -207,15 +223,25 @@
 
     try {
       int pollCount = 0;
+      long startTime = System.currentTimeMillis();
+
       while (true) {
         KillApplicationResponse response =
             rmClient.forceKillApplication(request);
         if (response.getIsKillCompleted()) {
+          LOG.info("Killed application " + applicationId);
           break;
         }
+
+        long elapsedMillis = System.currentTimeMillis() - startTime;
+        if (enforceAsyncAPITimeout() &&
+            elapsedMillis >= this.asyncApiPollTimeoutMillis) {
+          throw new YarnException("Timed out while waiting for application " +
+            applicationId + " to be killed.");
+        }
+
         if (++pollCount % 10 == 0) {
-          LOG.info("Watiting for application " + applicationId
-              + " to be killed.");
+          LOG.info("Waiting for application " + applicationId + " to be killed.");
         }
         Thread.sleep(asyncApiPollIntervalMillis);
       }
@@ -223,7 +249,11 @@
       LOG.error("Interrupted while waiting for application " + applicationId
           + " to be killed.");
     }
-    LOG.info("Killed application " + applicationId);
+  }
+
+  @VisibleForTesting
+  boolean enforceAsyncAPITimeout() {
+    return asyncApiPollTimeoutMillis >= 0;
+  }
 
   @Override

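The submit and kill loops above follow a plain poll-with-deadline pattern. The standalone toy below (all names hypothetical, not the YarnClientImpl API) shows the same control flow: a cumulative elapsed-time check that is skipped when the timeout is negative, plus a periodic progress message every ten polls.

    // Hypothetical, self-contained sketch of the poll-until-condition loop with an
    // optional cumulative deadline (timeoutMillis < 0 disables enforcement).
    public class PollWithDeadline {
      interface Condition {            // stand-in for "the RM reports the state we want"
        boolean isMet();
      }

      static void waitFor(Condition condition, long pollIntervalMillis,
          long timeoutMillis) throws InterruptedException {
        long startTime = System.currentTimeMillis();
        int pollCount = 0;
        while (!condition.isMet()) {
          long elapsed = System.currentTimeMillis() - startTime;
          if (timeoutMillis >= 0 && elapsed >= timeoutMillis) {
            throw new IllegalStateException("Timed out after " + elapsed + " ms");
          }
          if (++pollCount % 10 == 0) { // periodic progress note, as in the patch
            System.out.println("Still waiting after " + elapsed + " ms");
          }
          Thread.sleep(pollIntervalMillis);
        }
      }

      public static void main(String[] args) throws InterruptedException {
        final long deadline = System.currentTimeMillis() + 500;
        waitFor(new Condition() {
          public boolean isMet() {
            return System.currentTimeMillis() >= deadline;
          }
        }, 100, 2000);
        System.out.println("Condition met");
      }
    }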
+ 28 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.client.api.impl;
 
+import static org.junit.Assert.assertFalse;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
@@ -35,6 +36,7 @@ import java.util.Set;
 
 import junit.framework.Assert;
 
+import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
@@ -474,4 +476,30 @@ public class TestYarnClient {
     }
   }
 
+  @Test
+  public void testAsyncAPIPollTimeout() {
+    testAsyncAPIPollTimeoutHelper(null, false);
+    testAsyncAPIPollTimeoutHelper(0L, true);
+    testAsyncAPIPollTimeoutHelper(1L, true);
+  }
+
+  private void testAsyncAPIPollTimeoutHelper(Long valueForTimeout,
+      boolean expectedTimeoutEnforcement) {
+    YarnClientImpl client = new YarnClientImpl();
+    try {
+      Configuration conf = new Configuration();
+      if (valueForTimeout != null) {
+        conf.setLong(
+            YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS,
+            valueForTimeout);
+      }
+
+      client.init(conf);
+
+      Assert.assertEquals(
+          expectedTimeoutEnforcement, client.enforceAsyncAPITimeout());
+    } finally {
+      IOUtils.closeQuietly(client);
+    }
+  }
 }

+ 3 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenIdentifier.java

@@ -37,8 +37,8 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecret
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
+import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.exceptions.YarnException;
-import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.util.Records;
 
 /**
@@ -138,7 +138,7 @@ public class RMDelegationTokenIdentifier extends AbstractDelegationTokenIdentifi
     }
     
     private static ApplicationClientProtocol getRmClient(Token<?> token,
-        Configuration conf) {
+        Configuration conf) throws IOException {
       InetSocketAddress addr = SecurityUtil.getTokenServiceAddr(token);
       if (localSecretManager != null) {
         // return null if it's our token
@@ -151,8 +151,7 @@ public class RMDelegationTokenIdentifier extends AbstractDelegationTokenIdentifi
           return null;
         }
       }
-      final YarnRPC rpc = YarnRPC.create(conf);
-      return (ApplicationClientProtocol)rpc.getProxy(ApplicationClientProtocol.class, addr, conf);        
+      return ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
     }
 
     // get renewer so we can always renew our own tokens

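A minimal sketch of the proxy creation the patch switches to. Only ClientRMProxy.createRMProxy itself is taken from the diff; the wrapper class and the usage comments are illustrative assumptions.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
    import org.apache.hadoop.yarn.client.ClientRMProxy;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class RmProxyExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new YarnConfiguration();
        // Builds the proxy from the configured RM address rather than from a
        // hand-constructed socket address and a raw YarnRPC call.
        ApplicationClientProtocol rmClient =
            ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
        // ... use rmClient, e.g. to renew or cancel an RM delegation token ...
      }
    }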
+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java

@@ -28,7 +28,7 @@ import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -59,7 +59,7 @@ public abstract class WebApp extends ServletModule {
   private volatile String redirectPath;
   private volatile String wsName;
   private volatile Configuration conf;
-  private volatile HttpServer httpServer;
+  private volatile HttpServer2 httpServer;
   private volatile GuiceFilter guiceFilter;
   private final Router router = new Router();
 
@@ -72,11 +72,11 @@ public abstract class WebApp extends ServletModule {
   static final Splitter pathSplitter =
       Splitter.on('/').trimResults().omitEmptyStrings();
 
-  void setHttpServer(HttpServer server) {
+  void setHttpServer(HttpServer2 server) {
     httpServer = checkNotNull(server, "http server");
   }
 
-  @Provides public HttpServer httpServer() { return httpServer; }
+  @Provides public HttpServer2 httpServer() { return httpServer; }
 
   /**
    * Get the address the http server is bound to

+ 7 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java

@@ -35,7 +35,7 @@ import javax.servlet.http.HttpServlet;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.security.AdminACLsManager;
 import org.slf4j.Logger;
@@ -216,20 +216,22 @@ public class WebApps {
             System.exit(1);
           }
         }
-        HttpServer.Builder builder = new HttpServer.Builder().setName(name)
+        HttpServer2.Builder builder = new HttpServer2.Builder().setName(name)
             .addEndpoint(URI.create("http://" + bindAddress + ":" + port))
             .setConf(conf).setFindPort(findPort)
             .setACL(new AdminACLsManager(conf).getAdminAcl())
             .setPathSpec(pathList.toArray(new String[0]));
 
         boolean hasSpnegoConf = spnegoPrincipalKey != null
-            && spnegoKeytabKey != null;
+            && conf.get(spnegoPrincipalKey) != null && spnegoKeytabKey != null
+            && conf.get(spnegoKeytabKey) != null;
+
         if (hasSpnegoConf) {
           builder.setUsernameConfKey(spnegoPrincipalKey)
               .setKeytabConfKey(spnegoKeytabKey)
              .setSecurityEnabled(UserGroupInformation.isSecurityEnabled());
         }
-        HttpServer server = builder.build();
+        HttpServer2 server = builder.build();
 
         for(ServletStruct struct: servlets) {
           server.addServlet(struct.name, struct.spec, struct.clazz);
@@ -237,7 +239,7 @@ public class WebApps {
         for(Map.Entry<String, Object> entry : attributes.entrySet()) {
           server.setAttribute(entry.getKey(), entry.getValue());
         }
-        HttpServer.defineFilter(server.getWebAppContext(), "guice",
+        HttpServer2.defineFilter(server.getWebAppContext(), "guice",
           GuiceFilter.class.getName(), null, new String[] { "/*" });
 
         webapp.setConf(conf);

+ 3 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEventType.java

@@ -24,8 +24,10 @@ public enum RMAppEventType {
   RECOVER,
   KILL,
 
-  // Source: Scheduler
+  // Source: Scheduler and RMAppManager
   APP_REJECTED,
+
+  // Source: Scheduler
   APP_ACCEPTED,
 
   // Source: RMAppAttempt

+ 4 - 7
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java

@@ -144,13 +144,10 @@ public class RMAppImpl implements RMApp, Recoverable {
             RMAppState.ACCEPTED, RMAppState.FINISHED, RMAppState.FAILED,
             RMAppState.KILLED, RMAppState.FINAL_SAVING),
         RMAppEventType.RECOVER, new RMAppRecoveredTransition())
-    .addTransition(RMAppState.NEW, RMAppState.FINAL_SAVING, RMAppEventType.KILL,
-        new FinalSavingTransition(
-          new AppKilledTransition(), RMAppState.KILLED))
-    .addTransition(RMAppState.NEW, RMAppState.FINAL_SAVING,
-        RMAppEventType.APP_REJECTED,
-        new FinalSavingTransition(
-          new AppRejectedTransition(), RMAppState.FAILED))
+    .addTransition(RMAppState.NEW, RMAppState.KILLED, RMAppEventType.KILL,
+        new AppKilledTransition())
+    .addTransition(RMAppState.NEW, RMAppState.FAILED,
+        RMAppEventType.APP_REJECTED, new AppRejectedTransition())
 
     // Transitions from NEW_SAVING state
     .addTransition(RMAppState.NEW_SAVING, RMAppState.NEW_SAVING,

+ 33 - 34
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java

@@ -64,7 +64,7 @@ public class AppSchedulingInfo {
   private Set<String> blacklist = new HashSet<String>();
 
   //private final ApplicationStore store;
-  private final ActiveUsersManager activeUsersManager;
+  private ActiveUsersManager activeUsersManager;
   
   /* Allocated by scheduler */
   boolean pending = true; // for app metrics
@@ -171,11 +171,10 @@ public class AppSchedulingInfo {
             .getNumContainers() : 0;
         Resource lastRequestCapability = lastRequest != null ? lastRequest
             .getCapability() : Resources.none();
-        metrics.incrPendingResources(user, request.getNumContainers()
-            - lastRequestContainers, Resources.subtractFrom( // save a clone
-            Resources.multiply(request.getCapability(), request
-                .getNumContainers()), Resources.multiply(lastRequestCapability,
-                lastRequestContainers)));
+        metrics.incrPendingResources(user, request.getNumContainers(),
+            request.getCapability());
+        metrics.decrPendingResources(user, lastRequestContainers,
+            lastRequestCapability);
       }
     }
   }
@@ -262,9 +261,15 @@ public class AppSchedulingInfo {
       pending = false;
       metrics.runAppAttempt(applicationId, user);
     }
-    LOG.debug("allocate: user: " + user + ", memory: "
-        + request.getCapability());
-    metrics.allocateResources(user, 1, request.getCapability());
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("allocate: applicationId=" + applicationId
+          + " container=" + container.getId()
+          + " host=" + container.getNodeId().toString()
+          + " user=" + user
+          + " resource=" + request.getCapability());
+    }
+    metrics.allocateResources(user, 1, request.getCapability(), true);
   }
 
   /**
@@ -277,9 +282,6 @@ public class AppSchedulingInfo {
   synchronized private void allocateNodeLocal( 
       SchedulerNode node, Priority priority, 
       ResourceRequest nodeLocalRequest, Container container) {
-    // Update consumption and track allocations
-    allocate(container);
-
     // Update future requirements
     nodeLocalRequest.setNumContainers(nodeLocalRequest.getNumContainers() - 1);
     if (nodeLocalRequest.getNumContainers() == 0) {
@@ -306,10 +308,6 @@ public class AppSchedulingInfo {
   synchronized private void allocateRackLocal(
       SchedulerNode node, Priority priority,
       ResourceRequest rackLocalRequest, Container container) {
-
-    // Update consumption and track allocations
-    allocate(container);
-
     // Update future requirements
     rackLocalRequest.setNumContainers(rackLocalRequest.getNumContainers() - 1);
     if (rackLocalRequest.getNumContainers() == 0) {
@@ -329,10 +327,6 @@ public class AppSchedulingInfo {
   synchronized private void allocateOffSwitch(
       SchedulerNode node, Priority priority,
       ResourceRequest offSwitchRequest, Container container) {
-
-    // Update consumption and track allocations
-    allocate(container);
-
     // Update future requirements
     decrementOutstanding(offSwitchRequest);
   }
@@ -365,18 +359,24 @@ public class AppSchedulingInfo {
     }
   }
   
-  synchronized private void allocate(Container container) {
-    // Update consumption and track allocations
-    //TODO: fixme sharad
-    /* try {
-        store.storeContainer(container);
-      } catch (IOException ie) {
-        // TODO fix this. we shouldnt ignore
-      }*/
-    
-    LOG.debug("allocate: applicationId=" + applicationId + " container="
-        + container.getId() + " host="
-        + container.getNodeId().toString());
+  synchronized public void move(Queue newQueue) {
+    QueueMetrics oldMetrics = queue.getMetrics();
+    QueueMetrics newMetrics = newQueue.getMetrics();
+    for (Map<String, ResourceRequest> asks : requests.values()) {
+      ResourceRequest request = asks.get(ResourceRequest.ANY);
+      if (request != null) {
+        oldMetrics.decrPendingResources(user, request.getNumContainers(),
+            request.getCapability());
+        newMetrics.incrPendingResources(user, request.getNumContainers(),
+            request.getCapability());
+      }
+    }
+    oldMetrics.moveAppFrom(this);
+    newMetrics.moveAppTo(this);
+    activeUsersManager.deactivateApplication(user, applicationId);
+    activeUsersManager = newQueue.getActiveUsersManager();
+    activeUsersManager.activateApplication(user, applicationId);
+    this.queue = newQueue;
   }
 
   synchronized public void stop(RMAppAttemptState rmAppAttemptFinalState) {
@@ -386,8 +386,7 @@ public class AppSchedulingInfo {
       ResourceRequest request = asks.get(ResourceRequest.ANY);
       if (request != null) {
         metrics.decrPendingResources(user, request.getNumContainers(),
-            Resources.multiply(request.getCapability(), request
-                .getNumContainers()));
+            request.getCapability());
       }
     }
     metrics.finishAppAttempt(applicationId, pending, user);

+ 2 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/Queue.java

@@ -58,4 +58,6 @@ public interface Queue {
   List<QueueUserACLInfo> getQueueUserAclInfo(UserGroupInformation user);
 
   boolean hasAccess(QueueACL acl, UserGroupInformation user);
+  
+  public ActiveUsersManager getActiveUsersManager();
 }

+ 41 - 8
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java

@@ -280,6 +280,36 @@ public class QueueMetrics implements MetricsSource {
       parent.finishApp(user, rmAppFinalState);
     }
   }
+  
+  public void moveAppFrom(AppSchedulingInfo app) {
+    if (app.isPending()) {
+      appsPending.decr();
+    } else {
+      appsRunning.decr();
+    }
+    QueueMetrics userMetrics = getUserMetrics(app.getUser());
+    if (userMetrics != null) {
+      userMetrics.moveAppFrom(app);
+    }
+    if (parent != null) {
+      parent.moveAppFrom(app);
+    }
+  }
+  
+  public void moveAppTo(AppSchedulingInfo app) {
+    if (app.isPending()) {
+      appsPending.incr();
+    } else {
+      appsRunning.incr();
+    }
+    QueueMetrics userMetrics = getUserMetrics(app.getUser());
+    if (userMetrics != null) {
+      userMetrics.moveAppTo(app);
+    }
+    if (parent != null) {
+      parent.moveAppTo(app);
+    }
+  }
 
   /**
    * Set available resources. To be called by scheduler periodically as
@@ -324,8 +354,8 @@
 
   private void _incrPendingResources(int containers, Resource res) {
     pendingContainers.incr(containers);
-    pendingMB.incr(res.getMemory());
-    pendingVCores.incr(res.getVirtualCores());
+    pendingMB.incr(res.getMemory() * containers);
+    pendingVCores.incr(res.getVirtualCores() * containers);
   }
 
   public void decrPendingResources(String user, int containers, Resource res) {
@@ -341,22 +371,25 @@
 
   private void _decrPendingResources(int containers, Resource res) {
     pendingContainers.decr(containers);
-    pendingMB.decr(res.getMemory());
-    pendingVCores.decr(res.getVirtualCores());
+    pendingMB.decr(res.getMemory() * containers);
+    pendingVCores.decr(res.getVirtualCores() * containers);
   }
 
-  public void allocateResources(String user, int containers, Resource res) {
+  public void allocateResources(String user, int containers, Resource res,
+      boolean decrPending) {
     allocatedContainers.incr(containers);
     aggregateContainersAllocated.incr(containers);
     allocatedMB.incr(res.getMemory() * containers);
     allocatedVCores.incr(res.getVirtualCores() * containers);
-    _decrPendingResources(containers, Resources.multiply(res, containers));
+    if (decrPending) {
+      _decrPendingResources(containers, res);
+    }
     QueueMetrics userMetrics = getUserMetrics(user);
     if (userMetrics != null) {
-      userMetrics.allocateResources(user, containers, res);
+      userMetrics.allocateResources(user, containers, res, decrPending);
     }
     if (parent != null) {
-      parent.allocateResources(user, containers, res);
+      parent.allocateResources(user, containers, res, decrPending);
     }
   }
 

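The fix above makes incrPendingResources/decrPendingResources take a per-container Resource and multiply by the container count inside the metric. A hypothetical toy model of the intended arithmetic follows (made-up names and values, not the QueueMetrics API):

    // Hypothetical accounting sketch: incrementing pending memory for a request
    // of N containers must scale the per-container resource by N.
    public class PendingAccounting {
      private long pendingMB = 0;

      void incrPending(int containers, long perContainerMB) {
        pendingMB += perContainerMB * containers;   // e.g. 3 x 1024 MB -> +3072 MB
      }

      void decrPending(int containers, long perContainerMB) {
        pendingMB -= perContainerMB * containers;
      }

      public static void main(String[] args) {
        PendingAccounting q = new PendingAccounting();
        q.incrPending(3, 1024);            // ask for three 1 GB containers
        q.decrPending(1, 1024);            // one of them gets allocated
        System.out.println(q.pendingMB);   // prints 2048
      }
    }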
+ 23 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java

@@ -57,7 +57,7 @@ import com.google.common.collect.Multiset;
  */
 @Private
 @Unstable
-public abstract class SchedulerApplicationAttempt {
+public class SchedulerApplicationAttempt {
   
   private static final Log LOG = LogFactory
     .getLog(SchedulerApplicationAttempt.class);
@@ -91,7 +91,7 @@ public abstract class SchedulerApplicationAttempt {
   protected Map<Priority, Long> lastScheduledContainer =
       new HashMap<Priority, Long>();
 
-  protected final Queue queue;
+  protected Queue queue;
   protected boolean isStopped = false;
   
   protected final RMContext rmContext;
@@ -431,4 +431,25 @@ public abstract class SchedulerApplicationAttempt {
     this.appSchedulingInfo
       .transferStateFromPreviousAppSchedulingInfo(appAttempt.appSchedulingInfo);
   }
+  
+  public void move(Queue newQueue) {
+    QueueMetrics oldMetrics = queue.getMetrics();
+    QueueMetrics newMetrics = newQueue.getMetrics();
+    String user = getUser();
+    for (RMContainer liveContainer : liveContainers.values()) {
+      Resource resource = liveContainer.getContainer().getResource();
+      oldMetrics.releaseResources(user, 1, resource);
+      newMetrics.allocateResources(user, 1, resource, false);
+    }
+    for (Map<NodeId, RMContainer> map : reservedContainers.values()) {
+      for (RMContainer reservedContainer : map.values()) {
+        Resource resource = reservedContainer.getReservedResource();
+        oldMetrics.unreserveResource(user, resource);
+        newMetrics.reserveResource(user, resource);
+      }
+    }
+
+    appSchedulingInfo.move(newQueue);
+    this.queue = newQueue;
+  }  
 }

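For intuition, a self-contained toy model of the queue move performed above: charged usage is released from the source queue's counters and re-booked against the target, without re-deducting pending resources (hence the decrPending=false flag in the real call). All names and numbers below are hypothetical, not the QueueMetrics API.

    // Toy model of moving an application's charged usage between two queues.
    public class QueueMoveSketch {
      static class ToyQueueMetrics {
        long allocatedMB;
        void charge(long mb)  { allocatedMB += mb; }
        void release(long mb) { allocatedMB -= mb; }
      }

      public static void main(String[] args) {
        ToyQueueMetrics source = new ToyQueueMetrics();
        ToyQueueMetrics target = new ToyQueueMetrics();
        long[] liveContainersMB = {1024, 2048};   // hypothetical running containers

        for (long mb : liveContainersMB) {
          source.charge(mb);                      // usage accrued in the old queue
        }
        for (long mb : liveContainersMB) {        // the "move": re-book every container
          source.release(mb);
          target.charge(mb);
        }
        System.out.println(source.allocatedMB + " / " + target.allocatedMB); // 0 / 3072
      }
    }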
+ 9 - 9
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication;
@@ -54,11 +55,14 @@ public class FSLeafQueue extends FSQueue {
   private long lastTimeAtMinShare;
   private long lastTimeAtHalfFairShare;
   
+  private final ActiveUsersManager activeUsersManager;
+  
   public FSLeafQueue(String name, FairScheduler scheduler,
       FSParentQueue parent) {
     super(name, scheduler, parent);
     this.lastTimeAtMinShare = scheduler.getClock().getTime();
     this.lastTimeAtHalfFairShare = scheduler.getClock().getTime();
+    activeUsersManager = new ActiveUsersManager(getMetrics());
   }
   
   public void addApp(FSSchedulerApp app, boolean runnable) {
@@ -91,15 +95,6 @@
     }
   }
   
-  public void makeAppRunnable(AppSchedulable appSched) {
-    if (!nonRunnableAppScheds.remove(appSched)) {
-      throw new IllegalStateException("Can't make app runnable that does not " +
-      		"already exist in queue as non-runnable" + appSched);
-    }
-    
-    runnableAppScheds.add(appSched);
-  }
-  
   public Collection<AppSchedulable> getRunnableAppSchedulables() {
     return runnableAppScheds;
   }
@@ -254,4 +249,9 @@
   public int getNumRunnableApps() {
     return runnableAppScheds.size();
   }
+  
+  @Override
+  public ActiveUsersManager getActiveUsersManager() {
+    return activeUsersManager;
+  }
 }

+ 7 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java

@@ -33,7 +33,7 @@ import org.apache.hadoop.yarn.api.records.QueueACL;
 import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.util.resource.Resources;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager;
 
 @Private
 @Unstable
@@ -194,4 +194,10 @@ public class FSParentQueue extends FSQueue {
       childQueue.collectSchedulerApplications(apps);
     }
   }
+  
+  @Override
+  public ActiveUsersManager getActiveUsersManager() {
+    // Should never be called since all applications are submitted to LeafQueues
+    return null;
+  }
 }

Some files were not shown because too many files have changed in this diff