
Merging r1561802 through r1562961 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5698@1562964 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 11 years ago
parent
commit
bf0ffb6085
100 files changed with 2184 additions and 1371 deletions
  1. 2 5
      hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm
  2. 0 4
      hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm
  3. 133 102
      hadoop-common-project/hadoop-common/CHANGES.txt
  4. 7 0
      hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  5. 3 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
  6. 1 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  7. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java
  8. 11 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java
  9. 59 187
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
  10. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  11. 2 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
  12. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
  13. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java
  14. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java
  15. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
  16. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  17. 479 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java
  18. 9 97
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java
  19. 22 0
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  20. 2 3
      hadoop-common-project/hadoop-common/src/site/apt/CLIMiniCluster.apt.vm
  21. 1 3
      hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm
  22. 13 8
      hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm
  23. 10 8
      hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm
  24. 7 5
      hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm
  25. 0 2
      hadoop-common-project/hadoop-common/src/site/apt/InterfaceClassification.apt.vm
  26. 4 8
      hadoop-common-project/hadoop-common/src/site/apt/NativeLibraries.apt.vm
  27. 4 2
      hadoop-common-project/hadoop-common/src/site/apt/ServiceLevelAuth.apt.vm
  28. 236 144
      hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm
  29. 17 17
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java
  30. 3 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java
  31. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHtmlQuoting.java
  32. 27 38
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
  33. 16 16
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLifecycle.java
  34. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWebapps.java
  35. 3 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java
  36. 3 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
  37. 7 7
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java
  38. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/jmx/TestJMXJsonServlet.java
  39. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java
  40. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
  41. 3 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java
  42. 36 4
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java
  43. 236 214
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  44. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  45. 2 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
  46. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
  47. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  48. 3 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
  49. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
  50. 11 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
  51. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  52. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
  53. 9 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
  54. 10 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  55. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
  56. 8 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
  57. 49 26
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java
  58. 123 81
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm
  59. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm
  60. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm
  61. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm
  62. 41 41
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsDesign.apt.vm
  63. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsEditsViewer.apt.vm
  64. 3 5
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsImageViewer.apt.vm
  65. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm
  66. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm
  67. 44 33
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm
  68. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Hftp.apt.vm
  69. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ShortCircuitLocalReads.apt.vm
  70. 6 8
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
  71. 14 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
  72. 11 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
  73. 103 68
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
  74. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
  75. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java
  76. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
  77. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
  78. 78 69
      hadoop-mapreduce-project/CHANGES.txt
  79. 6 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
  80. 3 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
  81. 9 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
  82. 26 34
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
  83. 5 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
  84. 9 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java
  85. 22 8
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
  86. 1 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java
  87. 5 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
  88. 6 6
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
  89. 5 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
  90. 22 11
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
  91. 9 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr
  92. 7 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobCounter.java
  93. 2 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java
  94. 7 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
  95. 63 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobQueueChangeEvent.java
  96. 7 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/org/apache/hadoop/mapreduce/JobCounter.properties
  97. 3 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java
  98. 5 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
  99. 5 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java
  100. 40 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java

+ 2 - 5
hadoop-common-project/hadoop-auth/src/site/apt/Configuration.apt.vm

@@ -24,8 +24,7 @@ Configuration
 
 * Server Side Configuration Setup
 
-  The {{{./apidocs/org/apache/hadoop/auth/server/AuthenticationFilter.html}
-  AuthenticationFilter filter}} is Hadoop Auth's server side component.
+  The AuthenticationFilter filter is Hadoop Auth's server side component.
 
   This filter must be configured in front of all the web application resources
   that required authenticated requests. For example:
@@ -46,9 +45,7 @@ Configuration
     must start with the prefix. The default value is no prefix.
 
   * <<<[PREFIX.]type>>>: the authentication type keyword (<<<simple>>> or
-    <<<kerberos>>>) or a
-    {{{./apidocs/org/apache/hadoop/auth/server/AuthenticationHandler.html}
-    Authentication handler implementation}}.
+    <<<kerberos>>>) or a Authentication handler implementation.
 
   * <<<[PREFIX.]signature.secret>>>: The secret to SHA-sign the generated
     authentication tokens. If a secret is not provided a random secret is

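An editor-added illustration (not part of this patch) of the prefixed-property scheme the section above describes: properties such as <<<[PREFIX.]type>>> and <<<[PREFIX.]signature.secret>>> are collected and the prefix stripped before they are handed to the filter as init parameters, roughly mirroring what AuthenticationFilterInitializer does. The prefix string, class name and values below are made-up examples.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.conf.Configuration;

    public class PrefixedAuthConfigSketch {
      // Collect "<prefix>*" properties and strip the prefix so they can be
      // passed to AuthenticationFilter as filter init parameters.
      static Map<String, String> filterParams(Configuration conf, String prefix) {
        Map<String, String> params = new HashMap<String, String>();
        for (Map.Entry<String, String> entry : conf) {  // Configuration is Iterable
          String name = entry.getKey();
          if (name.startsWith(prefix)) {
            // e.g. "hadoop.http.authentication.type" -> "type"
            params.put(name.substring(prefix.length()), conf.get(name));
          }
        }
        return params;
      }

      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.set("hadoop.http.authentication.type", "kerberos");
        conf.set("hadoop.http.authentication.signature.secret", "example-secret");
        System.out.println(filterParams(conf, "hadoop.http.authentication."));
      }
    }
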
+ 0 - 4
hadoop-common-project/hadoop-auth/src/site/apt/index.apt.vm

@@ -52,7 +52,3 @@ Hadoop Auth, Java HTTP SPNEGO ${project.version}
 
   * {{{./BuildingIt.html}Building It}}
 
-  * {{{./apidocs/index.html}JavaDocs}}
-
-  * {{{./dependencies.html}Dependencies}}
-

+ 133 - 102
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -285,9 +285,6 @@ Trunk (Unreleased)
     HADOOP-9740. Fix FsShell '-text' command to be able to read Avro
     files stored in HDFS and other filesystems. (Allan Yan via cutting)
 
-    HDFS-5471. CacheAdmin -listPools fails when user lacks permissions to view
-    all pools (Andrew Wang via Colin Patrick McCabe)
-
     HADOOP-10044 Improve the javadoc of rpc code (sanjay Radia)
 
   OPTIMIZATIONS
@@ -302,11 +299,47 @@ Release 2.4.0 - UNRELEASED
 
   NEW FEATURES
 
+  IMPROVEMENTS
+
+    HADOOP-10139. Update and improve the Single Cluster Setup document.
+    (Akira Ajisaka via Arpit Agarwal)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 2.3.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
     HADOOP-8545. Filesystem Implementation for OpenStack Swift
     (Dmitry Mezhensky, David Dobbins, Stevel via stevel)
 
+  NEW FEATURES
+
   IMPROVEMENTS
 
+    HADOOP-10046. Print a log message when SSL is enabled.
+    (David S. Wang via wang)
+
+    HADOOP-10079. log a warning message if group resolution takes too long.
+    (cmccabe)
+
+    HADOOP-9623 Update jets3t dependency to 0.9.0.  (Amandeep Khurana via Colin
+    Patrick McCabe)
+
+    HADOOP-10132. RPC#stopProxy() should log the class of proxy when IllegalArgumentException 
+    is encountered (Ted yu via umamahesh)
+
+    HADOOP-10248. Property name should be included in the exception where property value 
+    is null (Akira AJISAKA via umamahesh)
+
+    HADOOP-10086. User document for authentication in secure cluster.
+    (Masatake Iwasaki via Arpit Agarwal)
+
+    HADOOP-10274 Lower the logging level from ERROR to WARN for UGI.doAs method
+    (Takeshi Miao via stack)
+
     HADOOP-9784. Add a builder for HttpServer. (Junping Du via llu)
 
     HADOOP 9871. Fix intermittent findbugs warnings in DefaultMetricsSystem.
@@ -427,8 +460,15 @@ Release 2.4.0 - UNRELEASED
     HADOOP-9652. Allow RawLocalFs#getFileLinkStatus to fill in the link owner
     and mode if requested. (Andrew Wang via Colin Patrick McCabe)
 
+    HADOOP-10305. Add "rpc.metrics.quantile.enable" and
+    "rpc.metrics.percentiles.intervals" to core-default.xml.
+    (Akira Ajisaka via wang)
+
   OPTIMIZATIONS
 
+    HADOOP-10142. Avoid groups lookup for unprivileged users such as "dr.who"
+    (vinay via cmccabe)
+
     HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn)
 
     HADOOP-10047. Add a direct-buffer based apis for compression. (Gopal V
@@ -444,6 +484,90 @@ Release 2.4.0 - UNRELEASED
 
   BUG FIXES
 
+    HADOOP-10028. Malformed ssl-server.xml.example. (Haohui Mai via jing9)
+
+    HADOOP-10030. FsShell -put/copyFromLocal should support Windows local path.
+    (Chuan Liu via cnauroth)
+
+    HADOOP-10031. FsShell -get/copyToLocal/moveFromLocal should support Windows
+    local path. (Chuan Liu via cnauroth)
+
+    HADOOP-10039. Add Hive to the list of projects using 
+    AbstractDelegationTokenSecretManager. (Haohui Mai via jing9)
+
+    HADOOP-10040. hadoop.cmd in UNIX format and would not run by default on
+    Windows. (cnauroth)
+
+    HADOOP-10055. FileSystemShell.apt.vm doc has typo "numRepicas".
+    (Akira Ajisaka via cnauroth)
+
+    HADOOP-10072. TestNfsExports#testMultiMatchers fails due to non-deterministic
+    timing around cache expiry check. (cnauroth)
+
+    HADOOP-9898. Set SO_KEEPALIVE on all our sockets. (todd via wang)
+
+    HADOOP-9478. Fix race conditions during the initialization of Configuration
+    related to deprecatedKeyMap (cmccabe)
+
+    HADOOP-9660. [WINDOWS] Powershell / cmd parses -Dkey=value from command line
+    as [-Dkey, value] which breaks GenericsOptionParser.
+    (Enis Soztutar via cnauroth)
+
+    HADOOP-10078. KerberosAuthenticator always does SPNEGO. (rkanter via tucu)
+
+    HADOOP-10110. hadoop-auth has a build break due to missing dependency.
+    (Chuan Liu via arp)
+
+    HADOOP-9114. After defined the dfs.checksum.type as the NULL, write file and hflush will 
+    through java.lang.ArrayIndexOutOfBoundsException (Sathish via umamahesh)
+
+    HADOOP-10130. RawLocalFS::LocalFSFileInputStream.pread does not track
+    FS::Statistics (Binglin Chang via Colin Patrick McCabe)
+
+    HDFS-5560. Trash configuration log statements prints incorrect units.
+    (Josh Elser via Andrew Wang)
+
+    HADOOP-10081. Client.setupIOStreams can leak socket resources on exception
+    or error (Tsuyoshi OZAWA via jlowe)
+
+    HADOOP-10087. UserGroupInformation.getGroupNames() fails to return primary
+    group first when JniBasedUnixGroupsMappingWithFallback is used (cmccabe)
+
+    HADOOP-10175. Har files system authority should preserve userinfo.
+    (Chuan Liu via cnauroth)
+
+    HADOOP-10090. Jobtracker metrics not updated properly after execution
+    of a mapreduce job. (ivanmi)
+
+    HADOOP-10193. hadoop-auth's PseudoAuthenticationHandler can consume getInputStream. 
+    (gchanan via tucu)
+
+    HADOOP-10178. Configuration deprecation always emit "deprecated" warnings
+    when a new key is used. (Shanyu Zhao via cnauroth)
+
+    HADOOP-10234. "hadoop.cmd jar" does not propagate exit code. (cnauroth)
+
+    HADOOP-10240. Windows build instructions incorrectly state requirement of
+    protoc 2.4.1 instead of 2.5.0. (cnauroth)
+
+    HADOOP-10167. Mark hadoop-common source as UTF-8 in Maven pom files / refactoring
+    (Mikhail Antonov via cos)
+
+    HADOOP-9982. Fix dead links in hadoop site docs. (Akira Ajisaka via Arpit
+    Agarwal)
+
+    HADOOP-10212. Incorrect compile command in Native Library document.
+    (Akira Ajisaka via Arpit Agarwal)
+
+    HADOOP-9830. Fix typo at http://hadoop.apache.org/docs/current/
+    (Kousuke Saruta via Arpit Agarwal)
+
+    HADOOP-10255. Rename HttpServer to HttpServer2 to retain older 
+    HttpServer in branch-2 for compatibility. (Haohui Mai via suresh)
+
+    HADOOP-10291. TestSecurityUtil#testSocketAddrWithIP fails due to test
+    order dependency. (Mit Desai via Arpit Agarwal)
+
     HADOOP-9964. Fix deadlocks in TestHttpServer by synchronize
     ReflectionUtils.printThreadInfo. (Junping Du via llu)
 
@@ -459,7 +583,6 @@ Release 2.4.0 - UNRELEASED
     HADOOP-9865.  FileContext#globStatus has a regression with respect to
     relative path.  (Chuan Lin via Colin Patrick McCabe)
 
-
     HADOOP-9909. org.apache.hadoop.fs.Stat should permit other LANG.
     (Shinichi Yamashita via Andrew Wang)
 
@@ -539,106 +662,14 @@ Release 2.4.0 - UNRELEASED
     HADOOP-10203. Connection leak in
     Jets3tNativeFileSystemStore#retrieveMetadata. (Andrei Savu via atm)
 
-Release 2.3.0 - UNRELEASED
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-  IMPROVEMENTS
-
-    HADOOP-10046. Print a log message when SSL is enabled.
-    (David S. Wang via wang)
-
-    HADOOP-10079. log a warning message if group resolution takes too long.
-    (cmccabe)
-
-    HADOOP-9623 Update jets3t dependency to 0.9.0.  (Amandeep Khurana via Colin
-    Patrick McCabe)
-
-    HADOOP-10132. RPC#stopProxy() should log the class of proxy when IllegalArgumentException 
-    is encountered (Ted yu via umamahesh)
-
-    HADOOP-10248. Property name should be included in the exception where property value 
-    is null (Akira AJISAKA via umamahesh)
-
-  OPTIMIZATIONS
-
-    HADOOP-10142. Avoid groups lookup for unprivileged users such as "dr.who"
-    (vinay via cmccabe)
-
-  BUG FIXES
-
-    HADOOP-10028. Malformed ssl-server.xml.example. (Haohui Mai via jing9)
-
-    HADOOP-10030. FsShell -put/copyFromLocal should support Windows local path.
-    (Chuan Liu via cnauroth)
-
-    HADOOP-10031. FsShell -get/copyToLocal/moveFromLocal should support Windows
-    local path. (Chuan Liu via cnauroth)
-
-    HADOOP-10039. Add Hive to the list of projects using 
-    AbstractDelegationTokenSecretManager. (Haohui Mai via jing9)
-
-    HADOOP-10040. hadoop.cmd in UNIX format and would not run by default on
-    Windows. (cnauroth)
-
-    HADOOP-10055. FileSystemShell.apt.vm doc has typo "numRepicas".
-    (Akira Ajisaka via cnauroth)
-
-    HADOOP-10072. TestNfsExports#testMultiMatchers fails due to non-deterministic
-    timing around cache expiry check. (cnauroth)
-
-    HADOOP-9898. Set SO_KEEPALIVE on all our sockets. (todd via wang)
-
-    HADOOP-9478. Fix race conditions during the initialization of Configuration
-    related to deprecatedKeyMap (cmccabe)
-
-    HADOOP-9660. [WINDOWS] Powershell / cmd parses -Dkey=value from command line
-    as [-Dkey, value] which breaks GenericsOptionParser.
-    (Enis Soztutar via cnauroth)
-
-    HADOOP-10078. KerberosAuthenticator always does SPNEGO. (rkanter via tucu)
-
-    HADOOP-10110. hadoop-auth has a build break due to missing dependency.
-    (Chuan Liu via arp)
-
-    HADOOP-9114. After defined the dfs.checksum.type as the NULL, write file and hflush will 
-    through java.lang.ArrayIndexOutOfBoundsException (Sathish via umamahesh)
-
-    HADOOP-10130. RawLocalFS::LocalFSFileInputStream.pread does not track
-    FS::Statistics (Binglin Chang via Colin Patrick McCabe)
-
-    HDFS-5560. Trash configuration log statements prints incorrect units.
-    (Josh Elser via Andrew Wang)
-
-    HADOOP-10081. Client.setupIOStreams can leak socket resources on exception
-    or error (Tsuyoshi OZAWA via jlowe)
-
-    HADOOP-10087. UserGroupInformation.getGroupNames() fails to return primary
-    group first when JniBasedUnixGroupsMappingWithFallback is used (cmccabe)
-
-    HADOOP-10175. Har files system authority should preserve userinfo.
-    (Chuan Liu via cnauroth)
-
-    HADOOP-10090. Jobtracker metrics not updated properly after execution
-    of a mapreduce job. (ivanmi)
-
-    HADOOP-10193. hadoop-auth's PseudoAuthenticationHandler can consume getInputStream. 
-    (gchanan via tucu)
-
-    HADOOP-10178. Configuration deprecation always emit "deprecated" warnings
-    when a new key is used. (Shanyu Zhao via cnauroth)
-
-    HADOOP-10234. "hadoop.cmd jar" does not propagate exit code. (cnauroth)
+    HADOOP-10250. VersionUtil returns wrong value when comparing two versions.
+    (Yongjun Zhang via atm)
 
-    HADOOP-10240. Windows build instructions incorrectly state requirement of
-    protoc 2.4.1 instead of 2.5.0. (cnauroth)
-
-    HADOOP-10112. har file listing doesn't work with wild card. (brandonli)
+    HADOOP-10288. Explicit reference to Log4JLogger breaks non-log4j users
+    (todd)
 
-    HADOOP-10167. Mark hadoop-common source as UTF-8 in Maven pom files / refactoring
-    (Mikhail Antonov via cos)
+    HADOOP-10310. SaslRpcServer should be initialized even when no secret
+    manager present. (atm)
 
 Release 2.2.0 - 2013-10-13
 

+ 7 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -364,4 +364,11 @@
       <Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
     </Match>
 
+     <!-- code from maven source, null value is checked at callee side. -->
+     <Match>
+       <Class name="org.apache.hadoop.util.ComparableVersion$ListItem" />
+       <Method name="compareTo" />
+       <Bug code="NP" />
+     </Match>
+
 </FindBugsFilter>

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java

@@ -27,7 +27,7 @@ import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 
 /**
  * A servlet to print out the running configuration data.
@@ -47,7 +47,7 @@ public class ConfServlet extends HttpServlet {
    */
   private Configuration getConfFromContext() {
     Configuration conf = (Configuration)getServletContext().getAttribute(
-        HttpServer.CONF_CONTEXT_ATTRIBUTE);
+        HttpServer2.CONF_CONTEXT_ATTRIBUTE);
     assert conf != null;
     return conf;
   }
@@ -56,7 +56,7 @@ public class ConfServlet extends HttpServlet {
   public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws ServletException, IOException {
 
-    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+    if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
                                                    request, response)) {
       return;
     }

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -245,6 +245,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
 
   public static final String RPC_METRICS_QUANTILE_ENABLE =
       "rpc.metrics.quantile.enable";
+  public static final boolean RPC_METRICS_QUANTILE_ENABLE_DEFAULT = false;
   public static final String  RPC_METRICS_PERCENTILES_INTERVALS_KEY =
       "rpc.metrics.percentiles.intervals";
 }

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/AdminAuthorizedServlet.java

@@ -37,7 +37,7 @@ public class AdminAuthorizedServlet extends DefaultServlet {
   protected void doGet(HttpServletRequest request, HttpServletResponse response)
  throws ServletException, IOException {
     // Do the authorization
-    if (HttpServer.hasAdministratorAccess(getServletContext(), request,
+    if (HttpServer2.hasAdministratorAccess(getServletContext(), request,
         response)) {
       // Authorization is done. Just call super.
       super.doGet(request, response);

+ 11 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java

@@ -53,7 +53,17 @@ public class HttpRequestLog {
     String appenderName = name + "requestlog";
     Log logger = LogFactory.getLog(loggerName);
 
-    if (logger instanceof Log4JLogger) {
+    boolean isLog4JLogger;
+    try {
+      isLog4JLogger = logger instanceof Log4JLogger;
+    } catch (NoClassDefFoundError err) {
+      // In some dependent projects, log4j may not even be on the classpath at
+      // runtime, in which case the above instanceof check will throw
+      // NoClassDefFoundError.
+      LOG.debug("Could not load Log4JLogger class", err);
+      isLog4JLogger = false;
+    }
+    if (isLog4JLogger) {
       Log4JLogger httpLog4JLog = (Log4JLogger)logger;
       Logger httpLogger = httpLog4JLog.getLogger();
       Appender appender = null;

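The HttpRequestLog hunk above (part of HADOOP-10288) wraps the instanceof check so that callers without log4j on the runtime classpath get a debug message instead of a NoClassDefFoundError. A minimal standalone illustration of the same defensive pattern, added here by the editor and assuming only commons-logging is guaranteed to be present:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.commons.logging.impl.Log4JLogger;

    public class OptionalLog4jCheck {
      // True only if the logger is log4j-backed AND the Log4JLogger class can
      // actually be resolved at runtime; otherwise fall back quietly.
      static boolean isLog4jBacked(Log logger) {
        try {
          return logger instanceof Log4JLogger;
        } catch (NoClassDefFoundError err) {
          // log4j is treated as an optional dependency here
          return false;
        }
      }

      public static void main(String[] args) {
        Log log = LogFactory.getLog(OptionalLog4jCheck.class);
        System.out.println("log4j-backed logger: " + isLog4jBacked(log));
      }
    }
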
+ 59 - 187
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java

@@ -24,7 +24,6 @@ import java.io.PrintWriter;
 import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.net.URL;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -89,17 +88,19 @@ import com.google.common.collect.Lists;
 import com.sun.jersey.spi.container.servlet.ServletContainer;
 
 /**
- * Create a Jetty embedded server to answer http requests. The primary goal
- * is to serve up status information for the server.
- * There are three contexts:
- *   "/logs/" -> points to the log directory
- *   "/static/" -> points to common static files (src/webapps/static)
- *   "/" -> the jsp server code from (src/webapps/<name>)
+ * Create a Jetty embedded server to answer http requests. The primary goal is
+ * to serve up status information for the server. There are three contexts:
+ * "/logs/" -> points to the log directory "/static/" -> points to common static
+ * files (src/webapps/static) "/" -> the jsp server code from
+ * (src/webapps/<name>)
+ *
+ * This class is a fork of the old HttpServer. HttpServer exists for
+ * compatibility reasons. See HBASE-10336 for more details.
  */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "HBase"})
+@InterfaceAudience.Private
 @InterfaceStability.Evolving
-public class HttpServer implements FilterContainer {
-  public static final Log LOG = LogFactory.getLog(HttpServer.class);
+public final class HttpServer2 implements FilterContainer {
+  public static final Log LOG = LogFactory.getLog(HttpServer2.class);
 
   static final String FILTER_INITIALIZER_PROPERTY
       = "hadoop.http.filter.initializers";
@@ -166,11 +167,6 @@ public class HttpServer implements FilterContainer {
     // The -keypass option in keytool
     private String keyPassword;
 
-    @Deprecated
-    private String bindAddress;
-    @Deprecated
-    private int port = -1;
-
     private boolean findPort;
 
     private String hostName;
@@ -204,7 +200,7 @@ public class HttpServer implements FilterContainer {
       this.hostName = hostName;
       return this;
     }
-    
+
     public Builder trustStore(String location, String password, String type) {
       this.trustStore = location;
       this.trustStorePassword = password;
@@ -233,78 +229,51 @@ public class HttpServer implements FilterContainer {
       return this;
     }
 
-    /**
-     * Use addEndpoint() instead.
-     */
-    @Deprecated
-    public Builder setBindAddress(String bindAddress){
-      this.bindAddress = bindAddress;
-      return this;
-    }
-
-    /**
-     * Use addEndpoint() instead.
-     */
-    @Deprecated
-    public Builder setPort(int port) {
-      this.port = port;
-      return this;
-    }
-    
     public Builder setFindPort(boolean findPort) {
       this.findPort = findPort;
       return this;
     }
-    
+
     public Builder setConf(Configuration conf) {
       this.conf = conf;
       return this;
     }
-    
+
     public Builder setConnector(Connector connector) {
       this.connector = connector;
       return this;
     }
-    
+
     public Builder setPathSpec(String[] pathSpec) {
       this.pathSpecs = pathSpec;
       return this;
     }
-    
+
     public Builder setACL(AccessControlList acl) {
       this.adminsAcl = acl;
       return this;
     }
-    
+
     public Builder setSecurityEnabled(boolean securityEnabled) {
       this.securityEnabled = securityEnabled;
       return this;
     }
-    
+
     public Builder setUsernameConfKey(String usernameConfKey) {
       this.usernameConfKey = usernameConfKey;
       return this;
     }
-    
+
     public Builder setKeytabConfKey(String keytabConfKey) {
       this.keytabConfKey = keytabConfKey;
       return this;
     }
-    
-    public HttpServer build() throws IOException {
+
+    public HttpServer2 build() throws IOException {
       if (this.name == null) {
         throw new HadoopIllegalArgumentException("name is not set");
       }
 
-      // Make the behavior compatible with deprecated interfaces
-      if (bindAddress != null && port != -1) {
-        try {
-          endpoints.add(0, new URI("http", "", bindAddress, port, "", "", ""));
-        } catch (URISyntaxException e) {
-          throw new HadoopIllegalArgumentException("Invalid endpoint: "+ e);
-        }
-      }
-
       if (endpoints.size() == 0 && connector == null) {
         throw new HadoopIllegalArgumentException("No endpoints specified");
       }
@@ -313,12 +282,12 @@ public class HttpServer implements FilterContainer {
         hostName = endpoints.size() == 0 ? connector.getHost() : endpoints.get(
             0).getHost();
       }
-      
+
       if (this.conf == null) {
         conf = new Configuration();
       }
-      
-      HttpServer server = new HttpServer(this);
+
+      HttpServer2 server = new HttpServer2(this);
 
       if (this.securityEnabled) {
         server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey);
@@ -332,7 +301,7 @@ public class HttpServer implements FilterContainer {
         Connector listener = null;
         String scheme = ep.getScheme();
         if ("http".equals(scheme)) {
-          listener = HttpServer.createDefaultChannelConnector();
+          listener = HttpServer2.createDefaultChannelConnector();
         } else if ("https".equals(scheme)) {
           SslSocketConnector c = new SslSocketConnector();
           c.setNeedClientAuth(needsClientAuth);
@@ -363,105 +332,8 @@ public class HttpServer implements FilterContainer {
       return server;
     }
   }
-  
-  /** Same as this(name, bindAddress, port, findPort, null); */
-  @Deprecated
-  public HttpServer(String name, String bindAddress, int port, boolean findPort
-      ) throws IOException {
-    this(name, bindAddress, port, findPort, new Configuration());
-  }
 
-  @Deprecated
-  public HttpServer(String name, String bindAddress, int port,
-      boolean findPort, Configuration conf, Connector connector) throws IOException {
-    this(name, bindAddress, port, findPort, conf, null, connector, null);
-  }
-
-  /**
-   * Create a status server on the given port. Allows you to specify the
-   * path specifications that this server will be serving so that they will be
-   * added to the filters properly.  
-   * 
-   * @param name The name of the server
-   * @param bindAddress The address for this server
-   * @param port The port to use on the server
-   * @param findPort whether the server should start at the given port and 
-   *        increment by 1 until it finds a free port.
-   * @param conf Configuration 
-   * @param pathSpecs Path specifications that this httpserver will be serving. 
-   *        These will be added to any filters.
-   */
-  @Deprecated
-  public HttpServer(String name, String bindAddress, int port,
-      boolean findPort, Configuration conf, String[] pathSpecs) throws IOException {
-    this(name, bindAddress, port, findPort, conf, null, null, pathSpecs);
-  }
-  
-  /**
-   * Create a status server on the given port.
-   * The jsp scripts are taken from src/webapps/<name>.
-   * @param name The name of the server
-   * @param port The port to use on the server
-   * @param findPort whether the server should start at the given port and 
-   *        increment by 1 until it finds a free port.
-   * @param conf Configuration 
-   */
-  @Deprecated
-  public HttpServer(String name, String bindAddress, int port,
-      boolean findPort, Configuration conf) throws IOException {
-    this(name, bindAddress, port, findPort, conf, null, null, null);
-  }
-
-  @Deprecated
-  public HttpServer(String name, String bindAddress, int port,
-      boolean findPort, Configuration conf, AccessControlList adminsAcl) 
-      throws IOException {
-    this(name, bindAddress, port, findPort, conf, adminsAcl, null, null);
-  }
-
-  /**
-   * Create a status server on the given port.
-   * The jsp scripts are taken from src/webapps/<name>.
-   * @param name The name of the server
-   * @param bindAddress The address for this server
-   * @param port The port to use on the server
-   * @param findPort whether the server should start at the given port and 
-   *        increment by 1 until it finds a free port.
-   * @param conf Configuration 
-   * @param adminsAcl {@link AccessControlList} of the admins
-   */
-  @Deprecated
-  public HttpServer(String name, String bindAddress, int port,
-      boolean findPort, Configuration conf, AccessControlList adminsAcl, 
-      Connector connector) throws IOException {
-    this(name, bindAddress, port, findPort, conf, adminsAcl, connector, null);
-  }
-
-  /**
-   * Create a status server on the given port.
-   * The jsp scripts are taken from src/webapps/<name>.
-   * @param name The name of the server
-   * @param bindAddress The address for this server
-   * @param port The port to use on the server
-   * @param findPort whether the server should start at the given port and 
-   *        increment by 1 until it finds a free port.
-   * @param conf Configuration 
-   * @param adminsAcl {@link AccessControlList} of the admins
-   * @param connector A jetty connection listener
-   * @param pathSpecs Path specifications that this httpserver will be serving. 
-   *        These will be added to any filters.
-   */
-  @Deprecated
-  public HttpServer(String name, String bindAddress, int port,
-      boolean findPort, Configuration conf, AccessControlList adminsAcl, 
-      Connector connector, String[] pathSpecs) throws IOException {
-    this(new Builder().setName(name).hostName(bindAddress)
-        .addEndpoint(URI.create("http://" + bindAddress + ":" + port))
-        .setFindPort(findPort).setConf(conf).setACL(adminsAcl)
-        .setConnector(connector).setPathSpec(pathSpecs));
-  }
-
-  private HttpServer(final Builder b) throws IOException {
+  private HttpServer2(final Builder b) throws IOException {
     final String appDir = getWebAppsPath(b.name);
     this.webServer = new Server();
     this.adminsAcl = b.adminsAcl;
@@ -554,9 +426,9 @@ public class HttpServer implements FilterContainer {
    * listener.
    */
   public Connector createBaseListener(Configuration conf) throws IOException {
-    return HttpServer.createDefaultChannelConnector();
+    return HttpServer2.createDefaultChannelConnector();
   }
-  
+
   @InterfaceAudience.Private
   public static Connector createDefaultChannelConnector() {
     SelectChannelConnector ret = new SelectChannelConnector();
@@ -567,7 +439,7 @@ public class HttpServer implements FilterContainer {
     if(Shell.WINDOWS) {
       // result of setting the SO_REUSEADDR flag is different on Windows
       // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
-      // without this 2 NN's can start on the same machine and listen on 
+      // without this 2 NN's can start on the same machine and listen on
       // the same port with indeterminate routing of incoming requests to them
       ret.setReuseAddress(false);
     }
@@ -601,7 +473,7 @@ public class HttpServer implements FilterContainer {
    */
   protected void addDefaultApps(ContextHandlerCollection parent,
       final String appDir, Configuration conf) throws IOException {
-    // set up the context for "/logs/" if "hadoop.log.dir" property is defined. 
+    // set up the context for "/logs/" if "hadoop.log.dir" property is defined.
     String logDir = System.getProperty("hadoop.log.dir");
     if (logDir != null) {
       Context logContext = new Context(parent, "/logs");
@@ -628,7 +500,7 @@ public class HttpServer implements FilterContainer {
     setContextAttributes(staticContext, conf);
     defaultContexts.put(staticContext, true);
   }
-  
+
   private void setContextAttributes(Context context, Configuration conf) {
     context.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
     context.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
@@ -654,10 +526,10 @@ public class HttpServer implements FilterContainer {
   }
 
   /**
-   * Add a context 
+   * Add a context
    * @param pathSpec The path spec for the context
    * @param dir The directory containing the context
-   * @param isFiltered if true, the servlet is added to the filter path mapping 
+   * @param isFiltered if true, the servlet is added to the filter path mapping
    * @throws IOException
    */
   protected void addContext(String pathSpec, String dir, boolean isFiltered) throws IOException {
@@ -680,7 +552,7 @@ public class HttpServer implements FilterContainer {
     webAppContext.setAttribute(name, value);
   }
 
-  /** 
+  /**
    * Add a Jersey resource package.
    * @param packageName The Java package name containing the Jersey resource.
    * @param pathSpec The path spec for the servlet
@@ -709,11 +581,11 @@ public class HttpServer implements FilterContainer {
   }
 
   /**
-   * Add an internal servlet in the server. 
+   * Add an internal servlet in the server.
    * Note: This method is to be used for adding servlets that facilitate
    * internal communication and not for user facing functionality. For
-   * servlets added using this method, filters are not enabled. 
-   * 
+   * servlets added using this method, filters are not enabled.
+   *
    * @param name The name of the servlet (can be passed as null)
    * @param pathSpec The path spec for the servlet
    * @param clazz The servlet class
@@ -725,18 +597,18 @@ public class HttpServer implements FilterContainer {
 
   /**
    * Add an internal servlet in the server, specifying whether or not to
-   * protect with Kerberos authentication. 
+   * protect with Kerberos authentication.
    * Note: This method is to be used for adding servlets that facilitate
    * internal communication and not for user facing functionality. For
    +   * servlets added using this method, filters (except internal Kerberos
-   * filters) are not enabled. 
-   * 
+   * filters) are not enabled.
+   *
    * @param name The name of the servlet (can be passed as null)
    * @param pathSpec The path spec for the servlet
    * @param clazz The servlet class
    * @param requireAuth Require Kerberos authenticate to access servlet
    */
-  public void addInternalServlet(String name, String pathSpec, 
+  public void addInternalServlet(String name, String pathSpec,
       Class<? extends HttpServlet> clazz, boolean requireAuth) {
     ServletHolder holder = new ServletHolder(clazz);
     if (name != null) {
@@ -820,7 +692,7 @@ public class HttpServer implements FilterContainer {
       handler.addFilterMapping(fmap);
     }
   }
-  
+
   /**
    * Get the value in the webapp context.
    * @param name The name of the attribute
@@ -829,7 +701,7 @@ public class HttpServer implements FilterContainer {
   public Object getAttribute(String name) {
     return webAppContext.getAttribute(name);
   }
-  
+
   public WebAppContext getWebAppContext(){
     return this.webAppContext;
   }
@@ -842,7 +714,7 @@ public class HttpServer implements FilterContainer {
    */
   protected String getWebAppsPath(String appName) throws FileNotFoundException {
     URL url = getClass().getClassLoader().getResource("webapps/" + appName);
-    if (url == null) 
+    if (url == null)
       throw new FileNotFoundException("webapps/" + appName
           + " not found in CLASSPATH");
     String urlString = url.toString();
@@ -900,7 +772,7 @@ public class HttpServer implements FilterContainer {
       params.put("kerberos.keytab", httpKeytab);
     }
     params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
-  
+
     defineFilter(webAppContext, SPNEGO_FILTER,
                  AuthenticationFilter.class.getName(), params, null);
   }
@@ -987,7 +859,7 @@ public class HttpServer implements FilterContainer {
       }
     }
   }
-  
+
   /**
    * stop the server
    */
@@ -1105,7 +977,7 @@ public class HttpServer implements FilterContainer {
   /**
    * Does the user sending the HttpServletRequest has the administrator ACLs? If
    * it isn't the case, response will be modified to send an error to the user.
-   * 
+   *
    * @param servletContext
    * @param request
    * @param response used to send the error response if user does not have admin access.
@@ -1130,7 +1002,7 @@ public class HttpServer implements FilterContainer {
                          "authorized to access this page.");
       return false;
     }
-    
+
     if (servletContext.getAttribute(ADMINS_ACL) != null &&
         !userHasAdministratorAccess(servletContext, remoteUser)) {
       response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User "
@@ -1144,7 +1016,7 @@ public class HttpServer implements FilterContainer {
   /**
    * Get the admin ACLs from the given ServletContext and check if the given
    * user is in the ACL.
-   * 
+   *
    * @param servletContext the context containing the admin ACL.
    * @param remoteUser the remote user to check for.
    * @return true if the user is present in the ACL, false if no ACL is set or
@@ -1171,7 +1043,7 @@ public class HttpServer implements FilterContainer {
     @Override
     public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws ServletException, IOException {
-      if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+      if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
                                                      request, response)) {
         return;
       }
@@ -1179,10 +1051,10 @@ public class HttpServer implements FilterContainer {
       PrintWriter out = response.getWriter();
       ReflectionUtils.printThreadInfo(out, "");
       out.close();
-      ReflectionUtils.logThreadInfo(LOG, "jsp requested", 1);      
+      ReflectionUtils.logThreadInfo(LOG, "jsp requested", 1);
     }
   }
-  
+
   /**
    * A Servlet input filter that quotes all HTML active characters in the
    * parameter names and values. The goal is to quote the characters to make
@@ -1197,7 +1069,7 @@ public class HttpServer implements FilterContainer {
         super(rawRequest);
         this.rawRequest = rawRequest;
       }
-      
+
       /**
        * Return the set of parameter names, quoting each name.
        */
@@ -1218,7 +1090,7 @@ public class HttpServer implements FilterContainer {
           }
         };
       }
-      
+
       /**
        * Unquote the name and quote the value.
        */
@@ -1227,7 +1099,7 @@ public class HttpServer implements FilterContainer {
         return HtmlQuoting.quoteHtmlChars(rawRequest.getParameter
                                      (HtmlQuoting.unquoteHtmlChars(name)));
       }
-      
+
       @Override
       public String[] getParameterValues(String name) {
         String unquoteName = HtmlQuoting.unquoteHtmlChars(name);
@@ -1257,7 +1129,7 @@ public class HttpServer implements FilterContainer {
         }
         return result;
       }
-      
+
       /**
        * Quote the url so that users specifying the HOST HTTP header
        * can't inject attacks.
@@ -1267,7 +1139,7 @@ public class HttpServer implements FilterContainer {
         String url = rawRequest.getRequestURL().toString();
         return new StringBuffer(HtmlQuoting.quoteHtmlChars(url));
       }
-      
+
       /**
        * Quote the server name so that users specifying the HOST HTTP header
        * can't inject attacks.
@@ -1288,11 +1160,11 @@ public class HttpServer implements FilterContainer {
     }
 
     @Override
-    public void doFilter(ServletRequest request, 
+    public void doFilter(ServletRequest request,
                          ServletResponse response,
                          FilterChain chain
                          ) throws IOException, ServletException {
-      HttpServletRequestWrapper quoted = 
+      HttpServletRequestWrapper quoted =
         new RequestQuoter((HttpServletRequest) request);
       HttpServletResponse httpResponse = (HttpServletResponse) response;
 

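With the deprecated HttpServer constructors and setBindAddress()/setPort() removed, HttpServer2 instances are built only through the Builder and addEndpoint(URI). A rough editor-added usage sketch; it assumes a webapps/test resource is on the classpath, as in the test webapps used by HttpServerFunctionalTest:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer2;

    public class HttpServer2BuilderSketch {
      public static void main(String[] args) throws Exception {
        HttpServer2 server = new HttpServer2.Builder()
            .setName("test")                                // content served from webapps/<name>
            .addEndpoint(URI.create("http://localhost:0"))  // replaces setBindAddress()/setPort()
            .setFindPort(true)                              // probe upward until a free port is found
            .setConf(new Configuration())
            .build();
        server.start();
        // built-in servlets such as /conf, /jmx and /logLevel are now reachable
        server.stop();
      }
    }
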
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -2206,7 +2206,7 @@ public abstract class Server {
     // Create the responder here
     responder = new Responder();
     
-    if (secretManager != null) {
+    if (secretManager != null || UserGroupInformation.isSecurityEnabled()) {
       SaslRpcServer.init(conf);
     }
     

+ 2 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java

@@ -54,7 +54,8 @@ public class RpcMetrics {
     int[] intervals = conf.getInts(
         CommonConfigurationKeys.RPC_METRICS_PERCENTILES_INTERVALS_KEY);
     rpcQuantileEnable = (intervals.length > 0) && conf.getBoolean(
-        CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE, false);
+        CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE,
+        CommonConfigurationKeys.RPC_METRICS_QUANTILE_ENABLE_DEFAULT);
     if (rpcQuantileEnable) {
       rpcQueueTimeMillisQuantiles =
           new MutableQuantiles[intervals.length];

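The RpcMetrics hunk above swaps the hard-coded false for the new RPC_METRICS_QUANTILE_ENABLE_DEFAULT constant. A small editor-added sketch of how the two keys interact, using the same Configuration accessors; the interval values are arbitrary:

    import org.apache.hadoop.conf.Configuration;

    public class RpcQuantileConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration(false);
        conf.setBoolean("rpc.metrics.quantile.enable", true);
        conf.set("rpc.metrics.percentiles.intervals", "60,300,1440");

        // Quantile collection is active only when the flag is true AND at least
        // one rollover interval (in seconds) is configured.
        int[] intervals = conf.getInts("rpc.metrics.percentiles.intervals");
        boolean quantileEnable = intervals.length > 0
            && conf.getBoolean("rpc.metrics.quantile.enable", false);
        System.out.println("rpc quantiles enabled: " + quantileEnable);
      }
    }
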
+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java

@@ -46,7 +46,7 @@ import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.codehaus.jackson.JsonFactory;
 import org.codehaus.jackson.JsonGenerator;
 
@@ -154,7 +154,7 @@ public class JMXJsonServlet extends HttpServlet {
   @Override
   public void doGet(HttpServletRequest request, HttpServletResponse response) {
     try {
-      if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+      if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
                                                      request, response)) {
         return;
       }

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java

@@ -28,7 +28,7 @@ import org.apache.commons.logging.*;
 import org.apache.commons.logging.impl.*;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.util.ServletUtil;
 
 /**
@@ -93,7 +93,7 @@ public class LogLevel {
         ) throws ServletException, IOException {
 
       // Do the authorization
-      if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
+      if (!HttpServer2.hasAdministratorAccess(getServletContext(), request,
           response)) {
         return;
       }

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java

@@ -32,7 +32,7 @@ import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.metrics.spi.OutputRecord;
 import org.apache.hadoop.metrics.spi.AbstractMetricsContext.MetricMap;
 import org.apache.hadoop.metrics.spi.AbstractMetricsContext.TagMap;
@@ -108,7 +108,7 @@ public class MetricsServlet extends HttpServlet {
   public void doGet(HttpServletRequest request, HttpServletResponse response)
       throws ServletException, IOException {
 
-    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+    if (!HttpServer2.isInstrumentationAccessAllowed(getServletContext(),
                                                    request, response)) {
       return;
     }

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.security;
 
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
@@ -94,7 +94,7 @@ public class AuthenticationFilterInitializer extends FilterInitializer {
     }
 
     //Resolve _HOST into bind address
-    String bindAddress = conf.get(HttpServer.BIND_ADDRESS);
+    String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
     String principal = filterConfig.get(KerberosAuthenticationHandler.PRINCIPAL);
     if (principal != null) {
       try {

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -1560,7 +1560,7 @@ public class UserGroupInformation {
       return Subject.doAs(subject, action);
     } catch (PrivilegedActionException pae) {
       Throwable cause = pae.getCause();
-      LOG.error("PriviledgedActionException as:"+this+" cause:"+cause);
+      LOG.warn("PriviledgedActionException as:"+this+" cause:"+cause);
       if (cause instanceof IOException) {
         throw (IOException) cause;
       } else if (cause instanceof Error) {

+ 479 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ComparableVersion.java

@@ -0,0 +1,479 @@
+// Code source of this file: 
+//   http://grepcode.com/file/repo1.maven.org/maven2/
+//     org.apache.maven/maven-artifact/3.1.1/
+//       org/apache/maven/artifact/versioning/ComparableVersion.java/
+//
+// Modifications made on top of the source:
+//   1. Changed
+//        package org.apache.maven.artifact.versioning;
+//      to
+//        package org.apache.hadoop.util;
+//   2. Removed author tags to clear hadoop author tag warning
+//        author <a href="mailto:kenney@apache.org">Kenney Westerhof</a>
+//        author <a href="mailto:hboutemy@apache.org">Hervé Boutemy</a>
+//
+package org.apache.hadoop.util;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Locale;
+import java.util.Properties;
+import java.util.Stack;
+
+/**
+ * Generic implementation of version comparison.
+ * 
+ * <p>Features:
+ * <ul>
+ * <li>mixing of '<code>-</code>' (dash) and '<code>.</code>' (dot) separators,</li>
+ * <li>transition between characters and digits also constitutes a separator:
+ *     <code>1.0alpha1 =&gt; [1, 0, alpha, 1]</code></li>
+ * <li>unlimited number of version components,</li>
+ * <li>version components in the text can be digits or strings,</li>
+ * <li>strings are checked for well-known qualifiers and the qualifier ordering is used for version ordering.
+ *     Well-known qualifiers (case insensitive) are:<ul>
+ *     <li><code>alpha</code> or <code>a</code></li>
+ *     <li><code>beta</code> or <code>b</code></li>
+ *     <li><code>milestone</code> or <code>m</code></li>
+ *     <li><code>rc</code> or <code>cr</code></li>
+ *     <li><code>snapshot</code></li>
+ *     <li><code>(the empty string)</code> or <code>ga</code> or <code>final</code></li>
+ *     <li><code>sp</code></li>
+ *     </ul>
+ *     Unknown qualifiers are considered after known qualifiers, with lexical order (always case insensitive),
+ *   </li>
+ * <li>a dash usually precedes a qualifier, and is always less important than something preceded with a dot.</li>
+ * </ul></p>
+ *
+ * @see <a href="https://cwiki.apache.org/confluence/display/MAVENOLD/Versioning">"Versioning" on Maven Wiki</a>
+ */
+public class ComparableVersion
+    implements Comparable<ComparableVersion>
+{
+    private String value;
+
+    private String canonical;
+
+    private ListItem items;
+
+    private interface Item
+    {
+        int INTEGER_ITEM = 0;
+        int STRING_ITEM = 1;
+        int LIST_ITEM = 2;
+
+        int compareTo( Item item );
+
+        int getType();
+
+        boolean isNull();
+    }
+
+    /**
+     * Represents a numeric item in the version item list.
+     */
+    private static class IntegerItem
+        implements Item
+    {
+        private static final BigInteger BIG_INTEGER_ZERO = new BigInteger( "0" );
+
+        private final BigInteger value;
+
+        public static final IntegerItem ZERO = new IntegerItem();
+
+        private IntegerItem()
+        {
+            this.value = BIG_INTEGER_ZERO;
+        }
+
+        public IntegerItem( String str )
+        {
+            this.value = new BigInteger( str );
+        }
+
+        public int getType()
+        {
+            return INTEGER_ITEM;
+        }
+
+        public boolean isNull()
+        {
+            return BIG_INTEGER_ZERO.equals( value );
+        }
+
+        public int compareTo( Item item )
+        {
+            if ( item == null )
+            {
+                return BIG_INTEGER_ZERO.equals( value ) ? 0 : 1; // 1.0 == 1, 1.1 > 1
+            }
+
+            switch ( item.getType() )
+            {
+                case INTEGER_ITEM:
+                    return value.compareTo( ( (IntegerItem) item ).value );
+
+                case STRING_ITEM:
+                    return 1; // 1.1 > 1-sp
+
+                case LIST_ITEM:
+                    return 1; // 1.1 > 1-1
+
+                default:
+                    throw new RuntimeException( "invalid item: " + item.getClass() );
+            }
+        }
+
+        public String toString()
+        {
+            return value.toString();
+        }
+    }
+
+    /**
+     * Represents a string in the version item list, usually a qualifier.
+     */
+    private static class StringItem
+        implements Item
+    {
+        private static final String[] QUALIFIERS = { "alpha", "beta", "milestone", "rc", "snapshot", "", "sp" };
+
+        private static final List<String> _QUALIFIERS = Arrays.asList( QUALIFIERS );
+
+        private static final Properties ALIASES = new Properties();
+        static
+        {
+            ALIASES.put( "ga", "" );
+            ALIASES.put( "final", "" );
+            ALIASES.put( "cr", "rc" );
+        }
+
+        /**
+         * A comparable value for the empty-string qualifier. This one is used to determine if a given qualifier makes
+         * the version older than one without a qualifier, or more recent.
+         */
+        private static final String RELEASE_VERSION_INDEX = String.valueOf( _QUALIFIERS.indexOf( "" ) );
+
+        private String value;
+
+        public StringItem( String value, boolean followedByDigit )
+        {
+            if ( followedByDigit && value.length() == 1 )
+            {
+                // a1 = alpha-1, b1 = beta-1, m1 = milestone-1
+                switch ( value.charAt( 0 ) )
+                {
+                    case 'a':
+                        value = "alpha";
+                        break;
+                    case 'b':
+                        value = "beta";
+                        break;
+                    case 'm':
+                        value = "milestone";
+                        break;
+                }
+            }
+            this.value = ALIASES.getProperty( value , value );
+        }
+
+        public int getType()
+        {
+            return STRING_ITEM;
+        }
+
+        public boolean isNull()
+        {
+            return ( comparableQualifier( value ).compareTo( RELEASE_VERSION_INDEX ) == 0 );
+        }
+
+        /**
+         * Returns a comparable value for a qualifier.
+         *
+         * This method takes into account the ordering of known qualifiers then unknown qualifiers with lexical ordering.
+         *
+         * just returning an Integer with the index here is faster, but requires a lot of if/then/else to check for -1
+         * or QUALIFIERS.size and then resort to lexical ordering. Most comparisons are decided by the first character,
+         * so this is still fast. If more characters are needed then it requires a lexical sort anyway.
+         *
+         * @param qualifier
+         * @return an equivalent value that can be used with lexical comparison
+         */
+        public static String comparableQualifier( String qualifier )
+        {
+            int i = _QUALIFIERS.indexOf( qualifier );
+
+            return i == -1 ? ( _QUALIFIERS.size() + "-" + qualifier ) : String.valueOf( i );
+        }
+
+        public int compareTo( Item item )
+        {
+            if ( item == null )
+            {
+                // 1-rc < 1, 1-ga > 1
+                return comparableQualifier( value ).compareTo( RELEASE_VERSION_INDEX );
+            }
+            switch ( item.getType() )
+            {
+                case INTEGER_ITEM:
+                    return -1; // 1.any < 1.1 ?
+
+                case STRING_ITEM:
+                    return comparableQualifier( value ).compareTo( comparableQualifier( ( (StringItem) item ).value ) );
+
+                case LIST_ITEM:
+                    return -1; // 1.any < 1-1
+
+                default:
+                    throw new RuntimeException( "invalid item: " + item.getClass() );
+            }
+        }
+
+        public String toString()
+        {
+            return value;
+        }
+    }
+
+    /**
+     * Represents a version list item. This class is used both for the global item list and for sub-lists (which start
+     * with '-(number)' in the version specification).
+     */
+    private static class ListItem
+        extends ArrayList<Item>
+        implements Item
+    {
+        public int getType()
+        {
+            return LIST_ITEM;
+        }
+
+        public boolean isNull()
+        {
+            return ( size() == 0 );
+        }
+
+        void normalize()
+        {
+            for ( ListIterator<Item> iterator = listIterator( size() ); iterator.hasPrevious(); )
+            {
+                Item item = iterator.previous();
+                if ( item.isNull() )
+                {
+                    iterator.remove(); // remove null trailing items: 0, "", empty list
+                }
+                else
+                {
+                    break;
+                }
+            }
+        }
+
+        public int compareTo( Item item )
+        {
+            if ( item == null )
+            {
+                if ( size() == 0 )
+                {
+                    return 0; // 1-0 = 1- (normalize) = 1
+                }
+                Item first = get( 0 );
+                return first.compareTo( null );
+            }
+            switch ( item.getType() )
+            {
+                case INTEGER_ITEM:
+                    return -1; // 1-1 < 1.0.x
+
+                case STRING_ITEM:
+                    return 1; // 1-1 > 1-sp
+
+                case LIST_ITEM:
+                    Iterator<Item> left = iterator();
+                    Iterator<Item> right = ( (ListItem) item ).iterator();
+
+                    while ( left.hasNext() || right.hasNext() )
+                    {
+                        Item l = left.hasNext() ? left.next() : null;
+                        Item r = right.hasNext() ? right.next() : null;
+
+                        // if this is shorter, then invert the compare and mul with -1
+                        int result = l == null ? -1 * r.compareTo( l ) : l.compareTo( r );
+                        
+                        if ( result != 0 )
+                        {
+                            return result;
+                        }
+                    }
+
+                    return 0;
+
+                default:
+                    throw new RuntimeException( "invalid item: " + item.getClass() );
+            }
+        }
+
+        public String toString()
+        {
+            StringBuilder buffer = new StringBuilder( "(" );
+            for ( Iterator<Item> iter = iterator(); iter.hasNext(); )
+            {
+                buffer.append( iter.next() );
+                if ( iter.hasNext() )
+                {
+                    buffer.append( ',' );
+                }
+            }
+            buffer.append( ')' );
+            return buffer.toString();
+        }
+    }
+
+    public ComparableVersion( String version )
+    {
+        parseVersion( version );
+    }
+
+    public final void parseVersion( String version )
+    {
+        this.value = version;
+
+        items = new ListItem();
+
+        version = version.toLowerCase( Locale.ENGLISH );
+
+        ListItem list = items;
+
+        Stack<Item> stack = new Stack<Item>();
+        stack.push( list );
+
+        boolean isDigit = false;
+
+        int startIndex = 0;
+
+        for ( int i = 0; i < version.length(); i++ )
+        {
+            char c = version.charAt( i );
+
+            if ( c == '.' )
+            {
+                if ( i == startIndex )
+                {
+                    list.add( IntegerItem.ZERO );
+                }
+                else
+                {
+                    list.add( parseItem( isDigit, version.substring( startIndex, i ) ) );
+                }
+                startIndex = i + 1;
+            }
+            else if ( c == '-' )
+            {
+                if ( i == startIndex )
+                {
+                    list.add( IntegerItem.ZERO );
+                }
+                else
+                {
+                    list.add( parseItem( isDigit, version.substring( startIndex, i ) ) );
+                }
+                startIndex = i + 1;
+
+                if ( isDigit )
+                {
+                    list.normalize(); // 1.0-* = 1-*
+
+                    if ( ( i + 1 < version.length() ) && Character.isDigit( version.charAt( i + 1 ) ) )
+                    {
+                        // new ListItem only if previous were digits and new char is a digit,
+                        // ie need to differentiate only 1.1 from 1-1
+                        list.add( list = new ListItem() );
+
+                        stack.push( list );
+                    }
+                }
+            }
+            else if ( Character.isDigit( c ) )
+            {
+                if ( !isDigit && i > startIndex )
+                {
+                    list.add( new StringItem( version.substring( startIndex, i ), true ) );
+                    startIndex = i;
+                }
+
+                isDigit = true;
+            }
+            else
+            {
+                if ( isDigit && i > startIndex )
+                {
+                    list.add( parseItem( true, version.substring( startIndex, i ) ) );
+                    startIndex = i;
+                }
+
+                isDigit = false;
+            }
+        }
+
+        if ( version.length() > startIndex )
+        {
+            list.add( parseItem( isDigit, version.substring( startIndex ) ) );
+        }
+
+        while ( !stack.isEmpty() )
+        {
+            list = (ListItem) stack.pop();
+            list.normalize();
+        }
+
+        canonical = items.toString();
+    }
+
+    private static Item parseItem( boolean isDigit, String buf )
+    {
+        return isDigit ? new IntegerItem( buf ) : new StringItem( buf, false );
+    }
+
+    public int compareTo( ComparableVersion o )
+    {
+        return items.compareTo( o.items );
+    }
+
+    public String toString()
+    {
+        return value;
+    }
+
+    public boolean equals( Object o )
+    {
+        return ( o instanceof ComparableVersion ) && canonical.equals( ( (ComparableVersion) o ).canonical );
+    }
+
+    public int hashCode()
+    {
+        return canonical.hashCode();
+    }
+}
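
  As a minimal usage sketch of the ordering described in the class javadoc above (the wrapper class name ComparableVersionExample below is hypothetical and exists only for illustration):

    import org.apache.hadoop.util.ComparableVersion;

    public class ComparableVersionExample {
      public static void main(String[] args) {
        // Qualified releases precede the unqualified release: alpha < beta < milestone < rc < "".
        System.out.println(new ComparableVersion("2.3.0-alpha").compareTo(
            new ComparableVersion("2.3.0")) < 0);                       // true
        // Numeric components compare numerically, not lexically.
        System.out.println(new ComparableVersion("0.3").compareTo(
            new ComparableVersion("0.20")) < 0);                        // true
        // "a1" is shorthand for "alpha-1", so 1.0a1 => [1, 0, alpha, 1] and precedes 1.0.
        System.out.println(new ComparableVersion("1.0a1").compareTo(
            new ComparableVersion("1.0")) < 0);                         // true
      }
    }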

+ 9 - 97
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java

@@ -17,55 +17,17 @@
  */
 package org.apache.hadoop.util;
 
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 
-import com.google.common.collect.ComparisonChain;
-
+/**
+ * A wrapper class around Maven's ComparableVersion class, to comply
+ * with Maven's version name string convention.
+ */
 @InterfaceAudience.Private
 public abstract class VersionUtil {
-  
-  private static final Pattern COMPONENT_GROUPS = Pattern.compile("(\\d+)|(\\D+)");
-
   /**
-   * Suffix added by maven for nightly builds and other snapshot releases.
-   * These releases are considered to precede the non-SNAPSHOT version
-   * with the same version number.
-   */
-  private static final String SNAPSHOT_SUFFIX = "-SNAPSHOT";
-
-  /**
-   * This function splits the two versions on &quot;.&quot; and performs a
-   * naturally-ordered comparison of the resulting components. For example, the
-   * version string "0.3" is considered to precede "0.20", despite the fact that
-   * lexical comparison would consider "0.20" to precede "0.3". This method of
-   * comparison is similar to the method used by package versioning systems like
-   * deb and RPM.
-   * 
-   * Version components are compared numerically whenever possible, however a
-   * version component can contain non-numeric characters. When a non-numeric
-   * group of characters is found in a version component, this group is compared
-   * with the similarly-indexed group in the other version component. If the
-   * other group is numeric, then the numeric group is considered to precede the
-   * non-numeric group. If both groups are non-numeric, then a lexical
-   * comparison is performed.
-   * 
-   * If two versions have a different number of components, then only the lower
-   * number of components are compared. If those components are identical
-   * between the two versions, then the version with fewer components is
-   * considered to precede the version with more components.
-   * 
-   * In addition to the above rules, there is one special case: maven SNAPSHOT
-   * releases are considered to precede a non-SNAPSHOT release with an
-   * otherwise identical version number. For example, 2.0-SNAPSHOT precedes
-   * 2.0.
-   * 
-   * This function returns a negative integer if version1 precedes version2, a
-   * positive integer if version2 precedes version1, and 0 if and only if the
-   * two versions' components are identical in value and cardinality.
-   * 
+   * Compares two version name strings using Maven's ComparableVersion class.
+   *
    * @param version1
    *          the first version to compare
    * @param version2
@@ -75,58 +37,8 @@ public abstract class VersionUtil {
    *         versions are equal.
    */
   public static int compareVersions(String version1, String version2) {
-    boolean isSnapshot1 = version1.endsWith(SNAPSHOT_SUFFIX);
-    boolean isSnapshot2 = version2.endsWith(SNAPSHOT_SUFFIX);
-    version1 = stripSnapshotSuffix(version1);
-    version2 = stripSnapshotSuffix(version2);
-    
-    String[] version1Parts = version1.split("\\.");
-    String[] version2Parts = version2.split("\\.");
-    
-    for (int i = 0; i < version1Parts.length && i < version2Parts.length; i++) {
-      String component1 = version1Parts[i];
-      String component2 = version2Parts[i];
-      if (!component1.equals(component2)) {
-        Matcher matcher1 = COMPONENT_GROUPS.matcher(component1);
-        Matcher matcher2 = COMPONENT_GROUPS.matcher(component2);
-        
-        while (matcher1.find() && matcher2.find()) {
-          String group1 = matcher1.group();
-          String group2 = matcher2.group();
-          if (!group1.equals(group2)) {
-            if (isNumeric(group1) && isNumeric(group2)) {
-              return Integer.parseInt(group1) - Integer.parseInt(group2);
-            } else if (!isNumeric(group1) && !isNumeric(group2)) {
-              return group1.compareTo(group2);
-            } else {
-              return isNumeric(group1) ? -1 : 1;
-            }
-          }
-        }
-        return component1.length() - component2.length();
-      }
-    }
-    
-    return ComparisonChain.start()
-      .compare(version1Parts.length, version2Parts.length)
-      .compare(isSnapshot2, isSnapshot1)
-      .result();
-  }
-  
-  private static String stripSnapshotSuffix(String version) {
-    if (version.endsWith(SNAPSHOT_SUFFIX)) {
-      return version.substring(0, version.length() - SNAPSHOT_SUFFIX.length());
-    } else {
-      return version;
-    }
-  }
-
-  private static boolean isNumeric(String s) {
-    try {
-      Integer.parseInt(s);
-      return true;
-    } catch (NumberFormatException nfe) {
-      return false;
-    }
+    ComparableVersion v1 = new ComparableVersion(version1);
+    ComparableVersion v2 = new ComparableVersion(version2);
+    return v1.compareTo(v2);
   }
 }
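
  A minimal sketch of the delegated behavior (the class name VersionUtilSketch is hypothetical, for illustration only): SNAPSHOT is one of Maven's known qualifiers, so snapshot builds still precede the corresponding release, and numeric components still compare numerically.

    import org.apache.hadoop.util.VersionUtil;

    public class VersionUtilSketch {
      public static void main(String[] args) {
        // SNAPSHOT is a known Maven qualifier, so it precedes the release with the same number.
        System.out.println(VersionUtil.compareVersions("2.0.0-SNAPSHOT", "2.0.0") < 0); // true
        // Numeric components compare numerically: 0.3 precedes 0.20.
        System.out.println(VersionUtil.compareVersions("0.3", "0.20") < 0);             // true
        // Identical version strings compare as equal.
        System.out.println(VersionUtil.compareVersions("2.0.0", "2.0.0") == 0);         // true
      }
    }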

+ 22 - 0
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -1275,4 +1275,26 @@
     Default, "dr.who=;" will consider "dr.who" as user without groups.
   </description>
 </property>
+
+<property>
+  <name>rpc.metrics.quantile.enable</name>
+  <value>false</value>
+  <description>
+    When this property is set to true and rpc.metrics.percentiles.intervals
+    is set to a comma-separated list of intervals in seconds, the
+    50/75/90/95/99th percentile latencies for rpc queue/processing time in
+    milliseconds are added to rpc metrics.
+  </description>
+</property>
+
+<property>
+  <name>rpc.metrics.percentiles.intervals</name>
+  <value></value>
+  <description>
+    A comma-separated list of interval granularities, in seconds, for the
+    metrics that describe the 50/75/90/95/99th percentile latency for rpc
+    queue/processing time. The metrics are output if
+    rpc.metrics.quantile.enable is set to true.
+  </description>
+</property>
 </configuration>
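
  A brief sketch of how these two keys might be set programmatically on a Configuration before an RPC server is created; the helper class RpcQuantileConfigSketch and the interval values 60 and 300 are illustrative assumptions, not defaults.

    import org.apache.hadoop.conf.Configuration;

    public class RpcQuantileConfigSketch {
      public static Configuration withRpcQuantiles() {
        Configuration conf = new Configuration();
        // Enable 50/75/90/95/99th percentile latency metrics for RPC queue/processing time.
        conf.setBoolean("rpc.metrics.quantile.enable", true);
        // Estimate the percentiles over rolling 60-second and 300-second windows (example values).
        conf.set("rpc.metrics.percentiles.intervals", "60,300");
        return conf;
      }
    }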

+ 2 - 3
hadoop-common-project/hadoop-common/src/site/apt/CLIMiniCluster.apt.vm

@@ -18,8 +18,6 @@
 
 Hadoop MapReduce Next Generation - CLI MiniCluster.
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * {Purpose}
@@ -42,7 +40,8 @@ Hadoop MapReduce Next Generation - CLI MiniCluster.
 $ mvn clean install -DskipTests
 $ mvn package -Pdist -Dtar -DskipTests -Dmaven.javadoc.skip
 +---+
-  <<NOTE:>> You will need protoc 2.5.0 installed.
+  <<NOTE:>> You will need {{{http://code.google.com/p/protobuf/}protoc 2.5.0}}
+            installed.
 
   The tarball should be available in <<<hadoop-dist/target/>>> directory. 
 

+ 1 - 3
hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm

@@ -16,8 +16,6 @@
   ---
   ${maven.build.timestamp}
 
-  \[ {{{../index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 Hadoop MapReduce Next Generation - Cluster Setup
@@ -29,7 +27,7 @@ Hadoop MapReduce Next Generation - Cluster Setup
   with thousands of nodes.
 
   To play with Hadoop, you may first want to install it on a single
-  machine (see {{{SingleCluster}Single Node Setup}}).
+  machine (see {{{./SingleCluster.html}Single Node Setup}}).
 
 * {Prerequisites}
 

+ 13 - 8
hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm

@@ -44,8 +44,9 @@ Overview
 Generic Options
 
    The following options are supported by {{dfsadmin}}, {{fs}}, {{fsck}},
-   {{job}} and {{fetchdt}}. Applications should implement {{{some_useful_url}Tool}} to support
-   {{{another_useful_url}GenericOptions}}.
+   {{job}} and {{fetchdt}}. Applications should implement 
+   {{{../../api/org/apache/hadoop/util/Tool.html}Tool}} to support
+   GenericOptions.
 
 *------------------------------------------------+-----------------------------+
 ||            GENERIC_OPTION                     ||            Description
@@ -123,7 +124,8 @@ User Commands
 
 * <<<fsck>>>
 
-   Runs a HDFS filesystem checking utility. See {{Fsck}} for more info.
+   Runs an HDFS filesystem checking utility.
+   See {{{../hadoop-hdfs/HdfsUserGuide.html#fsck}fsck}} for more info.
 
    Usage: <<<hadoop fsck [GENERIC_OPTIONS] <path> [-move | -delete | -openforwrite] [-files [-blocks [-locations | -racks]]]>>>
 
@@ -149,7 +151,8 @@ User Commands
 
 * <<<fetchdt>>>
 
-   Gets Delegation Token from a NameNode. See {{fetchdt}} for more info.
+   Gets Delegation Token from a NameNode.
+   See {{{../hadoop-hdfs/HdfsUserGuide.html#fetchdt}fetchdt}} for more info.
 
    Usage: <<<hadoop fetchdt [GENERIC_OPTIONS] [--webservice <namenode_http_addr>] <path> >>>
 
@@ -302,7 +305,8 @@ Administration Commands
 * <<<balancer>>>
 
    Runs a cluster balancing utility. An administrator can simply press Ctrl-C
-   to stop the rebalancing process. See Rebalancer for more details.
+   to stop the rebalancing process. See
+   {{{../hadoop-hdfs/HdfsUserGuide.html#Rebalancer}Rebalancer}} for more details.
 
    Usage: <<<hadoop balancer [-threshold <threshold>]>>>
 
@@ -445,7 +449,7 @@ Administration Commands
 * <<<namenode>>>
 
    Runs the namenode. More info about the upgrade, rollback and finalize is
-   at Upgrade Rollback
+   at {{{../hadoop-hdfs/HdfsUserGuide.html#Upgrade_and_Rollback}Upgrade Rollback}}.
 
    Usage: <<<hadoop namenode [-format] | [-upgrade] | [-rollback] | [-finalize] | [-importCheckpoint]>>>
 
@@ -474,8 +478,9 @@ Administration Commands
 
 * <<<secondarynamenode>>>
 
-   Runs the HDFS secondary namenode. See Secondary Namenode for more
-   info.
+   Runs the HDFS secondary namenode.
+   See {{{../hadoop-hdfs/HdfsUserGuide.html#Secondary_NameNode}Secondary Namenode}}
+   for more info.
 
    Usage: <<<hadoop secondarynamenode [-checkpoint [force]] | [-geteditsize]>>>
 

+ 10 - 8
hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm

@@ -233,9 +233,10 @@ hand-in-hand to address this.
 
     * In particular for MapReduce applications, the developer community will 
       try our best to support provide binary compatibility across major 
-      releases e.g. applications using org.apache.hadop.mapred.* APIs are 
-      supported compatibly across hadoop-1.x and hadoop-2.x. See 
-      {{{../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduce_Compatibility_Hadoop1_Hadoop2.html}
+      releases e.g. applications using org.apache.hadoop.mapred.* APIs are
+      supported compatibly across hadoop-1.x and hadoop-2.x. See
+      {{{../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduce_Compatibility_Hadoop1_Hadoop2.html}
       Compatibility for MapReduce applications between hadoop-1.x and hadoop-2.x}} 
       for more details.
 
@@ -248,13 +249,13 @@ hand-in-hand to address this.
 
   * {{{../hadoop-hdfs/WebHDFS.html}WebHDFS}} - Stable
 
-  * {{{../hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html}ResourceManager}}
+  * {{{../../hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html}ResourceManager}}
 
-  * {{{../hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html}NodeManager}}
+  * {{{../../hadoop-yarn/hadoop-yarn-site/NodeManagerRest.html}NodeManager}}
 
-  * {{{../hadoop-yarn/hadoop-yarn-site/MapredAppMasterRest.html}MR Application Master}}
+  * {{{../../hadoop-yarn/hadoop-yarn-site/MapredAppMasterRest.html}MR Application Master}}
 
-  * {{{../hadoop-yarn/hadoop-yarn-site/HistoryServerRest.html}History Server}}
+  * {{{../../hadoop-yarn/hadoop-yarn-site/HistoryServerRest.html}History Server}}
   
 *** Policy
     
@@ -512,7 +513,8 @@ hand-in-hand to address this.
     {{{https://issues.apache.org/jira/browse/HADOOP-9517}HADOOP-9517}}
 
   * Binary compatibility for MapReduce end-user applications between hadoop-1.x and hadoop-2.x -
-    {{{../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduce_Compatibility_Hadoop1_Hadoop2.html}MapReduce Compatibility between hadoop-1.x and hadoop-2.x}}
+    {{{../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduce_Compatibility_Hadoop1_Hadoop2.html}
+    MapReduce Compatibility between hadoop-1.x and hadoop-2.x}}
 
   * Annotations for interfaces as per interface classification
     schedule -

+ 7 - 5
hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm

@@ -88,7 +88,7 @@ chgrp
 
    Change group association of files. The user must be the owner of files, or
    else a super-user. Additional information is in the
-   {{{betterurl}Permissions Guide}}.
+   {{{../hadoop-hdfs/HdfsPermissionsGuide.html}Permissions Guide}}.
 
    Options
 
@@ -101,7 +101,7 @@ chmod
    Change the permissions of files. With -R, make the change recursively
    through the directory structure. The user must be the owner of the file, or
    else a super-user. Additional information is in the
-   {{{betterurl}Permissions Guide}}.
+   {{{../hadoop-hdfs/HdfsPermissionsGuide.html}Permissions Guide}}.
 
    Options
 
@@ -112,7 +112,7 @@ chown
    Usage: <<<hdfs dfs -chown [-R] [OWNER][:[GROUP]] URI [URI ]>>>
 
    Change the owner of files. The user must be a super-user. Additional information
-   is in the {{{betterurl}Permissions Guide}}.
+   is in the {{{../hadoop-hdfs/HdfsPermissionsGuide.html}Permissions Guide}}.
 
    Options
 
@@ -210,8 +210,8 @@ expunge
 
    Usage: <<<hdfs dfs -expunge>>>
 
-   Empty the Trash. Refer to the {{{betterurl}HDFS Architecture Guide}} for
-   more information on the Trash feature.
+   Empty the Trash. Refer to the {{{../hadoop-hdfs/HdfsDesign.html}
+   HDFS Architecture Guide}} for more information on the Trash feature.
 
 get
 
@@ -439,7 +439,9 @@ test
    Options:
 
      * The -e option will check to see if the file exists, returning 0 if true.
+
      * The -z option will check to see if the file is zero length, returning 0 if true.
+
      * The -d option will check to see if the path is directory, returning 0 if true.
 
    Example:

+ 0 - 2
hadoop-common-project/hadoop-common/src/site/apt/InterfaceClassification.apt.vm

@@ -18,8 +18,6 @@
 
 Hadoop Interface Taxonomy: Audience and Stability Classification
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Motivation

+ 4 - 8
hadoop-common-project/hadoop-common/src/site/apt/NativeLibraries.apt.vm

@@ -117,23 +117,19 @@ Native Libraries Guide
      * zlib-development package (stable version >= 1.2.0)
 
    Once you installed the prerequisite packages use the standard hadoop
-   build.xml file and pass along the compile.native flag (set to true) to
-   build the native hadoop library:
+   pom.xml file and pass along the native flag to build the native hadoop 
+   library:
 
 ----
-   $ ant -Dcompile.native=true <target>
+   $ mvn package -Pdist,native -DskipTests -Dtar
 ----
 
    You should see the newly-built library in:
 
 ----
-   $ build/native/<platform>/lib
+   $ hadoop-dist/target/hadoop-${project.version}/lib/native
 ----
 
-   where <platform> is a combination of the system-properties:
-   ${os.name}-${os.arch}-${sun.arch.data.model} (for example,
-   Linux-i386-32).
-
    Please note the following:
 
      * It is mandatory to install both the zlib and gzip development

+ 4 - 2
hadoop-common-project/hadoop-common/src/site/apt/ServiceLevelAuth.apt.vm

@@ -29,8 +29,10 @@ Service Level Authorization Guide
 
    Make sure Hadoop is installed, configured and setup correctly. For more
    information see:
-     * Single Node Setup for first-time users.
-     * Cluster Setup for large, distributed clusters.
+
+     * {{{./SingleCluster.html}Single Node Setup}} for first-time users.
+
+     * {{{./ClusterSetup.html}Cluster Setup}} for large, distributed clusters.
 
 * Overview
 

+ 236 - 144
hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm

@@ -18,177 +18,269 @@
 
 Hadoop MapReduce Next Generation - Setting up a Single Node Cluster.
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
-* Mapreduce Tarball
+* Purpose
 
-  You should be able to obtain the MapReduce tarball from the release.
-  If not, you should be able to create a tarball from the source.
+  This document describes how to set up and configure a single-node Hadoop
+  installation so that you can quickly perform simple operations using Hadoop
+  MapReduce and the Hadoop Distributed File System (HDFS).
 
-+---+
-$ mvn clean install -DskipTests
-$ cd hadoop-mapreduce-project
-$ mvn clean install assembly:assembly -Pnative
-+---+
-  <<NOTE:>> You will need protoc 2.5.0 installed.
+* Prerequisites
 
-  To ignore the native builds in mapreduce you can omit the <<<-Pnative>>> argument
-  for maven. The tarball should be available in <<<target/>>> directory. 
+** Supported Platforms
 
-  
-* Setting up the environment.
+   * GNU/Linux is supported as a development and production platform.
+     Hadoop has been demonstrated on GNU/Linux clusters with 2000 nodes.
 
-  Assuming you have installed hadoop-common/hadoop-hdfs and exported
-  <<$HADOOP_COMMON_HOME>>/<<$HADOOP_HDFS_HOME>>, untar hadoop mapreduce 
-  tarball and set environment variable <<$HADOOP_MAPRED_HOME>> to the 
-  untarred directory. Set <<$HADOOP_YARN_HOME>> the same as <<$HADOOP_MAPRED_HOME>>. 
- 
-  <<NOTE:>> The following instructions assume you have hdfs running.
+   * Windows is also a supported platform but the following steps
+     are for Linux only. To set up Hadoop on Windows, see
+     {{{http://wiki.apache.org/hadoop/Hadoop2OnWindows}wiki page}}.
 
-* Setting up Configuration.
+** Required Software
 
-  To start the ResourceManager and NodeManager, you will have to update the configs.
-  Assuming your $HADOOP_CONF_DIR is the configuration directory and has the installed
-  configs for HDFS and <<<core-site.xml>>>. There are 2 config files you will have to setup
-  <<<mapred-site.xml>>> and <<<yarn-site.xml>>>.
+   Required software for Linux includes:
 
-** Setting up <<<mapred-site.xml>>>
+   [[1]] Java™ must be installed. Recommended Java versions are described
+         at {{{http://wiki.apache.org/hadoop/HadoopJavaVersions}
+         HadoopJavaVersions}}.
 
-  Add the following configs to your <<<mapred-site.xml>>>.
+   [[2]] ssh must be installed and sshd must be running to use the Hadoop
+         scripts that manage remote Hadoop daemons.
 
-+---+
-  <property>
-    <name>mapreduce.cluster.temp.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.cluster.local.dir</name>
-    <value></value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-+---+
+** Installing Software
 
-** Setting up <<<yarn-site.xml>>>
+  If your cluster doesn't have the requisite software you will need to install
+  it.
 
-Add the following configs to your <<<yarn-site.xml>>>
+  For example on Ubuntu Linux:
 
-+---+
-  <property>
-    <name>yarn.resourcemanager.resource-tracker.address</name>
-    <value>host:port</value>
-    <description>host is the hostname of the resource manager and 
-    port is the port on which the NodeManagers contact the Resource Manager.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.address</name>
-    <value>host:port</value>
-    <description>host is the hostname of the resourcemanager and port is the port
-    on which the Applications in the cluster talk to the Resource Manager.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
-    <description>In case you do not want to use the default scheduler</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.address</name>
-    <value>host:port</value>
-    <description>the host is the hostname of the ResourceManager and the port is the port on
-    which the clients can talk to the Resource Manager. </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.local-dirs</name>
-    <value></value>
-    <description>the local directories used by the nodemanager</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.address</name>
-    <value>0.0.0.0:port</value>
-    <description>the nodemanagers bind to this port</description>
-  </property>  
-
-  <property>
-    <name>yarn.nodemanager.resource.memory-mb</name>
-    <value>10240</value>
-    <description>the amount of memory on the NodeManager in GB</description>
-  </property>
- 
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir</name>
-    <value>/app-logs</value>
-    <description>directory on hdfs where the application logs are moved to </description>
-  </property>
-
-   <property>
-    <name>yarn.nodemanager.log-dirs</name>
-    <value></value>
-    <description>the directories used by Nodemanagers as log directories</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle</value>
-    <description>shuffle service that needs to be set for Map Reduce to run </description>
-  </property>
-+---+
+----
+  $ sudo apt-get install ssh
+  $ sudo apt-get install rsync
+----
+
+* Download
+
+  To get a Hadoop distribution, download a recent stable release from one of
+  the {{{http://www.apache.org/dyn/closer.cgi/hadoop/common/}
+  Apache Download Mirrors}}.
+
+* Prepare to Start the Hadoop Cluster
+
+  Unpack the downloaded Hadoop distribution. In the distribution, edit
+  the file <<<etc/hadoop/hadoop-env.sh>>> to define some parameters as
+  follows:
+
+----
+  # set to the root of your Java installation
+  export JAVA_HOME=/usr/java/latest
+
+  # Assuming your installation directory is /usr/local/hadoop
+  export HADOOP_PREFIX=/usr/local/hadoop
+----
+
+  Try the following command:
+
+----
+  $ bin/hadoop
+----
+
+  This will display the usage documentation for the hadoop script.
+
+  Now you are ready to start your Hadoop cluster in one of the three supported
+  modes:
+
+   * {{{Standalone Operation}Local (Standalone) Mode}}
 
-* Setting up <<<capacity-scheduler.xml>>>
+   * {{{Pseudo-Distributed Operation}Pseudo-Distributed Mode}}
 
-   Make sure you populate the root queues in <<<capacity-scheduler.xml>>>.
+   * {{{Fully-Distributed Operation}Fully-Distributed Mode}}
+
+* Standalone Operation
+
+  By default, Hadoop is configured to run in a non-distributed mode, as a
+  single Java process. This is useful for debugging.
+
+  The following example copies the unpacked conf directory to use as input
+  and then finds and displays every match of the given regular expression.
+  Output is written to the given output directory.
+
+----
+  $ mkdir input
+  $ cp etc/hadoop/*.xml input
+  $ bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-${project.version}.jar grep input output 'dfs[a-z.]+'
+  $ cat output/*
+----
+
+* Pseudo-Distributed Operation
+
+  Hadoop can also be run on a single node in a pseudo-distributed mode where
+  each Hadoop daemon runs in a separate Java process.
+
+** Configuration
+
+  Use the following:
+
+  etc/hadoop/core-site.xml:
 
 +---+
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>unfunded,default</value>
-  </property>
-  
-  <property>
-    <name>yarn.scheduler.capacity.root.capacity</name>
-    <value>100</value>
-  </property>
-  
-  <property>
-    <name>yarn.scheduler.capacity.root.unfunded.capacity</name>
-    <value>50</value>
-  </property>
+<configuration>
+    <property>
+        <name>fs.defaultFS</name>
+        <value>hdfs://localhost:9000</value>
+    </property>
+</configuration>
++---+
+
+  etc/hadoop/hdfs-site.xml:
   
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>50</value>
-  </property>
++---+
+<configuration>
+    <property>
+        <name>dfs.replication</name>
+        <value>1</value>
+    </property>
+</configuration>
 +---+
 
-* Running daemons.
+** Setup passphraseless ssh
+
+  Now check that you can ssh to the localhost without a passphrase:
+
+----
+  $ ssh localhost
+----
+
+  If you cannot ssh to localhost without a passphrase, execute the
+  following commands:
+
+----
+  $ ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa
+  $ cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
+----
+
+** Execution
+
+  The following instructions are to run a MapReduce job locally.
+  If you want to execute a job on YARN, see {{YARN on Single Node}}.
+
+  [[1]] Format the filesystem:
+
+----
+  $ bin/hdfs namenode -format
+----
+
+  [[2]] Start NameNode daemon and DataNode daemon:
+
+----
+  $ sbin/start-dfs.sh
+----
+
+        The hadoop daemon log output is written to the <<<${HADOOP_LOG_DIR}>>>
+        directory (defaults to <<<${HADOOP_HOME}/logs>>>).
+
+  [[3]] Browse the web interface for the NameNode; by default it is
+        available at:
+
+        * NameNode - <<<http://localhost:50070/>>>
+
+  [[4]] Make the HDFS directories required to execute MapReduce jobs:
+
+----
+  $ bin/hdfs dfs -mkdir /user
+  $ bin/hdfs dfs -mkdir /user/<username>
+----
+
+  [[5]] Copy the input files into the distributed filesystem:
+
+----
+  $ bin/hdfs dfs -put etc/hadoop input
+----
+
+  [[6]] Run some of the examples provided:
+
+----
+  $ bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-${project.version}.jar grep input output 'dfs[a-z.]+'
+----
+
+  [[7]] Examine the output files:
+
+        Copy the output files from the distributed filesystem to the local
+        filesystem and examine them:
+
+----
+  $ bin/hdfs dfs -get output output
+  $ cat output/*
+----
+
+        or
+
+        View the output files on the distributed filesystem:
+
+----
+  $ bin/hdfs dfs -cat output/*
+----
+
+  [[8]] When you're done, stop the daemons with:
+
+----
+  $ sbin/stop-dfs.sh
+----
+
+** YARN on Single Node
+
+  You can run a MapReduce job on YARN in a pseudo-distributed mode by setting
+  a few parameters and additionally running the ResourceManager daemon and the
+  NodeManager daemon.
+
+  The following instructions assume that steps 1 through 4 of
+  {{{Execution}the above instructions}} have already been executed.
+
+  [[1]] Configure parameters as follows:
+
+        etc/hadoop/mapred-site.xml:
 
-  Assuming that the environment variables <<$HADOOP_COMMON_HOME>>, <<$HADOOP_HDFS_HOME>>, <<$HADOO_MAPRED_HOME>>,
-  <<$HADOOP_YARN_HOME>>, <<$JAVA_HOME>> and <<$HADOOP_CONF_DIR>> have been set appropriately.
-  Set $<<$YARN_CONF_DIR>> the same as $<<HADOOP_CONF_DIR>>
- 
-  Run ResourceManager and NodeManager as:
-  
 +---+
-$ cd $HADOOP_MAPRED_HOME
-$ sbin/yarn-daemon.sh start resourcemanager
-$ sbin/yarn-daemon.sh start nodemanager
+<configuration>
+    <property>
+        <name>mapreduce.framework.name</name>
+        <value>yarn</value>
+    </property>
+</configuration>
 +---+
 
-  You should be up and running. You can run randomwriter as:
+        etc/hadoop/yarn-site.xml:
 
 +---+
-$ $HADOOP_COMMON_HOME/bin/hadoop jar hadoop-examples.jar randomwriter out
+<configuration>
+    <property>
+        <name>yarn.nodemanager.aux-services</name>
+        <value>mapreduce_shuffle</value>
+    </property>
+</configuration>
 +---+
 
-Good luck.
+  [[2]] Start ResourceManager daemon and NodeManager daemon:
+
+----
+  $ sbin/start-yarn.sh
+----
+
+  [[3]] Browse the web interface for the ResourceManager; by default it is
+        available at:
+
+        * ResourceManager - <<<http://localhost:8088/>>>
+
+  [[4]] Run a MapReduce job.
+
+  [[5]] When you're done, stop the daemons with:
+
+----
+  $ sbin/stop-yarn.sh
+----
+
+* Fully-Distributed Operation
+
+  For information on setting up fully-distributed, non-trivial clusters
+  see {{{./ClusterSetup.html}Cluster Setup}}.

+ 17 - 17
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java

@@ -23,7 +23,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpServer.Builder;
+import org.apache.hadoop.http.HttpServer2.Builder;
 
 import java.io.File;
 import java.io.IOException;
@@ -33,7 +33,7 @@ import java.net.URL;
 import java.net.MalformedURLException;
 
 /**
- * This is a base class for functional tests of the {@link HttpServer}.
+ * This is a base class for functional tests of the {@link HttpServer2}.
  * The methods are static for other classes to import statically.
  */
 public class HttpServerFunctionalTest extends Assert {
@@ -54,7 +54,7 @@ public class HttpServerFunctionalTest extends Assert {
    * @throws IOException if a problem occurs
    * @throws AssertionError if a condition was not met
    */
-  public static HttpServer createTestServer() throws IOException {
+  public static HttpServer2 createTestServer() throws IOException {
     prepareTestWebapp();
     return createServer(TEST);
   }
@@ -68,13 +68,13 @@ public class HttpServerFunctionalTest extends Assert {
    * @throws IOException if a problem occurs
    * @throws AssertionError if a condition was not met
    */
-  public static HttpServer createTestServer(Configuration conf)
+  public static HttpServer2 createTestServer(Configuration conf)
       throws IOException {
     prepareTestWebapp();
     return createServer(TEST, conf);
   }
 
-  public static HttpServer createTestServer(Configuration conf, AccessControlList adminsAcl)
+  public static HttpServer2 createTestServer(Configuration conf, AccessControlList adminsAcl)
       throws IOException {
     prepareTestWebapp();
     return createServer(TEST, conf, adminsAcl);
@@ -89,7 +89,7 @@ public class HttpServerFunctionalTest extends Assert {
    * @throws IOException if a problem occurs
    * @throws AssertionError if a condition was not met
    */
-  public static HttpServer createTestServer(Configuration conf, 
+  public static HttpServer2 createTestServer(Configuration conf,
       String[] pathSpecs) throws IOException {
     prepareTestWebapp();
     return createServer(TEST, conf, pathSpecs);
@@ -120,10 +120,10 @@ public class HttpServerFunctionalTest extends Assert {
    * @return the server
    * @throws IOException if it could not be created
    */
-  public static HttpServer createServer(String host, int port)
+  public static HttpServer2 createServer(String host, int port)
       throws IOException {
     prepareTestWebapp();
-    return new HttpServer.Builder().setName(TEST)
+    return new HttpServer2.Builder().setName(TEST)
         .addEndpoint(URI.create("http://" + host + ":" + port))
         .setFindPort(true).build();
   }
@@ -134,7 +134,7 @@ public class HttpServerFunctionalTest extends Assert {
    * @return the server
    * @throws IOException if it could not be created
    */
-  public static HttpServer createServer(String webapp) throws IOException {
+  public static HttpServer2 createServer(String webapp) throws IOException {
     return localServerBuilder(webapp).setFindPort(true).build();
   }
   /**
@@ -144,18 +144,18 @@ public class HttpServerFunctionalTest extends Assert {
    * @return the server
    * @throws IOException if it could not be created
    */
-  public static HttpServer createServer(String webapp, Configuration conf)
+  public static HttpServer2 createServer(String webapp, Configuration conf)
       throws IOException {
     return localServerBuilder(webapp).setFindPort(true).setConf(conf).build();
   }
 
-  public static HttpServer createServer(String webapp, Configuration conf, AccessControlList adminsAcl)
+  public static HttpServer2 createServer(String webapp, Configuration conf, AccessControlList adminsAcl)
       throws IOException {
     return localServerBuilder(webapp).setFindPort(true).setConf(conf).setACL(adminsAcl).build();
   }
 
   private static Builder localServerBuilder(String webapp) {
-    return new HttpServer.Builder().setName(webapp).addEndpoint(
+    return new HttpServer2.Builder().setName(webapp).addEndpoint(
         URI.create("http://localhost:0"));
   }
   
@@ -167,7 +167,7 @@ public class HttpServerFunctionalTest extends Assert {
    * @return the server
    * @throws IOException if it could not be created
    */
-  public static HttpServer createServer(String webapp, Configuration conf,
+  public static HttpServer2 createServer(String webapp, Configuration conf,
       String[] pathSpecs) throws IOException {
     return localServerBuilder(webapp).setFindPort(true).setConf(conf).setPathSpec(pathSpecs).build();
   }
@@ -180,8 +180,8 @@ public class HttpServerFunctionalTest extends Assert {
    * @throws IOException on any failure
    * @throws AssertionError if a condition was not met
    */
-  public static HttpServer createAndStartTestServer() throws IOException {
-    HttpServer server = createTestServer();
+  public static HttpServer2 createAndStartTestServer() throws IOException {
+    HttpServer2 server = createTestServer();
     server.start();
     return server;
   }
@@ -191,7 +191,7 @@ public class HttpServerFunctionalTest extends Assert {
    * @param server to stop
    * @throws Exception on any failure
    */
-  public static void stop(HttpServer server) throws Exception {
+  public static void stop(HttpServer2 server) throws Exception {
     if (server != null) {
       server.stop();
     }
@@ -203,7 +203,7 @@ public class HttpServerFunctionalTest extends Assert {
    * @return a URL bonded to the base of the server
    * @throws MalformedURLException if the URL cannot be created.
    */
-  public static URL getServerURL(HttpServer server)
+  public static URL getServerURL(HttpServer2 server)
       throws MalformedURLException {
     assertNotNull("No server", server);
     return new URL("http://"

+ 3 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestGlobalFilter.java

@@ -40,7 +40,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.junit.Test;
 
 public class TestGlobalFilter extends HttpServerFunctionalTest {
-  static final Log LOG = LogFactory.getLog(HttpServer.class);
+  static final Log LOG = LogFactory.getLog(HttpServer2.class);
   static final Set<String> RECORDS = new TreeSet<String>(); 
 
   /** A very simple filter that records accessed uri's */
@@ -106,9 +106,9 @@ public class TestGlobalFilter extends HttpServerFunctionalTest {
     Configuration conf = new Configuration();
     
     //start a http server with CountingFilter
-    conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
         RecordingFilter.Initializer.class.getName());
-    HttpServer http = createTestServer(conf);
+    HttpServer2 http = createTestServer(conf);
     http.start();
 
     final String fsckURL = "/fsck";

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHtmlQuoting.java

@@ -68,8 +68,8 @@ public class TestHtmlQuoting {
   @Test
   public void testRequestQuoting() throws Exception {
     HttpServletRequest mockReq = Mockito.mock(HttpServletRequest.class);
-    HttpServer.QuotingInputFilter.RequestQuoter quoter =
-      new HttpServer.QuotingInputFilter.RequestQuoter(mockReq);
+    HttpServer2.QuotingInputFilter.RequestQuoter quoter =
+      new HttpServer2.QuotingInputFilter.RequestQuoter(mockReq);
     
     Mockito.doReturn("a<b").when(mockReq).getParameter("x");
     assertEquals("Test simple param quoting",

+ 27 - 38
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

@@ -51,7 +51,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.http.HttpServer.QuotingInputFilter.RequestQuoter;
+import org.apache.hadoop.http.HttpServer2.QuotingInputFilter.RequestQuoter;
 import org.apache.hadoop.http.resource.JerseyResource;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Groups;
@@ -70,7 +70,7 @@ import static org.mockito.Mockito.*;
 
 public class TestHttpServer extends HttpServerFunctionalTest {
   static final Log LOG = LogFactory.getLog(TestHttpServer.class);
-  private static HttpServer server;
+  private static HttpServer2 server;
   private static URL baseUrl;
   private static final int MAX_THREADS = 10;
   
@@ -150,7 +150,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
 
   @BeforeClass public static void setup() throws Exception {
     Configuration conf = new Configuration();
-    conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);
+    conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
     server = createTestServer(conf);
     server.addServlet("echo", "/echo", EchoServlet.class);
     server.addServlet("echomap", "/echomap", EchoMapServlet.class);
@@ -357,7 +357,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     Configuration conf = new Configuration();
 
     // Authorization is disabled by default
-    conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
         DummyFilterInitializer.class.getName());
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
         MyGroupsProvider.class.getName());
@@ -366,9 +366,9 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     MyGroupsProvider.mapping.put("userA", Arrays.asList("groupA"));
     MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB"));
 
-    HttpServer myServer = new HttpServer.Builder().setName("test")
+    HttpServer2 myServer = new HttpServer2.Builder().setName("test")
         .addEndpoint(new URI("http://localhost:0")).setFindPort(true).build();
-    myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf);
+    myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf);
     myServer.start();
     String serverURL = "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/";
     for (String servlet : new String[] { "conf", "logs", "stacks",
@@ -394,7 +394,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
         true);
     conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
         true);
-    conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
         DummyFilterInitializer.class.getName());
 
     conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
@@ -407,10 +407,10 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     MyGroupsProvider.mapping.put("userD", Arrays.asList("groupD"));
     MyGroupsProvider.mapping.put("userE", Arrays.asList("groupE"));
 
-    HttpServer myServer = new HttpServer.Builder().setName("test")
+    HttpServer2 myServer = new HttpServer2.Builder().setName("test")
         .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf)
         .setACL(new AccessControlList("userA,userB groupC,groupD")).build();
-    myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf);
+    myServer.setAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf);
     myServer.start();
 
     String serverURL = "http://"
@@ -468,39 +468,39 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     Configuration conf = new Configuration();
     conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false);
     ServletContext context = Mockito.mock(ServletContext.class);
-    Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
-    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(null);
+    Mockito.when(context.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
+    Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(null);
     HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
     Mockito.when(request.getRemoteUser()).thenReturn(null);
     HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
 
     //authorization OFF
-    Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response));
+    Assert.assertTrue(HttpServer2.hasAdministratorAccess(context, request, response));
 
     //authorization ON & user NULL
     response = Mockito.mock(HttpServletResponse.class);
     conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
-    Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response));
+    Assert.assertFalse(HttpServer2.hasAdministratorAccess(context, request, response));
     Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString());
 
     //authorization ON & user NOT NULL & ACLs NULL
     response = Mockito.mock(HttpServletResponse.class);
     Mockito.when(request.getRemoteUser()).thenReturn("foo");
-    Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response));
+    Assert.assertTrue(HttpServer2.hasAdministratorAccess(context, request, response));
 
     //authorization ON & user NOT NULL & ACLs NOT NULL & user not in ACLs
     response = Mockito.mock(HttpServletResponse.class);
     AccessControlList acls = Mockito.mock(AccessControlList.class);
     Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
-    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
-    Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response));
+    Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
+    Assert.assertFalse(HttpServer2.hasAdministratorAccess(context, request, response));
     Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString());
 
     //authorization ON & user NOT NULL & ACLs NOT NULL & user in in ACLs
     response = Mockito.mock(HttpServletResponse.class);
     Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(true);
-    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
-    Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response));
+    Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
+    Assert.assertTrue(HttpServer2.hasAdministratorAccess(context, request, response));
 
   }
 
@@ -508,38 +508,27 @@ public class TestHttpServer extends HttpServerFunctionalTest {
   public void testRequiresAuthorizationAccess() throws Exception {
     Configuration conf = new Configuration();
     ServletContext context = Mockito.mock(ServletContext.class);
-    Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
+    Mockito.when(context.getAttribute(HttpServer2.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
     HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
     HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
 
     //requires admin access to instrumentation, FALSE by default
-    Assert.assertTrue(HttpServer.isInstrumentationAccessAllowed(context, request, response));
+    Assert.assertTrue(HttpServer2.isInstrumentationAccessAllowed(context, request, response));
 
     //requires admin access to instrumentation, TRUE
     conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
     conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
     AccessControlList acls = Mockito.mock(AccessControlList.class);
     Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
-    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
-    Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context, request, response));
-  }
-
-  @Test
-  @SuppressWarnings("deprecation")
-  public void testOldConstructor() throws Exception {
-    HttpServer server = new HttpServer("test", "0.0.0.0", 0, false);
-    try {
-      server.start();
-    } finally {
-      server.stop();
-    }
+    Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
+    Assert.assertFalse(HttpServer2.isInstrumentationAccessAllowed(context, request, response));
   }
 
   @Test public void testBindAddress() throws Exception {
     checkBindAddress("localhost", 0, false).stop();
     // hang onto this one for a bit more testing
-    HttpServer myServer = checkBindAddress("localhost", 0, false);
-    HttpServer myServer2 = null;
+    HttpServer2 myServer = checkBindAddress("localhost", 0, false);
+    HttpServer2 myServer2 = null;
     try { 
       int port = myServer.getConnectorAddress(0).getPort();
       // it's already in use, true = expect a higher port
@@ -558,9 +547,9 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     }
   }
   
-  private HttpServer checkBindAddress(String host, int port, boolean findPort)
+  private HttpServer2 checkBindAddress(String host, int port, boolean findPort)
       throws Exception {
-    HttpServer server = createServer(host, port);
+    HttpServer2 server = createServer(host, port);
     try {
       // not bound, ephemeral should return requested port (0 for ephemeral)
       List<?> listeners = (List<?>) Whitebox.getInternalState(server,
@@ -608,7 +597,7 @@ public class TestHttpServer extends HttpServerFunctionalTest {
   public void testHttpServerBuilderWithExternalConnector() throws Exception {
     Connector c = mock(Connector.class);
     doReturn("localhost").when(c).getHost();
-    HttpServer s = new HttpServer.Builder().setName("test").setConnector(c)
+    HttpServer2 s = new HttpServer2.Builder().setName("test").setConnector(c)
         .build();
     s.stop();
   }

+ 16 - 16
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLifecycle.java

@@ -23,18 +23,18 @@ import org.junit.Test;
 public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
 
   /**
-   * Check that a server is alive by probing the {@link HttpServer#isAlive()} method
+   * Check that a server is alive by probing the {@link HttpServer2#isAlive()} method
    * and the text of its toString() description
    * @param server server
    */
-  private void assertAlive(HttpServer server) {
+  private void assertAlive(HttpServer2 server) {
     assertTrue("Server is not alive", server.isAlive());
-    assertToStringContains(server, HttpServer.STATE_DESCRIPTION_ALIVE);
+    assertToStringContains(server, HttpServer2.STATE_DESCRIPTION_ALIVE);
   }
 
-  private void assertNotLive(HttpServer server) {
+  private void assertNotLive(HttpServer2 server) {
     assertTrue("Server should not be live", !server.isAlive());
-    assertToStringContains(server, HttpServer.STATE_DESCRIPTION_NOT_LIVE);
+    assertToStringContains(server, HttpServer2.STATE_DESCRIPTION_NOT_LIVE);
   }
 
   /**
@@ -43,12 +43,12 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
    * @throws Throwable on failure
    */
   @Test public void testCreatedServerIsNotAlive() throws Throwable {
-    HttpServer server = createTestServer();
+    HttpServer2 server = createTestServer();
     assertNotLive(server);
   }
 
   @Test public void testStopUnstartedServer() throws Throwable {
-    HttpServer server = createTestServer();
+    HttpServer2 server = createTestServer();
     stop(server);
   }
 
@@ -59,7 +59,7 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
    */
   @Test
   public void testStartedServerIsAlive() throws Throwable {
-    HttpServer server = null;
+    HttpServer2 server = null;
     server = createTestServer();
     assertNotLive(server);
     server.start();
@@ -78,22 +78,22 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
     requestLogAppender.setName("httprequestlog");
     requestLogAppender.setFilename(System.getProperty("test.build.data", "/tmp/")
         + "jetty-name-yyyy_mm_dd.log");
-    Logger.getLogger(HttpServer.class.getName() + ".test").addAppender(requestLogAppender);
-    HttpServer server = null;
+    Logger.getLogger(HttpServer2.class.getName() + ".test").addAppender(requestLogAppender);
+    HttpServer2 server = null;
     server = createTestServer();
     assertNotLive(server);
     server.start();
     assertAlive(server);
     stop(server);
-    Logger.getLogger(HttpServer.class.getName() + ".test").removeAppender(requestLogAppender);
+    Logger.getLogger(HttpServer2.class.getName() + ".test").removeAppender(requestLogAppender);
   }
 
   /**
-   * Assert that the result of {@link HttpServer#toString()} contains the specific text
+   * Assert that the result of {@link HttpServer2#toString()} contains the specific text
    * @param server server to examine
    * @param text text to search for
    */
-  private void assertToStringContains(HttpServer server, String text) {
+  private void assertToStringContains(HttpServer2 server, String text) {
     String description = server.toString();
     assertTrue("Did not find \"" + text + "\" in \"" + description + "\"",
                description.contains(text));
@@ -105,7 +105,7 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
    * @throws Throwable on failure
    */
   @Test public void testStoppedServerIsNotAlive() throws Throwable {
-    HttpServer server = createAndStartTestServer();
+    HttpServer2 server = createAndStartTestServer();
     assertAlive(server);
     stop(server);
     assertNotLive(server);
@@ -117,7 +117,7 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
    * @throws Throwable on failure
    */
   @Test public void testStoppingTwiceServerIsAllowed() throws Throwable {
-    HttpServer server = createAndStartTestServer();
+    HttpServer2 server = createAndStartTestServer();
     assertAlive(server);
     stop(server);
     assertNotLive(server);
@@ -133,7 +133,7 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest {
    */
   @Test
   public void testWepAppContextAfterServerStop() throws Throwable {
-    HttpServer server = null;
+    HttpServer2 server = null;
     String key = "test.attribute.key";
     String value = "test.attribute.value";
     server = createTestServer();

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWebapps.java

@@ -36,7 +36,7 @@ public class TestHttpServerWebapps extends HttpServerFunctionalTest {
    */
   @Test
   public void testValidServerResource() throws Throwable {
-    HttpServer server = null;
+    HttpServer2 server = null;
     try {
       server = createServer("test");
     } finally {
@@ -51,7 +51,7 @@ public class TestHttpServerWebapps extends HttpServerFunctionalTest {
   @Test
   public void testMissingServerResource() throws Throwable {
     try {
-      HttpServer server = createServer("NoSuchWebapp");
+      HttpServer2 server = createServer("NoSuchWebapp");
       //should not have got here.
       //close the server
       String serverDescription = server.toString();

+ 3 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestPathFilter.java

@@ -40,7 +40,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.junit.Test;
 
 public class TestPathFilter extends HttpServerFunctionalTest {
-  static final Log LOG = LogFactory.getLog(HttpServer.class);
+  static final Log LOG = LogFactory.getLog(HttpServer2.class);
   static final Set<String> RECORDS = new TreeSet<String>(); 
 
   /** A very simple filter that records accessed uri's */
@@ -107,10 +107,10 @@ public class TestPathFilter extends HttpServerFunctionalTest {
     Configuration conf = new Configuration();
     
     //start a http server with CountingFilter
-    conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
         RecordingFilter.Initializer.class.getName());
     String[] pathSpecs = { "/path", "/path/*" };
-    HttpServer http = createTestServer(conf, pathSpecs);
+    HttpServer2 http = createTestServer(conf, pathSpecs);
     http.start();
 
     final String baseURL = "/path";

+ 3 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java

@@ -48,7 +48,7 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
 
   private static final Log LOG = LogFactory.getLog(TestSSLHttpServer.class);
   private static Configuration conf;
-  private static HttpServer server;
+  private static HttpServer2 server;
   private static URL baseUrl;
   private static String keystoresDir;
   private static String sslConfDir;
@@ -57,7 +57,7 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
   @BeforeClass
   public static void setup() throws Exception {
     conf = new Configuration();
-    conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);
+    conf.setInt(HttpServer2.HTTP_MAX_THREADS, 10);
 
     File base = new File(BASEDIR);
     FileUtil.fullyDelete(base);
@@ -73,7 +73,7 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
     clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
     clientSslFactory.init();
 
-    server = new HttpServer.Builder()
+    server = new HttpServer2.Builder()
         .setName("test")
         .addEndpoint(new URI("https://localhost"))
         .setConf(conf)

+ 7 - 7
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestServletFilter.java

@@ -40,7 +40,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
 public class TestServletFilter extends HttpServerFunctionalTest {
-  static final Log LOG = LogFactory.getLog(HttpServer.class);
+  static final Log LOG = LogFactory.getLog(HttpServer2.class);
   static volatile String uri = null; 
 
   /** A very simple filter which record the uri filtered. */
@@ -105,9 +105,9 @@ public class TestServletFilter extends HttpServerFunctionalTest {
     Configuration conf = new Configuration();
     
     //start a http server with CountingFilter
-    conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
         SimpleFilter.Initializer.class.getName());
-    HttpServer http = createTestServer(conf);
+    HttpServer2 http = createTestServer(conf);
     http.start();
 
     final String fsckURL = "/fsck";
@@ -167,9 +167,9 @@ public class TestServletFilter extends HttpServerFunctionalTest {
   public void testServletFilterWhenInitThrowsException() throws Exception {
     Configuration conf = new Configuration();
     // start a http server with ErrorFilter
-    conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
+    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
         ErrorFilter.Initializer.class.getName());
-    HttpServer http = createTestServer(conf);
+    HttpServer2 http = createTestServer(conf);
     try {
       http.start();
       fail("expecting exception");
@@ -186,8 +186,8 @@ public class TestServletFilter extends HttpServerFunctionalTest {
   public void testContextSpecificServletFilterWhenInitThrowsException()
       throws Exception {
     Configuration conf = new Configuration();
-    HttpServer http = createTestServer(conf);
-    HttpServer.defineFilter(http.webAppContext,
+    HttpServer2 http = createTestServer(conf);
+    HttpServer2.defineFilter(http.webAppContext,
         "ErrorFilter", ErrorFilter.class.getName(),
         null, null);
     try {
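
The filter tests touched here configure servlet filters in two ways: globally, through a FilterInitializer named in HttpServer2.FILTER_INITIALIZER_PROPERTY, or per web-app context through HttpServer2.defineFilter(). A rough sketch of the first route follows; UriLoggingFilter is a hypothetical class that only mirrors the recording filters used in the tests, not code from the patch.

    import java.io.IOException;
    import javax.servlet.Filter;
    import javax.servlet.FilterChain;
    import javax.servlet.FilterConfig;
    import javax.servlet.ServletException;
    import javax.servlet.ServletRequest;
    import javax.servlet.ServletResponse;
    import javax.servlet.http.HttpServletRequest;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.FilterContainer;
    import org.apache.hadoop.http.FilterInitializer;

    /** Hypothetical filter that logs each request URI, like the test filters above. */
    public class UriLoggingFilter implements Filter {
      @Override public void init(FilterConfig filterConfig) {}
      @Override public void destroy() {}

      @Override
      public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain)
          throws IOException, ServletException {
        System.out.println("filtered: " + ((HttpServletRequest) req).getRequestURI());
        chain.doFilter(req, res);
      }

      /** Initializer that HttpServer2 instantiates via FILTER_INITIALIZER_PROPERTY. */
      public static class Initializer extends FilterInitializer {
        @Override
        public void initFilter(FilterContainer container, Configuration conf) {
          container.addFilter("uriLogging", UriLoggingFilter.class.getName(), null);
        }
      }
    }

Wiring it up before the server is built is a single conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY, UriLoggingFilter.Initializer.class.getName()) call, exactly as the tests above do for their own initializers; HttpServer2.defineFilter(), used in the last hunk, instead registers a filter class directly against an existing web-app context.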

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/jmx/TestJMXJsonServlet.java

@@ -24,7 +24,7 @@ import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.http.HttpServerFunctionalTest;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -32,7 +32,7 @@ import org.junit.Test;
 
 public class TestJMXJsonServlet extends HttpServerFunctionalTest {
   private   static final Log LOG = LogFactory.getLog(TestJMXJsonServlet.class);
-  private static HttpServer server;
+  private static HttpServer2 server;
   private static URL baseUrl;
 
   @BeforeClass public static void setup() throws Exception {

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java

@@ -20,7 +20,7 @@ package org.apache.hadoop.log;
 import java.io.*;
 import java.net.*;
 
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
 
 import junit.framework.TestCase;
@@ -44,7 +44,7 @@ public class TestLogLevel extends TestCase {
       log.error("log.error1");
       assertTrue(!Level.ERROR.equals(log.getEffectiveLevel()));
 
-      HttpServer server = new HttpServer.Builder().setName("..")
+      HttpServer2 server = new HttpServer2.Builder().setName("..")
           .addEndpoint(new URI("http://localhost:0")).setFindPort(true)
           .build();
       

+ 2 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java

@@ -18,7 +18,7 @@ package org.apache.hadoop.security;
 
 
 import junit.framework.TestCase;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
@@ -49,7 +49,7 @@ public class TestAuthenticationFilter extends TestCase {
              AuthenticationFilterInitializer.SIGNATURE_SECRET_FILE, 
              secretFile.getAbsolutePath());
 
-    conf.set(HttpServer.BIND_ADDRESS, "barhost");
+    conf.set(HttpServer2.BIND_ADDRESS, "barhost");
     
     FilterContainer container = Mockito.mock(FilterContainer.class);
     Mockito.doAnswer(

+ 3 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestSecurityUtil.java

@@ -331,7 +331,9 @@ public class TestSecurityUtil {
 
   @Test
   public void testSocketAddrWithIP() {
-    verifyServiceAddr("127.0.0.1", "127.0.0.1");
+    String staticHost = "127.0.0.1";
+    NetUtils.addStaticResolution(staticHost, "localhost");
+    verifyServiceAddr(staticHost, "127.0.0.1");
   }
 
   @Test

+ 36 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java

@@ -28,10 +28,30 @@ public class TestVersionUtil {
     // Equal versions are equal.
     assertEquals(0, VersionUtil.compareVersions("2.0.0", "2.0.0"));
     assertEquals(0, VersionUtil.compareVersions("2.0.0a", "2.0.0a"));
-    assertEquals(0, VersionUtil.compareVersions("1", "1"));
     assertEquals(0, VersionUtil.compareVersions(
         "2.0.0-SNAPSHOT", "2.0.0-SNAPSHOT"));
-    
+
+    assertEquals(0, VersionUtil.compareVersions("1", "1"));
+    assertEquals(0, VersionUtil.compareVersions("1", "1.0"));
+    assertEquals(0, VersionUtil.compareVersions("1", "1.0.0"));
+
+    assertEquals(0, VersionUtil.compareVersions("1.0", "1"));
+    assertEquals(0, VersionUtil.compareVersions("1.0", "1.0"));
+    assertEquals(0, VersionUtil.compareVersions("1.0", "1.0.0"));
+
+    assertEquals(0, VersionUtil.compareVersions("1.0.0", "1"));
+    assertEquals(0, VersionUtil.compareVersions("1.0.0", "1.0"));
+    assertEquals(0, VersionUtil.compareVersions("1.0.0", "1.0.0"));
+
+    assertEquals(0, VersionUtil.compareVersions("1.0.0-alpha-1", "1.0.0-a1"));
+    assertEquals(0, VersionUtil.compareVersions("1.0.0-alpha-2", "1.0.0-a2"));
+    assertEquals(0, VersionUtil.compareVersions("1.0.0-alpha1", "1.0.0-alpha-1"));
+
+    assertEquals(0, VersionUtil.compareVersions("1a0", "1.0.0-alpha-0"));
+    assertEquals(0, VersionUtil.compareVersions("1a0", "1-a0"));
+    assertEquals(0, VersionUtil.compareVersions("1.a0", "1-a0"));
+    assertEquals(0, VersionUtil.compareVersions("1.a0", "1.0.0-alpha-0"));
+
     // Assert that lower versions are lower, and higher versions are higher.
     assertExpectedValues("1", "2.0.0");
     assertExpectedValues("1.0.0", "2");
@@ -51,15 +71,27 @@ public class TestVersionUtil {
     assertExpectedValues("1.0.2a", "1.0.2ab");
     assertExpectedValues("1.0.0a1", "1.0.0a2");
     assertExpectedValues("1.0.0a2", "1.0.0a10");
+    // The 'a' in "1.a" is not followed by a digit, so it is not treated as
+    // "alpha" and is considered larger than "1.0", per Maven's
+    // ComparableVersion class implementation.
     assertExpectedValues("1.0", "1.a");
-    assertExpectedValues("1.0", "1.a0");
+    // The 'a' in "1.a0" is followed by a digit, so it is treated as "alpha-<digit>".
+    assertExpectedValues("1.a0", "1.0");
+    assertExpectedValues("1a0", "1.0");    
+    assertExpectedValues("1.0.1-alpha-1", "1.0.1-alpha-2");    
+    assertExpectedValues("1.0.1-beta-1", "1.0.1-beta-2");
     
     // Snapshot builds precede their eventual releases.
     assertExpectedValues("1.0-SNAPSHOT", "1.0");
-    assertExpectedValues("1.0", "1.0.0-SNAPSHOT");
+    assertExpectedValues("1.0.0-SNAPSHOT", "1.0");
     assertExpectedValues("1.0.0-SNAPSHOT", "1.0.0");
     assertExpectedValues("1.0.0", "1.0.1-SNAPSHOT");
     assertExpectedValues("1.0.1-SNAPSHOT", "1.0.1");
+    assertExpectedValues("1.0.1-SNAPSHOT", "1.0.2");
+    
+    assertExpectedValues("1.0.1-alpha-1", "1.0.1-SNAPSHOT");
+    assertExpectedValues("1.0.1-beta-1", "1.0.1-SNAPSHOT");
+    assertExpectedValues("1.0.1-beta-2", "1.0.1-SNAPSHOT");
   }
   
   private static void assertExpectedValues(String lower, String higher) {
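
The expanded test matrix documents the ordering VersionUtil now takes from Maven's ComparableVersion implementation: missing trailing components count as zero, "aN" and "alpha-N" qualifiers are interchangeable, and SNAPSHOT builds sort before their releases. As a quick orientation to the comparator's sign convention, a small demo (not part of the patch):

    import org.apache.hadoop.util.VersionUtil;

    public class VersionOrderingDemo {
      public static void main(String[] args) {
        // Negative: the first argument is the older version.
        System.out.println(VersionUtil.compareVersions("2.3.0", "2.4.0"));
        // Zero: equivalent after normalization ("1" == "1.0" == "1.0.0").
        System.out.println(VersionUtil.compareVersions("1", "1.0.0"));
        // Positive: a release sorts after its own SNAPSHOT.
        System.out.println(VersionUtil.compareVersions("1.0.0", "1.0.0-SNAPSHOT"));
      }
    }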

+ 236 - 214
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -290,6 +290,27 @@ Release 2.4.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    HDFS-5781. Use an array to record the mapping between FSEditLogOpCode and 
+    the corresponding byte value. (jing9)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+    HDFS-5492. Port HDFS-2069 (Incorrect default trash interval in the
+    docs) to trunk. (Akira Ajisaka via Arpit Agarwal)
+
+    HDFS-5843. DFSClient.getFileChecksum() throws IOException if checksum is 
+    disabled. (Laurent Goujon via jing9)
+
+Release 2.3.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
   NEW FEATURES
 
     HDFS-5122. Support failover and retry in WebHdfsFileSystem for NN HA.
@@ -329,6 +350,43 @@ Release 2.4.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    HDFS-5360. Improvement of usage message of renameSnapshot and
+    deleteSnapshot. (Shinichi Yamashita via wang)
+
+    HDFS-5331. make SnapshotDiff.java to a o.a.h.util.Tool interface implementation. 
+    (Vinayakumar B via umamahesh)
+
+    HDFS-4657.  Limit the number of blocks logged by the NN after a block
+    report to a configurable value.  (Aaron T. Myers via Colin Patrick
+    McCabe)
+
+    HDFS-5344. Make LsSnapshottableDir as Tool interface implementation. (Sathish via umamahesh)
+
+    HDFS-5544. Adding Test case For Checking dfs.checksum type as NULL value. (Sathish via umamahesh)
+
+    HDFS-5568. Support includeSnapshots option with Fsck command. (Vinayakumar B via umamahesh)
+
+    HDFS-4983. Numeric usernames do not work with WebHDFS FS. (Yongjun Zhang via
+    jing9)
+
+    HDFS-5592. statechangeLog of completeFile should be logged only in case of success. 
+    (Vinayakumar via umamahesh)
+
+    HDFS-5662. Can't decommission a DataNode due to file's replication factor
+    larger than the rest of the cluster size. (brandonli)
+
+    HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options.
+    (shv)
+
+    HDFS-5675. Add Mkdirs operation to NNThroughputBenchmark.
+    (Plamen Jeliazkov via shv)
+
+    HDFS-5677. Need error checking for HA cluster configuration.
+    (Vincent Sheffer via cos)
+
+    HDFS-5825. Use FileUtils.copyFile() to implement DFSTestUtils.copyFile().
+    (Haohui Mai via Arpit Agarwal)
+
     HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
 
     HDFS-4278. Log an ERROR when DFS_BLOCK_ACCESS_TOKEN_ENABLE config is
@@ -504,8 +562,7 @@ Release 2.4.0 - UNRELEASED
     HDFS-5788. listLocatedStatus response can be very large. (Nathan Roberts
     via kihwal)
 
-    HDFS-5781. Use an array to record the mapping between FSEditLogOpCode and 
-    the corresponding byte value. (jing9)
+    HDFS-5841. Update HDFS caching documentation with new changes. (wang)
 
   OPTIMIZATIONS
 
@@ -521,6 +578,177 @@ Release 2.4.0 - UNRELEASED
 
   BUG FIXES
 
+    HDFS-5307. Support both HTTP and HTTPS in jsp pages (Haohui Mai via
+    brandonli)
+
+    HDFS-5291. Standby namenode after transition to active goes into safemode.
+    (jing9)
+
+    HDFS-5317. Go back to DFS Home link does not work on datanode webUI
+    (Haohui Mai via brandonli)
+
+    HDFS-5316. Namenode ignores the default https port (Haohui Mai via
+    brandonli)
+
+    HDFS-5281. COMMIT request should not block. (brandonli)
+
+    HDFS-5337. should do hsync for a commit request even there is no pending
+    writes (brandonli)
+
+    HDFS-5335. Hive query failed with possible race in dfs output stream.
+    (Haohui Mai via suresh)
+
+    HDFS-5322. HDFS delegation token not found in cache errors seen on secure HA 
+    clusters. (jing9)
+
+    HDFS-5329. Update FSNamesystem#getListing() to handle inode path in startAfter
+    token. (brandonli)
+
+    HDFS-5330. fix readdir and readdirplus for large directories (brandonli)
+
+    HDFS-5370. Typo in Error Message: different between range in condition
+    and range in error message. (Kousuke Saruta via suresh)
+
+    HDFS-5365. Fix libhdfs compile error on FreeBSD9. (Radim Kolar via cnauroth)
+    
+    HDFS-5347. Add HDFS NFS user guide. (brandonli)
+
+    HDFS-5403. WebHdfs client cannot communicate with older WebHdfs servers
+    post HDFS-5306. (atm)
+
+    HDFS-5171. NFS should create input stream for a file and try to share it
+    with multiple read requests. (Haohui Mai via brandonli)
+
+    HDFS-5413. hdfs.cmd does not support passthrough to any arbitrary class.
+    (cnauroth)
+
+    HDFS-5433. When reloading fsimage during checkpointing, we should clear
+    existing snapshottable directories. (Aaron T. Myers via wang)
+
+    HDFS-5432. TestDatanodeJsp fails on Windows due to assumption that loopback
+    address resolves to host name localhost. (cnauroth)
+
+    HDFS-5065. TestSymlinkHdfsDisable fails on Windows. (ivanmi)
+
+    HDFS-4633 TestDFSClientExcludedNodes fails sporadically if excluded nodes
+    cache expires too quickly  (Chris Nauroth via Sanjay)
+
+    HDFS-5037. Active NN should trigger its own edit log rolls (wang)
+
+    HDFS-5035.  getFileLinkStatus and rename do not correctly check permissions
+    of symlinks.  (Andrew Wang via Colin Patrick McCabe)
+
+    HDFS-5456. NameNode startup progress creates new steps if caller attempts to
+    create a counter for a step that doesn't already exist.  (cnauroth)
+
+    HDFS-5458. Datanode failed volume threshold ignored if exception is thrown
+    in getDataDirsFromURIs. (Mike Mellenthin via wang)
+
+    HDFS-5252. Stable write is not handled correctly in someplace. (brandonli)
+
+    HDFS-5364. Add OpenFileCtx cache. (brandonli)
+
+    HDFS-5469. Add configuration property for the sub-directroy export path
+    (brandonli)
+
+    HDFS-5519. COMMIT handler should update the commit status after sync
+    (brandonli)
+
+    HDFS-5372. In FSNamesystem, hasReadLock() returns false if the current thread 
+    holds the write lock (VinayaKumar B via umamahesh)
+
+    HDFS-4516. Client crash after block allocation and NN switch before lease recovery for 
+    the same file can cause readers to fail forever (VinaayKumar B via umamahesh)
+
+    HDFS-5014. Process register commands with out holding BPOfferService lock. 
+    (Vinaykumar B via umamahesh)
+
+    HDFS-5288. Close idle connections in portmap (Haohui Mai via brandonli)
+
+    HDFS-5407. Fix typos in DFSClientCache (Haohui Mai via brandonli)
+
+    HDFS-5548. Use ConcurrentHashMap in portmap (Haohui Mai via brandonli)
+
+    HDFS-5577. NFS user guide update (brandonli)
+
+    HDFS-5563. NFS gateway should commit the buffered data when read request comes
+    after write to the same file (brandonli)
+
+    HDFS-4997. libhdfs doesn't return correct error codes in most cases (cmccabe)
+
+    HDFS-5587. add debug information when NFS fails to start with duplicate user
+    or group names (brandonli)
+
+    HDFS-5590. Block ID and generation stamp may be reused when persistBlocks is 
+    set to false. (jing9)
+
+    HDFS-5353. Short circuit reads fail when dfs.encrypt.data.transfer is 
+    enabled. (Colin Patrick McCabe via jing9)
+
+    HDFS-5283. Under construction blocks only inside snapshots should not be
+    counted in safemode threshhold.  (Vinay via szetszwo)
+
+    HDFS-5257. addBlock() retry should return LocatedBlock with locations else client 
+    will get AIOBE. (Vinay via jing9)
+
+    HDFS-5427. Not able to read deleted files from snapshot directly under 
+    snapshottable dir after checkpoint and NN restart. (Vinay via jing9)
+
+    HDFS-5443. Delete 0-sized block when deleting an under-construction file that 
+    is included in snapshot. (jing9)
+
+    HDFS-5476. Snapshot: clean the blocks/files/directories under a renamed 
+    file/directory while deletion. (jing9)
+
+    HDFS-5425. Renaming underconstruction file with snapshots can make NN failure on 
+    restart. (jing9 and Vinay)
+
+    HDFS-5474. Deletesnapshot can make Namenode in safemode on NN restarts. 
+    (Sathish via jing9)
+
+    HDFS-5504. In HA mode, OP_DELETE_SNAPSHOT is not decrementing the safemode threshold, 
+    leads to NN safemode. (Vinay via jing9)
+
+    HDFS-5428. Under construction files deletion after snapshot+checkpoint+nn restart 
+    leads nn safemode. (jing9)
+
+    HDFS-5074. Allow starting up from an fsimage checkpoint in the middle of a
+    segment. (Todd Lipcon via atm)
+
+    HDFS-4201. NPE in BPServiceActor#sendHeartBeat. (jxiang via cmccabe)
+
+    HDFS-5666. Fix inconsistent synchronization in BPOfferService (jxiang via cmccabe)
+    
+    HDFS-5657. race condition causes writeback state error in NFS gateway (brandonli)
+
+    HDFS-5661. Browsing FileSystem via web ui, should use datanode's fqdn instead of ip 
+    address. (Benoy Antony via jing9)
+
+    HDFS-5582. hdfs getconf -excludeFile or -includeFile always failed (sathish
+    via cmccabe)
+
+    HDFS-5671. Fix socket leak in DFSInputStream#getBlockReader. (JamesLi via umamahesh) 
+
+    HDFS-5649. Unregister NFS and Mount service when NFS gateway is shutting down.
+    (brandonli)
+
+    HDFS-5789. Some of snapshot APIs missing checkOperation double check in fsn. (umamahesh)
+
+    HDFS-5343. When cat command is issued on snapshot files getting unexpected result.
+    (Sathish via umamahesh)
+
+    HDFS-5297. Fix dead links in HDFS site documents. (Akira Ajisaka via
+    Arpit Agarwal)
+
+    HDFS-5830. WebHdfsFileSystem.getFileBlockLocations throws
+    IllegalArgumentException when accessing another cluster. (Yongjun Zhang via
+    Colin Patrick McCabe)
+
+    HDFS-5833. Fix SecondaryNameNode javadoc. (Bangtao Zhou via Arpit Agarwal)
+
+    HDFS-5844. Fix broken link in WebHDFS.apt.vm. (Akira Ajisaka via
+    Arpit Agarwal)
+
     HDFS-5034.  Remove debug prints from GetFileLinkInfo (Andrew Wang via Colin
     Patrick McCabe)
 
@@ -602,6 +830,12 @@ Release 2.4.0 - UNRELEASED
     HDFS-5728. Block recovery will fail if the metafile does not have crc 
     for all chunks of the block (Vinay via kihwal)
 
+    HDFS-5845. SecondaryNameNode dies when checkpointing with cache pools.
+    (wang)
+
+    HDFS-5842. Cannot create hftp filesystem when using a proxy user ugi and a doAs 
+    on a secure cluster. (jing9)
+
   BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS
 
     HDFS-4985. Add storage type to the protocol and expose it in block report
@@ -939,218 +1173,6 @@ Release 2.4.0 - UNRELEASED
     HDFS-5724. modifyCacheDirective logging audit log command wrongly as
     addCacheDirective (Uma Maheswara Rao G via Colin Patrick McCabe)
 
-
-Release 2.3.0 - UNRELEASED
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-  IMPROVEMENTS
-
-    HDFS-5360. Improvement of usage message of renameSnapshot and
-    deleteSnapshot. (Shinichi Yamashita via wang)
-
-    HDFS-5331. make SnapshotDiff.java to a o.a.h.util.Tool interface implementation. 
-    (Vinayakumar B via umamahesh)
-
-    HDFS-4657.  Limit the number of blocks logged by the NN after a block
-    report to a configurable value.  (Aaron T. Myers via Colin Patrick
-    McCabe)
-
-    HDFS-5344. Make LsSnapshottableDir as Tool interface implementation. (Sathish via umamahesh)
-
-    HDFS-5544. Adding Test case For Checking dfs.checksum type as NULL value. (Sathish via umamahesh)
-
-    HDFS-5568. Support includeSnapshots option with Fsck command. (Vinayakumar B via umamahesh)
-
-    HDFS-4983. Numeric usernames do not work with WebHDFS FS. (Yongjun Zhang via
-    jing9)
-
-    HDFS-5592. statechangeLog of completeFile should be logged only in case of success. 
-    (Vinayakumar via umamahesh)
-
-    HDFS-5662. Can't decommission a DataNode due to file's replication factor
-    larger than the rest of the cluster size. (brandonli)
-
-    HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options.
-    (shv)
-
-    HDFS-5675. Add Mkdirs operation to NNThroughputBenchmark.
-    (Plamen Jeliazkov via shv)
-
-    HDFS-5677. Need error checking for HA cluster configuration.
-    (Vincent Sheffer via cos)
-
-    HADOOP-10086. User document for authentication in secure cluster.
-    (Masatake Iwasaki via Arpit Agarwal)
-
-    HDFS-5825. Use FileUtils.copyFile() to implement DFSTestUtils.copyFile().
-    (Haohui Mai via Arpit Agarwal)
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-
-    HDFS-5307. Support both HTTP and HTTPS in jsp pages (Haohui Mai via
-    brandonli)
-
-    HDFS-5291. Standby namenode after transition to active goes into safemode.
-    (jing9)
-
-    HDFS-5317. Go back to DFS Home link does not work on datanode webUI
-    (Haohui Mai via brandonli)
-
-    HDFS-5316. Namenode ignores the default https port (Haohui Mai via
-    brandonli)
-
-    HDFS-5281. COMMIT request should not block. (brandonli)
-
-    HDFS-5337. should do hsync for a commit request even there is no pending
-    writes (brandonli)
-
-    HDFS-5335. Hive query failed with possible race in dfs output stream.
-    (Haohui Mai via suresh)
-
-    HDFS-5322. HDFS delegation token not found in cache errors seen on secure HA 
-    clusters. (jing9)
-
-    HDFS-5329. Update FSNamesystem#getListing() to handle inode path in startAfter
-    token. (brandonli)
-
-    HDFS-5330. fix readdir and readdirplus for large directories (brandonli)
-
-    HDFS-5370. Typo in Error Message: different between range in condition
-    and range in error message. (Kousuke Saruta via suresh)
-
-    HDFS-5365. Fix libhdfs compile error on FreeBSD9. (Radim Kolar via cnauroth)
-    
-    HDFS-5347. Add HDFS NFS user guide. (brandonli)
-
-    HDFS-5403. WebHdfs client cannot communicate with older WebHdfs servers
-    post HDFS-5306. (atm)
-
-    HDFS-5171. NFS should create input stream for a file and try to share it
-    with multiple read requests. (Haohui Mai via brandonli)
-
-    HDFS-5413. hdfs.cmd does not support passthrough to any arbitrary class.
-    (cnauroth)
-
-    HDFS-5433. When reloading fsimage during checkpointing, we should clear
-    existing snapshottable directories. (Aaron T. Myers via wang)
-
-    HDFS-5432. TestDatanodeJsp fails on Windows due to assumption that loopback
-    address resolves to host name localhost. (cnauroth)
-
-    HDFS-5065. TestSymlinkHdfsDisable fails on Windows. (ivanmi)
-
-    HDFS-4633 TestDFSClientExcludedNodes fails sporadically if excluded nodes
-    cache expires too quickly  (Chris Nauroth via Sanjay)
-
-    HDFS-5037. Active NN should trigger its own edit log rolls (wang)
-
-    HDFS-5035.  getFileLinkStatus and rename do not correctly check permissions
-    of symlinks.  (Andrew Wang via Colin Patrick McCabe)
-
-    HDFS-5456. NameNode startup progress creates new steps if caller attempts to
-    create a counter for a step that doesn't already exist.  (cnauroth)
-
-    HDFS-5458. Datanode failed volume threshold ignored if exception is thrown
-    in getDataDirsFromURIs. (Mike Mellenthin via wang)
-
-    HDFS-5252. Stable write is not handled correctly in someplace. (brandonli)
-
-    HDFS-5364. Add OpenFileCtx cache. (brandonli)
-
-    HDFS-5469. Add configuration property for the sub-directroy export path
-    (brandonli)
-
-    HDFS-5519. COMMIT handler should update the commit status after sync
-    (brandonli)
-
-    HDFS-5372. In FSNamesystem, hasReadLock() returns false if the current thread 
-    holds the write lock (VinayaKumar B via umamahesh)
-
-    HDFS-4516. Client crash after block allocation and NN switch before lease recovery for 
-    the same file can cause readers to fail forever (VinaayKumar B via umamahesh)
-
-    HDFS-5014. Process register commands with out holding BPOfferService lock. 
-    (Vinaykumar B via umamahesh)
-
-    HDFS-5288. Close idle connections in portmap (Haohui Mai via brandonli)
-
-    HDFS-5407. Fix typos in DFSClientCache (Haohui Mai via brandonli)
-
-    HDFS-5548. Use ConcurrentHashMap in portmap (Haohui Mai via brandonli)
-
-    HDFS-5577. NFS user guide update (brandonli)
-
-    HDFS-5563. NFS gateway should commit the buffered data when read request comes
-    after write to the same file (brandonli)
-
-    HDFS-4997. libhdfs doesn't return correct error codes in most cases (cmccabe)
-
-    HDFS-5587. add debug information when NFS fails to start with duplicate user
-    or group names (brandonli)
-
-    HDFS-5590. Block ID and generation stamp may be reused when persistBlocks is 
-    set to false. (jing9)
-
-    HDFS-5353. Short circuit reads fail when dfs.encrypt.data.transfer is 
-    enabled. (Colin Patrick McCabe via jing9)
-
-    HDFS-5283. Under construction blocks only inside snapshots should not be
-    counted in safemode threshhold.  (Vinay via szetszwo)
-
-    HDFS-5257. addBlock() retry should return LocatedBlock with locations else client 
-    will get AIOBE. (Vinay via jing9)
-
-    HDFS-5427. Not able to read deleted files from snapshot directly under 
-    snapshottable dir after checkpoint and NN restart. (Vinay via jing9)
-
-    HDFS-5443. Delete 0-sized block when deleting an under-construction file that 
-    is included in snapshot. (jing9)
-
-    HDFS-5476. Snapshot: clean the blocks/files/directories under a renamed 
-    file/directory while deletion. (jing9)
-
-    HDFS-5425. Renaming underconstruction file with snapshots can make NN failure on 
-    restart. (jing9 and Vinay)
-
-    HDFS-5474. Deletesnapshot can make Namenode in safemode on NN restarts. 
-    (Sathish via jing9)
-
-    HDFS-5504. In HA mode, OP_DELETE_SNAPSHOT is not decrementing the safemode threshold, 
-    leads to NN safemode. (Vinay via jing9)
-
-    HDFS-5428. Under construction files deletion after snapshot+checkpoint+nn restart 
-    leads nn safemode. (jing9)
-
-    HDFS-5074. Allow starting up from an fsimage checkpoint in the middle of a
-    segment. (Todd Lipcon via atm)
-
-    HDFS-4201. NPE in BPServiceActor#sendHeartBeat. (jxiang via cmccabe)
-
-    HDFS-5666. Fix inconsistent synchronization in BPOfferService (jxiang via cmccabe)
-    
-    HDFS-5657. race condition causes writeback state error in NFS gateway (brandonli)
-
-    HDFS-5661. Browsing FileSystem via web ui, should use datanode's fqdn instead of ip 
-    address. (Benoy Antony via jing9)
-
-    HDFS-5582. hdfs getconf -excludeFile or -includeFile always failed (sathish
-    via cmccabe)
-
-    HDFS-5671. Fix socket leak in DFSInputStream#getBlockReader. (JamesLi via umamahesh) 
-
-    HDFS-5649. Unregister NFS and Mount service when NFS gateway is shutting down.
-    (brandonli)
-
-    HDFS-5789. Some of snapshot APIs missing checkOperation double check in fsn. (umamahesh)
-
-    HDFS-5343. When cat command is issued on snapshot files getting unexpected result.
-    (Sathish via umamahesh)
-
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -84,7 +84,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
@@ -1539,7 +1539,7 @@ public class DFSUtil {
     return policy;
   }
 
-  public static HttpServer.Builder loadSslConfToHttpServerBuilder(HttpServer.Builder builder,
+  public static HttpServer2.Builder loadSslConfToHttpServerBuilder(HttpServer2.Builder builder,
       Configuration sslConf) {
     return builder
         .needsClientAuth(
@@ -1644,13 +1644,13 @@ public class DFSUtil {
    * namenode can use to initialize their HTTP / HTTPS server.
    *
    */
-  public static HttpServer.Builder httpServerTemplateForNNAndJN(
+  public static HttpServer2.Builder httpServerTemplateForNNAndJN(
       Configuration conf, final InetSocketAddress httpAddr,
       final InetSocketAddress httpsAddr, String name, String spnegoUserNameKey,
       String spnegoKeytabFileKey) throws IOException {
     HttpConfig.Policy policy = getHttpPolicy(conf);
 
-    HttpServer.Builder builder = new HttpServer.Builder().setName(name)
+    HttpServer2.Builder builder = new HttpServer2.Builder().setName(name)
         .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
         .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
         .setUsernameConfKey(spnegoUserNameKey)

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java

@@ -98,9 +98,8 @@ public class LocatedBlock {
     }
     this.storageIDs = storageIDs;
     this.storageTypes = storageTypes;
-    Preconditions.checkArgument(cachedLocs != null,
-        "cachedLocs should not be null, use a different constructor");
-    if (cachedLocs.length == 0) {
+
+    if (cachedLocs == null || cachedLocs.length == 0) {
       this.cachedLocs = EMPTY_LOCS;
     } else {
       this.cachedLocs = cachedLocs;

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
 
 /**
@@ -38,7 +38,7 @@ import org.apache.hadoop.net.NetUtils;
 public class JournalNodeHttpServer {
   public static final String JN_ATTRIBUTE_KEY = "localjournal";
 
-  private HttpServer httpServer;
+  private HttpServer2 httpServer;
   private JournalNode localJournalNode;
 
   private final Configuration conf;
@@ -56,7 +56,7 @@ public class JournalNodeHttpServer {
         DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
 
-    HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+    HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
         httpAddr, httpsAddr, "journal",
         DFSConfigKeys.DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
         DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY);

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -120,7 +120,7 @@ import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.ReadaheadPool;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -235,7 +235,7 @@ public class DataNode extends Configured
   private volatile boolean heartbeatsDisabledForTests = false;
   private DataStorage storage = null;
 
-  private HttpServer infoServer = null;
+  private HttpServer2 infoServer = null;
   private int infoPort;
   private int infoSecurePort;
 
@@ -358,7 +358,7 @@ public class DataNode extends Configured
    * Http Policy is decided.
    */
   private void startInfoServer(Configuration conf) throws IOException {
-    HttpServer.Builder builder = new HttpServer.Builder().setName("datanode")
+    HttpServer2.Builder builder = new HttpServer2.Builder().setName("datanode")
         .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));
 
     HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -655,8 +655,9 @@ class DataXceiver extends Receiver implements Runnable {
       final BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
       final DataChecksum checksum = header.getChecksum(); 
       final int bytesPerCRC = checksum.getBytesPerChecksum();
-      final long crcPerBlock = (metadataIn.getLength()
-          - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize();
+      final long crcPerBlock = checksum.getChecksumSize() > 0 
+              ? (metadataIn.getLength() - BlockMetadataHeader.getHeaderSize())/checksum.getChecksumSize()
+              : 0;
       
       //compute block checksum
       final MD5Hash md5 = MD5Hash.digest(checksumIn);
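
The guard above keeps the CRC-per-block computation from dividing by zero when the checksum type is NULL (checksum size 0), which appears to be the DataNode side of the HDFS-5843 entry in CHANGES.txt. Factored out as a standalone helper purely for illustration; the class, method, and parameter names are not from the patch.

    final class ChecksumMath {
      /**
       * Number of CRC values stored in a block metadata file, or 0 when
       * checksums are disabled (a NULL checksum type has checksum size 0).
       */
      static long crcPerBlock(long metadataFileLength, int headerSize, int checksumSize) {
        return checksumSize > 0
            ? (metadataFileLength - headerSize) / checksumSize
            : 0;
      }
    }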

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java

@@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.mortbay.jetty.Connector;
 
@@ -119,7 +119,7 @@ public class SecureDataNodeStarter implements Daemon {
     // certificates if they are communicating through SSL.
     Connector listener = null;
     if (policy.isHttpEnabled()) {
-      listener = HttpServer.createDefaultChannelConnector();
+      listener = HttpServer2.createDefaultChannelConnector();
       InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
       listener.setHost(infoSocAddr.getHostName());
       listener.setPort(infoSocAddr.getPort());

+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java

@@ -215,6 +215,17 @@ public final class CacheManager {
 
   }
 
+  /**
+   * Resets all tracked directives and pools. Called during 2NN checkpointing
+   * to reset FSNamesystem state. See {@link FSNamesystem#clear()}.
+   */
+  void clear() {
+    directivesById.clear();
+    directivesByPath.clear();
+    cachePools.clear();
+    nextDirectiveId = 1;
+  }
+
   public void startMonitorThread() {
     crmLock.lock();
     try {

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -545,6 +545,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     leaseManager.removeAllLeases();
     inodeId.setCurrentValue(INodeId.LAST_RESERVED_ID);
     snapshotManager.clearSnapshottableDirs();
+    cacheManager.clear();
   }
 
   @VisibleForTesting

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java

@@ -47,7 +47,7 @@ import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -287,7 +287,7 @@ public class GetImageServlet extends HttpServlet {
       }
     }
     
-    if (HttpServer.userHasAdministratorAccess(context, remoteUser)) {
+    if (HttpServer2.userHasAdministratorAccess(context, remoteUser)) {
       LOG.info("GetImageServlet allowing administrator: " + remoteUser);
       return true;
     }

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java

@@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -47,7 +47,7 @@ import org.apache.hadoop.security.UserGroupInformation;
  */
 @InterfaceAudience.Private
 public class NameNodeHttpServer {
-  private HttpServer httpServer;
+  private HttpServer2 httpServer;
   private final Configuration conf;
   private final NameNode nn;
   
@@ -68,7 +68,7 @@ public class NameNodeHttpServer {
   }
 
   private void initWebHdfs(Configuration conf) throws IOException {
-    if (WebHdfsFileSystem.isEnabled(conf, HttpServer.LOG)) {
+    if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
       // set user pattern based on configuration file
       UserParam.setUserPattern(conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
 
@@ -77,9 +77,9 @@ public class NameNodeHttpServer {
       final String classname = AuthFilter.class.getName();
       final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
       Map<String, String> params = getAuthFilterParams(conf);
-      HttpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params,
+      HttpServer2.defineFilter(httpServer.getWebAppContext(), name, classname, params,
           new String[]{pathSpec});
-      HttpServer.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
+      HttpServer2.LOG.info("Added filter '" + name + "' (class=" + classname + ")");
 
       // add webhdfs packages
       httpServer.addJerseyResourcePackage(
@@ -103,7 +103,7 @@ public class NameNodeHttpServer {
         DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
 
-    HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+    HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
         httpAddr, httpsAddr, "hdfs",
         DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
         DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
@@ -152,7 +152,7 @@ public class NameNodeHttpServer {
               SecurityUtil.getServerPrincipal(principalInConf,
                                               bindAddress.getHostName()));
     } else if (UserGroupInformation.isSecurityEnabled()) {
-      HttpServer.LOG.error(
+      HttpServer2.LOG.error(
           "WebHDFS and security are enabled, but configuration property '" +
           DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY +
           "' is not set.");
@@ -164,7 +164,7 @@ public class NameNodeHttpServer {
           DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
           httpKeytab);
     } else if (UserGroupInformation.isSecurityEnabled()) {
-      HttpServer.LOG.error(
+      HttpServer2.LOG.error(
           "WebHDFS and security are enabled, but configuration property '" +
           DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY +
           "' is not set.");
@@ -214,7 +214,7 @@ public class NameNodeHttpServer {
     httpServer.setAttribute(STARTUP_PROGRESS_ATTRIBUTE_KEY, prog);
   }
 
-  private static void setupServlets(HttpServer httpServer, Configuration conf) {
+  private static void setupServlets(HttpServer2 httpServer, Configuration conf) {
     httpServer.addInternalServlet("startupProgress",
         StartupProgressServlet.PATH_SPEC, StartupProgressServlet.class);
     httpServer.addInternalServlet("getDelegationToken",

+ 10 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -65,7 +65,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.http.HttpConfig;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -90,7 +90,7 @@ import com.google.common.collect.ImmutableList;
  * The Secondary NameNode is a daemon that periodically wakes
  * up (determined by the schedule specified in the configuration),
  * triggers a periodic checkpoint and then goes back to sleep.
- * The Secondary NameNode uses the ClientProtocol to talk to the
+ * The Secondary NameNode uses the NamenodeProtocol to talk to the
  * primary NameNode.
  *
  **********************************************************/
@@ -113,7 +113,7 @@ public class SecondaryNameNode implements Runnable {
   private Configuration conf;
   private InetSocketAddress nameNodeAddr;
   private volatile boolean shouldRun;
-  private HttpServer infoServer;
+  private HttpServer2 infoServer;
   private URL imageListenURL;
 
   private Collection<URI> checkpointDirs;
@@ -257,7 +257,7 @@ public class SecondaryNameNode implements Runnable {
         DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);
 
-    HttpServer.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
+    HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
         httpAddr, httpsAddr, "secondary",
         DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
         DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
@@ -1001,7 +1001,12 @@ public class SecondaryNameNode implements Runnable {
             sig.mostRecentCheckpointTxId + " even though it should have " +
             "just been downloaded");
       }
-      dstImage.reloadFromImageFile(file, dstNamesystem);
+      dstNamesystem.writeLock();
+      try {
+        dstImage.reloadFromImageFile(file, dstNamesystem);
+      } finally {
+        dstNamesystem.writeUnlock();
+      }
       dstNamesystem.dir.imageLoadComplete();
     }
     // error simulation code for junit test

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java

@@ -620,7 +620,7 @@ public class CacheAdmin extends Configured implements Tool {
           "directives being added to the pool. This can be specified in " +
           "seconds, minutes, hours, and days, e.g. 120s, 30m, 4h, 2d. " +
           "Valid units are [smhd]. By default, no maximum is set. " +
-          "This can also be manually specified by \"never\".");
+          "A value of \"never\" specifies that there is no limit.");
       return getShortUsage() + "\n" +
           "Add a new cache pool.\n\n" + 
           listing.toString();

+ 8 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java

@@ -185,8 +185,8 @@ public class DelegationTokenFetcher {
             } else {
               // otherwise we are fetching
               if (webUrl != null) {
-                Credentials creds = getDTfromRemote(connectionFactory, new URI(webUrl),
-                    renewer);
+                Credentials creds = getDTfromRemote(connectionFactory, new URI(
+                    webUrl), renewer, null);
                 creds.writeTokenStorageFile(tokenFile, conf);
                 for (Token<?> token : creds.getAllTokens()) {
                   if(LOG.isDebugEnabled()) {	
@@ -213,12 +213,17 @@ public class DelegationTokenFetcher {
   }
   
   static public Credentials getDTfromRemote(URLConnectionFactory factory,
-      URI nnUri, String renewer) throws IOException {
+      URI nnUri, String renewer, String proxyUser) throws IOException {
     StringBuilder buf = new StringBuilder(nnUri.toString())
         .append(GetDelegationTokenServlet.PATH_SPEC);
+    String separator = "?";
     if (renewer != null) {
       buf.append("?").append(GetDelegationTokenServlet.RENEWER).append("=")
           .append(renewer);
+      separator = "&";
+    }
+    if (proxyUser != null) {
+      buf.append(separator).append("doas=").append(proxyUser);
     }
 
     boolean isHttps = nnUri.getScheme().equals("https");
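
The new proxyUser argument is appended to GetDelegationTokenServlet.PATH_SPEC as a doas query parameter, with the separator switching from "?" to "&" once a renewer parameter has already been added. A standalone sketch of that query-string assembly; the servlet constants are replaced with literals here, and the class and method names are illustrative only.

    final class DelegationTokenUrls {
      /** Builds the delegation-token query string, e.g. "?renewer=rm&doas=alice". */
      static String delegationTokenQuery(String renewer, String proxyUser) {
        StringBuilder buf = new StringBuilder();
        String separator = "?";
        if (renewer != null) {
          buf.append(separator).append("renewer=").append(renewer);
          separator = "&";
        }
        if (proxyUser != null) {
          buf.append(separator).append("doas=").append(proxyUser);
        }
        return buf.toString();
      }
    }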

+ 49 - 26
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/HftpFileSystem.java

@@ -57,7 +57,6 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Progressable;
@@ -234,17 +233,23 @@ public class HftpFileSystem extends FileSystem
   }
 
   @Override
-  public synchronized Token<?> getDelegationToken(final String renewer
-                                                  ) throws IOException {
+  public synchronized Token<?> getDelegationToken(final String renewer)
+      throws IOException {
     try {
-      //Renew TGT if needed
-      ugi.checkTGTAndReloginFromKeytab();
-      return ugi.doAs(new PrivilegedExceptionAction<Token<?>>() {
+      // Renew TGT if needed
+      UserGroupInformation connectUgi = ugi.getRealUser();
+      final String proxyUser = connectUgi == null ? null : ugi
+          .getShortUserName();
+      if (connectUgi == null) {
+        connectUgi = ugi;
+      }
+      return connectUgi.doAs(new PrivilegedExceptionAction<Token<?>>() {
         @Override
         public Token<?> run() throws IOException {
           Credentials c;
           try {
-            c = DelegationTokenFetcher.getDTfromRemote(connectionFactory, nnUri, renewer);
+            c = DelegationTokenFetcher.getDTfromRemote(connectionFactory,
+                nnUri, renewer, proxyUser);
           } catch (IOException e) {
             if (e.getCause() instanceof ConnectException) {
               LOG.warn("Couldn't connect to " + nnUri +
@@ -299,13 +304,13 @@ public class HftpFileSystem extends FileSystem
    * @return user_shortname,group1,group2...
    */
   private String getEncodedUgiParameter() {
-    StringBuilder ugiParamenter = new StringBuilder(
+    StringBuilder ugiParameter = new StringBuilder(
         ServletUtil.encodeQueryValue(ugi.getShortUserName()));
     for(String g: ugi.getGroupNames()) {
-      ugiParamenter.append(",");
-      ugiParamenter.append(ServletUtil.encodeQueryValue(g));
+      ugiParameter.append(",");
+      ugiParameter.append(ServletUtil.encodeQueryValue(g));
     }
-    return ugiParamenter.toString();
+    return ugiParameter.toString();
   }
 
   /**
@@ -675,30 +680,48 @@ public class HftpFileSystem extends FileSystem
 
   @SuppressWarnings("unchecked")
   @Override
-  public long renewDelegationToken(Token<?> token) throws IOException {
+  public long renewDelegationToken(final Token<?> token) throws IOException {
     // update the kerberos credentials, if they are coming from a keytab
-    UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
-    InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
+    UserGroupInformation connectUgi = ugi.getRealUser();
+    if (connectUgi == null) {
+      connectUgi = ugi;
+    }
     try {
-      return DelegationTokenFetcher.renewDelegationToken(connectionFactory,
-          DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
-          (Token<DelegationTokenIdentifier>) token);
-    } catch (AuthenticationException e) {
+      return connectUgi.doAs(new PrivilegedExceptionAction<Long>() {
+        @Override
+        public Long run() throws Exception {
+          InetSocketAddress serviceAddr = SecurityUtil
+              .getTokenServiceAddr(token);
+          return DelegationTokenFetcher.renewDelegationToken(connectionFactory,
+              DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
+              (Token<DelegationTokenIdentifier>) token);
+        }
+      });
+    } catch (InterruptedException e) {
       throw new IOException(e);
     }
   }
 
   @SuppressWarnings("unchecked")
   @Override
-  public void cancelDelegationToken(Token<?> token) throws IOException {
-    // update the kerberos credentials, if they are coming from a keytab
-    UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
-    InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
+  public void cancelDelegationToken(final Token<?> token) throws IOException {
+    UserGroupInformation connectUgi = ugi.getRealUser();
+    if (connectUgi == null) {
+      connectUgi = ugi;
+    }
     try {
-      DelegationTokenFetcher.cancelDelegationToken(connectionFactory, DFSUtil
-          .createUri(getUnderlyingProtocol(), serviceAddr),
-          (Token<DelegationTokenIdentifier>) token);
-    } catch (AuthenticationException e) {
+      connectUgi.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          InetSocketAddress serviceAddr = SecurityUtil
+              .getTokenServiceAddr(token);
+          DelegationTokenFetcher.cancelDelegationToken(connectionFactory,
+              DFSUtil.createUri(getUnderlyingProtocol(), serviceAddr),
+              (Token<DelegationTokenIdentifier>) token);
+          return null;
+        }
+      });
+    } catch (InterruptedException e) {
       throw new IOException(e);
     }
   }
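
The recurring idiom in this hunk: when the caller's UGI is a proxy user, perform the HTTP call as the real (connecting) user and forward the proxy user's short name as the doas parameter; otherwise run as the current UGI. Condensed into a helper for illustration, not a verbatim excerpt from the patch:

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    final class ProxyUserCallSketch {
      /** Runs {@code action} as the real (connecting) user when {@code ugi} is a proxy user. */
      static <T> T callAsConnectUser(UserGroupInformation ugi,
          PrivilegedExceptionAction<T> action) throws IOException {
        UserGroupInformation connectUgi = ugi.getRealUser();
        if (connectUgi == null) {
          connectUgi = ugi;  // not a proxy user: call as ourselves
        }
        try {
          return connectUgi.doAs(action);
        } catch (InterruptedException e) {
          throw new IOException(e);
        }
      }
    }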

+ 123 - 81
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm

@@ -22,110 +22,140 @@ Centralized Cache Management in HDFS
 
 %{toc|section=1|fromDepth=2|toDepth=4}
 
-* {Background}
-
-  Normally, HDFS relies on the operating system to cache data it reads from disk.
-  However, HDFS can also be configured to use centralized cache management. Under
-  centralized cache management, the HDFS NameNode itself decides which blocks
-  should be cached, and where they should be cached.
-
-  Centralized cache management has several advantages. First of all, it
-  prevents frequently used block files from being evicted from memory. This is
-  particularly important when the size of the working set exceeds the size of
-  main memory, which is true for many big data applications. Secondly, when
-  HDFS decides what should be cached, it can let clients know about this
-  information through the getFileBlockLocations API. Finally, when the DataNode
-  knows a block is locked into memory, it can provide access to that block via
-  mmap.
+* {Overview}
+
+  <Centralized cache management> in HDFS is an explicit caching mechanism that
+  allows users to specify <paths> to be cached by HDFS. The NameNode will
+  communicate with DataNodes that have the desired blocks on disk, and instruct
+  them to cache the blocks in off-heap caches. 
+
+  Centralized cache management in HDFS has many significant advantages.
+
+  [[1]] Explicit pinning prevents frequently used data from being evicted from
+  memory. This is particularly important when the size of the working set
+  exceeds the size of main memory, which is common for many HDFS workloads.
+
+  [[1]] Because DataNode caches are managed by the NameNode, applications can
+  query the set of cached block locations when making task placement decisions.
+  Co-locating a task with a cached block replica improves read performance.
+
+  [[1]] When a block has been cached by a DataNode, clients can use a new,
+  more-efficient, zero-copy read API. Since checksum verification of cached
+  data is done once by the DataNode, clients can incur essentially zero
+  overhead when using this new API.
+
+  [[1]] Centralized caching can improve overall cluster memory utilization.
+  When relying on the OS buffer cache at each DataNode, repeated reads of
+  a block will result in all <n> replicas of the block being pulled into
+  buffer cache. With centralized cache management, a user can explicitly pin
+  only <m> of the <n> replicas, saving <n-m> memory.
 
 * {Use Cases}
 
-  Centralized cache management is most useful for files which are accessed very
-  often. For example, a "fact table" in Hive which is often used in joins is a
-  good candidate for caching. On the other hand, when running a classic
-  "word count" MapReduce job which counts the number of words in each
-  document, there may not be any good candidates for caching, since all the
-  files may be accessed exactly once.
+  Centralized cache management is useful for files that are accessed repeatedly.
+  For example, a small <fact table> in Hive which is often used for joins is a
+  good candidate for caching. On the other hand, caching the input of a
+  <one year reporting query> is probably less useful, since the
+  historical data might only be read once.
+
+  Centralized cache management is also useful for mixed workloads with
+  performance SLAs. Caching the working set of a high-priority workload
+  ensures that it does not contend for disk I/O with a low-priority workload.
 
 * {Architecture}
 
 [images/caching.png] Caching Architecture
 
-  With centralized cache management, the NameNode coordinates all caching
-  across the cluster. It receives cache information from each DataNode via the
-  cache report, a periodic message that describes all the blocks IDs cached on
-  a given DataNode. The NameNode will reply to DataNode heartbeat messages
-  with commands telling it which blocks to cache and which to uncache.
-
-  The NameNode stores a set of path cache directives, which tell it which files
-  to cache. The NameNode also stores a set of cache pools, which are groups of
-  cache directives.  These directives and pools are persisted to the edit log
-  and fsimage, and will be loaded if the cluster is restarted.
+  In this architecture, the NameNode is responsible for coordinating all the
+  DataNode off-heap caches in the cluster. The NameNode periodically receives
+  a <cache report> from each DataNode which describes all the blocks cached
+  on a given DN. The NameNode manages DataNode caches by piggybacking cache and
+  uncache commands on the DataNode heartbeat.
 
-  Periodically, the NameNode rescans the namespace, to see which blocks need to
-  be cached based on the current set of path cache directives. Rescans are also
-  triggered by relevant user actions, such as adding or removing a cache
-  directive or removing a cache pool.
+  The NameNode queries its set of <cache directives> to determine
+  which paths should be cached. Cache directives are persistently stored in the
+  fsimage and edit log, and can be added, removed, and modified via Java and
+  command-line APIs. The NameNode also stores a set of <cache pools>,
+  which are administrative entities used to group cache directives together for
+  resource management and enforcing permissions.
 
-  Cache directives also may specific a numeric cache replication, which is the
-  number of replicas to cache.  This number may be equal to or smaller than the
-  file's block replication.  If multiple cache directives cover the same file
-  with different cache replication settings, then the highest cache replication
-  setting is applied.
+  The NameNode periodically rescans the namespace and active cache directives
+  to determine which blocks need to be cached or uncached and assign caching
+  work to DataNodes. Rescans can also be triggered by user actions like adding
+  or removing a cache directive or removing a cache pool.
 
   We do not currently cache blocks which are under construction, corrupt, or
   otherwise incomplete.  If a cache directive covers a symlink, the symlink
   target is not cached.
 
-  Caching is currently done on a per-file basis, although we would like to add
-  block-level granularity in the future.
+  Caching is currently done at the file or directory level. Block and sub-block
+  caching is an item of future work.
+
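The Architecture text above notes that cache directives and cache pools can be managed through Java APIs as well as the command line. The fragment below is a minimal illustrative sketch of the Java path, modelled on the calls exercised in TestCacheDirectives later in this change set; the pool name, path, permission bits, byte limit, and one-hour expiration are all made-up example values.

import java.io.IOException;
import java.util.Date;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class CachingApiSketch {
  /** Creates a pool and pins one path into it; returns the directive id. */
  static long cachePath(DistributedFileSystem dfs) throws IOException {
    // A pool with UNIX-like permissions and an aggregate byte limit.
    dfs.addCachePool(new CachePoolInfo("examplePool")
        .setMode(new FsPermission((short) 0755))
        .setLimit(64L * 1024 * 1024));
    // A directive for one path; the absolute expiration set here is the
    // programmatic counterpart of the CLI's relative -ttl option.
    return dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/example/fact-table"))
        .setPool("examplePool")
        .setExpiration(CacheDirectiveInfo.Expiration.newAbsolute(
            new Date().getTime() + 60L * 60 * 1000))
        .build());
  }
}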
+* {Concepts}
+
+** {Cache directive}
+
+  A <cache directive> defines a path that should be cached. Paths can be either
+  directories or files. Directories are cached non-recursively, meaning only
+  files in the first-level listing of the directory are cached.
+
+  Directives also specify additional parameters, such as the cache replication
+  factor and expiration time. The replication factor specifies the number of
+  block replicas to cache. If multiple cache directives refer to the same file,
+  the maximum cache replication factor is applied.
 
-* {Interface}
+  The expiration time is specified on the command line as a <time-to-live
+  (TTL)>, a relative expiration time in the future. After a cache directive
+  expires, it is no longer considered by the NameNode when making caching
+  decisions.
 
-  The NameNode stores a list of "cache directives."  These directives contain a
-  path as well as the number of times blocks in that path should be replicated.
+** {Cache pool}
 
-  Paths can be either directories or files. If the path specifies a file, that
-  file is cached. If the path specifies a directory, all the files in the
-  directory will be cached. However, this process is not recursive-- only the
-  direct children of the directory will be cached.
+  A <cache pool> is an administrative entity used to manage groups of cache
+  directives. Cache pools have UNIX-like <permissions>, which restrict which
+  users and groups have access to the pool. Write permissions allow users to
+  add cache directives to and remove them from the pool. Read permissions allow users to
+  list the cache directives in a pool, as well as additional metadata. Execute
+  permissions are unused.
 
-** {hdfs cacheadmin Shell}
+  Cache pools are also used for resource management. Pools can enforce a
+  maximum <limit>, which restricts the number of bytes that can be cached in
+  aggregate by directives in the pool. Normally, the sum of the pool limits
+  will approximately equal the amount of aggregate memory reserved for
+  HDFS caching on the cluster. Cache pools also track a number of statistics
+  to help cluster users determine what is and should be cached.
 
-  Path cache directives can be created by the <<<hdfs cacheadmin
-  -addDirective>>> command and removed via the <<<hdfs cacheadmin
-  -removeDirective>>> command. To list the current path cache directives, use
-  <<<hdfs cacheadmin -listDirectives>>>. Each path cache directive has a
-  unique 64-bit ID number which will not be reused if it is deleted.  To remove
-  all path cache directives with a specified path, use <<<hdfs cacheadmin
-  -removeDirectives>>>.
+  Pools can also enforce a maximum time-to-live. This restricts the maximum
+  expiration time of directives being added to the pool.
 
-  Directives are grouped into "cache pools."  Each cache pool gets a share of
-  the cluster's resources. Additionally, cache pools are used for
-  authentication. Cache pools have a mode, user, and group, similar to regular
-  files. The same authentication rules are applied as for normal files. So, for
-  example, if the mode is 0777, any user can add or remove directives from the
-  cache pool. If the mode is 0644, only the owner can write to the cache pool,
-  but anyone can read from it. And so forth.
+* {<<<cacheadmin>>> command-line interface}
 
-  Cache pools are identified by name. They can be created by the <<<hdfs
-  cacheAdmin -addPool>>> command, modified by the <<<hdfs cacheadmin
-  -modifyPool>>> command, and removed via the <<<hdfs cacheadmin
-  -removePool>>> command. To list the current cache pools, use <<<hdfs
-  cacheAdmin -listPools>>>
+  On the command-line, administrators and users can interact with cache pools
+  and directives via the <<<hdfs cacheadmin>>> subcommand.
+
+  Cache directives are identified by a unique, non-repeating 64-bit integer ID.
+  IDs will not be reused even if a cache directive is later removed.
+
+  Cache pools are identified by a unique string name.
+
+** {Cache directive commands}
 
 *** {addDirective}
 
-  Usage: <<<hdfs cacheadmin -addDirective -path <path> -replication <replication> -pool <pool-name> >>>
+  Usage: <<<hdfs cacheadmin -addDirective -path <path> -pool <pool-name> [-force] [-replication <replication>] [-ttl <time-to-live>]>>>
 
   Add a new cache directive.
 
 *--+--+
 \<path\> | A path to cache. The path can be a directory or a file.
 *--+--+
+\<pool-name\> | The pool to which the directive will be added. You must have write permission on the cache pool in order to add new directives.
+*--+--+
+-force | Skips checking of cache pool resource limits.
+*--+--+
 \<replication\> | The cache replication factor to use. Defaults to 1.
 *--+--+
-\<pool-name\> | The pool to which the directive will be added. You must have write permission on the cache pool in order to add new directives.
+\<time-to-live\> | How long the directive is valid. Can be specified in minutes, hours, and days, e.g. 30m, 4h, 2d. Valid units are [smhd]. "never" indicates a directive that never expires. If unspecified, the directive never expires.
 *--+--+
 
 *** {removeDirective}
@@ -150,7 +180,7 @@ Centralized Cache Management in HDFS
 
 *** {listDirectives}
 
-  Usage: <<<hdfs cacheadmin -listDirectives [-path <path>] [-pool <pool>] >>>
+  Usage: <<<hdfs cacheadmin -listDirectives [-stats] [-path <path>] [-pool <pool>]>>>
 
   List cache directives.
 
@@ -159,10 +189,14 @@ Centralized Cache Management in HDFS
 *--+--+
 \<pool\> | List only path cache directives in that pool.
 *--+--+
+-stats | List path-based cache directive statistics.
+*--+--+
+
+** {Cache pool commands}
 
 *** {addPool}
 
-  Usage: <<<hdfs cacheadmin -addPool <name> [-owner <owner>] [-group <group>] [-mode <mode>] [-weight <weight>] >>>
+  Usage: <<<hdfs cacheadmin -addPool <name> [-owner <owner>] [-group <group>] [-mode <mode>] [-limit <limit>] [-maxTtl <maxTtl>]>>>
 
   Add a new cache pool.
 
@@ -175,12 +209,14 @@ Centralized Cache Management in HDFS
 *--+--+
 \<mode\> | UNIX-style permissions for the pool. Permissions are specified in octal, e.g. 0755. By default, this is set to 0755.
 *--+--+
-\<weight\> | Weight of the pool. This is a relative measure of the importance of the pool used during cache resource management. By default, it is set to 100.
+\<limit\> | The maximum number of bytes that can be cached by directives in this pool, in aggregate. By default, no limit is set.
+*--+--+
+\<maxTtl\> | The maximum allowed time-to-live for directives being added to the pool. This can be specified in seconds, minutes, hours, and days, e.g. 120s, 30m, 4h, 2d. Valid units are [smhd]. By default, no maximum is set. A value of \"never\" specifies that there is no limit.
 *--+--+
 
 *** {modifyPool}
 
-  Usage: <<<hdfs cacheadmin -modifyPool <name> [-owner <owner>] [-group <group>] [-mode <mode>] [-weight <weight>] >>>
+  Usage: <<<hdfs cacheadmin -modifyPool <name> [-owner <owner>] [-group <group>] [-mode <mode>] [-limit <limit>] [-maxTtl <maxTtl>]>>>
 
   Modifies the metadata of an existing cache pool.
 
@@ -193,7 +229,9 @@ Centralized Cache Management in HDFS
 *--+--+
 \<mode\> | Unix-style permissions of the pool in octal.
 *--+--+
-\<weight\> | Weight of the pool.
+\<limit\> | Maximum number of bytes that can be cached by this pool.
+*--+--+
+\<maxTtl\> | The maximum allowed time-to-live for directives being added to the pool.
 *--+--+
 
 *** {removePool}
@@ -208,11 +246,13 @@ Centralized Cache Management in HDFS
 
 *** {listPools}
 
-  Usage: <<<hdfs cacheadmin -listPools [name] >>>
+  Usage: <<<hdfs cacheadmin -listPools [-stats] [<name>]>>>
 
   Display information about one or more cache pools, e.g. name, owner, group,
   permissions, etc.
 
+*--+--+
+-stats | Display additional cache pool statistics.
 *--+--+
 \<name\> | If specified, list only the named cache pool.
 *--+--+
@@ -244,10 +284,12 @@ Centralized Cache Management in HDFS
 
   * dfs.datanode.max.locked.memory
 
-    The DataNode will treat this as the maximum amount of memory it can use for
-    its cache. When setting this value, please remember that you will need space
-    in memory for other things, such as the Java virtual machine (JVM) itself
-    and the operating system's page cache.
+    This determines the maximum amount of memory a DataNode will use for caching.
+    The "locked-in-memory size" ulimit (<<<ulimit -l>>>) of the DataNode user
+    also needs to be increased to match this parameter (see below section on
+    {{OS Limits}}). When setting this value, please remember that you will need
+    space in memory for other things as well, such as the DataNode and
+    application JVM heaps and the operating system page cache.
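    As a purely illustrative aside on the paragraph above: in test or embedded setups the same property can be set on a Hadoop Configuration object before starting the DataNode. The 128 MB figure is an arbitrary example, the value is assumed to be in bytes, and the <<<ulimit -l>>> adjustment described above still has to be made at the operating-system level.

    import org.apache.hadoop.conf.Configuration;

    public class LockedMemoryConfSketch {
      static Configuration withCacheMemory() {
        Configuration conf = new Configuration();
        // Example value only: let the DataNode lock up to 128 MB for caching.
        // The DataNode user's "ulimit -l" must be at least this large.
        conf.setLong("dfs.datanode.max.locked.memory", 128L * 1024 * 1024);
        return conf;
      }
    }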
 
 *** Optional
 

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm

@@ -19,8 +19,6 @@
 
 HDFS Federation
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
   This guide provides an overview of the HDFS Federation feature and

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm

@@ -18,8 +18,6 @@
 
 HDFS High Availability
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * {Purpose}

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm

@@ -18,8 +18,6 @@
 
 HDFS High Availability Using the Quorum Journal Manager
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * {Purpose}

+ 41 - 41
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsDesign.apt.vm

@@ -17,11 +17,11 @@
   ---
   ${maven.build.timestamp}
 
-%{toc|section=1|fromDepth=0}
-
 HDFS Architecture
 
-Introduction
+%{toc|section=1|fromDepth=0}
+
+* Introduction
 
    The Hadoop Distributed File System (HDFS) is a distributed file system
    designed to run on commodity hardware. It has many similarities with
@@ -35,9 +35,9 @@ Introduction
    is part of the Apache Hadoop Core project. The project URL is
    {{http://hadoop.apache.org/}}.
 
-Assumptions and Goals
+* Assumptions and Goals
 
-Hardware Failure
+** Hardware Failure
 
    Hardware failure is the norm rather than the exception. An HDFS
    instance may consist of hundreds or thousands of server machines, each
@@ -47,7 +47,7 @@ Hardware Failure
    non-functional. Therefore, detection of faults and quick, automatic
    recovery from them is a core architectural goal of HDFS.
 
-Streaming Data Access
+** Streaming Data Access
 
    Applications that run on HDFS need streaming access to their data sets.
    They are not general purpose applications that typically run on general
@@ -58,7 +58,7 @@ Streaming Data Access
    targeted for HDFS. POSIX semantics in a few key areas has been traded
    to increase data throughput rates.
 
-Large Data Sets
+** Large Data Sets
 
    Applications that run on HDFS have large data sets. A typical file in
    HDFS is gigabytes to terabytes in size. Thus, HDFS is tuned to support
@@ -66,7 +66,7 @@ Large Data Sets
    to hundreds of nodes in a single cluster. It should support tens of
    millions of files in a single instance.
 
-Simple Coherency Model
+** Simple Coherency Model
 
    HDFS applications need a write-once-read-many access model for files. A
    file once created, written, and closed need not be changed. This
@@ -75,7 +75,7 @@ Simple Coherency Model
    perfectly with this model. There is a plan to support appending-writes
    to files in the future.
 
-“Moving Computation is Cheaper than Moving Data”
+** “Moving Computation is Cheaper than Moving Data”
 
    A computation requested by an application is much more efficient if it
    is executed near the data it operates on. This is especially true when
@@ -86,13 +86,13 @@ Simple Coherency Model
    running. HDFS provides interfaces for applications to move themselves
    closer to where the data is located.
 
-Portability Across Heterogeneous Hardware and Software Platforms
+** Portability Across Heterogeneous Hardware and Software Platforms
 
    HDFS has been designed to be easily portable from one platform to
    another. This facilitates widespread adoption of HDFS as a platform of
    choice for a large set of applications.
 
-NameNode and DataNodes
+* NameNode and DataNodes
 
    HDFS has a master/slave architecture. An HDFS cluster consists of a
    single NameNode, a master server that manages the file system namespace
@@ -127,7 +127,7 @@ NameNode and DataNodes
    repository for all HDFS metadata. The system is designed in such a way
    that user data never flows through the NameNode.
 
-The File System Namespace
+* The File System Namespace
 
    HDFS supports a traditional hierarchical file organization. A user or
    an application can create directories and store files inside these
@@ -145,7 +145,7 @@ The File System Namespace
    replication factor of that file. This information is stored by the
    NameNode.
 
-Data Replication
+* Data Replication
 
    HDFS is designed to reliably store very large files across machines in
    a large cluster. It stores each file as a sequence of blocks; all
@@ -164,7 +164,7 @@ Data Replication
 
 [images/hdfsdatanodes.png] HDFS DataNodes
 
-Replica Placement: The First Baby Steps
+** Replica Placement: The First Baby Steps
 
    The placement of replicas is critical to HDFS reliability and
    performance. Optimizing replica placement distinguishes HDFS from most
@@ -210,7 +210,7 @@ Replica Placement: The First Baby Steps
    The current, default replica placement policy described here is a work
    in progress.
 
-Replica Selection
+** Replica Selection
 
    To minimize global bandwidth consumption and read latency, HDFS tries
    to satisfy a read request from a replica that is closest to the reader.
@@ -219,7 +219,7 @@ Replica Selection
    cluster spans multiple data centers, then a replica that is resident in
    the local data center is preferred over any remote replica.
 
-Safemode
+** Safemode
 
    On startup, the NameNode enters a special state called Safemode.
    Replication of data blocks does not occur when the NameNode is in the
@@ -234,7 +234,7 @@ Safemode
    blocks (if any) that still have fewer than the specified number of
    replicas. The NameNode then replicates these blocks to other DataNodes.
 
-The Persistence of File System Metadata
+* The Persistence of File System Metadata
 
    The HDFS namespace is stored by the NameNode. The NameNode uses a
    transaction log called the EditLog to persistently record every change
@@ -273,7 +273,7 @@ The Persistence of File System Metadata
    each of these local files and sends this report to the NameNode: this
    is the Blockreport.
 
-The Communication Protocols
+* The Communication Protocols
 
    All HDFS communication protocols are layered on top of the TCP/IP
    protocol. A client establishes a connection to a configurable TCP port
@@ -284,13 +284,13 @@ The Communication Protocols
    RPCs. Instead, it only responds to RPC requests issued by DataNodes or
    clients.
 
-Robustness
+* Robustness
 
    The primary objective of HDFS is to store data reliably even in the
    presence of failures. The three common types of failures are NameNode
    failures, DataNode failures and network partitions.
 
-Data Disk Failure, Heartbeats and Re-Replication
+** Data Disk Failure, Heartbeats and Re-Replication
 
    Each DataNode sends a Heartbeat message to the NameNode periodically. A
    network partition can cause a subset of DataNodes to lose connectivity
@@ -306,7 +306,7 @@ Data Disk Failure, Heartbeats and Re-Replication
    corrupted, a hard disk on a DataNode may fail, or the replication
    factor of a file may be increased.
 
-Cluster Rebalancing
+** Cluster Rebalancing
 
    The HDFS architecture is compatible with data rebalancing schemes. A
    scheme might automatically move data from one DataNode to another if
@@ -316,7 +316,7 @@ Cluster Rebalancing
    cluster. These types of data rebalancing schemes are not yet
    implemented.
 
-Data Integrity
+** Data Integrity
 
    It is possible that a block of data fetched from a DataNode arrives
    corrupted. This corruption can occur because of faults in a storage
@@ -330,7 +330,7 @@ Data Integrity
    to retrieve that block from another DataNode that has a replica of that
    block.
 
-Metadata Disk Failure
+** Metadata Disk Failure
 
    The FsImage and the EditLog are central data structures of HDFS. A
    corruption of these files can cause the HDFS instance to be
@@ -350,16 +350,16 @@ Metadata Disk Failure
    Currently, automatic restart and failover of the NameNode software to
    another machine is not supported.
 
-Snapshots
+** Snapshots
 
    Snapshots support storing a copy of data at a particular instant of
    time. One usage of the snapshot feature may be to roll back a corrupted
    HDFS instance to a previously known good point in time. HDFS does not
    currently support snapshots but will in a future release.
 
-Data Organization
+* Data Organization
 
-Data Blocks
+** Data Blocks
 
    HDFS is designed to support very large files. Applications that are
    compatible with HDFS are those that deal with large data sets. These
@@ -370,7 +370,7 @@ Data Blocks
    chunks, and if possible, each chunk will reside on a different
    DataNode.
 
-Staging
+** Staging
 
    A client request to create a file does not reach the NameNode
    immediately. In fact, initially the HDFS client caches the file data
@@ -397,7 +397,7 @@ Staging
    side caching to improve performance. A POSIX requirement has been
    relaxed to achieve higher performance of data uploads.
 
-Replication Pipelining
+** Replication Pipelining
 
    When a client is writing data to an HDFS file, its data is first
    written to a local file as explained in the previous section. Suppose
@@ -406,7 +406,7 @@ Replication Pipelining
    DataNodes from the NameNode. This list contains the DataNodes that will
    host a replica of that block. The client then flushes the data block to
    the first DataNode. The first DataNode starts receiving the data in
-   small portions (4 KB), writes each portion to its local repository and
+   small portions, writes each portion to its local repository and
    transfers that portion to the second DataNode in the list. The second
    DataNode, in turn starts receiving each portion of the data block,
    writes that portion to its repository and then flushes that portion to
@@ -416,7 +416,7 @@ Replication Pipelining
    the next one in the pipeline. Thus, the data is pipelined from one
    DataNode to the next.
 
-Accessibility
+* Accessibility
 
    HDFS can be accessed from applications in many different ways.
    Natively, HDFS provides a
@@ -426,7 +426,7 @@ Accessibility
    of an HDFS instance. Work is in progress to expose HDFS through the WebDAV
    protocol.
 
-FS Shell
+** FS Shell
 
    HDFS allows user data to be organized in the form of files and
    directories. It provides a commandline interface called FS shell that
@@ -447,7 +447,7 @@ FS Shell
    FS shell is targeted for applications that need a scripting language to
    interact with the stored data.
 
-DFSAdmin
+** DFSAdmin
 
    The DFSAdmin command set is used for administering an HDFS cluster.
    These are commands that are used only by an HDFS administrator. Here
@@ -463,16 +463,16 @@ DFSAdmin
 |Recommission or decommission DataNode(s) | <<<bin/hadoop dfsadmin -refreshNodes>>>
 *---------+---------+
 
-Browser Interface
+** Browser Interface
 
    A typical HDFS install configures a web server to expose the HDFS
    namespace through a configurable TCP port. This allows a user to
    navigate the HDFS namespace and view the contents of its files using a
    web browser.
 
-Space Reclamation
+* Space Reclamation
 
-File Deletes and Undeletes
+** File Deletes and Undeletes
 
    When a file is deleted by a user or an application, it is not
    immediately removed from HDFS. Instead, HDFS first renames it to a file
@@ -490,12 +490,12 @@ File Deletes and Undeletes
    file. The <<</trash>>> directory contains only the latest copy of the file
    that was deleted. The <<</trash>>> directory is just like any other directory
    with one special feature: HDFS applies specified policies to
-   automatically delete files from this directory. The current default
-   policy is to delete files from <<</trash>>> that are more than 6 hours old.
-   In the future, this policy will be configurable through a well defined
-   interface.
+   automatically delete files from this directory. The current default trash
+   interval is 0, which means deleted files are removed immediately rather
+   than being kept in trash. This behaviour is controlled by the
+   <<<fs.trash.interval>>> parameter in core-site.xml.
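   For illustration only, the same parameter can be set on a Configuration object in test-style code; any non-zero value enables the trash behaviour described above, and the value 60 below is an arbitrary example (consult core-default.xml for the exact units and semantics).

   import org.apache.hadoop.conf.Configuration;

   public class TrashConfSketch {
     static Configuration withTrashEnabled() {
       Configuration conf = new Configuration();
       // 0 (the default) deletes files immediately; a non-zero interval
       // keeps deleted files under /trash until the interval elapses.
       conf.setLong("fs.trash.interval", 60);  // example value
       return conf;
     }
   }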
 
-Decrease Replication Factor
+** Decrease Replication Factor
 
    When the replication factor of a file is reduced, the NameNode selects
    excess replicas that can be deleted. The next Heartbeat transfers this
@@ -505,7 +505,7 @@ Decrease Replication Factor
    of the setReplication API call and the appearance of free space in the
    cluster.
 
-References
+* References
 
    Hadoop {{{http://hadoop.apache.org/docs/current/api/}JavaDoc API}}.
 

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsEditsViewer.apt.vm

@@ -20,8 +20,6 @@
 
 Offline Edits Viewer Guide
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Overview

+ 3 - 5
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsImageViewer.apt.vm

@@ -18,8 +18,6 @@
 
 Offline Image Viewer Guide
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Overview
@@ -64,9 +62,9 @@ Offline Image Viewer Guide
       but no data recorded. The default record delimiter is a tab, but
       this may be changed via the -delimiter command line argument. This
       processor is designed to create output that is easily analyzed by
-      other tools, such as [36]Apache Pig. See the [37]Analyzing Results
-      section for further information on using this processor to analyze
-      the contents of fsimage files.
+      other tools, such as {{{http://pig.apache.org}Apache Pig}}. See
+      the {{Analyzing Results}} section for further information on using
+      this processor to analyze the contents of fsimage files.
 
    [[4]] XML creates an XML document of the fsimage and includes all of the
       information within the fsimage, similar to the lsr processor. The

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsPermissionsGuide.apt.vm

@@ -18,8 +18,6 @@
 
 HDFS Permissions Guide
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Overview
@@ -55,8 +53,10 @@ HDFS Permissions Guide
 
      * If the user name matches the owner of foo, then the owner
        permissions are tested;
+
      * Else if the group of foo matches any of member of the groups list,
        then the group permissions are tested;
+
      * Otherwise the other permissions of foo are tested.
 
    If a permissions check fails, the client operation fails.

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsQuotaAdminGuide.apt.vm

@@ -18,8 +18,6 @@
 
 HDFS Quotas Guide
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Overview

+ 44 - 33
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm

@@ -108,9 +108,11 @@ HDFS Users Guide
    The following documents describe how to install and set up a Hadoop
    cluster:
 
-     * {{Single Node Setup}} for first-time users.
+     * {{{../hadoop-common/SingleCluster.html}Single Node Setup}}
+       for first-time users.
 
-     * {{Cluster Setup}} for large, distributed clusters.
+     * {{{../hadoop-common/ClusterSetup.html}Cluster Setup}}
+       for large, distributed clusters.
 
    The rest of this document assumes the user is able to set up and run a
    HDFS with at least one DataNode. For the purpose of this document, both
@@ -136,7 +138,8 @@ HDFS Users Guide
    for a command. These commands support most of the normal files system
    operations like copying files, changing file permissions, etc. It also
    supports a few HDFS specific operations like changing replication of
-   files. For more information see {{{File System Shell Guide}}}.
+   files. For more information see {{{../hadoop-common/FileSystemShell.html}
+   File System Shell Guide}}.
 
 **  DFSAdmin Command
 
@@ -169,7 +172,7 @@ HDFS Users Guide
       of racks and datanodes attached to the racks as viewed by the
        NameNode.
 
-   For command usage, see {{{dfsadmin}}}.
+   For command usage, see {{{../hadoop-common/CommandsManual.html#dfsadmin}dfsadmin}}.
 
 * Secondary NameNode
 
@@ -203,7 +206,8 @@ HDFS Users Guide
    So that the check pointed image is always ready to be read by the
    primary NameNode if necessary.
 
-   For command usage, see {{{secondarynamenode}}}.
+   For command usage,
+   see {{{../hadoop-common/CommandsManual.html#secondarynamenode}secondarynamenode}}.
 
 * Checkpoint Node
 
@@ -245,7 +249,7 @@ HDFS Users Guide
    Multiple checkpoint nodes may be specified in the cluster configuration
    file.
 
-   For command usage, see {{{namenode}}}.
+   For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
 
 * Backup Node
 
@@ -287,7 +291,7 @@ HDFS Users Guide
 
    For a complete discussion of the motivation behind the creation of the
    Backup node and Checkpoint node, see {{{https://issues.apache.org/jira/browse/HADOOP-4539}HADOOP-4539}}.
-   For command usage, see {{{namenode}}}.
+   For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
 
 * Import Checkpoint
 
@@ -310,7 +314,7 @@ HDFS Users Guide
    verifies that the image in <<<dfs.namenode.checkpoint.dir>>> is consistent,
    but does not modify it in any way.
 
-   For command usage, see {{{namenode}}}.
+   For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
 
 * Rebalancer
 
@@ -337,7 +341,7 @@ HDFS Users Guide
    A brief administrator's guide for rebalancer as a PDF is attached to
    {{{https://issues.apache.org/jira/browse/HADOOP-1652}HADOOP-1652}}.
 
-   For command usage, see {{{balancer}}}.
+   For command usage, see {{{../hadoop-common/CommandsManual.html#balancer}balancer}}.
 
 * Rack Awareness
 
@@ -379,8 +383,9 @@ HDFS Users Guide
    most of the recoverable failures. By default fsck ignores open files
    but provides an option to select all files during reporting. The HDFS
    fsck command is not a Hadoop shell command. It can be run as
-   <<<bin/hadoop fsck>>>. For command usage, see {{{fsck}}}. fsck can be run on the
-   whole file system or on a subset of files.
+   <<<bin/hadoop fsck>>>. For command usage, see 
+   {{{../hadoop-common/CommandsManual.html#fsck}fsck}}. fsck can be run on
+   the whole file system or on a subset of files.
 
 * fetchdt
 
@@ -393,7 +398,8 @@ HDFS Users Guide
    command. It can be run as <<<bin/hadoop fetchdt DTfile>>>. After you got
    the token you can run an HDFS command without having Kerberos tickets,
    by pointing <<<HADOOP_TOKEN_FILE_LOCATION>>> environmental variable to the
-   delegation token file. For command usage, see {{{fetchdt}}} command.
+   delegation token file. For command usage, see
+   {{{../hadoop-common/CommandsManual.html#fetchdt}fetchdt}} command.
 
 * Recovery Mode
 
@@ -427,10 +433,11 @@ HDFS Users Guide
    let alone to restart HDFS from scratch. HDFS allows administrators to
    go back to earlier version of Hadoop and rollback the cluster to the
    state it was in before the upgrade. HDFS upgrade is described in more
-   detail in {{{Hadoop Upgrade}}} Wiki page. HDFS can have one such backup at a
-   time. Before upgrading, administrators need to remove existing backup
-   using bin/hadoop dfsadmin <<<-finalizeUpgrade>>> command. The following
-   briefly describes the typical upgrade procedure:
+   detail in {{{http://wiki.apache.org/hadoop/Hadoop_Upgrade}Hadoop Upgrade}}
+   Wiki page. HDFS can have one such backup at a time. Before upgrading,
+   administrators need to remove the existing backup using bin/hadoop dfsadmin
+   <<<-finalizeUpgrade>>> command. The following briefly describes the
+   typical upgrade procedure:
 
     * Before upgrading Hadoop software, finalize if there is an existing
        backup. <<<dfsadmin -upgradeProgress>>> status can tell if the cluster
@@ -450,7 +457,7 @@ HDFS Users Guide
 
           * stop the cluster and distribute earlier version of Hadoop.
 
-          * start the cluster with rollback option. (<<<bin/start-dfs.h -rollback>>>).
+          * start the cluster with rollback option. (<<<bin/start-dfs.sh -rollback>>>).
 
 * File Permissions and Security
 
@@ -465,14 +472,15 @@ HDFS Users Guide
 * Scalability
 
    Hadoop currently runs on clusters with thousands of nodes. The
-   {{{PoweredBy}}} Wiki page lists some of the organizations that deploy Hadoop
-   on large clusters. HDFS has one NameNode for each cluster. Currently
-   the total memory available on NameNode is the primary scalability
-   limitation. On very large clusters, increasing average size of files
-   stored in HDFS helps with increasing cluster size without increasing
-   memory requirements on NameNode. The default configuration may not
-   suite very large clustes. The {{{FAQ}}} Wiki page lists suggested
-   configuration improvements for large Hadoop clusters.
+   {{{http://wiki.apache.org/hadoop/PoweredBy}PoweredBy}} Wiki page lists
+   some of the organizations that deploy Hadoop on large clusters.
+   HDFS has one NameNode for each cluster. Currently the total memory
+   available on NameNode is the primary scalability limitation.
+   On very large clusters, increasing average size of files stored in
+   HDFS helps with increasing cluster size without increasing memory
+   requirements on NameNode. The default configuration may not suit
+   very large clusters. The {{{http://wiki.apache.org/hadoop/FAQ}FAQ}}
+   Wiki page lists suggested configuration improvements for large Hadoop clusters.
 
 * Related Documentation
 
@@ -481,19 +489,22 @@ HDFS Users Guide
    documentation about Hadoop and HDFS. The following list is a starting
    point for further exploration:
 
-     * {{{Hadoop Site}}}: The home page for the Apache Hadoop site.
+     * {{{http://hadoop.apache.org}Hadoop Site}}: The home page for
+       the Apache Hadoop site.
 
-     * {{{Hadoop Wiki}}}: The home page (FrontPage) for the Hadoop Wiki. Unlike
+     * {{{http://wiki.apache.org/hadoop/FrontPage}Hadoop Wiki}}:
+       The home page (FrontPage) for the Hadoop Wiki. Unlike
        the released documentation, which is part of Hadoop source tree,
        Hadoop Wiki is regularly edited by Hadoop Community.
 
-     * {{{FAQ}}}: The FAQ Wiki page.
+     * {{{http://wiki.apache.org/hadoop/FAQ}FAQ}}: The FAQ Wiki page.
 
-     * {{{Hadoop JavaDoc API}}}.
+     * {{{../../api/index.html}Hadoop JavaDoc API}}.
 
-     * {{{Hadoop User Mailing List}}}: core-user[at]hadoop.apache.org.
+     * Hadoop User Mailing List: user[at]hadoop.apache.org.
 
-     * Explore {{{src/hdfs/hdfs-default.xml}}}. It includes brief description of
-       most of the configuration variables available.
+     * Explore {{{./hdfs-default.xml}hdfs-default.xml}}. It includes
+       brief description of most of the configuration variables available.
 
-     * {{{Hadoop Commands Guide}}}: Hadoop commands usage.
+     * {{{../hadoop-common/CommandsManual.html}Hadoop Commands Guide}}:
+       Hadoop commands usage.

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Hftp.apt.vm

@@ -18,8 +18,6 @@
 
 HFTP Guide
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * Introduction

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/ShortCircuitLocalReads.apt.vm

@@ -19,8 +19,6 @@
 
 HDFS Short-Circuit Local Reads
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * {Background}

+ 6 - 8
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm

@@ -18,8 +18,6 @@
 
 WebHDFS REST API
 
-  \[ {{{./index.html}Go Back}} \]
-
 %{toc|section=1|fromDepth=0}
 
 * {Document Conventions}
@@ -54,7 +52,7 @@ WebHDFS REST API
     * {{{Status of a File/Directory}<<<GETFILESTATUS>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getFileStatus)
 
-    * {{<<<LISTSTATUS>>>}}
+    * {{{List a Directory}<<<LISTSTATUS>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listStatus)
 
     * {{{Get Content Summary of a Directory}<<<GETCONTENTSUMMARY>>>}}
@@ -109,7 +107,7 @@ WebHDFS REST API
     * {{{Append to a File}<<<APPEND>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.append)
 
-    * {{{Concatenate Files}<<<CONCAT>>>}}
+    * {{{Concat File(s)}<<<CONCAT>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.concat)
 
   * HTTP DELETE
@@ -871,7 +869,7 @@ Content-Length: 0
 * {Error Responses}
 
   When an operation fails, the server may throw an exception.
-  The JSON schema of error responses is defined in {{<<<RemoteException>>> JSON schema}}.
+  The JSON schema of error responses is defined in {{RemoteException JSON Schema}}.
   The table below shows the mapping from exceptions to HTTP response codes.
 
 ** {HTTP Response Codes}
@@ -1119,7 +1117,7 @@ Transfer-Encoding: chunked
   See also:
   {{{FileStatus Properties}<<<FileStatus>>> Properties}},
   {{{Status of a File/Directory}<<<GETFILESTATUS>>>}},
-  {{{../../api/org/apache/hadoop/fs/FileStatus}FileStatus}}
+  {{{../../api/org/apache/hadoop/fs/FileStatus.html}FileStatus}}
 
 
 *** {FileStatus Properties}
@@ -1232,7 +1230,7 @@ var fileStatusProperties =
   See also:
   {{{FileStatus Properties}<<<FileStatus>>> Properties}},
   {{{List a Directory}<<<LISTSTATUS>>>}},
-  {{{../../api/org/apache/hadoop/fs/FileStatus}FileStatus}}
+  {{{../../api/org/apache/hadoop/fs/FileStatus.html}FileStatus}}
 
 
 ** {Long JSON Schema}
@@ -1275,7 +1273,7 @@ var fileStatusProperties =
 
   See also:
   {{{Get Home Directory}<<<GETHOMEDIRECTORY>>>}},
-  {{{../../api/org/apache/hadoop/fs/Path}Path}}
+  {{{../../api/org/apache/hadoop/fs/Path.html}Path}}
 
 
 ** {RemoteException JSON Schema}

+ 14 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java

@@ -118,6 +118,20 @@ public class TestDFSUtil {
     assertEquals(0, bs.length);
   }
 
+  /**
+   * Test constructing LocatedBlock with null cachedLocs
+   */
+  @Test
+  public void testLocatedBlockConstructorWithNullCachedLocs() {
+    DatanodeInfo d = DFSTestUtil.getLocalDatanodeInfo();
+    DatanodeInfo[] ds = new DatanodeInfo[1];
+    ds[0] = d;
+    
+    ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
+    LocatedBlock l1 = new LocatedBlock(b1, ds, null, null, 0, false, null);
+    final DatanodeInfo[] cachedLocs = l1.getCachedLocations();
+    assertTrue(cachedLocs.length == 0);
+  }
 
   private Configuration setupAddress(String key) {
     HdfsConfiguration conf = new HdfsConfiguration();

+ 11 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java

@@ -71,7 +71,7 @@ public class TestFSOutputSummer {
     cleanupFile(name);
   }
   
-  /* create a file, write data with vairable amount of data */
+  /* create a file, write data with variable amount of data */
   private void writeFile3(Path name) throws Exception {
     FSDataOutputStream stm = fileSys.create(name, true, 
         fileSys.getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, 4096),
@@ -103,6 +103,8 @@ public class TestFSOutputSummer {
     stm.readFully(0, actual);
     checkAndEraseData(actual, 0, expected, "Read Sanity Test");
     stm.close();
+    // do a sanity check. Get the file checksum
+    fileSys.getFileChecksum(name);
   }
 
   private void cleanupFile(Path name) throws IOException {
@@ -112,13 +114,20 @@ public class TestFSOutputSummer {
   }
   
   /**
-   * Test write opeation for output stream in DFS.
+   * Test write operation for output stream in DFS.
    */
   @Test
   public void testFSOutputSummer() throws Exception {
+    doTestFSOutputSummer("CRC32");
+    doTestFSOutputSummer("CRC32C");
+    doTestFSOutputSummer("NULL");
+  }
+  
+  private void doTestFSOutputSummer(String checksumType) throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
+    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, checksumType);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                                .numDataNodes(NUM_OF_DATANODES)
                                                .build();

+ 103 - 68
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java

@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolStats;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -528,77 +529,111 @@ public class TestCacheDirectives {
 
   @Test(timeout=60000)
   public void testCacheManagerRestart() throws Exception {
-    // Create and validate a pool
-    final String pool = "poolparty";
-    String groupName = "partygroup";
-    FsPermission mode = new FsPermission((short)0777);
-    long limit = 747;
-    dfs.addCachePool(new CachePoolInfo(pool)
-        .setGroupName(groupName)
-        .setMode(mode)
-        .setLimit(limit));
-    RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
-    assertTrue("No cache pools found", pit.hasNext());
-    CachePoolInfo info = pit.next().getInfo();
-    assertEquals(pool, info.getPoolName());
-    assertEquals(groupName, info.getGroupName());
-    assertEquals(mode, info.getMode());
-    assertEquals(limit, (long)info.getLimit());
-    assertFalse("Unexpected # of cache pools found", pit.hasNext());
-  
-    // Create some cache entries
-    int numEntries = 10;
-    String entryPrefix = "/party-";
-    long prevId = -1;
-    final Date expiry = new Date();
-    for (int i=0; i<numEntries; i++) {
-      prevId = dfs.addCacheDirective(
-          new CacheDirectiveInfo.Builder().
-            setPath(new Path(entryPrefix + i)).setPool(pool).
-            setExpiration(
-                CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).
-            build());
-    }
-    RemoteIterator<CacheDirectiveEntry> dit
-        = dfs.listCacheDirectives(null);
-    for (int i=0; i<numEntries; i++) {
-      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
-      CacheDirectiveInfo cd = dit.next().getInfo();
-      assertEquals(i+1, cd.getId().longValue());
-      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
-      assertEquals(pool, cd.getPool());
-    }
-    assertFalse("Unexpected # of cache directives found", dit.hasNext());
-  
-    // Restart namenode
-    cluster.restartNameNode();
+    SecondaryNameNode secondary = null;
+    try {
+      // Start a secondary namenode
+      conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+          "0.0.0.0:0");
+      secondary = new SecondaryNameNode(conf);
   
-    // Check that state came back up
-    pit = dfs.listCachePools();
-    assertTrue("No cache pools found", pit.hasNext());
-    info = pit.next().getInfo();
-    assertEquals(pool, info.getPoolName());
-    assertEquals(pool, info.getPoolName());
-    assertEquals(groupName, info.getGroupName());
-    assertEquals(mode, info.getMode());
-    assertEquals(limit, (long)info.getLimit());
-    assertFalse("Unexpected # of cache pools found", pit.hasNext());
+      // Create and validate a pool
+      final String pool = "poolparty";
+      String groupName = "partygroup";
+      FsPermission mode = new FsPermission((short)0777);
+      long limit = 747;
+      dfs.addCachePool(new CachePoolInfo(pool)
+          .setGroupName(groupName)
+          .setMode(mode)
+          .setLimit(limit));
+      RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
+      assertTrue("No cache pools found", pit.hasNext());
+      CachePoolInfo info = pit.next().getInfo();
+      assertEquals(pool, info.getPoolName());
+      assertEquals(groupName, info.getGroupName());
+      assertEquals(mode, info.getMode());
+      assertEquals(limit, (long)info.getLimit());
+      assertFalse("Unexpected # of cache pools found", pit.hasNext());
+    
+      // Create some cache entries
+      int numEntries = 10;
+      String entryPrefix = "/party-";
+      long prevId = -1;
+      final Date expiry = new Date();
+      for (int i=0; i<numEntries; i++) {
+        prevId = dfs.addCacheDirective(
+            new CacheDirectiveInfo.Builder().
+              setPath(new Path(entryPrefix + i)).setPool(pool).
+              setExpiration(
+                  CacheDirectiveInfo.Expiration.newAbsolute(expiry.getTime())).
+              build());
+      }
+      RemoteIterator<CacheDirectiveEntry> dit
+          = dfs.listCacheDirectives(null);
+      for (int i=0; i<numEntries; i++) {
+        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
+        CacheDirectiveInfo cd = dit.next().getInfo();
+        assertEquals(i+1, cd.getId().longValue());
+        assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
+        assertEquals(pool, cd.getPool());
+      }
+      assertFalse("Unexpected # of cache directives found", dit.hasNext());
+      
+      // Checkpoint once to set some cache pools and directives on 2NN side
+      secondary.doCheckpoint();
+      
+      // Add some more CacheManager state
+      final String imagePool = "imagePool";
+      dfs.addCachePool(new CachePoolInfo(imagePool));
+      prevId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
+        .setPath(new Path("/image")).setPool(imagePool).build());
+
+      // Save a new image to force a fresh fsimage download
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      dfs.saveNamespace();
+      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+      // Checkpoint again forcing a reload of FSN state
+      boolean fetchImage = secondary.doCheckpoint();
+      assertTrue("Secondary should have fetched a new fsimage from NameNode",
+          fetchImage);
+
+      // Remove temp pool and directive
+      dfs.removeCachePool(imagePool);
+
+      // Restart namenode
+      cluster.restartNameNode();
+    
+      // Check that state came back up
+      pit = dfs.listCachePools();
+      assertTrue("No cache pools found", pit.hasNext());
+      info = pit.next().getInfo();
+      assertEquals(pool, info.getPoolName());
+      assertEquals(pool, info.getPoolName());
+      assertEquals(groupName, info.getGroupName());
+      assertEquals(mode, info.getMode());
+      assertEquals(limit, (long)info.getLimit());
+      assertFalse("Unexpected # of cache pools found", pit.hasNext());
+    
+      dit = dfs.listCacheDirectives(null);
+      for (int i=0; i<numEntries; i++) {
+        assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
+        CacheDirectiveInfo cd = dit.next().getInfo();
+        assertEquals(i+1, cd.getId().longValue());
+        assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
+        assertEquals(pool, cd.getPool());
+        assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
+      }
+      assertFalse("Unexpected # of cache directives found", dit.hasNext());
   
-    dit = dfs.listCacheDirectives(null);
-    for (int i=0; i<numEntries; i++) {
-      assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
-      CacheDirectiveInfo cd = dit.next().getInfo();
-      assertEquals(i+1, cd.getId().longValue());
-      assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
-      assertEquals(pool, cd.getPool());
-      assertEquals(expiry.getTime(), cd.getExpiration().getMillis());
+      long nextId = dfs.addCacheDirective(
+            new CacheDirectiveInfo.Builder().
+              setPath(new Path("/foobar")).setPool(pool).build());
+      assertEquals(prevId + 1, nextId);
+    } finally {
+      if (secondary != null) {
+        secondary.shutdown();
+      }
     }
-    assertFalse("Unexpected # of cache directives found", dit.hasNext());
-
-    long nextId = dfs.addCacheDirective(
-          new CacheDirectiveInfo.Builder().
-            setPath(new Path("/foobar")).setPool(pool).build());
-    assertEquals(prevId + 1, nextId);
   }
 
   /**

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java

@@ -1634,7 +1634,7 @@ public class TestCheckpoint {
    * Test that the secondary namenode correctly deletes temporary edits
    * on startup.
    */
-  @Test(timeout = 30000)
+  @Test(timeout = 60000)
   public void testDeleteTemporaryEditsOnStartup() throws IOException {
     Configuration conf = new HdfsConfiguration();
     SecondaryNameNode secondary = null;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetImageServlet.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.authorize.AccessControlList;
@@ -66,7 +66,7 @@ public class TestGetImageServlet {
     AccessControlList acls = Mockito.mock(AccessControlList.class);
     Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false);
     ServletContext context = Mockito.mock(ServletContext.class);
-    Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+    Mockito.when(context.getAttribute(HttpServer2.ADMINS_ACL)).thenReturn(acls);
     
     // Make sure that NN2 is considered a valid fsimage/edits requestor.
     assertTrue(GetImageServlet.isValidRequestor(context,

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java

@@ -37,7 +37,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.http.HttpServerFunctionalTest;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -119,7 +119,7 @@ public class TestTransferFsImage {
    */
   @Test(timeout = 5000)
   public void testImageTransferTimeout() throws Exception {
-    HttpServer testServer = HttpServerFunctionalTest.createServer("hdfs");
+    HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs");
     try {
       testServer.addServlet("GetImage", "/getimage", TestGetImageServlet.class);
       testServer.start();

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java

@@ -58,7 +58,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.ipc.ProtobufRpcEngine.Server;
 import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -89,7 +89,7 @@ public class SnapshotTestHelper {
     setLevel2OFF(LogFactory.getLog(MetricsSystemImpl.class));
     
     setLevel2OFF(DataBlockScanner.LOG);
-    setLevel2OFF(HttpServer.LOG);
+    setLevel2OFF(HttpServer2.LOG);
     setLevel2OFF(DataNode.LOG);
     setLevel2OFF(BlockPoolSliceStorage.LOG);
     setLevel2OFF(LeaseManager.LOG);

+ 78 - 69
hadoop-mapreduce-project/CHANGES.txt

@@ -82,9 +82,6 @@ Trunk (Unreleased)
 
   BUG FIXES
 
-    MAPREDUCE-4272. SortedRanges.Range#compareTo is not spec compliant.
-    (Yu Gao via llu)
-
     MAPREDUCE-3194. "mapred mradmin" command is broken in mrv2
                      (Jason Lowe via bobby)
 
@@ -130,15 +127,9 @@ Trunk (Unreleased)
     MAPREDUCE-4574. Fix TotalOrderParitioner to work with
     non-WritableComparable key types. (harsh)
 
-    MAPREDUCE-4884. Streaming tests fail to start MiniMRCluster due to missing
-    queue configuration. (Chris Nauroth via suresh)
-
     MAPREDUCE-5012. Typo in javadoc for IdentityMapper class. (Adam Monsen
     via suresh)
 
-    MAPREDUCE-4885. Streaming tests have multiple failures on Windows. (Chris
-    Nauroth via bikas)
-
     MAPREDUCE-4987. TestMRJobs#testDistributedCache fails on Windows due to
     classpath problems and unexpected behavior of symlinks (Chris Nauroth via
     bikas)
@@ -152,6 +143,24 @@ Release 2.4.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+    MAPREDUCE-5464. Add analogs of the SLOTS_MILLIS counters that jive with the
+    YARN resource model (Sandy Ryza)
+
+    MAPREDUCE-5732. Report proper queue when job has been automatically placed
+    (Sandy Ryza)
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
+Release 2.3.0 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
   NEW FEATURES
 
     MAPREDUCE-5265. History server admin service to refresh user and superuser
@@ -169,6 +178,19 @@ Release 2.4.0 - UNRELEASED
     MAPREDUCE-5332. Support token-preserving restart of history server (jlowe)
 
   IMPROVEMENTS
+  
+    MAPREDUCE-5329. Allow MR applications to use additional AuxServices,
+    which are compatible with the default MapReduce shuffle.
+    (Avner BenHanoch via sseth)
+
+    MAPREDUCE-5463. Deprecate SLOTS_MILLIS counters (Tzuyoshi Ozawa via Sandy
+    Ryza)
+
+    MAPREDUCE-5457. Add a KeyOnlyTextOutputReader to enable streaming to write
+    out text files without separators (Sandy Ryza)
+
+    MAPREDUCE-5596. Allow configuring the number of threads used to serve
+    shuffle connections (Sandy Ryza via jlowe)
 
     MAPREDUCE-434. LocalJobRunner limited to single reducer (Sandy Ryza and
     Aaron Kimball via Sandy Ryza)
@@ -208,6 +230,9 @@ Release 2.4.0 - UNRELEASED
 
   OPTIMIZATIONS
 
+    MAPREDUCE-4680. Job history cleaner should only check timestamps of files in
+    old enough directories (Robert Kanter via Sandy Ryza)
+
     MAPREDUCE-5484. YarnChild unnecessarily loads job conf twice (Sandy Ryza)
 
     MAPREDUCE-5487. In task processes, JobConf is unnecessarily loaded again
@@ -218,6 +243,37 @@ Release 2.4.0 - UNRELEASED
 
   BUG FIXES
 
+    MAPREDUCE-5569. FloatSplitter is not generating correct splits (Nathan
+    Roberts via jlowe)
+
+    MAPREDUCE-5546. mapred.cmd on Windows set HADOOP_OPTS incorrectly (Chuan Liu
+    via cnauroth)
+
+    MAPREDUCE-5518. Fixed typo "can't read paritions file". (Albert Chu
+    via devaraj)
+
+    MAPREDUCE-5561. org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl
+    testcase failing on trunk (Karthik Kambatla via jlowe)
+
+    MAPREDUCE-5598. TestUserDefinedCounters.testMapReduceJob is flakey
+    (Robert Kanter via jlowe)
+
+    MAPREDUCE-5604. TestMRAMWithNonNormalizedCapabilities fails on Windows due to
+    exceeding max path length. (cnauroth)
+
+    MAPREDUCE-5451. MR uses LD_LIBRARY_PATH which doesn't mean anything in
+    Windows. (Yingda Chen via cnauroth)
+
+    MAPREDUCE-5409. MRAppMaster throws InvalidStateTransitonException: Invalid
+    event: TA_TOO_MANY_FETCH_FAILURE at KILLED for TaskAttemptImpl (Gera
+    Shegalov via jlowe)
+
+    MAPREDUCE-5674. Missing start and finish time in mapred.JobStatus.
+    (Chuan Liu via cnauroth)
+
+    MAPREDUCE-5650. Job fails when hprof mapreduce.task.profile.map/reduce.params
+    is specified (Gera Shegalov via Sandy Ryza)
+
     MAPREDUCE-5316. job -list-attempt-ids command does not handle illegal
     task-state (Ashwin Shankar via jlowe)
 
@@ -291,65 +347,6 @@ Release 2.4.0 - UNRELEASED
     MAPREDUCE-5723. MR AM container log can be truncated or empty.
     (Mohammad Kamrul Islam via kasha)
 
-Release 2.3.0 - UNRELEASED
-
-  INCOMPATIBLE CHANGES
-
-  NEW FEATURES
-
-  IMPROVEMENTS
-  
-    MAPREDUCE-5329. Allow MR applications to use additional AuxServices,
-    which are compatible with the default MapReduce shuffle.
-    (Avner BenHanoch via sseth)
-
-    MAPREDUCE-5463. Deprecate SLOTS_MILLIS counters (Tzuyoshi Ozawa via Sandy
-    Ryza)
-
-    MAPREDUCE-5457. Add a KeyOnlyTextOutputReader to enable streaming to write
-    out text files without separators (Sandy Ryza)
-
-    MAPREDUCE-5596. Allow configuring the number of threads used to serve
-    shuffle connections (Sandy Ryza via jlowe)
-
-  OPTIMIZATIONS
-
-    MAPREDUCE-4680. Job history cleaner should only check timestamps of files in
-    old enough directories (Robert Kanter via Sandy Ryza)
-
-  BUG FIXES
-
-    MAPREDUCE-5569. FloatSplitter is not generating correct splits (Nathan
-    Roberts via jlowe)
-
-    MAPREDUCE-5546. mapred.cmd on Windows set HADOOP_OPTS incorrectly (Chuan Liu
-    via cnauroth)
-
-    MAPREDUCE-5518. Fixed typo "can't read paritions file". (Albert Chu
-    via devaraj)
-
-    MAPREDUCE-5561. org.apache.hadoop.mapreduce.v2.app.job.impl.TestJobImpl
-    testcase failing on trunk (Karthik Kambatla via jlowe)
-
-    MAPREDUCE-5598. TestUserDefinedCounters.testMapReduceJob is flakey
-    (Robert Kanter via jlowe)
-
-    MAPREDUCE-5604. TestMRAMWithNonNormalizedCapabilities fails on Windows due to
-    exceeding max path length. (cnauroth)
-
-    MAPREDUCE-5451. MR uses LD_LIBRARY_PATH which doesn't mean anything in
-    Windows. (Yingda Chen via cnauroth)
-
-    MAPREDUCE-5409. MRAppMaster throws InvalidStateTransitonException: Invalid
-    event: TA_TOO_MANY_FETCH_FAILURE at KILLED for TaskAttemptImpl (Gera
-    Shegalov via jlowe)
-
-    MAPREDUCE-5674. Missing start and finish time in mapred.JobStatus.
-    (Chuan Liu via cnauroth)
-
-    MAPREDUCE-5650. Job fails when hprof mapreduce.task.profile.map/reduce.params
-    is specified (Gera Shegalov via Sandy Ryza)
-
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES
@@ -996,9 +993,15 @@ Release 2.1.0-beta - 2013-08-22
     HADOOP-9372. Fix bad timeout annotations on tests.
     (Arpit Agarwal via suresh)
 
+    MAPREDUCE-4885. Streaming tests have multiple failures on Windows. (Chris
+    Nauroth via bikas)
+
     MAPREDUCE-5177. Use common utils FileUtil#setReadable/Writable/Executable & 
     FileUtil#canRead/Write/Execute. (Ivan Mitic via suresh)
 
+    MAPREDUCE-5349. TestClusterMapReduceTestCase and TestJobName fail on Windows
+    in branch-2. (Chuan Liu via cnauroth)
+
     MAPREDUCE-5355. MiniMRYarnCluster with localFs does not work on Windows.
     (Chuan Liu via cnauroth)
 
@@ -1149,6 +1152,9 @@ Release 2.0.3-alpha - 2013-02-06
 
   BUG FIXES
 
+    MAPREDUCE-4272. SortedRanges.Range#compareTo is not spec compliant.
+    (Yu Gao via llu)
+
     MAPREDUCE-4607. Race condition in ReduceTask completion can result in Task
     being incorrectly failed. (Bikas Saha via tomwhite)
 
@@ -1211,6 +1217,9 @@ Release 2.0.3-alpha - 2013-02-06
     MAPREDUCE-4969. TestKeyValueTextInputFormat test fails with Open JDK 7.
     (Arpit Agarwal via suresh)
 
+    MAPREDUCE-4884. Streaming tests fail to start MiniMRCluster due to missing
+    queue configuration. (Chris Nauroth via suresh)
+
     MAPREDUCE-4953. HadoopPipes misuses fprintf. (Andy Isaacson via atm)
 
 Release 2.0.2-alpha - 2012-09-07 
@@ -1219,7 +1228,7 @@ Release 2.0.2-alpha - 2012-09-07
 
   NEW FEATURES
 
-    MAPREDUCE-987. Exposing MiniDFS and MiniMR clusters as a single process 
+    MAPREDUCE-987. Exposing MiniDFS and MiniMR clusters as a single process
     command-line. (ahmed via tucu)
 
     MAPREDUCE-4417. add support for encrypted shuffle (tucu)

+ 6 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java

@@ -525,6 +525,12 @@ public class JobHistoryEventHandler extends AbstractService
         JobInitedEvent jie = (JobInitedEvent) event.getHistoryEvent();
         mi.getJobIndexInfo().setJobStartTime(jie.getLaunchTime());
       }
+      
+      if (event.getHistoryEvent().getEventType() == EventType.JOB_QUEUE_CHANGED) {
+        JobQueueChangeEvent jQueueEvent =
+            (JobQueueChangeEvent) event.getHistoryEvent();
+        mi.getJobIndexInfo().setQueueName(jQueueEvent.getJobQueueName());
+      }
 
       // If this is JobFinishedEvent, close the writer and setup the job-index
       if (event.getHistoryEvent().getEventType() == EventType.JOB_FINISHED) {

+ 3 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java

@@ -39,7 +39,7 @@ import org.apache.hadoop.security.authorize.AccessControlList;
 
 
 /**
- * Main interface to interact with the job. Provides only getters. 
+ * Main interface to interact with the job.
  */
 public interface Job {
 
@@ -98,4 +98,6 @@ public interface Job {
   List<AMInfo> getAMInfos();
   
   boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation);
+  
+  public void setQueueName(String queueName);
 }

+ 9 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java

@@ -59,6 +59,7 @@ import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
 import org.apache.hadoop.mapreduce.jobhistory.JobInfoChangeEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobInitedEvent;
+import org.apache.hadoop.mapreduce.jobhistory.JobQueueChangeEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobSubmittedEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobUnsuccessfulCompletionEvent;
 import org.apache.hadoop.mapreduce.lib.chain.ChainMapper;
@@ -181,7 +182,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
   private final EventHandler eventHandler;
   private final MRAppMetrics metrics;
   private final String userName;
-  private final String queueName;
+  private String queueName;
   private final long appSubmitTime;
   private final AppContext appContext;
 
@@ -1123,6 +1124,13 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
     return queueName;
   }
   
+  @Override
+  public void setQueueName(String queueName) {
+    this.queueName = queueName;
+    JobQueueChangeEvent jqce = new JobQueueChangeEvent(oldJobId, queueName);
+    eventHandler.handle(new JobHistoryEvent(jobId, jqce));
+  }
+  
   /*
    * (non-Javadoc)
    * @see org.apache.hadoop.mapreduce.v2.app.job.Job#getConfFile()

+ 26 - 34
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java

@@ -1265,57 +1265,56 @@ public abstract class TaskAttemptImpl implements
       }
     }
   }
-
-  private static long computeSlotMillis(TaskAttemptImpl taskAttempt) {
+  
+  private static void updateMillisCounters(JobCounterUpdateEvent jce,
+      TaskAttemptImpl taskAttempt) {
     TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
-    int slotMemoryReq =
+    long duration = (taskAttempt.getFinishTime() - taskAttempt.getLaunchTime());
+    int mbRequired =
         taskAttempt.getMemoryRequired(taskAttempt.conf, taskType);
+    int vcoresRequired = taskAttempt.getCpuRequired(taskAttempt.conf, taskType);
 
     int minSlotMemSize = taskAttempt.conf.getInt(
       YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
       YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
 
     int simSlotsRequired =
-        minSlotMemSize == 0 ? 0 : (int) Math.ceil((float) slotMemoryReq
+        minSlotMemSize == 0 ? 0 : (int) Math.ceil((float) mbRequired
             / minSlotMemSize);
 
-    long slotMillisIncrement =
-        simSlotsRequired
-            * (taskAttempt.getFinishTime() - taskAttempt.getLaunchTime());
-    return slotMillisIncrement;
+    if (taskType == TaskType.MAP) {
+      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, simSlotsRequired * duration);
+      jce.addCounterUpdate(JobCounter.MB_MILLIS_MAPS, duration * mbRequired);
+      jce.addCounterUpdate(JobCounter.VCORES_MILLIS_MAPS, duration * vcoresRequired);
+      jce.addCounterUpdate(JobCounter.MILLIS_MAPS, duration);
+    } else {
+      jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, simSlotsRequired * duration);
+      jce.addCounterUpdate(JobCounter.MB_MILLIS_REDUCES, duration * mbRequired);
+      jce.addCounterUpdate(JobCounter.VCORES_MILLIS_REDUCES, duration * vcoresRequired);
+      jce.addCounterUpdate(JobCounter.MILLIS_REDUCES, duration);
+    }
   }
 
   private static JobCounterUpdateEvent createJobCounterUpdateEventTASucceeded(
       TaskAttemptImpl taskAttempt) {
-    long slotMillis = computeSlotMillis(taskAttempt);
     TaskId taskId = taskAttempt.attemptId.getTaskId();
     JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskId.getJobId());
-    jce.addCounterUpdate(
-      taskId.getTaskType() == TaskType.MAP ?
-        JobCounter.SLOTS_MILLIS_MAPS : JobCounter.SLOTS_MILLIS_REDUCES,
-        slotMillis);
+    updateMillisCounters(jce, taskAttempt);
     return jce;
   }
-
+  
   private static JobCounterUpdateEvent createJobCounterUpdateEventTAFailed(
       TaskAttemptImpl taskAttempt, boolean taskAlreadyCompleted) {
     TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
     JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
     
-    long slotMillisIncrement = computeSlotMillis(taskAttempt);
-    
     if (taskType == TaskType.MAP) {
       jce.addCounterUpdate(JobCounter.NUM_FAILED_MAPS, 1);
-      if(!taskAlreadyCompleted) {
-        // dont double count the elapsed time
-        jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
-      }
     } else {
       jce.addCounterUpdate(JobCounter.NUM_FAILED_REDUCES, 1);
-      if(!taskAlreadyCompleted) {
-        // dont double count the elapsed time
-        jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
-      }
+    }
+    if (!taskAlreadyCompleted) {
+      updateMillisCounters(jce, taskAttempt);
     }
     return jce;
   }
@@ -1325,20 +1324,13 @@ public abstract class TaskAttemptImpl implements
     TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
     JobCounterUpdateEvent jce = new JobCounterUpdateEvent(taskAttempt.getID().getTaskId().getJobId());
     
-    long slotMillisIncrement = computeSlotMillis(taskAttempt);
-    
     if (taskType == TaskType.MAP) {
       jce.addCounterUpdate(JobCounter.NUM_KILLED_MAPS, 1);
-      if(!taskAlreadyCompleted) {
-        // dont double count the elapsed time
-        jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_MAPS, slotMillisIncrement);
-      }
     } else {
       jce.addCounterUpdate(JobCounter.NUM_KILLED_REDUCES, 1);
-      if(!taskAlreadyCompleted) {
-        // dont double count the elapsed time
-        jce.addCounterUpdate(JobCounter.SLOTS_MILLIS_REDUCES, slotMillisIncrement);
-      }
+    }
+    if (!taskAlreadyCompleted) {
+      updateMillisCounters(jce, taskAttempt);
     }
     return jce;
   }  

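For readers following the counter change above: the sketch below restates the arithmetic of the new updateMillisCounters for a single map attempt, using the inputs exercised by TestTaskAttempt#testMillisCountersUpdate further down in this diff (2048 MB memory request, 1024 MB minimum allocation, 1 vcore, 1 ms of run time). It is an illustration only, not code from the patch, and the class name is invented for this example.

    // Standalone illustration of the counter math introduced above; the concrete
    // numbers are the test inputs, not a formula beyond what the patch shows.
    public class MillisCountersExample {
      public static void main(String[] args) {
        long launchTime = 10, finishTime = 11;   // controlled-clock values from the test
        long duration = finishTime - launchTime; // 1 ms
        int mbRequired = 2048;                   // map memory request (MB)
        int vcoresRequired = 1;                  // map vcore request
        int minSlotMemSize = 1024;               // yarn.scheduler.minimum-allocation-mb

        int simSlotsRequired =
            minSlotMemSize == 0 ? 0 : (int) Math.ceil((float) mbRequired / minSlotMemSize);

        System.out.println("SLOTS_MILLIS_MAPS  = " + simSlotsRequired * duration); // 2
        System.out.println("MB_MILLIS_MAPS     = " + duration * mbRequired);       // 2048
        System.out.println("VCORES_MILLIS_MAPS = " + duration * vcoresRequired);   // 1
        System.out.println("MILLIS_MAPS        = " + duration);                    // 1
      }
    }

The printed values match the assertions added to TestTaskAttempt later in this diff.
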
+ 5 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java

@@ -109,11 +109,11 @@ public abstract class RMCommunicator extends AbstractService
   @Override
   protected void serviceStart() throws Exception {
     scheduler= createSchedulerProxy();
-    register();
-    startAllocatorThread();
     JobID id = TypeConverter.fromYarn(this.applicationId);
     JobId jobId = TypeConverter.toYarn(id);
     job = context.getJob(jobId);
+    register();
+    startAllocatorThread();
     super.serviceStart();
   }
 
@@ -161,6 +161,9 @@ public abstract class RMCommunicator extends AbstractService
       }
       this.applicationACLs = response.getApplicationACLs();
       LOG.info("maxContainerCapability: " + maxContainerCapability.getMemory());
+      String queue = response.getQueue();
+      LOG.info("queue: " + queue);
+      job.setQueueName(queue);
     } catch (Exception are) {
       LOG.error("Exception while registering", are);
       throw new YarnRuntimeException(are);

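A note on the reordering above, not stated in the patch itself: register() now reads the queue from the RM's registration response and pushes it into the Job via setQueueName(), so the job field must be resolved before register() runs. The toy sketch below uses invented names (nothing from Hadoop) purely to illustrate that ordering constraint.

    // Toy illustration of the ordering constraint; all names are made up.
    public class StartOrderSketch {
      private Object job;   // stands in for RMCommunicator's job field

      private void register(String queueFromRm) {
        // With the old order this ran while job was still unset.
        if (job == null) {
          throw new IllegalStateException("job must be looked up before register()");
        }
        System.out.println("job.setQueueName(" + queueFromRm + ")");
      }

      public void serviceStart() {
        job = new Object();   // resolve the job first...
        register("default");  // ...then register with the RM and propagate the queue
      }

      public static void main(String[] args) {
        new StartOrderSketch().serviceStart();
      }
    }
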
+ 9 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestEvents.java

@@ -81,6 +81,15 @@ public class TestEvents {
     assertEquals(test.getPriority(), JobPriority.LOW);
 
   }
+  
+  @Test(timeout = 10000)
+  public void testJobQueueChange() throws Exception {
+    org.apache.hadoop.mapreduce.JobID jid = new JobID("001", 1);
+    JobQueueChangeEvent test = new JobQueueChangeEvent(jid,
+        "newqueue");
+    assertEquals(test.getJobId().toString(), jid.toString());
+    assertEquals(test.getJobQueueName(), "newqueue");
+  }
 
   /**
    * simple test TaskUpdatedEvent and TaskUpdated

+ 22 - 8
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java

@@ -117,6 +117,9 @@ public class MRApp extends MRAppMaster {
   private File testWorkDir;
   private Path testAbsPath;
   private ClusterInfo clusterInfo;
+  
+  // Queue to pretend the RM assigned us
+  private String assignedQueue;
 
   public static String NM_HOST = "localhost";
   public static int NM_PORT = 1234;
@@ -133,7 +136,7 @@ public class MRApp extends MRAppMaster {
 
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
       boolean cleanOnStart, Clock clock) {
-    this(maps, reduces, autoComplete, testName, cleanOnStart, 1, clock);
+    this(maps, reduces, autoComplete, testName, cleanOnStart, 1, clock, null);
   }
 
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
@@ -146,6 +149,12 @@ public class MRApp extends MRAppMaster {
       boolean cleanOnStart) {
     this(maps, reduces, autoComplete, testName, cleanOnStart, 1);
   }
+  
+  public MRApp(int maps, int reduces, boolean autoComplete, String testName,
+      boolean cleanOnStart, String assignedQueue) {
+    this(maps, reduces, autoComplete, testName, cleanOnStart, 1,
+        new SystemClock(), assignedQueue);
+  }
 
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
       boolean cleanOnStart, boolean unregistered) {
@@ -178,7 +187,7 @@ public class MRApp extends MRAppMaster {
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
       boolean cleanOnStart, int startCount) {
     this(maps, reduces, autoComplete, testName, cleanOnStart, startCount,
-        new SystemClock());
+        new SystemClock(), null);
   }
 
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
@@ -191,33 +200,34 @@ public class MRApp extends MRAppMaster {
       boolean cleanOnStart, int startCount, Clock clock, boolean unregistered) {
     this(getApplicationAttemptId(applicationId, startCount), getContainerId(
       applicationId, startCount), maps, reduces, autoComplete, testName,
-      cleanOnStart, startCount, clock, unregistered);
+      cleanOnStart, startCount, clock, unregistered, null);
   }
 
   public MRApp(int maps, int reduces, boolean autoComplete, String testName,
-      boolean cleanOnStart, int startCount, Clock clock) {
+      boolean cleanOnStart, int startCount, Clock clock, String assignedQueue) {
     this(getApplicationAttemptId(applicationId, startCount), getContainerId(
       applicationId, startCount), maps, reduces, autoComplete, testName,
-      cleanOnStart, startCount, clock, true);
+      cleanOnStart, startCount, clock, true, assignedQueue);
   }
 
   public MRApp(ApplicationAttemptId appAttemptId, ContainerId amContainerId,
       int maps, int reduces, boolean autoComplete, String testName,
       boolean cleanOnStart, int startCount, boolean unregistered) {
     this(appAttemptId, amContainerId, maps, reduces, autoComplete, testName,
-        cleanOnStart, startCount, new SystemClock(), unregistered);
+        cleanOnStart, startCount, new SystemClock(), unregistered, null);
   }
 
   public MRApp(ApplicationAttemptId appAttemptId, ContainerId amContainerId,
       int maps, int reduces, boolean autoComplete, String testName,
       boolean cleanOnStart, int startCount) {
     this(appAttemptId, amContainerId, maps, reduces, autoComplete, testName,
-        cleanOnStart, startCount, new SystemClock(), true);
+        cleanOnStart, startCount, new SystemClock(), true, null);
   }
 
   public MRApp(ApplicationAttemptId appAttemptId, ContainerId amContainerId,
       int maps, int reduces, boolean autoComplete, String testName,
-      boolean cleanOnStart, int startCount, Clock clock, boolean unregistered) {
+      boolean cleanOnStart, int startCount, Clock clock, boolean unregistered,
+      String assignedQueue) {
     super(appAttemptId, amContainerId, NM_HOST, NM_PORT, NM_HTTP_PORT, clock, System
         .currentTimeMillis(), MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS);
     this.testWorkDir = new File("target", testName);
@@ -239,6 +249,7 @@ public class MRApp extends MRAppMaster {
     // If safeToReportTerminationToUser is set to true, we can verify whether
     // the job can reaches the final state when MRAppMaster shuts down.
     this.successfullyUnregistered.set(unregistered);
+    this.assignedQueue = assignedQueue;
   }
 
   @Override
@@ -285,6 +296,9 @@ public class MRApp extends MRAppMaster {
     start();
     DefaultMetricsSystem.shutdown();
     Job job = getContext().getAllJobs().values().iterator().next();
+    if (assignedQueue != null) {
+      job.setQueueName(assignedQueue);
+    }
 
     // Write job.xml
     String jobFile = MRApps.getJobFile(conf, user,

+ 1 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java

@@ -39,6 +39,7 @@ public class MockAppContext implements AppContext {
   final Map<JobId, Job> jobs;
   final long startTime = System.currentTimeMillis();
   Set<String> blacklistedNodes;
+  String queue;
   
   public MockAppContext(int appid) {
     appID = MockJobs.newAppID(appid);

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java

@@ -629,6 +629,11 @@ public class MockJobs extends MockApps {
         jobConf.addResource(fc.open(configFile), configFile.toString());
         return jobConf;
       }
+
+      @Override
+      public void setQueueName(String queueName) {
+        // do nothing
+      }
     };
   }
 

+ 6 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java

@@ -37,7 +37,7 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobContext;
 import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -199,7 +199,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
 
   @Test
   public void testNotificationOnLastRetryNormalShutdown() throws Exception {
-    HttpServer server = startHttpServer();
+    HttpServer2 server = startHttpServer();
     // Act like it is the second attempt. Default max attempts is 2
     MRApp app = spy(new MRAppWithCustomContainerAllocator(
         2, 2, true, this.getClass().getName(), true, 2, true));
@@ -223,7 +223,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
   @Test
   public void testAbsentNotificationOnNotLastRetryUnregistrationFailure()
       throws Exception {
-    HttpServer server = startHttpServer();
+    HttpServer2 server = startHttpServer();
     MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
         this.getClass().getName(), true, 1, false));
     doNothing().when(app).sysexit();
@@ -250,7 +250,7 @@ public class TestJobEndNotifier extends JobEndNotifier {
   @Test
   public void testNotificationOnLastRetryUnregistrationFailure()
       throws Exception {
-    HttpServer server = startHttpServer();
+    HttpServer2 server = startHttpServer();
     MRApp app = spy(new MRAppWithCustomContainerAllocator(2, 2, false,
         this.getClass().getName(), true, 2, false));
     doNothing().when(app).sysexit();
@@ -274,10 +274,10 @@ public class TestJobEndNotifier extends JobEndNotifier {
     server.stop();
   }
 
-  private static HttpServer startHttpServer() throws Exception {
+  private static HttpServer2 startHttpServer() throws Exception {
     new File(System.getProperty(
         "build.webapps", "build/webapps") + "/test").mkdirs();
-    HttpServer server = new HttpServer.Builder().setName("test")
+    HttpServer2 server = new HttpServer2.Builder().setName("test")
         .addEndpoint(URI.create("http://localhost:0"))
         .setFindPort(true).build();
     server.addServlet("jobend", "/jobend", JobEndServlet.class);

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java

@@ -505,6 +505,11 @@ public class TestRuntimeEstimators {
     public Configuration loadConfFile() {
       throw new UnsupportedOperationException();
     }
+
+    @Override
+    public void setQueueName(String queueName) {
+      // do nothing
+    }
   }
 
   /*

+ 22 - 11
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RawLocalFileSystem;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MapTaskAttemptImpl;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.JobCounter;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
@@ -182,13 +183,13 @@ public class TestTaskAttempt{
   }
 
   @Test
-  public void testSlotMillisCounterUpdate() throws Exception {
-    verifySlotMillis(2048, 2048, 1024);
-    verifySlotMillis(2048, 1024, 1024);
-    verifySlotMillis(10240, 1024, 2048);
+  public void testMillisCountersUpdate() throws Exception {
+    verifyMillisCounters(2048, 2048, 1024);
+    verifyMillisCounters(2048, 1024, 1024);
+    verifyMillisCounters(10240, 1024, 2048);
   }
 
-  public void verifySlotMillis(int mapMemMb, int reduceMemMb,
+  public void verifyMillisCounters(int mapMemMb, int reduceMemMb,
       int minContainerSize) throws Exception {
     Clock actualClock = new SystemClock();
     ControlledClock clock = new ControlledClock(actualClock);
@@ -232,13 +233,23 @@ public class TestTaskAttempt{
     Assert.assertEquals(mta.getLaunchTime(), 10);
     Assert.assertEquals(rta.getFinishTime(), 11);
     Assert.assertEquals(rta.getLaunchTime(), 10);
+    Counters counters = job.getAllCounters();
     Assert.assertEquals((int) Math.ceil((float) mapMemMb / minContainerSize),
-        job.getAllCounters().findCounter(JobCounter.SLOTS_MILLIS_MAPS)
-            .getValue());
-    Assert.assertEquals(
-        (int) Math.ceil((float) reduceMemMb / minContainerSize), job
-            .getAllCounters().findCounter(JobCounter.SLOTS_MILLIS_REDUCES)
-            .getValue());
+        counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue());
+    Assert.assertEquals((int) Math.ceil((float) reduceMemMb / minContainerSize),
+        counters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES).getValue());
+    Assert.assertEquals(1,
+        counters.findCounter(JobCounter.MILLIS_MAPS).getValue());
+    Assert.assertEquals(1,
+        counters.findCounter(JobCounter.MILLIS_REDUCES).getValue());
+    Assert.assertEquals(mapMemMb,
+        counters.findCounter(JobCounter.MB_MILLIS_MAPS).getValue());
+    Assert.assertEquals(reduceMemMb,
+        counters.findCounter(JobCounter.MB_MILLIS_REDUCES).getValue());
+    Assert.assertEquals(1,
+        counters.findCounter(JobCounter.VCORES_MILLIS_MAPS).getValue());
+    Assert.assertEquals(1,
+        counters.findCounter(JobCounter.VCORES_MILLIS_REDUCES).getValue());
   }
 
   private TaskAttemptImpl createMapTaskAttemptImplForTest(

+ 9 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr

@@ -122,6 +122,13 @@
       ]
      },
 
+     {"type": "record", "name": "JobQueueChange",
+      "fields": [
+          {"name": "jobid", "type": "string"},
+          {"name": "jobQueueName", "type": "string"}
+      ]
+     },
+
      {"type": "record", "name": "JobUnsuccessfulCompletion",
       "fields": [
           {"name": "jobid", "type": "string"},
@@ -267,6 +274,7 @@
           "JOB_FINISHED",
           "JOB_PRIORITY_CHANGED",
           "JOB_STATUS_CHANGED",
+          "JOB_QUEUE_CHANGED",
           "JOB_FAILED",
           "JOB_KILLED",
           "JOB_ERROR",
@@ -306,6 +314,7 @@
                "JobInited",
                "AMStarted",
                "JobPriorityChange",
+               "JobQueueChange",
                "JobStatusChanged",
                "JobSubmitted",
                "JobUnsuccessfulCompletion",

+ 7 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobCounter.java

@@ -49,5 +49,11 @@ public enum JobCounter {
   TASKS_REQ_PREEMPT,
   CHECKPOINTS,
   CHECKPOINT_BYTES,
-  CHECKPOINT_TIME
+  CHECKPOINT_TIME,
+  MILLIS_MAPS,
+  MILLIS_REDUCES,
+  VCORES_MILLIS_MAPS,
+  VCORES_MILLIS_REDUCES,
+  MB_MILLIS_MAPS,
+  MB_MILLIS_REDUCES
 }

+ 2 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java

@@ -98,6 +98,8 @@ public class EventReader implements Closeable {
       result = new JobFinishedEvent(); break;
     case JOB_PRIORITY_CHANGED:
       result = new JobPriorityChangeEvent(); break;
+    case JOB_QUEUE_CHANGED:
+      result = new JobQueueChangeEvent(); break;
     case JOB_STATUS_CHANGED:
       result = new JobStatusChangedEvent(); break;
     case JOB_FAILED:

+ 7 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java

@@ -183,6 +183,9 @@ public class JobHistoryParser implements HistoryEventHandler {
     case JOB_PRIORITY_CHANGED:
       handleJobPriorityChangeEvent((JobPriorityChangeEvent) event);
       break;
+    case JOB_QUEUE_CHANGED:
+      handleJobQueueChangeEvent((JobQueueChangeEvent) event);
+      break;
     case JOB_FAILED:
     case JOB_KILLED:
     case JOB_ERROR:
@@ -385,6 +388,10 @@ public class JobHistoryParser implements HistoryEventHandler {
   private void handleJobPriorityChangeEvent(JobPriorityChangeEvent event) {
     info.priority = event.getPriority();
   }
+  
+  private void handleJobQueueChangeEvent(JobQueueChangeEvent event) {
+    info.jobQueueName = event.getJobQueueName();
+  }
 
   private void handleJobInitedEvent(JobInitedEvent event) {
     info.launchTime = event.getLaunchTime();

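To see the consumer side of the new JOB_QUEUE_CHANGED handling above, the sketch below parses a finished job's history file and prints the queue name; after this change the value reflects any queue reassignment rather than only the submit-time queue. The history file path is a placeholder, and the snippet is an illustration rather than part of the patch.

    // Minimal sketch; assumes a .jhist file path is passed as the first argument.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;

    public class QueueFromHistory {
      public static void main(String[] args) throws Exception {
        Path histFile = new Path(args[0]);                       // placeholder path
        FileSystem fs = histFile.getFileSystem(new Configuration());
        JobHistoryParser parser = new JobHistoryParser(fs, histFile);
        JobHistoryParser.JobInfo info = parser.parse();
        // info.jobQueueName is populated by handleJobQueueChangeEvent above
        System.out.println("queue = " + info.getJobQueueName());
      }
    }
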
+ 63 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobQueueChangeEvent.java

@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.jobhistory;
+
+import org.apache.avro.util.Utf8;
+import org.apache.hadoop.mapreduce.JobID;
+
+@SuppressWarnings("deprecation")
+public class JobQueueChangeEvent implements HistoryEvent {
+  private JobQueueChange datum = new JobQueueChange();
+  
+  public JobQueueChangeEvent(JobID id, String queueName) {
+    datum.jobid = new Utf8(id.toString());
+    datum.jobQueueName = new Utf8(queueName);
+  }
+  
+  JobQueueChangeEvent() { }
+  
+  @Override
+  public EventType getEventType() {
+    return EventType.JOB_QUEUE_CHANGED;
+  }
+
+  @Override
+  public Object getDatum() {
+    return datum;
+  }
+
+  @Override
+  public void setDatum(Object datum) {
+    this.datum = (JobQueueChange) datum;
+  }
+  
+  /** Get the Job ID */
+  public JobID getJobId() {
+    return JobID.forName(datum.jobid.toString());
+  }
+  
+  /** Get the new Job queue name */
+  public String getJobQueueName() {
+    if (datum.jobQueueName != null) {
+      return datum.jobQueueName.toString();
+    }
+    return null;
+  }
+
+}

+ 7 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/org/apache/hadoop/mapreduce/JobCounter.properties

@@ -25,9 +25,15 @@ DATA_LOCAL_MAPS.name=              Data-local map tasks
 RACK_LOCAL_MAPS.name=              Rack-local map tasks
 SLOTS_MILLIS_MAPS.name=            Total time spent by all maps in occupied slots (ms)
 SLOTS_MILLIS_REDUCES.name=         Total time spent by all reduces in occupied slots (ms)
+MILLIS_MAPS.name=                  Total time spent by all map tasks (ms)
+MILLIS_REDUCES.name=               Total time spent by all reduce tasks (ms)
+MB_MILLIS_MAPS.name=               Total megabyte-seconds taken by all map tasks
+MB_MILLIS_REDUCES.name=            Total megabyte-seconds taken by all reduce tasks
+VCORES_MILLIS_MAPS.name=           Total vcore-seconds taken by all map tasks
+VCORES_MILLIS_REDUCES.name=        Total vcore-seconds taken by all reduce tasks
 FALLOW_SLOTS_MILLIS_MAPS.name=     Total time spent by all maps waiting after reserving slots (ms)
 FALLOW_SLOTS_MILLIS_REDUCES.name=  Total time spent by all reduces waiting after reserving slots (ms)
 TASKS_REQ_PREEMPT.name=            Tasks that have been asked to preempt
 CHECKPOINTS.name=                  Number of checkpoints reported
 CHECKPOINT_BYTES.name=             Total amount of bytes in checkpoints
-CHECKPOINT_TIME.name=              Total time spent checkpointing (ms)
+CHECKPOINT_TIME.name=              Total time spent checkpointing (ms)

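The display names above correspond to the JobCounter values added earlier in this diff. As a rough usage sketch, a client holding a completed org.apache.hadoop.mapreduce.Job handle could read them as below; how the Job handle is obtained is left out, the class and method names are invented, and the call is assumed to happen after the job has finished.

    // Sketch of reading the new resource counters from a finished job; the
    // counter names come from the patch, everything else is illustrative.
    import org.apache.hadoop.mapreduce.Counters;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.JobCounter;

    public class ResourceCountersProbe {
      public static void print(Job job) throws Exception {
        Counters counters = job.getCounters();
        System.out.println("MB-ms, maps:         "
            + counters.findCounter(JobCounter.MB_MILLIS_MAPS).getValue());
        System.out.println("MB-ms, reduces:      "
            + counters.findCounter(JobCounter.MB_MILLIS_REDUCES).getValue());
        System.out.println("vcore-ms, maps:      "
            + counters.findCounter(JobCounter.VCORES_MILLIS_MAPS).getValue());
        System.out.println("wall-clock ms, maps: "
            + counters.findCounter(JobCounter.MILLIS_MAPS).getValue());
      }
    }
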
+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java

@@ -34,10 +34,10 @@ import javax.servlet.http.HttpServletResponse;
 import junit.framework.TestCase;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServer2;
 
 public class TestJobEndNotifier extends TestCase {
-  HttpServer server;
+  HttpServer2 server;
   URL baseUrl;
 
   @SuppressWarnings("serial")
@@ -102,7 +102,7 @@ public class TestJobEndNotifier extends TestCase {
   public void setUp() throws Exception {
     new File(System.getProperty("build.webapps", "build/webapps") + "/test"
         ).mkdirs();
-    server = new HttpServer.Builder().setName("test")
+    server = new HttpServer2.Builder().setName("test")
         .addEndpoint(URI.create("http://localhost:0"))
         .setFindPort(true).build();
     server.addServlet("delay", "/delay", DelayServlet.class);

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java

@@ -453,4 +453,9 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
     }
     return amInfos;
   }
+
+  @Override
+  public void setQueueName(String queueName) {
+    throw new UnsupportedOperationException("Can't set job's queue name in history");
+  }
 }

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java

@@ -190,5 +190,10 @@ public class PartialJob implements org.apache.hadoop.mapreduce.v2.app.job.Job {
   public List<AMInfo> getAMInfos() {
     return null;
   }
+  
+  @Override
+  public void setQueueName(String queueName) {
+    throw new UnsupportedOperationException("Can't set job's queue name in history");
+  }
 
 }

+ 40 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEvents.java

@@ -155,6 +155,41 @@ public class TestJobHistoryEvents {
     Assert.assertEquals("JobHistoryEventHandler",
         services[services.length - 1].getName());
   }
+  
+  @Test
+  public void testAssignedQueue() throws Exception {
+    Configuration conf = new Configuration();
+    MRApp app = new MRAppWithHistory(2, 1, true, this.getClass().getName(),
+        true, "assignedQueue");
+    app.submit(conf);
+    Job job = app.getContext().getAllJobs().values().iterator().next();
+    JobId jobId = job.getID();
+    LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
+    app.waitForState(job, JobState.SUCCEEDED);
+    
+    //make sure all events are flushed 
+    app.waitForState(Service.STATE.STOPPED);
+    /*
+     * Use HistoryContext to read logged events and verify the number of 
+     * completed maps 
+    */
+    HistoryContext context = new JobHistory();
+    // test start and stop states
+    ((JobHistory)context).init(conf);
+    ((JobHistory)context).start();
+    Assert.assertTrue( context.getStartTime()>0);
+    Assert.assertEquals(((JobHistory)context).getServiceState(),Service.STATE.STARTED);
+
+    // get job before stopping JobHistory
+    Job parsedJob = context.getJob(jobId);
+
+    // stop JobHistory
+    ((JobHistory)context).stop();
+    Assert.assertEquals(((JobHistory)context).getServiceState(),Service.STATE.STOPPED);
+
+    Assert.assertEquals("QueueName not correct", "assignedQueue",
+        parsedJob.getQueueName());
+  }
 
   private void verifyTask(Task task) {
     Assert.assertEquals("Task state not currect", TaskState.SUCCEEDED,
@@ -184,6 +219,11 @@ public class TestJobHistoryEvents {
       super(maps, reduces, autoComplete, testName, cleanOnStart);
     }
 
+    public MRAppWithHistory(int maps, int reduces, boolean autoComplete,
+        String testName, boolean cleanOnStart, String assignedQueue) {
+      super(maps, reduces, autoComplete, testName, cleanOnStart, assignedQueue);
+    }
+
     @Override
     protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
         AppContext context) {

Some files were not shown because too many files changed in this diff